From 7ab82534bc29f987a36330c61eed43c3a3f31c6c Mon Sep 17 00:00:00 2001 From: James Kong Date: Wed, 10 Mar 2021 18:14:08 +0800 Subject: [PATCH 001/116] Display the minPayment field for a Flux Monitor definition in the UI --- core/web/presenters/job.go | 3 ++ operator_ui/@types/core/store/models.d.ts | 48 ++++--------------- .../Jobs/generateJobSpecDefinition.test.ts | 1 + .../pages/Jobs/generateJobSpecDefinition.ts | 2 + 4 files changed, 16 insertions(+), 38 deletions(-) diff --git a/core/web/presenters/job.go b/core/web/presenters/job.go index 4fcd8257ffa..f4b21d09e24 100644 --- a/core/web/presenters/job.go +++ b/core/web/presenters/job.go @@ -4,6 +4,7 @@ import ( "time" "github.com/lib/pq" + "github.com/smartcontractkit/chainlink/core/assets" "github.com/smartcontractkit/chainlink/core/services/job" "github.com/smartcontractkit/chainlink/core/services/pipeline" "github.com/smartcontractkit/chainlink/core/store/models" @@ -54,6 +55,7 @@ type FluxMonitorSpec struct { PollTimerDisabled bool `json:"pollTimerDisabled"` IdleTimerPeriod string `json:"idleTimerPeriod"` IdleTimerDisabled bool `json:"idleTimerDisabled"` + MinPayment *assets.Link `json:"minPayment"` CreatedAt time.Time `json:"createdAt"` UpdatedAt time.Time `json:"updatedAt"` } @@ -70,6 +72,7 @@ func NewFluxMonitorSpec(spec *job.FluxMonitorSpec) *FluxMonitorSpec { PollTimerDisabled: spec.PollTimerDisabled, IdleTimerPeriod: spec.IdleTimerPeriod.String(), IdleTimerDisabled: spec.IdleTimerDisabled, + MinPayment: spec.MinPayment, CreatedAt: spec.CreatedAt, UpdatedAt: spec.UpdatedAt, } diff --git a/operator_ui/@types/core/store/models.d.ts b/operator_ui/@types/core/store/models.d.ts index a41f407e263..975f0fb9026 100644 --- a/operator_ui/@types/core/store/models.d.ts +++ b/operator_ui/@types/core/store/models.d.ts @@ -473,7 +473,7 @@ declare module 'core/store/models' { pipelineSpec: { dotDagSource: string } - schemaVersion: number; + schemaVersion: number } export type DirectRequestJobV2Spec = BaseJobSpecV2 & { @@ -489,14 +489,15 @@ declare module 'core/store/models' { type: 'fluxmonitor' fluxMonitorSpec: { contractAddress: common.Address - precision: number; - threshold: number; - absoluteThreshold: number; - idleTimerDisabled: false; - idleTimerPeriod: string; - pollTimerDisabled: false; - pollTimerPeriod: string; - createdAt: time.Time + precision: number + threshold: number + absoluteThreshold: number + idleTimerDisabled: false + idleTimerPeriod: string + pollTimerDisabled: false + pollTimerPeriod: string + minPayment: number | null + createdAt: time.Time } directRequestSpec: null offChainReportingOracleSpec: null @@ -526,35 +527,6 @@ declare module 'core/store/models' { export type JobSpecV2 = DirectRequestJobV2Spec | FluxMonitorJobV2Spec | OffChainReportingOracleJobV2Spec - // export interface JobSpecV2 { - // name: string | null - // type: string - // errors: JobSpecError[] - // offChainReportingOracleSpec: { - // contractAddress: common.Address - // p2pPeerID: string - // p2pBootstrapPeers: string[] - // isBootstrapPeer: boolean - // keyBundleID: string - // monitoringEndpoint: string - // transmitterAddress: common.Address - // observationTimeout: string - // blockchainTimeout: string - // contractConfigTrackerSubscribeInterval: string - // contractConfigTrackerPollInterval: string - // contractConfigConfirmations: number - // createdAt: time.Time - // updatedAt: time.Time - // } | null - // directRequestSpec: DirectRequestSpec | null - // fluxMonitorSpec: FluxMonitorSpec | null - // maxTaskDuration: string - // pipelineSpec: { 
- // dotDagSource: string - // } - // schemaVersion: number; - // } - export interface OcrJobRun { outputs: PipelineTaskOutput[] errors: PipelineTaskError[] diff --git a/operator_ui/src/pages/Jobs/generateJobSpecDefinition.test.ts b/operator_ui/src/pages/Jobs/generateJobSpecDefinition.test.ts index 2b9a1eb85b5..d6a252b00c8 100644 --- a/operator_ui/src/pages/Jobs/generateJobSpecDefinition.test.ts +++ b/operator_ui/src/pages/Jobs/generateJobSpecDefinition.test.ts @@ -259,6 +259,7 @@ maxTaskDuration = "10s" precision: 2, threshold: 0.5, updatedAt: '2021-02-19T16:00:01.115227+08:00', + minPayment: null, }, directRequestSpec: null, offChainReportingOracleSpec: null, diff --git a/operator_ui/src/pages/Jobs/generateJobSpecDefinition.ts b/operator_ui/src/pages/Jobs/generateJobSpecDefinition.ts index 541333c6edb..e4b870dfc1b 100644 --- a/operator_ui/src/pages/Jobs/generateJobSpecDefinition.ts +++ b/operator_ui/src/pages/Jobs/generateJobSpecDefinition.ts @@ -149,6 +149,7 @@ function generateFluxMonitorDefinition( idleTimerDisabled, pollTimerPeriod, pollTimerDisabled, + minPayment, } = fluxMonitorSpec return stringifyJobSpec({ @@ -165,6 +166,7 @@ function generateFluxMonitorDefinition( pollTimerPeriod, pollTimerDisabled, maxTaskDuration, + minPayment, observationSource: pipelineSpec.dotDagSource, }, format: JobSpecFormats.TOML, From 3114ac0b54bca3a03dd3b7e025a6d8d2e2b91e42 Mon Sep 17 00:00:00 2001 From: AndrewInsignares Date: Fri, 12 Mar 2021 09:40:55 -0500 Subject: [PATCH 002/116] api endpoint for toggle logger debug level --- .gitignore | 1 + core/web/log_controller.go | 46 ++++++++++++++++++++++++++++++++++++++ core/web/router.go | 3 +++ go.sum | 3 +++ 4 files changed, 53 insertions(+) create mode 100644 core/web/log_controller.go diff --git a/.gitignore b/.gitignore index 84aea9325da..6cfb84f80f8 100644 --- a/.gitignore +++ b/.gitignore @@ -29,6 +29,7 @@ tools/clroot/db.sqlite3-wal .DS_Store .envrc .env* +.idea # codeship *.aes diff --git a/core/web/log_controller.go b/core/web/log_controller.go new file mode 100644 index 00000000000..adcb247fc68 --- /dev/null +++ b/core/web/log_controller.go @@ -0,0 +1,46 @@ +package web + +import ( + "net/http" + + "go.uber.org/zap/zapcore" + + "github.com/smartcontractkit/chainlink/core/logger" + + "github.com/gin-gonic/gin" + "github.com/smartcontractkit/chainlink/core/services/chainlink" +) + +// LogController manages the logger config +type LogController struct { + App chainlink.Application +} + +type loglevelPatchRequest struct { + EnableDebugLog *bool `json:"isDebugEnabled"` +} + +type loglevelPatchResponse struct { + IsDebugEnabled bool `json:"isDebugEnabled"` +} + +// ToggleDebug toggles the debug log mode +func (cc *LogController) ToggleDebug(c *gin.Context) { + request := &loglevelPatchRequest{} + if err := c.ShouldBindJSON(request); err != nil { + jsonAPIError(c, http.StatusUnprocessableEntity, err) + return + } + + if *request.EnableDebugLog { + cc.App.GetStore().Config.Set("LOG_LEVEL", zapcore.DebugLevel.String()) + } else { + cc.App.GetStore().Config.Set("LOG_LEVEL", zapcore.InfoLevel.String()) + } + logger.SetLogger(cc.App.GetStore().Config.CreateProductionLogger()) + + response := &loglevelPatchResponse{ + IsDebugEnabled: *request.EnableDebugLog, + } + jsonAPIResponse(c, response, "log") +} diff --git a/core/web/router.go b/core/web/router.go index b208c4964e7..12fb354f55e 100644 --- a/core/web/router.go +++ b/core/web/router.go @@ -290,6 +290,9 @@ func v2Routes(app chainlink.Application, r *gin.RouterGroup) { authv2.GET("/jobs/:ID/runs", 
paginatedRequest(prc.Index)) authv2.GET("/jobs/:ID/runs/:runID", prc.Show) authv2.POST("/jobs/:ID/runs", prc.Create) + + lgc := LogController{app} + authv2.PATCH("/log/debug/enable", lgc.ToggleDebug) } ping := PingController{app} diff --git a/go.sum b/go.sum index 705cac4dfad..7693c7c59e9 100644 --- a/go.sum +++ b/go.sum @@ -335,6 +335,7 @@ github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -1120,6 +1121,7 @@ github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDs github.com/onsi/gomega v1.10.5 h1:7n6FEkpFmfCoo2t+YYqXH0evK+a9ICQz0xcAy9dYcaQ= github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= +github.com/openconfig/gnmi v0.0.0-20190823184014-89b2bf29312c h1:a380JP+B7xlMbEQOlha1buKhzBPXFqgFXplyWCEIGEY= github.com/openconfig/gnmi v0.0.0-20190823184014-89b2bf29312c/go.mod h1:t+O9It+LKzfOAhKTT5O0ehDix+MTqbtT0T9t+7zzOvc= github.com/openconfig/reference v0.0.0-20190727015836-8dfd928c9696/go.mod h1:ym2A+zigScwkSEb/cVQB0/ZMpU3rqiH6X7WRRsxgOGw= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= @@ -1740,6 +1742,7 @@ google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98 google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= From cb0980edbb394cac539d43839e6553a3fcaf34c9 Mon Sep 17 00:00:00 2001 From: James Kong Date: Fri, 12 Mar 2021 14:50:28 +0800 Subject: [PATCH 003/116] Flux Monitor V2 integration tests * InvalidSubmission * Deviation * NewRound * HibernationMode * AntiSpamLogic --- core/internal/cltest/cltest.go | 48 + core/internal/features_test.go | 2 +- .../fluxmonitorv2/integrations_test.go | 946 ++++++++++++++++++ 3 files changed, 995 insertions(+), 1 deletion(-) create mode 100644 core/services/fluxmonitorv2/integrations_test.go diff --git a/core/internal/cltest/cltest.go b/core/internal/cltest/cltest.go index 
fe90dedf5e8..3b9b019ad69 100644 --- a/core/internal/cltest/cltest.go +++ b/core/internal/cltest/cltest.go @@ -48,6 +48,7 @@ import ( "github.com/smartcontractkit/chainlink/core/store/presenters" "github.com/smartcontractkit/chainlink/core/utils" "github.com/smartcontractkit/chainlink/core/web" + webpresenters "github.com/smartcontractkit/chainlink/core/web/presenters" ocrtypes "github.com/smartcontractkit/libocr/offchainreporting/types" "github.com/DATA-DOG/go-txdb" @@ -834,6 +835,20 @@ func CreateJobViaWeb(t testing.TB, app *TestApplication, spec string) job.Job { return createdJob } +func CreateJobViaWeb2(t testing.TB, app *TestApplication, spec string) webpresenters.JobResource { + t.Helper() + + client := app.NewHTTPClient() + resp, cleanup := client.Post("/v2/jobs", bytes.NewBufferString(spec)) + defer cleanup() + AssertServerResponse(t, resp, http.StatusOK) + + var jobResponse webpresenters.JobResource + err := ParseJSONAPIResponse(t, resp, &jobResponse) + require.NoError(t, err) + return jobResponse +} + // CreateJobRunViaWeb creates JobRun via web using /v2/specs/ID/runs func CreateJobRunViaWeb(t testing.TB, app *TestApplication, j models.JobSpec, body ...string) models.JobRun { t.Helper() @@ -1109,6 +1124,23 @@ func WaitForSpecError(t *testing.T, store *strpkg.Store, jobID models.JobID, cou return jse } +// WaitForSpecErrorV2 polls until the passed in jobID has count number +// of job spec errors. +func WaitForSpecErrorV2(t *testing.T, store *strpkg.Store, jobID int32, count int) []job.SpecError { + t.Helper() + + g := gomega.NewGomegaWithT(t) + var jse []job.SpecError + g.Eventually(func() []job.SpecError { + err := store.DB. + Where("job_id = ?", jobID). + Find(&jse).Error + assert.NoError(t, err) + return jse + }, DBWaitTimeout, DBPollingInterval).Should(gomega.HaveLen(count)) + return jse +} + // WaitForRuns waits for the wanted number of runs then returns a slice of the JobRuns func WaitForRuns(t testing.TB, j models.JobSpec, store *strpkg.Store, want int) []models.JobRun { t.Helper() @@ -1170,6 +1202,22 @@ func AssertRunsStays(t testing.TB, j models.JobSpec, store *strpkg.Store, want i return jrs } +// AssertPipelineRunsStays asserts that the number of pipeline runs for a particular job remains at the provided values +func AssertPipelineRunsStays(t testing.TB, pipelineSpecID int32, store *strpkg.Store, want int) []pipeline.Run { + t.Helper() + g := gomega.NewGomegaWithT(t) + + var prs []pipeline.Run + g.Consistently(func() []pipeline.Run { + err := store.DB. + Where("pipeline_spec_id = ?", pipelineSpecID). + Find(&prs).Error + assert.NoError(t, err) + return prs + }, DBWaitTimeout, DBPollingInterval).Should(gomega.HaveLen(want)) + return prs +} + // WaitForRunsAtLeast waits for at least the passed number of runs to start. 
func WaitForRunsAtLeast(t testing.TB, j models.JobSpec, store *strpkg.Store, want int) { t.Helper() diff --git a/core/internal/features_test.go b/core/internal/features_test.go index 541b3f245b2..a779db0f714 100644 --- a/core/internal/features_test.go +++ b/core/internal/features_test.go @@ -790,7 +790,7 @@ func TestIntegration_FluxMonitor_Deviation(t *testing.T) { kst.On("HasAccountWithAddress", address).Return(true) kst.On("GetAccountByAddress", mock.Anything).Maybe().Return(accounts.Account{}, nil) kst.On("SignTx", mock.Anything, mock.Anything, mock.Anything).Maybe().Return(&types.Transaction{}, nil) - kst.On("Accounts").Return([]accounts.Account{}) + kst.On("Accounts").Return([]accounts.Account{{Address: address}}) app.Store.KeyStore = kst diff --git a/core/services/fluxmonitorv2/integrations_test.go b/core/services/fluxmonitorv2/integrations_test.go new file mode 100644 index 00000000000..af24deba63f --- /dev/null +++ b/core/services/fluxmonitorv2/integrations_test.go @@ -0,0 +1,946 @@ +package fluxmonitorv2_test + +import ( + "encoding/json" + "fmt" + "math/big" + "strconv" + "strings" + "sync/atomic" + "testing" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/accounts/keystore" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/crypto" + goEthereumEth "github.com/ethereum/go-ethereum/eth" + "github.com/smartcontractkit/chainlink/core/internal/cltest" + "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/flags_wrapper" + faw "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/flux_aggregator_wrapper" + "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/link_token_interface" + "github.com/smartcontractkit/chainlink/core/store/models" + "github.com/smartcontractkit/chainlink/core/store/orm" + "github.com/smartcontractkit/chainlink/core/utils" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const description = "exactly thirty-three characters!!" 
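Everything in this test file drives go-ethereum's in-memory simulated backend rather than a real node. For readers unfamiliar with that pattern, a minimal self-contained sketch follows; the funding amount and gas limit are arbitrary, and the cltest helpers used below wrap the transactor setup in their own utilities:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// One funded identity in the genesis block, mirroring newIdentity below.
	key, err := crypto.GenerateKey()
	if err != nil {
		panic(err)
	}
	auth := bind.NewKeyedTransactor(key)
	genesis := core.GenesisAlloc{auth.From: {Balance: big.NewInt(1000000000000000000)}}
	backend := backends.NewSimulatedBackend(genesis, 8000000)
	// Nothing is mined until Commit is called, which is why the tests below
	// call fa.backend.Commit() after every transaction they need to land.
	backend.Commit()
	fmt.Println("simulated chain at block", backend.Blockchain().CurrentBlock().Number())
}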
+const decimals = 8
+const fee = int64(100) // Amount paid by FA contract, in LINK-wei
+const faTimeout = uint32(1)
+
+var pollTimerPeriod = 200 * time.Millisecond // if failing due to timeouts, increase this
+var oneEth = big.NewInt(1000000000000000000)
+var emptyList = []common.Address{}
+
+// fluxAggregatorUniverse represents the universe with which the aggregator
+// contract interacts
+type fluxAggregatorUniverse struct {
+	key                       models.Key
+	aggregatorContract        *faw.FluxAggregator
+	aggregatorContractAddress common.Address
+	linkContract              *link_token_interface.LinkToken
+	flagsContract             *flags_wrapper.Flags
+	flagsContractAddress      common.Address
+	// Abstract representation of the ethereum blockchain
+	backend       *backends.SimulatedBackend
+	aggregatorABI abi.ABI
+	// Cast of participants
+	sergey  *bind.TransactOpts // Owns all the LINK initially
+	neil    *bind.TransactOpts // Node operator Flux Monitor Oracle
+	ned     *bind.TransactOpts // Node operator Flux Monitor Oracle
+	nallory *bind.TransactOpts // Node operator Flux Monitor Oracle running this node
+}
+
+// newIdentity returns a go-ethereum abstraction of an ethereum account for
+// interacting with contract golang wrappers
+func newIdentity(t *testing.T) *bind.TransactOpts {
+	key, err := crypto.GenerateKey()
+	require.NoError(t, err, "failed to generate ethereum identity")
+	return cltest.MustNewSimulatedBackendKeyedTransactor(t, key)
+}
+
+type fluxAggregatorUniverseConfig struct {
+	MinSubmission *big.Int
+	MaxSubmission *big.Int
+}
+
+func WithMinMaxSubmission(min, max *big.Int) func(cfg *fluxAggregatorUniverseConfig) {
+	return func(cfg *fluxAggregatorUniverseConfig) {
+		cfg.MinSubmission = min
+		cfg.MaxSubmission = max
+	}
+}
+
+// setupFluxAggregatorUniverse returns a fully initialized fluxAggregator universe. The
+// arguments match the arguments of the same name in the FluxAggregator
+// constructor.
+func setupFluxAggregatorUniverse(t *testing.T, configOptions ...func(cfg *fluxAggregatorUniverseConfig)) fluxAggregatorUniverse { + cfg := &fluxAggregatorUniverseConfig{ + MinSubmission: big.NewInt(0), + MaxSubmission: big.NewInt(100000000000), + } + + for _, optFn := range configOptions { + optFn(cfg) + } + + key := cltest.MustGenerateRandomKey(t) + k, err := keystore.DecryptKey(key.JSON.Bytes(), cltest.Password) + require.NoError(t, err) + oracleTransactor := cltest.MustNewSimulatedBackendKeyedTransactor(t, k.PrivateKey) + + var f fluxAggregatorUniverse + f.key = key + f.sergey = newIdentity(t) + f.neil = newIdentity(t) + f.ned = newIdentity(t) + f.nallory = oracleTransactor + genesisData := core.GenesisAlloc{ + f.sergey.From: {Balance: oneEth}, + f.neil.From: {Balance: oneEth}, + f.ned.From: {Balance: oneEth}, + f.nallory.From: {Balance: oneEth}, + } + gasLimit := goEthereumEth.DefaultConfig.Miner.GasCeil * 2 + f.backend = backends.NewSimulatedBackend(genesisData, gasLimit) + + f.aggregatorABI, err = abi.JSON(strings.NewReader(faw.FluxAggregatorABI)) + require.NoError(t, err, "could not parse FluxAggregator ABI") + + var linkAddress common.Address + linkAddress, _, f.linkContract, err = link_token_interface.DeployLinkToken(f.sergey, f.backend) + require.NoError(t, err, "failed to deploy link contract to simulated ethereum blockchain") + + f.flagsContractAddress, _, f.flagsContract, err = flags_wrapper.DeployFlags(f.sergey, f.backend, f.sergey.From) + require.NoError(t, err, "failed to deploy flags contract to simulated ethereum blockchain") + + f.backend.Commit() + + // FluxAggregator contract subtracts timeout from block timestamp, which will + // be less than the timeout, leading to a SafeMath error. Wait for longer than + // the timeout... Golang is unpleasant about mixing int64 and time.Duration in + // arithmetic operations, so do everything as int64 and then convert. + waitTimeMs := int64(faTimeout * 5000) + time.Sleep(time.Duration((waitTimeMs + waitTimeMs/20) * int64(time.Millisecond))) + oldGasLimit := f.sergey.GasLimit + f.sergey.GasLimit = gasLimit + f.aggregatorContractAddress, _, f.aggregatorContract, err = faw.DeployFluxAggregator( + f.sergey, + f.backend, + linkAddress, + big.NewInt(fee), + faTimeout, + common.Address{}, + cfg.MinSubmission, + cfg.MaxSubmission, + decimals, + description, + ) + f.backend.Commit() // Must commit contract to chain before we can fund with LINK + require.NoError(t, err, "failed to deploy FluxAggregator contract to simulated ethereum blockchain") + + f.sergey.GasLimit = oldGasLimit + + _, err = f.linkContract.Transfer(f.sergey, f.aggregatorContractAddress, oneEth) // Actually, LINK + require.NoError(t, err, "failed to fund FluxAggregator contract with LINK") + + _, err = f.aggregatorContract.UpdateAvailableFunds(f.sergey) + require.NoError(t, err, "failed to update aggregator's availableFunds field") + + f.backend.Commit() + availableFunds, err := f.aggregatorContract.AvailableFunds(nil) + require.NoError(t, err, "failed to retrieve AvailableFunds") + require.Equal(t, availableFunds, oneEth) + + ilogs, err := f.aggregatorContract.FilterAvailableFundsUpdated(nil, []*big.Int{oneEth}) + require.NoError(t, err, "failed to gather AvailableFundsUpdated logs") + + logs := cltest.GetLogs(t, nil, ilogs) + require.Len(t, logs, 1, "a single AvailableFundsUpdated log should be emitted") + + return f +} + +// watchSubmissionReceived creates a channel which sends the log when a +// submission is received. 
When event appears on submissionReceived, +// it indicates that flux monitor job run is complete. +// +// It will only watch for logs from addresses that are provided +func (fau fluxAggregatorUniverse) WatchSubmissionReceived(t *testing.T, addresses []common.Address) chan *faw.FluxAggregatorSubmissionReceived { + submissionReceived := make(chan *faw.FluxAggregatorSubmissionReceived) + subscription, err := fau.aggregatorContract.WatchSubmissionReceived( + nil, + submissionReceived, + []*big.Int{}, + []uint32{}, + addresses, + ) + require.NoError(t, err, "failed to subscribe to SubmissionReceived events") + t.Cleanup(subscription.Unsubscribe) + + return submissionReceived +} + +func setupApplication( + t *testing.T, + fa fluxAggregatorUniverse, + setConfig func(cfg *orm.Config), +) *cltest.TestApplication { + config, cfgCleanup := cltest.NewConfig(t) + setConfig(config.Config) + + t.Cleanup(cfgCleanup) + + app, cleanup := cltest.NewApplicationWithConfigAndKeyOnSimulatedBlockchain(t, config, fa.backend, fa.key) + t.Cleanup(cleanup) + + return app +} + +// checkOraclesAdded asserts that the correct logs were emitted for each oracle added +func checkOraclesAdded(t *testing.T, f fluxAggregatorUniverse, oracleList []common.Address) { + iaddedLogs, err := f.aggregatorContract.FilterOraclePermissionsUpdated(nil, oracleList, []bool{true}) + require.NoError(t, err, "failed to gather OraclePermissionsUpdated logs") + + addedLogs := cltest.GetLogs(t, nil, iaddedLogs) + require.Len(t, addedLogs, len(oracleList), "should have log for each oracle") + + iadminLogs, err := f.aggregatorContract.FilterOracleAdminUpdated(nil, oracleList, oracleList) + require.NoError(t, err, "failed to gather OracleAdminUpdated logs") + + adminLogs := cltest.GetLogs(t, nil, iadminLogs) + require.Len(t, adminLogs, len(oracleList), "should have log for each oracle") + + for oracleIdx, oracle := range oracleList { + require.Equal(t, oracle, addedLogs[oracleIdx].(*faw.FluxAggregatorOraclePermissionsUpdated).Oracle, "log for wrong oracle emitted") + require.Equal(t, oracle, adminLogs[oracleIdx].(*faw.FluxAggregatorOracleAdminUpdated).Oracle, "log for wrong oracle emitted") + } +} + +func generatePriceResponseFn(price *int64) func() string { + return func() string { + return fmt.Sprintf(`{"data":{"result": %d}}`, atomic.LoadInt64(price)) + } +} + +type answerParams struct { + fa *fluxAggregatorUniverse + roundId, answer int64 + from *bind.TransactOpts + isNewRound, completesAnswer bool +} + +// checkSubmission verifies all the logs emitted by fa's FluxAggregator +// contract after an updateAnswer with the given values. +func checkSubmission(t *testing.T, p answerParams, currentBalance int64, receiptBlock uint64) { + t.Helper() + if receiptBlock == 0 { + receiptBlock = p.fa.backend.Blockchain().CurrentBlock().Number().Uint64() + } + blockRange := &bind.FilterOpts{Start: 0, End: &receiptBlock} + + // Could filter for the known values here, but while that would be more + // succinct it leads to less informative error messages... Did the log not + // appear at all, or did it just have a wrong value? 
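cltest.GetLogs, used for each of the filters below, presumably drains an abigen-generated log iterator into the destination slice. A generic sketch of that drain loop, with a stand-in iterator type since the generated FluxAggregator types are not reproduced here:

package main

import "fmt"

// fakeIterator mirrors the Next/Event/Error shape of go-ethereum abigen
// log iterators (e.g. FluxAggregatorSubmissionReceivedIterator), with a
// plain int payload so the sketch stays self-contained.
type fakeIterator struct {
	events []int
	pos    int
	Event  int
}

func (it *fakeIterator) Next() bool {
	if it.pos >= len(it.events) {
		return false
	}
	it.Event = it.events[it.pos]
	it.pos++
	return true
}

func (it *fakeIterator) Error() error { return nil }

func main() {
	it := &fakeIterator{events: []int{10, 20, 30}}
	var logs []int
	for it.Next() { // drain every matching log the filter returned
		logs = append(logs, it.Event)
	}
	if err := it.Error(); err != nil {
		panic(err)
	}
	fmt.Println(logs) // [10 20 30]
}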
+	ilogs, err := p.fa.aggregatorContract.FilterSubmissionReceived(
+		blockRange,
+		[]*big.Int{big.NewInt(p.answer)},
+		[]uint32{uint32(p.roundId)},
+		[]common.Address{p.from.From},
+	)
+	require.NoError(t, err, "failed to get SubmissionReceived logs")
+
+	var srlogs []*faw.FluxAggregatorSubmissionReceived
+	_ = cltest.GetLogs(t, &srlogs, ilogs)
+	require.Len(t, srlogs, 1, "FluxAggregator did not emit correct "+
+		"SubmissionReceived log")
+
+	inrlogs, err := p.fa.aggregatorContract.FilterNewRound(
+		blockRange, []*big.Int{big.NewInt(p.roundId)}, []common.Address{p.from.From},
+	)
+	require.NoError(t, err, "failed to get NewRound logs")
+
+	if p.isNewRound {
+		var nrlogs []*faw.FluxAggregatorNewRound
+		cltest.GetLogs(t, &nrlogs, inrlogs)
+		require.Len(t, nrlogs, 1, "FluxAggregator did not emit correct NewRound "+
+			"log")
+	} else {
+		assert.Len(t, cltest.GetLogs(t, nil, inrlogs), 0, "FluxAggregator emitted "+
+			"unexpected NewRound log")
+	}
+
+	iaflogs, err := p.fa.aggregatorContract.FilterAvailableFundsUpdated(
+		blockRange, []*big.Int{big.NewInt(currentBalance - fee)},
+	)
+	require.NoError(t, err, "failed to get AvailableFundsUpdated logs")
+	var aflogs []*faw.FluxAggregatorAvailableFundsUpdated
+	_ = cltest.GetLogs(t, &aflogs, iaflogs)
+	assert.Len(t, aflogs, 1, "FluxAggregator did not emit correct "+
+		"AvailableFundsUpdated log")
+
+	iaulogs, err := p.fa.aggregatorContract.FilterAnswerUpdated(blockRange,
+		[]*big.Int{big.NewInt(p.answer)}, []*big.Int{big.NewInt(p.roundId)},
+	)
+	require.NoError(t, err, "failed to get AnswerUpdated logs")
+	if p.completesAnswer {
+		var aulogs []*faw.FluxAggregatorAnswerUpdated
+		_ = cltest.GetLogs(t, &aulogs, iaulogs)
+		// XXX: sometimes this log is repeated; don't know why...
+		assert.NotEmpty(t, aulogs, "FluxAggregator did not emit correct "+
+			"AnswerUpdated log")
+	}
+}
+
+// currentBalance returns the current balance of fa's FluxAggregator
+func currentBalance(t *testing.T, fa *fluxAggregatorUniverse) *big.Int {
+	currentBalance, err := fa.aggregatorContract.AvailableFunds(nil)
+	require.NoError(t, err, "failed to get current FA balance")
+	return currentBalance
+}
+
+// submitAnswer simulates a call to fa's FluxAggregator contract from a fake
+// node (neil or ned), with the given roundId and answer, and checks that all
+// the logs emitted by the contract are correct
+func submitAnswer(t *testing.T, p answerParams) {
+	cb := currentBalance(t, p.fa)
+
+	// used to ensure that the simulated backend has processed the submission,
+	// before we search for the log and check it.
+	srCh := make(chan *faw.FluxAggregatorSubmissionReceived)
+	fromBlock := uint64(0)
+	srSubscription, err := p.fa.aggregatorContract.WatchSubmissionReceived(
+		&bind.WatchOpts{Start: &fromBlock},
+		srCh,
+		[]*big.Int{big.NewInt(p.answer)},
+		[]uint32{uint32(p.roundId)},
+		[]common.Address{p.from.From},
+	)
+	defer func() {
+		srSubscription.Unsubscribe()
+		err = <-srSubscription.Err()
+		require.NoError(t, err, "failed to unsubscribe from SubmissionReceived logs")
+	}()
+
+	_, err = p.fa.aggregatorContract.Submit(
+		p.from, big.NewInt(p.roundId), big.NewInt(p.answer),
+	)
+	require.NoError(t, err, "failed to submit answer to flux aggregator")
+
+	p.fa.backend.Commit()
+
+	select {
+	case <-srCh:
+	case <-time.After(5 * time.Second):
+		t.Fatalf("failed to complete submission to flux aggregator")
+	}
+	checkSubmission(t, p, cb.Int64(), 0)
+}
+
+func awaitSubmission(t *testing.T, submissionReceived chan *faw.FluxAggregatorSubmissionReceived) (
+	receiptBlock uint64, answer int64,
+) {
+	select { // block until FluxAggregator contract acknowledges chainlink message
+	case log := <-submissionReceived:
+		return log.Raw.BlockNumber, log.Submission.Int64()
+	case <-time.After(20 * pollTimerPeriod):
+		t.Fatalf("chainlink failed to submit answer to FluxAggregator contract")
+		return 0, 0 // unreachable
+	}
+}
+
+// assertNoSubmission asserts that no submission was sent for a given duration
+func assertNoSubmission(t *testing.T,
+	submissionReceived chan *faw.FluxAggregatorSubmissionReceived,
+	duration time.Duration,
+	msg string,
+) {
+	select {
+	case <-submissionReceived:
+		assert.Fail(t, msg)
+	case <-time.After(duration):
+	}
+}
+
+func TestFluxMonitor_Deviation(t *testing.T) {
+	fa := setupFluxAggregatorUniverse(t)
+
+	// - add oracles
+	oracleList := []common.Address{fa.nallory.From}
+	_, err := fa.aggregatorContract.ChangeOracles(fa.sergey, emptyList, oracleList, oracleList, 1, 1, 0)
+	assert.NoError(t, err, "failed to add oracles to aggregator")
+	fa.backend.Commit()
+	checkOraclesAdded(t, fa, oracleList)
+
+	// Set up chainlink app
+	app := setupApplication(t, fa, func(cfg *orm.Config) {
+		cfg.Set("DEFAULT_HTTP_TIMEOUT", "100ms")
+		cfg.Set("TRIGGER_FALLBACK_DB_POLL_INTERVAL", "1s")
+	})
+	require.NoError(t, app.StartAndConnect())
+
+	// Create mock server
+	reportPrice := int64(100)
+	mockServer := cltest.NewHTTPMockServerWithAlterableResponse(t,
+		generatePriceResponseFn(&reportPrice),
+	)
+	t.Cleanup(mockServer.Close)
+
+	// When event appears on submissionReceived, flux monitor job run is complete
+	submissionReceived := fa.WatchSubmissionReceived(t,
+		[]common.Address{fa.nallory.From},
+	)
+
+	// Create the job
+	s := `
+	type = "fluxmonitor"
+	schemaVersion = 1
+	name = "integration test"
+	contractAddress = "%s"
+	precision = 0
+	threshold = 2.0
+	absoluteThreshold = 0.0
+
+	idleTimerPeriod = "1s"
+	idleTimerDisabled = false
+
+	pollTimerPeriod = "%s"
+	pollTimerDisabled = false
+
+	observationSource = """
+	ds1 [type=http method=GET url="%s"];
+	ds1_parse [type=jsonparse path="data,result"];
+
+	ds1 -> ds1_parse
+	"""
+	`
+
+	s = fmt.Sprintf(s, fa.aggregatorContractAddress, pollTimerPeriod, mockServer.URL)
+
+	requestBody, err := json.Marshal(models.CreateJobSpecRequest{
+		TOML: string(s),
+	})
+	assert.NoError(t, err)
+
+	initialBalance := currentBalance(t, &fa).Int64()
+
+	cltest.CreateJobViaWeb2(t, app, string(requestBody))
+
+	// Initial Poll
+	receiptBlock, answer := awaitSubmission(t, submissionReceived)
+
+	assert.Equal(t, atomic.LoadInt64(&reportPrice), answer,
+		"failed to report correct price to contract")
+	checkSubmission(t,
+		answerParams{
+			fa:              &fa,
+			roundId:         1,
+			answer:          int64(100),
+			from:            fa.nallory,
+			isNewRound:      true,
+			completesAnswer: true,
+		},
+		initialBalance,
+		receiptBlock,
+	)
+
+	// Change reported price to a value outside the deviation
+	reportPrice = int64(103)
+	receiptBlock, answer = awaitSubmission(t, submissionReceived)
+
+	assert.Equal(t, atomic.LoadInt64(&reportPrice), answer,
+		"failed to report correct price to contract")
+	checkSubmission(t,
+		answerParams{
+			fa:              &fa,
+			roundId:         2,
+			answer:          int64(103),
+			from:            fa.nallory,
+			isNewRound:      true,
+			completesAnswer: true,
+		},
+		initialBalance-fee,
+		receiptBlock,
+	)
+
+	// Should not receive a submission as it is inside the deviation
+	reportPrice = int64(104)
+	assertNoSubmission(t, submissionReceived, 2*time.Second, "Should not receive a submission")
+}
+
+func TestFluxMonitor_NewRound(t *testing.T) {
+	fa := setupFluxAggregatorUniverse(t)
+
+	// - add oracles
+	oracleList := []common.Address{fa.neil.From, fa.nallory.From}
+	_, err := fa.aggregatorContract.ChangeOracles(fa.sergey, emptyList, oracleList, oracleList, 1, 2, 1)
+	assert.NoError(t, err, "failed to add oracles to aggregator")
+	fa.backend.Commit()
+	checkOraclesAdded(t, fa, oracleList)
+
+	// Set up chainlink app
+	app := setupApplication(t, fa, func(cfg *orm.Config) {
+		cfg.Set("DEFAULT_HTTP_TIMEOUT", "100ms")
+		cfg.Set("FLAGS_CONTRACT_ADDRESS", fa.flagsContractAddress.Hex())
+		cfg.Set("TRIGGER_FALLBACK_DB_POLL_INTERVAL", "1s")
+	})
+	require.NoError(t, app.StartAndConnect())
+
+	initialBalance := currentBalance(t, &fa).Int64()
+
+	// Create mock server
+	reportPrice := int64(1)
+	mockServer := cltest.NewHTTPMockServerWithAlterableResponse(t,
+		generatePriceResponseFn(&reportPrice),
+	)
+	t.Cleanup(mockServer.Close)
+
+	// When event appears on submissionReceived, flux monitor job run is complete
+	submissionReceived := fa.WatchSubmissionReceived(t,
+		[]common.Address{fa.nallory.From},
+	)
+
+	// Create the job
+	s := `
+type = "fluxmonitor"
+schemaVersion = 1
+name = "example flux monitor spec"
+contractAddress = "%s"
+precision = 2
+threshold = 0.5
+absoluteThreshold = 0.0
+
+idleTimerPeriod = "1s"
+idleTimerDisabled = false
+
+pollTimerPeriod = "%s"
+pollTimerDisabled = false
+
+observationSource = """
+ds1 [type=http method=GET url="%s"];
+ds1_parse [type=jsonparse path="data,result"];
+
+ds1 -> ds1_parse
+"""
+	`
+
+	s = fmt.Sprintf(s, fa.aggregatorContractAddress, pollTimerPeriod, mockServer.URL)
+
+	// raise flags to disable polling
+	fa.flagsContract.RaiseFlag(fa.sergey, utils.ZeroAddress) // global kill switch
+	fa.flagsContract.RaiseFlag(fa.sergey, fa.aggregatorContractAddress)
+	fa.backend.Commit()
+
+	requestBody, err := json.Marshal(models.CreateJobSpecRequest{
+		TOML: string(s),
+	})
+	assert.NoError(t, err)
+
+	cltest.CreateJobViaWeb2(t, app, string(requestBody))
+
+	// Have the fake node start a new round
+	submitAnswer(t, answerParams{
+		fa:              &fa,
+		roundId:         1,
+		answer:          2,
+		from:            fa.neil,
+		isNewRound:      true,
+		completesAnswer: false,
+	})
+
+	// Wait for the node's submission, and ensure it submits to the round
+	// started by the fake node
+	receiptBlock, _ := awaitSubmission(t, submissionReceived)
+	checkSubmission(t,
+		answerParams{
+			fa:              &fa,
+			roundId:         1,
+			answer:          int64(1),
+			from:            fa.nallory,
+			isNewRound:      false,
+			completesAnswer: true,
+		},
+		initialBalance-fee,
+		receiptBlock,
+	)
+}
+
+func TestFluxMonitor_HibernationMode(t
*testing.T) { + fa := setupFluxAggregatorUniverse(t) + + // - add oracles + oracleList := []common.Address{fa.nallory.From} + _, err := fa.aggregatorContract.ChangeOracles(fa.sergey, emptyList, oracleList, oracleList, 1, 1, 0) + assert.NoError(t, err, "failed to add oracles to aggregator") + fa.backend.Commit() + checkOraclesAdded(t, fa, oracleList) + + // Set up chainlink app + app := setupApplication(t, fa, func(cfg *orm.Config) { + cfg.Set("DEFAULT_HTTP_TIMEOUT", "100ms") + cfg.Set("FLAGS_CONTRACT_ADDRESS", fa.flagsContractAddress.Hex()) + cfg.Set("TRIGGER_FALLBACK_DB_POLL_INTERVAL", "1s") + }) + require.NoError(t, app.StartAndConnect()) + + // Create mock server + reportPrice := int64(1) + mockServer := cltest.NewHTTPMockServerWithAlterableResponse(t, + generatePriceResponseFn(&reportPrice), + ) + t.Cleanup(mockServer.Close) + + // When event appears on submissionReceived, flux monitor job run is complete + submissionReceived := fa.WatchSubmissionReceived(t, + []common.Address{fa.nallory.From}, + ) + + // Create the job + s := ` +type = "fluxmonitor" +schemaVersion = 1 +name = "example flux monitor spec" +contractAddress = "%s" +precision = 0 +threshold = 0.5 +absoluteThreshold = 0.0 + +idleTimerPeriod = "1s" +idleTimerDisabled = false + +pollTimerPeriod = "%s" +pollTimerDisabled = false + +observationSource = """ +ds1 [type=http method=GET url="%s"]; +ds1_parse [type=jsonparse path="data,result"]; + +ds1 -> ds1_parse +""" + ` + + s = fmt.Sprintf(s, fa.aggregatorContractAddress, "1000ms", mockServer.URL) + + // raise flags + fa.flagsContract.RaiseFlag(fa.sergey, utils.ZeroAddress) // global kill switch + fa.flagsContract.RaiseFlag(fa.sergey, fa.aggregatorContractAddress) + fa.backend.Commit() + + requestBody, err := json.Marshal(models.CreateJobSpecRequest{ + TOML: string(s), + }) + assert.NoError(t, err) + + j := cltest.CreateJobViaWeb2(t, app, string(requestBody)) + + // node doesn't submit initial response, because flag is up + // Wait here so the next lower flags doesn't trigger immediately + cltest.AssertPipelineRunsStays(t, j.PipelineSpec.ID, app.Store, 0) + + // lower global kill switch flag - should trigger job run + fa.flagsContract.LowerFlags(fa.sergey, []common.Address{utils.ZeroAddress}) + fa.backend.Commit() + awaitSubmission(t, submissionReceived) + + reportPrice = int64(2) // change in price should trigger run + awaitSubmission(t, submissionReceived) + + // lower contract's flag - should have no effect (but currently does) + // TODO - https://www.pivotaltracker.com/story/show/175419789 + fa.flagsContract.LowerFlags(fa.sergey, []common.Address{fa.aggregatorContractAddress}) + fa.backend.Commit() + awaitSubmission(t, submissionReceived) + + // change in price should trigger run + reportPrice = int64(4) + awaitSubmission(t, submissionReceived) + + // raise both flags + fa.flagsContract.RaiseFlag(fa.sergey, fa.aggregatorContractAddress) + fa.flagsContract.RaiseFlag(fa.sergey, utils.ZeroAddress) + fa.backend.Commit() + + // wait for FM to receive flags raised logs + assert.Eventually(t, func() bool { + ilogs, err := fa.flagsContract.FilterFlagRaised(nil, []common.Address{}) + require.NoError(t, err) + logs := cltest.GetLogs(t, nil, ilogs) + return len(logs) == 4 + }, 7*time.Second, 100*time.Millisecond) + + // change in price should not trigger run + reportPrice = int64(8) + assertNoSubmission(t, submissionReceived, 5*time.Second, "should not trigger a new run, while flag is raised") +} + +func TestFluxMonitor_InvalidSubmission(t *testing.T) { + // 8 decimals places used for 
prices. + fa := setupFluxAggregatorUniverse(t, WithMinMaxSubmission( + big.NewInt(100000000), // 1 * 10^8 + big.NewInt(1000000000000), // 10000 * 10^8 + )) + + oracleList := []common.Address{fa.neil.From, fa.ned.From, fa.nallory.From} + _, err := fa.aggregatorContract.ChangeOracles(fa.sergey, emptyList, oracleList, oracleList, 1, 3, 2) + assert.NoError(t, err, "failed to add oracles to aggregator") + fa.backend.Commit() + + // Set up chainlink app + app := setupApplication(t, fa, func(cfg *orm.Config) { + cfg.Set("DEFAULT_HTTP_TIMEOUT", "100ms") + cfg.Set("TRIGGER_FALLBACK_DB_POLL_INTERVAL", "1s") + cfg.Set("MIN_OUTGOING_CONFIRMATIONS", "2") + cfg.Set("MIN_OUTGOING_CONFIRMATIONS", "2") + cfg.Set("ETH_HEAD_TRACKER_MAX_BUFFER_SIZE", "100") + }) + require.NoError(t, app.StartAndConnect()) + + // Report a price that is above the maximum allowed value, + // causing it to revert. + reportPrice := int64(10001) // 10001 ETH/USD price is outside the range. + mockServer := cltest.NewHTTPMockServerWithAlterableResponse(t, + generatePriceResponseFn(&reportPrice), + ) + t.Cleanup(mockServer.Close) + + // Generate custom TOML for this test due to precision change + toml := ` +type = "fluxmonitor" +schemaVersion = 1 +name = "example flux monitor spec" +contractAddress = "%s" +precision = %d +threshold = 0.5 +absoluteThreshold = 0.01 + +idleTimerPeriod = "1h" +idleTimerDisabled = false + +pollTimerPeriod = "%s" +pollTimerDisabled = false + +observationSource = """ +ds1 [type=http method=GET url="%s"]; +ds1_parse [type=jsonparse path="data,result"]; + +ds1 -> ds1_parse +""" +` + + s := fmt.Sprintf(toml, fa.aggregatorContractAddress, 8, "100ms", mockServer.URL) + + // raise flags + fa.flagsContract.RaiseFlag(fa.sergey, utils.ZeroAddress) // global kill switch + fa.flagsContract.RaiseFlag(fa.sergey, fa.aggregatorContractAddress) + fa.backend.Commit() + + requestBody, err := json.Marshal(models.CreateJobSpecRequest{ + TOML: string(s), + }) + assert.NoError(t, err) + + j := cltest.CreateJobViaWeb2(t, app, string(requestBody)) + + go func() { + for { + fa.backend.Commit() + time.Sleep(500 * time.Millisecond) + } + }() + + // We should see a spec error because the value is too large to submit on-chain. + jobID, err := strconv.ParseInt(j.ID, 10, 32) + require.NoError(t, err) + + jse := cltest.WaitForSpecErrorV2(t, app.Store, int32(jobID), 1) + assert.Contains(t, jse[0].Description, "Answer is outside acceptable range") +} + +func TestFluxMonitorAntiSpamLogic(t *testing.T) { + // - deploy a brand new FM contract + fa := setupFluxAggregatorUniverse(t) + + // - add oracles + oracleList := []common.Address{fa.neil.From, fa.ned.From, fa.nallory.From} + _, err := fa.aggregatorContract.ChangeOracles(fa.sergey, emptyList, oracleList, oracleList, 2, 3, 2) + assert.NoError(t, err, "failed to add oracles to aggregator") + fa.backend.Commit() + checkOraclesAdded(t, fa, oracleList) + + // Set up chainlink app + app := setupApplication(t, fa, func(cfg *orm.Config) { + cfg.Set("DEFAULT_HTTP_TIMEOUT", "100ms") + cfg.Set("TRIGGER_FALLBACK_DB_POLL_INTERVAL", "1s") + }) + require.NoError(t, app.StartAndConnect()) + + minFee := app.Store.Config.MinimumContractPayment().ToInt().Int64() + require.Equal(t, fee, minFee, "fee paid by FluxAggregator (%d) must at "+ + "least match MinimumContractPayment (%s). (Which is currently set in "+ + "cltest.go.)", fee, minFee) + + answer := int64(1) // Answer the nodes give on the first round + + //- have one of the fake nodes start a round. 
+	roundId := int64(1)
+	processedAnswer := answer * 100 /* job has multiply times 100 */
+	submitAnswer(t, answerParams{
+		fa:              &fa,
+		roundId:         roundId,
+		answer:          processedAnswer,
+		from:            fa.neil,
+		isNewRound:      true,
+		completesAnswer: false,
+	})
+
+	// - successfully close the round through the submissions of the other nodes
+	// Response by malicious chainlink node, nallory
+	initialBalance := currentBalance(t, &fa).Int64()
+	reportPrice := answer
+	priceResponse := func() string {
+		return fmt.Sprintf(`{"data":{"result": %d}}`, atomic.LoadInt64(&reportPrice))
+	}
+	mockServer := cltest.NewHTTPMockServerWithAlterableResponse(t, priceResponse)
+	t.Cleanup(mockServer.Close)
+
+	// When event appears on submissionReceived, flux monitor job run is complete
+	submissionReceived := fa.WatchSubmissionReceived(t,
+		[]common.Address{fa.nallory.From},
+	)
+
+	// Create FM Job, and wait for job run to start (the above submitAnswer call
+	// to FluxAggregator contract initiates a run.)
+	s := `
+type = "fluxmonitor"
+schemaVersion = 1
+name = "example flux monitor spec"
+contractAddress = "%s"
+precision = 2
+threshold = 0.5
+absoluteThreshold = 0.0
+
+idleTimerPeriod = "1s"
+idleTimerDisabled = false
+
+pollTimerPeriod = "%s"
+pollTimerDisabled = false
+
+observationSource = """
+ds1 [type=http method=GET url="%s"];
+ds1_parse [type=jsonparse path="data,result"];
+ds1_multiply [type=multiply times=100]
+
+ds1 -> ds1_parse -> ds1_multiply
+"""
+	`
+
+	s = fmt.Sprintf(s, fa.aggregatorContractAddress, "200ms", mockServer.URL)
+	requestBody, err := json.Marshal(models.CreateJobSpecRequest{
+		TOML: string(s),
+	})
+	assert.NoError(t, err)
+
+	cltest.CreateJobViaWeb2(t, app, string(requestBody))
+
+	receiptBlock, answer := awaitSubmission(t, submissionReceived)
+
+	assert.Equal(t, 100*atomic.LoadInt64(&reportPrice), answer,
+		"failed to report correct price to contract")
+	checkSubmission(t,
+		answerParams{
+			fa:              &fa,
+			roundId:         roundId,
+			answer:          processedAnswer,
+			from:            fa.nallory,
+			isNewRound:      false,
+			completesAnswer: true},
+		initialBalance,
+		receiptBlock,
+	)
+
+	//- have the malicious node start the next round.
+	nextRoundBalance := initialBalance - fee
+	// Triggers a new round, since price deviation exceeds threshold
+	atomic.StoreInt64(&reportPrice, answer+1)
+
+	receiptBlock, _ = awaitSubmission(t, submissionReceived)
+	newRound := roundId + 1
+	processedAnswer = 100 * atomic.LoadInt64(&reportPrice)
+	checkSubmission(t,
+		answerParams{
+			fa:              &fa,
+			roundId:         newRound,
+			answer:          processedAnswer,
+			from:            fa.nallory,
+			isNewRound:      true,
+			completesAnswer: false},
+		nextRoundBalance,
+		receiptBlock,
+	)
+
+	// Successfully close the round through the submissions of the other nodes
+	submitAnswer(t,
+		answerParams{
+			fa:              &fa,
+			roundId:         newRound,
+			answer:          processedAnswer,
+			from:            fa.neil,
+			isNewRound:      false,
+			completesAnswer: true},
+	)
+
+	// Have the malicious node try to start another round. It should not pass as
+	// restartDelay has not been reached.
+	newRound = newRound + 1
+	processedAnswer = 100 * atomic.LoadInt64(&reportPrice)
+
+	submitMaliciousAnswer(t,
+		answerParams{
+			fa:              &fa,
+			roundId:         newRound,
+			answer:          processedAnswer,
+			from:            fa.nallory,
+			isNewRound:      true,
+			completesAnswer: false},
+	)
+
+	assertNoSubmission(t, submissionReceived, 5*pollTimerPeriod, "FA allowed chainlink node to start a new round early")
+
+	// Try to start a new round directly, should fail because of delay
+	_, err = fa.aggregatorContract.RequestNewRound(fa.nallory)
+	assert.Error(t, err, "FA allowed chainlink node to start a new round early")
+
+	//- finally, ensure it can start a legitimate round after restartDelay is
+	// reached. Start an intervening round:
+	submitAnswer(t, answerParams{fa: &fa, roundId: newRound,
+		answer: processedAnswer, from: fa.ned, isNewRound: true,
+		completesAnswer: false})
+	submitAnswer(t, answerParams{fa: &fa, roundId: newRound,
+		answer: processedAnswer, from: fa.neil, isNewRound: false,
+		completesAnswer: true})
+
+	// start a legitimate new round
+	atomic.StoreInt64(&reportPrice, reportPrice+3)
+
+	// Wait for the node's submission, and ensure it submits to the round
+	// started by the fake node
+	awaitSubmission(t, submissionReceived)
+}
+
+// submitMaliciousAnswer simulates a call to fa's FluxAggregator contract from
+// nallory, with the given roundId and answer, and asserts that the call errors
+func submitMaliciousAnswer(t *testing.T, p answerParams) {
+	_, err := p.fa.aggregatorContract.Submit(
+		p.from, big.NewInt(p.roundId), big.NewInt(p.answer),
+	)
+	require.Error(t, err)
+
+	p.fa.backend.Commit()
+}

From d12d6771d09bb1afed7ffdfdb927cd0fe8e5612e Mon Sep 17 00:00:00 2001
From: AndrewInsignares
Date: Mon, 15 Mar 2021 21:29:08 -0400
Subject: [PATCH 004/116] reducing api path to /log

---
 core/web/router.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/core/web/router.go b/core/web/router.go
index 12fb354f55e..f54ef4f974f 100644
--- a/core/web/router.go
+++ b/core/web/router.go
@@ -292,7 +292,7 @@ func v2Routes(app chainlink.Application, r *gin.RouterGroup) {
 		authv2.POST("/jobs/:ID/runs", prc.Create)
 
 		lgc := LogController{app}
-		authv2.PATCH("/log/debug/enable", lgc.ToggleDebug)
+		authv2.PATCH("/log", lgc.ToggleDebug)
 	}
 
 	ping := PingController{app}

From 05172792e22473fc3d1b6ce8d94ea9329c2646eb Mon Sep 17 00:00:00 2001
From: AndrewInsignares
Date: Mon, 15 Mar 2021 22:19:42 -0400
Subject: [PATCH 005/116] updating api response to use presenters pkg

---
 core/web/log_controller.go | 21 ++++++++++-----------
 core/web/presenters/log.go | 11 +++++++++++
 2 files changed, 21 insertions(+), 11 deletions(-)
 create mode 100644 core/web/presenters/log.go

diff --git a/core/web/log_controller.go b/core/web/log_controller.go
index adcb247fc68..1278e007a12 100644
--- a/core/web/log_controller.go
+++ b/core/web/log_controller.go
@@ -3,12 +3,11 @@ package web
 import (
 	"net/http"
 
-	"go.uber.org/zap/zapcore"
-
-	"github.com/smartcontractkit/chainlink/core/logger"
-
 	"github.com/gin-gonic/gin"
+	"github.com/smartcontractkit/chainlink/core/logger"
 	"github.com/smartcontractkit/chainlink/core/services/chainlink"
+	"github.com/smartcontractkit/chainlink/core/web/presenters"
+	"go.uber.org/zap/zapcore"
 )
 
 // LogController manages the logger config
@@ -17,11 +16,7 @@ type LogController struct {
 }
 
 type loglevelPatchRequest struct {
-	EnableDebugLog *bool `json:"isDebugEnabled"`
-}
-
-type loglevelPatchResponse struct {
-	IsDebugEnabled bool `json:"isDebugEnabled"`
+	EnableDebugLog *bool `json:"debugEnabled"`
 }
 
 // ToggleDebug toggles the debug log
mode @@ -39,8 +34,12 @@ func (cc *LogController) ToggleDebug(c *gin.Context) { } logger.SetLogger(cc.App.GetStore().Config.CreateProductionLogger()) - response := &loglevelPatchResponse{ - IsDebugEnabled: *request.EnableDebugLog, + response := &presenters.LogResource{ + JAID: presenters.JAID{ + ID: "log", + }, + DebugEnabled: *request.EnableDebugLog, } + jsonAPIResponse(c, response, "log") } diff --git a/core/web/presenters/log.go b/core/web/presenters/log.go new file mode 100644 index 00000000000..5c408545a1f --- /dev/null +++ b/core/web/presenters/log.go @@ -0,0 +1,11 @@ +package presenters + +type LogResource struct { + JAID + DebugEnabled bool `json:"debugEnabled"` +} + +// GetName implements the api2go EntityNamer interface +func (r LogResource) GetName() string { + return "logs" +} From 404eb5762bbae9f3094f08279f5deb19cd33f1b4 Mon Sep 17 00:00:00 2001 From: John Barker Date: Tue, 16 Mar 2021 10:10:07 -0600 Subject: [PATCH 006/116] Point at fork with connection reclaim fix --- go.mod | 2 ++ go.sum | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index fa130de44e1..8e097661554 100644 --- a/go.mod +++ b/go.mod @@ -71,3 +71,5 @@ require ( gorm.io/driver/sqlserver v1.0.5 // indirect gorm.io/gorm v1.20.12 ) + +replace github.com/DATA-DOG/go-txdb => github.com/smartcontractkit/go-txdb v0.1.4-0.20210313013032-3a5ba5dff784 diff --git a/go.sum b/go.sum index 705cac4dfad..bdb4d87e898 100644 --- a/go.sum +++ b/go.sum @@ -40,8 +40,6 @@ github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbt github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/DATA-DOG/go-txdb v0.1.3 h1:R4v6OuOcy2O147e2zHxU0B4NDtF+INb5R9q/CV7AEMg= -github.com/DATA-DOG/go-txdb v0.1.3/go.mod h1:DhAhxMXZpUJVGnT+p9IbzJoRKvlArO2pkHjnGX7o0n0= github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/Depado/ginprom v1.2.1-0.20200115153638-53bbba851bd8 h1:Ic3MehOyypWF/AW91Z/6FA2R2vnBzaDjRzoLmkP1DW8= github.com/Depado/ginprom v1.2.1-0.20200115153638-53bbba851bd8/go.mod h1:VHRucFf/9saDXsYg6uzQ8Oo8gUwngtWec9ZJ00H+ZCc= @@ -1266,6 +1264,8 @@ github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/smartcontractkit/chainlink v0.8.10-0.20200825114219-81dd2fc95bac/go.mod h1:j7qIYHGCN4QqMXdO8g8A9dmUT5vKFmkxPSbjAIfrfNU= github.com/smartcontractkit/chainlink v0.9.5-0.20201207211610-6c7fee37d5b7/go.mod h1:kmdLJbVZRCnBLiL6gG+U+1+0ofT3bB48DOF8tjQvcoI= +github.com/smartcontractkit/go-txdb v0.1.4-0.20210313013032-3a5ba5dff784 h1:8rMSBiLE7U01bZ2qEaLjH2e+K96nlDwM410CLa5bKzg= +github.com/smartcontractkit/go-txdb v0.1.4-0.20210313013032-3a5ba5dff784/go.mod h1:DhAhxMXZpUJVGnT+p9IbzJoRKvlArO2pkHjnGX7o0n0= github.com/smartcontractkit/libocr v0.0.0-20201203233047-5d9b24f0cbb5/go.mod h1:bfdSuLnBWCkafDvPGsQ1V6nrXhg046gh227MKi4zkpc= github.com/smartcontractkit/libocr v0.0.0-20210302210303-55a103050dc5 h1:O5Op1j4dpCAebdxoDTTrdL30wrZP1nNrvzX1GfcocsY= github.com/smartcontractkit/libocr v0.0.0-20210302210303-55a103050dc5/go.mod h1:cm4TomvY09A1mADIHeIo1dOcOVL1EeSEqga4cmCxhl4= From 8e0e76036eabea3b89d6661f7e3dfc3bfda39b52 Mon Sep 17 00:00:00 2001 From: John Barker Date: Tue, 16 
Mar 2021 10:51:09 -0600 Subject: [PATCH 007/116] Don't fatal if no err --- core/main.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/main.go b/core/main.go index ac0deda76ad..d4a63d478c2 100644 --- a/core/main.go +++ b/core/main.go @@ -31,7 +31,7 @@ func NewProductionClient() *cmd.Client { if credentialsFile := config.AdminCredentialsFile(); credentialsFile != "" { var err error sr, err = sessionRequestBuilder.Build(credentialsFile) - if errors.Cause(err) != cmd.ErrNoCredentialFile && !os.IsNotExist(err) { + if err != nil && errors.Cause(err) != cmd.ErrNoCredentialFile && !os.IsNotExist(err) { logger.Fatalw("Error loading API credentials", "error", err, "credentialsFile", credentialsFile) } } From 9d80673003ddbe6e31a90696fdf3847aef094242 Mon Sep 17 00:00:00 2001 From: John Barker Date: Tue, 16 Mar 2021 14:56:41 -0600 Subject: [PATCH 008/116] Add a small script for cleaning up test dbs --- tools/bin/clean_test_dbs | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 tools/bin/clean_test_dbs diff --git a/tools/bin/clean_test_dbs b/tools/bin/clean_test_dbs new file mode 100644 index 00000000000..ef0142187b0 --- /dev/null +++ b/tools/bin/clean_test_dbs @@ -0,0 +1,2 @@ +#!/usr/bin/env bash +psql -l --csv | awk -F, '{print$1}' | grep chainlink_test_ | xargs -n 1 -J % dropdb % From a648832df5e928a7c4d1a75c4ec92863296083a4 Mon Sep 17 00:00:00 2001 From: John Barker Date: Thu, 11 Mar 2021 15:29:37 -0700 Subject: [PATCH 009/116] Handle ctx.Err() before err Many third party libraries use this pattern: err := DoSomething(ctx) if err != nil { return err } else if ctx.Err() != nil { return ctx.Err() } Which means we cancel things like "shutting down the app" as a failure. So instead, check `ctx.Err()` first and handle as though the context was cancelled. Rather than the operation failed. Reduces "every branch on the way down" style logging when shutting down. --- core/services/head_tracker.go | 23 ++++++++++++++++++----- 1 file changed, 18 insertions(+), 5 deletions(-) diff --git a/core/services/head_tracker.go b/core/services/head_tracker.go index 5b1c97f9ab1..24b462cb754 100644 --- a/core/services/head_tracker.go +++ b/core/services/head_tracker.go @@ -222,7 +222,9 @@ func (ht *HeadTracker) Save(ctx context.Context, h models.Head) error { ht.headMutex.Unlock() err := ht.store.IdempotentInsertHead(ctx, h) - if err != nil { + if ctx.Err() != nil { + return nil + } else if err != nil { return err } return ht.store.TrimOldHeads(ht.store.Config.EthHeadTrackerHistoryDepth()) @@ -339,6 +341,9 @@ func (ht *HeadTracker) backfill(ctx context.Context, head models.Head, baseHeigh "fromBlockHeight", baseHeight, "toBlockHeight", head.Number-1) defer func() { + if ctx.Err() != nil { + return + } logger.Debugw("HeadTracker: finished backfill", "fetched", fetched, "blockNumber", head.Number, @@ -354,7 +359,9 @@ func (ht *HeadTracker) backfill(ctx context.Context, head models.Head, baseHeigh // NOTE: Sequential requests here mean it's a potential performance bottleneck, be aware! 
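The ctx.Err()-first ordering this patch applies throughout, and that the commit message above motivates, as a small self-contained sketch; doSomething here is a hypothetical stand-in for any context-aware operation, not part of the patch:

package main

import (
	"context"
	"fmt"

	"github.com/pkg/errors"
)

// doSomething is a hypothetical stand-in for any context-aware operation.
func doSomething(ctx context.Context) error {
	return ctx.Err()
}

func run(ctx context.Context) error {
	err := doSomething(ctx)
	if ctx.Err() != nil {
		// Context cancelled: the app is shutting down, so this is not an
		// operation failure worth logging or propagating.
		return nil
	} else if err != nil {
		return errors.Wrap(err, "doSomething failed")
	}
	return nil
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	fmt.Println(run(ctx)) // <nil>: cancellation handled as shutdown, not failure
}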
var existingHead *models.Head existingHead, err = ht.store.HeadByHash(ctx, head.ParentHash) - if err != nil { + if ctx.Err() != nil { + break + } else if err != nil { return errors.Wrap(err, "HeadByHash failed") } if existingHead != nil { @@ -363,7 +370,9 @@ func (ht *HeadTracker) backfill(ctx context.Context, head models.Head, baseHeigh } head, err = ht.fetchAndSaveHead(ctx, i) fetched++ - if err != nil { + if ctx.Err() != nil { + break + } else if err != nil { return errors.Wrap(err, "fetchAndSaveHead failed") } } @@ -373,7 +382,9 @@ func (ht *HeadTracker) backfill(ctx context.Context, head models.Head, baseHeigh func (ht *HeadTracker) fetchAndSaveHead(ctx context.Context, n int64) (models.Head, error) { logger.Debugw("HeadTracker: fetching head", "blockHeight", n) head, err := ht.store.EthClient.HeaderByNumber(ctx, big.NewInt(n)) - if err != nil { + if ctx.Err() != nil { + return models.Head{}, nil + } else if err != nil { return models.Head{}, err } else if head == nil { return models.Head{}, errors.New("got nil head") @@ -483,7 +494,9 @@ func (ht *HeadTracker) handleNewHighestHead(ctx context.Context, head models.Hea promCurrentHead.Set(float64(head.Number)) headWithChain, err := ht.store.Chain(ctx, head.Hash, ht.store.Config.EthFinalityDepth()) - if err != nil { + if ctx.Err() != nil { + return nil + } else if err != nil { return errors.Wrap(err, "HeadTracker#handleNewHighestHead failed fetching chain") } ht.backfillMB.Deliver(headWithChain) From 5a3c13021c61db6d697e01d1763aceeb65902fdd Mon Sep 17 00:00:00 2001 From: John Barker Date: Thu, 11 Mar 2021 15:39:42 -0700 Subject: [PATCH 010/116] When a gorm operation is cancelled, log as debug, with msg --- core/store/orm/log_wrapper.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/core/store/orm/log_wrapper.go b/core/store/orm/log_wrapper.go index 5ee0ce9a8ff..e6ba72e00ff 100644 --- a/core/store/orm/log_wrapper.go +++ b/core/store/orm/log_wrapper.go @@ -44,6 +44,9 @@ func (o *ormLogWrapper) Error(ctx context.Context, s string, i ...interface{}) { func (o *ormLogWrapper) Trace(ctx context.Context, begin time.Time, fc func() (string, int64), err error) { elapsed := time.Since(begin) switch { + case ctx.Err() != nil: + sql, _ := fc() + o.SugaredLogger.Debugw("Operation cancelled via context", "err", err, "elapsed", float64(elapsed.Nanoseconds())/1e6, "sql", sql) case err != nil: // NOTE: Silence "record not found" errors since it is the one type of // error that we expect/handle and otherwise it fills our logs with From 2404d2be1618546c10adcb58da26ecd0250c88de Mon Sep 17 00:00:00 2001 From: John Barker Date: Thu, 11 Mar 2021 16:22:51 -0700 Subject: [PATCH 011/116] In HeadTracker handle done/timeout separately --- core/services/head_tracker.go | 33 +++++++++++++++++++++++---------- 1 file changed, 23 insertions(+), 10 deletions(-) diff --git a/core/services/head_tracker.go b/core/services/head_tracker.go index 24b462cb754..633bff14ebd 100644 --- a/core/services/head_tracker.go +++ b/core/services/head_tracker.go @@ -269,15 +269,21 @@ func (ht *HeadTracker) listenForNewHeads() { logger.WarnIf(errors.Wrap(err, "HeadTracker failed when unsubscribe from head")) }() + ctx, cancel := utils.ContextFromChan(ht.done) + defer cancel() + for { if !ht.subscribe() { - return + break } - if err := ht.receiveHeaders(); err != nil { + err := ht.receiveHeaders(ctx) + if ctx.Err() != nil { + break + } else if err != nil { ht.logger.Errorw(fmt.Sprintf("Error in new head subscription, unsubscribed: %s", err.Error()), "err", err) continue } else { - 
return + break } } } @@ -305,7 +311,7 @@ func (ht *HeadTracker) backfiller() { defer cancel() if err != nil { logger.Warnw("HeadTracker: unexpected error while backfilling heads", "err", err) - } else if err := ctx.Err(); err != nil { + } else if ctx.Err() != nil { break } } @@ -424,7 +430,7 @@ func (ht *HeadTracker) subscribe() bool { } // This should be safe to run concurrently across multiple nodes connected to the same database -func (ht *HeadTracker) receiveHeaders() error { +func (ht *HeadTracker) receiveHeaders(ctx context.Context) error { for { select { case <-ht.done: @@ -435,12 +441,16 @@ func (ht *HeadTracker) receiveHeaders() error { } timeBudget := ht.totalNewHeadTimeBudget() { - ctx, cancel := utils.CombinedContext(ht.done, timeBudget) + deadlineCtx, cancel := context.WithTimeout(ctx, timeBudget) defer cancel() - if err := ht.handleNewHead(ctx, blockHeader); err != nil { + + err := ht.handleNewHead(ctx, blockHeader) + if ctx.Err() != nil { + return nil + } else if deadlineCtx.Err() != nil { + logger.Warnw("HeadTracker: handling of new head timed out", "error", ctx.Err(), "timeBudget", timeBudget.String()) return err - } else if err := ctx.Err(); err != nil { - logger.Debugw("HeadTracker: handling of new head canceled", "error", err, "timeBudget", timeBudget.String()) + } else if err != nil { return err } } @@ -471,7 +481,10 @@ func (ht *HeadTracker) handleNewHead(ctx context.Context, head models.Head) erro "blockHash", head.Hash, ) - if err := ht.Save(ctx, head); err != nil { + err := ht.Save(ctx, head) + if ctx.Err() != nil { + return nil + } else if err != nil { return err } From e1e6df73555e80efea9d13116d2b87f3d9cf2082 Mon Sep 17 00:00:00 2001 From: John Barker Date: Thu, 11 Mar 2021 16:23:15 -0700 Subject: [PATCH 012/116] Make sure getInProgressEthTxAttempts has a context --- .../services/bulletprooftxmanager/eth_confirmer.go | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/core/services/bulletprooftxmanager/eth_confirmer.go b/core/services/bulletprooftxmanager/eth_confirmer.go index 0e896143216..c990f1c87c5 100644 --- a/core/services/bulletprooftxmanager/eth_confirmer.go +++ b/core/services/bulletprooftxmanager/eth_confirmer.go @@ -536,21 +536,27 @@ func (ec *ethConfirmer) rebroadcastWhereNecessary(ctx context.Context, address g // re-org, so multiple attempts are allowed to be in in_progress state (but // only one per eth_tx). func (ec *ethConfirmer) handleAnyInProgressAttempts(ctx context.Context, address gethCommon.Address, blockHeight int64) error { - attempts, err := getInProgressEthTxAttempts(ec.store, address) - if err != nil { + attempts, err := getInProgressEthTxAttempts(ctx, ec.store, address) + if ctx.Err() != nil { + return nil + } else if err != nil { return errors.Wrap(err, "getInProgressEthTxAttempts failed") } for _, a := range attempts { - if err := ec.handleInProgressAttempt(ctx, a.EthTx, a, blockHeight); err != nil { + err := ec.handleInProgressAttempt(ctx, a.EthTx, a, blockHeight) + if ctx.Err() != nil { + break + } else if err != nil { return errors.Wrap(err, "handleInProgressAttempt failed") } } return nil } -func getInProgressEthTxAttempts(s *store.Store, address gethCommon.Address) ([]models.EthTxAttempt, error) { +func getInProgressEthTxAttempts(ctx context.Context, s *store.Store, address gethCommon.Address) ([]models.EthTxAttempt, error) { var attempts []models.EthTxAttempt err := s.DB. + WithContext(ctx). Preload("EthTx"). 
Joins("INNER JOIN eth_txes ON eth_txes.id = eth_tx_attempts.eth_tx_id AND eth_txes.state in ('confirmed', 'confirmed_missing_receipt', 'unconfirmed')"). Where("eth_tx_attempts.state = 'in_progress'"). From 3e51df80c3698ded6a9fed72610fbc651336065b Mon Sep 17 00:00:00 2001 From: John Barker Date: Thu, 11 Mar 2021 16:57:30 -0700 Subject: [PATCH 013/116] Give AwaitOrFail, Maybe on optional mocks, wait for subscription --- core/internal/cltest/cltest.go | 11 ++++++++--- core/internal/features_test.go | 15 +++++++++++---- core/services/job/spawner_test.go | 8 ++++---- 3 files changed, 23 insertions(+), 11 deletions(-) diff --git a/core/internal/cltest/cltest.go b/core/internal/cltest/cltest.go index 4bc6ae4d4e6..ea60e43c334 100644 --- a/core/internal/cltest/cltest.go +++ b/core/internal/cltest/cltest.go @@ -1472,11 +1472,16 @@ func NewAwaiter() Awaiter { return make(Awaiter) } func (a Awaiter) ItHappened() { close(a) } -func (a Awaiter) AwaitOrFail(t testing.TB, d time.Duration) { +func (a Awaiter) AwaitOrFail(t testing.TB, durationParams ...time.Duration) { + duration := 10 * time.Second + if len(durationParams) > 0 { + duration = durationParams[0] + } + select { case <-a: - case <-time.After(d): - t.Fatal("timed out") + case <-time.After(duration): + t.Fatal("timed out waiting for Awaiter to get ItHappened") } } diff --git a/core/internal/features_test.go b/core/internal/features_test.go index 541b3f245b2..ce015acbea8 100644 --- a/core/internal/features_test.go +++ b/core/internal/features_test.go @@ -933,18 +933,25 @@ func TestIntegration_FluxMonitor_NewRound(t *testing.T) { availableFunds := minPayment * 100 // Start, connect, and initialize node - sub.On("Err").Return(nil) - sub.On("Unsubscribe").Return(nil).Maybe() - gethClient.On("ChainID", mock.Anything).Return(app.Store.Config.ChainID(), nil) + gethClient.On("ChainID", mock.Anything).Maybe().Return(app.Store.Config.ChainID(), nil) gethClient.On("BalanceAt", mock.Anything, mock.Anything, mock.Anything).Maybe().Return(oneETH.ToInt(), nil) + newHeadsCh := make(chan chan<- *models.Head, 1) + rpcClientDone := cltest.NewAwaiter() rpcClient.On("EthSubscribe", mock.Anything, mock.Anything, "newHeads"). - Run(func(args mock.Arguments) { newHeadsCh <- args.Get(1).(chan<- *models.Head) }). + Run(func(args mock.Arguments) { + rpcClientDone.ItHappened() + newHeadsCh <- args.Get(1).(chan<- *models.Head) + }). 
Return(sub, nil) + sub.On("Err").Maybe().Return(nil) + sub.On("Unsubscribe").Maybe().Return(nil) + err := app.StartAndConnect() require.NoError(t, err) + rpcClientDone.AwaitOrFail(t) gethClient.AssertExpectations(t) rpcClient.AssertExpectations(t) sub.AssertExpectations(t) diff --git a/core/services/job/spawner_test.go b/core/services/job/spawner_test.go index 7118b8c6c7b..c7a59ea05b8 100644 --- a/core/services/job/spawner_test.go +++ b/core/services/job/spawner_test.go @@ -160,7 +160,7 @@ func TestSpawner_CreateJobDeleteJob(t *testing.T) { spawner.Start() defer spawner.Close() - eventually.AwaitOrFail(t, 10*time.Second) + eventually.AwaitOrFail(t) mock.AssertExpectationsForObjects(t, serviceA1, serviceA2) serviceA1.On("Close").Return(nil).Once() @@ -190,7 +190,7 @@ func TestSpawner_CreateJobDeleteJob(t *testing.T) { spawner.Start() - eventually.AwaitOrFail(t, 10*time.Second) + eventually.AwaitOrFail(t) mock.AssertExpectationsForObjects(t, serviceA1, serviceA2) serviceA1.On("Close").Return(nil).Once() @@ -227,7 +227,7 @@ func TestSpawner_CreateJobDeleteJob(t *testing.T) { spawner.Start() defer spawner.Close() - eventuallyStart.AwaitOrFail(t, 10*time.Second) + eventuallyStart.AwaitOrFail(t) advisoryLockClassID := job.GetORMAdvisoryLockClassID(orm) @@ -248,7 +248,7 @@ func TestSpawner_CreateJobDeleteJob(t *testing.T) { require.NoError(t, db.Exec(`DELETE FROM jobs WHERE id = ?`, jobSpecIDA).Error) - eventuallyClose.AwaitOrFail(t, 10*time.Second) + eventuallyClose.AwaitOrFail(t) // Wait for the claim lock to be released gomega.NewGomegaWithT(t).Eventually(func() int { From 7f5d3e0cf969eaa6f23202c7fc1e310a8c25462a Mon Sep 17 00:00:00 2001 From: John Barker Date: Thu, 11 Mar 2021 16:58:50 -0700 Subject: [PATCH 014/116] verifyEthereumChainID is called before started = true --- core/services/head_tracker.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/core/services/head_tracker.go b/core/services/head_tracker.go index 633bff14ebd..d42644ee5db 100644 --- a/core/services/head_tracker.go +++ b/core/services/head_tracker.go @@ -632,9 +632,7 @@ func (ht *HeadTracker) setHighestSeenHeadFromDB() error { // chainIDVerify checks whether or not the ChainID from the Chainlink config // matches the ChainID reported by the ETH node connected to this Chainlink node. 
func verifyEthereumChainID(ht *HeadTracker) error { - ctx, cancel := utils.ContextFromChan(ht.done) - defer cancel() - ethereumChainID, err := ht.store.EthClient.ChainID(ctx) + ethereumChainID, err := ht.store.EthClient.ChainID(context.Background()) if err != nil { return err } From 259bf321dd98e543958a1a2ebe4b1aeb8ebeda93 Mon Sep 17 00:00:00 2001 From: John Barker Date: Thu, 11 Mar 2021 16:59:54 -0700 Subject: [PATCH 015/116] setHighestSeenHeadFromDB is called before started = true --- core/services/head_tracker.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/core/services/head_tracker.go b/core/services/head_tracker.go index d42644ee5db..c257b45e076 100644 --- a/core/services/head_tracker.go +++ b/core/services/head_tracker.go @@ -619,9 +619,7 @@ func (ht *HeadTracker) unsubscribeFromHead() error { } func (ht *HeadTracker) setHighestSeenHeadFromDB() error { - ctx, cancel := utils.ContextFromChan(ht.done) - defer cancel() - head, err := ht.store.LastHead(ctx) + head, err := ht.store.LastHead(context.Background()) if err != nil { return err } From 12bbf46faf9a8adb9b8dc34017aa45606907d418 Mon Sep 17 00:00:00 2001 From: John Barker Date: Fri, 12 Mar 2021 11:43:06 -0700 Subject: [PATCH 016/116] Add summary to log_wrapper lines so you can grep for them --- core/store/orm/log_wrapper.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/core/store/orm/log_wrapper.go b/core/store/orm/log_wrapper.go index e6ba72e00ff..2b9ab1a502a 100644 --- a/core/store/orm/log_wrapper.go +++ b/core/store/orm/log_wrapper.go @@ -56,23 +56,23 @@ func (o *ormLogWrapper) Trace(ctx context.Context, begin time.Time, fc func() (s } sql, rows := fc() if rows == -1 { - o.SugaredLogger.Errorw("", "err", err, "elapsed", float64(elapsed.Nanoseconds())/1e6, "sql", sql) + o.SugaredLogger.Errorw("Operation failed", "err", err, "elapsed", float64(elapsed.Nanoseconds())/1e6, "sql", sql) } else { - o.SugaredLogger.Errorw("", "err", err, "elapsed", float64(elapsed.Nanoseconds())/1e6, "rows", rows, "sql", sql) + o.SugaredLogger.Errorw("Operation failed", "err", err, "elapsed", float64(elapsed.Nanoseconds())/1e6, "rows", rows, "sql", sql) } case elapsed > o.slowThreshold && o.slowThreshold != 0: sql, rows := fc() if rows == -1 { - o.SugaredLogger.Warnw("", "elapsed", float64(elapsed.Nanoseconds())/1e6, "sql", sql) + o.SugaredLogger.Warnw("Operation timed out", "elapsed", float64(elapsed.Nanoseconds())/1e6, "sql", sql) } else { - o.SugaredLogger.Warnw("", "elapsed", float64(elapsed.Nanoseconds())/1e6, "rows", rows, "sql", sql) + o.SugaredLogger.Warnw("Operation timed out", "elapsed", float64(elapsed.Nanoseconds())/1e6, "rows", rows, "sql", sql) } case o.logAllQueries: sql, rows := fc() if rows == -1 { - o.SugaredLogger.Infow("", "elapsed", float64(elapsed.Nanoseconds())/1e6, "sql", sql) + o.SugaredLogger.Infow("Query executed", "elapsed", float64(elapsed.Nanoseconds())/1e6, "sql", sql) } else { - o.SugaredLogger.Infow("", "elapsed", float64(elapsed.Nanoseconds())/1e6, "rows", rows, "sql", sql) + o.SugaredLogger.Infow("Query executed", "elapsed", float64(elapsed.Nanoseconds())/1e6, "rows", rows, "sql", sql) } } } From 3df49c8547c47b1368c79b4c175ac0daf2c26282 Mon Sep 17 00:00:00 2001 From: John Barker Date: Fri, 12 Mar 2021 12:24:33 -0700 Subject: [PATCH 017/116] Make sure markOldTxesMissingReceiptAsErrored always closes Rows() --- core/services/bulletprooftxmanager/eth_confirmer.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git 
a/core/services/bulletprooftxmanager/eth_confirmer.go b/core/services/bulletprooftxmanager/eth_confirmer.go index c990f1c87c5..dcbf74a065b 100644 --- a/core/services/bulletprooftxmanager/eth_confirmer.go +++ b/core/services/bulletprooftxmanager/eth_confirmer.go @@ -446,6 +446,7 @@ RETURNING id, nonce, from_address`, ErrCouldNotGetReceipt, cutoff) if err != nil { return errors.Wrap(err, "markOldTxesMissingReceiptAsErrored failed to query") } + defer logger.ErrorIfCalling(rows.Close) for rows.Next() { var ethTxID int64 @@ -462,7 +463,7 @@ RETURNING id, nonce, from_address`, ErrCouldNotGetReceipt, cutoff) ethTxID, blockNum, fromAddress.Hex(), nonce.Int64) } - return errors.Wrap(rows.Close(), "markOldTxesMissingReceiptAsErrored failed to close rows") + return nil } func (ec *ethConfirmer) RebroadcastWhereNecessary(ctx context.Context, keys []models.Key, blockHeight int64) error { From 1314ce4dde47a30698c769c3604df8b438415535 Mon Sep 17 00:00:00 2001 From: James Kong Date: Wed, 17 Mar 2021 17:07:15 +0800 Subject: [PATCH 018/116] Fix FMV2 issues raised by review --- core/internal/features_test.go | 2 +- .../fluxmonitorv2/integrations_test.go | 28 +++- core/services/fluxmonitorv2/poll_manager.go | 143 ++++++++++++++++++ 3 files changed, 169 insertions(+), 4 deletions(-) create mode 100644 core/services/fluxmonitorv2/poll_manager.go diff --git a/core/internal/features_test.go b/core/internal/features_test.go index a779db0f714..541b3f245b2 100644 --- a/core/internal/features_test.go +++ b/core/internal/features_test.go @@ -790,7 +790,7 @@ func TestIntegration_FluxMonitor_Deviation(t *testing.T) { kst.On("HasAccountWithAddress", address).Return(true) kst.On("GetAccountByAddress", mock.Anything).Maybe().Return(accounts.Account{}, nil) kst.On("SignTx", mock.Anything, mock.Anything, mock.Anything).Maybe().Return(&types.Transaction{}, nil) - kst.On("Accounts").Return([]accounts.Account{{Address: address}}) + kst.On("Accounts").Return([]accounts.Account{}) app.Store.KeyStore = kst diff --git a/core/services/fluxmonitorv2/integrations_test.go b/core/services/fluxmonitorv2/integrations_test.go index af24deba63f..139ac75cc6a 100644 --- a/core/services/fluxmonitorv2/integrations_test.go +++ b/core/services/fluxmonitorv2/integrations_test.go @@ -22,11 +22,14 @@ import ( "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/flags_wrapper" faw "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/flux_aggregator_wrapper" "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/link_token_interface" + "github.com/smartcontractkit/chainlink/core/services/fluxmonitorv2" + "github.com/smartcontractkit/chainlink/core/services/pipeline" "github.com/smartcontractkit/chainlink/core/store/models" "github.com/smartcontractkit/chainlink/core/store/orm" "github.com/smartcontractkit/chainlink/core/utils" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "gorm.io/gorm" ) const description = "exactly thirty-three characters!!" 
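The rows-closing fix in patch 017 above follows the standard database/sql discipline: defer the Close immediately after the query's error check so that every return path releases the cursor. A minimal sketch of that shape (the query and table name are illustrative only, not the actual chainlink schema):

package dbutil

import (
	"context"
	"database/sql"
)

// countRows shows the fixed pattern: defer rows.Close() right away, and
// report iteration errors via rows.Err() rather than closing manually at
// the end, which an early return could skip.
func countRows(ctx context.Context, db *sql.DB) (int, error) {
	rows, err := db.QueryContext(ctx, `SELECT id FROM some_table`)
	if err != nil {
		return 0, err
	}
	defer rows.Close() // runs on every path below, including early returns

	n := 0
	for rows.Next() {
		var id int64
		if err := rows.Scan(&id); err != nil {
			return 0, err // rows is still closed via the defer
		}
		n++
	}
	return n, rows.Err() // surfaces any error that ended iteration
}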
@@ -369,6 +372,19 @@ func assertNoSubmission(t *testing.T, } } +// assertPipelineRunCreated checks that a pipeline run exists for a given round and +// verifies the answer +func assertPipelineRunCreated(t *testing.T, db *gorm.DB, roundID int64, result float64) { + // Fetch the stats to extract the run id + stats := fluxmonitorv2.FluxMonitorRoundStatsV2{} + db.Where("round_id = ?", roundID).Find(&stats) + + // Verify the pipeline run data + run := pipeline.Run{} + db.Find(&run, stats.PipelineRunID) + assert.Equal(t, []interface{}{result}, run.Outputs.Val) +} + func TestFluxMonitor_Deviation(t *testing.T) { fa := setupFluxAggregatorUniverse(t) @@ -450,6 +466,7 @@ func TestFluxMonitor_Deviation(t *testing.T) { initialBalance, receiptBlock, ) + assertPipelineRunCreated(t, app.Store.DB, 1, float64(100)) // Change reported price to a value outside the deviation reportPrice = int64(103) @@ -469,6 +486,7 @@ func TestFluxMonitor_Deviation(t *testing.T) { initialBalance-fee, receiptBlock, ) + assertPipelineRunCreated(t, app.Store.DB, 2, float64(103)) // Should not receive a submission as it is inside the deviation reportPrice = int64(104) @@ -748,10 +766,11 @@ ds1 -> ds1_parse j := cltest.CreateJobViaWeb2(t, app, string(requestBody)) + tick := time.NewTicker(500 * time.Millisecond) + defer tick.Stop() go func() { - for { + for range tick.C { fa.backend.Commit() - time.Sleep(500 * time.Millisecond) } }() @@ -801,7 +820,10 @@ func TestFluxMonitorAntiSpamLogic(t *testing.T) { }) // - successfully close the round through the submissions of the other nodes - // Response by malicious chainlink node, nallory + // Response by spammy chainlink node, nallory + // + // The initial balance is the LINK balance of the flux aggregator contract. We + // use it to check that the fee for submitting an answer has been paid out.
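As an aside on the Commit loop rewritten earlier in this patch: a stoppable time.Ticker replaces the bare sleep loop so the mining goroutine can be shut down with the test. The general shape, as a runnable sketch (the commit counter stands in for fa.backend.Commit()):

package main

import (
	"fmt"
	"time"
)

func main() {
	commits := 0
	tick := time.NewTicker(100 * time.Millisecond)
	// Stop prevents further ticks once we are done; note that it does not
	// close tick.C, so a `for range tick.C` loop simply stops receiving.
	defer tick.Stop()

	deadline := time.After(350 * time.Millisecond)
	for {
		select {
		case <-tick.C:
			commits++ // stand-in for fa.backend.Commit()
		case <-deadline:
			fmt.Println("committed", commits, "times")
			return
		}
	}
}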
initialBalance := currentBalance(t, &fa).Int64() reportPrice := answer priceResponse := func() string { diff --git a/core/services/fluxmonitorv2/poll_manager.go b/core/services/fluxmonitorv2/poll_manager.go new file mode 100644 index 00000000000..eaa488f11c8 --- /dev/null +++ b/core/services/fluxmonitorv2/poll_manager.go @@ -0,0 +1,143 @@ +package fluxmonitorv2 + +import ( + "time" + + "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/flux_aggregator_wrapper" +) + +type PollManagerConfig struct { + IsHibernating bool + PollTickerInterval time.Duration + PollTickerDisabled bool + IdleTimerInterval time.Duration + IdleTimerDisabled bool +} + +// PollManager manages the tickers/timers which cause the Flux Monitor to start +// a poll +type PollManager struct { + cfg PollManagerConfig + + isHibernating bool + pollTicker *PollTicker + idleTimer *IdleTimer +} + +// NewPollManager initializes a new PollManager +func NewPollManager(cfg PollManagerConfig) *PollManager { + return &PollManager{ + cfg: cfg, + isHibernating: cfg.IsHibernating, + + pollTicker: NewPollTicker(cfg.PollTickerInterval, cfg.PollTickerDisabled), + idleTimer: NewIdleTimer(cfg.IdleTimerInterval, cfg.IdleTimerDisabled), + } +} + +// PollTickerTicks ticks on a given interval +func (pm *PollManager) PollTickerTicks() <-chan time.Time { + return pm.pollTicker.Ticks() +} + +// IdleTimerTicks ticks after a given period +func (pm *PollManager) IdleTimerTicks() <-chan time.Time { + return pm.idleTimer.Ticks() +} + +func (pm *PollManager) Stop() { + pm.pollTicker.Stop() + pm.idleTimer.Stop() +} + +// Reset resets all tickers/timers +func (pm *PollManager) Reset(roundState flux_aggregator_wrapper.OracleRoundState) { + pm.ResetPollTicker() + pm.ResetIdleTimer(roundStateTimesOutAt(roundState)) +} + +// ResetPollTicker resets the poll ticker if enabled and not hibernating +func (pm *PollManager) ResetPollTicker() { + if pm.pollTicker.IsEnabled() && !pm.isHibernating { + pm.pollTicker.Resume() + } else { + pm.pollTicker.Pause() + } +} + +func (pm *PollManager) IsPollTickerDisabled() bool { + return pm.pollTicker.IsDisabled() +} + +func (pm *PollManager) IsIdleTimerDisabled() bool { + return pm.idleTimer.IsDisabled() +} + +func (pm *PollManager) ResetIdleTimer(roundStartedAtUTC uint64) { + // Stop the timer if hibernating or disabled + if pm.isHibernating || pm.idleTimer.IsDisabled() { + pm.idleTimer.Stop() + + return + } + + // There is no active round, so keep using the idleTimer we already have + if roundStartedAtUTC == 0 { + return + } + + startedAt := time.Unix(int64(roundStartedAtUTC), 0) + idleDeadline := startedAt.Add(pm.idleTimer.Period()) + timeUntilIdleDeadline := time.Until(idleDeadline) + + // loggerFields := fm.loggerFields( + // "startedAt", roundStartedAtUTC, + // "timeUntilIdleDeadline", timeUntilIdleDeadline, + // ) + + if timeUntilIdleDeadline <= 0 { + // fm.logger.Debugw("not resetting idleTimer, negative duration", loggerFields...) + + return + } + + pm.idleTimer.Reset(timeUntilIdleDeadline) + // fm.logger.Debugw("resetting idleTimer", loggerFields...) 
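+// In short: the timer must fire at startedAt+Period; time.Until gives the +// remaining window, and a non-positive window means the idle deadline has +// already passed, so the early return above leaves the timer untouched.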
+} + +// func (fm *FluxMonitor) resetIdleTimer(roundStartedAtUTC uint64) { +// if fm.isHibernating || fm.idleTimer.IsDisabled() { +// fm.idleTimer.Stop() +// return +// } else if roundStartedAtUTC == 0 { +// // There is no active round, so keep using the idleTimer we already have +// return +// } + +// startedAt := time.Unix(int64(roundStartedAtUTC), 0) +// idleDeadline := startedAt.Add(fm.idleTimer.Period()) +// timeUntilIdleDeadline := time.Until(idleDeadline) +// loggerFields := fm.loggerFields( +// "startedAt", roundStartedAtUTC, +// "timeUntilIdleDeadline", timeUntilIdleDeadline, +// ) + +// if timeUntilIdleDeadline <= 0 { +// fm.logger.Debugw("not resetting idleTimer, negative duration", loggerFields...) +// return +// } +// fm.idleTimer.Reset(timeUntilIdleDeadline) +// fm.logger.Debugw("resetting idleTimer", loggerFields...) +// } + +// Hibernate sets hibernation to true and resets all ticker/timers +func (pm *PollManager) Hibernate(roundState flux_aggregator_wrapper.OracleRoundState) { + pm.isHibernating = true + pm.Reset(roundState) +} + +// Awaken sets hibernation to false and resets all ticker/timers +func (pm *PollManager) Awaken(roundState flux_aggregator_wrapper.OracleRoundState) { + pm.isHibernating = false + pm.Reset(roundState) +} From 1d42229080a439e61e7f9f42e0afc6fd82efffa8 Mon Sep 17 00:00:00 2001 From: AndrewInsignares Date: Wed, 17 Mar 2021 10:14:31 -0400 Subject: [PATCH 019/116] Add CLI command for toggling debug logging --- core/cmd/app.go | 19 ++++++++++++++++++- core/cmd/remote_client.go | 30 ++++++++++++++++++++++++++++++ core/web/log_controller.go | 4 ++-- 3 files changed, 50 insertions(+), 3 deletions(-) diff --git a/core/cmd/app.go b/core/cmd/app.go index caf2afb2c5a..ac02b4cfc62 100644 --- a/core/cmd/app.go +++ b/core/cmd/app.go @@ -461,7 +461,24 @@ func NewApp(client *Client) *cli.App { }, }, }, - + { + Name: "logs", + Aliases: []string{"log"}, + Usage: "Commands for dynamic configuration and actions for the logger", + Subcommands: []cli.Command{ + { + Name: "enabledebug", + Usage: "Enable and disable debug logging", + Action: client.ToggleDebugLogging, + Flags: []cli.Flag{ + cli.BoolFlag{ + Name: "enabled, true", + Usage: "enable or disable debug logger", + }, + }, + }, + }, + }, { Name: "node", Aliases: []string{"local"}, diff --git a/core/cmd/remote_client.go b/core/cmd/remote_client.go index 80794d84e0a..66225f5c42e 100644 --- a/core/cmd/remote_client.go +++ b/core/cmd/remote_client.go @@ -27,6 +27,7 @@ import ( "github.com/smartcontractkit/chainlink/core/store/presenters" "github.com/smartcontractkit/chainlink/core/utils" "github.com/smartcontractkit/chainlink/core/web" + webPresenter "github.com/smartcontractkit/chainlink/core/web/presenters" ) var errUnauthorized = errors.New(http.StatusText(http.StatusUnauthorized)) @@ -1192,6 +1193,35 @@ func normalizePassword(password string) string { return url.PathEscape(strings.TrimSpace(password)) } +// ToggleDebugLogging enables or disables debug logging on the node +func (cli *Client) ToggleDebugLogging(c *clipkg.Context) (err error) { + if !c.Args().Present() { + return cli.errorOut(errors.New("Must set enabled or disabled (true || false)")) + } + + isDebugEnabled := c.Bool("enabled") + request := web.LoglevelPatchRequest{EnableDebugLog: &isDebugEnabled} + requestData, err := json.Marshal(request) + if err != nil { + return cli.errorOut(err) + } + + buf := bytes.NewBuffer(requestData) + resp, err := cli.HTTP.Patch("/v2/log", buf) + if err != nil { + return cli.errorOut(errors.Wrap(err, "from toggling debug
logging")) + } + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + var lR webPresenter.LogResource + err = cli.renderAPIResponse(resp, &lR) + return err +} + func getBufferFromJSON(s string) (*bytes.Buffer, error) { if gjson.Valid(s) { return bytes.NewBufferString(s), nil diff --git a/core/web/log_controller.go b/core/web/log_controller.go index 1278e007a12..80c575257a4 100644 --- a/core/web/log_controller.go +++ b/core/web/log_controller.go @@ -15,13 +15,13 @@ type LogController struct { App chainlink.Application } -type loglevelPatchRequest struct { +type LoglevelPatchRequest struct { EnableDebugLog *bool `json:"debugEnabled"` } // ToggleDebug toggles the debug log mode func (cc *LogController) ToggleDebug(c *gin.Context) { - request := &loglevelPatchRequest{} + request := &LoglevelPatchRequest{} if err := c.ShouldBindJSON(request); err != nil { jsonAPIError(c, http.StatusUnprocessableEntity, err) return From 9f309d164c32184bcdfb935236a0df79e8dc7b63 Mon Sep 17 00:00:00 2001 From: AndrewInsignares Date: Wed, 17 Mar 2021 10:23:41 -0400 Subject: [PATCH 020/116] renaming toggleLog -> setLog --- core/cmd/app.go | 2 +- core/cmd/remote_client.go | 4 ++-- core/web/log_controller.go | 4 ++-- core/web/router.go | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/core/cmd/app.go b/core/cmd/app.go index ac02b4cfc62..b887e3971cb 100644 --- a/core/cmd/app.go +++ b/core/cmd/app.go @@ -469,7 +469,7 @@ func NewApp(client *Client) *cli.App { { Name: "enabledebug", Usage: "Enable and disable debug logging", - Action: client.ToggleDebugLogging, + Action: client.SetDebugLogging, Flags: []cli.Flag{ cli.BoolFlag{ Name: "enabled, true", diff --git a/core/cmd/remote_client.go b/core/cmd/remote_client.go index 66225f5c42e..f310918c70d 100644 --- a/core/cmd/remote_client.go +++ b/core/cmd/remote_client.go @@ -1193,8 +1193,8 @@ func normalizePassword(password string) string { return url.PathEscape(strings.TrimSpace(password)) } -// ToggleDebugLogging enables or disables debug logging on the node -func (cli *Client) ToggleDebugLogging(c *clipkg.Context) (err error) { +// SetDebugLogging enables or disables debug logging on the node +func (cli *Client) SetDebugLogging(c *clipkg.Context) (err error) { if !c.Args().Present() { return cli.errorOut(errors.New("Must set enabled or disabled (true || false)")) } diff --git a/core/web/log_controller.go b/core/web/log_controller.go index 80c575257a4..00ea625a650 100644 --- a/core/web/log_controller.go +++ b/core/web/log_controller.go @@ -19,8 +19,8 @@ type LoglevelPatchRequest struct { EnableDebugLog *bool `json:"debugEnabled"` } -// ToggleDebug toggles the debug log mode -func (cc *LogController) ToggleDebug(c *gin.Context) { +// SetDebug sets the debug log mode for the logger +func (cc *LogController) SetDebug(c *gin.Context) { request := &LoglevelPatchRequest{} if err := c.ShouldBindJSON(request); err != nil { jsonAPIError(c, http.StatusUnprocessableEntity, err) diff --git a/core/web/router.go b/core/web/router.go index f54ef4f974f..2f77f3c199d 100644 --- a/core/web/router.go +++ b/core/web/router.go @@ -292,7 +292,7 @@ func v2Routes(app chainlink.Application, r *gin.RouterGroup) { authv2.POST("/jobs/:ID/runs", prc.Create) lgc := LogController{app} - authv2.PATCH("/log", lgc.ToggleDebug) + authv2.PATCH("/log", lgc.SetDebug) } ping := PingController{app} From 8dbaf76cb582c00665f74231f166e301959d4b73 Mon Sep 17 00:00:00 2001 From: AndrewInsignares Date: Wed, 17 Mar 2021 12:37:55 -0400 Subject: 
[PATCH 021/116] Fix Kovan special casing in head tracker (#4039) --- .gitignore | 1 + core/services/head_tracker.go | 28 +++------------------------- core/store/orm/config.go | 5 +++++ core/store/orm/schema.go | 1 + 4 files changed, 10 insertions(+), 25 deletions(-) diff --git a/.gitignore b/.gitignore index 84aea9325da..6cfb84f80f8 100644 --- a/.gitignore +++ b/.gitignore @@ -29,6 +29,7 @@ tools/clroot/db.sqlite3-wal .DS_Store .envrc .env* +.idea # codeship *.aes diff --git a/core/services/head_tracker.go b/core/services/head_tracker.go index c257b45e076..c5b3c72ebbe 100644 --- a/core/services/head_tracker.go +++ b/core/services/head_tracker.go @@ -50,8 +50,6 @@ var ( Name: "head_tracker_eth_connection_errors", Help: "The total number of eth node connection errors", }) - // kovanChainID is the Chain ID for Kovan test network - kovanChainID = big.NewInt(42) ) // headRingBuffer is a small goroutine that sits between the eth client and the @@ -439,7 +437,7 @@ func (ht *HeadTracker) receiveHeaders(ctx context.Context) error { if !open { return errors.New("HeadTracker: outHeaders prematurely closed") } - timeBudget := ht.totalNewHeadTimeBudget() + timeBudget := ht.store.Config.HeadTimeBudget() { deadlineCtx, cancel := context.WithTimeout(ctx, timeBudget) defer cancel() @@ -469,7 +467,7 @@ func (ht *HeadTracker) handleNewHead(ctx context.Context, head models.Head) erro promCallbackDuration.Set(ms) promCallbackDurationHist.Observe(ms) if elapsed > ht.callbackExecutionThreshold() { - ht.logger.Warnw(fmt.Sprintf("HeadTracker finished processing head %v in %s which exceeds callback execution threshold of %s", number, elapsed.String(), ht.callbackExecutionThreshold().String()), "blockNumber", number, "time", elapsed, "id", "head_tracker") + ht.logger.Warnw(fmt.Sprintf("HeadTracker finished processing head %v in %s which exceeds callback execution threshold of %s", number, elapsed.String(), ht.store.Config.HeadTimeBudget().String()), "blockNumber", number, "time", elapsed, "id", "head_tracker") } else { ht.logger.Debugw(fmt.Sprintf("HeadTracker finished processing head %v in %s", number, elapsed.String()), "blockNumber", number, "time", elapsed, "id", "head_tracker") } @@ -518,31 +516,11 @@ func (ht *HeadTracker) handleNewHighestHead(ctx context.Context, head models.Hea return nil } -func (ht *HeadTracker) isKovan() bool { - return ht.store.Config.ChainID().Cmp(kovanChainID) == 0 -} - -// totalNewHeadTimeBudget is the timeout on the shared context for all -// requests triggered by a new head -// -// These values are chosen to be roughly 2 * block time (to give some leeway -// for temporary overload). They are by no means set in stone and may require -// adjustment based on real world feedback. -func (ht *HeadTracker) totalNewHeadTimeBudget() time.Duration { - if ht.isKovan() { - return 8 * time.Second - } - return 26 * time.Second -} - // If total callback execution time exceeds this threshold we consider this to // be a problem and will log a warning. // Here we set it to the average time between blocks. 
func (ht *HeadTracker) callbackExecutionThreshold() time.Duration { - if ht.isKovan() { - return 4 * time.Second - } - return 13 * time.Second + return ht.store.Config.HeadTimeBudget() / 2 } func (ht *HeadTracker) onNewLongestChain(ctx context.Context, headWithChain models.Head) { diff --git a/core/store/orm/config.go b/core/store/orm/config.go index 1907104ffae..1b6a1d095c2 100644 --- a/core/store/orm/config.go +++ b/core/store/orm/config.go @@ -944,6 +944,11 @@ func (c Config) CertFile() string { return c.TLSCertPath() } +// HeadTimeBudget returns the timeout on the shared context for all requests triggered by a new head +func (c Config) HeadTimeBudget() time.Duration { + return c.getWithFallback("HeadTimeBudget", parseDuration).(time.Duration) +} + // CreateProductionLogger returns a custom logger for the config's root // directory and LogLevel, with pretty printing for stdout. If LOG_TO_DISK is // false, the logger will only log to stdout. diff --git a/core/store/orm/schema.go b/core/store/orm/schema.go index 16db9559658..b3eea4f1686 100644 --- a/core/store/orm/schema.go +++ b/core/store/orm/schema.go @@ -63,6 +63,7 @@ type ConfigSchema struct { GasUpdaterBlockHistorySize uint16 `env:"GAS_UPDATER_BLOCK_HISTORY_SIZE" default:"24"` GasUpdaterTransactionPercentile uint16 `env:"GAS_UPDATER_TRANSACTION_PERCENTILE" default:"60"` GasUpdaterEnabled bool `env:"GAS_UPDATER_ENABLED" default:"true"` + HeadTimeBudget time.Duration `env:"HEAD_TIME_BUDGET" default:"8s"` InsecureFastScrypt bool `env:"INSECURE_FAST_SCRYPT" default:"false"` JobPipelineMaxRunDuration time.Duration `env:"JOB_PIPELINE_MAX_RUN_DURATION" default:"10m"` JobPipelineResultWriteQueueDepth uint64 `env:"JOB_PIPELINE_RESULT_WRITE_QUEUE_DEPTH" default:"100"` From 8d0228c1ea43e5662dfb7ec477186e7c81f8088e Mon Sep 17 00:00:00 2001 From: John Barker Date: Wed, 17 Mar 2021 11:31:15 -0600 Subject: [PATCH 022/116] Allow CallContract on geth mock --- core/services/job/runner_integration_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/core/services/job/runner_integration_test.go b/core/services/job/runner_integration_test.go index aa2fe673039..606e6c75a4a 100644 --- a/core/services/job/runner_integration_test.go +++ b/core/services/job/runner_integration_test.go @@ -61,6 +61,7 @@ func TestRunner(t *testing.T) { *head = cltest.Head(10) }). Return(nil) + geth.On("CallContract", mock.Anything, mock.Anything, mock.Anything).Maybe().Return(nil, nil) t.Run("gets the election result winner", func(t *testing.T) { var httpURL string From 0be895f4be3ff9102771625b535073701267112d Mon Sep 17 00:00:00 2001 From: Connor Stein Date: Wed, 17 Mar 2021 14:10:09 -0400 Subject: [PATCH 023/116] Bugfix/176786678 Optimize job creates (#4059) Remove pipeline_task_specs and simplify pipeline run creation. Improves performance of job adds and deletes.
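The shape of this change, reduced to types. This is a sketch only: the struct names are invented for illustration and the field lists are abbreviated to the columns the migration touches.

package pipelinesketch

// Before: every task run pointed at a per-task row in pipeline_task_specs.
type taskRunBefore struct {
	PipelineRunID      int64
	PipelineTaskSpecID int32 // FK into the now-removed pipeline_task_specs table
}

// After: the run carries the task's DOT node name directly, so the DAG can
// be rebuilt from pipeline_specs.dot_dag_source without a spec join table.
type taskRunAfter struct {
	PipelineRunID int64
	DotID         string // e.g. "ds1_parse"
}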
--- core/cmd/remote_client_test.go | 4 +- core/internal/cltest/factories.go | 25 +- core/internal/mocks/advisory_locker.go | 2 +- core/internal/mocks/after_nower.go | 2 +- core/internal/mocks/application.go | 2 +- core/internal/mocks/client.go | 2 +- core/internal/mocks/deviation_checker.go | 2 +- .../mocks/deviation_checker_factory.go | 2 +- .../mocks/external_initiator_manager.go | 2 +- core/internal/mocks/fetcher.go | 2 +- core/internal/mocks/flags.go | 2 +- core/internal/mocks/flux_aggregator.go | 2 +- core/internal/mocks/geth_client.go | 2 +- core/internal/mocks/head_trackable.go | 2 +- core/internal/mocks/job_subscriber.go | 2 +- core/internal/mocks/key_store_interface.go | 2 +- core/internal/mocks/notify_new_eth_tx.go | 2 +- core/internal/mocks/prometheus_backend.go | 2 +- core/internal/mocks/rpc_client.go | 2 +- core/internal/mocks/run_executor.go | 2 +- core/internal/mocks/run_manager.go | 2 +- core/internal/mocks/run_queue.go | 2 +- core/internal/mocks/service.go | 2 +- core/internal/mocks/stats_pusher.go | 2 +- core/internal/mocks/subscription.go | 2 +- core/services/gasupdater/mocks/config.go | 2 +- core/services/job/job_orm_test.go | 17 +- .../job/job_pipeline_orm_integration_test.go | 86 ++---- core/services/job/mocks/delegate.go | 2 +- core/services/job/mocks/orm.go | 25 +- core/services/job/mocks/service.go | 2 +- core/services/job/mocks/spawner.go | 2 +- core/services/job/orm.go | 49 +++- core/services/job/runner_integration_test.go | 86 +++--- core/services/job/spawner_test.go | 19 +- core/services/log/mocks/abigen_contract.go | 2 +- core/services/log/mocks/broadcast.go | 2 +- core/services/log/mocks/broadcaster.go | 2 +- core/services/log/mocks/listener.go | 2 +- core/services/log/mocks/orm.go | 12 +- .../services/offchainreporting/data_source.go | 10 +- .../mocks/ocr_contract_tracker_db.go | 2 +- core/services/pipeline/common.go | 53 +++- core/services/pipeline/common_test.go | 4 +- core/services/pipeline/graph.go | 26 +- core/services/pipeline/graph_test.go | 16 +- core/services/pipeline/mocks/runner.go | 14 +- core/services/pipeline/models.go | 159 ++++++----- core/services/pipeline/orm.go | 179 +++++-------- core/services/pipeline/orm_test.go | 1 - core/services/pipeline/runner.go | 163 +++++------- core/services/pipeline/runner_test.go | 246 +++--------------- core/services/pipeline/task.bridge.go | 23 +- core/services/pipeline/task.http.go | 7 +- core/services/pipeline/test_helpers.go | 6 +- .../postgres/mocks/event_broadcaster.go | 2 +- core/services/postgres/mocks/subscription.go | 2 +- core/services/prom_reporter_test.go | 1 - .../0016_pipeline_task_run_dot_id.go | 48 ++++ core/store/migrations/migrate_test.go | 98 ++++++- core/web/bridge_types_controller.go | 13 +- core/web/jobs_controller_test.go | 8 +- core/web/pipeline_runs_controller.go | 3 +- core/web/pipeline_runs_controller_test.go | 4 +- docs/CHANGELOG.md | 4 + 65 files changed, 700 insertions(+), 777 deletions(-) create mode 100644 core/store/migrations/0016_pipeline_task_run_dot_id.go diff --git a/core/cmd/remote_client_test.go b/core/cmd/remote_client_test.go index 0489f4fe12e..618d1419546 100644 --- a/core/cmd/remote_client_test.go +++ b/core/cmd/remote_client_test.go @@ -1350,9 +1350,9 @@ func TestClient_RunOCRJob_HappyPath(t *testing.T) { defer cleanup() require.NoError(t, app.Start()) - _, bridge := cltest.NewBridgeType(t, "voter_turnout", "blah") + _, bridge := cltest.NewBridgeType(t, "voter_turnout", "http://blah.com") require.NoError(t, app.Store.DB.Create(bridge).Error) - _, bridge2 := 
cltest.NewBridgeType(t, "election_winner", "blah") + _, bridge2 := cltest.NewBridgeType(t, "election_winner", "http://blah.com") require.NoError(t, app.Store.DB.Create(bridge2).Error) client, _ := app.NewClientAndRenderer() diff --git a/core/internal/cltest/factories.go b/core/internal/cltest/factories.go index f7045010b81..af80c3479fd 100644 --- a/core/internal/cltest/factories.go +++ b/core/internal/cltest/factories.go @@ -10,6 +10,7 @@ import ( "math/big" mathrand "math/rand" "net/url" + "strconv" "strings" "testing" "time" @@ -45,7 +46,6 @@ import ( "github.com/tidwall/gjson" "github.com/tidwall/sjson" "github.com/urfave/cli" - "gopkg.in/guregu/null.v4" ) // NewJob return new NoOp JobSpec @@ -830,7 +830,8 @@ func MustInsertPipelineRun(t *testing.T, db *gorm.DB) pipeline.Run { } func MustInsertUnfinishedPipelineTaskRun(t *testing.T, store *strpkg.Store, pipelineRunID int64) pipeline.TaskRun { - p := pipeline.TaskRun{PipelineTaskSpecID: mathrand.Int31(), PipelineRunID: pipelineRunID} + /* #nosec G404 */ + p := pipeline.TaskRun{DotID: strconv.Itoa(mathrand.Int()), PipelineRunID: pipelineRunID} require.NoError(t, store.DB.Create(&p).Error) return p } @@ -838,20 +839,14 @@ func MustInsertUnfinishedPipelineTaskRun(t *testing.T, store *strpkg.Store, pipe func MustInsertSampleDirectRequestJob(t *testing.T, db *gorm.DB) job.Job { t.Helper() - pspec := pipeline.Spec{} - require.NoError(t, db.Create(&pspec).Error) - - finalTspec := pipeline.TaskSpec{PipelineSpecID: pspec.ID} - require.NoError(t, db.Create(&finalTspec).Error) - - tspecPath1 := pipeline.TaskSpec{PipelineSpecID: pspec.ID, SuccessorID: null.IntFrom(int64(finalTspec.ID))} - require.NoError(t, db.Create(&tspecPath1).Error) + pspec := pipeline.Spec{DotDagSource: ` + // data source 1 + ds1 [type=bridge name=voter_turnout]; + ds1_parse [type=jsonparse path="one,two"]; + ds1_multiply [type=multiply times=1.23]; +`} - tspecPath2_2 := pipeline.TaskSpec{PipelineSpecID: pspec.ID, SuccessorID: null.IntFrom(int64(finalTspec.ID))} - require.NoError(t, db.Create(&tspecPath2_2).Error) - - tspecPath2_1 := pipeline.TaskSpec{PipelineSpecID: pspec.ID, SuccessorID: null.IntFrom(int64(tspecPath2_2.ID))} - require.NoError(t, db.Create(&tspecPath2_1).Error) + require.NoError(t, db.Create(&pspec).Error) drspec := job.DirectRequestSpec{} require.NoError(t, db.Create(&drspec).Error) diff --git a/core/internal/mocks/advisory_locker.go b/core/internal/mocks/advisory_locker.go index 3b5e7d0d618..227d8f14d3e 100644 --- a/core/internal/mocks/advisory_locker.go +++ b/core/internal/mocks/advisory_locker.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.5.1. DO NOT EDIT. +// Code generated by mockery v2.6.0. DO NOT EDIT. package mocks diff --git a/core/internal/mocks/after_nower.go b/core/internal/mocks/after_nower.go index 1ff9f9a0156..50d9cc6f6ef 100644 --- a/core/internal/mocks/after_nower.go +++ b/core/internal/mocks/after_nower.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.5.1. DO NOT EDIT. +// Code generated by mockery v2.6.0. DO NOT EDIT. package mocks diff --git a/core/internal/mocks/application.go b/core/internal/mocks/application.go index f383b1fc23f..7d0e8048e93 100644 --- a/core/internal/mocks/application.go +++ b/core/internal/mocks/application.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.5.1. DO NOT EDIT. +// Code generated by mockery v2.6.0. DO NOT EDIT. 
package mocks diff --git a/core/internal/mocks/client.go b/core/internal/mocks/client.go index 02b2d6e9777..5449138a59a 100644 --- a/core/internal/mocks/client.go +++ b/core/internal/mocks/client.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.5.1. DO NOT EDIT. +// Code generated by mockery v2.6.0. DO NOT EDIT. package mocks diff --git a/core/internal/mocks/deviation_checker.go b/core/internal/mocks/deviation_checker.go index 4b112695285..ee44d42f3a4 100644 --- a/core/internal/mocks/deviation_checker.go +++ b/core/internal/mocks/deviation_checker.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.5.1. DO NOT EDIT. +// Code generated by mockery v2.6.0. DO NOT EDIT. package mocks diff --git a/core/internal/mocks/deviation_checker_factory.go b/core/internal/mocks/deviation_checker_factory.go index 13ffc5d14d6..1edfe7dd9bb 100644 --- a/core/internal/mocks/deviation_checker_factory.go +++ b/core/internal/mocks/deviation_checker_factory.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.5.1. DO NOT EDIT. +// Code generated by mockery v2.6.0. DO NOT EDIT. package mocks diff --git a/core/internal/mocks/external_initiator_manager.go b/core/internal/mocks/external_initiator_manager.go index 50185565e73..2f7f2835aa7 100644 --- a/core/internal/mocks/external_initiator_manager.go +++ b/core/internal/mocks/external_initiator_manager.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.5.1. DO NOT EDIT. +// Code generated by mockery v2.6.0. DO NOT EDIT. package mocks diff --git a/core/internal/mocks/fetcher.go b/core/internal/mocks/fetcher.go index 662a289f944..d1bc39109b9 100644 --- a/core/internal/mocks/fetcher.go +++ b/core/internal/mocks/fetcher.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.5.1. DO NOT EDIT. +// Code generated by mockery v2.6.0. DO NOT EDIT. package mocks diff --git a/core/internal/mocks/flags.go b/core/internal/mocks/flags.go index 15537082578..988aedcefd6 100644 --- a/core/internal/mocks/flags.go +++ b/core/internal/mocks/flags.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.5.1. DO NOT EDIT. +// Code generated by mockery v2.6.0. DO NOT EDIT. package mocks diff --git a/core/internal/mocks/flux_aggregator.go b/core/internal/mocks/flux_aggregator.go index 6085a0c9514..9647e7196ac 100644 --- a/core/internal/mocks/flux_aggregator.go +++ b/core/internal/mocks/flux_aggregator.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.5.1. DO NOT EDIT. +// Code generated by mockery v2.6.0. DO NOT EDIT. package mocks diff --git a/core/internal/mocks/geth_client.go b/core/internal/mocks/geth_client.go index 94abc4d6e6c..b62b8712ffa 100644 --- a/core/internal/mocks/geth_client.go +++ b/core/internal/mocks/geth_client.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.5.1. DO NOT EDIT. +// Code generated by mockery v2.6.0. DO NOT EDIT. package mocks diff --git a/core/internal/mocks/head_trackable.go b/core/internal/mocks/head_trackable.go index b634880c9eb..69b646b3119 100644 --- a/core/internal/mocks/head_trackable.go +++ b/core/internal/mocks/head_trackable.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.5.1. DO NOT EDIT. +// Code generated by mockery v2.6.0. DO NOT EDIT. package mocks diff --git a/core/internal/mocks/job_subscriber.go b/core/internal/mocks/job_subscriber.go index b7f24d058d2..50796a641de 100644 --- a/core/internal/mocks/job_subscriber.go +++ b/core/internal/mocks/job_subscriber.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.5.1. DO NOT EDIT. +// Code generated by mockery v2.6.0. DO NOT EDIT. 
package mocks diff --git a/core/internal/mocks/key_store_interface.go b/core/internal/mocks/key_store_interface.go index cd715d1c8bc..11076dc27e2 100644 --- a/core/internal/mocks/key_store_interface.go +++ b/core/internal/mocks/key_store_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.5.1. DO NOT EDIT. +// Code generated by mockery v2.6.0. DO NOT EDIT. package mocks diff --git a/core/internal/mocks/notify_new_eth_tx.go b/core/internal/mocks/notify_new_eth_tx.go index 4a08058514d..a19e309b3bd 100644 --- a/core/internal/mocks/notify_new_eth_tx.go +++ b/core/internal/mocks/notify_new_eth_tx.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.5.1. DO NOT EDIT. +// Code generated by mockery v2.6.0. DO NOT EDIT. package mocks diff --git a/core/internal/mocks/prometheus_backend.go b/core/internal/mocks/prometheus_backend.go index a32fd6a31b7..343618c2323 100644 --- a/core/internal/mocks/prometheus_backend.go +++ b/core/internal/mocks/prometheus_backend.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.5.1. DO NOT EDIT. +// Code generated by mockery v2.6.0. DO NOT EDIT. package mocks diff --git a/core/internal/mocks/rpc_client.go b/core/internal/mocks/rpc_client.go index be11a7b287d..e29bda6aaf2 100644 --- a/core/internal/mocks/rpc_client.go +++ b/core/internal/mocks/rpc_client.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.5.1. DO NOT EDIT. +// Code generated by mockery v2.6.0. DO NOT EDIT. package mocks diff --git a/core/internal/mocks/run_executor.go b/core/internal/mocks/run_executor.go index 45769ea0765..29f57ca8c7b 100644 --- a/core/internal/mocks/run_executor.go +++ b/core/internal/mocks/run_executor.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.5.1. DO NOT EDIT. +// Code generated by mockery v2.6.0. DO NOT EDIT. package mocks diff --git a/core/internal/mocks/run_manager.go b/core/internal/mocks/run_manager.go index 48a4469c3b6..6cf8e7ac562 100644 --- a/core/internal/mocks/run_manager.go +++ b/core/internal/mocks/run_manager.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.5.1. DO NOT EDIT. +// Code generated by mockery v2.6.0. DO NOT EDIT. package mocks diff --git a/core/internal/mocks/run_queue.go b/core/internal/mocks/run_queue.go index 29d18d902c9..3decb8cb4b4 100644 --- a/core/internal/mocks/run_queue.go +++ b/core/internal/mocks/run_queue.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.5.1. DO NOT EDIT. +// Code generated by mockery v2.6.0. DO NOT EDIT. package mocks diff --git a/core/internal/mocks/service.go b/core/internal/mocks/service.go index 501f2a65b06..5f9da002aad 100644 --- a/core/internal/mocks/service.go +++ b/core/internal/mocks/service.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.5.1. DO NOT EDIT. +// Code generated by mockery v2.6.0. DO NOT EDIT. package mocks diff --git a/core/internal/mocks/stats_pusher.go b/core/internal/mocks/stats_pusher.go index 8988d76e48b..4406a86498f 100644 --- a/core/internal/mocks/stats_pusher.go +++ b/core/internal/mocks/stats_pusher.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.5.1. DO NOT EDIT. +// Code generated by mockery v2.6.0. DO NOT EDIT. package mocks diff --git a/core/internal/mocks/subscription.go b/core/internal/mocks/subscription.go index fedc30d5e14..af659c68547 100644 --- a/core/internal/mocks/subscription.go +++ b/core/internal/mocks/subscription.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.5.1. DO NOT EDIT. +// Code generated by mockery v2.6.0. DO NOT EDIT. 
package mocks diff --git a/core/services/gasupdater/mocks/config.go b/core/services/gasupdater/mocks/config.go index a3a5bae7b4b..b4d3f94590d 100644 --- a/core/services/gasupdater/mocks/config.go +++ b/core/services/gasupdater/mocks/config.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.5.1. DO NOT EDIT. +// Code generated by mockery v2.6.0. DO NOT EDIT. package mocks diff --git a/core/services/job/job_orm_test.go b/core/services/job/job_orm_test.go index 678827c698f..c3d67b3b70a 100644 --- a/core/services/job/job_orm_test.go +++ b/core/services/job/job_orm_test.go @@ -33,9 +33,9 @@ func TestORM(t *testing.T) { orm := job.NewORM(db, config.Config, pipelineORM, eventBroadcaster, &postgres.NullAdvisoryLocker{}) defer orm.Close() - _, bridge := cltest.NewBridgeType(t, "voter_turnout", "blah") + _, bridge := cltest.NewBridgeType(t, "voter_turnout", "http://blah.com") require.NoError(t, db.Create(bridge).Error) - _, bridge2 := cltest.NewBridgeType(t, "election_winner", "blah") + _, bridge2 := cltest.NewBridgeType(t, "election_winner", "http://blah.com") require.NoError(t, db.Create(bridge2).Error) key := cltest.MustInsertRandomKey(t, db) address := key.Address.Address() @@ -137,11 +137,6 @@ func TestORM(t *testing.T) { err = db.Find(&pipelineSpecs).Error require.NoError(t, err) require.Len(t, pipelineSpecs, 1) - - var pipelineTaskSpecs []pipeline.TaskSpec - err = db.Find(&pipelineTaskSpecs).Error - require.NoError(t, err) - require.Len(t, pipelineTaskSpecs, 9) // 8 explicitly-defined tasks + 1 automatically added ResultTask }) t.Run("increase job spec error occurrence", func(t *testing.T) { @@ -193,9 +188,9 @@ func TestORM_CheckForDeletedJobs(t *testing.T) { key := cltest.MustInsertRandomKey(t, db) address := key.Address.Address() - _, bridge := cltest.NewBridgeType(t, "voter_turnout", "blah") + _, bridge := cltest.NewBridgeType(t, "voter_turnout", "http://blah.com") require.NoError(t, db.Create(bridge).Error) - _, bridge2 := cltest.NewBridgeType(t, "election_winner", "blah") + _, bridge2 := cltest.NewBridgeType(t, "election_winner", "http://blah.com") require.NoError(t, db.Create(bridge2).Error) pipelineORM, eventBroadcaster, cleanupORM := cltest.NewPipelineORM(t, config, db) @@ -279,9 +274,9 @@ func TestORM_DeleteJob_DeletesAssociatedRecords(t *testing.T) { defer orm.Close() t.Run("it deletes records for offchainreporting jobs", func(t *testing.T) { - _, bridge := cltest.NewBridgeType(t, "voter_turnout", "blah") + _, bridge := cltest.NewBridgeType(t, "voter_turnout", "http://blah.com") require.NoError(t, db.Create(bridge).Error) - _, bridge2 := cltest.NewBridgeType(t, "election_winner", "blah") + _, bridge2 := cltest.NewBridgeType(t, "election_winner", "http://blah.com") require.NoError(t, db.Create(bridge2).Error) key := cltest.MustInsertRandomKey(t, store.DB) diff --git a/core/services/job/job_pipeline_orm_integration_test.go b/core/services/job/job_pipeline_orm_integration_test.go index 907a9daf50f..b078860aa0f 100644 --- a/core/services/job/job_pipeline_orm_integration_test.go +++ b/core/services/job/job_pipeline_orm_integration_test.go @@ -23,7 +23,7 @@ import ( ) func clearJobsDb(t *testing.T, db *gorm.DB) { - err := db.Exec(`TRUNCATE jobs, pipeline_runs, pipeline_specs, pipeline_task_runs, pipeline_task_specs CASCADE`).Error + err := db.Exec(`TRUNCATE jobs, pipeline_runs, pipeline_specs, pipeline_task_runs CASCADE`).Error require.NoError(t, err) } @@ -41,56 +41,45 @@ func TestPipelineORM_Integration(t *testing.T) { require.NoError(t, err) result := &pipeline.ResultTask{ - BaseTask: 
pipeline.NewBaseTask("__result__", nil, 0), + BaseTask: pipeline.NewBaseTask("__result__", nil, 0, 0), } answer1 := &pipeline.MedianTask{ - BaseTask: pipeline.NewBaseTask("answer1", nil, 0), + BaseTask: pipeline.NewBaseTask("answer1", nil, 0, 0), } answer2 := &pipeline.BridgeTask{ Name: "election_winner", - BaseTask: pipeline.NewBaseTask("answer2", nil, 1), + BaseTask: pipeline.NewBaseTask("answer2", nil, 1, 0), } ds1_multiply := &pipeline.MultiplyTask{ Times: decimal.NewFromFloat(1.23), - BaseTask: pipeline.NewBaseTask("ds1_multiply", answer1, 0), + BaseTask: pipeline.NewBaseTask("ds1_multiply", answer1, 0, 0), } ds1_parse := &pipeline.JSONParseTask{ Path: []string{"one", "two"}, - BaseTask: pipeline.NewBaseTask("ds1_parse", ds1_multiply, 0), + BaseTask: pipeline.NewBaseTask("ds1_parse", ds1_multiply, 0, 0), } ds1 := &pipeline.BridgeTask{ Name: "voter_turnout", - BaseTask: pipeline.NewBaseTask("ds1", ds1_parse, 0), + BaseTask: pipeline.NewBaseTask("ds1", ds1_parse, 0, 0), } ds2_multiply := &pipeline.MultiplyTask{ Times: decimal.NewFromFloat(4.56), - BaseTask: pipeline.NewBaseTask("ds2_multiply", answer1, 0), + BaseTask: pipeline.NewBaseTask("ds2_multiply", answer1, 0, 0), } ds2_parse := &pipeline.JSONParseTask{ Path: []string{"three", "four"}, - BaseTask: pipeline.NewBaseTask("ds2_parse", ds2_multiply, 0), + BaseTask: pipeline.NewBaseTask("ds2_parse", ds2_multiply, 0, 0), } ds2 := &pipeline.HTTPTask{ URL: models.WebURL(*u), Method: "GET", RequestData: pipeline.HttpRequestData{"hi": "hello"}, - BaseTask: pipeline.NewBaseTask("ds2", ds2_parse, 0), + BaseTask: pipeline.NewBaseTask("ds2", ds2_parse, 0, 0), } expectedTasks := []pipeline.Task{result, answer1, answer2, ds1_multiply, ds1_parse, ds1, ds2_multiply, ds2_parse, ds2} - var expectedTaskSpecs []pipeline.TaskSpec - for _, task := range expectedTasks { - expectedTaskSpecs = append(expectedTaskSpecs, pipeline.TaskSpec{ - DotID: task.DotID(), - PipelineSpecID: specID, - Type: task.Type(), - JSON: pipeline.JSONSerializable{Val: task}, - Index: task.OutputIndex(), - }) - } - - _, bridge := cltest.NewBridgeType(t, "voter_turnout", "blah") + _, bridge := cltest.NewBridgeType(t, "voter_turnout", "http://blah.com") require.NoError(t, db.Create(bridge).Error) - _, bridge2 := cltest.NewBridgeType(t, "election_winner", "blah") + _, bridge2 := cltest.NewBridgeType(t, "election_winner", "http://blah.com") require.NoError(t, db.Create(bridge2).Error) t.Run("creates task DAGs", func(t *testing.T) { @@ -111,33 +100,6 @@ func TestPipelineORM_Integration(t *testing.T) { require.Equal(t, specID, specs[0].ID) require.Equal(t, pipeline.DotStr, specs[0].DotDagSource) - var taskSpecs []pipeline.TaskSpec - err = db.Find(&taskSpecs).Error - require.NoError(t, err) - require.Len(t, taskSpecs, len(expectedTaskSpecs)) - - type equalser interface { - ExportedEquals(otherTask pipeline.Task) bool - } - - for _, taskSpec := range taskSpecs { - taskSpec.JSON.Val.(map[string]interface{})["index"] = taskSpec.Index - taskSpec.JSON.Val, err = pipeline.UnmarshalTaskFromMap(taskSpec.Type, taskSpec.JSON.Val, taskSpec.DotID, nil, nil, nil) - require.NoError(t, err) - - var found bool - for _, expected := range expectedTaskSpecs { - if taskSpec.PipelineSpecID == specID && - taskSpec.Type == expected.Type && - taskSpec.Index == expected.Index && - taskSpec.JSON.Val.(equalser).ExportedEquals(expected.JSON.Val.(pipeline.Task)) { - found = true - break - } - } - require.True(t, found) - } - require.NoError(t, db.Exec(`DELETE FROM pipeline_specs`).Error) }) @@ -161,15 +123,6 @@ func 
TestPipelineORM_Integration(t *testing.T) { require.Equal(t, dbSpec.PipelineSpecID, pipelineSpecs[0].ID) pipelineSpecID := pipelineSpecs[0].ID - var taskSpecs []pipeline.TaskSpec - err = db.Find(&taskSpecs).Error - require.NoError(t, err) - - var taskSpecIDs []int32 - for _, taskSpec := range taskSpecs { - taskSpecIDs = append(taskSpecIDs, taskSpec.ID) - } - // Create the run runID, err = orm.CreateRun(context.Background(), dbSpec.ID, nil) require.NoError(t, err) @@ -186,11 +139,10 @@ func TestPipelineORM_Integration(t *testing.T) { var taskRuns []pipeline.TaskRun err = db.Where("pipeline_run_id = ?", runID).Find(&taskRuns).Error require.NoError(t, err) - require.Len(t, taskRuns, len(taskSpecIDs)) + require.Len(t, taskRuns, len(expectedTasks)) for _, taskRun := range taskRuns { require.Equal(t, runID, taskRun.PipelineRunID) - require.Contains(t, taskSpecIDs, taskRun.PipelineTaskSpecID) require.Nil(t, taskRun.Output) require.True(t, taskRun.Error.IsZero()) } @@ -313,11 +265,11 @@ func TestPipelineORM_Integration(t *testing.T) { // Process the run { var anyRemaining bool - anyRemaining, err = orm.ProcessNextUnfinishedRun(context.Background(), func(_ context.Context, db *gorm.DB, run pipeline.Run, l logger.Logger) (trrs pipeline.TaskRunResults, err error) { + anyRemaining, err = orm.ProcessNextUnfinishedRun(context.Background(), func(_ context.Context, db *gorm.DB, spec pipeline.Spec, l logger.Logger) (trrs pipeline.TaskRunResults, err error) { for dotID, result := range test.answers { var tr pipeline.TaskRun require.NoError(t, db. - Joins("INNER JOIN pipeline_task_specs ON pipeline_task_specs.id = pipeline_task_runs.pipeline_task_spec_id AND dot_id = ?", dotID). + Where("dot_id = ?", dotID). Where("pipeline_run_id = ? ", runID). First(&tr).Error) trr := pipeline.TaskRunResult{ @@ -336,7 +288,7 @@ func TestPipelineORM_Integration(t *testing.T) { // Ensure that the ORM doesn't think there are more runs { - anyRemaining, err2 := orm.ProcessNextUnfinishedRun(context.Background(), func(_ context.Context, db *gorm.DB, run pipeline.Run, l logger.Logger) (pipeline.TaskRunResults, error) { + anyRemaining, err2 := orm.ProcessNextUnfinishedRun(context.Background(), func(_ context.Context, db *gorm.DB, spec pipeline.Spec, l logger.Logger) (pipeline.TaskRunResults, error) { t.Fatal("this callback should never be reached") return nil, nil }) @@ -361,16 +313,16 @@ func TestPipelineORM_Integration(t *testing.T) { } var finishedTaskRuns []pipeline.TaskRun - err = db.Preload("PipelineTaskSpec").Find(&finishedTaskRuns, "pipeline_run_id = ?", runID).Error + err = db.Find(&finishedTaskRuns, "pipeline_run_id = ?", runID).Error require.NoError(t, err) require.Len(t, finishedTaskRuns, len(expectedTasks)) for _, run := range finishedTaskRuns { require.True(t, run.Output != nil || !run.Error.IsZero()) if run.Output != nil { - require.Equal(t, test.answers[run.DotID()].Value, run.Output.Val) + require.Equal(t, test.answers[run.GetDotID()].Value, run.Output.Val) } else if !run.Error.IsZero() { - require.Equal(t, test.answers[run.DotID()].Error.Error(), run.Error.ValueOrZero()) + require.Equal(t, test.answers[run.GetDotID()].Error.Error(), run.Error.ValueOrZero()) } } diff --git a/core/services/job/mocks/delegate.go b/core/services/job/mocks/delegate.go index 1ad54c8b291..f040961d766 100644 --- a/core/services/job/mocks/delegate.go +++ b/core/services/job/mocks/delegate.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.5.1. DO NOT EDIT. +// Code generated by mockery v2.6.0. DO NOT EDIT. 
package mocks diff --git a/core/services/job/mocks/orm.go b/core/services/job/mocks/orm.go index 6668c0be3b2..399eaf06691 100644 --- a/core/services/job/mocks/orm.go +++ b/core/services/job/mocks/orm.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.5.1. DO NOT EDIT. +// Code generated by mockery v2.6.0. DO NOT EDIT. package mocks @@ -127,6 +127,29 @@ func (_m *ORM) FindJob(id int32) (job.Job, error) { return r0, r1 } +// FindJobIDsWithBridge provides a mock function with given fields: name +func (_m *ORM) FindJobIDsWithBridge(name string) ([]int32, error) { + ret := _m.Called(name) + + var r0 []int32 + if rf, ok := ret.Get(0).(func(string) []int32); ok { + r0 = rf(name) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]int32) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(name) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // JobsV2 provides a mock function with given fields: func (_m *ORM) JobsV2() ([]job.Job, error) { ret := _m.Called() diff --git a/core/services/job/mocks/service.go b/core/services/job/mocks/service.go index 3a588142a34..d374b180676 100644 --- a/core/services/job/mocks/service.go +++ b/core/services/job/mocks/service.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.5.1. DO NOT EDIT. +// Code generated by mockery v2.6.0. DO NOT EDIT. package mocks diff --git a/core/services/job/mocks/spawner.go b/core/services/job/mocks/spawner.go index a58402b96fa..93e13de9486 100644 --- a/core/services/job/mocks/spawner.go +++ b/core/services/job/mocks/spawner.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.5.1. DO NOT EDIT. +// Code generated by mockery v2.6.0. DO NOT EDIT. package mocks diff --git a/core/services/job/orm.go b/core/services/job/orm.go index 15260681645..58f91991574 100644 --- a/core/services/job/orm.go +++ b/core/services/job/orm.go @@ -43,6 +43,7 @@ type ORM interface { CreateJob(ctx context.Context, jobSpec *Job, taskDAG pipeline.TaskDAG) error JobsV2() ([]Job, error) FindJob(id int32) (Job, error) + FindJobIDsWithBridge(name string) ([]int32, error) DeleteJob(ctx context.Context, id int32) error RecordError(ctx context.Context, jobID int32, description string) UnclaimJob(ctx context.Context, id int32) error @@ -125,7 +126,7 @@ func (o *orm) ClaimUnclaimedJobs(ctx context.Context) ([]Job, error) { Preload("FluxMonitorSpec"). Preload("OffchainreportingOracleSpec"). Preload("KeeperSpec"). - Preload("PipelineSpec.PipelineTaskSpecs"). + Preload("PipelineSpec"). 
Find(&newlyClaimedJobs).Error if err != nil { return nil, errors.Wrap(err, "ClaimUnclaimedJobs failed to load jobs") @@ -150,6 +151,23 @@ func (o *orm) CreateJob(ctx context.Context, jobSpec *Job, taskDAG pipeline.Task if taskDAG.HasCycles() { return errors.New("task DAG has cycles, which are not permitted") } + tasks, err := taskDAG.TasksInDependencyOrder() + if err != nil { + return err + } + for _, task := range tasks { + if task.Type() == pipeline.TaskTypeBridge { + // Bridge must exist + name := task.(*pipeline.BridgeTask).Name + bt := models.BridgeType{} + if err := o.db.First(&bt, "name = ?", name).Error; err != nil { + if errors.Is(err, gorm.ErrRecordNotFound) { + return errors.Wrap(pipeline.ErrNoSuchBridge, name) + } + return err + } + } + } ctx, cancel := utils.CombinedContext(ctx, o.config.DatabaseMaximumTxDuration()) defer cancel() @@ -332,6 +350,34 @@ func (o *orm) FindJob(id int32) (Job, error) { return job, err } +func (o *orm) FindJobIDsWithBridge(name string) ([]int32, error) { + var jobs []Job + err := o.db.Preload("PipelineSpec").Find(&jobs).Error + if err != nil { + return nil, err + } + var jids []int32 + for _, job := range jobs { + d := pipeline.TaskDAG{} + err = d.UnmarshalText([]byte(job.PipelineSpec.DotDagSource)) + if err != nil { + return nil, err + } + tasks, err := d.TasksInDependencyOrder() + if err != nil { + return nil, err + } + for _, task := range tasks { + if task.Type() == pipeline.TaskTypeBridge { + if task.(*pipeline.BridgeTask).Name == name { + jids = append(jids, job.ID) + } + } + } + } + return jids, nil +} + // PipelineRunsByJobID returns pipeline runs for a job func (o *orm) PipelineRunsByJobID(jobID int32, offset, size int) ([]pipeline.Run, int, error) { var pipelineRuns []pipeline.Run @@ -354,7 +400,6 @@ func (o *orm) PipelineRunsByJobID(jobID int32, offset, size int) ([]pipeline.Run Where(`pipeline_task_runs.type != 'result'`). Order("created_at ASC, id ASC") }). - Preload("PipelineTaskRuns.PipelineTaskSpec"). Joins("INNER JOIN jobs ON pipeline_runs.pipeline_spec_id = jobs.pipeline_spec_id"). Where("jobs.id = ?", jobID). Limit(size). diff --git a/core/services/job/runner_integration_test.go b/core/services/job/runner_integration_test.go index aa2fe673039..775c4292332 100644 --- a/core/services/job/runner_integration_test.go +++ b/core/services/job/runner_integration_test.go @@ -101,7 +101,7 @@ func TestRunner(t *testing.T) { results, err := runner.ResultsForRun(context.Background(), runID) require.NoError(t, err) - assert.Len(t, results, 2) + require.Len(t, results, 2) assert.NoError(t, results[0].Error) assert.NoError(t, results[1].Error) assert.Equal(t, "6225.6", results[0].Value) @@ -110,66 +110,67 @@ func TestRunner(t *testing.T) { // Verify individual task results var runs []pipeline.TaskRun err = db. - Preload("PipelineTaskSpec"). Where("pipeline_run_id = ?", runID). 
Find(&runs).Error assert.NoError(t, err) assert.Len(t, runs, 9) for _, run := range runs { - if run.DotID() == "answer2" { + if run.GetDotID() == "answer2" { assert.Equal(t, "Hal Finney", run.Output.Val) - } else if run.DotID() == "ds2" { + } else if run.GetDotID() == "ds2" { assert.Equal(t, `{"turnout": 61.942}`, run.Output.Val) - } else if run.DotID() == "ds2_parse" { + } else if run.GetDotID() == "ds2_parse" { assert.Equal(t, float64(61.942), run.Output.Val) - } else if run.DotID() == "ds2_multiply" { + } else if run.GetDotID() == "ds2_multiply" { assert.Equal(t, "6194.2", run.Output.Val) - } else if run.DotID() == "ds1" { + } else if run.GetDotID() == "ds1" { assert.Equal(t, `{"data": {"result": 62.57}}`, run.Output.Val) - } else if run.DotID() == "ds1_parse" { + } else if run.GetDotID() == "ds1_parse" { assert.Equal(t, float64(62.57), run.Output.Val) - } else if run.DotID() == "ds1_multiply" { + } else if run.GetDotID() == "ds1_multiply" { assert.Equal(t, "6257", run.Output.Val) - } else if run.DotID() == "answer1" { + } else if run.GetDotID() == "answer1" { assert.Equal(t, "6225.6", run.Output.Val) - } else if run.DotID() == "__result__" { + } else if run.GetDotID() == "__result__" { assert.Equal(t, []interface{}{"6225.6", "Hal Finney"}, run.Output.Val) } else { - t.Fatalf("unknown task '%v'", run.DotID()) + t.Fatalf("unknown task '%v'", run.GetDotID()) } } }) t.Run("must delete job before deleting bridge", func(t *testing.T) { - _, bridge := cltest.NewBridgeType(t, "testbridge", "blah") + _, bridge := cltest.NewBridgeType(t, "testbridge", "http://blah.com") require.NoError(t, db.Create(bridge).Error) dbSpec := makeOCRJobSpecFromToml(t, db, ` type = "offchainreporting" schemaVersion = 1 observationSource = """ - ds1 [type=bridge name="testbridge" url="http://data.com"]; + ds1 [type=bridge name="testbridge"]; """ `) require.NoError(t, jobORM.CreateJob(context.Background(), dbSpec, dbSpec.Pipeline)) // Should not be able to delete a bridge in use. - require.EqualError(t, - db.Delete(&bridge).Error, - "ERROR: update or delete on table \"bridge_types\" violates foreign key constraint \"fk_pipeline_task_specs_bridge_name\" on table \"pipeline_task_specs\" (SQLSTATE 23503)") + jids, err := jobORM.FindJobIDsWithBridge(bridge.Name.String()) + require.NoError(t, err) + require.Equal(t, 1, len(jids)) // But if we delete the job, then we can. require.NoError(t, jobORM.DeleteJob(context.Background(), dbSpec.ID)) - require.NoError(t, db.Delete(&bridge).Error) + jids, err = jobORM.FindJobIDsWithBridge(bridge.Name.String()) + require.NoError(t, err) + require.Equal(t, 0, len(jids)) }) t.Run("referencing a non-existent bridge should error", func(t *testing.T) { - _, bridge := cltest.NewBridgeType(t, "testbridge", "blah") + _, bridge := cltest.NewBridgeType(t, "testbridge2", "http://blah.com") require.NoError(t, db.Create(bridge).Error) dbSpec := makeOCRJobSpecFromToml(t, db, ` type = "offchainreporting" schemaVersion = 1 observationSource = """ - ds1 [type=bridge name="testbridge2" url="http://data.com"]; + ds1 [type=bridge name="testbridge2"]; """ `) require.Error(t, @@ -213,29 +214,28 @@ func TestRunner(t *testing.T) { // Verify individual task results var runs []pipeline.TaskRun err = db. - Preload("PipelineTaskSpec"). Where("pipeline_run_id = ?", runID). 
Find(&runs).Error assert.NoError(t, err) require.Len(t, runs, 4) for _, run := range runs { - if run.DotID() == "ds1" { + if run.GetDotID() == "ds1" { assert.True(t, run.Error.IsZero()) require.NotNil(t, resp, run.Output) assert.Equal(t, resp, run.Output.Val) - } else if run.DotID() == "ds1_parse" { + } else if run.GetDotID() == "ds1_parse" { assert.True(t, run.Error.IsZero()) // FIXME: Shouldn't it be the Val that is null? assert.Nil(t, run.Output) - } else if run.DotID() == "ds1_multiply" { + } else if run.GetDotID() == "ds1_multiply" { assert.Equal(t, "type <nil> cannot be converted to decimal.Decimal", run.Error.ValueOrZero()) assert.Nil(t, run.Output) - } else if run.DotID() == "__result__" { + } else if run.GetDotID() == "__result__" { assert.Equal(t, []interface{}{nil}, run.Output.Val) assert.Equal(t, "[\"type \\u003cnil\\u003e cannot be converted to decimal.Decimal\"]", run.Error.ValueOrZero()) } else { - t.Fatalf("unknown task '%v'", run.DotID()) + t.Fatalf("unknown task '%v'", run.GetDotID()) } } }) @@ -274,27 +274,26 @@ func TestRunner(t *testing.T) { // Verify individual task results var runs []pipeline.TaskRun err = db. - Preload("PipelineTaskSpec"). Where("pipeline_run_id = ?", runID). Find(&runs).Error assert.NoError(t, err) require.Len(t, runs, 4) for _, run := range runs { - if run.DotID() == "ds1" { + if run.GetDotID() == "ds1" { assert.True(t, run.Error.IsZero()) assert.Equal(t, resp, run.Output.Val) - } else if run.DotID() == "ds1_parse" { + } else if run.GetDotID() == "ds1_parse" { assert.Equal(t, "could not resolve path [\"USD\"] in {\"Response\":\"Error\",\"Message\":\"You are over your rate limit please upgrade your account!\",\"HasWarning\":false,\"Type\":99,\"RateLimit\":{\"calls_made\":{\"second\":5,\"minute\":5,\"hour\":955,\"day\":10004,\"month\":15146,\"total_calls\":15152},\"max_calls\":{\"second\":20,\"minute\":300,\"hour\":3000,\"day\":10000,\"month\":75000}},\"Data\":{}}", run.Error.ValueOrZero()) assert.Nil(t, run.Output) - } else if run.DotID() == "ds1_multiply" { + } else if run.GetDotID() == "ds1_multiply" { assert.Equal(t, "could not resolve path [\"USD\"] in {\"Response\":\"Error\",\"Message\":\"You are over your rate limit please upgrade your account!\",\"HasWarning\":false,\"Type\":99,\"RateLimit\":{\"calls_made\":{\"second\":5,\"minute\":5,\"hour\":955,\"day\":10004,\"month\":15146,\"total_calls\":15152},\"max_calls\":{\"second\":20,\"minute\":300,\"hour\":3000,\"day\":10000,\"month\":75000}},\"Data\":{}}", run.Error.ValueOrZero()) assert.Nil(t, run.Output) - } else if run.DotID() == "__result__" { + } else if run.GetDotID() == "__result__" { assert.Equal(t, []interface{}{nil}, run.Output.Val) assert.Equal(t, "[\"could not resolve path [\\\"USD\\\"] in {\\\"Response\\\":\\\"Error\\\",\\\"Message\\\":\\\"You are over your rate limit please upgrade your account!\\\",\\\"HasWarning\\\":false,\\\"Type\\\":99,\\\"RateLimit\\\":{\\\"calls_made\\\":{\\\"second\\\":5,\\\"minute\\\":5,\\\"hour\\\":955,\\\"day\\\":10004,\\\"month\\\":15146,\\\"total_calls\\\":15152},\\\"max_calls\\\":{\\\"second\\\":20,\\\"minute\\\":300,\\\"hour\\\":3000,\\\"day\\\":10000,\\\"month\\\":75000}},\\\"Data\\\":{}}\"]", run.Error.ValueOrZero()) } else { - t.Fatalf("unknown task '%v'", run.DotID()) + t.Fatalf("unknown task '%v'", run.GetDotID()) } } }) @@ -333,27 +332,26 @@ func TestRunner(t *testing.T) { // Verify individual task results var runs []pipeline.TaskRun err = db. - Preload("PipelineTaskSpec"). Where("pipeline_run_id = ?", runID).
Find(&runs).Error assert.NoError(t, err) require.Len(t, runs, 4) for _, run := range runs { - if run.DotID() == "ds1" { + if run.GetDotID() == "ds1" { assert.True(t, run.Error.IsZero()) assert.Equal(t, resp, run.Output.Val) - } else if run.DotID() == "ds1_parse" { + } else if run.GetDotID() == "ds1_parse" { assert.True(t, run.Error.IsZero()) assert.Nil(t, run.Output) - } else if run.DotID() == "ds1_multiply" { + } else if run.GetDotID() == "ds1_multiply" { assert.Equal(t, "type <nil> cannot be converted to decimal.Decimal", run.Error.ValueOrZero()) assert.Nil(t, run.Output) - } else if run.DotID() == "__result__" { + } else if run.GetDotID() == "__result__" { assert.Equal(t, []interface{}{nil}, run.Output.Val) assert.Equal(t, "[\"type \\u003cnil\\u003e cannot be converted to decimal.Decimal\"]", run.Error.ValueOrZero()) } else { - t.Fatalf("unknown task '%v'", run.DotID()) + t.Fatalf("unknown task '%v'", run.GetDotID()) } } }) @@ -383,7 +381,7 @@ ds1 -> ds1_parse; err = jobORM.CreateJob(context.Background(), &os, os.Pipeline) require.NoError(t, err) var jb job.Job - err = db.Preload("PipelineSpec.PipelineTaskSpecs"). + err = db.Preload("PipelineSpec"). Preload("OffchainreportingOracleSpec").Where("id = ?", os.ID). First(&jb).Error require.NoError(t, err) @@ -426,7 +424,7 @@ ds1 -> ds1_parse; err = jobORM.CreateJob(context.Background(), &os, os.Pipeline) require.NoError(t, err) var jb job.Job - err = db.Preload("PipelineSpec.PipelineTaskSpecs"). + err = db.Preload("PipelineSpec"). Preload("OffchainreportingOracleSpec"). Where("id = ?", os.ID). First(&jb).Error @@ -485,7 +483,7 @@ ds1 -> ds1_parse; err = jobORM.CreateJob(context.Background(), &os, os.Pipeline) require.NoError(t, err) var jb job.Job - err = db.Preload("PipelineSpec.PipelineTaskSpecs"). + err = db.Preload("PipelineSpec"). Preload("OffchainreportingOracleSpec").Where("id = ?", os.ID). First(&jb).Error require.NoError(t, err) @@ -532,7 +530,7 @@ ds1 -> ds1_parse; err = jobORM.CreateJob(context.Background(), &os, os.Pipeline) require.NoError(t, err) var jb job.Job - err = db.Preload("PipelineSpec.PipelineTaskSpecs"). + err = db.Preload("PipelineSpec"). Preload("OffchainreportingOracleSpec"). Where("id = ?", os.ID). First(&jb).Error @@ -572,7 +570,7 @@ ds1 -> ds1_parse; err = jobORM.CreateJob(context.Background(), &os, os.Pipeline) require.NoError(t, err) var jb job.Job - err = db.Preload("PipelineSpec.PipelineTaskSpecs"). + err = db.Preload("PipelineSpec"). Preload("OffchainreportingOracleSpec").Where("id = ?", os.ID). First(&jb).Error require.NoError(t, err) @@ -609,7 +607,7 @@ ds1 -> ds1_parse; err = jobORM.CreateJob(context.Background(), dbSpec, dbSpec.Pipeline) require.NoError(t, err) var jb job.Job - err = db.Preload("PipelineSpec.PipelineTaskSpecs"). + err = db.Preload("PipelineSpec"). Preload("OffchainreportingOracleSpec").Where("id = ?", dbSpec.ID).
First(&jb).Error require.NoError(t, err) diff --git a/core/services/job/spawner_test.go b/core/services/job/spawner_test.go index c7a59ea05b8..11de28406e2 100644 --- a/core/services/job/spawner_test.go +++ b/core/services/job/spawner_test.go @@ -6,12 +6,12 @@ import ( "time" "github.com/jackc/pgtype" + "github.com/onsi/gomega" + "github.com/stretchr/testify/assert" "github.com/smartcontractkit/chainlink/core/services/eth" "github.com/smartcontractkit/chainlink/core/store/models" - "github.com/onsi/gomega" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "gorm.io/gorm" @@ -45,7 +45,7 @@ func (d delegate) ServicesForSpec(js job.Job) ([]job.Service, error) { } func clearDB(t *testing.T, db *gorm.DB) { - err := db.Exec(`TRUNCATE jobs, pipeline_runs, pipeline_specs, pipeline_task_runs, pipeline_task_specs CASCADE`).Error + err := db.Exec(`TRUNCATE jobs, pipeline_runs, pipeline_specs, pipeline_task_runs CASCADE`).Error require.NoError(t, err) } @@ -63,9 +63,9 @@ func TestSpawner_CreateJobDeleteJob(t *testing.T) { key := cltest.MustInsertRandomKey(t, db) address := key.Address.Address() - _, bridge := cltest.NewBridgeType(t, "voter_turnout", "blah") + _, bridge := cltest.NewBridgeType(t, "voter_turnout", "http://blah.com") require.NoError(t, db.Create(bridge).Error) - _, bridge2 := cltest.NewBridgeType(t, "election_winner", "blah") + _, bridge2 := cltest.NewBridgeType(t, "election_winner", "http://blah.com") require.NoError(t, db.Create(bridge2).Error) rpc, geth, _, _ := cltest.NewEthMocks(t) @@ -136,6 +136,8 @@ func TestSpawner_CreateJobDeleteJob(t *testing.T) { serviceB2.AssertExpectations(t) }) + clearDB(t, db) + t.Run("starts job services from the DB when .Start() is called", func(t *testing.T) { jobSpecA := makeOCRJobSpec(t, address) jobSpecA.Type = jobTypeA @@ -167,6 +169,8 @@ func TestSpawner_CreateJobDeleteJob(t *testing.T) { serviceA2.On("Close").Return(nil).Once() }) + clearDB(t, db) + t.Run("stops job services when .Stop() is called", func(t *testing.T) { jobSpecA := makeOCRJobSpec(t, address) jobSpecA.Type = jobTypeA @@ -174,9 +178,6 @@ func TestSpawner_CreateJobDeleteJob(t *testing.T) { eventually := cltest.NewAwaiter() serviceA1 := new(mocks.Service) serviceA2 := new(mocks.Service) - serviceA1.On("Start").Return(nil).Once() - serviceA2.On("Start").Return(nil).Once().Run(func(mock.Arguments) { eventually.ItHappened() }) - orm := job.NewORM(db, config.Config, pipeline.NewORM(db, config, eventBroadcaster), eventBroadcaster, &postgres.NullAdvisoryLocker{}) defer orm.Close() delegateA := &delegate{jobTypeA, []job.Service{serviceA1, serviceA2}, 0, nil, offchainreporting.NewDelegate(nil, orm, nil, nil, nil, eth.NewClientWith(rpc, geth), nil, nil, monitoringEndpoint)} @@ -184,6 +185,8 @@ func TestSpawner_CreateJobDeleteJob(t *testing.T) { jobTypeA: delegateA, }) + serviceA1.On("Start").Return(nil).Once() + serviceA2.On("Start").Return(nil).Once().Run(func(mock.Arguments) { eventually.ItHappened() }) jobSpecIDA, err := spawner.CreateJob(context.Background(), *jobSpecA, null.String{}) require.NoError(t, err) delegateA.jobID = jobSpecIDA diff --git a/core/services/log/mocks/abigen_contract.go b/core/services/log/mocks/abigen_contract.go index 79846df8d06..aae58811aa2 100644 --- a/core/services/log/mocks/abigen_contract.go +++ b/core/services/log/mocks/abigen_contract.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.5.1. DO NOT EDIT. +// Code generated by mockery v2.6.0. DO NOT EDIT. 
package mocks diff --git a/core/services/log/mocks/broadcast.go b/core/services/log/mocks/broadcast.go index 5fe7717ab14..c90cf1eee6f 100644 --- a/core/services/log/mocks/broadcast.go +++ b/core/services/log/mocks/broadcast.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.5.1. DO NOT EDIT. +// Code generated by mockery v2.6.0. DO NOT EDIT. package mocks diff --git a/core/services/log/mocks/broadcaster.go b/core/services/log/mocks/broadcaster.go index 36ef6fd6454..1ffb85c3740 100644 --- a/core/services/log/mocks/broadcaster.go +++ b/core/services/log/mocks/broadcaster.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.5.1. DO NOT EDIT. +// Code generated by mockery v2.6.0. DO NOT EDIT. package mocks diff --git a/core/services/log/mocks/listener.go b/core/services/log/mocks/listener.go index 50b4fa8ab65..d30b1d10528 100644 --- a/core/services/log/mocks/listener.go +++ b/core/services/log/mocks/listener.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.5.1. DO NOT EDIT. +// Code generated by mockery v2.6.0. DO NOT EDIT. package mocks diff --git a/core/services/log/mocks/orm.go b/core/services/log/mocks/orm.go index c38c39b5a35..f2b68cdf5c8 100644 --- a/core/services/log/mocks/orm.go +++ b/core/services/log/mocks/orm.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.5.1. DO NOT EDIT. +// Code generated by mockery v2.6.0. DO NOT EDIT. package mocks @@ -13,13 +13,13 @@ type ORM struct { mock.Mock } -// MarkBroadcastConsumed provides a mock function with given fields: blockHash, logIndex, jobID -func (_m *ORM) MarkBroadcastConsumed(blockHash common.Hash, logIndex uint, jobID interface{}) error { - ret := _m.Called(blockHash, logIndex, jobID) +// MarkBroadcastConsumed provides a mock function with given fields: blockHash, blockNumber, logIndex, jobID +func (_m *ORM) MarkBroadcastConsumed(blockHash common.Hash, blockNumber uint64, logIndex uint, jobID interface{}) error { + ret := _m.Called(blockHash, blockNumber, logIndex, jobID) var r0 error - if rf, ok := ret.Get(0).(func(common.Hash, uint, interface{}) error); ok { - r0 = rf(blockHash, logIndex, jobID) + if rf, ok := ret.Get(0).(func(common.Hash, uint64, uint, interface{}) error); ok { + r0 = rf(blockHash, blockNumber, logIndex, jobID) } else { r0 = ret.Error(0) } diff --git a/core/services/offchainreporting/data_source.go b/core/services/offchainreporting/data_source.go index 7736b0f53c4..a9762972f2b 100644 --- a/core/services/offchainreporting/data_source.go +++ b/core/services/offchainreporting/data_source.go @@ -29,17 +29,15 @@ var _ ocrtypes.DataSource = (*dataSource)(nil) func (ds dataSource) Observe(ctx context.Context) (ocrtypes.Observation, error) { var observation ocrtypes.Observation start := time.Now() - run, err := pipeline.NewRun(ds.spec, start) - if err != nil { - return observation, errors.Wrapf(err, "error creating new run for spec ID %v", ds.spec.ID) - } - - trrs, err := ds.pipelineRunner.ExecuteRun(ctx, run, ds.ocrLogger) + trrs, err := ds.pipelineRunner.ExecuteRun(ctx, ds.spec, ds.ocrLogger) if err != nil { return observation, errors.Wrapf(err, "error executing run for spec ID %v", ds.spec.ID) } end := time.Now() + var run pipeline.Run + run.PipelineSpecID = ds.spec.ID + run.CreatedAt = start run.FinishedAt = &end finalResult := trrs.FinalResult() diff --git a/core/services/offchainreporting/mocks/ocr_contract_tracker_db.go b/core/services/offchainreporting/mocks/ocr_contract_tracker_db.go index 8a9c5a2726f..265432d597a 100644 --- a/core/services/offchainreporting/mocks/ocr_contract_tracker_db.go +++ 
b/core/services/offchainreporting/mocks/ocr_contract_tracker_db.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.5.1. DO NOT EDIT. +// Code generated by mockery v2.6.0. DO NOT EDIT. package mocks diff --git a/core/services/pipeline/common.go b/core/services/pipeline/common.go index 25aca204fbb..15144a1db9c 100644 --- a/core/services/pipeline/common.go +++ b/core/services/pipeline/common.go @@ -33,6 +33,7 @@ type ( OutputIndex() int32 TaskTimeout() (time.Duration, bool) SetDefaults(inputValues map[string]string, g TaskDAG, self taskDAGNode) error + NPreds() int } Config interface { @@ -56,6 +57,16 @@ var ( ErrBadInput = errors.New("bad input for task") ) +// SafeTx bundles a tx with its mutex so that multiple goroutines can share +// the same transaction. The mutex is necessary to prevent concurrent database +// calls inside the same transaction from failing: +// with the pq driver: `pq: unexpected Parse response 'C'` +// with the pgx driver: `conn busy`. +type SafeTx struct { + tx *gorm.DB + txMu *sync.Mutex +} + // Result is the result of a TaskRun type Result struct { Value interface{} @@ -129,7 +140,8 @@ func (result FinalResult) SingularResult() (Result, error) { -// TaskSpecID will always be non-zero +// Task and TaskRun will always be set type TaskRunResult struct { ID int64 - TaskSpecID int32 + Task Task + TaskRun TaskRun Result Result FinishedAt time.Time IsTerminal bool @@ -185,14 +197,31 @@ type RunWithResults struct { type BaseTask struct { outputTask Task dotID string `mapstructure:"-"` + nPreds int `mapstructure:"-"` Index int32 `mapstructure:"index" json:"-" ` Timeout time.Duration `mapstructure:"timeout"` } -func (t BaseTask) DotID() string { return t.dotID } -func (t BaseTask) OutputIndex() int32 { return t.Index } -func (t BaseTask) OutputTask() Task { return t.outputTask } -func (t *BaseTask) SetOutputTask(outputTask Task) { t.outputTask = outputTask } +func (t BaseTask) NPreds() int { + return t.nPreds +} + +func (t BaseTask) DotID() string { + return t.dotID +} + +func (t BaseTask) OutputIndex() int32 { + return t.Index +} + +func (t BaseTask) OutputTask() Task { + return t.outputTask +} + +func (t *BaseTask) SetOutputTask(outputTask Task) { + t.outputTask = outputTask +} + func (t BaseTask) TaskTimeout() (time.Duration, bool) { if t.Timeout == time.Duration(0) { return time.Duration(0), false @@ -257,7 +286,7 @@ const ( const ResultTaskDotID = "__result__" -func UnmarshalTaskFromMap(taskType TaskType, taskMap interface{}, dotID string, config Config, txdb *gorm.DB, txdbMutex *sync.Mutex) (_ Task, err error) { +func UnmarshalTaskFromMap(taskType TaskType, taskMap interface{}, dotID string, config Config, txdb *gorm.DB, txdbMutex *sync.Mutex, nPreds int) (_ Task, err error) { defer utils.WrapIfError(&err, "UnmarshalTaskFromMap") switch taskMap.(type) { @@ -271,17 +300,17 @@ func UnmarshalTaskFromMap(taskType TaskType, taskMap interface{}, dotID string, var task Task switch taskType { case TaskTypeHTTP: - task = &HTTPTask{config: config, BaseTask: BaseTask{dotID: dotID}} + task = &HTTPTask{config: config, BaseTask: BaseTask{dotID: dotID, nPreds: nPreds}} case TaskTypeBridge: - task = &BridgeTask{config: config, txdb: txdb, txdbMutex: txdbMutex, BaseTask: BaseTask{dotID: dotID}} + task = &BridgeTask{config: config, safeTx: SafeTx{txdb, txdbMutex}, BaseTask: BaseTask{dotID: dotID, nPreds: nPreds}} case TaskTypeMedian: - task = &MedianTask{BaseTask: BaseTask{dotID: dotID}} + task = &MedianTask{BaseTask: BaseTask{dotID: dotID, nPreds: nPreds}} case TaskTypeAny: - task = &AnyTask{BaseTask: BaseTask{dotID: dotID}} + task =
&AnyTask{BaseTask: BaseTask{dotID: dotID, nPreds: nPreds}} case TaskTypeJSONParse: - task = &JSONParseTask{BaseTask: BaseTask{dotID: dotID}} + task = &JSONParseTask{BaseTask: BaseTask{dotID: dotID, nPreds: nPreds}} case TaskTypeMultiply: - task = &MultiplyTask{BaseTask: BaseTask{dotID: dotID}} + task = &MultiplyTask{BaseTask: BaseTask{dotID: dotID, nPreds: nPreds}} case TaskTypeResult: task = &ResultTask{BaseTask: BaseTask{dotID: ResultTaskDotID}} default: diff --git a/core/services/pipeline/common_test.go b/core/services/pipeline/common_test.go index f5df9cfa6e3..1a7eb273f4a 100644 --- a/core/services/pipeline/common_test.go +++ b/core/services/pipeline/common_test.go @@ -69,14 +69,14 @@ func Test_UnmarshalTaskFromMap(t *testing.T) { t.Run("returns error if task is not the right type", func(t *testing.T) { taskMap := interface{}(nil) - _, err := pipeline.UnmarshalTaskFromMap(pipeline.TaskType("http"), taskMap, "foo-dot-id", nil, nil, nil) + _, err := pipeline.UnmarshalTaskFromMap(pipeline.TaskType("http"), taskMap, "foo-dot-id", nil, nil, nil, 0) require.EqualError(t, err, "UnmarshalTaskFromMap: UnmarshalTaskFromMap only accepts a map[string]interface{} or a map[string]string. Got (<nil>) of type <nil>") taskMap = struct { foo time.Time bar int }{time.Unix(42, 42), 42} - _, err = pipeline.UnmarshalTaskFromMap(pipeline.TaskType("http"), taskMap, "foo-dot-id", nil, nil, nil) + _, err = pipeline.UnmarshalTaskFromMap(pipeline.TaskType("http"), taskMap, "foo-dot-id", nil, nil, nil, 0) require.Error(t, err) require.Contains(t, err.Error(), "UnmarshalTaskFromMap: UnmarshalTaskFromMap only accepts a map[string]interface{} or a map[string]string") }) diff --git a/core/services/pipeline/graph.go b/core/services/pipeline/graph.go index 608d819c8bd..1982a8dfe9e 100644 --- a/core/services/pipeline/graph.go +++ b/core/services/pipeline/graph.go @@ -14,6 +14,7 @@ import ( // TaskDAG fulfills the graph.DirectedGraph interface, which makes it possible // for us to `dot.Unmarshal(...)` a DOT string directly into it. Once unmarshalled, // calling `TaskDAG#TasksInDependencyOrder()` will return the unmarshaled tasks. +// NOTE: Each task may have at most one child (output) task. type TaskDAG struct { *simple.DirectedGraph DOTSource string @@ -62,7 +63,8 @@ func (g TaskDAG) TasksInDependencyOrder() ([]Task, error) { continue } - task, err := UnmarshalTaskFromMap(TaskType(node.attrs["type"]), node.attrs, node.dotID, nil, nil, nil) + nPreds := g.To(node.ID()).Len() + task, err := UnmarshalTaskFromMap(TaskType(node.attrs["type"]), node.attrs, node.dotID, nil, nil, nil, nPreds) if err != nil { return nil, err } @@ -90,6 +92,28 @@ func (g TaskDAG) TasksInDependencyOrder() ([]Task, error) { return tasks, nil } +func (g TaskDAG) TasksInDependencyOrderWithResultTask() ([]Task, error) { + tasks, err := g.TasksInDependencyOrder() + if err != nil { + return nil, err + } + // Create the final result task that collects the answers from the pipeline's + // outputs. This is a Postgres-related performance optimization.
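+ // Any task with no output edge is wired into this synthetic result task; + // resultPreds counts those edges so the result task knows how many inputs + // to wait for before it can run.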
+ resultTask := ResultTask{BaseTask{dotID: ResultTaskDotID}} + resultPreds := 0 + for _, task := range tasks { + if task.DotID() == ResultTaskDotID { + return nil, errors.Errorf("%v is a reserved keyword and cannot be used in job specs", ResultTaskDotID) + } + if task.OutputTask() == nil { + task.SetOutputTask(&resultTask) + resultPreds++ + } + } + resultTask.nPreds = resultPreds + return append([]Task{&resultTask}, tasks...), nil +} + func (g TaskDAG) MinTimeout() (time.Duration, bool, error) { var minTimeout time.Duration = 1<<63 - 1 var aTimeoutSet bool diff --git a/core/services/pipeline/graph_test.go b/core/services/pipeline/graph_test.go index d647b711ac5..b8e596472da 100644 --- a/core/services/pipeline/graph_test.go +++ b/core/services/pipeline/graph_test.go @@ -126,38 +126,38 @@ func TestGraph_TasksInDependencyOrder(t *testing.T) { require.NoError(t, err) answer1 := &MedianTask{ - BaseTask: NewBaseTask("answer1", nil, 0), + BaseTask: NewBaseTask("answer1", nil, 0, 2), AllowedFaults: 1, } answer2 := &BridgeTask{ Name: "election_winner", - BaseTask: NewBaseTask("answer2", nil, 1), + BaseTask: NewBaseTask("answer2", nil, 1, 0), } ds1_multiply := &MultiplyTask{ Times: decimal.NewFromFloat(1.23), - BaseTask: NewBaseTask("ds1_multiply", answer1, 0), + BaseTask: NewBaseTask("ds1_multiply", answer1, 0, 1), } ds1_parse := &JSONParseTask{ Path: []string{"one", "two"}, - BaseTask: NewBaseTask("ds1_parse", ds1_multiply, 0), + BaseTask: NewBaseTask("ds1_parse", ds1_multiply, 0, 1), } ds1 := &BridgeTask{ Name: "voter_turnout", - BaseTask: NewBaseTask("ds1", ds1_parse, 0), + BaseTask: NewBaseTask("ds1", ds1_parse, 0, 0), } ds2_multiply := &MultiplyTask{ Times: decimal.NewFromFloat(4.56), - BaseTask: NewBaseTask("ds2_multiply", answer1, 0), + BaseTask: NewBaseTask("ds2_multiply", answer1, 0, 1), } ds2_parse := &JSONParseTask{ Path: []string{"three", "four"}, - BaseTask: NewBaseTask("ds2_parse", ds2_multiply, 0), + BaseTask: NewBaseTask("ds2_parse", ds2_multiply, 0, 1), } ds2 := &HTTPTask{ URL: models.WebURL(*u), Method: "GET", RequestData: HttpRequestData{"hi": "hello"}, - BaseTask: NewBaseTask("ds2", ds2_parse, 0), + BaseTask: NewBaseTask("ds2", ds2_parse, 0, 0), } tasks, err := g.TasksInDependencyOrder() diff --git a/core/services/pipeline/mocks/runner.go b/core/services/pipeline/mocks/runner.go index dd7aa14bf5c..72e3cf75eaf 100644 --- a/core/services/pipeline/mocks/runner.go +++ b/core/services/pipeline/mocks/runner.go @@ -93,13 +93,13 @@ func (_m *Runner) ExecuteAndInsertNewRun(ctx context.Context, spec pipeline.Spec return r0, r1, r2 } -// ExecuteRun provides a mock function with given fields: ctx, run, l -func (_m *Runner) ExecuteRun(ctx context.Context, run pipeline.Run, l logger.Logger) (pipeline.TaskRunResults, error) { - ret := _m.Called(ctx, run, l) +// ExecuteRun provides a mock function with given fields: ctx, spec, l +func (_m *Runner) ExecuteRun(ctx context.Context, spec pipeline.Spec, l logger.Logger) (pipeline.TaskRunResults, error) { + ret := _m.Called(ctx, spec, l) var r0 pipeline.TaskRunResults - if rf, ok := ret.Get(0).(func(context.Context, pipeline.Run, logger.Logger) pipeline.TaskRunResults); ok { - r0 = rf(ctx, run, l) + if rf, ok := ret.Get(0).(func(context.Context, pipeline.Spec, logger.Logger) pipeline.TaskRunResults); ok { + r0 = rf(ctx, spec, l) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(pipeline.TaskRunResults) @@ -107,8 +107,8 @@ func (_m *Runner) ExecuteRun(ctx context.Context, run pipeline.Run, l logger.Log } var r1 error - if rf, ok := 
ret.Get(1).(func(context.Context, pipeline.Run, logger.Logger) error); ok { - r1 = rf(ctx, run, l) + if rf, ok := ret.Get(1).(func(context.Context, pipeline.Spec, logger.Logger) error); ok { + r1 = rf(ctx, spec, l) } else { r1 = ret.Error(1) } diff --git a/core/services/pipeline/models.go b/core/services/pipeline/models.go index 375f53c8d40..a80d58941db 100644 --- a/core/services/pipeline/models.go +++ b/core/services/pipeline/models.go @@ -5,65 +5,39 @@ import ( "strconv" "time" + "github.com/pkg/errors" + "github.com/smartcontractkit/chainlink/core/store/models" - "github.com/pkg/errors" "gopkg.in/guregu/null.v4" ) -type ( - Spec struct { - ID int32 `gorm:"primary_key"` - DotDagSource string `json:"dotDagSource"` - CreatedAt time.Time `json:"-"` - MaxTaskDuration models.Interval `json:"-"` - PipelineTaskSpecs []TaskSpec `json:"-" gorm:"foreignkey:PipelineSpecID;->"` - } - - TaskSpec struct { - ID int32 `json:"-" gorm:"primary_key"` - DotID string `json:"dotId"` - PipelineSpecID int32 `json:"-"` - PipelineSpec Spec `json:"-"` - Type TaskType `json:"-"` - JSON JSONSerializable `json:"-" gorm:"type:jsonb"` - Index int32 `json:"-"` - SuccessorID null.Int `json:"-"` - CreatedAt time.Time `json:"-"` - BridgeName *string `json:"-"` - Bridge models.BridgeType `json:"-" gorm:"foreignKey:BridgeName;->"` - } +type Spec struct { + ID int32 `gorm:"primary_key"` + DotDagSource string `json:"dotDagSource"` + CreatedAt time.Time `json:"-"` + MaxTaskDuration models.Interval `json:"-"` +} - Run struct { - ID int64 `json:"-" gorm:"primary_key"` - PipelineSpecID int32 `json:"-"` - PipelineSpec Spec `json:"pipelineSpec"` - Meta JSONSerializable `json:"meta"` - Errors JSONSerializable `json:"errors" gorm:"type:jsonb"` - Outputs JSONSerializable `json:"outputs" gorm:"type:jsonb"` - CreatedAt time.Time `json:"createdAt"` - FinishedAt *time.Time `json:"finishedAt"` - PipelineTaskRuns []TaskRun `json:"taskRuns" gorm:"foreignkey:PipelineRunID;->"` - } +func (Spec) TableName() string { + return "pipeline_specs" +} - TaskRun struct { - ID int64 `json:"-" gorm:"primary_key"` - Type TaskType `json:"type"` - PipelineRun Run `json:"-"` - PipelineRunID int64 `json:"-"` - Output *JSONSerializable `json:"output" gorm:"type:jsonb"` - Error null.String `json:"error"` - PipelineTaskSpecID int32 `json:"-"` - PipelineTaskSpec TaskSpec `json:"taskSpec" gorm:"foreignkey:PipelineTaskSpecID;->"` - CreatedAt time.Time `json:"createdAt"` - FinishedAt *time.Time `json:"finishedAt"` - } -) +type Run struct { + ID int64 `json:"-" gorm:"primary_key"` + PipelineSpecID int32 `json:"-"` + PipelineSpec Spec `json:"pipelineSpec"` + Meta JSONSerializable `json:"meta"` + Errors JSONSerializable `json:"errors" gorm:"type:jsonb"` + Outputs JSONSerializable `json:"outputs" gorm:"type:jsonb"` + CreatedAt time.Time `json:"createdAt"` + FinishedAt *time.Time `json:"finishedAt"` + PipelineTaskRuns []TaskRun `json:"taskRuns" gorm:"foreignkey:PipelineRunID;->"` +} -func (Spec) TableName() string { return "pipeline_specs" } -func (Run) TableName() string { return "pipeline_runs" } -func (TaskSpec) TableName() string { return "pipeline_task_specs" } -func (TaskRun) TableName() string { return "pipeline_task_runs" } +func (Run) TableName() string { + return "pipeline_runs" +} func (r Run) GetID() string { return fmt.Sprintf("%v", r.ID) @@ -87,35 +61,6 @@ func (r Run) FinalErrors() (f FinalErrors) { return f } -// RunStatus represents the status of a run -type RunStatus int - -const ( - // RunStatusUnknown is the when the run status cannot be determined. 
- RunStatusUnknown RunStatus = iota - // RunStatusInProgress is used for when a run is actively being executed. - RunStatusInProgress - // RunStatusErrored is used for when a run has errored and will not complete. - RunStatusErrored - // RunStatusCompleted is used for when a run has successfully completed execution. - RunStatusCompleted -) - -// Completed returns true if the status is RunStatusCompleted. -func (s RunStatus) Completed() bool { - return s == RunStatusCompleted -} - -// Errored returns true if the status is RunStatusErrored. -func (s RunStatus) Errored() bool { - return s == RunStatusErrored -} - -// Finished returns true if the status is final and can't be changed. -func (s RunStatus) Finished() bool { - return s.Completed() || s.Errored() -} - // Status determines the status of the run. func (r *Run) Status() RunStatus { if r.HasErrors() { @@ -127,6 +72,23 @@ func (r *Run) Status() RunStatus { return RunStatusInProgress } +type TaskRun struct { + ID int64 `json:"-" gorm:"primary_key"` + Type TaskType `json:"type"` + PipelineRun Run `json:"-"` + PipelineRunID int64 `json:"-"` + Output *JSONSerializable `json:"output" gorm:"type:jsonb"` + Error null.String `json:"error"` + CreatedAt time.Time `json:"createdAt"` + FinishedAt *time.Time `json:"finishedAt"` + Index int32 + DotID string +} + +func (TaskRun) TableName() string { + return "pipeline_task_runs" +} + func (tr TaskRun) GetID() string { return fmt.Sprintf("%v", tr.ID) } @@ -140,12 +102,8 @@ func (tr *TaskRun) SetID(value string) error { return nil } -func (s TaskSpec) IsFinalPipelineOutput() bool { - return s.SuccessorID.IsZero() -} - -func (tr TaskRun) DotID() string { - return tr.PipelineTaskSpec.DotID +func (tr TaskRun) GetDotID() string { + return tr.DotID } func (tr TaskRun) Result() Result { @@ -157,3 +115,32 @@ func (tr TaskRun) Result() Result { } return result } + +// RunStatus represents the status of a run +type RunStatus int + +const ( + // RunStatusUnknown is used when the run status cannot be determined. + RunStatusUnknown RunStatus = iota + // RunStatusInProgress is used for when a run is actively being executed. + RunStatusInProgress + // RunStatusErrored is used for when a run has errored and will not complete. + RunStatusErrored + // RunStatusCompleted is used for when a run has successfully completed execution. + RunStatusCompleted +) + +// Completed returns true if the status is RunStatusCompleted. +func (s RunStatus) Completed() bool { + return s == RunStatusCompleted +} + +// Errored returns true if the status is RunStatusErrored. +func (s RunStatus) Errored() bool { + return s == RunStatusErrored +} + +// Finished returns true if the status is final and can't be changed. +func (s RunStatus) Finished() bool { + return s.Completed() || s.Errored() +} diff --git a/core/services/pipeline/orm.go b/core/services/pipeline/orm.go index cea18ebb760..bd9f7e24f80 100644 --- a/core/services/pipeline/orm.go +++ b/core/services/pipeline/orm.go @@ -7,7 +7,7 @@ import ( "strings" "time" - "github.com/jackc/pgconn" + "gorm.io/gorm/clause" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" @@ -16,7 +16,6 @@ import ( "github.com/smartcontractkit/chainlink/core/services/postgres" "github.com/smartcontractkit/chainlink/core/store/models" "github.com/smartcontractkit/chainlink/core/utils" - "gopkg.in/guregu/null.v4" "gorm.io/gorm" ) @@ -78,71 +77,15 @@ func NewORM(db *gorm.DB, config Config, eventBroadcaster postgres.EventBroadcast // The tx argument must be an already started transaction.
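+// NOTE: Individual task specs are no longer persisted. Only the DOT source is +// stored, and the task graph is re-derived from DotDagSource whenever a run is +// created or executed.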
func (o *orm) CreateSpec(ctx context.Context, tx *gorm.DB, taskDAG TaskDAG, maxTaskDuration models.Interval) (int32, error) { - var specID int32 spec := Spec{ DotDagSource: taskDAG.DOTSource, MaxTaskDuration: maxTaskDuration, } err := tx.Create(&spec).Error if err != nil { - return specID, err + return 0, err } - specID = spec.ID - - // Create the pipeline task specs in dependency order so - // that we know what the successor ID for each task is - tasks, err := taskDAG.TasksInDependencyOrder() - if err != nil { - return specID, err - } - - // Create the final result task that collects the answers from the pipeline's - // outputs. This is a Postgres-related performance optimization. - resultTask := ResultTask{BaseTask{dotID: ResultTaskDotID}} - for _, task := range tasks { - if task.DotID() == ResultTaskDotID { - return specID, errors.Errorf("%v is a reserved keyword and cannot be used in job specs", ResultTaskDotID) - } - if task.OutputTask() == nil { - task.SetOutputTask(&resultTask) - } - } - tasks = append([]Task{&resultTask}, tasks...) - - taskSpecIDs := make(map[Task]int32) - for _, task := range tasks { - var successorID null.Int - if task.OutputTask() != nil { - successor := task.OutputTask() - successorID = null.IntFrom(int64(taskSpecIDs[successor])) - } - - taskSpec := TaskSpec{ - DotID: task.DotID(), - PipelineSpecID: spec.ID, - Type: task.Type(), - JSON: JSONSerializable{task, false}, - Index: task.OutputIndex(), - SuccessorID: successorID, - } - if task.Type() == TaskTypeBridge { - btName := task.(*BridgeTask).Name - taskSpec.BridgeName = &btName - } - err = tx.Create(&taskSpec).Error - if err != nil { - pqErr, ok := err.(*pgconn.PgError) - if ok && pqErr.Code == "23503" { - if pqErr.ConstraintName == "fk_pipeline_task_specs_bridge_name" { - return specID, errors.Wrap(ErrNoSuchBridge, *taskSpec.BridgeName) - } - } - return specID, err - } - - taskSpecIDs[task] = taskSpec.ID - } - return specID, errors.WithStack(err) + return spec.ID, errors.WithStack(err) } // CreateRun adds a Run record to the DB, and one TaskRun @@ -168,17 +111,29 @@ func (o *orm) CreateRun(ctx context.Context, jobID int32, meta map[string]interf return errors.Wrap(err, "could not create pipeline run") } - runID = run.ID + if err = tx.Preload("PipelineSpec").First(&run).Error; err != nil { + return err + } + d := TaskDAG{} + if err = d.UnmarshalText([]byte(run.PipelineSpec.DotDagSource)); err != nil { + return err + } - // Create the task runs - err = tx.Exec(` - INSERT INTO pipeline_task_runs ( - pipeline_run_id, pipeline_task_spec_id, type, index, created_at - ) - SELECT ? 
AS pipeline_run_id, id AS pipeline_task_spec_id, type, index, NOW() AS created_at - FROM pipeline_task_specs - WHERE pipeline_spec_id = ?`, run.ID, run.PipelineSpecID).Error - return errors.Wrap(err, "could not create pipeline task runs") + var trs []TaskRun + tasks, err := d.TasksInDependencyOrderWithResultTask() + if err != nil { + return err + } + for _, ts := range tasks { + trs = append(trs, TaskRun{ + Type: ts.Type(), + PipelineRunID: run.ID, + Index: ts.OutputIndex(), + DotID: ts.DotID(), + }) + } + runID = run.ID + return tx.Create(&trs).Error }) return runID, errors.WithStack(err) } @@ -186,7 +141,7 @@ func (o *orm) CreateRun(ctx context.Context, jobID int32, meta map[string]interf // TODO: Remove generation of special "result" task // TODO: Remove the unique index on successor_id // https://www.pivotaltracker.com/story/show/176557536 -type ProcessRunFunc func(ctx context.Context, txdb *gorm.DB, pRun Run, l logger.Logger) (TaskRunResults, error) +type ProcessRunFunc func(ctx context.Context, txdb *gorm.DB, spec Spec, l logger.Logger) (TaskRunResults, error) // ProcessNextUnfinishedRun pulls the next available unfinished run from the // database and passes it into the provided ProcessRunFunc for execution. @@ -221,36 +176,36 @@ func (o *orm) processNextUnfinishedRun(ctx context.Context, fn ProcessRunFunc) e var pRun Run err := postgres.GormTransaction(txContext, o.db, func(tx *gorm.DB) error { - err := tx.Raw(` - SELECT id FROM pipeline_runs - WHERE finished_at IS NULL - ORDER BY id ASC - FOR UPDATE SKIP LOCKED - LIMIT 1 - `).Scan(&pRun).Error - if err != nil { - return errors.Wrap(err, "error finding next pipeline run") - } - // NOTE: We have to lock and load in two distinct queries to work - // around a bizarre bug in gormv1. - // Trying to lock and load in one hit _sometimes_ fails to preload - // associations for no discernable reason. - err = tx. + err := tx. Preload("PipelineSpec"). - Preload("PipelineTaskRuns.PipelineTaskSpec"). - Where("pipeline_runs.id = ?", pRun.ID). + Preload("PipelineTaskRuns"). + Where("pipeline_runs.finished_at IS NULL"). + Order("id ASC"). + Clauses(clause.Locking{ + Strength: "UPDATE", + Options: "SKIP LOCKED", + }). First(&pRun).Error if err != nil { return errors.Wrap(err, "error loading run associations") } - logger.Infow("Pipeline run started", "runID", pRun.ID) - trrs, err := fn(ctx, tx, pRun, *logger.Default) + trrs, err := fn(ctx, tx, pRun.PipelineSpec, *logger.Default) if err != nil { return errors.Wrap(err, "error calling ProcessRunFunc") } + // Populate the task run result IDs by matching the dot + // IDs. 
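+ // With pipeline_task_spec_id gone, the dot ID is the only stable key that + // links an in-memory TaskRunResult back to its pipeline_task_runs row.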
+ for i, trr := range trrs { + for _, tr := range pRun.PipelineTaskRuns { + if trr.TaskRun.DotID == tr.DotID { + trrs[i].ID = tr.ID + } + } + } + if err = o.updateTaskRuns(tx, trrs); err != nil { return errors.Wrap(err, "could not update task runs") } @@ -334,19 +289,15 @@ func (o *orm) InsertFinishedRunWithResults(ctx context.Context, run Run, trrs [] } runID = run.ID - sql := ` - INSERT INTO pipeline_task_runs (pipeline_run_id, type, index, output, error, pipeline_task_spec_id, created_at, finished_at) - SELECT ?, pts.type, pts.index, ptruns.output, ptruns.error, pts.id, ptruns.created_at, ptruns.finished_at - FROM (VALUES %s) ptruns (pipeline_task_spec_id, output, error, created_at, finished_at) - JOIN pipeline_task_specs pts ON pts.id = ptruns.pipeline_task_spec_id + INSERT INTO pipeline_task_runs (pipeline_run_id, type, index, output, error, dot_id, created_at, finished_at) + VALUES %s ` - valueStrings := []string{} - valueArgs := []interface{}{runID} + valueArgs := []interface{}{} for _, trr := range trrs { - valueStrings = append(valueStrings, "(?::int,?::jsonb,?::text,?::timestamptz,?::timestamptz)") - valueArgs = append(valueArgs, trr.TaskSpecID, trr.Result.OutputDB(), trr.Result.ErrorDB(), run.CreatedAt, trr.FinishedAt) + valueStrings = append(valueStrings, "(?,?,?,?,?,?,?,?)") + valueArgs = append(valueArgs, run.ID, trr.Task.Type(), trr.Task.OutputIndex(), trr.Result.OutputDB(), trr.Result.ErrorDB(), trr.TaskRun.DotID, trr.TaskRun.CreatedAt, trr.TaskRun.FinishedAt) } /* #nosec G201 */ @@ -427,14 +378,12 @@ func (o *orm) ResultsForRun(ctx context.Context, runID int64) ([]Result, error) var results []Result err = postgres.GormTransaction(ctx, o.db, func(tx *gorm.DB) error { var resultTaskRun TaskRun - err = tx. - Preload("PipelineTaskSpec"). - Joins("INNER JOIN pipeline_task_specs ON pipeline_task_runs.pipeline_task_spec_id = pipeline_task_specs.id"). - Where("pipeline_run_id = ?", runID). - Where("finished_at IS NOT NULL"). - Where("pipeline_task_specs.successor_id IS NULL"). - Where("pipeline_task_specs.dot_id = ?", ResultTaskDotID). - First(&resultTaskRun). + err = tx.Raw(` + SELECT * FROM pipeline_task_runs + WHERE pipeline_run_id = ? + AND finished_at IS NOT NULL + AND dot_id = ? +`, runID, ResultTaskDotID).Scan(&resultTaskRun). Error if err != nil { return errors.Wrapf(err, "Pipeline runner could not fetch pipeline results (runID: %v)", runID) @@ -471,20 +420,12 @@ func (o *orm) ResultsForRun(ctx context.Context, runID int64) ([]Result, error) } func (o *orm) RunFinished(runID int64) (bool, error) { - // TODO: Since we denormalised this can be made more efficient - // https://www.pivotaltracker.com/story/show/176557536 - var tr TaskRun - err := o.db.Raw(` - SELECT * - FROM pipeline_task_runs - INNER JOIN pipeline_task_specs ON pipeline_task_runs.pipeline_task_spec_id = pipeline_task_specs.id - WHERE pipeline_task_runs.pipeline_run_id = ? 
AND pipeline_task_specs.successor_id IS NULL - LIMIT 1 - `, runID).Scan(&tr).Error - if err != nil { + var tr Run + err := o.db.Where("id = ?", runID).First(&tr).Error + if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) { return false, errors.Wrapf(err, "could not determine if run is finished (run ID: %v)", runID) } - if tr.ID == 0 { + if errors.Is(err, gorm.ErrRecordNotFound) { return false, errors.Errorf("run not found - could not determine if run is finished (run ID: %v)", runID) } return tr.FinishedAt != nil, nil diff --git a/core/services/pipeline/orm_test.go b/core/services/pipeline/orm_test.go index 5fa62ef3265..310d555f03c 100644 --- a/core/services/pipeline/orm_test.go +++ b/core/services/pipeline/orm_test.go @@ -27,7 +27,6 @@ func Test_PipelineORM_CreateRun(t *testing.T) { require.NoError(t, err) // Check that JobRun, TaskRuns were created - var prs []pipeline.Run var trs []pipeline.TaskRun diff --git a/core/services/pipeline/runner.go b/core/services/pipeline/runner.go index e9329577064..94babbf89b6 100644 --- a/core/services/pipeline/runner.go +++ b/core/services/pipeline/runner.go @@ -27,7 +27,7 @@ type Runner interface { Start() error Close() error CreateRun(ctx context.Context, jobID int32, meta map[string]interface{}) (runID int64, err error) - ExecuteRun(ctx context.Context, run Run, l logger.Logger) (trrs TaskRunResults, err error) + ExecuteRun(ctx context.Context, spec Spec, l logger.Logger) (trrs TaskRunResults, err error) ExecuteAndInsertNewRun(ctx context.Context, spec Spec, l logger.Logger) (runID int64, finalResult FinalResult, err error) AwaitRun(ctx context.Context, runID int64) error ResultsForRun(ctx context.Context, runID int64) ([]Result, error) @@ -75,6 +75,7 @@ func (r *runner) Start() error { if !r.OkayToStart() { return errors.New("Pipeline runner has already been started") } + go r.runLoop() newRunsSubscription, err := r.orm.ListenForNewRuns() @@ -99,6 +100,7 @@ func (r *runner) Start() error { } }() } + return nil } @@ -149,31 +151,6 @@ func (r *runner) runLoop() { } } -// NewRun creates an in-memory Run along with its TaskRuns for the provided job -// It does not interact with the database -func NewRun(spec Spec, startedAt time.Time) (run Run, err error) { - if len(spec.PipelineTaskSpecs) == 0 { - return run, errors.New("spec.PipelineTaskSpecs was empty") - } - taskRuns := make([]TaskRun, len(spec.PipelineTaskSpecs)) - for i, taskSpec := range spec.PipelineTaskSpecs { - taskRuns[i] = TaskRun{ - Type: taskSpec.Type, - PipelineTaskSpecID: taskSpec.ID, - PipelineTaskSpec: taskSpec, - CreatedAt: startedAt, - } - } - run = Run{ - PipelineSpecID: spec.ID, - PipelineSpec: spec, - PipelineTaskRuns: taskRuns, - CreatedAt: startedAt, - } - - return run, nil -} - func (r *runner) CreateRun(ctx context.Context, jobID int32, meta map[string]interface{}) (int64, error) { runID, err := r.orm.CreateRun(ctx, jobID, meta) if err != nil { @@ -213,6 +190,7 @@ func (r *runner) processRun() (anyRemaining bool, err error) { type ( memoryTaskRun struct { + task Task taskRun TaskRun next *memoryTaskRun nPredecessors int @@ -244,68 +222,70 @@ func (m *memoryTaskRun) results() (a []Result) { return } -func (r *runner) ExecuteRun(ctx context.Context, run Run, l logger.Logger) (trrs TaskRunResults, err error) { - return r.executeRun(ctx, r.orm.DB(), run, l) +func (r *runner) ExecuteRun(ctx context.Context, spec Spec, l logger.Logger) (trrs TaskRunResults, err error) { + return r.executeRun(ctx, r.orm.DB(), spec, l) } -func (r *runner) executeRun(ctx context.Context, txdb 
*gorm.DB, run Run, l logger.Logger) (trrs TaskRunResults, err error) { - l.Debugw("Initiating tasks for pipeline run", "runID", run.ID) +func (r *runner) executeRun(ctx context.Context, txdb *gorm.DB, spec Spec, l logger.Logger) (TaskRunResults, error) { + l.Debugw("Initiating tasks for pipeline run", "specID", spec.ID) + var ( + err error + trrs TaskRunResults + ) startRun := time.Now() - if run.PipelineSpec.ID == 0 { - return nil, errors.Errorf("run.PipelineSpec.ID may not be 0: %#v", run) + d := TaskDAG{} + err = d.UnmarshalText([]byte(spec.DotDagSource)) + if err != nil { + return trrs, err } // Find "firsts" and work forwards - // 1. Make map of all memory task runs keyed by task spec id - all := make(map[int32]*memoryTaskRun) - for _, tr := range run.PipelineTaskRuns { - if tr.PipelineTaskSpec.ID == 0 { - return nil, errors.Errorf("taskRun.PipelineTaskSpec.ID may not be 0: %#v", tr) + tasks, err := d.TasksInDependencyOrderWithResultTask() + if err != nil { + return nil, err + } + all := make(map[string]*memoryTaskRun) + var graph []*memoryTaskRun + txMu := new(sync.Mutex) + for _, task := range tasks { + if task.Type() == TaskTypeHTTP { + task.(*HTTPTask).config = r.config + } + if task.Type() == TaskTypeBridge { + task.(*BridgeTask).config = r.config + task.(*BridgeTask).safeTx = SafeTx{txdb, txMu} } mtr := memoryTaskRun{ - taskRun: tr, + nPredecessors: task.NPreds(), + task: task, + taskRun: TaskRun{ + Type: task.Type(), + Index: task.OutputIndex(), + DotID: task.DotID(), + }, } - all[tr.PipelineTaskSpec.ID] = &mtr - } - - var graph []*memoryTaskRun - - // TODO: Test with multiple and single null successor IDs - // https://www.pivotaltracker.com/story/show/176557536 - - // 2. Fill in predecessor count and next, append firsts to work graph - for id, mtr := range all { - for _, pred := range all { - if !pred.taskRun.PipelineTaskSpec.SuccessorID.IsZero() && pred.taskRun.PipelineTaskSpec.SuccessorID.ValueOrZero() == int64(id) { - mtr.nPredecessors++ - } + if mtr.nPredecessors == 0 { + graph = append(graph, &mtr) } + all[task.DotID()] = &mtr + } - if mtr.taskRun.PipelineTaskSpec.SuccessorID.IsZero() { - mtr.next = nil + // Populate next pointers + for did, ts := range all { + if ts.task.OutputTask() != nil { + all[did].next = all[ts.task.OutputTask().DotID()] } else { - mtr.next = all[int32(mtr.taskRun.PipelineTaskSpec.SuccessorID.ValueOrZero())] - } - - if mtr.nPredecessors == 0 { - // No predecessors so this is the first one - graph = append(graph, mtr) + all[did].next = nil } } + // TODO: Test with multiple and single null successor IDs + // https://www.pivotaltracker.com/story/show/176557536 //
Execute tasks using "fan in" job processing - var updateMu sync.Mutex var wg sync.WaitGroup wg.Add(len(graph)) - - // HACK: This mutex is necessary to work around a bug in the pq driver that - // causes concurrent database calls inside the same transaction to fail - // with a mysterious `pq: unexpected Parse response 'C'` error - // FIXME: Get rid of this by replacing pq with pgx - var txdbMutex sync.Mutex - for _, mtr := range graph { go func(m *memoryTaskRun) { defer wg.Done() @@ -333,13 +313,15 @@ func (r *runner) executeRun(ctx context.Context, txdb *gorm.DB, run Run, l logge startTaskRun := time.Now() - result := r.executeTaskRun(ctx, txdb, run.PipelineSpec, m.taskRun, m.results(), &txdbMutex, l) + result := r.executeTaskRun(ctx, spec, m.task, m.taskRun, m.results(), l) finishedAt := time.Now() + m.taskRun.CreatedAt = startTaskRun + m.taskRun.FinishedAt = &finishedAt trr := TaskRunResult{ - ID: m.taskRun.ID, - TaskSpecID: m.taskRun.PipelineTaskSpec.ID, + TaskRun: m.taskRun, + Task: m.task, Result: result, FinishedAt: finishedAt, IsTerminal: m.next == nil, @@ -351,21 +333,21 @@ func (r *runner) executeRun(ctx context.Context, txdb *gorm.DB, run Run, l logge elapsed := finishedAt.Sub(startTaskRun) - promPipelineTaskExecutionTime.WithLabelValues(fmt.Sprintf("%d", run.PipelineSpec.ID), string(m.taskRun.PipelineTaskSpec.Type)).Set(float64(elapsed)) + promPipelineTaskExecutionTime.WithLabelValues(fmt.Sprintf("%d", spec.ID), string(m.taskRun.Type)).Set(float64(elapsed)) var status string if result.Error != nil { status = "error" } else { status = "completed" } - promPipelineTasksTotalFinished.WithLabelValues(fmt.Sprintf("%d", run.PipelineSpec.ID), string(m.taskRun.PipelineTaskSpec.Type), status).Inc() + promPipelineTasksTotalFinished.WithLabelValues(fmt.Sprintf("%d", spec.ID), string(m.taskRun.Type), status).Inc() if m.next == nil { return } m.next.predMu.Lock() - m.next.inputs = append(m.next.inputs, input{result: result, index: m.taskRun.PipelineTaskSpec.Index}) + m.next.inputs = append(m.next.inputs, input{result: result, index: m.task.OutputIndex()}) m.next.nPredecessors-- m.next.predMu.Unlock() @@ -377,31 +359,16 @@ func (r *runner) executeRun(ctx context.Context, txdb *gorm.DB, run Run, l logge wg.Wait() runTime := time.Since(startRun) - l.Debugw("Finished all tasks for pipeline run", "runID", run.ID, "runTime", runTime) + l.Debugw("Finished all tasks for pipeline run", "specID", spec.ID, "runTime", runTime) return trrs, err } -func (r *runner) executeTaskRun(ctx context.Context, txdb *gorm.DB, spec Spec, taskRun TaskRun, inputs []Result, txdbMutex *sync.Mutex, l logger.Logger) Result { +func (r *runner) executeTaskRun(ctx context.Context, spec Spec, task Task, taskRun TaskRun, inputs []Result, l logger.Logger) Result { loggerFields := []interface{}{ - "taskName", taskRun.PipelineTaskSpec.DotID, - "taskID", taskRun.PipelineTaskSpecID, + "taskName", taskRun.DotID, "runID", taskRun.PipelineRunID, "taskRunID", taskRun.ID, - "specID", taskRun.PipelineTaskSpec.PipelineSpecID, - } - - task, err := UnmarshalTaskFromMap( - taskRun.PipelineTaskSpec.Type, - taskRun.PipelineTaskSpec.JSON.Val, - taskRun.PipelineTaskSpec.DotID, - r.config, - txdb, - txdbMutex, - ) - if err != nil { - l.Errorw("Pipeline task run could not be unmarshaled", append(loggerFields, "error", err)...) 
- return Result{Error: err} } // Order of precedence for task timeout: @@ -439,14 +406,10 @@ func (r *runner) executeTaskRun(ctx context.Context, txdb *gorm.DB, spec Spec, t // ExecuteAndInsertNewRun bypasses the job pipeline entirely. // It executes a run in memory then inserts the finished run/task run records, returning the final result func (r *runner) ExecuteAndInsertNewRun(ctx context.Context, spec Spec, l logger.Logger) (runID int64, result FinalResult, err error) { - start := time.Now() - - run, err := NewRun(spec, start) - if err != nil { - return run.ID, result, errors.Wrapf(err, "error creating new run for spec ID %v", spec.ID) - } - - trrs, err := r.ExecuteRun(ctx, run, l) + var run Run + run.PipelineSpecID = spec.ID + run.CreatedAt = time.Now() + trrs, err := r.ExecuteRun(ctx, spec, l) if err != nil { return run.ID, result, errors.Wrapf(err, "error executing run for spec ID %v", spec.ID) } diff --git a/core/services/pipeline/runner_test.go b/core/services/pipeline/runner_test.go index c6c63d131bf..e1e9a77e12c 100644 --- a/core/services/pipeline/runner_test.go +++ b/core/services/pipeline/runner_test.go @@ -18,7 +18,6 @@ import ( "github.com/smartcontractkit/chainlink/core/store/models" "github.com/smartcontractkit/chainlink/core/utils" "github.com/stretchr/testify/require" - "gopkg.in/guregu/null.v4" ) // TODO: Add a test for multiple terminal tasks after __result__ is deprecated @@ -55,173 +54,39 @@ func Test_PipelineRunner_ExecuteTaskRuns(t *testing.T) { r := pipeline.NewRunner(orm, store.Config) - spec := pipeline.Spec{ID: 142} - taskRuns := []pipeline.TaskRun{ - // 1. Bridge request, succeeds - pipeline.TaskRun{ - ID: 10, - PipelineTaskSpec: pipeline.TaskSpec{ - ID: 1, - DotID: `ds1`, - Type: "bridge", - JSON: cltest.MustNewJSONSerializable(t, `{"name": "example-bridge", "Timeout": 0, "requestData": {"data": {"coin": "BTC", "market": "USD"}}}`), - SuccessorID: null.IntFrom(2), - PipelineSpec: spec, - }, - }, - pipeline.TaskRun{ - ID: 11, - PipelineTaskSpec: pipeline.TaskSpec{ - ID: 2, - DotID: `ds1_parse`, - Type: "jsonparse", - JSON: cltest.MustNewJSONSerializable(t, `{"Lax": false, "path": ["data", "result"], "Timeout": 0}`), - SuccessorID: null.IntFrom(3), - PipelineSpec: spec, - }, - }, - pipeline.TaskRun{ - ID: 12, - PipelineTaskSpec: pipeline.TaskSpec{ - ID: 3, - DotID: `ds1_multiply`, - Type: "multiply", - JSON: cltest.MustNewJSONSerializable(t, `{"times": "1000000000000000000", "Timeout": 0}`), - SuccessorID: null.IntFrom(102), - PipelineSpec: spec, - }, - }, - // 2. HTTP request, succeeds - pipeline.TaskRun{ - ID: 21, - PipelineTaskSpec: pipeline.TaskSpec{ - ID: 33, - DotID: `ds2`, - Type: "http", - JSON: cltest.MustNewJSONSerializable(t, fmt.Sprintf(`{"method": "GET", "url": "%s", "requestData": {"data": {"coin": "BTC", "market": "USD"}}}`, s2.URL)), - SuccessorID: null.IntFrom(32), - PipelineSpec: spec, - }, - }, - pipeline.TaskRun{ - ID: 22, - PipelineTaskSpec: pipeline.TaskSpec{ - ID: 32, - DotID: `ds2_parse`, - Type: "jsonparse", - JSON: cltest.MustNewJSONSerializable(t, `{"Lax": false, "path": ["data", "result"], "Timeout": 0}`), - SuccessorID: null.IntFrom(31), - PipelineSpec: spec, - }, - }, - pipeline.TaskRun{ - ID: 23, - PipelineTaskSpec: pipeline.TaskSpec{ - ID: 31, - DotID: `ds2_multiply`, - Type: "multiply", - JSON: cltest.MustNewJSONSerializable(t, `{"times": "1000000000000000000", "Timeout": 0}`), - SuccessorID: null.IntFrom(102), - PipelineSpec: spec, - }, - }, - // 3. 
HTTP request, fails - pipeline.TaskRun{ - ID: 41, - PipelineTaskSpec: pipeline.TaskSpec{ - ID: 51, - DotID: `ds3`, - Type: "http", - JSON: cltest.MustNewJSONSerializable(t, `{"method": "GET", "url": "blah://test.invalid", "requestData": {"data": {"coin": "BTC", "market": "USD"}}}`), - SuccessorID: null.IntFrom(52), - PipelineSpec: spec, - }, - }, - pipeline.TaskRun{ - ID: 42, - PipelineTaskSpec: pipeline.TaskSpec{ - ID: 52, - DotID: `ds3_parse`, - Type: "jsonparse", - JSON: cltest.MustNewJSONSerializable(t, `{"Lax": false, "path": ["data", "result"], "Timeout": 0}`), - SuccessorID: null.IntFrom(53), - PipelineSpec: spec, - }, - }, - pipeline.TaskRun{ - ID: 43, - PipelineTaskSpec: pipeline.TaskSpec{ - ID: 53, - DotID: `ds3_multiply`, - Type: "multiply", - JSON: cltest.MustNewJSONSerializable(t, `{"times": "1000000000000000000", "Timeout": 0}`), - SuccessorID: null.IntFrom(102), - PipelineSpec: spec, - }, - }, - // MEDIAN - pipeline.TaskRun{ - ID: 30, - PipelineTaskSpec: pipeline.TaskSpec{ - ID: 102, - DotID: `median`, - Type: "median", - JSON: cltest.MustNewJSONSerializable(t, `{"allowedFaults": 1}`), - SuccessorID: null.IntFrom(203), - Index: 0, - PipelineSpec: spec, - }, - }, - // 4. HTTP Request, side by side with median to test indexing - pipeline.TaskRun{ - ID: 71, - PipelineTaskSpec: pipeline.TaskSpec{ - ID: 72, - DotID: `ds4`, - Type: "http", - JSON: cltest.MustNewJSONSerializable(t, fmt.Sprintf(`{"method": "GET", "url": "%s"}`, s4.URL)), - SuccessorID: null.IntFrom(203), - Index: 1, - PipelineSpec: spec, - }, - }, - // 5. HTTP Request, side by side with median to test indexing - pipeline.TaskRun{ - ID: 73, - PipelineTaskSpec: pipeline.TaskSpec{ - ID: 74, - DotID: `ds5`, - Type: "http", - JSON: cltest.MustNewJSONSerializable(t, fmt.Sprintf(`{"method": "GET", "url": "%s"}`, s5.URL)), - SuccessorID: null.IntFrom(203), - Index: 2, - PipelineSpec: spec, - }, - }, - // 6. 
Result - pipeline.TaskRun{ - ID: 13, - PipelineTaskSpec: pipeline.TaskSpec{ - ID: 203, - DotID: `__result__`, - Type: "result", - JSON: cltest.MustNewJSONSerializable(t, `{}`), - SuccessorID: null.Int{}, - PipelineSpec: spec, - }, - }, - } + d := pipeline.TaskDAG{} + s := fmt.Sprintf(` +ds1 [type=bridge name="example-bridge" timeout=0 requestData="{\"data\": {\"coin\": \"BTC\", \"market\": \"USD\"}}"] +ds1_parse [type=jsonparse lax=false path="data,result"] +ds1_multiply [type=multiply times=1000000000000000000] + +ds2 [type=http method="GET" url="%s" requestData="{\"data\": {\"coin\": \"BTC\", \"market\": \"USD\"}}"] +ds2_parse [type=jsonparse lax=false path="data,result"] +ds2_multiply [type=multiply times=1000000000000000000] + +ds3 [type=http method="GET" url="blah://test.invalid" requestData="{\"data\": {\"coin\": \"BTC\", \"market\": \"USD\"}}"] +ds3_parse [type=jsonparse lax=false path="data,result"] +ds3_multiply [type=multiply times=1000000000000000000] + +ds1->ds1_parse->ds1_multiply->median; +ds2->ds2_parse->ds2_multiply->median; +ds3->ds3_parse->ds3_multiply->median; + +median [type=median index=0] +ds4 [type=http method="GET" url="%s" index=1] +ds5 [type=http method="GET" url="%s" index=2] +`, s2.URL, s4.URL, s5.URL) + err = d.UnmarshalText([]byte(s)) + require.NoError(t, err) + ts, err := d.TasksInDependencyOrder() + require.NoError(t, err) - run := pipeline.Run{ - ID: 242, - PipelineSpec: spec, - PipelineTaskRuns: taskRuns, + spec := pipeline.Spec{ + DotDagSource: s, } - - trrs, err := r.ExecuteRun(context.Background(), run, *logger.Default) + trrs, err := r.ExecuteRun(context.Background(), spec, *logger.Default) require.NoError(t, err) - - require.Len(t, trrs, len(taskRuns)) + require.Len(t, trrs, len(ts)+1) // +1 for the result task var finalResults []pipeline.Result for _, trr := range trrs { @@ -263,44 +128,6 @@ func Test_PipelineRunner_ExecuteTaskRuns(t *testing.T) { require.Len(t, errorResults, 3) } -func dotGraphToSpec(t *testing.T, id int32, taskIDStart int32, graph string) pipeline.Spec { - d := pipeline.NewTaskDAG() - err := d.UnmarshalText([]byte(graph)) - require.NoError(t, err) - ts, err := d.TasksInDependencyOrder() - require.NoError(t, err) - var s = pipeline.Spec{ - ID: id, - PipelineTaskSpecs: make([]pipeline.TaskSpec, 0), - } - taskSpecIDs := make(map[pipeline.Task]int32) - for _, task := range ts { - var successorID null.Int - if task.OutputTask() != nil { - successor := task.OutputTask() - successorID = null.IntFrom(int64(taskSpecIDs[successor])) - } - v := pipeline.JSONSerializable{task, false} - b, err := v.MarshalJSON() - require.NoError(t, err) - v2 := pipeline.JSONSerializable{} - err = v2.UnmarshalJSON(b) - require.NoError(t, err) - s.PipelineTaskSpecs = append(s.PipelineTaskSpecs, pipeline.TaskSpec{ - ID: taskIDStart, - DotID: task.DotID(), - PipelineSpecID: s.ID, - Type: task.Type(), - JSON: v2, - Index: task.OutputIndex(), - SuccessorID: successorID, - }) - taskSpecIDs[task] = taskIDStart - taskIDStart++ - } - return s -} - func Test_PipelineRunner_HandleFaults(t *testing.T) { // We want to test the scenario where one or multiple APIs time out, // but a sufficient number of them still complete within the desired time frame @@ -318,7 +145,7 @@ func Test_PipelineRunner_HandleFaults(t *testing.T) { res.WriteHeader(http.StatusOK) res.Write([]byte(`{"result":11}`)) })) - s := dotGraphToSpec(t, 1, 1, fmt.Sprintf(` + s := fmt.Sprintf(` ds1 [type=http url="%s"]; ds1_parse [type=jsonparse path="result"]; ds1_multiply [type=multiply times=100]; @@ -331,20 
+158,21 @@ ds1 -> ds1_parse -> ds1_multiply -> answer1; ds2 -> ds2_parse -> ds2_multiply -> answer1; answer1 [type=median index=0]; -`, m1.URL, m2.URL)) +`, m1.URL, m2.URL) r := pipeline.NewRunner(orm, store.Config) - run, err := pipeline.NewRun(s, time.Now()) - require.NoError(t, err) // If we cancel before an API is finished, we should still get a median. ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond) defer cancel() - trrs, err := r.ExecuteRun(ctx, run, *logger.Default) + spec := pipeline.Spec{ + DotDagSource: s, + } + trrs, err := r.ExecuteRun(ctx, spec, *logger.Default) require.NoError(t, err) for _, trr := range trrs { if trr.IsTerminal { - require.Equal(t, decimal.RequireFromString("1100"), trr.Result.Value) + require.Equal(t, decimal.RequireFromString("1100"), trr.Result.Value.([]interface{})[0].(decimal.Decimal)) } } } diff --git a/core/services/pipeline/task.bridge.go b/core/services/pipeline/task.bridge.go index 07b6be84694..6b593e65fb6 100644 --- a/core/services/pipeline/task.bridge.go +++ b/core/services/pipeline/task.bridge.go @@ -4,11 +4,8 @@ import ( "context" "fmt" "net/url" - "sync" "github.com/pkg/errors" - "gorm.io/gorm" - "github.com/smartcontractkit/chainlink/core/logger" "github.com/smartcontractkit/chainlink/core/store/models" ) @@ -19,14 +16,8 @@ type BridgeTask struct { Name string `json:"name"` RequestData HttpRequestData `json:"requestData"` - txdb *gorm.DB - // HACK: This mutex is necessary to work around a bug in the pq driver that - // causes concurrent database calls inside the same transaction to fail - // with a mysterious `pq: unexpected Parse response 'C'` error - // FIXME: Get rid of this by replacing pq with pgx - // https://www.pivotaltracker.com/story/show/174401187 - txdbMutex *sync.Mutex - config Config + safeTx SafeTx + config Config } var _ Task = (*BridgeTask)(nil) @@ -58,7 +49,7 @@ func (t *BridgeTask) Run(ctx context.Context, taskRun TaskRun, inputs []Result) logger.Warnw(`"meta" field on task run is malformed, discarding`, "jobID", taskRun.PipelineRun.PipelineSpecID, "taskRunID", taskRun.ID, - "task", taskRun.PipelineTaskSpec.DotID, + "task", taskRun.DotID, "meta", taskRun.PipelineRun.Meta.Val, ) } @@ -85,12 +76,12 @@ func (t *BridgeTask) Run(ctx context.Context, taskRun TaskRun, inputs []Result) func (t BridgeTask) getBridgeURLFromName() (url.URL, error) { task := models.TaskType(t.Name) - if t.txdbMutex != nil { - t.txdbMutex.Lock() - defer t.txdbMutex.Unlock() + if t.safeTx.txMu != nil { + t.safeTx.txMu.Lock() + defer t.safeTx.txMu.Unlock() } - bridge, err := FindBridge(t.txdb, task) + bridge, err := FindBridge(t.safeTx.tx, task) if err != nil { return url.URL{}, err } diff --git a/core/services/pipeline/task.http.go b/core/services/pipeline/task.http.go index 3dc30314ab1..b6f9c8de893 100644 --- a/core/services/pipeline/task.http.go +++ b/core/services/pipeline/task.http.go @@ -4,7 +4,6 @@ import ( "bytes" "context" "encoding/json" - "fmt" "io" "net/http" "time" @@ -134,8 +133,8 @@ func (t *HTTPTask) Run(ctx context.Context, taskRun TaskRun, inputs []Result) Re return Result{Error: errors.Wrapf(err, "error making http request")} } elapsed := time.Since(start) - promHTTPFetchTime.WithLabelValues(fmt.Sprintf("%d", taskRun.PipelineTaskSpecID)).Set(float64(elapsed)) - promHTTPResponseBodySize.WithLabelValues(fmt.Sprintf("%d", taskRun.PipelineTaskSpecID)).Set(float64(len(responseBytes))) + promHTTPFetchTime.WithLabelValues(taskRun.DotID).Set(float64(elapsed)) + 
promHTTPResponseBodySize.WithLabelValues(taskRun.DotID).Set(float64(len(responseBytes))) if statusCode >= 400 { maybeErr := bestEffortExtractError(responseBytes) @@ -145,7 +144,7 @@ func (t *HTTPTask) Run(ctx context.Context, taskRun TaskRun, inputs []Result) Re logger.Debugw("HTTP task got response", "response", string(responseBytes), "url", t.URL.String(), - "pipelineTaskSpecID", taskRun.PipelineTaskSpecID, + "pipelineTaskSpecID", taskRun.DotID, ) // NOTE: We always stringify the response since this is required for all current jobs. // If a binary response is required we might consider adding an adapter diff --git a/core/services/pipeline/test_helpers.go b/core/services/pipeline/test_helpers.go index bb3671533db..98f06f97408 100644 --- a/core/services/pipeline/test_helpers.go +++ b/core/services/pipeline/test_helpers.go @@ -26,13 +26,13 @@ const ( ` ) -func NewBaseTask(dotID string, t Task, index int32) BaseTask { - return BaseTask{dotID: dotID, outputTask: t, Index: index} +func NewBaseTask(dotID string, t Task, index int32, nPreds int) BaseTask { + return BaseTask{dotID: dotID, outputTask: t, Index: index, nPreds: nPreds} } func (t *BridgeTask) HelperSetConfigAndTxDB(config Config, txdb *gorm.DB) { t.config = config - t.txdb = txdb + t.safeTx = SafeTx{tx: txdb} } func (t *HTTPTask) HelperSetConfig(config Config) { diff --git a/core/services/postgres/mocks/event_broadcaster.go b/core/services/postgres/mocks/event_broadcaster.go index d6cd7deae05..bb918431464 100644 --- a/core/services/postgres/mocks/event_broadcaster.go +++ b/core/services/postgres/mocks/event_broadcaster.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.5.1. DO NOT EDIT. +// Code generated by mockery v2.6.0. DO NOT EDIT. package mocks diff --git a/core/services/postgres/mocks/subscription.go b/core/services/postgres/mocks/subscription.go index 0ab3a62b295..65c7bac4931 100644 --- a/core/services/postgres/mocks/subscription.go +++ b/core/services/postgres/mocks/subscription.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.5.1. DO NOT EDIT. +// Code generated by mockery v2.6.0. DO NOT EDIT. 
package mocks diff --git a/core/services/prom_reporter_test.go b/core/services/prom_reporter_test.go index 21eed8ebb39..c1454fdb989 100644 --- a/core/services/prom_reporter_test.go +++ b/core/services/prom_reporter_test.go @@ -61,7 +61,6 @@ func Test_PromReporter_OnNewLongestChain(t *testing.T) { defer cleanup() require.NoError(t, store.DB.Exec(`SET CONSTRAINTS pipeline_task_runs_pipeline_run_id_fkey DEFERRED`).Error) - require.NoError(t, store.DB.Exec(`SET CONSTRAINTS pipeline_task_runs_pipeline_task_spec_id_fkey DEFERRED`).Error) backend := new(mocks.PrometheusBackend) d, _ := store.DB.DB() diff --git a/core/store/migrations/0016_pipeline_task_run_dot_id.go b/core/store/migrations/0016_pipeline_task_run_dot_id.go new file mode 100644 index 00000000000..6b02c7e3d2e --- /dev/null +++ b/core/store/migrations/0016_pipeline_task_run_dot_id.go @@ -0,0 +1,48 @@ +package migrations + +import ( + "github.com/go-gormigrate/gormigrate/v2" + "gorm.io/gorm" +) + +const ( + up16 = ` +ALTER TABLE pipeline_task_runs ADD COLUMN dot_id text; +UPDATE pipeline_task_runs SET dot_id = ts.dot_id FROM pipeline_task_specs ts WHERE ts.id = pipeline_task_runs.pipeline_task_spec_id; +ALTER TABLE pipeline_task_runs ALTER COLUMN dot_id SET NOT NULL, DROP COLUMN pipeline_task_spec_id; +DROP TABLE pipeline_task_specs; + +CREATE UNIQUE INDEX ON pipeline_task_runs(pipeline_run_id, dot_id); +DROP INDEX idx_pipeline_task_runs_optimise_find_results; +` + down16 = ` +ALTER TABLE pipeline_task_runs DROP COLUMN dot_id; +CREATE TABLE public.pipeline_task_specs ( + id BIGSERIAL PRIMARY KEY, + dot_id text NOT NULL, + pipeline_spec_id integer NOT NULL, + type text NOT NULL, + json jsonb NOT NULL, + index integer DEFAULT 0 NOT NULL, + successor_id integer, + created_at timestamp with time zone NOT NULL +); +CREATE INDEX idx_pipeline_task_specs_created_at ON public.pipeline_task_specs USING brin (created_at); +CREATE INDEX idx_pipeline_task_specs_pipeline_spec_id ON public.pipeline_task_specs USING btree (pipeline_spec_id); +CREATE UNIQUE INDEX idx_pipeline_task_specs_single_output ON public.pipeline_task_specs USING btree (pipeline_spec_id) WHERE (successor_id IS NULL); +CREATE INDEX idx_pipeline_task_specs_successor_id ON public.pipeline_task_specs USING btree (successor_id); +CREATE INDEX idx_pipeline_task_runs_optimise_find_results ON public.pipeline_task_runs USING btree (pipeline_run_id); +` +) + +func init() { + Migrations = append(Migrations, &gormigrate.Migration{ + ID: "0016_pipeline_task_run_dot_id", + Migrate: func(db *gorm.DB) error { + return db.Exec(up16).Error + }, + Rollback: func(db *gorm.DB) error { + return db.Exec(down16).Error + }, + }) +} diff --git a/core/store/migrations/migrate_test.go b/core/store/migrations/migrate_test.go index 823b7090658..15104a0dadb 100644 --- a/core/store/migrations/migrate_test.go +++ b/core/store/migrations/migrate_test.go @@ -82,6 +82,26 @@ func TestMigrate_Initial(t *testing.T) { } } +// V2 pipeline TaskSpec +// DEPRECATED +type TaskSpec struct { + ID int32 `json:"-" gorm:"primary_key"` + DotID string `json:"dotId"` + PipelineSpecID int32 `json:"-"` + PipelineSpec pipeline.Spec `json:"-"` + Type pipeline.TaskType `json:"-"` + JSON pipeline.JSONSerializable `json:"-" gorm:"type:jsonb"` + Index int32 `json:"-"` + SuccessorID null.Int `json:"-"` + CreatedAt time.Time `json:"-"` + BridgeName *string `json:"-"` + Bridge models.BridgeType `json:"-" gorm:"foreignKey:BridgeName;->"` +} + +func (TaskSpec) TableName() string { + return "pipeline_task_specs" +} + func TestMigrate_BridgeFK(t 
*testing.T) { _, orm, cleanup := cltest.BootstrapThrowawayORM(t, "migrations_bridgefk", false) defer cleanup() @@ -122,8 +142,7 @@ func TestMigrate_BridgeFK(t *testing.T) { // Migrating up should populate the bridge field require.NoError(t, migrations.MigrateUp(orm.DB, "0010_bridge_fk")) - // V2 pipeline TaskSpec - var p pipeline.TaskSpec + var p TaskSpec require.NoError(t, orm.DB.Find(&p, "id = ?", bts.ID).Error) assert.Equal(t, *p.BridgeName, string(bt.Name)) @@ -162,3 +181,78 @@ func TestMigrate_ChangeJobsToNumeric(t *testing.T) { require.NoError(t, migrations.MigrateDownFrom(orm.DB, "0012_change_jobs_to_numeric")) } + +func TestMigrate_PipelineTaskRunDotID(t *testing.T) { + _, orm, cleanup := cltest.BootstrapThrowawayORM(t, "migrations_task_run_dot_id", false) + defer cleanup() + + require.NoError(t, migrations.MigrateUp(orm.DB, "0015_simplify_log_broadcaster")) + // Add some task specs + ps := pipeline.Spec{ + DotDagSource: "blah", + } + require.NoError(t, orm.DB.Create(&ps).Error) + result := TaskSpec{ + DotID: "__result__", + PipelineSpecID: ps.ID, + Type: "result", + JSON: pipeline.JSONSerializable{}, + SuccessorID: null.Int{}, + } + require.NoError(t, orm.DB.Create(&result).Error) + ds := TaskSpec{ + DotID: "ds1", + PipelineSpecID: ps.ID, + Type: "http", + JSON: pipeline.JSONSerializable{}, + SuccessorID: null.NewInt(int64(result.ID), true), + } + require.NoError(t, orm.DB.Create(&ds).Error) + // Add a pipeline run + pr := pipeline.Run{ + PipelineSpecID: ps.ID, + Meta: pipeline.JSONSerializable{}, + Errors: pipeline.JSONSerializable{Null: true}, + Outputs: pipeline.JSONSerializable{Null: true}, + } + require.NoError(t, orm.DB.Create(&pr).Error) + + // Add some task runs + type PipelineTaskRun struct { + ID int64 `json:"-" gorm:"primary_key"` + Type pipeline.TaskType `json:"type"` + PipelineRun pipeline.Run `json:"-"` + PipelineRunID int64 `json:"-"` + Output *pipeline.JSONSerializable `json:"output" gorm:"type:jsonb"` + Error null.String `json:"error"` + CreatedAt time.Time `json:"createdAt"` + FinishedAt *time.Time `json:"finishedAt"` + Index int32 + PipelineTaskSpecID int32 `json:"-"` + } + tr1 := PipelineTaskRun{ + Type: pipeline.TaskTypeResult, + PipelineRunID: pr.ID, + PipelineTaskSpecID: result.ID, + Output: &pipeline.JSONSerializable{Null: true}, + Error: null.String{}, + } + require.NoError(t, orm.DB.Create(&tr1).Error) + tr2 := PipelineTaskRun{ + Type: pipeline.TaskTypeHTTP, + PipelineTaskSpecID: ds.ID, + PipelineRunID: pr.ID, + Output: &pipeline.JSONSerializable{Null: true}, + Error: null.String{}, + } + require.NoError(t, orm.DB.Create(&tr2).Error) + + require.NoError(t, migrations.MigrateUp(orm.DB, "0016_pipeline_task_run_dot_id")) + var ptrs []pipeline.TaskRun + require.NoError(t, orm.DB.Find(&ptrs).Error) + assert.Equal(t, "__result__", ptrs[0].DotID) + assert.Equal(t, "ds1", ptrs[1].DotID) + + require.NoError(t, migrations.MigrateDownFrom(orm.DB, "0016_pipeline_task_run_dot_id")) + +} diff --git a/core/web/bridge_types_controller.go b/core/web/bridge_types_controller.go index 0c20d165f8c..b581ad0adbb 100644 --- a/core/web/bridge_types_controller.go +++ b/core/web/bridge_types_controller.go @@ -142,18 +142,27 @@ func (btc *BridgeTypesController) Destroy(c *gin.Context) { return } if err != nil { - jsonAPIError(c, http.StatusInternalServerError, fmt.Errorf("error searching for bridge for BTC Destroy: %+v", err)) + jsonAPIError(c, http.StatusInternalServerError, fmt.Errorf("error searching for bridge: %+v", err)) return } jobsUsingBridge, err := 
btc.App.GetStore().FindJobIDsWithBridge(name) if err != nil { - jsonAPIError(c, http.StatusInternalServerError, fmt.Errorf("error searching for associated jobs for BTC Destroy: %+v", err)) + jsonAPIError(c, http.StatusInternalServerError, fmt.Errorf("error searching for associated jobs: %+v", err)) return } if len(jobsUsingBridge) > 0 { jsonAPIError(c, http.StatusConflict, fmt.Errorf("can't remove the bridge because jobs %v are associated with it", jobsUsingBridge)) return } + v2jobsUsingBridge, err := btc.App.GetJobORM().FindJobIDsWithBridge(name) + if err != nil { + jsonAPIError(c, http.StatusInternalServerError, fmt.Errorf("error searching for associated v2 jobs: %+v", err)) + return + } + if len(v2jobsUsingBridge) > 0 { + jsonAPIError(c, http.StatusConflict, fmt.Errorf("can't remove the bridge because jobs %v are associated with it", v2jobsUsingBridge)) + return + } if err = btc.App.GetStore().DeleteBridgeType(&bt); err != nil { jsonAPIError(c, StatusCodeForError(err), fmt.Errorf("failed to delete bridge: %+v", err)) return diff --git a/core/web/jobs_controller_test.go b/core/web/jobs_controller_test.go index 8becc546183..fbd51159657 100644 --- a/core/web/jobs_controller_test.go +++ b/core/web/jobs_controller_test.go @@ -338,9 +338,9 @@ func setupJobsControllerTests(t *testing.T) (*cltest.TestApplication, cltest.HTT ) require.NoError(t, app.Start()) - _, bridge := cltest.NewBridgeType(t, "voter_turnout", "blah") + _, bridge := cltest.NewBridgeType(t, "voter_turnout", "http://blah.com") require.NoError(t, app.Store.DB.Create(bridge).Error) - _, bridge2 := cltest.NewBridgeType(t, "election_winner", "blah") + _, bridge2 := cltest.NewBridgeType(t, "election_winner", "http://blah.com") require.NoError(t, app.Store.DB.Create(bridge2).Error) client := app.NewHTTPClient() return app, client, cleanup @@ -355,9 +355,9 @@ func setupJobSpecsControllerTestsWithJobs(t *testing.T) (cltest.HTTPClientCleane ) require.NoError(t, app.Start()) - _, bridge := cltest.NewBridgeType(t, "voter_turnout", "blah") + _, bridge := cltest.NewBridgeType(t, "voter_turnout", "http://blah.com") require.NoError(t, app.Store.DB.Create(bridge).Error) - _, bridge2 := cltest.NewBridgeType(t, "election_winner", "blah") + _, bridge2 := cltest.NewBridgeType(t, "election_winner", "http://blah.com") require.NoError(t, app.Store.DB.Create(bridge2).Error) client := app.NewHTTPClient() diff --git a/core/web/pipeline_runs_controller.go b/core/web/pipeline_runs_controller.go index 679fd8caa8e..1ed5ca4e9e6 100644 --- a/core/web/pipeline_runs_controller.go +++ b/core/web/pipeline_runs_controller.go @@ -87,6 +87,5 @@ func preloadPipelineRunDependencies(db *gorm.DB) *gorm.DB { return db. Where(`pipeline_task_runs.type != 'result'`). Order("created_at ASC, id ASC") - }). 
-		Preload("PipelineTaskRuns.PipelineTaskSpec")
+		})
 }
diff --git a/core/web/pipeline_runs_controller_test.go b/core/web/pipeline_runs_controller_test.go
index 6701922da20..ed81639b0a3 100644
--- a/core/web/pipeline_runs_controller_test.go
+++ b/core/web/pipeline_runs_controller_test.go
@@ -30,9 +30,9 @@ func TestPipelineRunsController_Create_HappyPath(t *testing.T) {
 	require.NoError(t, app.Start())
 	key := cltest.MustInsertRandomKey(t, app.Store.DB)
 
-	_, bridge := cltest.NewBridgeType(t, "voter_turnout", "blah")
+	_, bridge := cltest.NewBridgeType(t, "voter_turnout", "http://blah.com")
 	require.NoError(t, app.Store.DB.Create(bridge).Error)
-	_, bridge2 := cltest.NewBridgeType(t, "election_winner", "blah")
+	_, bridge2 := cltest.NewBridgeType(t, "election_winner", "http://blah.com")
 	require.NoError(t, app.Store.DB.Create(bridge2).Error)
 
 	client := app.NewHTTPClient()
diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index aeddb5826ee..d398fd1ff30 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -79,6 +79,10 @@ after reboot before the first transaction is ever sent, eliminating the
 previous scenario where the node could send underpriced or overpriced
 transactions for a period after a reboot, until the gas updater caught up.
 
+- Performance improvements when adding OCR jobs. Removed the pipeline_task_specs table
+and added a new column `dot_id` to the pipeline_task_runs table, which links a pipeline_task_run
+to a dotID in the pipeline_spec.dot_dag_source.
+
 ### Changed
 

From a58c1d5ddc0de9c972835cea611d3eff1a606c7d Mon Sep 17 00:00:00 2001
From: James Kong
Date: Mon, 15 Mar 2021 17:43:22 +0800
Subject: [PATCH 024/116] Add JobID and FM Contract Address to error logging when setting an oracle address

---
 core/services/fluxmonitor/flux_monitor.go   | 18 +++++++++++++++---
 core/services/fluxmonitorv2/flux_monitor.go | 14 +++++++++++---
 2 files changed, 26 insertions(+), 6 deletions(-)

diff --git a/core/services/fluxmonitor/flux_monitor.go b/core/services/fluxmonitor/flux_monitor.go
index d9c13720ab1..657e749597c 100644
--- a/core/services/fluxmonitor/flux_monitor.go
+++ b/core/services/fluxmonitor/flux_monitor.go
@@ -661,15 +661,27 @@ func (p *PollingDeviationChecker) SetOracleAddress() error {
 			}
 		}
 	}
+
+	l := logger.Default.With(
+		"jobID", p.initr.JobSpecID.String(),
+		"contract", p.initr.Address.Hex(),
+		"accounts", accounts,
+		"oracleAddresses", oracleAddrs,
+	)
+
 	if len(accounts) > 0 {
 		addr := accounts[0].Address
-		logger.Warnw("None of the node's keys matched any oracle addresses, using first available key. This flux monitor job may not work correctly", "address", addr, "accounts", accounts, "oracleAddresses", oracleAddrs)
+
+		l.Warnw(
+			"None of the node's keys matched any oracle addresses, using first available key. This flux monitor job may not work correctly",
+			"address", addr.Hex(),
+		)
 		p.oracleAddress = addr
 	} else {
-		logger.Error("No keys found. 
This flux monitor job may not work correctly") } - return errors.New("none of the node's keys matched any oracle addresses") + return errors.New("none of the node's keys matched any oracle addresses") } func (p *PollingDeviationChecker) performInitialPoll() { diff --git a/core/services/fluxmonitorv2/flux_monitor.go b/core/services/fluxmonitorv2/flux_monitor.go index 6852c646fff..0a3518bb231 100644 --- a/core/services/fluxmonitorv2/flux_monitor.go +++ b/core/services/fluxmonitorv2/flux_monitor.go @@ -426,15 +426,23 @@ func (fm *FluxMonitor) SetOracleAddress() error { } } } + + l := fm.logger.With( + "accounts", accounts, + "oracleAddresses", oracleAddrs, + ) + if len(accounts) > 0 { addr := accounts[0].Address - fm.logger.Warnw("None of the node's keys matched any oracle addresses, using first available key. This flux monitor job may not work correctly", "address", addr) + l.Warnw("None of the node's keys matched any oracle addresses, using first available key. This flux monitor job may not work correctly", + "address", addr.Hex(), + ) fm.oracleAddress = addr } else { - fm.logger.Error("No keys found. This flux monitor job may not work correctly") + l.Error("No keys found. This flux monitor job may not work correctly") } - return errors.New("none of the node's keys matched any oracle addresses") + return errors.New("none of the node's keys matched any oracle addresses") } // performInitialPoll performs the initial poll if required From c20ccc9757a15b28390922bd51b279b706516406 Mon Sep 17 00:00:00 2001 From: AndrewInsignares Date: Wed, 17 Mar 2021 18:01:39 -0400 Subject: [PATCH 025/116] adding log endpoint test cases, moving enabledebug cli command under config arg --- core/cmd/app.go | 29 ++++++--------- core/cmd/remote_client.go | 4 +- core/web/log_controller.go | 8 ++++ core/web/log_controller_test.go | 65 +++++++++++++++++++++++++++++++++ 4 files changed, 86 insertions(+), 20 deletions(-) create mode 100644 core/web/log_controller_test.go diff --git a/core/cmd/app.go b/core/cmd/app.go index b887e3971cb..b507957540f 100644 --- a/core/cmd/app.go +++ b/core/cmd/app.go @@ -147,6 +147,17 @@ func NewApp(client *Client) *cli.App { }, }, }, + { + Name: "enabledebug", + Usage: "Enable and disable debug logging", + Action: client.SetDebugLogging, + Flags: []cli.Flag{ + cli.BoolFlag{ + Name: "enabled, true", + Usage: "enable or disable debug logger", + }, + }, + }, }, }, @@ -461,24 +472,6 @@ func NewApp(client *Client) *cli.App { }, }, }, - { - Name: "logs", - Aliases: []string{"log"}, - Usage: "Commands for dynamic configuration and actions for the logger", - Subcommands: []cli.Command{ - { - Name: "enabledebug", - Usage: "Enable and disable debug logging", - Action: client.SetDebugLogging, - Flags: []cli.Flag{ - cli.BoolFlag{ - Name: "enabled, true", - Usage: "enable or disable debug logger", - }, - }, - }, - }, - }, { Name: "node", Aliases: []string{"local"}, diff --git a/core/cmd/remote_client.go b/core/cmd/remote_client.go index f310918c70d..c7dd3699ea9 100644 --- a/core/cmd/remote_client.go +++ b/core/cmd/remote_client.go @@ -1199,8 +1199,8 @@ func (cli *Client) SetDebugLogging(c *clipkg.Context) (err error) { return cli.errorOut(errors.New("Must set enabled or disabled (true || false)")) } - isDebugEnabld := c.Bool("enabled") - request := web.LoglevelPatchRequest{EnableDebugLog: &isDebugEnabld} + isDebugEnabled := c.Bool("enabled") + request := web.LoglevelPatchRequest{EnableDebugLog: &isDebugEnabled} requestData, err := json.Marshal(request) if err != nil { return cli.errorOut(err) diff 
--git a/core/web/log_controller.go b/core/web/log_controller.go
index 00ea625a650..b46d2d3c313 100644
--- a/core/web/log_controller.go
+++ b/core/web/log_controller.go
@@ -27,11 +27,19 @@ func (cc *LogController) SetDebug(c *gin.Context) {
 		return
 	}
 
+	var err error
 	if *request.EnableDebugLog {
 		cc.App.GetStore().Config.Set("LOG_LEVEL", zapcore.DebugLevel.String())
+		err = cc.App.GetStore().SetConfigValue("LogLevel", zapcore.DebugLevel)
 	} else {
 		cc.App.GetStore().Config.Set("LOG_LEVEL", zapcore.InfoLevel.String())
+		err = cc.App.GetStore().SetConfigValue("LogLevel", zapcore.InfoLevel)
 	}
+	if err != nil {
+		jsonAPIError(c, http.StatusInternalServerError, err)
+		return
+	}
+
 	logger.SetLogger(cc.App.GetStore().Config.CreateProductionLogger())
 
 	response := &presenters.LogResource{
diff --git a/core/web/log_controller_test.go b/core/web/log_controller_test.go
new file mode 100644
index 00000000000..d54b438135c
--- /dev/null
+++ b/core/web/log_controller_test.go
@@ -0,0 +1,65 @@
+package web_test
+
+import (
+	"bytes"
+	"encoding/json"
+	"net/http"
+	"testing"
+
+	"github.com/bmizerany/assert"
+	"github.com/smartcontractkit/chainlink/core/internal/cltest"
+	"github.com/smartcontractkit/chainlink/core/services/eth"
+	"github.com/smartcontractkit/chainlink/core/web"
+	"github.com/smartcontractkit/chainlink/core/web/presenters"
+	"github.com/stretchr/testify/require"
+	"go.uber.org/zap/zapcore"
+)
+
+type testCase struct {
+	Description      string
+	enableDebug      bool
+	expectedLogLevel zapcore.Level
+}
+
+func TestLogController_SetDebug(t *testing.T) {
+	t.Parallel()
+
+	rpcClient, gethClient, _, assertMocksCalled := cltest.NewEthMocksWithStartupAssertions(t)
+	defer assertMocksCalled()
+	app, cleanup := cltest.NewApplicationWithKey(t,
+		eth.NewClientWith(rpcClient, gethClient),
+	)
+	defer cleanup()
+	require.NoError(t, app.Start())
+	client := app.NewHTTPClient()
+
+	cases := []testCase{
+		{
+			Description:      "Set debug enabled to true",
+			enableDebug:      true,
+			expectedLogLevel: zapcore.DebugLevel,
+		},
+		{
+			Description:      "Set debug enabled to false (info)",
+			enableDebug:      false,
+			expectedLogLevel: zapcore.InfoLevel,
+		},
+	}
+
+	for _, tc := range cases {
+		func() {
+			request := web.LoglevelPatchRequest{EnableDebugLog: &tc.enableDebug}
+			requestData, _ := json.Marshal(request)
+			buf := bytes.NewBuffer(requestData)
+
+			resp, cleanup := client.Patch("/v2/log", buf)
+			defer cleanup()
+			cltest.AssertServerResponse(t, resp, http.StatusOK)
+
+			lR := presenters.LogResource{}
+			require.NoError(t, cltest.ParseJSONAPIResponse(t, resp, &lR))
+			assert.Equal(t, tc.enableDebug, lR.DebugEnabled)
+			assert.Equal(t, tc.expectedLogLevel.String(), app.GetStore().Config.LogLevel().String())
+		}()
+	}
+}

From 8a54b08d337d8d386f96fe73b37ef8fc9c6120de Mon Sep 17 00:00:00 2001
From: AndrewInsignares
Date: Wed, 17 Mar 2021 18:14:05 -0400
Subject: [PATCH 026/116] adding log renderer to endpoint output

---
 core/cmd/renderer.go | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/core/cmd/renderer.go b/core/cmd/renderer.go
index a3bda83ae64..4e8036c392d 100644
--- a/core/cmd/renderer.go
+++ b/core/cmd/renderer.go
@@ -16,6 +16,7 @@ import (
 	"github.com/smartcontractkit/chainlink/core/store/presenters"
 	"github.com/smartcontractkit/chainlink/core/utils"
 	"github.com/smartcontractkit/chainlink/core/web"
+	webPresenters "github.com/smartcontractkit/chainlink/core/web/presenters"
 )
 
 // Renderer implements the Render method. 
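The next hunk wires webPresenters.LogResource into the table renderer via a renderLogResource helper. For context, that helper boils down to ordinary table writing of one header row and one data row; a minimal standalone sketch of the same output shape, assuming the olekukonko/tablewriter package (which the renderer's newTable/render plumbing wraps) and using an illustrative ID value:

    package main

    import (
    	"os"
    	"strconv"

    	"github.com/olekukonko/tablewriter"
    )

    func main() {
    	// One header row plus a single data row, mirroring the
    	// {"ID", "DebugEnabled"} table rendered for a LogResource.
    	table := tablewriter.NewWriter(os.Stdout)
    	table.SetHeader([]string{"ID", "DebugEnabled"})
    	table.Append([]string{"log", strconv.FormatBool(true)}) // "log" is a placeholder id
    	table.Render()
    }
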
@@ -89,11 +90,23 @@ func (rt RendererTable) Render(v interface{}) error { return rt.renderOCRKeys(*typed) case *[]Job: return rt.renderJobsV2(*typed) + case *webPresenters.LogResource: + return rt.renderLogResource(*typed) default: return fmt.Errorf("unable to render object of type %T: %v", typed, typed) } } +func (rt RendererTable) renderLogResource(logResource webPresenters.LogResource) error { + table := rt.newTable([]string{"ID", "DebugEnabled"}) + table.Append([]string{ + logResource.ID, + strconv.FormatBool(logResource.DebugEnabled), + }) + render("Logs", table) + return nil +} + func (rt RendererTable) renderJobs(jobs []models.JobSpec) error { table := rt.newTable([]string{"ID", "Name", "Created At", "Initiators", "Tasks"}) for _, v := range jobs { From ac0ff3df0662a58729b8f961fa46fb38935797c9 Mon Sep 17 00:00:00 2001 From: connorwstein Date: Wed, 17 Mar 2021 21:58:05 -0400 Subject: [PATCH 027/116] deflake --- core/internal/features_test.go | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/core/internal/features_test.go b/core/internal/features_test.go index ce015acbea8..74ee97128ce 100644 --- a/core/internal/features_test.go +++ b/core/internal/features_test.go @@ -1379,7 +1379,7 @@ func TestIntegration_OCR(t *testing.T) { // bootstrap node to come up. app.Config.Set("OCR_BOOTSTRAP_CHECK_INTERVAL", "5s") // GracePeriod < ObservationTimeout - app.Config.Set("OCR_OBSERVATION_GRACE_PERIOD", "200ms") + app.Config.Set("OCR_OBSERVATION_GRACE_PERIOD", "100ms") kbs = append(kbs, kb) apps = append(apps, app) @@ -1473,7 +1473,7 @@ p2pBootstrapPeers = [ ] keyBundleID = "%s" transmitterAddress = "%s" -observationTimeout = "200ms" +observationTimeout = "100ms" contractConfigConfirmations = 1 contractConfigTrackerPollInterval = "1s" observationSource = """ @@ -1527,7 +1527,15 @@ observationSource = """ require.NoError(t, err) // No spec errors for _, j := range jobs { - require.Len(t, j.JobSpecErrors, 0) + ignore := 0 + for i := range j.JobSpecErrors { + // Non-fatal timing related error, ignore for testing. + if strings.Contains(j.JobSpecErrors[i].Description, "leader's phase conflicts tGrace timeout") { + fmt.Println("ignoring this error") + ignore++ + } + } + require.Len(t, j.JobSpecErrors, ignore) } } } From 20a1e9eae23ca1a4d1e4af998168fa30c62a2b0e Mon Sep 17 00:00:00 2001 From: connorwstein Date: Wed, 17 Mar 2021 22:01:08 -0400 Subject: [PATCH 028/116] fmt --- core/internal/features_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/core/internal/features_test.go b/core/internal/features_test.go index 74ee97128ce..1bd3f43906b 100644 --- a/core/internal/features_test.go +++ b/core/internal/features_test.go @@ -1531,7 +1531,6 @@ observationSource = """ for i := range j.JobSpecErrors { // Non-fatal timing related error, ignore for testing. 
if strings.Contains(j.JobSpecErrors[i].Description, "leader's phase conflicts tGrace timeout") { - fmt.Println("ignoring this error") ignore++ } } From 954ce27014220f324f44aabdf2896000c34a6e92 Mon Sep 17 00:00:00 2001 From: James Kong Date: Tue, 16 Mar 2021 23:38:25 +0800 Subject: [PATCH 029/116] Enforce JSON when global flag is provided * Refactor tests for readability * Removed some unused flags from `keys` commands * Only display JSON for `keys` commands when the json flag is provided * Jobs List now correctly displays the id in the JSON * Display some pipeline run data after a create * Fix errors being placed in the output when creating a key * `chainlink -j txs create` now responds with standard EthTx JSON --- core/cmd/app.go | 56 +- core/cmd/presenters.go | 2 +- core/cmd/remote_client.go | 126 ++--- core/cmd/remote_client_test.go | 639 ++++++++-------------- core/cmd/renderer.go | 38 +- core/internal/cltest/mocks.go | 2 +- core/store/presenters/presenters.go | 12 + core/web/jobs_controller.go | 1 - core/web/pipeline_runs_controller.go | 12 +- core/web/pipeline_runs_controller_test.go | 5 +- core/web/transfer_controller.go | 3 +- 11 files changed, 368 insertions(+), 528 deletions(-) diff --git a/core/cmd/app.go b/core/cmd/app.go index caf2afb2c5a..01360f044a6 100644 --- a/core/cmd/app.go +++ b/core/cmd/app.go @@ -154,16 +154,6 @@ func NewApp(client *Client) *cli.App { Name: "job_specs", Usage: "Commands for managing Job Specs (jobs V1)", Subcommands: []cli.Command{ - { - Name: "archive", - Usage: "Archive a Job and all its associated Runs", - Action: client.ArchiveJobSpec, - }, - { - Name: "create", - Usage: "Create Job from a Job Specification JSON", - Action: client.CreateJobSpec, - }, { Name: "list", Usage: "List all jobs", @@ -180,6 +170,16 @@ func NewApp(client *Client) *cli.App { Usage: "Show a specific Job's details", Action: client.ShowJobSpec, }, + { + Name: "create", + Usage: "Create Job from a Job Specification JSON", + Action: client.CreateJobSpec, + }, + { + Name: "archive", + Usage: "Archive a Job and all its associated Runs", + Action: client.ArchiveJobSpec, + }, }, }, { @@ -234,7 +234,7 @@ func NewApp(client *Client) *cli.App { }, { Name: "delete", - Usage: format(`Deletes the ETH key matching the given address`), + Usage: format(`Delete the ETH key by address`), Flags: []cli.Flag{ cli.BoolFlag{ Name: "yes, y", @@ -249,11 +249,11 @@ func NewApp(client *Client) *cli.App { }, { Name: "import", - Usage: format(`Imports an ETH key from a JSON file`), + Usage: format(`Import an ETH key from a JSON file`), Flags: []cli.Flag{ cli.StringFlag{ Name: "oldpassword, p", - Usage: "the password that the key in the JSON file was encrypted with", + Usage: "`FILE` containing the password used to encrypt the key in the JSON file", }, }, Action: client.ImportETHKey, @@ -264,11 +264,11 @@ func NewApp(client *Client) *cli.App { Flags: []cli.Flag{ cli.StringFlag{ Name: "newpassword, p", - Usage: "the password with which to encrypt the key in the JSON file", + Usage: "`FILE` containing the password to encrypt the key (required)", }, cli.StringFlag{ Name: "output, o", - Usage: "the path where the JSON file will be saved", + Usage: "Path where the JSON file will be saved (required)", }, }, Action: client.ExportETHKey, @@ -281,15 +281,13 @@ func NewApp(client *Client) *cli.App { Usage: "Remote commands for administering the node's p2p keys", Subcommands: cli.Commands{ { - Name: "create", - Usage: format(`Create a p2p key, encrypted with password from the - password file, and store it in the 
database.`), - Flags: flags("password, p"), + Name: "create", + Usage: format(`Create a p2p key, encrypted with password from the password file, and store it in the database.`), Action: client.CreateP2PKey, }, { Name: "delete", - Usage: format(`Deletes the encrypted P2P key matching the given ID`), + Usage: format(`Delete the encrypted P2P key by id`), Flags: []cli.Flag{ cli.BoolFlag{ Name: "yes, y", @@ -313,7 +311,7 @@ func NewApp(client *Client) *cli.App { Flags: []cli.Flag{ cli.StringFlag{ Name: "oldpassword, p", - Usage: "the password that the key in the JSON file was encrypted with", + Usage: "`FILE` containing the password used to encrypt the key in the JSON file", }, }, Action: client.ImportP2PKey, @@ -324,11 +322,11 @@ func NewApp(client *Client) *cli.App { Flags: []cli.Flag{ cli.StringFlag{ Name: "newpassword, p", - Usage: "the password with which to encrypt the key in the JSON file", + Usage: "`FILE` containing the password to encrypt the key (required)", }, cli.StringFlag{ Name: "output, o", - Usage: "the path where the JSON file will be saved", + Usage: "`FILE` where the JSON file will be saved (required)", }, }, Action: client.ExportP2PKey, @@ -341,10 +339,8 @@ func NewApp(client *Client) *cli.App { Usage: "Remote commands for administering the node's off chain reporting keys", Subcommands: cli.Commands{ { - Name: "create", - Usage: format(`Create an OCR key bundle, encrypted with password from the - password file, and store it in the database`), - Flags: flags("password, p"), + Name: "create", + Usage: format(`Create an OCR key bundle, encrypted with password from the password file, and store it in the database`), Action: client.CreateOCRKeyBundle, }, { @@ -373,7 +369,7 @@ func NewApp(client *Client) *cli.App { Flags: []cli.Flag{ cli.StringFlag{ Name: "oldpassword, p", - Usage: "the password that the key in the JSON file was encrypted with", + Usage: "`FILE` containing the password used to encrypt the key in the JSON file", }, }, Action: client.ImportOCRKey, @@ -384,11 +380,11 @@ func NewApp(client *Client) *cli.App { Flags: []cli.Flag{ cli.StringFlag{ Name: "newpassword, p", - Usage: "the password with which to encrypt the key in the JSON file", + Usage: "`FILE` containing the password to encrypt the key (required)", }, cli.StringFlag{ Name: "output, o", - Usage: "the path where the JSON file will be saved", + Usage: "`FILE` where the JSON file will be saved (required)", }, }, Action: client.ExportOCRKey, diff --git a/core/cmd/presenters.go b/core/cmd/presenters.go index 28e90fd05ca..99cca49a0b1 100644 --- a/core/cmd/presenters.go +++ b/core/cmd/presenters.go @@ -10,7 +10,7 @@ import ( // JAID represents a JSON API ID. // It implements the api2go MarshalIdentifier and UnmarshalIdentitier interface. type JAID struct { - ID string `json:"-"` + ID string `json:"id"` } // GetID implements the api2go MarshalIdentifier interface. 
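The one-line presenters.go change above swaps JAID's struct tag from json:"-" to json:"id", so plain JSON rendering of any presenter embedding JAID now includes the id, which lines up with the "Jobs List now correctly displays the id in the JSON" note in this patch's summary. A minimal sketch of the tag's effect, using stand-in types rather than the node's actual presenters:

    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    // Stand-ins for a presenter embedding JAID; the tags mirror the before/after above.
    type jaidBefore struct {
    	ID string `json:"-"` // old tag: the id is dropped by encoding/json
    }

    type jaidAfter struct {
    	ID string `json:"id"` // new tag: the id is serialized
    }

    func main() {
    	before, _ := json.Marshal(jaidBefore{ID: "42"})
    	after, _ := json.Marshal(jaidAfter{ID: "42"})
    	fmt.Println(string(before)) // {}
    	fmt.Println(string(after))  // {"id":"42"}
    }
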
diff --git a/core/cmd/remote_client.go b/core/cmd/remote_client.go index 80794d84e0a..a61300364a2 100644 --- a/core/cmd/remote_client.go +++ b/core/cmd/remote_client.go @@ -21,6 +21,7 @@ import ( "go.uber.org/multierr" "github.com/smartcontractkit/chainlink/core/assets" + "github.com/smartcontractkit/chainlink/core/services/pipeline" "github.com/smartcontractkit/chainlink/core/store/models" "github.com/smartcontractkit/chainlink/core/store/models/ocrkey" "github.com/smartcontractkit/chainlink/core/store/models/p2pkey" @@ -255,18 +256,9 @@ func (cli *Client) CreateJobV2(c *clipkg.Context) (err error) { return cli.errorOut(err) } - responseBodyBytes, err := ioutil.ReadAll(resp.Body) - if err != nil { - return cli.errorOut(err) - } - - ocrJobSpec := Job{} - if err := web.ParseJSONAPIResponse(responseBodyBytes, &ocrJobSpec); err != nil { - return cli.errorOut(err) - } - - fmt.Printf("Job added (job ID: %v).\n", ocrJobSpec.ID) - return nil + var js Job + err = cli.renderAPIResponse(resp, &js, "Job created") + return err } func (cli *Client) DeleteJobV2(c *clipkg.Context) error { @@ -281,6 +273,8 @@ func (cli *Client) DeleteJobV2(c *clipkg.Context) error { if err != nil { return cli.errorOut(err) } + + fmt.Printf("Job %v Deleted\n", c.Args().First()) return nil } @@ -293,12 +287,15 @@ func (cli *Client) TriggerPipelineRun(c *clipkg.Context) error { if err != nil { return cli.errorOut(err) } - _, err = cli.parseResponse(resp) - if err != nil { - return cli.errorOut(err) - } - fmt.Printf("Pipeline run successfully triggered for job ID %v.\n", c.Args().First()) - return nil + defer func() { + if cerr := resp.Body.Close(); cerr != nil { + err = multierr.Append(err, cerr) + } + }() + + var run pipeline.Run + err = cli.renderAPIResponse(resp, &run, "Pipeline run successfully triggered") + return err } // CreateJobRun creates job run based on SpecID and optional JSON @@ -495,7 +492,8 @@ func (cli *Client) SendEther(c *clipkg.Context) (err error) { } }() - err = cli.printResponseBody(resp) + var tx presenters.EthTx + err = cli.renderAPIResponse(resp, &tx) return err } @@ -611,12 +609,13 @@ func (cli *Client) printResponseBody(resp *http.Response) error { return nil } -func (cli *Client) renderAPIResponse(resp *http.Response, dst interface{}) error { +func (cli *Client) renderAPIResponse(resp *http.Response, dst interface{}, headers ...string) error { var links jsonapi.Links if err := cli.deserializeAPIResponse(resp, dst, &links); err != nil { return cli.errorOut(err) } - return cli.errorOut(cli.Render(dst)) + + return cli.errorOut(cli.Render(dst, headers...)) } // SetMinimumGasPrice specifies the minimum gas price to use for outgoing transactions @@ -710,16 +709,11 @@ func (cli *Client) CreateETHKey(c *clipkg.Context) (err error) { } }() - _, err = os.Stderr.WriteString("ETH key created.\n\n🔑 New key\n") - if err != nil { - return cli.errorOut(err) - } var keys presenters.ETHKey - return cli.renderAPIResponse(resp, &keys) + return cli.renderAPIResponse(resp, &keys, "ETH key created.\n\n🔑 New key") } -// ListETHKeys renders a table containing the active account address -// with its ETH & LINK balance +// ListETHKeys renders the active account address with its ETH & LINK balance func (cli *Client) ListETHKeys(c *clipkg.Context) (err error) { resp, err := cli.HTTP.Get("/v2/keys/eth") if err != nil { @@ -731,12 +725,8 @@ func (cli *Client) ListETHKeys(c *clipkg.Context) (err error) { } }() - _, err = os.Stderr.WriteString("🔑 ETH keys\n") - if err != nil { - return cli.errorOut(err) - } var keys 
[]presenters.ETHKey - return cli.renderAPIResponse(resp, &keys) + return cli.renderAPIResponse(resp, &keys, "🔑 ETH keys") } func (cli *Client) DeleteETHKey(c *clipkg.Context) (err error) { @@ -752,9 +742,9 @@ func (cli *Client) DeleteETHKey(c *clipkg.Context) (err error) { var confirmationMsg string if c.Bool("hard") { queryStr = "?hard=true" - confirmationMsg = "ETH key deleted.\n\n" + confirmationMsg = "Deleted ETH key" } else { - confirmationMsg = "ETH key archived.\n\n" + confirmationMsg = "Archived ETH key" } address := c.Args().Get(0) @@ -768,15 +758,8 @@ func (cli *Client) DeleteETHKey(c *clipkg.Context) (err error) { } }() - if resp.StatusCode == 200 { - fmt.Print(confirmationMsg) - } - _, err = os.Stderr.WriteString("🔑 Deleted ETH key\n") - if err != nil { - return cli.errorOut(err) - } var key presenters.ETHKey - return cli.renderAPIResponse(resp, &key) + return cli.renderAPIResponse(resp, &key, fmt.Sprintf("🔑 %s", confirmationMsg)) } func (cli *Client) ImportETHKey(c *clipkg.Context) (err error) { @@ -810,12 +793,8 @@ func (cli *Client) ImportETHKey(c *clipkg.Context) (err error) { } }() - _, err = os.Stderr.WriteString("🔑 Imported ETH key\n") - if err != nil { - return cli.errorOut(err) - } var key presenters.ETHKey - return cli.renderAPIResponse(resp, &key) + return cli.renderAPIResponse(resp, &key, "🔑 Imported ETH key") } func (cli *Client) ExportETHKey(c *clipkg.Context) (err error) { @@ -850,6 +829,10 @@ func (cli *Client) ExportETHKey(c *clipkg.Context) (err error) { } }() + if resp.StatusCode != http.StatusOK { + return cli.errorOut(errors.New("Error exporting")) + } + keyJSON, err := ioutil.ReadAll(resp.Body) if err != nil { return cli.errorOut(errors.Wrap(err, "Could not read response body")) @@ -864,6 +847,7 @@ func (cli *Client) ExportETHKey(c *clipkg.Context) (err error) { if err != nil { return cli.errorOut(err) } + return nil } @@ -878,11 +862,8 @@ func (cli *Client) CreateP2PKey(c *clipkg.Context) (err error) { } }() - if resp.StatusCode == 200 { - fmt.Printf("Created P2P keypair.\n\n") - } var key p2pkey.EncryptedP2PKey - return cli.renderAPIResponse(resp, &key) + return cli.renderAPIResponse(resp, &key, "Created P2P keypair") } func (cli *Client) ListP2PKeys(c *clipkg.Context) (err error) { @@ -928,11 +909,8 @@ func (cli *Client) DeleteP2PKey(c *clipkg.Context) (err error) { } }() - if resp.StatusCode == 200 { - fmt.Printf("P2P key deleted.\n\n") - } var key p2pkey.EncryptedP2PKey - return cli.renderAPIResponse(resp, &key) + return cli.renderAPIResponse(resp, &key, "P2P key deleted") } func (cli *Client) ImportP2PKey(c *clipkg.Context) (err error) { @@ -966,13 +944,8 @@ func (cli *Client) ImportP2PKey(c *clipkg.Context) (err error) { } }() - _, err = os.Stderr.WriteString("🔑 Imported P2P key\n") - if err != nil { - return cli.errorOut(err) - } - var key p2pkey.EncryptedP2PKey - return cli.renderAPIResponse(resp, &key) + return cli.renderAPIResponse(resp, &key, "🔑 Imported P2P key") } func (cli *Client) ExportP2PKey(c *clipkg.Context) (err error) { @@ -1007,6 +980,10 @@ func (cli *Client) ExportP2PKey(c *clipkg.Context) (err error) { } }() + if resp.StatusCode != http.StatusOK { + return cli.errorOut(errors.New("Error exporting")) + } + keyJSON, err := ioutil.ReadAll(resp.Body) if err != nil { return cli.errorOut(errors.Wrap(err, "Could not read response body")) @@ -1017,7 +994,7 @@ func (cli *Client) ExportP2PKey(c *clipkg.Context) (err error) { return cli.errorOut(errors.Wrapf(err, "Could not write %v", filepath)) } - _, err = 
os.Stderr.WriteString(fmt.Sprintf("🔑 Exported P2P key %s to %s", ID, filepath)) + _, err = os.Stderr.WriteString(fmt.Sprintf("🔑 Exported P2P key %s to %s\n", ID, filepath)) if err != nil { return cli.errorOut(err) } @@ -1038,11 +1015,8 @@ func (cli *Client) CreateOCRKeyBundle(c *clipkg.Context) error { } }() - if resp.StatusCode == 200 { - fmt.Printf("Created OCR key bundle.\n\n") - } var key ocrkey.EncryptedKeyBundle - return cli.renderAPIResponse(resp, &key) + return cli.renderAPIResponse(resp, &key, "Created OCR key bundle") } // ListOCRKeyBundles lists the available OCR Key Bundles @@ -1091,11 +1065,8 @@ func (cli *Client) DeleteOCRKeyBundle(c *clipkg.Context) error { } }() - if resp.StatusCode == 200 { - fmt.Printf("OCR key bundle deleted.\n\n") - } var key ocrkey.EncryptedKeyBundle - return cli.renderAPIResponse(resp, &key) + return cli.renderAPIResponse(resp, &key, "OCR key bundle deleted") } func (cli *Client) ImportOCRKey(c *clipkg.Context) (err error) { @@ -1129,13 +1100,8 @@ func (cli *Client) ImportOCRKey(c *clipkg.Context) (err error) { } }() - _, err = os.Stderr.WriteString("🔑 Imported OCR key bundle") - if err != nil { - return cli.errorOut(err) - } - var key ocrkey.EncryptedKeyBundle - return cli.renderAPIResponse(resp, &key) + return cli.renderAPIResponse(resp, &key, "Imported OCR key bundle") } func (cli *Client) ExportOCRKey(c *clipkg.Context) (err error) { @@ -1170,6 +1136,10 @@ func (cli *Client) ExportOCRKey(c *clipkg.Context) (err error) { } }() + if resp.StatusCode != http.StatusOK { + return cli.errorOut(errors.New("Error exporting")) + } + keyJSON, err := ioutil.ReadAll(resp.Body) if err != nil { return cli.errorOut(errors.Wrap(err, "Could not read response body")) @@ -1180,7 +1150,7 @@ func (cli *Client) ExportOCRKey(c *clipkg.Context) (err error) { return cli.errorOut(errors.Wrapf(err, "Could not write %v", filepath)) } - _, err = os.Stderr.WriteString(fmt.Sprintf("🔑 Exported OCR key bundle %s to %s", ID, filepath)) + _, err = os.Stderr.WriteString(fmt.Sprintf("Exported OCR key bundle %s to %s", ID, filepath)) if err != nil { return cli.errorOut(err) } diff --git a/core/cmd/remote_client_test.go b/core/cmd/remote_client_test.go index 618d1419546..e820f8012c0 100644 --- a/core/cmd/remote_client_test.go +++ b/core/cmd/remote_client_test.go @@ -43,6 +43,90 @@ var ( nilContext = cli.NewContext(nil, nil, nil) ) +type startOptions struct { + // Set the config options + Config map[string]interface{} + // Use to set up mocks on the app + FlagsAndDeps []interface{} + // Add a key on start up + WithKey bool + // Use app.StartAndConnect instead of app.Start + StartAndConnect bool +} + +func startNewApplication(t *testing.T, setup ...func(opts *startOptions)) *cltest.TestApplication { + t.Helper() + + sopts := &startOptions{ + Config: map[string]interface{}{}, + FlagsAndDeps: []interface{}{}, + } + for _, fn := range setup { + fn(sopts) + } + + // Setup config + config, cfgCleanup := cltest.NewConfig(t) + t.Cleanup(cfgCleanup) + + for k, v := range sopts.Config { + config.Set(k, v) + } + + var app *cltest.TestApplication + var cleanup func() + if sopts.WithKey { + app, cleanup = cltest.NewApplicationWithConfigAndKey(t, config, sopts.FlagsAndDeps...) + } else { + app, cleanup = cltest.NewApplicationWithConfig(t, config, sopts.FlagsAndDeps...) 
+ } + t.Cleanup(cleanup) + + if sopts.StartAndConnect { + require.NoError(t, app.StartAndConnect()) + } else { + require.NoError(t, app.Start()) + } + + return app +} + +// withConfig is a function option which sets config on the app +func withConfig(cfgs map[string]interface{}) func(opts *startOptions) { + return func(opts *startOptions) { + for k, v := range cfgs { + opts.Config[k] = v + } + } +} + +func withMocks(mks ...interface{}) func(opts *startOptions) { + return func(opts *startOptions) { + opts.FlagsAndDeps = mks + } +} + +func withKey() func(opts *startOptions) { + return func(opts *startOptions) { + opts.WithKey = true + } +} + +func startAndConnect() func(opts *startOptions) { + return func(opts *startOptions) { + opts.StartAndConnect = true + } +} + +func newEthMocks(t *testing.T) (*mocks.RPCClient, *mocks.GethClient) { + t.Helper() + + rpcClient, gethClient, _, assertMocksCalled := cltest.NewEthMocksWithStartupAssertions(t) + t.Cleanup(assertMocksCalled) + + return rpcClient, gethClient +} + func keyNameForTest(t *testing.T) string { return fmt.Sprintf("%s_test_key.json", t.Name()) } @@ -74,14 +158,11 @@ func requireP2PKeyCount(t *testing.T, store *store.Store, length int) []p2pkey.E func TestClient_ListETHKeys(t *testing.T) { t.Parallel() - rpcClient, gethClient, _, assertMocksCalled := cltest.NewEthMocksWithStartupAssertions(t) - defer assertMocksCalled() - app, cleanup := cltest.NewApplicationWithKey(t, - eth.NewClientWith(rpcClient, gethClient), + rpcClient, gethClient := newEthMocks(t) + app := startNewApplication(t, + withKey(), + withMocks(eth.NewClientWith(rpcClient, gethClient)), ) - defer cleanup() - require.NoError(t, app.Start()) - client, r := app.NewClientAndRenderer() gethClient.On("BalanceAt", mock.Anything, mock.Anything, mock.Anything).Maybe().Return(big.NewInt(42), nil) @@ -96,23 +177,14 @@ func TestClient_ListETHKeys(t *testing.T) { func TestClient_IndexJobSpecs(t *testing.T) { t.Parallel() - rpcClient, gethClient, _, assertMocksCalled := cltest.NewEthMocksWithStartupAssertions(t) - defer assertMocksCalled() - config, cleanup := cltest.NewConfig(t) - defer cleanup() - app, cleanup := cltest.NewApplicationWithConfig(t, config, - eth.NewClientWith(rpcClient, gethClient), - ) - defer cleanup() - require.NoError(t, app.Start()) + app := startNewApplication(t) + client, r := app.NewClientAndRenderer() j1 := cltest.NewJob() app.Store.CreateJob(&j1) j2 := cltest.NewJob() app.Store.CreateJob(&j2) - client, r := app.NewClientAndRenderer() - require.Nil(t, client.IndexJobSpecs(cltest.EmptyCLIContext())) jobs := *r.Renders[0].(*[]models.JobSpec) require.Equal(t, 2, len(jobs)) @@ -122,23 +194,14 @@ func TestClient_IndexJobSpecs(t *testing.T) { func TestClient_ShowJobRun_Exists(t *testing.T) { t.Parallel() - rpcClient, gethClient, _, assertMocksCalled := cltest.NewEthMocksWithStartupAssertions(t) - defer assertMocksCalled() - config, cleanup := cltest.NewConfig(t) - defer cleanup() - app, cleanup := cltest.NewApplicationWithConfig(t, config, - eth.NewClientWith(rpcClient, gethClient), - ) - defer cleanup() - require.NoError(t, app.Start()) + app := startNewApplication(t) + client, r := app.NewClientAndRenderer() j := cltest.NewJobWithWebInitiator() assert.NoError(t, app.Store.CreateJob(&j)) jr := cltest.CreateJobRunViaWeb(t, app, j, `{"result":"100"}`) - client, r := app.NewClientAndRenderer() - set := flag.NewFlagSet("test", 0) set.Parse([]string{jr.ID.String()}) c := cli.NewContext(nil, set, nil) @@ -150,16 +213,7 @@ func TestClient_ShowJobRun_Exists(t *testing.T) { 
func TestClient_ShowJobRun_NotFound(t *testing.T) { t.Parallel() - rpcClient, gethClient, _, assertMocksCalled := cltest.NewEthMocksWithStartupAssertions(t) - defer assertMocksCalled() - config, cleanup := cltest.NewConfig(t) - defer cleanup() - app, cleanup := cltest.NewApplicationWithConfig(t, config, - eth.NewClientWith(rpcClient, gethClient), - ) - defer cleanup() - require.NoError(t, app.Start()) - + app := startNewApplication(t) client, r := app.NewClientAndRenderer() set := flag.NewFlagSet("test", 0) @@ -172,15 +226,8 @@ func TestClient_ShowJobRun_NotFound(t *testing.T) { func TestClient_IndexJobRuns(t *testing.T) { t.Parallel() - rpcClient, gethClient, _, assertMocksCalled := cltest.NewEthMocksWithStartupAssertions(t) - defer assertMocksCalled() - config, cleanup := cltest.NewConfig(t) - defer cleanup() - app, cleanup := cltest.NewApplicationWithConfig(t, config, - eth.NewClientWith(rpcClient, gethClient), - ) - defer cleanup() - require.NoError(t, app.Start()) + app := startNewApplication(t) + client, r := app.NewClientAndRenderer() j := cltest.NewJobWithWebInitiator() assert.NoError(t, app.Store.CreateJob(&j)) @@ -192,8 +239,6 @@ func TestClient_IndexJobRuns(t *testing.T) { jr1.Result.Data = cltest.JSONFromString(t, `{"x":"y"}`) require.NoError(t, app.Store.CreateJobRun(&jr1)) - client, r := app.NewClientAndRenderer() - require.Nil(t, client.IndexJobRuns(cltest.EmptyCLIContext())) runs := *r.Renders[0].(*[]presenters.JobRun) require.Len(t, runs, 2) @@ -206,21 +251,12 @@ func TestClient_IndexJobRuns(t *testing.T) { func TestClient_ShowJobSpec_Exists(t *testing.T) { t.Parallel() - rpcClient, gethClient, _, assertMocksCalled := cltest.NewEthMocksWithStartupAssertions(t) - defer assertMocksCalled() - config, cleanup := cltest.NewConfig(t) - defer cleanup() - app, cleanup := cltest.NewApplicationWithConfig(t, config, - eth.NewClientWith(rpcClient, gethClient), - ) - defer cleanup() - require.NoError(t, app.Start()) + app := startNewApplication(t) + client, r := app.NewClientAndRenderer() job := cltest.NewJob() app.Store.CreateJob(&job) - client, r := app.NewClientAndRenderer() - set := flag.NewFlagSet("test", 0) set.Parse([]string{job.ID.String()}) c := cli.NewContext(nil, set, nil) @@ -232,16 +268,7 @@ func TestClient_ShowJobSpec_Exists(t *testing.T) { func TestClient_ShowJobSpec_NotFound(t *testing.T) { t.Parallel() - rpcClient, gethClient, _, assertMocksCalled := cltest.NewEthMocksWithStartupAssertions(t) - defer assertMocksCalled() - config, cleanup := cltest.NewConfig(t) - defer cleanup() - app, cleanup := cltest.NewApplicationWithConfig(t, config, - eth.NewClientWith(rpcClient, gethClient), - ) - defer cleanup() - require.NoError(t, app.Start()) - + app := startNewApplication(t) client, r := app.NewClientAndRenderer() set := flag.NewFlagSet("test", 0) @@ -266,15 +293,7 @@ func TestClient_CreateExternalInitiator(t *testing.T) { for _, tt := range tests { test := tt t.Run(test.name, func(t *testing.T) { - - rpcClient, gethClient, _, assertMocksCalled := cltest.NewEthMocksWithStartupAssertions(t) - defer assertMocksCalled() - app, cleanup := cltest.NewApplicationWithKey(t, - eth.NewClientWith(rpcClient, gethClient), - ) - defer cleanup() - require.NoError(t, app.Start()) - + app := startNewApplication(t) client, _ := app.NewClientAndRenderer() set := flag.NewFlagSet("create", 0) @@ -312,14 +331,7 @@ func TestClient_CreateExternalInitiator_Errors(t *testing.T) { for _, tt := range tests { test := tt t.Run(test.name, func(t *testing.T) { - rpcClient, gethClient, _, assertMocksCalled 
:= cltest.NewEthMocksWithStartupAssertions(t) - defer assertMocksCalled() - app, cleanup := cltest.NewApplicationWithKey(t, - eth.NewClientWith(rpcClient, gethClient), - ) - defer cleanup() - require.NoError(t, app.Start()) - + app := startNewApplication(t) client, _ := app.NewClientAndRenderer() set := flag.NewFlagSet("create", 0) @@ -338,15 +350,8 @@ func TestClient_CreateExternalInitiator_Errors(t *testing.T) { func TestClient_DestroyExternalInitiator(t *testing.T) { t.Parallel() - rpcClient, gethClient, _, assertMocksCalled := cltest.NewEthMocksWithStartupAssertions(t) - defer assertMocksCalled() - config, cleanup := cltest.NewConfig(t) - defer cleanup() - app, cleanup := cltest.NewApplicationWithConfig(t, config, - eth.NewClientWith(rpcClient, gethClient), - ) - defer cleanup() - require.NoError(t, app.Start()) + app := startNewApplication(t) + client, r := app.NewClientAndRenderer() token := auth.NewToken() exi, err := models.NewExternalInitiator(token, @@ -356,8 +361,6 @@ func TestClient_DestroyExternalInitiator(t *testing.T) { err = app.Store.CreateExternalInitiator(exi) require.NoError(t, err) - client, r := app.NewClientAndRenderer() - set := flag.NewFlagSet("test", 0) set.Parse([]string{exi.Name}) c := cli.NewContext(nil, set, nil) @@ -368,16 +371,7 @@ func TestClient_DestroyExternalInitiator(t *testing.T) { func TestClient_DestroyExternalInitiator_NotFound(t *testing.T) { t.Parallel() - rpcClient, gethClient, _, assertMocksCalled := cltest.NewEthMocksWithStartupAssertions(t) - defer assertMocksCalled() - config, cleanup := cltest.NewConfig(t) - defer cleanup() - app, cleanup := cltest.NewApplicationWithConfig(t, config, - eth.NewClientWith(rpcClient, gethClient), - ) - defer cleanup() - require.NoError(t, app.Start()) - + app := startNewApplication(t) client, r := app.NewClientAndRenderer() set := flag.NewFlagSet("test", 0) @@ -390,16 +384,7 @@ func TestClient_DestroyExternalInitiator_NotFound(t *testing.T) { func TestClient_CreateJobSpec(t *testing.T) { t.Parallel() - rpcClient, gethClient, _, assertMocksCalled := cltest.NewEthMocksWithStartupAssertions(t) - defer assertMocksCalled() - config, cleanup := cltest.NewConfig(t) - defer cleanup() - app, cleanup := cltest.NewApplicationWithConfig(t, config, - eth.NewClientWith(rpcClient, gethClient), - ) - defer cleanup() - require.NoError(t, app.Start()) - + app := startNewApplication(t) client, _ := app.NewClientAndRenderer() tests := []struct { @@ -432,23 +417,13 @@ func TestClient_CreateJobSpec(t *testing.T) { func TestClient_ArchiveJobSpec(t *testing.T) { t.Parallel() - rpcClient, gethClient, _, assertMocksCalled := cltest.NewEthMocksWithStartupAssertions(t) - defer assertMocksCalled() - config, cleanup := cltest.NewConfig(t) - defer cleanup() eim := new(mocks.ExternalInitiatorManager) - app, cleanup := cltest.NewApplicationWithConfig(t, config, - eth.NewClientWith(rpcClient, gethClient), - eim, - ) - defer cleanup() - require.NoError(t, app.Start()) + app := startNewApplication(t, withMocks(eim)) + client, _ := app.NewClientAndRenderer() job := cltest.NewJob() require.NoError(t, app.Store.CreateJob(&job)) - client, _ := app.NewClientAndRenderer() - set := flag.NewFlagSet("archive", 0) set.Parse([]string{job.ID.String()}) c := cli.NewContext(nil, set, nil) @@ -466,16 +441,7 @@ func TestClient_ArchiveJobSpec(t *testing.T) { func TestClient_CreateJobSpec_JSONAPIErrors(t *testing.T) { t.Parallel() - rpcClient, gethClient, _, assertMocksCalled := cltest.NewEthMocksWithStartupAssertions(t) - defer assertMocksCalled() - config, 
cleanup := cltest.NewConfig(t) - defer cleanup() - app, cleanup := cltest.NewApplicationWithConfig(t, config, - eth.NewClientWith(rpcClient, gethClient), - ) - defer cleanup() - require.NoError(t, app.Start()) - + app := startNewApplication(t) client, _ := app.NewClientAndRenderer() set := flag.NewFlagSet("create", 0) @@ -490,16 +456,7 @@ func TestClient_CreateJobSpec_JSONAPIErrors(t *testing.T) { func TestClient_CreateJobRun(t *testing.T) { t.Parallel() - rpcClient, gethClient, _, assertMocksCalled := cltest.NewEthMocksWithStartupAssertions(t) - defer assertMocksCalled() - config, cleanup := cltest.NewConfig(t) - defer cleanup() - app, cleanup := cltest.NewApplicationWithConfig(t, config, - eth.NewClientWith(rpcClient, gethClient), - ) - defer cleanup() - require.NoError(t, app.Start()) - + app := startNewApplication(t) client, _ := app.NewClientAndRenderer() tests := []struct { @@ -545,16 +502,7 @@ func TestClient_CreateJobRun(t *testing.T) { func TestClient_CreateBridge(t *testing.T) { t.Parallel() - rpcClient, gethClient, _, assertMocksCalled := cltest.NewEthMocksWithStartupAssertions(t) - defer assertMocksCalled() - config, cleanup := cltest.NewConfig(t) - defer cleanup() - app, cleanup := cltest.NewApplicationWithConfig(t, config, - eth.NewClientWith(rpcClient, gethClient), - ) - defer cleanup() - require.NoError(t, app.Start()) - + app := startNewApplication(t) client, _ := app.NewClientAndRenderer() tests := []struct { @@ -589,15 +537,8 @@ func TestClient_CreateBridge(t *testing.T) { func TestClient_IndexBridges(t *testing.T) { t.Parallel() - rpcClient, gethClient, _, assertMocksCalled := cltest.NewEthMocksWithStartupAssertions(t) - defer assertMocksCalled() - config, cleanup := cltest.NewConfig(t) - defer cleanup() - app, cleanup := cltest.NewApplicationWithConfig(t, config, - eth.NewClientWith(rpcClient, gethClient), - ) - defer cleanup() - require.NoError(t, app.Start()) + app := startNewApplication(t) + client, r := app.NewClientAndRenderer() bt1 := &models.BridgeType{ Name: models.MustNewTaskType("testingbridges1"), @@ -615,8 +556,6 @@ func TestClient_IndexBridges(t *testing.T) { err = app.GetStore().CreateBridgeType(bt2) require.NoError(t, err) - client, r := app.NewClientAndRenderer() - require.Nil(t, client.IndexBridges(cltest.EmptyCLIContext())) bridges := *r.Renders[0].(*[]models.BridgeType) require.Equal(t, 2, len(bridges)) @@ -626,13 +565,8 @@ func TestClient_IndexBridges(t *testing.T) { func TestClient_ShowBridge(t *testing.T) { t.Parallel() - rpcClient, gethClient, _, assertMocksCalled := cltest.NewEthMocksWithStartupAssertions(t) - defer assertMocksCalled() - app, cleanup := cltest.NewApplication(t, - eth.NewClientWith(rpcClient, gethClient), - ) - defer cleanup() - require.NoError(t, app.StartAndConnect()) + app := startNewApplication(t) + client, r := app.NewClientAndRenderer() bt := &models.BridgeType{ Name: models.MustNewTaskType("testingbridges1"), @@ -641,8 +575,6 @@ func TestClient_ShowBridge(t *testing.T) { } require.NoError(t, app.GetStore().CreateBridgeType(bt)) - client, r := app.NewClientAndRenderer() - set := flag.NewFlagSet("test", 0) set.Parse([]string{bt.Name.String()}) c := cli.NewContext(nil, set, nil) @@ -654,15 +586,8 @@ func TestClient_ShowBridge(t *testing.T) { func TestClient_RemoveBridge(t *testing.T) { t.Parallel() - rpcClient, gethClient, _, assertMocksCalled := cltest.NewEthMocksWithStartupAssertions(t) - defer assertMocksCalled() - config, cleanup := cltest.NewConfig(t) - defer cleanup() - app, cleanup := cltest.NewApplicationWithConfig(t, 
config, - eth.NewClientWith(rpcClient, gethClient), - ) - defer cleanup() - require.NoError(t, app.Start()) + app := startNewApplication(t) + client, r := app.NewClientAndRenderer() bt := &models.BridgeType{ Name: models.MustNewTaskType("testingbridges1"), @@ -672,8 +597,6 @@ func TestClient_RemoveBridge(t *testing.T) { err := app.GetStore().CreateBridgeType(bt) require.NoError(t, err) - client, r := app.NewClientAndRenderer() - set := flag.NewFlagSet("test", 0) set.Parse([]string{bt.Name.String()}) c := cli.NewContext(nil, set, nil) @@ -685,16 +608,9 @@ func TestClient_RemoveBridge(t *testing.T) { func TestClient_RemoteLogin(t *testing.T) { t.Parallel() - rpcClient, gethClient, _, assertMocksCalled := cltest.NewEthMocksWithStartupAssertions(t) - defer assertMocksCalled() - config, cleanup := cltest.NewConfig(t) - defer cleanup() - config.Set("ADMIN_CREDENTIALS_FILE", "") - app, cleanup := cltest.NewApplicationWithConfig(t, config, - eth.NewClientWith(rpcClient, gethClient), - ) - defer cleanup() - require.NoError(t, app.Start()) + app := startNewApplication(t, withConfig(map[string]interface{}{ + "ADMIN_CREDENTIALS_FILE": "", + })) tests := []struct { name, file string @@ -727,31 +643,22 @@ func TestClient_RemoteLogin(t *testing.T) { } } -func setupWithdrawalsApplication(t *testing.T, config *cltest.TestConfig) (*cltest.TestApplication, func()) { - oca := common.HexToAddress("0xDEADB3333333F") - config.Set("OPERATOR_CONTRACT_ADDRESS", &oca) - rpcClient, gethClient, _, assertMocksCalled := cltest.NewEthMocksWithStartupAssertions(t) - app, cleanup := cltest.NewApplicationWithConfigAndKey(t, config, - eth.NewClientWith(rpcClient, gethClient), - ) - return app, func() { - assertMocksCalled() - cleanup() - } -} - func TestClient_SendEther_From_BPTXM(t *testing.T) { t.Parallel() - config, cleanup := cltest.NewConfig(t) - defer cleanup() - app, cleanup := setupWithdrawalsApplication(t, config) - defer cleanup() + rpcClient, gethClient := newEthMocks(t) + oca := common.HexToAddress("0xDEADB3333333F") + app := startNewApplication(t, + withKey(), + withConfig(map[string]interface{}{ + "OPERATOR_CONTRACT_ADDRESS": &oca, + }), + withMocks(eth.NewClientWith(rpcClient, gethClient)), + startAndConnect(), + ) + client, _ := app.NewClientAndRenderer() s := app.GetStore() - require.NoError(t, app.StartAndConnect()) - - client, _ := app.NewClientAndRenderer() set := flag.NewFlagSet("sendether", 0) amount := "100.5" _, fromAddress := cltest.MustAddRandomKeyToKeystore(t, s, 0) @@ -773,15 +680,7 @@ func TestClient_SendEther_From_BPTXM(t *testing.T) { func TestClient_ChangePassword(t *testing.T) { t.Parallel() - rpcClient, gethClient, _, assertMocksCalled := cltest.NewEthMocksWithStartupAssertions(t) - defer assertMocksCalled() - config, cleanup := cltest.NewConfig(t) - defer cleanup() - app, cleanup := cltest.NewApplicationWithConfig(t, config, - eth.NewClientWith(rpcClient, gethClient), - ) - defer cleanup() - require.NoError(t, app.Start()) + app := startNewApplication(t) enteredStrings := []string{cltest.APIEmail, cltest.Password} prompter := &cltest.MockCountingPrompter{EnteredStrings: enteredStrings} @@ -816,15 +715,8 @@ func TestClient_ChangePassword(t *testing.T) { func TestClient_IndexTransactions(t *testing.T) { t.Parallel() - rpcClient, gethClient, _, assertMocksCalled := cltest.NewEthMocksWithStartupAssertions(t) - defer assertMocksCalled() - config, cleanup := cltest.NewConfig(t) - defer cleanup() - app, cleanup := cltest.NewApplicationWithConfig(t, config, - eth.NewClientWith(rpcClient, 
gethClient), - ) - defer cleanup() - require.NoError(t, app.Start()) + app := startNewApplication(t) + client, r := app.NewClientAndRenderer() store := app.GetStore() _, from := cltest.MustAddRandomKeyToKeystore(t, store) @@ -832,8 +724,6 @@ func TestClient_IndexTransactions(t *testing.T) { tx := cltest.MustInsertConfirmedEthTxWithAttempt(t, store, 0, 1, from) attempt := tx.EthTxAttempts[0] - client, r := app.NewClientAndRenderer() - // page 1 set := flag.NewFlagSet("test transactions", 0) set.Int("page", 1, "doc") @@ -859,15 +749,8 @@ func TestClient_IndexTransactions(t *testing.T) { func TestClient_ShowTransaction(t *testing.T) { t.Parallel() - rpcClient, gethClient, _, assertMocksCalled := cltest.NewEthMocksWithStartupAssertions(t) - defer assertMocksCalled() - config, cleanup := cltest.NewConfig(t) - defer cleanup() - app, cleanup := cltest.NewApplicationWithConfig(t, config, - eth.NewClientWith(rpcClient, gethClient), - ) - defer cleanup() - require.NoError(t, app.Start()) + app := startNewApplication(t) + client, r := app.NewClientAndRenderer() store := app.GetStore() _, from := cltest.MustAddRandomKeyToKeystore(t, store) @@ -875,8 +758,6 @@ func TestClient_ShowTransaction(t *testing.T) { tx := cltest.MustInsertConfirmedEthTxWithAttempt(t, store, 0, 1, from) attempt := tx.EthTxAttempts[0] - client, r := app.NewClientAndRenderer() - set := flag.NewFlagSet("test get tx", 0) set.Parse([]string{attempt.Hash.Hex()}) c := cli.NewContext(nil, set, nil) @@ -889,23 +770,14 @@ func TestClient_ShowTransaction(t *testing.T) { func TestClient_IndexTxAttempts(t *testing.T) { t.Parallel() - rpcClient, gethClient, _, assertMocksCalled := cltest.NewEthMocksWithStartupAssertions(t) - defer assertMocksCalled() - config, cleanup := cltest.NewConfig(t) - defer cleanup() - app, cleanup := cltest.NewApplicationWithConfig(t, config, - eth.NewClientWith(rpcClient, gethClient), - ) - defer cleanup() - require.NoError(t, app.Start()) + app := startNewApplication(t) + client, r := app.NewClientAndRenderer() store := app.GetStore() _, from := cltest.MustAddRandomKeyToKeystore(t, store) tx := cltest.MustInsertConfirmedEthTxWithAttempt(t, store, 0, 1, from) - client, r := app.NewClientAndRenderer() - // page 1 set := flag.NewFlagSet("test txattempts", 0) set.Int("page", 1, "doc") @@ -931,40 +803,33 @@ func TestClient_IndexTxAttempts(t *testing.T) { func TestClient_CreateETHKey(t *testing.T) { t.Parallel() - rpcClient, gethClient, _, assertMocksCalled := cltest.NewEthMocksWithStartupAssertions(t) - defer assertMocksCalled() - app, cleanup := cltest.NewApplicationWithKey(t, - eth.NewClientWith(rpcClient, gethClient), + rpcClient, gethClient := newEthMocks(t) + app := startNewApplication(t, + withKey(), + withMocks(eth.NewClientWith(rpcClient, gethClient)), ) - defer cleanup() + client, _ := app.NewClientAndRenderer() + gethClient.On("BalanceAt", mock.Anything, mock.Anything, mock.Anything).Return(big.NewInt(42), nil) rpcClient.On("Call", mock.Anything, "eth_call", mock.Anything, "latest").Return(nil) - require.NoError(t, app.Start()) - - client, _ := app.NewClientAndRenderer() assert.NoError(t, client.CreateETHKey(nilContext)) } func TestClient_ImportExportETHKey(t *testing.T) { t.Parallel() - rpcClient, gethClient, _, assertMocksCalled := cltest.NewEthMocksWithStartupAssertions(t) - defer assertMocksCalled() - config, cleanup := cltest.NewConfig(t) - defer cleanup() - app, cleanup := cltest.NewApplicationWithConfig(t, config, - eth.NewClientWith(rpcClient, gethClient), + defer deleteKeyExportFile(t) + + rpcClient, 
gethClient := newEthMocks(t) + app := startNewApplication(t, + withMocks(eth.NewClientWith(rpcClient, gethClient)), ) - defer cleanup() + client, r := app.NewClientAndRenderer() gethClient.On("BalanceAt", mock.Anything, mock.Anything, mock.Anything).Return(big.NewInt(42), nil) rpcClient.On("Call", mock.Anything, "eth_call", mock.Anything, "latest").Return(nil) - client, r := app.NewClientAndRenderer() - - require.NoError(t, app.Start()) - set := flag.NewFlagSet("test", 0) set.String("file", "internal/fixtures/apicredentials", "") c := cli.NewContext(nil, set, nil) @@ -1033,18 +898,35 @@ func TestClient_ImportExportETHKey(t *testing.T) { acct, err := keystore.Import(keyJSON, strings.TrimSpace(string(newpassword))) assert.NoError(t, err) assert.Equal(t, addr.Hex(), acct.Address.Hex()) + + // Export test invalid id + keyName := keyNameForTest(t) + set = flag.NewFlagSet("test Eth export invalid id", 0) + set.Parse([]string{"999"}) + set.String("newpassword", "../internal/fixtures/apicredentials", "") + set.String("output", keyName, "") + c = cli.NewContext(nil, set, nil) + err = client.ExportETHKey(c) + require.Error(t, err, "Error exporting") + require.Error(t, utils.JustError(os.Stat(keyName))) } func TestClient_SetMinimumGasPrice(t *testing.T) { t.Parallel() - config, cleanup := cltest.NewConfig(t) - defer cleanup() - app, cleanup := setupWithdrawalsApplication(t, config) - defer cleanup() - require.NoError(t, app.StartAndConnect()) - + // Setup Withdrawals application + rpcClient, gethClient := newEthMocks(t) + oca := common.HexToAddress("0xDEADB3333333F") + app := startNewApplication(t, + withKey(), + withConfig(map[string]interface{}{ + "OPERATOR_CONTRACT_ADDRESS": &oca, + }), + withMocks(eth.NewClientWith(rpcClient, gethClient)), + startAndConnect(), + ) client, _ := app.NewClientAndRenderer() + set := flag.NewFlagSet("setgasprice", 0) set.Parse([]string{"8616460799"}) @@ -1067,17 +949,9 @@ func TestClient_SetMinimumGasPrice(t *testing.T) { func TestClient_GetConfiguration(t *testing.T) { t.Parallel() - rpcClient, gethClient, _, assertMocksCalled := cltest.NewEthMocksWithStartupAssertions(t) - defer assertMocksCalled() - config, cleanup := cltest.NewConfig(t) - defer cleanup() - app, cleanup := cltest.NewApplicationWithConfig(t, config, - eth.NewClientWith(rpcClient, gethClient), - ) - defer cleanup() - require.NoError(t, app.Start()) - + app := startNewApplication(t) client, r := app.NewClientAndRenderer() + assert.NoError(t, client.GetConfiguration(cltest.EmptyCLIContext())) require.Equal(t, 1, len(r.Renders)) @@ -1098,23 +972,14 @@ func TestClient_GetConfiguration(t *testing.T) { func TestClient_CancelJobRun(t *testing.T) { t.Parallel() - rpcClient, gethClient, _, assertMocksCalled := cltest.NewEthMocksWithStartupAssertions(t) - defer assertMocksCalled() - config, cleanup := cltest.NewConfig(t) - defer cleanup() - app, cleanup := cltest.NewApplicationWithConfig(t, config, - eth.NewClientWith(rpcClient, gethClient), - ) - defer cleanup() - require.NoError(t, app.Start()) + app := startNewApplication(t) + client, _ := app.NewClientAndRenderer() job := cltest.NewJobWithWebInitiator() require.NoError(t, app.Store.CreateJob(&job)) run := cltest.NewJobRun(job) require.NoError(t, app.Store.CreateJobRun(&run)) - client, _ := app.NewClientAndRenderer() - set := flag.NewFlagSet("cancel", 0) set.Parse([]string{run.ID.String()}) c := cli.NewContext(nil, set, nil) @@ -1130,17 +995,9 @@ func TestClient_CancelJobRun(t *testing.T) { func TestClient_P2P_CreateKey(t *testing.T) { t.Parallel() - 
rpcClient, gethClient, _, assertMocksCalled := cltest.NewEthMocksWithStartupAssertions(t) - defer assertMocksCalled() - config, cleanup := cltest.NewConfig(t) - defer cleanup() - app, cleanup := cltest.NewApplicationWithConfig(t, config, - eth.NewClientWith(rpcClient, gethClient), - ) - defer cleanup() - require.NoError(t, app.Start()) - + app := startNewApplication(t) client, _ := app.NewClientAndRenderer() + app.Store.OCRKeyStore.Unlock(cltest.Password) require.NoError(t, client.CreateP2PKey(nilContext)) @@ -1160,17 +1017,9 @@ func TestClient_P2P_CreateKey(t *testing.T) { func TestClient_P2P_DeleteKey(t *testing.T) { t.Parallel() - rpcClient, gethClient, _, assertMocksCalled := cltest.NewEthMocksWithStartupAssertions(t) - defer assertMocksCalled() - config, cleanup := cltest.NewConfig(t) - defer cleanup() - app, cleanup := cltest.NewApplicationWithConfig(t, config, - eth.NewClientWith(rpcClient, gethClient), - ) - defer cleanup() - require.NoError(t, app.Start()) - + app := startNewApplication(t) client, _ := app.NewClientAndRenderer() + app.Store.OCRKeyStore.Unlock(cltest.Password) key, err := p2pkey.CreateKey() @@ -1194,31 +1043,36 @@ func TestClient_P2P_DeleteKey(t *testing.T) { } func TestClient_ImportExportP2PKeyBundle(t *testing.T) { - defer deleteKeyExportFile(t) + t.Parallel() - rpcClient, gethClient, _, assertMocksCalled := cltest.NewEthMocksWithStartupAssertions(t) - defer assertMocksCalled() - config, cleanup := cltest.NewConfig(t) - defer cleanup() - app, cleanup := cltest.NewApplicationWithConfig(t, config, - eth.NewClientWith(rpcClient, gethClient), - ) - defer cleanup() - require.NoError(t, app.Start()) + defer deleteKeyExportFile(t) - store := app.GetStore() + app := startNewApplication(t) client, _ := app.NewClientAndRenderer() + store := app.GetStore() + store.OCRKeyStore.Unlock(cltest.Password) keys := requireP2PKeyCount(t, store, 1) key := keys[0] - keyName := keyNameForTest(t) + + // Export test invalid id set := flag.NewFlagSet("test P2P export", 0) - set.Parse([]string{fmt.Sprint(key.ID)}) + set.Parse([]string{"0"}) set.String("newpassword", "../internal/fixtures/apicredentials", "") set.String("output", keyName, "") c := cli.NewContext(nil, set, nil) + err := client.ExportP2PKey(c) + require.Error(t, err, "Error exporting") + require.Error(t, utils.JustError(os.Stat(keyName))) + + // Export test + set = flag.NewFlagSet("test P2P export", 0) + set.Parse([]string{fmt.Sprint(key.ID)}) + set.String("newpassword", "../internal/fixtures/apicredentials", "") + set.String("output", keyName, "") + c = cli.NewContext(nil, set, nil) require.NoError(t, client.ExportP2PKey(c)) require.NoError(t, utils.JustError(os.Stat(keyName))) @@ -1238,17 +1092,9 @@ func TestClient_ImportExportP2PKeyBundle(t *testing.T) { func TestClient_CreateOCRKeyBundle(t *testing.T) { t.Parallel() - rpcClient, gethClient, _, assertMocksCalled := cltest.NewEthMocksWithStartupAssertions(t) - defer assertMocksCalled() - config, cleanup := cltest.NewConfig(t) - defer cleanup() - app, cleanup := cltest.NewApplicationWithConfig(t, config, - eth.NewClientWith(rpcClient, gethClient), - ) - defer cleanup() - require.NoError(t, app.Start()) - + app := startNewApplication(t) client, _ := app.NewClientAndRenderer() + app.Store.OCRKeyStore.Unlock(cltest.Password) require.NoError(t, client.CreateOCRKeyBundle(nilContext)) @@ -1268,17 +1114,9 @@ func TestClient_CreateOCRKeyBundle(t *testing.T) { func TestClient_DeleteOCRKeyBundle(t *testing.T) { t.Parallel() - rpcClient, gethClient, _, assertMocksCalled := 
cltest.NewEthMocksWithStartupAssertions(t) - defer assertMocksCalled() - config, cleanup := cltest.NewConfig(t) - defer cleanup() - app, cleanup := cltest.NewApplicationWithConfig(t, config, - eth.NewClientWith(rpcClient, gethClient), - ) - defer cleanup() - require.NoError(t, app.Start()) - + app := startNewApplication(t) client, _ := app.NewClientAndRenderer() + app.Store.OCRKeyStore.Unlock(cltest.Password) key, err := ocrkey.NewKeyBundle() @@ -1302,28 +1140,32 @@ func TestClient_DeleteOCRKeyBundle(t *testing.T) { func TestClient_ImportExportOCRKeyBundle(t *testing.T) { defer deleteKeyExportFile(t) - rpcClient, gethClient, _, assertMocksCalled := cltest.NewEthMocksWithStartupAssertions(t) - defer assertMocksCalled() - config, cleanup := cltest.NewConfig(t) - defer cleanup() - app, cleanup := cltest.NewApplicationWithConfig(t, config, - eth.NewClientWith(rpcClient, gethClient), - ) - defer cleanup() - require.NoError(t, app.Start()) - store := app.GetStore() + app := startNewApplication(t) client, _ := app.NewClientAndRenderer() + + store := app.GetStore() store.OCRKeyStore.Unlock(cltest.Password) keys := requireOCRKeyCount(t, store, 1) key := keys[0] - keyName := keyNameForTest(t) + + // Export test invalid id set := flag.NewFlagSet("test OCR export", 0) - set.Parse([]string{key.ID.String()}) + set.Parse([]string{"0"}) set.String("newpassword", "../internal/fixtures/apicredentials", "") set.String("output", keyName, "") c := cli.NewContext(nil, set, nil) + err := client.ExportOCRKey(c) + require.Error(t, err, "Error exporting") + require.Error(t, utils.JustError(os.Stat(keyName))) + + // Export + set = flag.NewFlagSet("test OCR export", 0) + set.Parse([]string{key.ID.String()}) + set.String("newpassword", "../internal/fixtures/apicredentials", "") + set.String("output", keyName, "") + c = cli.NewContext(nil, set, nil) require.NoError(t, client.ExportOCRKey(c)) require.NoError(t, utils.JustError(os.Stat(keyName))) @@ -1342,19 +1184,14 @@ func TestClient_ImportExportOCRKeyBundle(t *testing.T) { func TestClient_RunOCRJob_HappyPath(t *testing.T) { t.Parallel() - rpcClient, gethClient, _, assertMocksCalled := cltest.NewEthMocksWithStartupAssertions(t) - defer assertMocksCalled() - app, cleanup := cltest.NewApplication(t, - eth.NewClientWith(rpcClient, gethClient), - ) - defer cleanup() - require.NoError(t, app.Start()) + + app := startNewApplication(t) + client, _ := app.NewClientAndRenderer() _, bridge := cltest.NewBridgeType(t, "voter_turnout", "http://blah.com") require.NoError(t, app.Store.DB.Create(bridge).Error) _, bridge2 := cltest.NewBridgeType(t, "election_winner", "http://blah.com") require.NoError(t, app.Store.DB.Create(bridge2).Error) - client, _ := app.NewClientAndRenderer() var ocrJobSpecFromFile job.Job tree, err := toml.LoadFile("testdata/oracle-spec.toml") @@ -1380,14 +1217,8 @@ func TestClient_RunOCRJob_HappyPath(t *testing.T) { func TestClient_RunOCRJob_MissingJobID(t *testing.T) { t.Parallel() - rpcClient, gethClient, _, assertMocksCalled := cltest.NewEthMocksWithStartupAssertions(t) - defer assertMocksCalled() - app, cleanup := cltest.NewApplication(t, - eth.NewClientWith(rpcClient, gethClient), - ) - defer cleanup() - require.NoError(t, app.Start()) + app := startNewApplication(t) client, _ := app.NewClientAndRenderer() set := flag.NewFlagSet("test", 0) @@ -1399,14 +1230,8 @@ func TestClient_RunOCRJob_MissingJobID(t *testing.T) { func TestClient_RunOCRJob_JobNotFound(t *testing.T) { t.Parallel() - rpcClient, gethClient, _, assertMocksCalled := 
cltest.NewEthMocksWithStartupAssertions(t) - defer assertMocksCalled() - app, cleanup := cltest.NewApplication(t, - eth.NewClientWith(rpcClient, gethClient), - ) - defer cleanup() - require.NoError(t, app.Start()) + app := startNewApplication(t) client, _ := app.NewClientAndRenderer() set := flag.NewFlagSet("test", 0) @@ -1414,16 +1239,13 @@ func TestClient_RunOCRJob_JobNotFound(t *testing.T) { c := cli.NewContext(nil, set, nil) require.NoError(t, client.RemoteLogin(c)) - assert.EqualError(t, client.TriggerPipelineRun(c), "Error; no job found with id 1 (most likely it was deleted)") + assert.EqualError(t, client.TriggerPipelineRun(c), "parseResponse error: Error; no job found with id 1 (most likely it was deleted)") } func TestClient_ListJobsV2(t *testing.T) { t.Parallel() - app, cleanup := cltest.NewApplication(t) - defer cleanup() - require.NoError(t, app.Start()) - + app := startNewApplication(t) client, r := app.NewClientAndRenderer() // Create the job @@ -1454,10 +1276,9 @@ func TestClient_ListJobsV2(t *testing.T) { func TestClient_CreateJobV2(t *testing.T) { t.Parallel() - app, cleanup := cltest.NewApplication(t) - defer cleanup() - require.NoError(t, app.Start()) + app := startNewApplication(t) client, _ := app.NewClientAndRenderer() + fs := flag.NewFlagSet("", flag.ExitOnError) fs.Parse([]string{"./testdata/ocr-bootstrap-spec.toml"}) err := client.CreateJobV2(cli.NewContext(nil, fs, nil)) @@ -1467,11 +1288,7 @@ func TestClient_CreateJobV2(t *testing.T) { func TestClient_AutoLogin(t *testing.T) { t.Parallel() - config, cleanup := cltest.NewConfig(t) - defer cleanup() - app, cleanup := cltest.NewApplicationWithConfig(t, config) - defer cleanup() - require.NoError(t, app.Start()) + app := startNewApplication(t) user := cltest.MustRandomUser() require.NoError(t, app.Store.SaveUser(&user)) @@ -1482,7 +1299,7 @@ func TestClient_AutoLogin(t *testing.T) { } client, _ := app.NewClientAndRenderer() client.CookieAuthenticator = cmd.NewSessionCookieAuthenticator(app.Config.Config, &cmd.MemoryCookieStore{}) - client.HTTP = cmd.NewAuthenticatedHTTPClient(config, client.CookieAuthenticator, sr) + client.HTTP = cmd.NewAuthenticatedHTTPClient(app.Config, client.CookieAuthenticator, sr) fs := flag.NewFlagSet("", flag.ExitOnError) err := client.ListJobsV2(cli.NewContext(nil, fs, nil)) diff --git a/core/cmd/renderer.go b/core/cmd/renderer.go index a3bda83ae64..bb09e716247 100644 --- a/core/cmd/renderer.go +++ b/core/cmd/renderer.go @@ -9,6 +9,7 @@ import ( "github.com/olekukonko/tablewriter" "github.com/smartcontractkit/chainlink/core/logger" + "github.com/smartcontractkit/chainlink/core/services/pipeline" "github.com/smartcontractkit/chainlink/core/store/models" "github.com/smartcontractkit/chainlink/core/store/models/ocrkey" "github.com/smartcontractkit/chainlink/core/store/models/p2pkey" @@ -20,7 +21,7 @@ import ( // Renderer implements the Render method. type Renderer interface { - Render(interface{}) error + Render(interface{}, ...string) error } // RendererJSON is used to render JSON data. @@ -29,11 +30,14 @@ type RendererJSON struct { } // Render writes the given input as a JSON string. -func (rj RendererJSON) Render(v interface{}) error { +func (rj RendererJSON) Render(v interface{}, _ ...string) error { b, err := utils.FormatJSON(v) if err != nil { return err } + + // Append a new line + b = append(b, []byte("\n")...) 
if _, err = rj.Write(b); err != nil { return err } @@ -47,7 +51,11 @@ type RendererTable struct { // Render returns a formatted table of text for a given Job or presenter // and relevant information. -func (rt RendererTable) Render(v interface{}) error { +func (rt RendererTable) Render(v interface{}, headers ...string) error { + for _, h := range headers { + fmt.Println(h) + } + switch typed := v.(type) { case *[]models.JobSpec: return rt.renderJobs(*typed) @@ -89,6 +97,10 @@ func (rt RendererTable) Render(v interface{}) error { return rt.renderOCRKeys(*typed) case *[]Job: return rt.renderJobsV2(*typed) + case *Job: + return rt.renderJobsV2([]Job{*typed}) + case *pipeline.Run: + return rt.renderPipelineRun(*typed) default: return fmt.Errorf("unable to render object of type %T: %v", typed, typed) } @@ -428,6 +440,7 @@ func (rt RendererTable) renderETHKeys(keys []presenters.ETHKey) error { deletedAt, }) } + renderList([]string{"Address", "ETH", "LINK", "Next nonce", "Last used", "Is funding", "Created", "Updated", "Deleted"}, rows) return nil } @@ -474,3 +487,22 @@ func (rt RendererTable) renderOCRKeys(ocrKeys []ocrkey.EncryptedKeyBundle) error renderList([]string{"ID", "On-chain signing addr", "Off-chain pubkey", "Config pubkey", "Created", "Updated", "Deleted"}, rows) return nil } + +func (rt RendererTable) renderPipelineRun(run pipeline.Run) error { + table := rt.newTable([]string{"ID", "Created At", "Finished At"}) + + var finishedAt string + if run.FinishedAt != nil { + finishedAt = run.FinishedAt.String() + } + + row := []string{ + run.GetID(), + run.CreatedAt.String(), + finishedAt, + } + table.Append(row) + + render("Pipeline Run", table) + return nil +} diff --git a/core/internal/cltest/mocks.go b/core/internal/cltest/mocks.go index ebaa08af234..fdd96926e2a 100644 --- a/core/internal/cltest/mocks.go +++ b/core/internal/cltest/mocks.go @@ -131,7 +131,7 @@ type RendererMock struct { } // Render appends values to renderer mock -func (rm *RendererMock) Render(v interface{}) error { +func (rm *RendererMock) Render(v interface{}, headers ...string) error { rm.Renders = append(rm.Renders, v) return nil } diff --git a/core/store/presenters/presenters.go b/core/store/presenters/presenters.go index 41595fafa90..3f6f99ef11f 100644 --- a/core/store/presenters/presenters.go +++ b/core/store/presenters/presenters.go @@ -596,6 +596,18 @@ type EthTx struct { Value string `json:"value,omitempty"` } +func NewEthTx(tx models.EthTx) EthTx { + return EthTx{ + ID: tx.ID, + Data: hexutil.Bytes(tx.EncodedPayload), + From: &tx.FromAddress, + GasLimit: strconv.FormatUint(tx.GasLimit, 10), + State: string(tx.State), + To: &tx.ToAddress, + Value: tx.Value.String(), + } +} + func NewEthTxFromAttempt(txa models.EthTxAttempt) EthTx { return newEthTxWithAttempt(txa.EthTx, txa) } diff --git a/core/web/jobs_controller.go b/core/web/jobs_controller.go index a3212743f72..9309713fe5d 100644 --- a/core/web/jobs_controller.go +++ b/core/web/jobs_controller.go @@ -125,7 +125,6 @@ func (jc *JobsController) Create(c *gin.Context) { } jsonAPIResponse(c, presenters.NewJobResource(job), job.Type.String()) - } // Delete soft deletes an OCR job spec. 
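// The CLI tests in this patch are all built on the startNewApplication
// helper and its functional options (withConfig, withMocks, withKey,
// startAndConnect) defined near the top of remote_client_test.go. A minimal
// sketch of how a test composes them — the LOG_LEVEL override below is
// illustrative only, not part of this patch:
//
//	func TestClient_Example(t *testing.T) {
//		t.Parallel()
//
//		rpcClient, gethClient := newEthMocks(t)
//		app := startNewApplication(t,
//			withKey(),          // provision an ETH key for the app
//			withConfig(map[string]interface{}{"LOG_LEVEL": "debug"}),
//			withMocks(eth.NewClientWith(rpcClient, gethClient)),
//			startAndConnect(),  // use StartAndConnect() instead of Start()
//		)
//		client, r := app.NewClientAndRenderer()
//		_, _ = client, r
//	}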
diff --git a/core/web/pipeline_runs_controller.go b/core/web/pipeline_runs_controller.go
index 1ed5ca4e9e6..c94a771fcec 100644
--- a/core/web/pipeline_runs_controller.go
+++ b/core/web/pipeline_runs_controller.go
@@ -77,7 +77,17 @@ func (prc *PipelineRunsController) Create(c *gin.Context) {
 		return
 	}
 
-	jsonAPIResponse(c, job.PipelineRun{ID: jobRunID}, "offChainReportingPipelineRun")
+	pipelineRun := pipeline.Run{}
+	err = preloadPipelineRunDependencies(prc.App.GetStore().DB).
+		Where("pipeline_runs.id = ?", jobRunID).
+		First(&pipelineRun).Error
+
+	if err != nil {
+		jsonAPIError(c, http.StatusInternalServerError, err)
+		return
+	}
+
+	jsonAPIResponse(c, pipelineRun, "offChainReportingPipelineRun")
 }
 
 func preloadPipelineRunDependencies(db *gorm.DB) *gorm.DB {
diff --git a/core/web/pipeline_runs_controller_test.go b/core/web/pipeline_runs_controller_test.go
index ed81639b0a3..61ffde3eb15 100644
--- a/core/web/pipeline_runs_controller_test.go
+++ b/core/web/pipeline_runs_controller_test.go
@@ -55,10 +55,13 @@ func TestPipelineRunsController_Create_HappyPath(t *testing.T) {
 	defer cleanup()
 	cltest.AssertServerResponse(t, response, http.StatusOK)
 
-	parsedResponse := job.PipelineRun{}
+	var parsedResponse pipeline.Run
 	err = web.ParseJSONAPIResponse(cltest.ParseResponseBody(t, response), &parsedResponse)
 	assert.NoError(t, err)
 	assert.NotNil(t, parsedResponse.ID)
+	assert.NotNil(t, parsedResponse.CreatedAt)
+	assert.Nil(t, parsedResponse.FinishedAt)
+	require.Len(t, parsedResponse.PipelineTaskRuns, 8)
 }
 
 func TestPipelineRunsController_Index_HappyPath(t *testing.T) {
diff --git a/core/web/transfer_controller.go b/core/web/transfer_controller.go
index 655538e0175..fd61637f3a6 100644
--- a/core/web/transfer_controller.go
+++ b/core/web/transfer_controller.go
@@ -7,6 +7,7 @@ import (
 	"github.com/smartcontractkit/chainlink/core/services/bulletprooftxmanager"
 	"github.com/smartcontractkit/chainlink/core/services/chainlink"
 	"github.com/smartcontractkit/chainlink/core/store/models"
+	"github.com/smartcontractkit/chainlink/core/store/presenters"
 
 	"github.com/gin-gonic/gin"
 )
@@ -34,5 +35,5 @@ func (tc *TransfersController) Create(c *gin.Context) {
 		return
 	}
 
-	jsonAPIResponse(c, etx, "eth_tx")
+	jsonAPIResponse(c, presenters.NewEthTx(etx), "eth_tx")
 }

From 663a41d24df1ec791825b4ef1bed3bc9f52c780e Mon Sep 17 00:00:00 2001
From: James Kong
Date: Fri, 19 Mar 2021 00:50:18 +0800
Subject: [PATCH 030/116] Fix crashing pages when a job run is present

This fixes the Job page, which crashed when a job run was present, as well
as the job run page.
https://www.pivotaltracker.com/story/show/177402305 --- core/services/pipeline/models.go | 4 +-- operator_ui/@types/core/store/models.d.ts | 4 +-- .../Jobs/Runs/augmentOcrTasksList.test.ts | 28 +++++-------------- .../pages/Jobs/Runs/augmentOcrTasksList.ts | 2 +- .../src/pages/Jobs/transformJobRuns.test.ts | 12 ++------ .../src/pages/Jobs/transformJobRuns.ts | 8 ++---- operator_ui/support/factories/jobRunV2.ts | 6 ++-- 7 files changed, 19 insertions(+), 45 deletions(-) diff --git a/core/services/pipeline/models.go b/core/services/pipeline/models.go index a80d58941db..91d2a69604c 100644 --- a/core/services/pipeline/models.go +++ b/core/services/pipeline/models.go @@ -81,8 +81,8 @@ type TaskRun struct { Error null.String `json:"error"` CreatedAt time.Time `json:"createdAt"` FinishedAt *time.Time `json:"finishedAt"` - Index int32 - DotID string + Index int32 `json:"index"` + DotID string `json:"dotId"` } func (TaskRun) TableName() string { diff --git a/operator_ui/@types/core/store/models.d.ts b/operator_ui/@types/core/store/models.d.ts index 975f0fb9026..7d9f59d0a4a 100644 --- a/operator_ui/@types/core/store/models.d.ts +++ b/operator_ui/@types/core/store/models.d.ts @@ -546,8 +546,6 @@ export interface PipelineTaskRun { error: PipelineTaskError finishedAt: nullable.Time output: PipelineTaskOutput - taskSpec: { - dotId: string - } + dotId: string type: string } diff --git a/operator_ui/src/pages/Jobs/Runs/augmentOcrTasksList.test.ts b/operator_ui/src/pages/Jobs/Runs/augmentOcrTasksList.test.ts index 6cb2fd18259..241f9c89f0e 100644 --- a/operator_ui/src/pages/Jobs/Runs/augmentOcrTasksList.test.ts +++ b/operator_ui/src/pages/Jobs/Runs/augmentOcrTasksList.test.ts @@ -20,9 +20,7 @@ describe('augmentOcrTasksList', () => { output: null, error: 'majority of fetchers in median failed: error making http request: reason; error making http request: reason: bad input for task', - taskSpec: { - dotId: 'answer', - }, + dotId: 'answer', createdAt: '2020-11-24T11:38:36.100272Z', finishedAt: '2020-11-24T11:39:26.19516Z', status: 'errored', @@ -31,9 +29,7 @@ describe('augmentOcrTasksList', () => { type: 'multiply', output: null, error: 'error making http request: reason', - taskSpec: { - dotId: 'multiplyLast', - }, + dotId: 'multiplyLast', createdAt: '2020-11-24T11:38:36.100272Z', finishedAt: '2020-11-24T11:39:26.171678Z', status: 'not_run', @@ -42,9 +38,7 @@ describe('augmentOcrTasksList', () => { type: 'multiply', output: null, error: 'error making http request: reason', - taskSpec: { - dotId: 'multiplyOpen', - }, + dotId: 'multiplyOpen', createdAt: '2020-11-24T11:38:36.100272Z', finishedAt: '2020-11-24T11:39:26.176633Z', status: 'not_run', @@ -53,9 +47,7 @@ describe('augmentOcrTasksList', () => { type: 'jsonparse', output: null, error: 'error making http request: reason', - taskSpec: { - dotId: 'parseLast', - }, + dotId: 'parseLast', createdAt: '2020-11-24T11:38:36.100272Z', finishedAt: '2020-11-24T11:39:26.154488Z', status: 'not_run', @@ -64,9 +56,7 @@ describe('augmentOcrTasksList', () => { type: 'jsonparse', output: null, error: 'error making http request: reason', - taskSpec: { - dotId: 'parseOpen', - }, + dotId: 'parseOpen', createdAt: '2020-11-24T11:38:36.100272Z', finishedAt: '2020-11-24T11:39:26.15558Z', status: 'not_run', @@ -75,9 +65,7 @@ describe('augmentOcrTasksList', () => { type: 'http', output: null, error: 'error making http request: reason', - taskSpec: { - dotId: 'fetch', - }, + dotId: 'fetch', createdAt: '2020-11-24T11:38:36.100272Z', finishedAt: '2020-11-24T11:39:26.12949Z', status: 
'errored', @@ -86,9 +74,7 @@ describe('augmentOcrTasksList', () => { type: 'http', output: null, error: 'error making http request: reason', - taskSpec: { - dotId: 'fetch2', - }, + dotId: 'fetch2', createdAt: '2020-11-24T11:38:36.100272Z', finishedAt: '2020-11-24T11:39:26.127941Z', status: 'errored', diff --git a/operator_ui/src/pages/Jobs/Runs/augmentOcrTasksList.ts b/operator_ui/src/pages/Jobs/Runs/augmentOcrTasksList.ts index 22f61aecf6d..8212859bcdd 100644 --- a/operator_ui/src/pages/Jobs/Runs/augmentOcrTasksList.ts +++ b/operator_ui/src/pages/Jobs/Runs/augmentOcrTasksList.ts @@ -28,7 +28,7 @@ export function augmentOcrTasksList({ jobRun }: { jobRun: PipelineJobRun }) { ) const taskRun = jobRun.taskRuns.find( - ({ taskSpec }) => taskSpec.dotId === stratifyNodeCopy.id, + ({ dotId }) => dotId === stratifyNodeCopy.id, ) stratifyNodeCopy.attributes = { diff --git a/operator_ui/src/pages/Jobs/transformJobRuns.test.ts b/operator_ui/src/pages/Jobs/transformJobRuns.test.ts index e1927581438..da8a52b5d77 100644 --- a/operator_ui/src/pages/Jobs/transformJobRuns.test.ts +++ b/operator_ui/src/pages/Jobs/transformJobRuns.test.ts @@ -38,9 +38,7 @@ describe('transformPipelineJobRun', () => { finishedAt: '2020-11-19T14:01:25.015681Z', output: null, status: 'not_run', - taskSpec: { - dotId: 'multiply', - }, + dotId: 'multiply', type: 'multiply', }, { @@ -50,9 +48,7 @@ describe('transformPipelineJobRun', () => { finishedAt: '2020-11-19T14:01:25.005568Z', output: null, status: 'not_run', - taskSpec: { - dotId: 'parse', - }, + dotId: 'parse', type: 'jsonparse', }, { @@ -62,9 +58,7 @@ describe('transformPipelineJobRun', () => { finishedAt: '2020-11-19T14:01:24.997068Z', output: null, status: 'errored', - taskSpec: { - dotId: 'fetch', - }, + dotId: 'fetch', type: 'http', }, ], diff --git a/operator_ui/src/pages/Jobs/transformJobRuns.ts b/operator_ui/src/pages/Jobs/transformJobRuns.ts index 698808dd869..a791635032c 100644 --- a/operator_ui/src/pages/Jobs/transformJobRuns.ts +++ b/operator_ui/src/pages/Jobs/transformJobRuns.ts @@ -9,11 +9,7 @@ import { import { getOcrJobStatus } from './utils' function getTaskStatus({ - taskRun: { - taskSpec: { dotId }, - finishedAt, - error, - }, + taskRun: { dotId, finishedAt, error }, stratify, taskRuns, }: { @@ -30,7 +26,7 @@ function getTaskStatus({ if (currentNode) { currentNode.parentIds.forEach((id) => { - const parentTaskRun = taskRuns.find((tr) => tr.taskSpec.dotId === id) + const parentTaskRun = taskRuns.find((tr) => tr.dotId === id) if (parentTaskRun?.error !== null && parentTaskRun?.error === taskError) { taskError = 'not_run' diff --git a/operator_ui/support/factories/jobRunV2.ts b/operator_ui/support/factories/jobRunV2.ts index 636736b081f..97a3c8accb1 100644 --- a/operator_ui/support/factories/jobRunV2.ts +++ b/operator_ui/support/factories/jobRunV2.ts @@ -20,7 +20,7 @@ export function jobRunV2( error: `error making http request: Post "http://localhost:8001": dial tcp 127.0.0.1:8001: connect: connection refused`, finishedAt: '2020-11-19T14:01:25.015681Z', output: null, - taskSpec: { dotId: 'multiply' }, + dotId: 'multiply', type: 'multiply', }, @@ -29,7 +29,7 @@ export function jobRunV2( error: `error making http request: Post "http://localhost:8001": dial tcp 127.0.0.1:8001: connect: connection refused`, finishedAt: '2020-11-19T14:01:25.005568Z', output: null, - taskSpec: { dotId: 'parse' }, + dotId: 'parse', type: 'jsonparse', }, { @@ -37,7 +37,7 @@ export function jobRunV2( error: `error making http request: Post "http://localhost:8001": dial tcp 
127.0.0.1:8001: connect: connection refused`, finishedAt: '2020-11-19T14:01:24.997068Z', output: null, - taskSpec: { dotId: 'fetch' }, + dotId: 'fetch', type: 'http', }, ], From df0f7c31a803372b0031e2e0170de65437f8b399 Mon Sep 17 00:00:00 2001 From: John Barker Date: Thu, 18 Mar 2021 11:00:41 -0600 Subject: [PATCH 031/116] Add toggleable verbose logging to the stats pusher --- core/services/chainlink/application.go | 2 +- core/services/synchronization/explorer_client.go | 11 ++++++++++- core/store/orm/config.go | 5 +++++ core/store/orm/schema.go | 1 + docs/CHANGELOG.md | 3 +++ 5 files changed, 20 insertions(+), 2 deletions(-) diff --git a/core/services/chainlink/application.go b/core/services/chainlink/application.go index 564ac4ac280..9ee80850b66 100644 --- a/core/services/chainlink/application.go +++ b/core/services/chainlink/application.go @@ -144,7 +144,7 @@ func NewApplication(config *orm.Config, ethClient eth.Client, advisoryLocker pos monitoringEndpoint := ocrtypes.MonitoringEndpoint(&telemetry.NoopAgent{}) if config.ExplorerURL() != nil { - explorerClient = synchronization.NewExplorerClient(config.ExplorerURL(), config.ExplorerAccessKey(), config.ExplorerSecret()) + explorerClient = synchronization.NewExplorerClient(config.ExplorerURL(), config.ExplorerAccessKey(), config.ExplorerSecret(), config.StatsPusherLogging()) statsPusher = synchronization.NewStatsPusher(store.DB, explorerClient) monitoringEndpoint = telemetry.NewAgent(explorerClient) } diff --git a/core/services/synchronization/explorer_client.go b/core/services/synchronization/explorer_client.go index a2a2d7f6555..3559e953cd4 100644 --- a/core/services/synchronization/explorer_client.go +++ b/core/services/synchronization/explorer_client.go @@ -77,6 +77,7 @@ type explorerClient struct { url *url.URL accessKey string secret string + logging bool closeRequested chan struct{} closed chan struct{} @@ -86,7 +87,11 @@ type explorerClient struct { // NewExplorerClient returns a stats pusher using a websocket for // delivery. 
-func NewExplorerClient(url *url.URL, accessKey, secret string) ExplorerClient { +func NewExplorerClient(url *url.URL, accessKey, secret string, loggingArgs ...bool) ExplorerClient { + logging := false + if len(loggingArgs) > 0 { + logging = loggingArgs[0] + } return &explorerClient{ url: url, sendText: make(chan []byte, SendBufferSize), @@ -97,6 +102,7 @@ func NewExplorerClient(url *url.URL, accessKey, secret string) ExplorerClient { status: ConnectionStatusDisconnected, accessKey: accessKey, secret: secret, + logging: logging, closeRequested: make(chan struct{}), closed: make(chan struct{}), @@ -316,6 +322,9 @@ func (ec *explorerClient) writeMessage(message []byte, messageType int) error { if _, err := writer.Write(message); err != nil { return err } + if ec.logging { + logger.Debugw("websocketStatsPusher successfully wrote message", "messageType", messageType, "message", message) + } return writer.Close() } diff --git a/core/store/orm/config.go b/core/store/orm/config.go index 1b6a1d095c2..ac4757c7c5b 100644 --- a/core/store/orm/config.go +++ b/core/store/orm/config.go @@ -881,6 +881,11 @@ func (c Config) SessionTimeout() models.Duration { return models.MustMakeDuration(c.getWithFallback("SessionTimeout", parseDuration).(time.Duration)) } +// StatsPusherLogging toggles very verbose logging of raw messages for the StatsPusher (also telemetry) +func (c Config) StatsPusherLogging() bool { + return c.getWithFallback("StatsPusherLogging", parseBool).(bool) +} + // TLSCertPath represents the file system location of the TLS certificate // Chainlink should use for HTTPS. func (c Config) TLSCertPath() string { diff --git a/core/store/orm/schema.go b/core/store/orm/schema.go index b3eea4f1686..2a9464aecad 100644 --- a/core/store/orm/schema.go +++ b/core/store/orm/schema.go @@ -120,6 +120,7 @@ type ConfigSchema struct { RootDir string `env:"ROOT" default:"~/.chainlink"` SecureCookies bool `env:"SECURE_COOKIES" default:"true"` SessionTimeout models.Duration `env:"SESSION_TIMEOUT" default:"15m"` + StatsPusherLogging string `env:"STATS_PUSHER_LOGGING" default:"false"` TriggerFallbackDBPollInterval time.Duration `env:"TRIGGER_FALLBACK_DB_POLL_INTERVAL" default:"30s"` TLSCertPath string `env:"TLS_CERT_PATH" ` TLSHost string `env:"CHAINLINK_TLS_HOST" ` diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md index d398fd1ff30..65e7e091393 100644 --- a/docs/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -9,6 +9,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Added +- Add `STATS_PUSHER_LOGGING` to toggle stats pusher raw message logging (DEBUG + level). 
+
 - Add `ADMIN_CREDENTIALS_FILE` configuration variable
 
   This variable defaults to `$ROOT/apicredentials` and when defined / the

From 26f2e1d2581a5a06c5cca58c0b643eafdb3b9d5d Mon Sep 17 00:00:00 2001
From: AndrewInsignares
Date: Thu, 18 Mar 2021 20:17:08 -0400
Subject: [PATCH 032/116] Add logSql field, update the default logger, and
 persist the setting to the DB

---
 core/cmd/app.go                 | 21 +++++++---
 core/cmd/remote_client.go       | 44 +++++++++++++++++---
 core/cmd/renderer.go            |  5 ++-
 core/store/orm/orm.go           |  8 ++++
 core/web/log_controller.go      | 74 +++++++++++++++++++++++++++------
 core/web/log_controller_test.go | 40 ++++++++++++++----
 core/web/presenters/log.go      |  3 +-
 7 files changed, 160 insertions(+), 35 deletions(-)

diff --git a/core/cmd/app.go b/core/cmd/app.go
index b507957540f..6f9eb3232e2 100644
--- a/core/cmd/app.go
+++ b/core/cmd/app.go
@@ -148,13 +148,24 @@ func NewApp(client *Client) *cli.App {
 				},
 			},
 			{
-				Name:   "enabledebug",
-				Usage:  "Enable and disable debug logging",
-				Action: client.SetDebugLogging,
+				Name:   "loglevel",
+				Usage:  "Set log level",
+				Action: client.SetLogLevel,
 				Flags: []cli.Flag{
 					cli.BoolFlag{
-						Name:  "enabled, true",
-						Usage: "enable or disable debug logger",
+						Name:  "level",
+						Usage: "set log level for node (debug||info||warn||error)",
+					},
+				},
+			},
+			{
+				Name:   "logsql",
+				Usage:  "Enable/disable sql statement logging",
+				Action: client.SetLogSQL,
+				Flags: []cli.Flag{
+					cli.BoolFlag{
+						Name:  "enable",
+						Usage: "enable or disable sql logging",
 					},
 				},
 			},
diff --git a/core/cmd/remote_client.go b/core/cmd/remote_client.go
index c7dd3699ea9..f98f7734b7a 100644
--- a/core/cmd/remote_client.go
+++ b/core/cmd/remote_client.go
@@ -1193,14 +1193,46 @@ func normalizePassword(password string) string {
 	return url.PathEscape(strings.TrimSpace(password))
 }
 
-// SetDebugLogging enables or disables debug logging on the node
-func (cli *Client) SetDebugLogging(c *clipkg.Context) (err error) {
-	if !c.Args().Present() {
-		return cli.errorOut(errors.New("Must set enabled or disabled (true || false)"))
+// SetLogLevel sets the log level on the node
+func (cli *Client) SetLogLevel(c *clipkg.Context) (err error) {
+	if !c.Bool("level") {
+		return cli.errorOut(errors.New("expecting a log level (debug, info, warn, error)"))
 	}
 
-	isDebugEnabled := c.Bool("enabled")
-	request := web.LoglevelPatchRequest{EnableDebugLog: &isDebugEnabled}
+	logLevel := c.Args().Get(0)
+	request := web.LoglevelPatchRequest{LogLevel: logLevel, LogSql: ""}
+	requestData, err := json.Marshal(request)
+	if err != nil {
+		return cli.errorOut(err)
+	}
+
+	buf := bytes.NewBuffer(requestData)
+	resp, err := cli.HTTP.Patch("/v2/log", buf)
+	if err != nil {
+		return cli.errorOut(errors.Wrap(err, "from setting the log level"))
+	}
+	defer func() {
+		if cerr := resp.Body.Close(); cerr != nil {
+			err = multierr.Append(err, cerr)
+		}
+	}()
+
+	var lR webPresenter.LogResource
+	err = cli.renderAPIResponse(resp, &lR)
+	return err
+}
+
+// SetLogSQL enables or disables logging of SQL statements
+func (cli *Client) SetLogSQL(c *clipkg.Context) (err error) {
+	if !c.Bool("enable") {
+		return cli.errorOut(errors.New("Must set --enable = (true || false)"))
+	}
+
+	logSql, err := strconv.ParseBool(c.Args().Get(0))
+	if err != nil {
+		return cli.errorOut(err)
+	}
+	request := web.LoglevelPatchRequest{LogLevel: "", LogSql: strconv.FormatBool(logSql)}
 	requestData, err := json.Marshal(request)
 	if err != nil {
 		return cli.errorOut(err)
diff --git a/core/cmd/renderer.go b/core/cmd/renderer.go
index 4e8036c392d..eedce620d48 100644
--- a/core/cmd/renderer.go
+++ b/core/cmd/renderer.go
@@ -98,10 +98,11 @@ func (rt RendererTable) Render(v interface{}) error {
 }
 
 func (rt RendererTable) renderLogResource(logResource webPresenters.LogResource) error {
-	table := rt.newTable([]string{"ID", "DebugEnabled"})
+	table := rt.newTable([]string{"ID", "LogLevel", "LogSql"})
 	table.Append([]string{
 		logResource.ID,
-		strconv.FormatBool(logResource.DebugEnabled),
+		logResource.LogLevel,
+		strconv.FormatBool(logResource.LogSql),
 	})
 	render("Logs", table)
 	return nil
diff --git a/core/store/orm/orm.go b/core/store/orm/orm.go
index 56e48834a52..53bac365c4d 100644
--- a/core/store/orm/orm.go
+++ b/core/store/orm/orm.go
@@ -644,6 +644,14 @@ func (orm *ORM) SetConfigValue(field string, value encoding.TextMarshaler) error
 		FirstOrCreate(&models.Configuration{}).Error
 }
 
+// SetConfigStrValue sets the value for a named configuration entry from a string
+func (orm *ORM) SetConfigStrValue(field string, value string) error {
+	name := EnvVarName(field)
+	return orm.DB.Where(models.Configuration{Name: name}).
+		Assign(models.Configuration{Name: name, Value: value}).
+		FirstOrCreate(&models.Configuration{}).Error
+}
+
 // CreateJob saves a job to the database and adds IDs to associated tables.
 func (orm *ORM) CreateJob(job *models.JobSpec) error {
 	return orm.createJob(orm.DB, job)
diff --git a/core/web/log_controller.go b/core/web/log_controller.go
index b46d2d3c313..3ae7c917aad 100644
--- a/core/web/log_controller.go
+++ b/core/web/log_controller.go
@@ -1,7 +1,10 @@
 package web
 
 import (
+	"fmt"
 	"net/http"
+	"strconv"
+	"strings"
 
 	"github.com/gin-gonic/gin"
 	"github.com/smartcontractkit/chainlink/core/logger"
@@ -16,7 +19,29 @@ type LogController struct {
 }
 
 type LoglevelPatchRequest struct {
-	EnableDebugLog *bool `json:"debugEnabled"`
+	LogLevel string `json:"logLevel"`
+	LogSql   string `json:"logSql"`
+}
+
+func getLogLevelFromStr(logLevel string) (zapcore.Level, error) {
+	switch strings.ToLower(logLevel) {
+	case "debug":
+		return zapcore.DebugLevel, nil
+	case "info":
+		return zapcore.InfoLevel, nil
+	case "warn":
+		return zapcore.WarnLevel, nil
+	case "error":
+		return zapcore.ErrorLevel, nil
+	case "dpanic":
+		return zapcore.DPanicLevel, nil
+	case "panic":
+		return zapcore.PanicLevel, nil
+	case "fatal":
+		return zapcore.FatalLevel, nil
+	default:
+		return zapcore.InfoLevel, fmt.Errorf("could not parse %s as log level (debug, info, warn, error)", logLevel)
+	}
 }
 
 // SetDebug sets the debug log mode for the logger
@@ -27,26 +52,49 @@ func (cc *LogController) SetDebug(c *gin.Context) {
 		return
 	}
 
-	var err error
-	if *request.EnableDebugLog {
-		cc.App.GetStore().Config.Set("LOG_LEVEL", zapcore.DebugLevel.String())
-		err = cc.App.GetStore().SetConfigValue("LogLevel", zapcore.DebugLevel)
-	} else {
-		cc.App.GetStore().Config.Set("LOG_LEVEL", zapcore.InfoLevel.String())
-		err = cc.App.GetStore().SetConfigValue("LogLevel", zapcore.InfoLevel)
-	}
-	if err != nil {
-		jsonAPIError(c, http.StatusInternalServerError, err)
+	if request.LogLevel == "" && request.LogSql == "" {
+		jsonAPIError(c, http.StatusInternalServerError, fmt.Errorf("please set either logLevel or logSql as params in order to set the log level"))
 		return
 	}
 
-	logger.SetLogger(cc.App.GetStore().Config.CreateProductionLogger())
+	if request.LogLevel != "" {
+		ll, err := getLogLevelFromStr(request.LogLevel)
+		if err != nil {
+			jsonAPIError(c, http.StatusInternalServerError, err)
+			return
+		}
+		cc.App.GetStore().Config.Set("LOG_LEVEL", ll.String())
+		err = cc.App.GetStore().SetConfigStrValue("LogLevel", ll.String())
+		if err != nil {
+			jsonAPIError(c, http.StatusInternalServerError, err)
+			return
+		}
+	}
+
+	if request.LogSql != "" {
+		logSql, err := strconv.ParseBool(request.LogSql)
+		if err != nil {
+			jsonAPIError(c, http.StatusInternalServerError, err)
+			return
+		}
+		cc.App.GetStore().Config.Set("LOG_SQL", request.LogSql)
+		err = cc.App.GetStore().SetConfigStrValue("LogSQLStatements", request.LogSql)
+		if err != nil {
+			jsonAPIError(c, http.StatusInternalServerError, err)
+			return
+		}
+		cc.App.GetStore().SetLogging(logSql)
+	}
+
+	// Set default logger with new configurations
+	logger.Default = cc.App.GetStore().Config.CreateProductionLogger()
 
 	response := &presenters.LogResource{
 		JAID: presenters.JAID{
 			ID: "log",
 		},
-		DebugEnabled: *request.EnableDebugLog,
+		LogLevel: cc.App.GetStore().Config.LogLevel().String(),
+		LogSql:   cc.App.GetStore().Config.LogSQLStatements(),
 	}
 
 	jsonAPIResponse(c, response, "log")
diff --git a/core/web/log_controller_test.go b/core/web/log_controller_test.go
index d54b438135c..a53822debf1 100644
--- a/core/web/log_controller_test.go
+++ b/core/web/log_controller_test.go
@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"encoding/json"
 	"net/http"
+	"strconv"
 	"testing"
 
 	"github.com/bmizerany/assert"
@@ -16,9 +17,12 @@ import (
 )
 
 type testCase struct {
-	Description string
-	enableDug   bool
+	Description string
+	logLevel    string
+	logSql      string
 
 	expectedLogLevel zapcore.Level
+	expectedLogSql   bool
 }
 
 func TestLogController_SetDebug(t *testing.T) {
@@ -35,20 +39,35 @@ func TestLogController_SetDebug(t *testing.T) {
 
 	cases := []testCase{
 		{
-			Description:      "Set debug enabled to true",
-			enableDug:        true,
+			Description:      "Set log level to debug",
+			logLevel:         "debug",
+			logSql:           "",
 			expectedLogLevel: zapcore.DebugLevel,
 		},
 		{
-			Description:      "Set debug enabled to false (info)",
-			enableDug:        false,
+			Description:      "Set log level to info",
+			logLevel:         "info",
+			logSql:           "",
+			expectedLogLevel: zapcore.InfoLevel,
+		},
+		{
+			Description:      "Set log level to info and log sql to true",
+			logLevel:         "info",
+			logSql:           "true",
 			expectedLogLevel: zapcore.InfoLevel,
 		},
+		{
+			Description:      "Set log level to warn and log sql to false",
+			logLevel:         "warn",
+			logSql:           "false",
+			expectedLogLevel: zapcore.WarnLevel,
+		},
 	}
 
 	for _, tc := range cases {
 		func() {
-			request := web.LoglevelPatchRequest{EnableDebugLog: &tc.enableDug}
+			request := web.LoglevelPatchRequest{LogLevel: tc.logLevel, LogSql: tc.logSql}
+
 			requestData, _ := json.Marshal(request)
 			buf := bytes.NewBuffer(requestData)
 
@@ -58,7 +77,12 @@ func TestLogController_SetDebug(t *testing.T) {
 			lR := presenters.LogResource{}
 			require.NoError(t, cltest.ParseJSONAPIResponse(t, resp, &lR))
-			assert.Equal(t, tc.enableDug, lR.DebugEnabled)
+			if tc.logLevel != "" {
+				assert.Equal(t, tc.logLevel, lR.LogLevel)
+			}
+			if tc.logSql != "" {
+				assert.Equal(t, tc.logSql, strconv.FormatBool(lR.LogSql))
+			}
 			assert.Equal(t, tc.expectedLogLevel.String(), app.GetStore().Config.LogLevel().String())
 		}()
 	}
diff --git a/core/web/presenters/log.go b/core/web/presenters/log.go
index 5c408545a1f..eb359eec091 100644
--- a/core/web/presenters/log.go
+++ b/core/web/presenters/log.go
@@ -2,7 +2,8 @@ package presenters
 
 type LogResource struct {
 	JAID
-	DebugEnabled bool `json:"debugEnabled"`
+	LogLevel string `json:"logLevel"`
+	LogSql   bool   `json:"logSql"`
 }
 
 // GetName implements the api2go EntityNamer interface

From abcc62fd707cbffb89a42b4b8e9d0b5394977772 Mon Sep 17 00:00:00 2001
From: Sam
Date: Fri, 5 Mar 2021 16:54:06 +0000
Subject: [PATCH 033/116] EthBroadcaster autorecovers nonce after a database
 restore

---
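Note: the core of this change is a nonce fast-forward on startup: each key's
locally stored NextNonce is compared against the chain's pending nonce, and
the local value is advanced when the chain is ahead (e.g. after restoring an
older database backup), so the EthBroadcaster never reuses a nonce. The
NonceSyncer implementation itself is not reproduced in this excerpt; the
sketch below only illustrates the fast-forward rule, using the eth.Client
and models.Key types touched by this patch and an illustrative function name:

	// fastForwardNonce returns the next nonce the broadcaster should use.
	// It never moves the local nonce backwards; it only advances it when
	// the chain has seen more transactions than the (possibly restored) DB.
	func fastForwardNonce(ctx context.Context, ethClient eth.Client, key models.Key) (int64, error) {
		chainNonce, err := ethClient.PendingNonceAt(ctx, key.Address.Address())
		if err != nil {
			return key.NextNonce, err
		}
		if int64(chainNonce) > key.NextNonce {
			return int64(chainNonce), nil
		}
		return key.NextNonce, nil
	}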
core/cmd/local_client.go | 33 +- core/cmd/local_client_test.go | 2 +- core/cmd/renderer.go | 7 +- core/internal/cltest/cltest.go | 2 + core/internal/cltest/factories.go | 7 +- core/internal/features_test.go | 6 + .../bulletprooftxmanager.go | 16 +- .../bulletprooftxmanager/eth_broadcaster.go | 80 +--- .../eth_broadcaster_test.go | 102 ++--- .../bulletprooftxmanager/eth_confirmer.go | 27 +- .../eth_confirmer_test.go | 2 +- .../bulletprooftxmanager/helpers_test.go | 4 +- .../bulletprooftxmanager/nonce_syncer.go | 405 +++++++++++++++++ .../bulletprooftxmanager/nonce_syncer_test.go | 415 ++++++++++++++++++ .../0017_bptxm_chain_nonce_fastforward.go | 28 ++ core/store/models/key.go | 2 +- core/store/orm/config.go | 2 + core/store/presenters/presenters.go | 2 +- docs/CHANGELOG.md | 6 + 19 files changed, 975 insertions(+), 173 deletions(-) create mode 100644 core/services/bulletprooftxmanager/nonce_syncer.go create mode 100644 core/services/bulletprooftxmanager/nonce_syncer_test.go create mode 100644 core/store/migrations/0017_bptxm_chain_nonce_fastforward.go diff --git a/core/cmd/local_client.go b/core/cmd/local_client.go index cb81b0bc060..8c87d645afc 100644 --- a/core/cmd/local_client.go +++ b/core/cmd/local_client.go @@ -55,10 +55,7 @@ func (cli *Client) RunNode(c *clipkg.Context) error { logger.SetLogger(cli.Config.CreateProductionLogger()) logger.Infow(fmt.Sprintf("Starting Chainlink Node %s at commit %s", static.Version, static.Sha), "id", "boot", "Version", static.Version, "SHA", static.Sha, "InstanceUUID", static.InstanceUUID) - app, err := cli.AppFactory.NewApplication(cli.Config, func(app chainlink.Application) { - store := app.GetStore() - checkAccountsForExternalUse(store) - }) + app, err := cli.AppFactory.NewApplication(cli.Config) if err != nil { return cli.errorOut(errors.Wrap(err, "creating application")) } @@ -198,32 +195,6 @@ func passwordFromFile(pwdFile string) (string, error) { return strings.TrimSpace(string(dat)), err } -func checkAccountsForExternalUse(store *strpkg.Store) { - keys, err := store.AllKeys() - if err != nil { - logger.Error("database error while retrieving send keys:", err) - return - } - for _, key := range keys { - logIfNonceOutOfSync(store, key) - } -} - -func logIfNonceOutOfSync(store *strpkg.Store, key models.Key) { - onChainNonce, err := store.EthClient.PendingNonceAt(context.TODO(), key.Address.Address()) - if err != nil { - logger.Error(fmt.Sprintf("error determining nonce for address %s: %v", key.Address.Hex(), err)) - return - } - var nonce int64 - if key.NextNonce != nil { - nonce = *key.NextNonce - } - if nonce < int64(onChainNonce) { - logger.Warn(fmt.Sprintf("The account %s is being used by another wallet and is not safe to use with chainlink", key.Address.Hex())) - } -} - func updateConfig(config *orm.Config, debug bool, replayFromBlock int64) { if debug { config.Set("LOG_LEVEL", zapcore.DebugLevel.String()) @@ -263,14 +234,12 @@ func setupFundingKey(ctx context.Context, str *strpkg.Store, pwd string) (*model if err != nil { return nil, nil, err } - var firstNonce int64 = 0 key = models.Key{ Address: models.EIP55Address(ethAccount.Address.Hex()), IsFunding: true, JSON: models.JSON{ Result: gjson.ParseBytes(exportedJSON), }, - NextNonce: &firstNonce, } // The key does not exist at this point, so we're only creating it here. 
if err = str.CreateKeyIfNotExists(key); err != nil { diff --git a/core/cmd/local_client_test.go b/core/cmd/local_client_test.go index 163a37035eb..89550e70365 100644 --- a/core/cmd/local_client_test.go +++ b/core/cmd/local_client_test.go @@ -523,5 +523,5 @@ func TestClient_SetNextNonce(t *testing.T) { var key models.Key require.NoError(t, store.DB.First(&key).Error) require.NotNil(t, key.NextNonce) - require.Equal(t, int64(42), *key.NextNonce) + require.Equal(t, int64(42), key.NextNonce) } diff --git a/core/cmd/renderer.go b/core/cmd/renderer.go index a3bda83ae64..5d43c0d0431 100644 --- a/core/cmd/renderer.go +++ b/core/cmd/renderer.go @@ -402,12 +402,7 @@ func (rt RendererTable) renderConfigPatchResponse(config *web.ConfigPatchRespons func (rt RendererTable) renderETHKeys(keys []presenters.ETHKey) error { var rows [][]string for _, key := range keys { - var nextNonce string - if key.NextNonce == nil { - nextNonce = "0" - } else { - nextNonce = fmt.Sprintf("%d", *key.NextNonce) - } + nextNonce := fmt.Sprintf("%d", key.NextNonce) var lastUsed string if key.LastUsed != nil { lastUsed = key.LastUsed.String() diff --git a/core/internal/cltest/cltest.go b/core/internal/cltest/cltest.go index 157634f8717..dce55994eea 100644 --- a/core/internal/cltest/cltest.go +++ b/core/internal/cltest/cltest.go @@ -453,6 +453,7 @@ func NewEthMocks(t testing.TB) (*mocks.RPCClient, *mocks.GethClient, *mocks.Subs func NewEthMocksWithStartupAssertions(t testing.TB) (*mocks.RPCClient, *mocks.GethClient, *mocks.Subscription, func()) { r, g, s, assertMocksCalled := NewEthMocks(t) g.On("ChainID", mock.Anything).Return(NewTestConfig(t).ChainID(), nil) + g.On("PendingNonceAt", mock.Anything, mock.Anything).Return(uint64(0), nil).Maybe() r.On("EthSubscribe", mock.Anything, mock.Anything, "newHeads").Return(EmptyMockSubscription(), nil) s.On("Err").Return(nil).Maybe() s.On("Unsubscribe").Return(nil).Maybe() @@ -1699,6 +1700,7 @@ func MockApplicationEthCalls(t *testing.T, app *TestApplication, ethClient *mock sub.On("Err").Return(nil) ethClient.On("SubscribeNewHead", mock.Anything, mock.Anything).Return(sub, nil) ethClient.On("ChainID", mock.Anything).Return(app.Store.Config.ChainID(), nil) + ethClient.On("PendingNonceAt", mock.Anything, mock.Anything).Return(uint64(0), nil).Maybe() // Stop sub.On("Unsubscribe").Return(nil) diff --git a/core/internal/cltest/factories.go b/core/internal/cltest/factories.go index af80c3479fd..5fb9aa315e0 100644 --- a/core/internal/cltest/factories.go +++ b/core/internal/cltest/factories.go @@ -685,15 +685,14 @@ func MustGenerateRandomKey(t testing.TB, opts ...interface{}) models.Key { eip, err := models.EIP55AddressFromAddress(k.Address) require.NoError(t, err) - var nextNonce *int64 + var nextNonce int64 var funding bool for _, opt := range opts { switch v := opt.(type) { case int: - i := int64(v) - nextNonce = &i + nextNonce = int64(v) case int64: - nextNonce = &v + nextNonce = v case bool: funding = v default: diff --git a/core/internal/features_test.go b/core/internal/features_test.go index ce015acbea8..3d0c787b666 100644 --- a/core/internal/features_test.go +++ b/core/internal/features_test.go @@ -128,6 +128,7 @@ func TestIntegration_HttpRequestWithHeaders(t *testing.T) { Return(nil) gethClient.On("ChainID", mock.Anything).Return(config.ChainID(), nil) + gethClient.On("PendingNonceAt", mock.Anything, mock.Anything).Maybe().Return(uint64(0), nil) gethClient.On("BalanceAt", mock.Anything, mock.Anything, mock.Anything).Maybe().Return(oneETH.ToInt(), nil) gethClient.On("SendTransaction", 
mock.Anything, mock.Anything).
 		Return(nil)
@@ -798,6 +799,7 @@ func TestIntegration_FluxMonitor_Deviation(t *testing.T) {
 	sub.On("Err").Return(nil).Maybe()
 	sub.On("Unsubscribe").Return(nil).Maybe()
 	gethClient.On("ChainID", mock.Anything).Return(app.Store.Config.ChainID(), nil)
+	gethClient.On("PendingNonceAt", mock.Anything, mock.Anything).Maybe().Return(uint64(0), nil)
 	gethClient.On("BalanceAt", mock.Anything, mock.Anything, mock.Anything).Maybe().Return(oneETH.ToInt(), nil)
 	newHeads := make(chan<- *models.Head, 1)
 	rpcClient.On("EthSubscribe", mock.Anything, mock.Anything, "newHeads").
@@ -934,6 +936,7 @@ func TestIntegration_FluxMonitor_NewRound(t *testing.T) {
 
 	// Start, connect, and initialize node
 	gethClient.On("ChainID", mock.Anything).Maybe().Return(app.Store.Config.ChainID(), nil)
+	gethClient.On("PendingNonceAt", mock.Anything, mock.Anything).Maybe().Return(uint64(0), nil)
 	gethClient.On("BalanceAt", mock.Anything, mock.Anything, mock.Anything).Maybe().Return(oneETH.ToInt(), nil)
 
 	newHeadsCh := make(chan chan<- *models.Head, 1)
@@ -1081,6 +1084,7 @@ func TestIntegration_MultiwordV1(t *testing.T) {
 	sub.On("Err").Return(nil)
 	sub.On("Unsubscribe").Return(nil).Maybe()
 	gethClient.On("ChainID", mock.Anything).Return(app.Store.Config.ChainID(), nil)
+	gethClient.On("PendingNonceAt", mock.Anything, mock.Anything).Maybe().Return(uint64(0), nil)
 	headsCh := make(chan chan<- *models.Head, 1)
 	rpcClient.On("EthSubscribe", mock.Anything, mock.Anything, "newHeads").
 		Run(func(args mock.Arguments) { headsCh <- args.Get(1).(chan<- *models.Head) }).
@@ -1578,6 +1582,8 @@ func TestIntegration_GasUpdater(t *testing.T) {
 	rpcClient.On("EthSubscribe", mock.Anything, mock.Anything, "newHeads").
 		Run(func(args mock.Arguments) { chchNewHeads <- args.Get(1).(chan<- *models.Head) }).
 		Return(sub, nil)
+	// Nonce syncer
+	gethClient.On("PendingNonceAt", mock.Anything, mock.Anything).Maybe().Return(uint64(0), nil)
 
 	// GasUpdater boot calls
 	rpcClient.On("CallContext", mock.Anything, mock.AnythingOfType("**models.Head"), "eth_getBlockByNumber", "latest", false).Return(nil).Run(func(args mock.Arguments) {
diff --git a/core/services/bulletprooftxmanager/bulletprooftxmanager.go b/core/services/bulletprooftxmanager/bulletprooftxmanager.go
index 55f99974bd1..f9b66a307dd 100644
--- a/core/services/bulletprooftxmanager/bulletprooftxmanager.go
+++ b/core/services/bulletprooftxmanager/bulletprooftxmanager.go
@@ -131,13 +131,9 @@ func sendEmptyTransaction(
 ) (_ *gethTypes.Transaction, err error) {
 	defer utils.WrapIfError(&err, "sendEmptyTransaction failed")
 
-	to := utils.ZeroAddress
-	value := big.NewInt(0)
-	payload := []byte{}
-	tx := gethTypes.NewTransaction(nonce, to, value, gasLimit, gasPriceWei, payload)
-	signedTx, err := keyStore.SignTx(account, tx, chainID)
+	signedTx, err := makeEmptyTransaction(keyStore, nonce, gasLimit, gasPriceWei, account, chainID)
 	if err != nil {
-		return signedTx, err
+		return nil, err
 	}
 	ctx, cancel := context.WithTimeout(context.Background(), maxEthNodeRequestTime)
 	defer cancel()
@@ -145,6 +141,14 @@
 	return signedTx, err
 }
 
+// makeEmptyTransaction makes a signed transaction that sends 0 ETH to self
+func makeEmptyTransaction(keyStore strpkg.KeyStoreInterface, nonce uint64, gasLimit uint64, gasPriceWei *big.Int, account gethAccounts.Account, chainID *big.Int) (*gethTypes.Transaction, error) {
+	value := big.NewInt(0)
+	payload := []byte{}
+	tx := gethTypes.NewTransaction(nonce, account.Address, value, gasLimit, gasPriceWei, payload)
+	return keyStore.SignTx(account, tx, chainID)
+}
+
 func
saveReplacementInProgressAttempt(store *strpkg.Store, oldAttempt models.EthTxAttempt, replacementAttempt *models.EthTxAttempt) error { if oldAttempt.State != models.EthTxAttemptInProgress || replacementAttempt.State != models.EthTxAttemptInProgress { return errors.New("expected attempts to be in_progress") diff --git a/core/services/bulletprooftxmanager/eth_broadcaster.go b/core/services/bulletprooftxmanager/eth_broadcaster.go index 89502889d30..02f32775580 100644 --- a/core/services/bulletprooftxmanager/eth_broadcaster.go +++ b/core/services/bulletprooftxmanager/eth_broadcaster.go @@ -1,7 +1,5 @@ package bulletprooftxmanager -// NOTE: See: https://godoc.org/time#Timer.Stop for an explanation of this pattern - import ( "context" "fmt" @@ -54,20 +52,24 @@ type ethBroadcaster struct { // trigger allows other goroutines to force ethBroadcaster to rescan the // database early (before the next poll interval) trigger chan struct{} - chStop chan struct{} - wg sync.WaitGroup + + ctx context.Context + ctxCancel context.CancelFunc + wg sync.WaitGroup utils.StartStopOnce } // NewEthBroadcaster returns a new concrete ethBroadcaster func NewEthBroadcaster(store *store.Store, config orm.ConfigReader, eventBroadcaster postgres.EventBroadcaster) EthBroadcaster { + ctx, cancel := context.WithCancel(context.Background()) return ðBroadcaster{ store: store, config: config, ethClient: store.EthClient, trigger: make(chan struct{}, 1), - chStop: make(chan struct{}), + ctx: ctx, + ctxCancel: cancel, wg: sync.WaitGroup{}, eventBroadcaster: eventBroadcaster, } @@ -84,6 +86,11 @@ func (eb *ethBroadcaster) Start() error { return errors.Wrap(err, "EthBroadcaster could not start") } + syncer := NewNonceSyncer(eb.store, eb.config, eb.ethClient) + if err := syncer.SyncAll(eb.ctx); err != nil { + return errors.Wrap(err, "EthBroadcaster failed to sync with on-chain nonce") + } + eb.wg.Add(1) go eb.monitorEthTxs() @@ -102,7 +109,7 @@ func (eb *ethBroadcaster) Close() error { eb.ethTxInsertListener.Close() } - close(eb.chStop) + eb.ctxCancel() eb.wg.Wait() return nil @@ -121,7 +128,7 @@ func (eb *ethBroadcaster) ethTxInsertTriggerer() { select { case <-eb.ethTxInsertListener.Events(): eb.Trigger() - case <-eb.chStop: + case <-eb.ctx.Done(): return } } @@ -156,7 +163,7 @@ func (eb *ethBroadcaster) monitorEthTxs() { } select { - case <-eb.chStop: + case <-eb.ctx.Done(): // NOTE: See: https://godoc.org/time#Timer.Stop for an explanation of this pattern if !pollDBTimer.Stop() { <-pollDBTimer.C @@ -258,7 +265,7 @@ func (eb *ethBroadcaster) handleInProgressEthTx(etx models.EthTx, attempt models return errors.Errorf("invariant violation: expected transaction %v to be in_progress, it was %s", etx.ID, etx.State) } - ctx, cancel := context.WithTimeout(context.Background(), maxEthNodeRequestTime) + ctx, cancel := context.WithTimeout(eb.ctx, maxEthNodeRequestTime) defer cancel() sendError := sendTransaction(ctx, eb.ethClient, attempt) @@ -366,7 +373,7 @@ func (eb *ethBroadcaster) nextUnstartedTransactionWithNonce(fromAddress gethComm return nil, errors.Wrap(err, "findNextUnstartedTransactionFromAddress failed") } - nonce, err := eb.getNextNonceWithInitialLoad(etx.FromAddress) + nonce, err := GetNextNonce(eb.store.DB, etx.FromAddress) if err != nil { return nil, err } @@ -471,62 +478,15 @@ func saveFatallyErroredTransaction(store *store.Store, etx *models.EthTx) error } // GetNextNonce returns keys.next_nonce for the given address -func GetNextNonce(db *gorm.DB, address gethCommon.Address) (*int64, error) { - var nonce *int64 +func 
GetNextNonce(db *gorm.DB, address gethCommon.Address) (int64, error) { + var nonce int64 row := db.Raw("SELECT next_nonce FROM keys WHERE address = ?", address).Row() if err := row.Scan(&nonce); err != nil { - return nil, errors.Wrap(err, "GetNextNonce failed scanning row") + return 0, errors.Wrap(err, "GetNextNonce failed scanning row") } return nonce, nil } -// getNextNonce returns keys.next_nonce for the given address -// It loads it from the database, or if this is a brand new key, queries the eth node for the latest nonce -func (eb *ethBroadcaster) getNextNonceWithInitialLoad(address gethCommon.Address) (int64, error) { - nonce, err := GetNextNonce(eb.store.DB, address) - if err != nil { - return 0, err - } - if nonce != nil { - return *nonce, nil - } - - return eb.loadAndSaveNonce(address) -} - -func (eb *ethBroadcaster) loadAndSaveNonce(address gethCommon.Address) (int64, error) { - logger.Debugw("EthBroadcaster: loading next nonce from eth node", "address", address.Hex()) - nonce, err := eb.loadInitialNonceFromEthClient(address) - if err != nil { - return 0, errors.Wrap(err, "GetNextNonce failed to loadInitialNonceFromEthClient") - } - res := eb.store.DB.Exec(`UPDATE keys SET next_nonce = ? WHERE next_nonce IS NULL AND address = ?`, nonce, address) - if res.Error != nil { - return 0, errors.Wrap(err, "GetNextNonce failed to save new nonce loaded from eth client") - } - if res.RowsAffected == 0 { - return 0, errors.Errorf("GetNextNonce optimistic locking failed; someone else modified key %s", address.Hex()) - } - if nonce == 0 { - logger.Infow(fmt.Sprintf("EthBroadcaster: first use of address %s, starting from nonce 0", - address.Hex()), "address", address.Hex(), "nextNonce", nonce) - } else { - logger.Warnw(fmt.Sprintf("EthBroadcaster: address %s has been used before. Starting from nonce %v."+ - " Please note that using the chainlink keys with an external wallet is NOT SUPPORTED and can lead to missed or stuck transactions.", - address.Hex(), nonce), - "address", address.Hex(), "nextNonce", nonce) - } - - return int64(nonce), nil -} - -func (eb *ethBroadcaster) loadInitialNonceFromEthClient(account gethCommon.Address) (nextNonce uint64, err error) { - ctx, cancel := context.WithTimeout(context.Background(), maxEthNodeRequestTime) - defer cancel() - nextNonce, err = eb.ethClient.PendingNonceAt(ctx, account) - return nextNonce, errors.WithStack(err) -} - // IncrementNextNonce increments keys.next_nonce by 1 func IncrementNextNonce(db *gorm.DB, address gethCommon.Address, currentNonce int64) error { res := db.Exec("UPDATE keys SET next_nonce = next_nonce + 1, updated_at = NOW() WHERE address = ? 
AND next_nonce = ?", address.Bytes(), currentNonce) diff --git a/core/services/bulletprooftxmanager/eth_broadcaster_test.go b/core/services/bulletprooftxmanager/eth_broadcaster_test.go index a83b62640b1..7d7948cb97e 100644 --- a/core/services/bulletprooftxmanager/eth_broadcaster_test.go +++ b/core/services/bulletprooftxmanager/eth_broadcaster_test.go @@ -218,82 +218,71 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_Success(t *testing.T) { }) } -func TestEthBroadcaster_AssignsNonceOnFirstRun(t *testing.T) { +func TestEthBroadcaster_AssignsNonceOnStart(t *testing.T) { var err error store, cleanup := cltest.NewStore(t) defer cleanup() - key, fromAddress := cltest.MustAddRandomKeyToKeystore(t, store) + _, fromAddress := cltest.MustAddRandomKeyToKeystore(t, store) store.KeyStore.Unlock(cltest.Password) config, cleanup := cltest.NewConfig(t) defer cleanup() - ethClient := new(mocks.Client) - store.EthClient = ethClient - - eb, cleanup := cltest.NewEthBroadcaster(t, store, config) - defer cleanup() - - toAddress := gethCommon.HexToAddress("0x6C03DDA95a2AEd917EeCc6eddD4b9D16E6380411") - gasLimit := uint64(242) + ethNodeNonce := uint64(22) // Insert new key to test we only update the intended one dummykey := cltest.MustInsertRandomKey(t, store.DB) - ethTx := models.EthTx{ - FromAddress: fromAddress, - ToAddress: toAddress, - EncodedPayload: []byte{42, 42, 0}, - Value: assets.NewEthValue(0), - GasLimit: gasLimit, - CreatedAt: time.Unix(0, 0), - State: models.EthTxUnstarted, - } - require.NoError(t, store.DB.Create(ðTx).Error) - t.Run("when eth node returns error", func(t *testing.T) { + ethClient := new(mocks.Client) + store.EthClient = ethClient + + eb, cleanup := cltest.NewEthBroadcaster(t, store, config) + defer cleanup() + + ethClient.On("PendingNonceAt", mock.Anything, mock.MatchedBy(func(account gethCommon.Address) bool { + return account.Hex() == dummykey.Address.Hex() + })).Return(uint64(0), nil).Once() ethClient.On("PendingNonceAt", mock.Anything, mock.MatchedBy(func(account gethCommon.Address) bool { return account.Hex() == fromAddress.Hex() - })).Return(uint64(0), errors.New("something exploded")).Once() + })).Return(ethNodeNonce, errors.New("something exploded")).Once() - // First attempt errored - err = eb.ProcessUnstartedEthTxs(key) + err = eb.Start() require.Error(t, err) + defer eb.Close() require.Contains(t, err.Error(), "something exploded") - // Check ethTx that it has no nonce assigned - ethTx, err = store.FindEthTxWithAttempts(ethTx.ID) + // dummy address got updated + var n int + err := store.DB.Raw(`SELECT next_nonce FROM keys WHERE address = ?`, dummykey.Address).Scan(&n).Error require.NoError(t, err) + require.Equal(t, 0, n) - require.Nil(t, ethTx.Nonce) - - // Check to make sure all keys still don't have a nonce assigned - res := store.DB.Exec(`SELECT * FROM keys WHERE next_nonce IS NULL`) - require.NoError(t, res.Error) - require.Equal(t, int64(2), res.RowsAffected) + // real address did not update (it errored) + err = store.DB.Raw(`SELECT next_nonce FROM keys WHERE address = ?`, fromAddress).Scan(&n).Error + require.NoError(t, err) + require.Equal(t, 0, n) ethClient.AssertExpectations(t) }) t.Run("when eth node returns nonce", func(t *testing.T) { - ethNodeNonce := uint64(42) + ethClient := new(mocks.Client) + store.EthClient = ethClient + + eb, cleanup := cltest.NewEthBroadcaster(t, store, config) + defer cleanup() + ethClient.On("PendingNonceAt", mock.Anything, mock.MatchedBy(func(account gethCommon.Address) bool { + return account.Hex() == dummykey.Address.Hex() + 
})).Return(uint64(0), nil).Once() ethClient.On("PendingNonceAt", mock.Anything, mock.MatchedBy(func(account gethCommon.Address) bool { return account.Hex() == fromAddress.Hex() })).Return(ethNodeNonce, nil).Once() - ethClient.On("SendTransaction", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool { - return tx.Nonce() == ethNodeNonce - })).Return(nil).Once() - - // Do the thing - require.NoError(t, eb.ProcessUnstartedEthTxs(key)) - - // Check ethTx that it has the correct nonce assigned - ethTx, err = store.FindEthTxWithAttempts(ethTx.ID) - require.NoError(t, err) + ethClient.On("BatchCallContext", mock.Anything, mock.Anything).Return(nil) - require.NotNil(t, ethTx.Nonce) - require.Equal(t, int64(ethNodeNonce), *ethTx.Nonce) + require.NoError(t, eb.Start()) + defer eb.Close() // Check key to make sure it has correct nonce assigned keys, err := store.SendKeys() @@ -301,13 +290,14 @@ func TestEthBroadcaster_AssignsNonceOnFirstRun(t *testing.T) { key := keys[0] require.NotNil(t, key.NextNonce) - require.Equal(t, int64(43), *key.NextNonce) + require.Equal(t, int64(ethNodeNonce), key.NextNonce) // The dummy key did not get updated key2 := keys[1] require.Equal(t, dummykey.Address, key2.Address) - require.Nil(t, key2.NextNonce) + require.Equal(t, 0, int(key2.NextNonce)) + // TODO: Test for zero transaction insertion ethClient.AssertExpectations(t) }) } @@ -614,7 +604,7 @@ func getLocalNextNonce(t *testing.T, str *store.Store, fromAddress gethCommon.Ad n, err := bulletprooftxmanager.GetNextNonce(str.DB, fromAddress) require.NoError(t, err) require.NotNil(t, n) - return uint64(*n) + return uint64(n) } // Note that all of these tests share the same database, and ordering matters. @@ -680,11 +670,11 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_Errors(t *testing.T) { assert.Len(t, etx1.EthTxAttempts, 1) // Check that the local nonce was incremented by one - var finalNextNonce *int64 + var finalNextNonce int64 finalNextNonce, err = bulletprooftxmanager.GetNextNonce(store.DB, fromAddress) require.NoError(t, err) require.NotNil(t, finalNextNonce) - require.Equal(t, int64(1), *finalNextNonce) + require.Equal(t, int64(1), finalNextNonce) }) t.Run("geth client returns an error in the fatal errors category", func(t *testing.T) { @@ -723,7 +713,7 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_Errors(t *testing.T) { // Saved NextNonce must be the same as before because this transaction // was not accepted by the eth node and never can be require.NotNil(t, key.NextNonce) - require.Equal(t, int64(localNextNonce), *key.NextNonce) + require.Equal(t, int64(localNextNonce), key.NextNonce) ethClient.AssertExpectations(t) }) @@ -763,7 +753,7 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_Errors(t *testing.T) { // Saved NextNonce must be the same as before because this transaction // was not accepted by the eth node and never can be require.NotNil(t, key.NextNonce) - require.Equal(t, int64(localNextNonce), *key.NextNonce) + require.Equal(t, int64(localNextNonce), key.NextNonce) ethClient.AssertExpectations(t) }) @@ -1070,7 +1060,7 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_KeystoreErrors(t *testing.T) { // Check that the key did not have its nonce incremented require.NoError(t, store.DB.First(&key).Error) require.NotNil(t, key.NextNonce) - require.Equal(t, int64(localNonce), *key.NextNonce) + require.Equal(t, int64(localNonce), key.NextNonce) kst.AssertExpectations(t) }) @@ -1113,7 +1103,7 @@ func TestEthBroadcaster_ProcessUnstartedEthTxs_KeystoreErrors(t *testing.T) { var key 
models.Key require.NoError(t, store.DB.First(&key).Error) require.NotNil(t, key.NextNonce) - require.Equal(t, int64(localNonce), *key.NextNonce) + require.Equal(t, int64(localNonce), key.NextNonce) kst.AssertExpectations(t) }) @@ -1150,7 +1140,7 @@ func TestEthBroadcaster_GetNextNonce(t *testing.T) { nonce, err := bulletprooftxmanager.GetNextNonce(store.DB, key.Address.Address()) assert.NoError(t, err) require.NotNil(t, nonce) - assert.Equal(t, int64(0), *nonce) + assert.Equal(t, int64(0), nonce) } func TestEthBroadcaster_IncrementNextNonce(t *testing.T) { @@ -1167,7 +1157,7 @@ func TestEthBroadcaster_IncrementNextNonce(t *testing.T) { // Nonce bumped to 1 require.NoError(t, store.DB.First(&key).Error) require.NotNil(t, key.NextNonce) - require.Equal(t, int64(1), *key.NextNonce) + require.Equal(t, int64(1), key.NextNonce) } func TestEthBroadcaster_Trigger(t *testing.T) { diff --git a/core/services/bulletprooftxmanager/eth_confirmer.go b/core/services/bulletprooftxmanager/eth_confirmer.go index dcbf74a065b..3b85560a115 100644 --- a/core/services/bulletprooftxmanager/eth_confirmer.go +++ b/core/services/bulletprooftxmanager/eth_confirmer.go @@ -393,6 +393,13 @@ func (ec *ethConfirmer) saveFetchedReceipts(ctx context.Context, receipts []Rece // In this case we mark these transactions as 'confirmed_missing_receipt' to // prevent gas bumping. // +// FIXME: We should continue to attempt to resend eth_txes in this state on +// every head to guard against the extremely rare scenario of nonce gap due to +// reorg that excludes the transaction (from another wallet) that had this +// nonce (until finality depth is reached, after which we make the explicit +// decision to give up). +// https://www.pivotaltracker.com/story/show/177389604 +// // We will continue to try to fetch a receipt for these attempts until all // attempts are below the finality depth from current head. func (ec *ethConfirmer) markConfirmedMissingReceipt(ctx context.Context) (err error) { @@ -414,8 +421,8 @@ AND nonce < ( // markOldTxesMissingReceiptAsErrored // -// Once eth_tx has all of its attempts broadcast before some cutoff threshold, -// we mark it as fatally errored (never sent). +// Once eth_tx has all of its attempts broadcast before some cutoff threshold +// without receiving any receipts, we mark it as fatally errored (never sent). // // The job run will also be marked as errored in this case since we never got a // receipt and thus cannot pass on any transaction hash @@ -622,6 +629,8 @@ func FindEthTxsRequiringResubmissionDueToInsufficientEth(db *gorm.DB, address ge // FindEthTxsRequiringGasBump returns transactions that have all // attempts which are unconfirmed for at least gasBumpThreshold blocks, // limited by limit pending transactions +// +// It also returns eth_txes that are unconfirmed with no eth_tx_attempts func FindEthTxsRequiringGasBump(db *gorm.DB, address gethCommon.Address, blockNum, gasBumpThreshold, depth int64) (etxs []models.EthTx, err error) { if gasBumpThreshold == 0 { logger.Debug("EthConfirmer: Gas bumping disabled (gasBumpThreshold set to 0)") @@ -841,7 +850,12 @@ func saveSentAttempt(db *gorm.DB, attempt *models.EthTxAttempt) error { return errors.New("expected state to be in_progress") } attempt.State = models.EthTxAttemptBroadcast - return errors.Wrap(db.Save(attempt).Error, "saveSentAttempt failed") + return postgres.GormTransaction(context.Background(), db, func(tx *gorm.DB) error { + if err := tx.Exec(`UPDATE eth_txes SET broadcast_at = NOW() WHERE id = ? 
AND broadcast_at IS NULL`, attempt.EthTxID).Error; err != nil {
+			return errors.Wrap(err, "saveSentAttempt failed")
+		}
+		return errors.Wrap(tx.Save(attempt).Error, "saveSentAttempt failed")
+	})
 }
 
 func saveInsufficientEthAttempt(db *gorm.DB, attempt *models.EthTxAttempt) error {
@@ -849,7 +863,12 @@ func saveInsufficientEthAttempt(db *gorm.DB, attempt *models.EthTxAttempt) error
 		return errors.New("expected state to be either in_progress or insufficient_eth")
 	}
 	attempt.State = models.EthTxAttemptInsufficientEth
-	return errors.Wrap(db.Save(attempt).Error, "saveInsufficientEthAttempt failed")
+	return postgres.GormTransaction(context.Background(), db, func(tx *gorm.DB) error {
+		if err := tx.Exec(`UPDATE eth_txes SET broadcast_at = NOW() WHERE id = ? AND broadcast_at IS NULL`, attempt.EthTxID).Error; err != nil {
+			return errors.Wrap(err, "saveInsufficientEthAttempt failed")
+		}
+		return errors.Wrap(tx.Save(attempt).Error, "saveInsufficientEthAttempt failed")
+	})
 }
diff --git a/core/services/bulletprooftxmanager/eth_confirmer_test.go b/core/services/bulletprooftxmanager/eth_confirmer_test.go
index a2cdd175a80..7018c4cfdf6 100644
--- a/core/services/bulletprooftxmanager/eth_confirmer_test.go
+++ b/core/services/bulletprooftxmanager/eth_confirmer_test.go
@@ -2084,7 +2084,7 @@ func TestEthConfirmer_ForceRebroadcast(t *testing.T) {
 			return tx.Nonce() == uint64(nonce) &&
 				uint64(tx.GasPrice().Int64()) == gasPriceWei &&
 				tx.Gas() == overrideGasLimit &&
-				*tx.To() == utils.ZeroAddress &&
+				*tx.To() == fromAddress &&
 				tx.Value().Cmp(big.NewInt(0)) == 0 &&
 				len(tx.Data()) == 0
 		})).Return(nil).Once()
diff --git a/core/services/bulletprooftxmanager/helpers_test.go b/core/services/bulletprooftxmanager/helpers_test.go
index d2460f8f37e..11cbd7efea1 100644
--- a/core/services/bulletprooftxmanager/helpers_test.go
+++ b/core/services/bulletprooftxmanager/helpers_test.go
@@ -1,6 +1,8 @@
 package bulletprooftxmanager
 
-import "github.com/smartcontractkit/chainlink/core/services/eth"
+import (
+	"github.com/smartcontractkit/chainlink/core/services/eth"
+)
 
 func SetEthClientOnEthConfirmer(ethClient eth.Client, ethConfirmer *ethConfirmer) {
 	ethConfirmer.ethClient = ethClient
diff --git a/core/services/bulletprooftxmanager/nonce_syncer.go b/core/services/bulletprooftxmanager/nonce_syncer.go
new file mode 100644
index 00000000000..a5a897f0bb8
--- /dev/null
+++ b/core/services/bulletprooftxmanager/nonce_syncer.go
@@ -0,0 +1,405 @@
+package bulletprooftxmanager
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"sort"
+	"sync"
+	"time"
+
+	"github.com/ethereum/go-ethereum/accounts"
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/rpc"
+	"github.com/pkg/errors"
+	"github.com/smartcontractkit/chainlink/core/assets"
+	"github.com/smartcontractkit/chainlink/core/logger"
+	"github.com/smartcontractkit/chainlink/core/services/eth"
+	"github.com/smartcontractkit/chainlink/core/services/postgres"
+	"github.com/smartcontractkit/chainlink/core/store"
+	"github.com/smartcontractkit/chainlink/core/store/models"
+	"github.com/smartcontractkit/chainlink/core/store/orm"
+	"github.com/smartcontractkit/chainlink/core/utils"
+	"go.uber.org/multierr"
+	"gorm.io/gorm"
+)
+
+type (
+	// NonceSyncer manages the delicate task of syncing the local nonce with the
+	// chain nonce in case of divergence.
+	//
+	// On startup, we check each key for the nonce value on chain and compare
+	// it to our local value.
+	//
+	// Usually the on-chain nonce will be the same as (or lower than) the
+	// next_nonce in the DB, in which case we do nothing.
+	//
+	// If we are restoring from a backup however, or another wallet has used the
+	// account, the chain nonce might be higher than our local one. In this
+	// scenario, we must fastforward the local nonce to match the chain nonce.
+	//
+	// The problem with doing this is that now Chainlink does not have any
+	// ownership or control over potentially pending transactions with nonces
+	// between our local highest nonce and the chain nonce. If one of those
+	// transactions is pushed out of the mempool or re-org'd out of the chain,
+	// we run the risk of being stuck with a gap in the nonce sequence that
+	// will never be filled.
+	//
+	// The solution is to query the chain for our own transactions and take
+	// ownership of them by writing them to the database and letting the
+	// EthConfirmer handle them as it would any other transaction.
+	//
+	// This is not quite as straightforward as one might expect. We cannot
+	// query transactions from our account to infinite depth (geth does not
+	// support this). The best we can do is to query for all transactions sent
+	// within the past ETH_FINALITY_DEPTH blocks and find the ones sent by our
+	// address(es).
+	//
+	// This gives us re-org protection up to ETH_FINALITY_DEPTH deep in the
+	// worst case, which is in line with our other guarantees.
+	NonceSyncer struct {
+		store     *store.Store
+		config    orm.ConfigReader
+		ethClient eth.Client
+	}
+	// NSinserttx represents an EthTx and Attempt to be inserted together
+	NSinserttx struct {
+		Etx     models.EthTx
+		Attempt models.EthTxAttempt
+	}
+)
+
+// NewNonceSyncer returns a new syncer
+func NewNonceSyncer(store *store.Store, config orm.ConfigReader, ethClient eth.Client) *NonceSyncer {
+	return &NonceSyncer{
+		store,
+		config,
+		ethClient,
+	}
+}
+
+// SyncAll syncs nonces for all keys in parallel
+//
+// This should only be called once, before the EthBroadcaster has started.
+// Calling it later is not safe and could lead to races.
+func (s NonceSyncer) SyncAll(ctx context.Context) (merr error) {
+	keys, err := s.store.SendKeys()
+	if err != nil {
+		return errors.Wrap(err, "NonceSyncer#SyncAll failed to get keys")
+	}
+
+	var wg sync.WaitGroup
+	var errMu sync.Mutex
+
+	wg.Add(len(keys))
+	for _, key := range keys {
+		go func(k models.Key) {
+			defer wg.Done()
+			if err := s.fastForwardNonceIfNecessary(ctx, k.Address.Address()); err != nil {
+				errMu.Lock()
+				defer errMu.Unlock()
+				merr = multierr.Combine(merr, err)
+			}
+		}(key)
+	}
+
+	wg.Wait()
+
+	return errors.Wrap(merr, "NonceSyncer#SyncAll failed")
+}
+
+func (s NonceSyncer) fastForwardNonceIfNecessary(ctx context.Context, address common.Address) error {
+	chainNonce, err := s.pendingNonceFromEthClient(ctx, address)
+	if err != nil {
+		return errors.Wrap(err, "NonceSyncer#fastForwardNonceIfNecessary failed to load pending on-chain nonce")
+	}
+	if chainNonce == 0 {
+		return nil
+	}
+
+	localNonce, err := GetNextNonce(s.store.DB, address)
+	if err != nil {
+		return err
+	}
+	if chainNonce <= uint64(localNonce) {
+		return nil
+	}
+	logger.Warnw(fmt.Sprintf("NonceSyncer: address %s has been used before, either by an external wallet or a different Chainlink node. "+
+		"Local nonce is %v but the on-chain nonce for this account was %v. "+
+		"Please note that using the chainlink keys with an external wallet is NOT SUPPORTED and can lead to missed or stuck transactions. 
"+ + "This Chainlink node will now take ownership of this address and may overwrite currently pending transactions", + address.Hex(), localNonce, chainNonce), + "address", address.Hex(), "localNonce", localNonce, "chainNonce", chainNonce) + + // First fetch + // + // We have to get the latest block first and then fetch deeper blocks in a + // subsequent step, because otherwise we don't know what block numbers to + // query for. + // pending can be fetched in the initial request because it doesn't rely on + // any block number. + reqs := []rpc.BatchElem{ + rpc.BatchElem{ + Method: "eth_getBlockByNumber", + Args: []interface{}{"pending", true}, + Result: &models.Block{}, + }, + rpc.BatchElem{ + Method: "eth_getBlockByNumber", + Args: []interface{}{"latest", true}, + Result: &models.Block{}, + }, + } + + err = s.ethClient.BatchCallContext(ctx, reqs) + if err != nil { + return err + } + + if reqs[1].Error != nil { + return errors.Wrap(reqs[1].Error, "latest block request returned error") + } + latestBlock, is := reqs[1].Result.(*models.Block) + if !is { + panic(fmt.Sprintf("invariant violation, expected %T but got %T", &models.Block{}, latestBlock)) + } + latestBlockNum := latestBlock.Number + + floor := latestBlockNum - int64(s.config.EthFinalityDepth()) + if floor < 0 { + floor = 0 + } + + // Second fetch + // + // OPTIMISATION NOTE: + // The astute observer will note that if multiple keys are behind, we fetch + // the same blocks multiple times, doing redundant extra work. This does + // put unnecessary load on the eth node, but the most common use-case is + // with a single key and given how rare this scenario is, I think we can + // live with it. + var reqs2 []rpc.BatchElem + for i := floor; i < latestBlockNum; i++ { + req := rpc.BatchElem{ + Method: "eth_getBlockByNumber", + Args: []interface{}{models.Int64ToHex(i), true}, + Result: &models.Block{}, + } + reqs2 = append(reqs2, req) + } + + err = s.ethClient.BatchCallContext(ctx, reqs2) + if err != nil { + return err + } + + reqs = append(reqs, reqs2...) 
+ var txes []types.Transaction + signer := types.NewEIP155Signer(s.config.ChainID()) + + // Rip through all transactions in all blocks and keep only the ones sent + // from our key + for _, req := range reqs { + if req.Error != nil { + logger.Warnw("NonceSyncer: got error querying for block", "blockNum", req.Args[0], "err", req.Error) + continue + } + block, is := req.Result.(*models.Block) + if !is { + panic(fmt.Sprintf("invariant violation, expected %T but got %T", &models.Block{}, block)) + } + for _, tx := range block.Transactions { + from, err2 := types.Sender(signer, &tx) + if err2 != nil { + logger.Warnw("NonceSyncer#fastForwardNonceIfNecessary failed to extract 'from' from transaction", "tx", tx, "err", err2) + continue + } + if from == address { + txes = append(txes, tx) + } + } + } + + account, err := s.store.KeyStore.GetAccountByAddress(address) + if err != nil { + return errors.Wrap(err, "NonceSyncer#fastForwardNonceIfNecessary could not get account from keystore") + } + sort.Slice(txes, func(i, j int) bool { return txes[i].Nonce() < txes[j].Nonce() }) + + inserts, err := s.makeInserts(account, latestBlock.Number, txes, chainNonce-1) + if err != nil { + return errors.Wrap(err, "NonceSyncer#fastForwardNonceIfNecessary error generating transactions for backfill") + } + + now := time.Now() + return postgres.GormTransaction(ctx, s.store.DB, func(dbtx *gorm.DB) error { + // We pass in next_nonce here as an optimistic lock to make sure it + // didn't get changed out from under us + res := dbtx.Exec(`UPDATE keys SET next_nonce = ?, updated_at = ? WHERE address = ? AND next_nonce = ?`, chainNonce, now, address, localNonce) + if res.Error != nil { + return errors.Wrap(res.Error, "NonceSyncer#fastForwardNonceIfNecessary failed to update keys.next_nonce") + } + if res.RowsAffected == 0 { + return errors.Errorf("NonceSyncer#fastForwardNonceIfNecessary optimistic lock failure fastforwarding nonce %v to %v for key %s", localNonce, chainNonce, address.Hex()) + } + + for _, ins := range inserts { + // Setting broadcast_at here is a bit of a misnomer since this node + // didn't actually broadcast the transaction, but including it + // allows us to avoid changing the state machine limitations and + // represents roughly the time we read the tx from the blockchain + ins.Etx.BroadcastAt = &now + if err := dbtx.Create(&ins.Etx).Error; err != nil { + return errors.Wrap(err, "NonceSyncer#fastForwardNonceIfNecessary failed to create eth_tx") + } + ins.Attempt.EthTxID = ins.Etx.ID + if err := dbtx.Create(&ins.Attempt).Error; err != nil { + return errors.Wrap(err, "NonceSyncer#fastForwardNonceIfNecessary failed to create eth_tx_attempt") + } + } + return nil + }) +} + +func (s NonceSyncer) pendingNonceFromEthClient(ctx context.Context, account common.Address) (nextNonce uint64, err error) { + ctx, cancel := context.WithTimeout(ctx, maxEthNodeRequestTime) + defer cancel() + nextNonce, err = s.ethClient.PendingNonceAt(ctx, account) + return nextNonce, errors.WithStack(err) +} + +func (s NonceSyncer) makeInserts(acct accounts.Account, blockNum int64, txes []types.Transaction, toNonce uint64) (inserts []NSinserttx, err error) { + if len(txes) == 0 { + return + } + if !sort.SliceIsSorted(txes, func(i, j int) bool { return txes[i].Nonce() < txes[j].Nonce() }) { + return nil, errors.New("expected txes to be sorted in nonce ascending order") + } + fromNonce := txes[0].Nonce() + if fromNonce > toNonce { + // I don't know how this could ever happen but we should handle the case anyway + return nil, 
errors.Errorf("fromNonce of %v was greater than toNonce of %v", fromNonce, toNonce) + } + txMap := make(map[uint64]types.Transaction) + for _, tx := range txes { + txMap[tx.Nonce()] = tx + } + for n := fromNonce; n <= toNonce; n++ { + nonce := int64(n) + tx, exists := txMap[n] + if exists { + ins, err := s.MakeInsert(tx, acct, blockNum, nonce) + if err != nil { + logger.Errorw("NonceSyncer: failed to generate transaction, this nonce will not be re-org protected", "address", acct.Address.Hex(), "err", err, "nonce", nonce) + continue + } + inserts = append(inserts, ins) + } else { + // Use a zero-transaction if its missing for whatever reason + // Should really never happen but you never know with geth + logger.Warnw(fmt.Sprintf("NonceSyncer: missing transaction for nonce %d, falling back to zero transaction", n), "address", acct.Address.Hex(), "blockNum", blockNum, "nonce", nonce) + ins, err := s.MakeZeroInsert(acct, blockNum, nonce) + if err != nil { + logger.Errorw("NonceSyncer: failed to generate empty transaction, this nonce will not be re-org protected", "address", acct.Address.Hex(), "err", err, "nonce", nonce) + continue + } + inserts = append(inserts, ins) + } + } + + return inserts, nil +} + +// MakeInsert generates a NSinserttx that perfectly mirrors the on-chain transaction +// +// This can be handed off to the EthConfirmer and used to query for receipts +// and bump gas etc exactly like any other transaction we might have sent. +func (s NonceSyncer) MakeInsert(tx types.Transaction, acct accounts.Account, blockNum, nonce int64) (ins NSinserttx, err error) { + v, _, _ := tx.RawSignatureValues() + if v == nil { + // Believe it or not, this is the only way to determine if the tx + // is a zero struct without panicking. Thank you, geth. + logger.Warnw("NonceSyncer: tx was empty/unsigned. Falling back to zero transaction", "err", err, "txHash", tx.Hash(), "nonce", nonce, "address", acct.Address.Hex()) + return s.MakeZeroInsert(acct, blockNum, int64(nonce)) + } + // NOTE: We set all transactions to unconfirmed even if they are mined. + // + // This works out, because the first round of the EthConfirmer will check + // for receipts. All these transactions should get receipts if they are confirmed. + // + // Any transaction not yet confirmed will go into the regular gas bumping + // cycle as if it were any normal transaction we had sent ourselves. + ins.Etx = models.EthTx{ + Nonce: &nonce, + FromAddress: acct.Address, + ToAddress: *tx.To(), + EncodedPayload: tx.Data(), + Value: assets.Eth(*tx.Value()), + GasLimit: tx.Gas(), + State: models.EthTxUnconfirmed, + } + rlp := new(bytes.Buffer) + if err := tx.EncodeRLP(rlp); err != nil { + logger.Warnw("NonceSyncer: could not encode RLP. Falling back to zero transaction", "err", err, "txHash", tx.Hash(), "nonce", nonce, "address", acct.Address.Hex()) + return s.MakeZeroInsert(acct, blockNum, int64(nonce)) + } + signedRawTx := rlp.Bytes() + ins.Attempt = models.EthTxAttempt{ + GasPrice: utils.Big(*tx.GasPrice()), + SignedRawTx: signedRawTx, + Hash: tx.Hash(), + BroadcastBeforeBlockNum: &blockNum, + State: models.EthTxAttemptBroadcast, + } + + return ins, nil +} + +// MakeZeroInsert generates a NSinserttx that represents a zero transaction +// +// This transaction will never get a receipt and does not match anything +// on-chain, but it does serve the purpose of a placeholder for this nonce in +// case the on-chain version is ejected from the mempool or re-org'd out of the +// main chain. 
+func (s NonceSyncer) MakeZeroInsert(acct accounts.Account, blockNum, nonce int64) (ins NSinserttx, err error) { + gasLimit := s.config.EthGasLimitDefault() + gasPrice := s.config.EthGasPriceDefault() + + tx, err := makeEmptyTransaction(s.store.KeyStore, uint64(nonce), gasLimit, gasPrice, acct, s.config.ChainID()) + if err != nil { + return ins, errors.Wrap(err, "NonceSyncer#MakeZeroInsert failed to makeEmptyTransaction") + } + rlp := new(bytes.Buffer) + if err := tx.EncodeRLP(rlp); err != nil { + return ins, err + } + // NOTE: These transactions will never get a receipt, but setting them to + // unconfirmed still works out. + // + // If there is a transaction on-chain with the same nonce that is pending, + // then this zero transaction will go into the bumping cycle and may or may + // not replace the on-chain version. + // + // If the on-chain transaction is confirmed, this one will eventually be + // marked confirmed_missing_receipt when a new transaction is confirmed on + // top of it, and will exit the bumping cycle once it's deeper than + // ETH_FINALITY_DEPTH. + ins.Etx = models.EthTx{ + Nonce: &nonce, + FromAddress: acct.Address, + ToAddress: *tx.To(), + EncodedPayload: tx.Data(), + Value: assets.Eth(*tx.Value()), + GasLimit: tx.Gas(), + State: models.EthTxUnconfirmed, + } + ins.Attempt = models.EthTxAttempt{ + GasPrice: utils.Big(*gasPrice), + SignedRawTx: rlp.Bytes(), + Hash: tx.Hash(), + BroadcastBeforeBlockNum: &blockNum, + State: models.EthTxAttemptBroadcast, + } + return ins, nil +} diff --git a/core/services/bulletprooftxmanager/nonce_syncer_test.go b/core/services/bulletprooftxmanager/nonce_syncer_test.go new file mode 100644 index 00000000000..a00f4d0e2fe --- /dev/null +++ b/core/services/bulletprooftxmanager/nonce_syncer_test.go @@ -0,0 +1,415 @@ +package bulletprooftxmanager_test + +import ( + "bytes" + "context" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/accounts" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/rpc" + "github.com/pkg/errors" + "github.com/smartcontractkit/chainlink/core/assets" + "github.com/smartcontractkit/chainlink/core/internal/cltest" + "github.com/smartcontractkit/chainlink/core/internal/mocks" + "github.com/smartcontractkit/chainlink/core/services/bulletprooftxmanager" + "github.com/smartcontractkit/chainlink/core/store" + "github.com/smartcontractkit/chainlink/core/store/models" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func Test_NonceSyncer_SyncAll(t *testing.T) { + t.Parallel() + + t.Run("returns error if PendingNonceAt fails", func(t *testing.T) { + store, cleanup := cltest.NewStore(t) + defer cleanup() + ethClient := new(mocks.Client) + + _, from := cltest.MustAddRandomKeyToKeystore(t, store) + + ethClient.On("PendingNonceAt", mock.Anything, mock.MatchedBy(func(addr common.Address) bool { + return from == addr + })).Return(uint64(0), errors.New("something exploded")) + + ns := bulletprooftxmanager.NewNonceSyncer(store, store.Config, ethClient) + + err := ns.SyncAll(context.Background()) + require.Error(t, err) + assert.Contains(t, err.Error(), "something exploded") + + cltest.AssertCount(t, store, models.EthTx{}, 0) + cltest.AssertCount(t, store, models.EthTxAttempt{}, 0) + + assertDatabaseNonce(t, store, from, 0) + + ethClient.AssertExpectations(t) + }) + + t.Run("does nothing if chain nonce reflects local nonce", func(t *testing.T) { + store, cleanup := 
cltest.NewStore(t) + defer cleanup() + ethClient := new(mocks.Client) + + _, from := cltest.MustAddRandomKeyToKeystore(t, store) + + ethClient.On("PendingNonceAt", mock.Anything, mock.MatchedBy(func(addr common.Address) bool { + return from == addr + })).Return(uint64(0), nil) + + ns := bulletprooftxmanager.NewNonceSyncer(store, store.Config, ethClient) + + require.NoError(t, ns.SyncAll(context.Background())) + + cltest.AssertCount(t, store, models.EthTx{}, 0) + cltest.AssertCount(t, store, models.EthTxAttempt{}, 0) + + assertDatabaseNonce(t, store, from, 0) + + ethClient.AssertExpectations(t) + }) + + t.Run("does nothing if chain nonce is behind local nonce", func(t *testing.T) { + store, cleanup := cltest.NewStore(t) + defer cleanup() + ethClient := new(mocks.Client) + + _, from := cltest.MustAddRandomKeyToKeystore(t, store, int64(32)) + + ethClient.On("PendingNonceAt", mock.Anything, mock.MatchedBy(func(addr common.Address) bool { + return from == addr + })).Return(uint64(31), nil) + + ns := bulletprooftxmanager.NewNonceSyncer(store, store.Config, ethClient) + + require.NoError(t, ns.SyncAll(context.Background())) + + cltest.AssertCount(t, store, models.EthTx{}, 0) + cltest.AssertCount(t, store, models.EthTxAttempt{}, 0) + + assertDatabaseNonce(t, store, from, 32) + + ethClient.AssertExpectations(t) + }) + + t.Run("fast forwards if chain nonce is ahead of local nonce and fills in recent transactions", func(t *testing.T) { + store, cleanup := cltest.NewStore(t) + defer cleanup() + ethClient := new(mocks.Client) + + _, acct1 := cltest.MustAddRandomKeyToKeystore(t, store, int64(0)) + _, acct2 := cltest.MustAddRandomKeyToKeystore(t, store, int64(32)) + + accounts := store.KeyStore.Accounts() + txes := makeRandomTransactions(t, store, 5, accounts, store.Config.ChainID()) + + bPending := models.Block{ + Number: 0, + Transactions: txes[4:], + } + + b2 := models.Block{ + Number: 2, + Hash: cltest.NewHash(), + ParentHash: cltest.NewHash(), + Transactions: txes[0:1], + } + b41 := models.Block{ + Number: 41, + Hash: cltest.NewHash(), + ParentHash: cltest.NewHash(), + Transactions: txes[2:3], + } + bLatest := models.Block{ + Number: 42, + Hash: cltest.NewHash(), + ParentHash: b41.Hash, + Transactions: txes[3:4], + } + + ethClient.On("PendingNonceAt", mock.Anything, mock.MatchedBy(func(addr common.Address) bool { + // Nothing to do for acct2 + return acct2 == addr + })).Return(uint64(32), nil) + ethClient.On("PendingNonceAt", mock.Anything, mock.MatchedBy(func(addr common.Address) bool { + // acct1 has chain nonce of 5 which is ahead of local nonce 0 + return acct1 == addr + })).Return(uint64(5), nil) + ethClient.On("BatchCallContext", mock.Anything, mock.MatchedBy(func(b []rpc.BatchElem) bool { + return len(b) == 2 && + b[0].Method == "eth_getBlockByNumber" && b[0].Args[0] == "pending" && b[0].Args[1] == true && + b[1].Method == "eth_getBlockByNumber" && b[1].Args[0] == "latest" && b[1].Args[1] == true + })).Return(nil).Run(func(args mock.Arguments) { + elems := args.Get(1).([]rpc.BatchElem) + elems[0].Result = &bPending + elems[1].Result = &bLatest + }) + ethClient.On("BatchCallContext", mock.Anything, mock.MatchedBy(func(b []rpc.BatchElem) bool { + return len(b) == 42 && + b[0].Args[0] == models.Int64ToHex(0) && + b[1].Args[0] == models.Int64ToHex(1) && + b[41].Args[0] == models.Int64ToHex(41) + })).Return(nil).Run(func(args mock.Arguments) { + elems := args.Get(1).([]rpc.BatchElem) + elems[41].Result = &b41 + elems[2].Result = &b2 + elems[3].Error = errors.New("random error thrown in for fun") + 
})
+
+		ns := bulletprooftxmanager.NewNonceSyncer(store, store.Config, ethClient)
+
+		require.NoError(t, ns.SyncAll(context.Background()))
+
+		cltest.AssertCount(t, store, models.EthTx{}, 5)
+		cltest.AssertCount(t, store, models.EthTxAttempt{}, 5)
+
+		assertDatabaseNonce(t, store, acct1, 5)
+
+		ethClient.AssertExpectations(t)
+	})
+
+	t.Run("only backfills to ETH_FINALITY_DEPTH", func(t *testing.T) {
+		store, cleanup := cltest.NewStore(t)
+		defer cleanup()
+		ethClient := new(mocks.Client)
+
+		store.Config.Set("ETH_FINALITY_DEPTH", 2)
+
+		_, acct1 := cltest.MustAddRandomKeyToKeystore(t, store, int64(0))
+
+		accounts := store.KeyStore.Accounts()
+		txes := makeRandomTransactions(t, store, 5, accounts, store.Config.ChainID())
+
+		bPending := models.Block{
+			Number:       0,
+			Transactions: txes[4:],
+		}
+
+		b40 := models.Block{
+			Number:     40,
+			Hash:       cltest.NewHash(),
+			ParentHash: cltest.NewHash(),
+		}
+		b41 := models.Block{
+			Number:       41,
+			Hash:         cltest.NewHash(),
+			ParentHash:   b40.Hash,
+			Transactions: txes[2:3],
+		}
+		bLatest := models.Block{
+			Number:       42,
+			Hash:         cltest.NewHash(),
+			ParentHash:   b41.Hash,
+			Transactions: txes[3:4],
+		}
+
+		ethClient.On("PendingNonceAt", mock.Anything, mock.MatchedBy(func(addr common.Address) bool {
+			// acct1 has chain nonce of 5 which is ahead of local nonce 0
+			return acct1 == addr
+		})).Return(uint64(5), nil)
+		ethClient.On("BatchCallContext", mock.Anything, mock.MatchedBy(func(b []rpc.BatchElem) bool {
+			return len(b) == 2 &&
+				b[0].Method == "eth_getBlockByNumber" && b[0].Args[0] == "pending" && b[0].Args[1] == true &&
+				b[1].Method == "eth_getBlockByNumber" && b[1].Args[0] == "latest" && b[1].Args[1] == true
+		})).Return(nil).Run(func(args mock.Arguments) {
+			elems := args.Get(1).([]rpc.BatchElem)
+			elems[0].Result = &bPending
+			elems[1].Result = &bLatest
+		})
+		ethClient.On("BatchCallContext", mock.Anything, mock.MatchedBy(func(b []rpc.BatchElem) bool {
+			return len(b) == 2 &&
+				b[0].Method == "eth_getBlockByNumber" && b[0].Args[0] == models.Int64ToHex(40) &&
+				b[1].Method == "eth_getBlockByNumber" && b[1].Args[0] == models.Int64ToHex(41)
+		})).Return(nil).Run(func(args mock.Arguments) {
+			elems := args.Get(1).([]rpc.BatchElem)
+			elems[0].Result = &b40
+			elems[1].Result = &b41
+		})
+
+		ns := bulletprooftxmanager.NewNonceSyncer(store, store.Config, ethClient)
+
+		require.NoError(t, ns.SyncAll(context.Background()))
+
+		cltest.AssertCount(t, store, models.EthTx{}, 3)
+		cltest.AssertCount(t, store, models.EthTxAttempt{}, 3)
+
+		assertDatabaseNonce(t, store, acct1, 5)
+
+		ethClient.AssertExpectations(t)
+	})
+}
+
+func Test_NonceSyncer_MakeInsert(t *testing.T) {
+	t.Parallel()
+
+	store, cleanup := cltest.NewStore(t)
+	defer cleanup()
+	ethClient := new(mocks.Client)
+
+	_, from := cltest.MustAddRandomKeyToKeystore(t, store)
+	acct := accounts.Account{Address: from}
+
+	kst := store.KeyStore
+	kst.Unlock(cltest.Password)
+
+	var blockNum int64 = 42
+	var nonce uint64 = 1
+	to := cltest.NewAddress()
+	amount := big.NewInt(4200)
+	var gasLimit uint64 = 120000
+	gasPrice := big.NewInt(25000000000)
+	data := cltest.MustRandomBytes(t, 72)
+	unsigned := types.NewTransaction(nonce, to, amount, gasLimit, gasPrice, data)
+
+	t.Run("falls back to zero insert if encodeRLP would fail for some reason, e.g.
tx is zero struct", func(t *testing.T) { + ns := bulletprooftxmanager.NewNonceSyncer(store, store.Config, ethClient) + + tx := types.Transaction{} + + ins, err := ns.MakeInsert(tx, acct, blockNum, int64(nonce)) + require.NoError(t, err) + + assertZero(t, ins, store, from, int64(nonce), blockNum) + }) + + t.Run("makes insert with the given payload", func(t *testing.T) { + tx, err := kst.SignTx(acct, unsigned, store.Config.ChainID()) + require.NoError(t, err) + + ns := bulletprooftxmanager.NewNonceSyncer(store, store.Config, ethClient) + + ins, err := ns.MakeInsert(*tx, acct, blockNum, int64(nonce)) + require.NoError(t, err) + + assert.Equal(t, int64(0), ins.Etx.ID) + assert.Equal(t, int64(nonce), *ins.Etx.Nonce) + assert.Equal(t, from, ins.Etx.FromAddress) + assert.Equal(t, *tx.To(), ins.Etx.ToAddress) + assert.Equal(t, tx.Data(), ins.Etx.EncodedPayload) + assert.Equal(t, *amount, (big.Int)(ins.Etx.Value)) + assert.Equal(t, tx.Gas(), ins.Etx.GasLimit) + assert.Nil(t, ins.Etx.Error) + assert.Nil(t, ins.Etx.BroadcastAt) + assert.Equal(t, models.EthTxUnconfirmed, ins.Etx.State) + + rlp := new(bytes.Buffer) + require.NoError(t, tx.EncodeRLP(rlp)) + + assert.Equal(t, int64(0), ins.Attempt.ID) + assert.Equal(t, int64(0), ins.Attempt.EthTxID) + assert.Equal(t, tx.GasPrice().String(), ins.Attempt.GasPrice.String()) + assert.Equal(t, rlp.Bytes(), ins.Attempt.SignedRawTx) + assert.NotEqual(t, common.Hash{}, ins.Attempt.Hash) + assert.NotNil(t, ins.Attempt.BroadcastBeforeBlockNum) + assert.Equal(t, blockNum, *ins.Attempt.BroadcastBeforeBlockNum) + assert.Equal(t, models.EthTxAttemptBroadcast, ins.Attempt.State) + }) +} + +func Test_NonceSyncer_MakeZeroInsert(t *testing.T) { + t.Parallel() + + var blockNum int64 = 42 + var nonce int64 = 1 + + t.Run("errors if keystore.SignTx errors", func(t *testing.T) { + store, cleanup := cltest.NewStore(t) + defer cleanup() + ethClient := new(mocks.Client) + kst := new(mocks.KeyStoreInterface) + store.KeyStore = kst + acct := accounts.Account{Address: cltest.NewAddress()} + + kst.On("SignTx", acct, mock.Anything, store.Config.ChainID()).Return(nil, errors.New("something exploded")) + + ns := bulletprooftxmanager.NewNonceSyncer(store, store.Config, ethClient) + + _, err := ns.MakeZeroInsert(acct, blockNum, nonce) + assert.Error(t, err) + assert.Contains(t, err.Error(), "something exploded") + }) + + t.Run("returns insert with zero EthTx and Attempt", func(t *testing.T) { + store, cleanup := cltest.NewStore(t) + defer cleanup() + ethClient := new(mocks.Client) + + _, from := cltest.MustAddRandomKeyToKeystore(t, store) + acct := accounts.Account{Address: from} + + oldKst := store.KeyStore + oldKst.Unlock(cltest.Password) + + kst := new(mocks.KeyStoreInterface) + store.KeyStore = kst + + kst.On("SignTx", acct, mock.MatchedBy(func(tx *types.Transaction) bool { + return int64(tx.Nonce()) == nonce && *tx.To() == from && big.NewInt(0).Cmp(tx.Value()) == 0 + }), store.Config.ChainID()).Return( + func(acct accounts.Account, unsigned *types.Transaction, chainID *big.Int) *types.Transaction { + signed, err := oldKst.SignTx(acct, unsigned, chainID) + if err != nil { + t.Fatal(err) + } + return signed + }, + func(accounts.Account, *types.Transaction, *big.Int) error { return nil }, + ) + + ns := bulletprooftxmanager.NewNonceSyncer(store, store.Config, ethClient) + + ins, err := ns.MakeZeroInsert(acct, blockNum, nonce) + require.NoError(t, err) + + assertZero(t, ins, store, from, nonce, blockNum) + }) +} + +func assertZero(t *testing.T, ins bulletprooftxmanager.NSinserttx, store 
*store.Store, from common.Address, nonce, blockNum int64) {
+	t.Helper()
+
+	assert.Equal(t, int64(0), ins.Etx.ID)
+	assert.Equal(t, nonce, *ins.Etx.Nonce)
+	assert.Equal(t, from, ins.Etx.FromAddress)
+	assert.Equal(t, from, ins.Etx.ToAddress)
+	assert.Equal(t, []byte{}, ins.Etx.EncodedPayload)
+	assert.Equal(t, assets.NewEthValue(0), ins.Etx.Value)
+	assert.Equal(t, store.Config.EthGasLimitDefault(), ins.Etx.GasLimit)
+	assert.Nil(t, ins.Etx.Error)
+	assert.Nil(t, ins.Etx.BroadcastAt)
+	assert.Equal(t, models.EthTxUnconfirmed, ins.Etx.State)
+
+	assert.Equal(t, int64(0), ins.Attempt.ID)
+	assert.Equal(t, int64(0), ins.Attempt.EthTxID)
+	assert.Equal(t, store.Config.EthGasPriceDefault().String(), ins.Attempt.GasPrice.String())
+	assert.Len(t, ins.Attempt.SignedRawTx, 103)
+	assert.NotEqual(t, common.Hash{}, ins.Attempt.Hash)
+	assert.NotNil(t, ins.Attempt.BroadcastBeforeBlockNum)
+	assert.Equal(t, blockNum, *ins.Attempt.BroadcastBeforeBlockNum)
+	assert.Equal(t, models.EthTxAttemptBroadcast, ins.Attempt.State)
+}
+
+func assertDatabaseNonce(t *testing.T, store *store.Store, from common.Address, nonce int64) {
+	t.Helper()
+
+	k, err := store.KeyByAddress(from)
+	require.NoError(t, err)
+	assert.Equal(t, nonce, k.NextNonce)
+}
+
+func makeRandomTransactions(t *testing.T, store *store.Store, n int, accounts []accounts.Account, chainID *big.Int) (txes []types.Transaction) {
+	for i := 0; i < n; i++ {
+		unsigned := types.NewTransaction(uint64(i), cltest.NewAddress(), big.NewInt(int64(100+i)), uint64(100000+i), big.NewInt(int64(1000000000+i)), cltest.MustRandomBytes(t, 100+i))
+
+		// rotate accounts
+		acct := accounts[i%len(accounts)]
+		signed, err := store.KeyStore.SignTx(acct, unsigned, chainID)
+		require.NoError(t, err)
+		txes = append(txes, *signed)
+	}
+	return
+}
diff --git a/core/store/migrations/0017_bptxm_chain_nonce_fastforward.go b/core/store/migrations/0017_bptxm_chain_nonce_fastforward.go
new file mode 100644
index 00000000000..3d9c03b24ad
--- /dev/null
+++ b/core/store/migrations/0017_bptxm_chain_nonce_fastforward.go
@@ -0,0 +1,28 @@
+package migrations
+
+import (
+	"github.com/go-gormigrate/gormigrate/v2"
+	"gorm.io/gorm"
+)
+
+const (
+	up17 = `
+UPDATE keys SET next_nonce = 0 WHERE next_nonce IS NULL;
+ALTER TABLE keys ALTER COLUMN next_nonce SET NOT NULL, ALTER COLUMN next_nonce SET DEFAULT 0;
+`
+	down17 = `
+ALTER TABLE keys ALTER COLUMN next_nonce DROP NOT NULL, ALTER COLUMN next_nonce SET DEFAULT NULL;
+`
)

+func init() {
+	Migrations = append(Migrations, &gormigrate.Migration{
+		ID: "0017_bptxm_chain_nonce_fastforward",
+		Migrate: func(db *gorm.DB) error {
+			return db.Exec(up17).Error
+		},
+		Rollback: func(db *gorm.DB) error {
+			return db.Exec(down17).Error
+		},
+	})
+}
diff --git a/core/store/models/key.go b/core/store/models/key.go
index 8dccf0b46cc..af6df15ce7d 100644
--- a/core/store/models/key.go
+++ b/core/store/models/key.go
@@ -27,7 +27,7 @@ type Key struct {
 	// This is the nonce that should be used for the next transaction.
// Conceptually equivalent to geth's `PendingNonceAt` but more reliable // because we have a better view of our own transactions - NextNonce *int64 + NextNonce int64 // LastUsed is the time that the address was last assigned to a transaction LastUsed *time.Time // IsFunding marks the address as being used for rescuing the node and the pending transactions diff --git a/core/store/orm/config.go b/core/store/orm/config.go index 1b6a1d095c2..3f5812ad52e 100644 --- a/core/store/orm/config.go +++ b/core/store/orm/config.go @@ -367,6 +367,8 @@ func (c Config) EthBalanceMonitorBlockDelay() uint16 { return c.getWithFallback("EthBalanceMonitorBlockDelay", parseUint16).(uint16) } +// EthReceiptFetchBatchSize controls the number of receipts fetched in each +// request in the EthConfirmer func (c Config) EthReceiptFetchBatchSize() uint32 { return c.viper.GetUint32(EnvVarName("EthReceiptFetchBatchSize")) } diff --git a/core/store/presenters/presenters.go b/core/store/presenters/presenters.go index 41595fafa90..4641663363c 100644 --- a/core/store/presenters/presenters.go +++ b/core/store/presenters/presenters.go @@ -35,7 +35,7 @@ type ETHKey struct { Address string `json:"address"` EthBalance *assets.Eth `json:"ethBalance"` LinkBalance *assets.Link `json:"linkBalance"` - NextNonce *int64 `json:"nextNonce"` + NextNonce int64 `json:"nextNonce"` LastUsed *time.Time `json:"lastUsed"` IsFunding bool `json:"isFunding"` CreatedAt time.Time `json:"createdAt"` diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md index d398fd1ff30..4da6d847268 100644 --- a/docs/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -7,6 +7,12 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +### Fixed + +- Chainlink node now automatically sets the correct nonce on startup if you are restoring from a previous backup (manual setnextnonce is no longer necessary). 
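For context, the mechanism behind this fix: on startup the NonceSyncer asks the chain for each key's pending nonce and fast-forwards the locally stored `next_nonce` whenever the chain is ahead (it also backfills the on-chain transactions it finds, as the tests above exercise). A minimal sketch of the core comparison follows; `fastForwardNonce` and `localNext` are hypothetical names, and using go-ethereum's `ethclient` directly is an assumption for the sake of a self-contained example, not the shipped implementation:

```go
package main

import (
	"context"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethclient"
)

// fastForwardNonce returns the nonce a node should use for its next
// transaction, preferring the chain's pending nonce whenever it is ahead of
// the locally persisted one, which is exactly the situation after restoring
// an old database backup.
func fastForwardNonce(ctx context.Context, client *ethclient.Client, addr common.Address, localNext int64) (int64, error) {
	// Same RPC the NonceSyncer tests mock above ("PendingNonceAt").
	chainNext, err := client.PendingNonceAt(ctx, addr)
	if err != nil {
		return localNext, err
	}
	if int64(chainNext) > localNext {
		return int64(chainNext), nil
	}
	return localNext, nil
}
```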
+ +--- 0.10.3 + ### Added - Add `ADMIN_CREDENTIALS_FILE` configuration variable From 5eaa2b8a3e204c50ba8f414298e8560172aaf073 Mon Sep 17 00:00:00 2001 From: PiotrTrzpil Date: Tue, 16 Mar 2021 12:20:13 +0100 Subject: [PATCH 034/116] database backups using pgdump --- .../continuous-integration-workflow.yml | 2 + .gitignore | 4 + core/services/chainlink/application.go | 10 ++ core/services/periodicbackup/backup.go | 139 ++++++++++++++++++ core/services/periodicbackup/backup_test.go | 36 +++++ .../periodicbackup/restore_db_example.sh | 12 ++ core/store/orm/config.go | 11 ++ core/store/orm/schema.go | 2 + core/store/store.go | 6 + docs/CHANGELOG.md | 7 + 10 files changed, 229 insertions(+) create mode 100644 core/services/periodicbackup/backup.go create mode 100644 core/services/periodicbackup/backup_test.go create mode 100755 core/services/periodicbackup/restore_db_example.sh diff --git a/.github/workflows/continuous-integration-workflow.yml b/.github/workflows/continuous-integration-workflow.yml index 771a78b353d..69f0df5d848 100644 --- a/.github/workflows/continuous-integration-workflow.yml +++ b/.github/workflows/continuous-integration-workflow.yml @@ -42,6 +42,8 @@ jobs: uses: docker://postgres with: args: psql -v ON_ERROR_STOP=1 --username postgres -h postgres -c "CREATE USER chainlink NOSUPERUSER CREATEDB;" + - name: Install Postgres for CLI tools + run: wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add - && echo "deb http://apt.postgresql.org/pub/repos/apt/ `lsb_release -cs`-pgdg main" |sudo tee /etc/apt/sources.list.d/pgdg.list && sudo apt update && sudo apt install -y postgresql-client-13 - name: Cache Yarn dependencies uses: actions/cache@v2 with: diff --git a/.gitignore b/.gitignore index 6cfb84f80f8..f36fd4b58bf 100644 --- a/.gitignore +++ b/.gitignore @@ -37,3 +37,7 @@ dockercfg env credentials.env gcr_creds.env + +# DB backups + +cl_backup_*.tar.gz \ No newline at end of file diff --git a/core/services/chainlink/application.go b/core/services/chainlink/application.go index 564ac4ac280..4e17e8781ec 100644 --- a/core/services/chainlink/application.go +++ b/core/services/chainlink/application.go @@ -14,6 +14,7 @@ import ( "github.com/smartcontractkit/chainlink/core/services/fluxmonitorv2" "github.com/smartcontractkit/chainlink/core/services/gasupdater" "github.com/smartcontractkit/chainlink/core/services/keeper" + "github.com/smartcontractkit/chainlink/core/services/periodicbackup" "github.com/smartcontractkit/chainlink/core/services/telemetry" "gorm.io/gorm" @@ -158,6 +159,15 @@ func NewApplication(config *orm.Config, ethClient eth.Client, advisoryLocker pos logger.Debugw("GasUpdater: dynamic gas updating is disabled", "ethGasPriceDefault", store.Config.EthGasPriceDefault()) } + if store.Config.DatabaseBackupEnabled() && store.Config.DatabaseBackupFrequency() > 0 { + logger.Debug("DatabaseBackup: periodic database backups are enabled") + + databaseBackup := periodicbackup.NewDatabaseBackup(store.Config.DatabaseBackupFrequency(), store.Config.DatabaseURL(), config.RootDir(), logger.Default) + subservices = append(subservices, databaseBackup) + } else { + logger.Debug("DatabaseBackup: periodic database backups are disabled") + } + runExecutor := services.NewRunExecutor(store, statsPusher) runQueue := services.NewRunQueue(runExecutor) runManager := services.NewRunManager(runQueue, config, store.ORM, statsPusher, store.Clock) diff --git a/core/services/periodicbackup/backup.go b/core/services/periodicbackup/backup.go new file mode 100644 index 
00000000000..d035fd89acb --- /dev/null +++ b/core/services/periodicbackup/backup.go @@ -0,0 +1,139 @@ +package periodicbackup + +import ( + "fmt" + "io/ioutil" + "net/url" + "os" + "os/exec" + "path/filepath" + "time" + + "github.com/pkg/errors" + "github.com/smartcontractkit/chainlink/core/logger" +) + +var ( + filePattern = "cl_backup_%s.tar.gz" + minBackupFrequency = time.Minute +) + +type backupResult struct { + size int64 + path string +} + +type ( + DatabaseBackup interface { + Start() error + Close() error + RunBackupGracefully() + } + + databaseBackup struct { + logger *logger.Logger + databaseURL url.URL + frequency time.Duration + outputParentDir string + done chan bool + } +) + +func NewDatabaseBackup(frequency time.Duration, databaseURL url.URL, outputParentDir string, logger *logger.Logger) DatabaseBackup { + return &databaseBackup{ + logger, + databaseURL, + frequency, + outputParentDir, + make(chan bool), + } +} + +func (backup databaseBackup) Start() error { + + if backup.frequencyIsTooSmall() { + return errors.Errorf("Database backup frequency (%s=%v) is too small. Please set it to at least %s", "DATABASE_BACKUP_FREQUENCY", backup.frequency, minBackupFrequency) + } + + ticker := time.NewTicker(backup.frequency) + + go func() { + for { + select { + case <-backup.done: + ticker.Stop() + return + case <-ticker.C: + backup.RunBackupGracefully() + } + } + }() + + return nil +} + +func (backup databaseBackup) Close() error { + backup.done <- true + return nil +} + +func (backup *databaseBackup) frequencyIsTooSmall() bool { + return backup.frequency < minBackupFrequency +} + +func (backup *databaseBackup) RunBackupGracefully() { + backup.logger.Info("DatabaseBackup: Starting database backup...") + startAt := time.Now() + result, err := backup.runBackup() + duration := time.Since(startAt) + if err != nil { + backup.logger.Errorw("DatabaseBackup: Failed", "duration", duration, "error", err) + } else { + backup.logger.Infow("DatabaseBackup: Database backup finished successfully.", "duration", duration, "fileSize", result.size, "filePath", result.path) + } +} + +func (backup *databaseBackup) runBackup() (*backupResult, error) { + + tmpFile, err := ioutil.TempFile(backup.outputParentDir, "db_backup") + if err != nil { + return nil, errors.Wrap(err, "Failed to create a tmp file") + } + err = os.Remove(tmpFile.Name()) + if err != nil { + return nil, errors.Wrap(err, "Failed to remove the tmp file before running backup") + } + + cmd := exec.Command( + "pg_dump", backup.databaseURL.String(), + "-f", tmpFile.Name(), + "-F", "t", // format: tar + ) + + _, err = cmd.Output() + + if err != nil { + if ee, ok := err.(*exec.ExitError); ok { + return nil, errors.Wrap(err, fmt.Sprintf("pg_dump failed with output: %s", string(ee.Stderr))) + } + return nil, errors.Wrap(err, "pg_dump failed") + } + + finalFilePath := filepath.Join(backup.outputParentDir, fmt.Sprintf(filePattern, time.Now().UTC().Format("2006-01-02T15-04-05Z"))) + _ = os.Remove(finalFilePath) + err = os.Rename(tmpFile.Name(), finalFilePath) + if err != nil { + _ = os.Remove(tmpFile.Name()) + return nil, errors.Wrap(err, "Failed to rename the temp file to the final backup file") + } + + file, err := os.Stat(finalFilePath) + if err != nil { + return nil, errors.Wrap(err, "Failed to access the final backup file") + } + + return &backupResult{ + size: file.Size(), + path: finalFilePath, + }, nil +} diff --git a/core/services/periodicbackup/backup_test.go b/core/services/periodicbackup/backup_test.go new file mode 100644 index 
00000000000..cc656b5aa54 --- /dev/null +++ b/core/services/periodicbackup/backup_test.go @@ -0,0 +1,36 @@ +package periodicbackup + +import ( + "os" + "testing" + "time" + + "github.com/smartcontractkit/chainlink/core/logger" + "github.com/smartcontractkit/chainlink/core/store/orm" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestPeriodicBackup_RunBackup(t *testing.T) { + rawConfig := orm.NewConfig() + periodicBackup := NewDatabaseBackup(time.Minute, rawConfig.DatabaseURL(), os.TempDir(), logger.Default).(*databaseBackup) + assert.False(t, periodicBackup.frequencyIsTooSmall()) + + result, err := periodicBackup.runBackup() + require.NoError(t, err, "error not nil for backup") + + defer os.Remove(result.path) + + file, err := os.Stat(result.path) + require.NoError(t, err, "error not nil when checking for output file") + + assert.Greater(t, file.Size(), int64(0)) + assert.Equal(t, file.Size(), result.size) + assert.Contains(t, result.path, "cl_backup") +} + +func TestPeriodicBackup_FrequencyTooSmall(t *testing.T) { + rawConfig := orm.NewConfig() + periodicBackup := NewDatabaseBackup(time.Second, rawConfig.DatabaseURL(), os.TempDir(), logger.Default).(*databaseBackup) + assert.True(t, periodicBackup.frequencyIsTooSmall()) +} diff --git a/core/services/periodicbackup/restore_db_example.sh b/core/services/periodicbackup/restore_db_example.sh new file mode 100755 index 00000000000..3ba34cea512 --- /dev/null +++ b/core/services/periodicbackup/restore_db_example.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +DB_FILE="$1" +DB_SUPER_USER="postgres" +DB_USER="postgres" +DB_NAME="chainlink_fallback_db" +DB_HOST_PORT="localhost:5432" + +psql "postgresql://$DB_SUPER_USER@$DB_HOST_PORT/postgres" -c "CREATE DATABASE $DB_NAME" +psql "postgresql://$DB_SUPER_USER@$DB_HOST_PORT/postgres" -c "GRANT ALL PRIVILEGES ON DATABASE $DB_NAME TO $DB_USER;" + +pg_restore -d "postgresql://$DB_SUPER_USER@$DB_HOST_PORT/$DB_NAME" "$DB_FILE" \ No newline at end of file diff --git a/core/store/orm/config.go b/core/store/orm/config.go index 3f5812ad52e..3dabc9763b7 100644 --- a/core/store/orm/config.go +++ b/core/store/orm/config.go @@ -264,6 +264,17 @@ func (c Config) DatabaseMaximumTxDuration() time.Duration { return c.getWithFallback("DatabaseMaximumTxDuration", parseDuration).(time.Duration) } +// DatabaseBackupEnabled turns on the database backup on node start if set to true +func (c Config) DatabaseBackupEnabled() bool { + return c.getWithFallback("DatabaseBackupEnabled", parseBool).(bool) +} + +// DatabaseBackupFrequency turns on the periodic database backup if set to a positive value +// DatabaseBackupEnabled must be then set to true as well +func (c Config) DatabaseBackupFrequency() time.Duration { + return c.getWithFallback("DatabaseBackupFrequency", parseDuration).(time.Duration) +} + // DatabaseTimeout represents how long to tolerate non response from the DB. 
func (c Config) DatabaseTimeout() models.Duration { return models.MustMakeDuration(c.getWithFallback("DatabaseTimeout", parseDuration).(time.Duration)) diff --git a/core/store/orm/schema.go b/core/store/orm/schema.go index b3eea4f1686..d71d4687c91 100644 --- a/core/store/orm/schema.go +++ b/core/store/orm/schema.go @@ -29,6 +29,8 @@ type ConfigSchema struct { DatabaseListenerMinReconnectInterval time.Duration `env:"DATABASE_LISTENER_MIN_RECONNECT_INTERVAL" default:"1m"` DatabaseListenerMaxReconnectDuration time.Duration `env:"DATABASE_LISTENER_MAX_RECONNECT_DURATION" default:"10m"` DatabaseMaximumTxDuration time.Duration `env:"DATABASE_MAXIMUM_TX_DURATION" default:"30m"` + DatabaseBackupEnabled bool `env:"DATABASE_BACKUP_ENABLED" default:"false"` + DatabaseBackupFrequency time.Duration `env:"DATABASE_BACKUP_FREQUENCY" default:"0m"` DefaultHTTPLimit int64 `env:"DEFAULT_HTTP_LIMIT" default:"32768"` DefaultHTTPTimeout models.Duration `env:"DEFAULT_HTTP_TIMEOUT" default:"15s"` DefaultHTTPAllowUnrestrictedNetworkAccess bool `env:"DEFAULT_HTTP_ALLOW_UNRESTRICTED_NETWORK_ACCESS" default:"false"` diff --git a/core/store/store.go b/core/store/store.go index 1397a927a90..a9d44cbd5d1 100644 --- a/core/store/store.go +++ b/core/store/store.go @@ -7,6 +7,8 @@ import ( "sync" "github.com/coreos/go-semver/semver" + "github.com/smartcontractkit/chainlink/core/logger" + "github.com/smartcontractkit/chainlink/core/services/periodicbackup" "github.com/smartcontractkit/chainlink/core/static" "github.com/smartcontractkit/chainlink/core/gracefulpanic" @@ -251,6 +253,10 @@ func initializeORM(config *orm.Config, shutdownSignal gracefulpanic.Signal) (*or if err != nil { return nil, errors.Wrap(err, "initializeORM#NewORM") } + if config.DatabaseBackupEnabled() { + databaseBackup := periodicbackup.NewDatabaseBackup(config.DatabaseBackupFrequency(), config.DatabaseURL(), config.RootDir(), logger.Default) + databaseBackup.RunBackupGracefully() + } if err = CheckSquashUpgrade(orm.DB); err != nil { panic(err) } diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md index 4da6d847268..034576e85b5 100644 --- a/docs/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -54,6 +54,13 @@ Help: "The total number of eth node connection errors", - Support for arbitrum +- Experimental: Add `DATABASE_BACKUP_ENABLED` and `DATABASE_BACKUP_FREQUENCY` configuration variables + +It's now possible to configure database backups: on node start and separately, to be run at given frequency. +If set to true, DATABASE_BACKUP_ENABLED turns on the initial backup on node start. +Additionally, if DATABASE_BACKUP_FREQUENCY variable is set to a duration of at least '1m', it enables periodic backups. 
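As a concrete illustration, an operator who wants an initial backup on node start plus a fresh periodic backup every hour could set (example values only; the variables and their defaults are the ones defined in `schema.go` above):

```
DATABASE_BACKUP_ENABLED=true
DATABASE_BACKUP_FREQUENCY=1h
```

Any Go duration string works for the frequency; note that periodic backups with a frequency below one minute are rejected by the `minBackupFrequency` check in `backup.go`.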
+ + ### Fixed - Improved handling of the case where we exceed the configured TX fee cap in From aa37ae9207ce9769da4f6e0ab70ac9a4ab6d6dd5 Mon Sep 17 00:00:00 2001 From: PiotrTrzpil Date: Tue, 16 Mar 2021 15:36:59 +0100 Subject: [PATCH 035/116] database backups using pgdump --- core/services/periodicbackup/backup.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/core/services/periodicbackup/backup.go b/core/services/periodicbackup/backup.go index d035fd89acb..fa683b89ab0 100644 --- a/core/services/periodicbackup/backup.go +++ b/core/services/periodicbackup/backup.go @@ -14,7 +14,7 @@ import ( ) var ( - filePattern = "cl_backup_%s.tar.gz" + filePattern = "cl_backup.tar.gz" minBackupFrequency = time.Minute ) @@ -119,7 +119,7 @@ func (backup *databaseBackup) runBackup() (*backupResult, error) { return nil, errors.Wrap(err, "pg_dump failed") } - finalFilePath := filepath.Join(backup.outputParentDir, fmt.Sprintf(filePattern, time.Now().UTC().Format("2006-01-02T15-04-05Z"))) + finalFilePath := filepath.Join(backup.outputParentDir, filePattern) _ = os.Remove(finalFilePath) err = os.Rename(tmpFile.Name(), finalFilePath) if err != nil { From 09bb66d66f45262930eafc0a7e3a6cb183f10c72 Mon Sep 17 00:00:00 2001 From: PiotrTrzpil Date: Wed, 17 Mar 2021 17:49:35 +0100 Subject: [PATCH 036/116] version table and alt db url config --- core/services/chainlink/application.go | 2 +- core/services/periodicbackup/backup.go | 39 ++++++++--- core/services/periodicbackup/backup_test.go | 70 +++++++++++++++++-- .../migrations/0016_add_node_version_table.go | 30 ++++++++ core/store/models/node_version.go | 15 ++++ core/store/orm/config.go | 14 ++++ core/store/orm/orm.go | 27 +++++++ core/store/orm/orm_test.go | 30 ++++++++ core/store/orm/schema.go | 1 + core/store/store.go | 18 ++++- 10 files changed, 228 insertions(+), 18 deletions(-) create mode 100644 core/store/migrations/0016_add_node_version_table.go create mode 100644 core/store/models/node_version.go diff --git a/core/services/chainlink/application.go b/core/services/chainlink/application.go index 4e17e8781ec..52f656cf166 100644 --- a/core/services/chainlink/application.go +++ b/core/services/chainlink/application.go @@ -162,7 +162,7 @@ func NewApplication(config *orm.Config, ethClient eth.Client, advisoryLocker pos if store.Config.DatabaseBackupEnabled() && store.Config.DatabaseBackupFrequency() > 0 { logger.Debug("DatabaseBackup: periodic database backups are enabled") - databaseBackup := periodicbackup.NewDatabaseBackup(store.Config.DatabaseBackupFrequency(), store.Config.DatabaseURL(), config.RootDir(), logger.Default) + databaseBackup := periodicbackup.NewDatabaseBackup(store.Config, logger.Default) subservices = append(subservices, databaseBackup) } else { logger.Debug("DatabaseBackup: periodic database backups are disabled") diff --git a/core/services/periodicbackup/backup.go b/core/services/periodicbackup/backup.go index fa683b89ab0..a55a06dfeb0 100644 --- a/core/services/periodicbackup/backup.go +++ b/core/services/periodicbackup/backup.go @@ -11,10 +11,11 @@ import ( "github.com/pkg/errors" "github.com/smartcontractkit/chainlink/core/logger" + "github.com/smartcontractkit/chainlink/core/static" ) var ( - filePattern = "cl_backup.tar.gz" + filePattern = "cl_backup_%s.tar.gz" minBackupFrequency = time.Minute ) @@ -27,7 +28,7 @@ type ( DatabaseBackup interface { Start() error Close() error - RunBackupGracefully() + RunBackupGracefully(version string) } databaseBackup struct { @@ -37,14 +38,27 @@ type ( outputParentDir string done 
chan bool } + + Config interface { + DatabaseBackupFrequency() time.Duration + DatabaseBackupURL() *url.URL + DatabaseURL() url.URL + RootDir() string + } ) -func NewDatabaseBackup(frequency time.Duration, databaseURL url.URL, outputParentDir string, logger *logger.Logger) DatabaseBackup { +func NewDatabaseBackup(config Config, logger *logger.Logger) DatabaseBackup { + + dbUrl := config.DatabaseURL() + dbBackupUrl := config.DatabaseBackupURL() + if dbBackupUrl != nil { + dbUrl = *dbBackupUrl + } return &databaseBackup{ logger, - databaseURL, - frequency, - outputParentDir, + dbUrl, + config.DatabaseBackupFrequency(), + config.RootDir(), make(chan bool), } } @@ -64,7 +78,7 @@ func (backup databaseBackup) Start() error { ticker.Stop() return case <-ticker.C: - backup.RunBackupGracefully() + backup.RunBackupGracefully(static.Version) } } }() @@ -81,10 +95,10 @@ func (backup *databaseBackup) frequencyIsTooSmall() bool { return backup.frequency < minBackupFrequency } -func (backup *databaseBackup) RunBackupGracefully() { +func (backup *databaseBackup) RunBackupGracefully(version string) { backup.logger.Info("DatabaseBackup: Starting database backup...") startAt := time.Now() - result, err := backup.runBackup() + result, err := backup.runBackup(version) duration := time.Since(startAt) if err != nil { backup.logger.Errorw("DatabaseBackup: Failed", "duration", duration, "error", err) @@ -93,7 +107,7 @@ func (backup *databaseBackup) RunBackupGracefully() { } } -func (backup *databaseBackup) runBackup() (*backupResult, error) { +func (backup *databaseBackup) runBackup(version string) (*backupResult, error) { tmpFile, err := ioutil.TempFile(backup.outputParentDir, "db_backup") if err != nil { @@ -119,7 +133,10 @@ func (backup *databaseBackup) runBackup() (*backupResult, error) { return nil, errors.Wrap(err, "pg_dump failed") } - finalFilePath := filepath.Join(backup.outputParentDir, filePattern) + if version == "" { + version = "initial" + } + finalFilePath := filepath.Join(backup.outputParentDir, fmt.Sprintf(filePattern, version)) _ = os.Remove(finalFilePath) err = os.Rename(tmpFile.Name(), finalFilePath) if err != nil { diff --git a/core/services/periodicbackup/backup_test.go b/core/services/periodicbackup/backup_test.go index cc656b5aa54..4a0fb7cab7e 100644 --- a/core/services/periodicbackup/backup_test.go +++ b/core/services/periodicbackup/backup_test.go @@ -1,6 +1,7 @@ package periodicbackup import ( + "net/url" "os" "testing" "time" @@ -13,10 +14,11 @@ import ( func TestPeriodicBackup_RunBackup(t *testing.T) { rawConfig := orm.NewConfig() - periodicBackup := NewDatabaseBackup(time.Minute, rawConfig.DatabaseURL(), os.TempDir(), logger.Default).(*databaseBackup) + backupConfig := newTestConfig(time.Minute, nil, rawConfig.DatabaseURL(), os.TempDir()) + periodicBackup := NewDatabaseBackup(backupConfig, logger.Default).(*databaseBackup) assert.False(t, periodicBackup.frequencyIsTooSmall()) - result, err := periodicBackup.runBackup() + result, err := periodicBackup.runBackup("0.9.9") require.NoError(t, err, "error not nil for backup") defer os.Remove(result.path) @@ -26,11 +28,71 @@ func TestPeriodicBackup_RunBackup(t *testing.T) { assert.Greater(t, file.Size(), int64(0)) assert.Equal(t, file.Size(), result.size) - assert.Contains(t, result.path, "cl_backup") + assert.Contains(t, result.path, "cl_backup_0.9.9") +} + +func TestPeriodicBackup_RunBackupWithoutVersion(t *testing.T) { + rawConfig := orm.NewConfig() + backupConfig := newTestConfig(time.Minute, nil, rawConfig.DatabaseURL(), os.TempDir()) + 
periodicBackup := NewDatabaseBackup(backupConfig, logger.Default).(*databaseBackup) + assert.False(t, periodicBackup.frequencyIsTooSmall()) + + result, err := periodicBackup.runBackup("") + require.NoError(t, err, "error not nil for backup") + + defer os.Remove(result.path) + + file, err := os.Stat(result.path) + require.NoError(t, err, "error not nil when checking for output file") + + assert.Greater(t, file.Size(), int64(0)) + assert.Equal(t, file.Size(), result.size) + assert.Contains(t, result.path, "cl_backup_initial") +} + +func TestPeriodicBackup_RunBackupViaAltUrl(t *testing.T) { + rawConfig := orm.NewConfig() + altUrl, _ := url.Parse("postgresql//invalid") + backupConfig := newTestConfig(time.Minute, altUrl, rawConfig.DatabaseURL(), os.TempDir()) + periodicBackup := NewDatabaseBackup(backupConfig, logger.Default).(*databaseBackup) + assert.False(t, periodicBackup.frequencyIsTooSmall()) + + _, err := periodicBackup.runBackup("") + require.Error(t, err, "connection to database \"postgresql//invalid\" failed") } func TestPeriodicBackup_FrequencyTooSmall(t *testing.T) { rawConfig := orm.NewConfig() - periodicBackup := NewDatabaseBackup(time.Second, rawConfig.DatabaseURL(), os.TempDir(), logger.Default).(*databaseBackup) + backupConfig := newTestConfig(time.Second, nil, rawConfig.DatabaseURL(), os.TempDir()) + periodicBackup := NewDatabaseBackup(backupConfig, logger.Default).(*databaseBackup) assert.True(t, periodicBackup.frequencyIsTooSmall()) } + +type testConfig struct { + databaseBackupFrequency time.Duration + databaseBackupURL *url.URL + databaseURL url.URL + rootDir string +} + +func (config testConfig) DatabaseBackupFrequency() time.Duration { + return config.databaseBackupFrequency +} +func (config testConfig) DatabaseBackupURL() *url.URL { + return config.databaseBackupURL +} +func (config testConfig) DatabaseURL() url.URL { + return config.databaseURL +} +func (config testConfig) RootDir() string { + return config.rootDir +} + +func newTestConfig(frequency time.Duration, databaseBackupURL *url.URL, databaseURL url.URL, outputParentDir string) testConfig { + return testConfig{ + databaseBackupFrequency: frequency, + databaseBackupURL: databaseBackupURL, + databaseURL: databaseURL, + rootDir: outputParentDir, + } +} diff --git a/core/store/migrations/0016_add_node_version_table.go b/core/store/migrations/0016_add_node_version_table.go new file mode 100644 index 00000000000..7c644b80da9 --- /dev/null +++ b/core/store/migrations/0016_add_node_version_table.go @@ -0,0 +1,30 @@ +package migrations + +import ( + "github.com/go-gormigrate/gormigrate/v2" + "gorm.io/gorm" +) + +const up16 = ` + CREATE TABLE "node_versions" ( + "version" TEXT PRIMARY KEY, + "created_at" timestamp without time zone NOT NULL + ); +` + +const down16 = ` + DROP TABLE IF EXISTS "node_version"; + DROP TABLE IF EXISTS "node_versions"; +` + +func init() { + Migrations = append(Migrations, &gormigrate.Migration{ + ID: "0016_add_node_version_table", + Migrate: func(db *gorm.DB) error { + return db.Exec(up16).Error + }, + Rollback: func(db *gorm.DB) error { + return db.Exec(down16).Error + }, + }) +} diff --git a/core/store/models/node_version.go b/core/store/models/node_version.go new file mode 100644 index 00000000000..1692fac9ccd --- /dev/null +++ b/core/store/models/node_version.go @@ -0,0 +1,15 @@ +package models + +import "time" + +type NodeVersion struct { + Version string `gorm:"primary_key"` + CreatedAt time.Time `gorm:"index"` +} + +func NewNodeVersion(version string) NodeVersion { + return NodeVersion{ 
+		Version:   version,
+		CreatedAt: time.Now(),
+	}
+}
diff --git a/core/store/orm/config.go b/core/store/orm/config.go
index 3dabc9763b7..604d1c349ee 100644
--- a/core/store/orm/config.go
+++ b/core/store/orm/config.go
@@ -275,6 +275,20 @@ func (c Config) DatabaseBackupFrequency() time.Duration {
 	return c.getWithFallback("DatabaseBackupFrequency", parseDuration).(time.Duration)
 }
 
+// DatabaseBackupURL configures the URL for the database to backup, if it's to be different from the main one
+func (c Config) DatabaseBackupURL() *url.URL {
+	s := c.viper.GetString(EnvVarName("DatabaseBackupURL"))
+	if s == "" {
+		return nil
+	}
+	uri, err := url.Parse(s)
+	if err != nil {
+		logger.Errorf("invalid database backup url %s", s)
+		return nil
+	}
+	return uri
+}
+
 // DatabaseTimeout represents how long to tolerate non response from the DB.
 func (c Config) DatabaseTimeout() models.Duration {
 	return models.MustMakeDuration(c.getWithFallback("DatabaseTimeout", parseDuration).(time.Duration))
diff --git a/core/store/orm/orm.go b/core/store/orm/orm.go
index 56e48834a52..b629c1f60db 100644
--- a/core/store/orm/orm.go
+++ b/core/store/orm/orm.go
@@ -141,6 +141,33 @@ func (orm *ORM) Unscoped() *ORM {
 	}
 }
 
+// UpsertNodeVersion inserts a new NodeVersion
+func (orm *ORM) UpsertNodeVersion(version models.NodeVersion) error {
+	if err := orm.MustEnsureAdvisoryLock(); err != nil {
+		return err
+	}
+	return orm.DB.Exec(`
+	INSERT INTO node_versions (
+		version, created_at
+	) VALUES (
+		?, ?
+	) ON CONFLICT DO NOTHING
+	`, version.Version, version.CreatedAt).Error
+}
+
+// FindLatestNodeVersion looks up the latest node version
+func (orm *ORM) FindLatestNodeVersion() (*models.NodeVersion, error) {
+	if err := orm.MustEnsureAdvisoryLock(); err != nil {
+		return nil, err
+	}
+	var nodeVersion models.NodeVersion
+	err := orm.DB.Order("created_at DESC").First(&nodeVersion).Error
+	if err == gorm.ErrRecordNotFound {
+		return nil, nil
+	}
+	return &nodeVersion, err
+}
+
 // FindBridge looks up a Bridge by its Name.
func (orm *ORM) FindBridge(name models.TaskType) (bt models.BridgeType, err error) { if err := orm.MustEnsureAdvisoryLock(); err != nil { diff --git a/core/store/orm/orm_test.go b/core/store/orm/orm_test.go index 5e80ad4ecd9..83ce5db7314 100644 --- a/core/store/orm/orm_test.go +++ b/core/store/orm/orm_test.go @@ -39,6 +39,36 @@ func TestORM_AllNotFound(t *testing.T) { assert.Equal(t, 0, len(jobs), "Queried array should be empty") } +func TestORM_NodeVersion(t *testing.T) { + t.Parallel() + store, cleanup := cltest.NewStore(t) + defer cleanup() + + ver, err := store.FindLatestNodeVersion() + + require.NoError(t, err) + require.NotNil(t, ver) + require.Equal(t, ver.Version, "unset") + + require.NoError(t, store.UpsertNodeVersion(models.NewNodeVersion("9.9.8"))) + + ver, err = store.FindLatestNodeVersion() + + require.NoError(t, err) + require.NotNil(t, ver) + require.Equal(t, ver.Version, "9.9.8") + + require.NoError(t, store.UpsertNodeVersion(models.NewNodeVersion("9.9.8"))) + require.NoError(t, store.UpsertNodeVersion(models.NewNodeVersion("9.9.7"))) + require.NoError(t, store.UpsertNodeVersion(models.NewNodeVersion("9.9.9"))) + + ver, err = store.FindLatestNodeVersion() + + require.NoError(t, err) + require.NotNil(t, ver) + require.Equal(t, ver.Version, "9.9.9") +} + func TestORM_CreateJob(t *testing.T) { t.Parallel() store, cleanup := cltest.NewStore(t) diff --git a/core/store/orm/schema.go b/core/store/orm/schema.go index d71d4687c91..cc9c0573675 100644 --- a/core/store/orm/schema.go +++ b/core/store/orm/schema.go @@ -31,6 +31,7 @@ type ConfigSchema struct { DatabaseMaximumTxDuration time.Duration `env:"DATABASE_MAXIMUM_TX_DURATION" default:"30m"` DatabaseBackupEnabled bool `env:"DATABASE_BACKUP_ENABLED" default:"false"` DatabaseBackupFrequency time.Duration `env:"DATABASE_BACKUP_FREQUENCY" default:"0m"` + DatabaseBackupURL *url.URL `env:"DATABASE_BACKUP_URL" default:""` DefaultHTTPLimit int64 `env:"DEFAULT_HTTP_LIMIT" default:"32768"` DefaultHTTPTimeout models.Duration `env:"DEFAULT_HTTP_TIMEOUT" default:"15s"` DefaultHTTPAllowUnrestrictedNetworkAccess bool `env:"DEFAULT_HTTP_ALLOW_UNRESTRICTED_NETWORK_ACCESS" default:"false"` diff --git a/core/store/store.go b/core/store/store.go index a9d44cbd5d1..43ebfd34225 100644 --- a/core/store/store.go +++ b/core/store/store.go @@ -254,8 +254,17 @@ func initializeORM(config *orm.Config, shutdownSignal gracefulpanic.Signal) (*or return nil, errors.Wrap(err, "initializeORM#NewORM") } if config.DatabaseBackupEnabled() { - databaseBackup := periodicbackup.NewDatabaseBackup(config.DatabaseBackupFrequency(), config.DatabaseURL(), config.RootDir(), logger.Default) - databaseBackup.RunBackupGracefully() + + version, err2 := orm.FindLatestNodeVersion() + if err2 != nil { + return nil, errors.Wrap(err2, "initializeORM#FindLatestNodeVersion") + } + var versionString string + if version != nil { + versionString = version.Version + } + databaseBackup := periodicbackup.NewDatabaseBackup(config, logger.Default) + databaseBackup.RunBackupGracefully(versionString) } if err = CheckSquashUpgrade(orm.DB); err != nil { panic(err) @@ -270,6 +279,11 @@ func initializeORM(config *orm.Config, shutdownSignal gracefulpanic.Signal) (*or return nil, errors.Wrap(err, "initializeORM#Migrate") } } + version := models.NewNodeVersion(static.Version) + err = orm.UpsertNodeVersion(version) + if err != nil { + return nil, errors.Wrap(err, "initializeORM#UpsertNodeVersion") + } orm.SetLogging(config.LogSQLStatements()) return orm, nil } From dd79ae9f29e8c4d3d0117fc38830ee09ce605a35 
Mon Sep 17 00:00:00 2001 From: PiotrTrzpil Date: Wed, 17 Mar 2021 18:12:19 +0100 Subject: [PATCH 037/116] update migrations --- core/store/migrations/0016_add_node_version_table.go | 1 - 1 file changed, 1 deletion(-) diff --git a/core/store/migrations/0016_add_node_version_table.go b/core/store/migrations/0016_add_node_version_table.go index 7c644b80da9..8a2e0d859b5 100644 --- a/core/store/migrations/0016_add_node_version_table.go +++ b/core/store/migrations/0016_add_node_version_table.go @@ -13,7 +13,6 @@ const up16 = ` ` const down16 = ` - DROP TABLE IF EXISTS "node_version"; DROP TABLE IF EXISTS "node_versions"; ` From 51441d2678a502252c44c38293fa0c856037c32a Mon Sep 17 00:00:00 2001 From: PiotrTrzpil Date: Wed, 17 Mar 2021 18:31:16 +0100 Subject: [PATCH 038/116] tweak version as unset --- core/services/periodicbackup/backup.go | 3 --- core/services/periodicbackup/backup_test.go | 4 ++-- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/core/services/periodicbackup/backup.go b/core/services/periodicbackup/backup.go index a55a06dfeb0..37fd23958c0 100644 --- a/core/services/periodicbackup/backup.go +++ b/core/services/periodicbackup/backup.go @@ -133,9 +133,6 @@ func (backup *databaseBackup) runBackup(version string) (*backupResult, error) { return nil, errors.Wrap(err, "pg_dump failed") } - if version == "" { - version = "initial" - } finalFilePath := filepath.Join(backup.outputParentDir, fmt.Sprintf(filePattern, version)) _ = os.Remove(finalFilePath) err = os.Rename(tmpFile.Name(), finalFilePath) diff --git a/core/services/periodicbackup/backup_test.go b/core/services/periodicbackup/backup_test.go index 4a0fb7cab7e..b17515152e0 100644 --- a/core/services/periodicbackup/backup_test.go +++ b/core/services/periodicbackup/backup_test.go @@ -37,7 +37,7 @@ func TestPeriodicBackup_RunBackupWithoutVersion(t *testing.T) { periodicBackup := NewDatabaseBackup(backupConfig, logger.Default).(*databaseBackup) assert.False(t, periodicBackup.frequencyIsTooSmall()) - result, err := periodicBackup.runBackup("") + result, err := periodicBackup.runBackup("unset") require.NoError(t, err, "error not nil for backup") defer os.Remove(result.path) @@ -47,7 +47,7 @@ func TestPeriodicBackup_RunBackupWithoutVersion(t *testing.T) { assert.Greater(t, file.Size(), int64(0)) assert.Equal(t, file.Size(), result.size) - assert.Contains(t, result.path, "cl_backup_initial") + assert.Contains(t, result.path, "cl_backup_unset") } func TestPeriodicBackup_RunBackupViaAltUrl(t *testing.T) { From b92f2c69a5d7d776af266fdbbf48d00337be7553 Mon Sep 17 00:00:00 2001 From: PiotrTrzpil Date: Wed, 17 Mar 2021 18:48:09 +0100 Subject: [PATCH 039/116] upsert version fix --- core/store/orm/orm.go | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/core/store/orm/orm.go b/core/store/orm/orm.go index b629c1f60db..289bc94a228 100644 --- a/core/store/orm/orm.go +++ b/core/store/orm/orm.go @@ -146,13 +146,9 @@ func (orm *ORM) UpsertNodeVersion(version models.NodeVersion) error { if err := orm.MustEnsureAdvisoryLock(); err != nil { return err } - return orm.DB.Exec(` - INSERT INTO node_versions ( - version, created_at - ) VALUES ( - ?, ? 
- ) ON CONFLICT DO NOTHING - `, version.Version, version.CreatedAt).Error + return orm.DB.Clauses(clause.OnConflict{ + DoNothing: true, + }).Create(&version).Error } // FindLatestNodeVersion looks up the latest node version From 3b426913464673f0e495a8a8967fef672b8843d5 Mon Sep 17 00:00:00 2001 From: PiotrTrzpil Date: Wed, 17 Mar 2021 20:29:55 +0100 Subject: [PATCH 040/116] fix version creation --- core/services/periodicbackup/backup.go | 3 +++ core/store/orm/orm.go | 3 +++ 2 files changed, 6 insertions(+) diff --git a/core/services/periodicbackup/backup.go b/core/services/periodicbackup/backup.go index 37fd23958c0..64ec1cd17b9 100644 --- a/core/services/periodicbackup/backup.go +++ b/core/services/periodicbackup/backup.go @@ -133,6 +133,9 @@ func (backup *databaseBackup) runBackup(version string) (*backupResult, error) { return nil, errors.Wrap(err, "pg_dump failed") } + if version == "" { + version = "unset" + } finalFilePath := filepath.Join(backup.outputParentDir, fmt.Sprintf(filePattern, version)) _ = os.Remove(finalFilePath) err = os.Rename(tmpFile.Name(), finalFilePath) diff --git a/core/store/orm/orm.go b/core/store/orm/orm.go index 289bc94a228..73d54d7d19c 100644 --- a/core/store/orm/orm.go +++ b/core/store/orm/orm.go @@ -161,6 +161,9 @@ func (orm *ORM) FindLatestNodeVersion() (*models.NodeVersion, error) { if err == gorm.ErrRecordNotFound { return nil, nil } + if err != nil && strings.Contains(err.Error(), "relation \"node_versions\" does not exist") { + return nil, nil + } return &nodeVersion, err } From 5737b14b40c09e1137d0a7d92449151d7842befa Mon Sep 17 00:00:00 2001 From: PiotrTrzpil Date: Wed, 17 Mar 2021 20:35:18 +0100 Subject: [PATCH 041/116] fix version creation --- core/store/orm/orm.go | 1 + 1 file changed, 1 insertion(+) diff --git a/core/store/orm/orm.go b/core/store/orm/orm.go index 73d54d7d19c..32dcc656c3d 100644 --- a/core/store/orm/orm.go +++ b/core/store/orm/orm.go @@ -232,6 +232,7 @@ func (orm *ORM) FindJobWithErrors(id models.JobID) (models.JobSpec, error) { // FindInitiator returns the single initiator defined by the passed ID. 
func (orm *ORM) FindInitiator(ID int64) (initr models.Initiator, err error) { + if err := orm.MustEnsureAdvisoryLock(); err != nil { return initr, err } From 067272b424f90c24a228246618d81ea3ae3fd7f2 Mon Sep 17 00:00:00 2001 From: PiotrTrzpil Date: Wed, 17 Mar 2021 20:36:23 +0100 Subject: [PATCH 042/116] fix migrations --- ...version_table.go => 0017_add_node_version_table.go} | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) rename core/store/migrations/{0016_add_node_version_table.go => 0017_add_node_version_table.go} (76%) diff --git a/core/store/migrations/0016_add_node_version_table.go b/core/store/migrations/0017_add_node_version_table.go similarity index 76% rename from core/store/migrations/0016_add_node_version_table.go rename to core/store/migrations/0017_add_node_version_table.go index 8a2e0d859b5..097ccc22ff2 100644 --- a/core/store/migrations/0016_add_node_version_table.go +++ b/core/store/migrations/0017_add_node_version_table.go @@ -5,25 +5,25 @@ import ( "gorm.io/gorm" ) -const up16 = ` +const up17 = ` CREATE TABLE "node_versions" ( "version" TEXT PRIMARY KEY, "created_at" timestamp without time zone NOT NULL ); ` -const down16 = ` +const down17 = ` DROP TABLE IF EXISTS "node_versions"; ` func init() { Migrations = append(Migrations, &gormigrate.Migration{ - ID: "0016_add_node_version_table", + ID: "0017_add_node_version_table", Migrate: func(db *gorm.DB) error { - return db.Exec(up16).Error + return db.Exec(up17).Error }, Rollback: func(db *gorm.DB) error { - return db.Exec(down16).Error + return db.Exec(down17).Error }, }) } From caf3d98424db7e65802c90793b263ea6dc70c259 Mon Sep 17 00:00:00 2001 From: PiotrTrzpil Date: Thu, 18 Mar 2021 14:54:43 +0100 Subject: [PATCH 043/116] random node version in tests --- .gitignore | 2 +- core/services/chainlink/application.go | 4 ++-- core/store/store.go | 13 ++++++++----- docs/CHANGELOG.md | 10 +++++----- 4 files changed, 16 insertions(+), 13 deletions(-) diff --git a/.gitignore b/.gitignore index f36fd4b58bf..2c03f4d6444 100644 --- a/.gitignore +++ b/.gitignore @@ -40,4 +40,4 @@ gcr_creds.env # DB backups -cl_backup_*.tar.gz \ No newline at end of file +cl_backup_*.tar.gz diff --git a/core/services/chainlink/application.go b/core/services/chainlink/application.go index 52f656cf166..bebc841503f 100644 --- a/core/services/chainlink/application.go +++ b/core/services/chainlink/application.go @@ -160,12 +160,12 @@ func NewApplication(config *orm.Config, ethClient eth.Client, advisoryLocker pos } if store.Config.DatabaseBackupEnabled() && store.Config.DatabaseBackupFrequency() > 0 { - logger.Debug("DatabaseBackup: periodic database backups are enabled") + logger.Infow("DatabaseBackup: periodic database backups are enabled", "frequency", store.Config.DatabaseBackupFrequency()) databaseBackup := periodicbackup.NewDatabaseBackup(store.Config, logger.Default) subservices = append(subservices, databaseBackup) } else { - logger.Debug("DatabaseBackup: periodic database backups are disabled") + logger.Info("DatabaseBackup: periodic database backups are disabled") } runExecutor := services.NewRunExecutor(store, statsPusher) diff --git a/core/store/store.go b/core/store/store.go index 43ebfd34225..1163bb0e75d 100644 --- a/core/store/store.go +++ b/core/store/store.go @@ -2,6 +2,8 @@ package store import ( "context" + "fmt" + "math/rand" "os" "path/filepath" "sync" @@ -65,13 +67,13 @@ func InsecureKeyStoreGen(config *orm.Config) *KeyStore { // NewStore will create a new store func NewStore(config *orm.Config, ethClient eth.Client, 
advisoryLock postgres.AdvisoryLocker, shutdownSignal gracefulpanic.Signal, keyStoreGenerator KeyStoreGenerator) (*Store, error) {
-	return newStoreWithKeyStore(config, ethClient, advisoryLock, keyStoreGenerator, shutdownSignal)
+	return newStoreWithKeyStore(config, ethClient, advisoryLock, keyStoreGenerator, shutdownSignal, static.Version)
 }
 
 // NewInsecureStore creates a new store with the given config using an insecure keystore.
 // NOTE: Should only be used for testing!
 func NewInsecureStore(config *orm.Config, ethClient eth.Client, advisoryLocker postgres.AdvisoryLocker, shutdownSignal gracefulpanic.Signal) (*Store, error) {
-	return newStoreWithKeyStore(config, ethClient, advisoryLocker, InsecureKeyStoreGen, shutdownSignal)
+	return newStoreWithKeyStore(config, ethClient, advisoryLocker, InsecureKeyStoreGen, shutdownSignal, fmt.Sprintf("%s_%d", static.Version, rand.Uint32()))
 }
 
 // TODO(sam): Remove ethClient from here completely after legacy tx manager is gone
@@ -82,11 +84,12 @@ func newStoreWithKeyStore(
 	advisoryLocker postgres.AdvisoryLocker,
 	keyStoreGenerator KeyStoreGenerator,
 	shutdownSignal gracefulpanic.Signal,
+	nodeVersion string,
 ) (*Store, error) {
 	if err := utils.EnsureDirAndMaxPerms(config.RootDir(), os.FileMode(0700)); err != nil {
 		return nil, errors.Wrap(err, "error while creating project root dir")
 	}
-	orm, err := initializeORM(config, shutdownSignal)
+	orm, err := initializeORM(config, shutdownSignal, nodeVersion)
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to initialize ORM")
 	}
@@ -247,7 +250,7 @@ func CheckSquashUpgrade(db *gorm.DB) error {
 	return nil
 }
 
-func initializeORM(config *orm.Config, shutdownSignal gracefulpanic.Signal) (*orm.ORM, error) {
+func initializeORM(config *orm.Config, shutdownSignal gracefulpanic.Signal, nodeVersion string) (*orm.ORM, error) {
 	dbURL := config.DatabaseURL()
 	orm, err := orm.NewORM(dbURL.String(), config.DatabaseTimeout(), shutdownSignal, config.GetDatabaseDialectConfiguredOrDefault(), config.GetAdvisoryLockIDConfiguredOrDefault(), config.GlobalLockRetryInterval().Duration(), config.ORMMaxOpenConns(), config.ORMMaxIdleConns())
 	if err != nil {
@@ -279,7 +282,7 @@ func initializeORM(config *orm.Config, shutdownSignal gracefulpanic.Signal) (*or
 		return nil, errors.Wrap(err, "initializeORM#Migrate")
 	}
 	}
-	version := models.NewNodeVersion(static.Version)
+	version := models.NewNodeVersion(nodeVersion)
 	err = orm.UpsertNodeVersion(version)
 	if err != nil {
 		return nil, errors.Wrap(err, "initializeORM#UpsertNodeVersion")
diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index 034576e85b5..9e57d39ca2a 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -54,11 +54,11 @@ Help: "The total number of eth node connection errors",
 
 - Support for arbitrum
 
-- Experimental: Add `DATABASE_BACKUP_ENABLED` and `DATABASE_BACKUP_FREQUENCY` configuration variables
-
-It's now possible to configure database backups: on node start and separately, to be run at given frequency.
-If set to true, DATABASE_BACKUP_ENABLED turns on the initial backup on node start.
-Additionally, if DATABASE_BACKUP_FREQUENCY variable is set to a duration of at least '1m', it enables periodic backups.
+- Experimental: Add `DATABASE_BACKUP_MODE`, `DATABASE_BACKUP_FREQUENCY` and `DATABASE_BACKUP_URL` configuration variables
+
+It's now possible to configure database backups: on node start and separately, to be run at given frequency. If set to
+`lite` or `full`, DATABASE_BACKUP_MODE enables the initial backup on node start (partial or full, respectively).
Additionally, if DATABASE_BACKUP_FREQUENCY variable +is set to a duration of at least '1m', it enables periodic backups. ### Fixed From 3ab6847efa1df371a4c541840ef78b89b1a15648 Mon Sep 17 00:00:00 2001 From: PiotrTrzpil Date: Thu, 18 Mar 2021 15:31:32 +0100 Subject: [PATCH 044/116] insert node version in transaction --- .../store/migrations/0017_add_node_version_table.go | 2 +- core/store/orm/orm.go | 13 ++++++++++--- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/core/store/migrations/0017_add_node_version_table.go b/core/store/migrations/0017_add_node_version_table.go index 097ccc22ff2..a5f106b526a 100644 --- a/core/store/migrations/0017_add_node_version_table.go +++ b/core/store/migrations/0017_add_node_version_table.go @@ -6,7 +6,7 @@ import ( ) const up17 = ` - CREATE TABLE "node_versions" ( + CREATE TABLE IF NOT EXISTS "node_versions" ( "version" TEXT PRIMARY KEY, "created_at" timestamp without time zone NOT NULL ); diff --git a/core/store/orm/orm.go b/core/store/orm/orm.go index 32dcc656c3d..14612419158 100644 --- a/core/store/orm/orm.go +++ b/core/store/orm/orm.go @@ -146,9 +146,16 @@ func (orm *ORM) UpsertNodeVersion(version models.NodeVersion) error { if err := orm.MustEnsureAdvisoryLock(); err != nil { return err } - return orm.DB.Clauses(clause.OnConflict{ - DoNothing: true, - }).Create(&version).Error + + return orm.Transaction(func(tx *gorm.DB) error { + err := tx.Clauses(clause.OnConflict{ + DoNothing: true, + }).Create(&version).Error + if err != nil { + return err + } + return nil + }) } // FindLatestNodeVersion looks up the latest node version From d48b893f5954368e4586d63b2f8c61625ca5adb5 Mon Sep 17 00:00:00 2001 From: PiotrTrzpil Date: Thu, 18 Mar 2021 15:45:39 +0100 Subject: [PATCH 045/116] random node version if unset --- core/store/store.go | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/core/store/store.go b/core/store/store.go index 1163bb0e75d..123d5e4545c 100644 --- a/core/store/store.go +++ b/core/store/store.go @@ -67,13 +67,13 @@ func InsecureKeyStoreGen(config *orm.Config) *KeyStore { // NewStore will create a new store func NewStore(config *orm.Config, ethClient eth.Client, advisoryLock postgres.AdvisoryLocker, shutdownSignal gracefulpanic.Signal, keyStoreGenerator KeyStoreGenerator) (*Store, error) { - return newStoreWithKeyStore(config, ethClient, advisoryLock, keyStoreGenerator, shutdownSignal, static.Version) + return newStoreWithKeyStore(config, ethClient, advisoryLock, keyStoreGenerator, shutdownSignal) } // NewInsecureStore creates a new store with the given config using an insecure keystore. // NOTE: Should only be used for testing! 
func NewInsecureStore(config *orm.Config, ethClient eth.Client, advisoryLocker postgres.AdvisoryLocker, shutdownSignal gracefulpanic.Signal) (*Store, error) { - return newStoreWithKeyStore(config, ethClient, advisoryLocker, InsecureKeyStoreGen, shutdownSignal, fmt.Sprintf("%s_%d", static.Version, rand.Uint32())) + return newStoreWithKeyStore(config, ethClient, advisoryLocker, InsecureKeyStoreGen, shutdownSignal) } // TODO(sam): Remove ethClient from here completely after legacy tx manager is gone @@ -84,12 +84,12 @@ func newStoreWithKeyStore( advisoryLocker postgres.AdvisoryLocker, keyStoreGenerator KeyStoreGenerator, shutdownSignal gracefulpanic.Signal, - nodeVersion string, ) (*Store, error) { if err := utils.EnsureDirAndMaxPerms(config.RootDir(), os.FileMode(0700)); err != nil { return nil, errors.Wrap(err, "error while creating project root dir") } - orm, err := initializeORM(config, shutdownSignal, nodeVersion) + + orm, err := initializeORM(config, shutdownSignal) if err != nil { return nil, errors.Wrap(err, "failed to initialize ORM") } @@ -250,7 +250,7 @@ func CheckSquashUpgrade(db *gorm.DB) error { return nil } -func initializeORM(config *orm.Config, shutdownSignal gracefulpanic.Signal, nodeVersion string) (*orm.ORM, error) { +func initializeORM(config *orm.Config, shutdownSignal gracefulpanic.Signal) (*orm.ORM, error) { dbURL := config.DatabaseURL() orm, err := orm.NewORM(dbURL.String(), config.DatabaseTimeout(), shutdownSignal, config.GetDatabaseDialectConfiguredOrDefault(), config.GetAdvisoryLockIDConfiguredOrDefault(), config.GlobalLockRetryInterval().Duration(), config.ORMMaxOpenConns(), config.ORMMaxIdleConns()) if err != nil { @@ -282,6 +282,11 @@ func initializeORM(config *orm.Config, shutdownSignal gracefulpanic.Signal, node return nil, errors.Wrap(err, "initializeORM#Migrate") } } + + nodeVersion := static.Version + if nodeVersion == "unset" { + nodeVersion = fmt.Sprintf("random_%d", rand.Uint32()) + } version := models.NewNodeVersion(nodeVersion) err = orm.UpsertNodeVersion(version) if err != nil { From 28dfcc282a33de0544edcade590ce7476881198a Mon Sep 17 00:00:00 2001 From: PiotrTrzpil Date: Thu, 18 Mar 2021 15:56:58 +0100 Subject: [PATCH 046/116] fix test --- core/store/orm/orm.go | 1 + core/store/orm/orm_test.go | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/core/store/orm/orm.go b/core/store/orm/orm.go index 14612419158..91aafb4885f 100644 --- a/core/store/orm/orm.go +++ b/core/store/orm/orm.go @@ -169,6 +169,7 @@ func (orm *ORM) FindLatestNodeVersion() (*models.NodeVersion, error) { return nil, nil } if err != nil && strings.Contains(err.Error(), "relation \"node_versions\" does not exist") { + logger.Default.Debug("Failed to find any node version in the DB, the node_versions table does not exist yet.") return nil, nil } return &nodeVersion, err diff --git a/core/store/orm/orm_test.go b/core/store/orm/orm_test.go index 83ce5db7314..e0b6a32118a 100644 --- a/core/store/orm/orm_test.go +++ b/core/store/orm/orm_test.go @@ -48,7 +48,7 @@ func TestORM_NodeVersion(t *testing.T) { require.NoError(t, err) require.NotNil(t, ver) - require.Equal(t, ver.Version, "unset") + require.Contains(t, ver.Version, "random") require.NoError(t, store.UpsertNodeVersion(models.NewNodeVersion("9.9.8"))) @@ -56,7 +56,7 @@ func TestORM_NodeVersion(t *testing.T) { require.NoError(t, err) require.NotNil(t, ver) - require.Equal(t, ver.Version, "9.9.8") + require.Equal(t, "9.9.8", ver.Version) require.NoError(t, store.UpsertNodeVersion(models.NewNodeVersion("9.9.8"))) 
require.NoError(t, store.UpsertNodeVersion(models.NewNodeVersion("9.9.7"))) @@ -66,7 +66,7 @@ func TestORM_NodeVersion(t *testing.T) { require.NoError(t, err) require.NotNil(t, ver) - require.Equal(t, ver.Version, "9.9.9") + require.Equal(t, "9.9.9", ver.Version) } func TestORM_CreateJob(t *testing.T) { From 164a00f37ba1456cecd87397b20ac70e2116057a Mon Sep 17 00:00:00 2001 From: PiotrTrzpil Date: Thu, 18 Mar 2021 18:07:22 +0100 Subject: [PATCH 047/116] feedback --- core/services/chainlink/application.go | 2 +- core/services/periodicbackup/backup.go | 45 +++++++++++++++---- core/services/periodicbackup/backup_test.go | 4 ++ .../migrations/0017_add_node_version_table.go | 10 ++--- core/store/orm/config.go | 25 +++++++++-- core/store/orm/orm.go | 1 - core/store/orm/schema.go | 2 +- core/store/store.go | 18 ++++---- docs/CHANGELOG.md | 7 +-- 9 files changed, 82 insertions(+), 32 deletions(-) diff --git a/core/services/chainlink/application.go b/core/services/chainlink/application.go index bebc841503f..01abe16fdb0 100644 --- a/core/services/chainlink/application.go +++ b/core/services/chainlink/application.go @@ -159,7 +159,7 @@ func NewApplication(config *orm.Config, ethClient eth.Client, advisoryLocker pos logger.Debugw("GasUpdater: dynamic gas updating is disabled", "ethGasPriceDefault", store.Config.EthGasPriceDefault()) } - if store.Config.DatabaseBackupEnabled() && store.Config.DatabaseBackupFrequency() > 0 { + if store.Config.DatabaseBackupMode() != orm.DatabaseBackupModeNone && store.Config.DatabaseBackupFrequency() > 0 { logger.Infow("DatabaseBackup: periodic database backups are enabled", "frequency", store.Config.DatabaseBackupFrequency()) databaseBackup := periodicbackup.NewDatabaseBackup(store.Config, logger.Default) diff --git a/core/services/periodicbackup/backup.go b/core/services/periodicbackup/backup.go index 64ec1cd17b9..4335aaeca4c 100644 --- a/core/services/periodicbackup/backup.go +++ b/core/services/periodicbackup/backup.go @@ -12,11 +12,26 @@ import ( "github.com/pkg/errors" "github.com/smartcontractkit/chainlink/core/logger" "github.com/smartcontractkit/chainlink/core/static" + "github.com/smartcontractkit/chainlink/core/store/orm" ) var ( - filePattern = "cl_backup_%s.tar.gz" + filePattern = "cl_backup_%s.dump" minBackupFrequency = time.Minute + + excludedDataFromTables = []string{ + "job_runs", + "task_runs", + "eth_task_run_txes", + "run_requests", + "run_results", + "sync_events", + //"eth_tx_attempts", + //"eth_receipts", + //"eth_txes", + "pipeline_runs", + "pipeline_task_runs", + } ) type backupResult struct { @@ -34,12 +49,14 @@ type ( databaseBackup struct { logger *logger.Logger databaseURL url.URL + mode orm.DatabaseBackupMode frequency time.Duration outputParentDir string done chan bool } Config interface { + DatabaseBackupMode() orm.DatabaseBackupMode DatabaseBackupFrequency() time.Duration DatabaseBackupURL() *url.URL DatabaseURL() url.URL @@ -48,7 +65,6 @@ type ( ) func NewDatabaseBackup(config Config, logger *logger.Logger) DatabaseBackup { - dbUrl := config.DatabaseURL() dbBackupUrl := config.DatabaseBackupURL() if dbBackupUrl != nil { @@ -57,6 +73,7 @@ func NewDatabaseBackup(config Config, logger *logger.Logger) DatabaseBackup { return &databaseBackup{ logger, dbUrl, + config.DatabaseBackupMode(), config.DatabaseBackupFrequency(), config.RootDir(), make(chan bool), @@ -96,7 +113,7 @@ func (backup *databaseBackup) frequencyIsTooSmall() bool { } func (backup *databaseBackup) RunBackupGracefully(version string) { - backup.logger.Info("DatabaseBackup: 
Starting database backup...")
+	backup.logger.Debugw("DatabaseBackup: Starting database backup...", "mode", backup.mode, "url", backup.databaseURL.String(), "directory", backup.outputParentDir)
 	startAt := time.Now()
 	result, err := backup.runBackup(version)
 	duration := time.Since(startAt)
@@ -109,7 +126,7 @@ func (backup *databaseBackup) RunBackupGracefully(version string) {
 
 func (backup *databaseBackup) runBackup(version string) (*backupResult, error) {
 
-	tmpFile, err := ioutil.TempFile(backup.outputParentDir, "db_backup")
+	tmpFile, err := ioutil.TempFile(backup.outputParentDir, "cl_backup_tmp_")
 	if err != nil {
 		return nil, errors.Wrap(err, "Failed to create a tmp file")
 	}
@@ -118,10 +135,22 @@ func (backup *databaseBackup) runBackup(version string) (*backupResult, error) {
 		return nil, errors.Wrap(err, "Failed to remove the tmp file before running backup")
 	}
 
-	cmd := exec.Command(
-		"pg_dump", backup.databaseURL.String(),
+	args := []string{
+		backup.databaseURL.String(),
 		"-f", tmpFile.Name(),
-		"-F", "t", // format: tar
+		"-F", "c", // format: custom (zipped)
+	}
+
+	if backup.mode == orm.DatabaseBackupModeLite {
+		for _, table := range excludedDataFromTables {
+			args = append(args, fmt.Sprintf("--exclude-table-data=%s", table))
+		}
+	}
+
+	backup.logger.Debugf("DatabaseBackup: Running pg_dump with: %v", args)
+
+	cmd := exec.Command(
+		"pg_dump", args...,
 	)
 
 	_, err = cmd.Output()
@@ -134,7 +163,7 @@ func (backup *databaseBackup) runBackup(version string) (*backupResult, error) {
 	}
 
 	if version == "" {
-		version = "unset"
+		version = "unknown"
 	}
 	finalFilePath := filepath.Join(backup.outputParentDir, fmt.Sprintf(filePattern, version))
 	_ = os.Remove(finalFilePath)
diff --git a/core/services/periodicbackup/backup_test.go b/core/services/periodicbackup/backup_test.go
index b17515152e0..ae0a3e5e3f7 100644
--- a/core/services/periodicbackup/backup_test.go
+++ b/core/services/periodicbackup/backup_test.go
@@ -70,6 +70,7 @@ func TestPeriodicBackup_FrequencyTooSmall(t *testing.T) {
 
 type testConfig struct {
 	databaseBackupFrequency time.Duration
+	databaseBackupMode      orm.DatabaseBackupMode
 	databaseBackupURL       *url.URL
 	databaseURL             url.URL
 	rootDir                 string
@@ -78,6 +79,9 @@ type testConfig struct {
 func (config testConfig) DatabaseBackupFrequency() time.Duration {
 	return config.databaseBackupFrequency
 }
+func (config testConfig) DatabaseBackupMode() orm.DatabaseBackupMode {
+	return config.databaseBackupMode
+}
 func (config testConfig) DatabaseBackupURL() *url.URL {
 	return config.databaseBackupURL
 }
diff --git a/core/store/migrations/0017_add_node_version_table.go b/core/store/migrations/0017_add_node_version_table.go
index a5f106b526a..9c8756713d3 100644
--- a/core/store/migrations/0017_add_node_version_table.go
+++ b/core/store/migrations/0017_add_node_version_table.go
@@ -6,14 +6,14 @@ import (
 )
 
 const up17 = `
-	CREATE TABLE IF NOT EXISTS "node_versions" (
-		"version" TEXT PRIMARY KEY,
-		"created_at" timestamp without time zone NOT NULL
-	);
+CREATE TABLE IF NOT EXISTS "node_versions" (
+	"version" TEXT PRIMARY KEY,
+	"created_at" timestamp without time zone NOT NULL
+);
 `
 
 const down17 = `
-	DROP TABLE IF EXISTS "node_versions";
+DROP TABLE IF EXISTS "node_versions";
 `
 
 func init() {
diff --git a/core/store/orm/config.go b/core/store/orm/config.go
index 604d1c349ee..2074d83371f 100644
--- a/core/store/orm/config.go
+++ b/core/store/orm/config.go
@@ -264,13 +264,13 @@ func (c Config) DatabaseMaximumTxDuration() time.Duration {
 	return c.getWithFallback("DatabaseMaximumTxDuration", parseDuration).(time.Duration)
 }
 
-// DatabaseBackupEnabled
turns on the database backup on node start if set to true
-func (c Config) DatabaseBackupEnabled() bool {
-	return c.getWithFallback("DatabaseBackupEnabled", parseBool).(bool)
+// DatabaseBackupMode returns the database backup mode configured for the node
+func (c Config) DatabaseBackupMode() DatabaseBackupMode {
+	return c.getWithFallback("DatabaseBackupMode", parseDatabaseBackupMode).(DatabaseBackupMode)
}

// DatabaseBackupFrequency turns on the periodic database backup if set to a positive value
-// DatabaseBackupEnabled must be then set to true as well
+// DatabaseBackupMode must then also be set to a value other than "none"
func (c Config) DatabaseBackupFrequency() time.Duration {
	return c.getWithFallback("DatabaseBackupFrequency", parseDuration).(time.Duration)
}
@@ -1141,3 +1141,20 @@ func (ll LogLevel) ForGin() string {
 		return gin.ReleaseMode
 	}
 }
+
+type DatabaseBackupMode string
+
+var (
+	DatabaseBackupModeNone DatabaseBackupMode = "none"
+	DatabaseBackupModeLite DatabaseBackupMode = "lite"
+	DatabaseBackupModeFull DatabaseBackupMode = "full"
+)
+
+func parseDatabaseBackupMode(s string) (interface{}, error) {
+	switch DatabaseBackupMode(s) {
+	case DatabaseBackupModeNone, DatabaseBackupModeLite, DatabaseBackupModeFull:
+		return DatabaseBackupMode(s), nil
+	default:
+		return "", fmt.Errorf("unable to parse %v into DatabaseBackupMode, must be one of: \"%s\", \"%s\", \"%s\"", s, DatabaseBackupModeNone, DatabaseBackupModeLite, DatabaseBackupModeFull)
+	}
+}
diff --git a/core/store/orm/orm.go b/core/store/orm/orm.go
index 91aafb4885f..6704bd728dc 100644
--- a/core/store/orm/orm.go
+++ b/core/store/orm/orm.go
@@ -240,7 +240,6 @@ func (orm *ORM) FindJobWithErrors(id models.JobID) (models.JobSpec, error) {
 
 // FindInitiator returns the single initiator defined by the passed ID.
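Making the mode a typed string keeps invalid values out at parse time, so later call sites can use plain equality checks such as config.DatabaseBackupMode() != orm.DatabaseBackupModeNone. A self-contained sketch of the same validation step (the env-var handling here is simplified; the real accessor goes through getWithFallback with the schema default of "none"):

package main

import (
	"fmt"
	"os"
)

type DatabaseBackupMode string

const (
	None DatabaseBackupMode = "none"
	Lite DatabaseBackupMode = "lite"
	Full DatabaseBackupMode = "full"
)

func parse(s string) (DatabaseBackupMode, error) {
	switch m := DatabaseBackupMode(s); m {
	case None, Lite, Full:
		return m, nil
	default:
		return None, fmt.Errorf("unable to parse %q into DatabaseBackupMode, must be one of: %q, %q, %q", s, None, Lite, Full)
	}
}

func main() {
	// Mirrors reading DATABASE_BACKUP_MODE from the environment.
	mode, err := parse(os.Getenv("DATABASE_BACKUP_MODE"))
	if err != nil {
		fmt.Println("invalid value, falling back to default:", err)
		mode = None
	}
	fmt.Println("backup mode:", mode)
}
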
func (orm *ORM) FindInitiator(ID int64) (initr models.Initiator, err error) { - if err := orm.MustEnsureAdvisoryLock(); err != nil { return initr, err } diff --git a/core/store/orm/schema.go b/core/store/orm/schema.go index cc9c0573675..20b6b56beba 100644 --- a/core/store/orm/schema.go +++ b/core/store/orm/schema.go @@ -29,7 +29,7 @@ type ConfigSchema struct { DatabaseListenerMinReconnectInterval time.Duration `env:"DATABASE_LISTENER_MIN_RECONNECT_INTERVAL" default:"1m"` DatabaseListenerMaxReconnectDuration time.Duration `env:"DATABASE_LISTENER_MAX_RECONNECT_DURATION" default:"10m"` DatabaseMaximumTxDuration time.Duration `env:"DATABASE_MAXIMUM_TX_DURATION" default:"30m"` - DatabaseBackupEnabled bool `env:"DATABASE_BACKUP_ENABLED" default:"false"` + DatabaseBackupMode string `env:"DATABASE_BACKUP_MODE" default:"none"` DatabaseBackupFrequency time.Duration `env:"DATABASE_BACKUP_FREQUENCY" default:"0m"` DatabaseBackupURL *url.URL `env:"DATABASE_BACKUP_URL" default:""` DefaultHTTPLimit int64 `env:"DEFAULT_HTTP_LIMIT" default:"32768"` diff --git a/core/store/store.go b/core/store/store.go index 123d5e4545c..b924af6fce1 100644 --- a/core/store/store.go +++ b/core/store/store.go @@ -252,13 +252,13 @@ func CheckSquashUpgrade(db *gorm.DB) error { func initializeORM(config *orm.Config, shutdownSignal gracefulpanic.Signal) (*orm.ORM, error) { dbURL := config.DatabaseURL() - orm, err := orm.NewORM(dbURL.String(), config.DatabaseTimeout(), shutdownSignal, config.GetDatabaseDialectConfiguredOrDefault(), config.GetAdvisoryLockIDConfiguredOrDefault(), config.GlobalLockRetryInterval().Duration(), config.ORMMaxOpenConns(), config.ORMMaxIdleConns()) + dbOrm, err := orm.NewORM(dbURL.String(), config.DatabaseTimeout(), shutdownSignal, config.GetDatabaseDialectConfiguredOrDefault(), config.GetAdvisoryLockIDConfiguredOrDefault(), config.GlobalLockRetryInterval().Duration(), config.ORMMaxOpenConns(), config.ORMMaxIdleConns()) if err != nil { return nil, errors.Wrap(err, "initializeORM#NewORM") } - if config.DatabaseBackupEnabled() { + if config.DatabaseBackupMode() != orm.DatabaseBackupModeNone { - version, err2 := orm.FindLatestNodeVersion() + version, err2 := dbOrm.FindLatestNodeVersion() if err2 != nil { return nil, errors.Wrap(err2, "initializeORM#FindLatestNodeVersion") } @@ -269,13 +269,13 @@ func initializeORM(config *orm.Config, shutdownSignal gracefulpanic.Signal) (*or databaseBackup := periodicbackup.NewDatabaseBackup(config, logger.Default) databaseBackup.RunBackupGracefully(versionString) } - if err = CheckSquashUpgrade(orm.DB); err != nil { + if err = CheckSquashUpgrade(dbOrm.DB); err != nil { panic(err) } if config.MigrateDatabase() { - orm.SetLogging(config.LogSQLStatements() || config.LogSQLMigrations()) + dbOrm.SetLogging(config.LogSQLStatements() || config.LogSQLMigrations()) - err = orm.RawDBWithAdvisoryLock(func(db *gorm.DB) error { + err = dbOrm.RawDBWithAdvisoryLock(func(db *gorm.DB) error { return migrations.Migrate(db) }) if err != nil { @@ -288,10 +288,10 @@ func initializeORM(config *orm.Config, shutdownSignal gracefulpanic.Signal) (*or nodeVersion = fmt.Sprintf("random_%d", rand.Uint32()) } version := models.NewNodeVersion(nodeVersion) - err = orm.UpsertNodeVersion(version) + err = dbOrm.UpsertNodeVersion(version) if err != nil { return nil, errors.Wrap(err, "initializeORM#UpsertNodeVersion") } - orm.SetLogging(config.LogSQLStatements()) - return orm, nil + dbOrm.SetLogging(config.LogSQLStatements()) + return dbOrm, nil } diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md index 
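Worth noting about the initializeORM change above: the backup runs before CheckSquashUpgrade and before any migration, and it is labelled with the version recorded by the previously running binary (FindLatestNodeVersion), while UpsertNodeVersion only writes the new version after migrations succeed. A condensed, illustrative rendering of that ordering (names and signatures here are stand-ins, not the package's API; error handling elided):

// startupOrder sketches the sequence implemented in initializeORM.
func startupOrder(
	backupEnabled bool,
	latestRecordedVersion func() string, // version written by the previous binary
	runBackup func(version string),
	migrate func(),
	recordCurrentVersion func(),
) {
	if backupEnabled {
		// Snapshot first, labelled with the pre-upgrade version, so a failed
		// migration can always be rolled back to a known-good state.
		runBackup(latestRecordedVersion())
	}
	migrate()
	recordCurrentVersion() // only now does node_versions record the new version
}
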
9e57d39ca2a..35397b5f8ed 100644 --- a/docs/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -56,15 +56,16 @@ Help: "The total number of eth node connection errors", - Experimental: Add `DATABASE_BACKUP_MODE`, `DATABASE_BACKUP_FREQUENCY` and `DATABASE_BACKUP_URL` configuration variables -It's now possible to configure database backups: on node start and separately, to be run at given frequency. If set to true, +It's now possible to configure database backups: on node start and separately, to be run at given frequency. + DATABASE_BACKUP_MODE enables the initial backup on node start (either full or partial). Additionally, if DATABASE_BACKUP_FREQUENCY variable is set to a duration of at least '1m', it enables periodic backups. +DATABASE_BACKUP_URL can be optionally set to point to e.g. a database replica, in order to avoid excessive load on the main one. ### Fixed -- Improved handling of the case where we exceed the configured TX fee cap in - geth. +- Improved handling of the case where we exceed the configured TX fee cap in geth. Node will now fatally error jobs if the total transaction costs exceeds the configured cap (default 1 Eth). Also, it will no longer continue to bump gas on From 0ff171956acc761530b91c5dcae7735e31f3019a Mon Sep 17 00:00:00 2001 From: PiotrTrzpil Date: Thu, 18 Mar 2021 18:14:14 +0100 Subject: [PATCH 048/116] tweak test --- core/services/periodicbackup/backup.go | 10 +++--- core/services/periodicbackup/backup_test.go | 36 +++++++++++++++++---- 2 files changed, 35 insertions(+), 11 deletions(-) diff --git a/core/services/periodicbackup/backup.go b/core/services/periodicbackup/backup.go index 4335aaeca4c..0385eedec60 100644 --- a/core/services/periodicbackup/backup.go +++ b/core/services/periodicbackup/backup.go @@ -35,8 +35,9 @@ var ( ) type backupResult struct { - size int64 - path string + size int64 + path string + pgDumpArguments []string } type ( @@ -179,7 +180,8 @@ func (backup *databaseBackup) runBackup(version string) (*backupResult, error) { } return &backupResult{ - size: file.Size(), - path: finalFilePath, + size: file.Size(), + path: finalFilePath, + pgDumpArguments: args, }, nil } diff --git a/core/services/periodicbackup/backup_test.go b/core/services/periodicbackup/backup_test.go index ae0a3e5e3f7..d429a23f6aa 100644 --- a/core/services/periodicbackup/backup_test.go +++ b/core/services/periodicbackup/backup_test.go @@ -14,7 +14,7 @@ import ( func TestPeriodicBackup_RunBackup(t *testing.T) { rawConfig := orm.NewConfig() - backupConfig := newTestConfig(time.Minute, nil, rawConfig.DatabaseURL(), os.TempDir()) + backupConfig := newTestConfig(time.Minute, nil, rawConfig.DatabaseURL(), os.TempDir(), orm.DatabaseBackupModeFull) periodicBackup := NewDatabaseBackup(backupConfig, logger.Default).(*databaseBackup) assert.False(t, periodicBackup.frequencyIsTooSmall()) @@ -29,11 +29,32 @@ func TestPeriodicBackup_RunBackup(t *testing.T) { assert.Greater(t, file.Size(), int64(0)) assert.Equal(t, file.Size(), result.size) assert.Contains(t, result.path, "cl_backup_0.9.9") + assert.NotContains(t, result.pgDumpArguments, "--exclude-table-data=pipeline_task_runs") +} + +func TestPeriodicBackup_RunBackupInLiteMode(t *testing.T) { + rawConfig := orm.NewConfig() + backupConfig := newTestConfig(time.Minute, nil, rawConfig.DatabaseURL(), os.TempDir(), orm.DatabaseBackupModeLite) + periodicBackup := NewDatabaseBackup(backupConfig, logger.Default).(*databaseBackup) + assert.False(t, periodicBackup.frequencyIsTooSmall()) + + result, err := periodicBackup.runBackup("0.9.9") + 
require.NoError(t, err, "error not nil for backup") + + defer os.Remove(result.path) + + file, err := os.Stat(result.path) + require.NoError(t, err, "error not nil when checking for output file") + + assert.Greater(t, file.Size(), int64(0)) + assert.Equal(t, file.Size(), result.size) + assert.Contains(t, result.path, "cl_backup_0.9.9") + assert.Contains(t, result.pgDumpArguments, "--exclude-table-data=pipeline_task_runs") } func TestPeriodicBackup_RunBackupWithoutVersion(t *testing.T) { rawConfig := orm.NewConfig() - backupConfig := newTestConfig(time.Minute, nil, rawConfig.DatabaseURL(), os.TempDir()) + backupConfig := newTestConfig(time.Minute, nil, rawConfig.DatabaseURL(), os.TempDir(), orm.DatabaseBackupModeFull) periodicBackup := NewDatabaseBackup(backupConfig, logger.Default).(*databaseBackup) assert.False(t, periodicBackup.frequencyIsTooSmall()) @@ -53,7 +74,7 @@ func TestPeriodicBackup_RunBackupWithoutVersion(t *testing.T) { func TestPeriodicBackup_RunBackupViaAltUrl(t *testing.T) { rawConfig := orm.NewConfig() altUrl, _ := url.Parse("postgresql//invalid") - backupConfig := newTestConfig(time.Minute, altUrl, rawConfig.DatabaseURL(), os.TempDir()) + backupConfig := newTestConfig(time.Minute, altUrl, rawConfig.DatabaseURL(), os.TempDir(), orm.DatabaseBackupModeFull) periodicBackup := NewDatabaseBackup(backupConfig, logger.Default).(*databaseBackup) assert.False(t, periodicBackup.frequencyIsTooSmall()) @@ -63,14 +84,14 @@ func TestPeriodicBackup_RunBackupViaAltUrl(t *testing.T) { func TestPeriodicBackup_FrequencyTooSmall(t *testing.T) { rawConfig := orm.NewConfig() - backupConfig := newTestConfig(time.Second, nil, rawConfig.DatabaseURL(), os.TempDir()) + backupConfig := newTestConfig(time.Second, nil, rawConfig.DatabaseURL(), os.TempDir(), orm.DatabaseBackupModeFull) periodicBackup := NewDatabaseBackup(backupConfig, logger.Default).(*databaseBackup) assert.True(t, periodicBackup.frequencyIsTooSmall()) } type testConfig struct { databaseBackupFrequency time.Duration - databaseBackupMode string + databaseBackupMode orm.DatabaseBackupMode databaseBackupURL *url.URL databaseURL url.URL rootDir string @@ -79,7 +100,7 @@ type testConfig struct { func (config testConfig) DatabaseBackupFrequency() time.Duration { return config.databaseBackupFrequency } -func (config testConfig) DatabaseBackupMode() string { +func (config testConfig) DatabaseBackupMode() orm.DatabaseBackupMode { return config.databaseBackupMode } func (config testConfig) DatabaseBackupURL() *url.URL { @@ -92,9 +113,10 @@ func (config testConfig) RootDir() string { return config.rootDir } -func newTestConfig(frequency time.Duration, databaseBackupURL *url.URL, databaseURL url.URL, outputParentDir string) testConfig { +func newTestConfig(frequency time.Duration, databaseBackupURL *url.URL, databaseURL url.URL, outputParentDir string, mode orm.DatabaseBackupMode) testConfig { return testConfig{ databaseBackupFrequency: frequency, + databaseBackupMode: mode, databaseBackupURL: databaseBackupURL, databaseURL: databaseURL, rootDir: outputParentDir, From 5b27359859e7d5aeb10df087b1f1a41f29cea87a Mon Sep 17 00:00:00 2001 From: PiotrTrzpil Date: Fri, 19 Mar 2021 18:21:08 +0100 Subject: [PATCH 049/116] update docs --- docs/CHANGELOG.md | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md index 35397b5f8ed..3955e5402ed 100644 --- a/docs/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -58,10 +58,19 @@ Help: "The total number of eth node connection errors", It's now possible to 
configure database backups: on node start and separately, to be run at given frequency.
 
-DATABASE_BACKUP_MODE enables the initial backup on node start (either full or partial). Additionally, if DATABASE_BACKUP_FREQUENCY variable
-is set to a duration of at least '1m', it enables periodic backups.
+`DATABASE_BACKUP_MODE` enables the initial backup on node start (with one of the values: `none`, `lite` or `full`, where `lite` excludes
+potentially large tables related to job runs, among others). Additionally, if the `DATABASE_BACKUP_FREQUENCY` variable is set to a duration of
+at least '1m', it enables periodic backups.
+
+`DATABASE_BACKUP_URL` can be optionally set to point to e.g. a database replica, in order to avoid excessive load on the main one.
+
+Example settings:
+
+`DATABASE_BACKUP_MODE="full"` and `DATABASE_BACKUP_FREQUENCY` not set will run a full backup only at the start of the node.
+
+
+`DATABASE_BACKUP_MODE="lite"` and `DATABASE_BACKUP_FREQUENCY="1h"` will lead to a partial backup on node start and then a partial backup every hour.
 
-DATABASE_BACKUP_URL can be optionally set to point to e.g. a database replica, in order to avoid excessive load on the main one.
 
 ### Fixed
 
@@ -96,8 +105,7 @@ period after a reboot, until the gas updater caught up.
 - Performance improvements to OCR job adds. Removed the pipeline_task_specs table
 and added a new column `dot_id` to the pipeline_task_runs table which links a
 pipeline_task_run to a dotID in the pipeline_spec.dot_dag_source.
-
-
+
 ### Changed
 
 - Bump `ORM_MAX_OPEN_CONNS` default from 10 to 20

From b515bafabad49dcee90396d85a9e5d8859f3f68e Mon Sep 17 00:00:00 2001
From: PiotrTrzpil
Date: Fri, 19 Mar 2021 18:30:13 +0100
Subject: [PATCH 050/116] remove commented out

---
 core/services/periodicbackup/backup.go | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/core/services/periodicbackup/backup.go b/core/services/periodicbackup/backup.go
index 0385eedec60..1f763d5e74c 100644
--- a/core/services/periodicbackup/backup.go
+++ b/core/services/periodicbackup/backup.go
@@ -26,9 +26,6 @@ var (
 		"run_requests",
 		"run_results",
 		"sync_events",
-		//"eth_tx_attempts",
-		//"eth_receipts",
-		//"eth_txes",
 		"pipeline_runs",
 		"pipeline_task_runs",
 	}

From c9b0ec332c53c961122998d3106525dafa16a29d Mon Sep 17 00:00:00 2001
From: PiotrTrzpil
Date: Fri, 19 Mar 2021 18:33:40 +0100
Subject: [PATCH 051/116] fix migration

---
 ...version_table.go => 0018_add_node_version_table.go} | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)
 rename core/store/migrations/{0017_add_node_version_table.go => 0018_add_node_version_table.go} (76%)

diff --git a/core/store/migrations/0017_add_node_version_table.go b/core/store/migrations/0018_add_node_version_table.go
similarity index 76%
rename from core/store/migrations/0017_add_node_version_table.go
rename to core/store/migrations/0018_add_node_version_table.go
index 9c8756713d3..7b0af04bce3 100644
--- a/core/store/migrations/0017_add_node_version_table.go
+++ b/core/store/migrations/0018_add_node_version_table.go
@@ -5,25 +5,25 @@ import (
 	"gorm.io/gorm"
 )
 
-const up17 = `
+const up18 = `
 CREATE TABLE IF NOT EXISTS "node_versions" (
 	"version" TEXT PRIMARY KEY,
 	"created_at" timestamp without time zone NOT NULL
 );
 `
 
-const down17 = `
+const down18 = `
 DROP TABLE IF EXISTS "node_versions";
 `
 
 func init() {
 	Migrations = append(Migrations, &gormigrate.Migration{
-		ID: "0017_add_node_version_table",
+		ID: "0018_add_node_version_table",
 		Migrate: func(db *gorm.DB) error {
-			return db.Exec(up17).Error
+			return db.Exec(up18).Error
 		},
Rollback: func(db *gorm.DB) error { - return db.Exec(down17).Error + return db.Exec(down18).Error }, }) } From 409991bf5f45f1c08ebafa49b26b383b0bdaaf33 Mon Sep 17 00:00:00 2001 From: AndrewInsignares Date: Fri, 19 Mar 2021 14:03:16 -0400 Subject: [PATCH 052/116] use zapcore unmarshaller, remove switch --- core/web/log_controller.go | 25 ++----------------------- 1 file changed, 2 insertions(+), 23 deletions(-) diff --git a/core/web/log_controller.go b/core/web/log_controller.go index 3ae7c917aad..09691ade7c7 100644 --- a/core/web/log_controller.go +++ b/core/web/log_controller.go @@ -4,7 +4,6 @@ import ( "fmt" "net/http" "strconv" - "strings" "github.com/gin-gonic/gin" "github.com/smartcontractkit/chainlink/core/logger" @@ -23,27 +22,6 @@ type LoglevelPatchRequest struct { LogSql string `json:"logSql"` } -func getLogLevelFromStr(logLevel string) (zapcore.Level, error) { - switch strings.ToLower(logLevel) { - case "debug": - return zapcore.DebugLevel, nil - case "info": - return zapcore.InfoLevel, nil - case "warn": - return zapcore.WarnLevel, nil - case "error": - return zapcore.ErrorLevel, nil - case "dpanic": - return zapcore.DPanicLevel, nil - case "panic": - return zapcore.PanicLevel, nil - case "fatal": - return zapcore.FatalLevel, nil - default: - return zapcore.InfoLevel, fmt.Errorf("could not parse %s as log level (debug, info, warn, error)", logLevel) - } -} - // SetDebug sets the debug log mode for the logger func (cc *LogController) SetDebug(c *gin.Context) { request := &LoglevelPatchRequest{} @@ -58,7 +36,8 @@ func (cc *LogController) SetDebug(c *gin.Context) { } if request.LogLevel != "" { - ll, err := getLogLevelFromStr(request.LogLevel) + var ll zapcore.Level + err := ll.UnmarshalText([]byte(request.LogLevel)) if err != nil { jsonAPIError(c, http.StatusInternalServerError, err) return From ccc24d905b63d582b83a0b6342b81406b6e006b5 Mon Sep 17 00:00:00 2001 From: AndrewInsignares Date: Fri, 19 Mar 2021 14:11:48 -0400 Subject: [PATCH 053/116] cleaning request param and var names --- core/cmd/remote_client.go | 4 ++-- core/cmd/renderer.go | 6 +++--- core/web/log_controller.go | 26 +++++++++++++------------- core/web/log_controller_test.go | 6 +++--- core/web/presenters/log.go | 4 ++-- 5 files changed, 23 insertions(+), 23 deletions(-) diff --git a/core/cmd/remote_client.go b/core/cmd/remote_client.go index f98f7734b7a..99a9b2ae421 100644 --- a/core/cmd/remote_client.go +++ b/core/cmd/remote_client.go @@ -1200,7 +1200,7 @@ func (cli *Client) SetLogLevel(c *clipkg.Context) (err error) { } logLevel := c.Args().Get(0) - request := web.LoglevelPatchRequest{LogLevel: logLevel, LogSql: ""} + request := web.LogPatchRequest{Level: logLevel} requestData, err := json.Marshal(request) if err != nil { return cli.errorOut(err) @@ -1232,7 +1232,7 @@ func (cli *Client) SetLogSQL(c *clipkg.Context) (err error) { if err != nil { return cli.errorOut(err) } - request := web.LoglevelPatchRequest{LogLevel: "", LogSql: strconv.FormatBool(logSql)} + request := web.LogPatchRequest{SqlEnabled: strconv.FormatBool(logSql)} requestData, err := json.Marshal(request) if err != nil { return cli.errorOut(err) diff --git a/core/cmd/renderer.go b/core/cmd/renderer.go index eedce620d48..5f62054ccc5 100644 --- a/core/cmd/renderer.go +++ b/core/cmd/renderer.go @@ -98,11 +98,11 @@ func (rt RendererTable) Render(v interface{}) error { } func (rt RendererTable) renderLogResource(logResource webPresenters.LogResource) error { - table := rt.newTable([]string{"ID", "LogLevel", "LogSql"}) + table := 
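The removed switch in the log controller above duplicated logic zap already ships: zapcore.Level implements encoding.TextUnmarshaler, and UnmarshalText accepts the same level names (plus dpanic, panic and fatal). A minimal demonstration:

package main

import (
	"fmt"

	"go.uber.org/zap/zapcore"
)

func main() {
	// zapcore.Level parses its own textual representation, which is what
	// makes the hand-rolled getLogLevelFromStr switch redundant.
	var lvl zapcore.Level
	if err := lvl.UnmarshalText([]byte("warn")); err != nil {
		fmt.Println("bad level:", err)
		return
	}
	fmt.Println(lvl) // prints "warn"
}
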
rt.newTable([]string{"ID", "Level", "SqlEnabled"}) table.Append([]string{ logResource.ID, - logResource.LogLevel, - strconv.FormatBool(logResource.LogSql), + logResource.Level, + strconv.FormatBool(logResource.SqlEnabled), }) render("Logs", table) return nil diff --git a/core/web/log_controller.go b/core/web/log_controller.go index 09691ade7c7..2a04c8a6062 100644 --- a/core/web/log_controller.go +++ b/core/web/log_controller.go @@ -17,27 +17,27 @@ type LogController struct { App chainlink.Application } -type LoglevelPatchRequest struct { - LogLevel string `json:"logLevel"` - LogSql string `json:"logSql"` +type LogPatchRequest struct { + Level string `json:"level"` + SqlEnabled string `json:"sqlEnabled"` } // SetDebug sets the debug log mode for the logger func (cc *LogController) SetDebug(c *gin.Context) { - request := &LoglevelPatchRequest{} + request := &LogPatchRequest{} if err := c.ShouldBindJSON(request); err != nil { jsonAPIError(c, http.StatusUnprocessableEntity, err) return } - if request.LogLevel == "" && request.LogSql == "" { + if request.Level == "" && request.SqlEnabled == "" { jsonAPIError(c, http.StatusInternalServerError, fmt.Errorf("please set either logLevel or logSql as params in order to set the log level")) return } - if request.LogLevel != "" { + if request.Level != "" { var ll zapcore.Level - err := ll.UnmarshalText([]byte(request.LogLevel)) + err := ll.UnmarshalText([]byte(request.Level)) if err != nil { jsonAPIError(c, http.StatusInternalServerError, err) return @@ -50,14 +50,14 @@ func (cc *LogController) SetDebug(c *gin.Context) { } } - if request.LogSql != "" { - logSql, err := strconv.ParseBool(request.LogSql) + if request.SqlEnabled != "" { + logSql, err := strconv.ParseBool(request.SqlEnabled) if err != nil { jsonAPIError(c, http.StatusInternalServerError, err) return } - cc.App.GetStore().Config.Set("LOG_SQL", request.LogSql) - err = cc.App.GetStore().SetConfigStrValue("LogSQLStatements", request.LogSql) + cc.App.GetStore().Config.Set("LOG_SQL", request.SqlEnabled) + err = cc.App.GetStore().SetConfigStrValue("LogSQLStatements", request.SqlEnabled) if err != nil { jsonAPIError(c, http.StatusInternalServerError, err) return @@ -72,8 +72,8 @@ func (cc *LogController) SetDebug(c *gin.Context) { JAID: presenters.JAID{ ID: "log", }, - LogLevel: cc.App.GetStore().Config.LogLevel().String(), - LogSql: cc.App.GetStore().Config.LogSQLStatements(), + Level: cc.App.GetStore().Config.LogLevel().String(), + SqlEnabled: cc.App.GetStore().Config.LogSQLStatements(), } jsonAPIResponse(c, response, "log") diff --git a/core/web/log_controller_test.go b/core/web/log_controller_test.go index a53822debf1..46bd23b9153 100644 --- a/core/web/log_controller_test.go +++ b/core/web/log_controller_test.go @@ -66,7 +66,7 @@ func TestLogController_SetDebug(t *testing.T) { for _, tc := range cases { func() { - request := web.LoglevelPatchRequest{LogLevel: tc.logLevel, LogSql: tc.logSql} + request := web.LogPatchRequest{Level: tc.logLevel, SqlEnabled: tc.logSql} requestData, _ := json.Marshal(request) buf := bytes.NewBuffer(requestData) @@ -78,10 +78,10 @@ func TestLogController_SetDebug(t *testing.T) { lR := presenters.LogResource{} require.NoError(t, cltest.ParseJSONAPIResponse(t, resp, &lR)) if tc.logLevel != "" { - assert.Equal(t, tc.logLevel, lR.LogLevel) + assert.Equal(t, tc.logLevel, lR.Level) } if tc.logSql != "" { - assert.Equal(t, tc.logSql, strconv.FormatBool(lR.LogSql)) + assert.Equal(t, tc.logSql, strconv.FormatBool(lR.SqlEnabled)) } assert.Equal(t, tc.expectedLogLevel.String(), 
app.GetStore().Config.LogLevel().String()) }() diff --git a/core/web/presenters/log.go b/core/web/presenters/log.go index eb359eec091..041ad6225fa 100644 --- a/core/web/presenters/log.go +++ b/core/web/presenters/log.go @@ -2,8 +2,8 @@ package presenters type LogResource struct { JAID - LogLevel string `json:"logLevel"` - LogSql bool `json:"logSql"` + Level string `json:"level"` + SqlEnabled bool `json:"sqlEnabled"` } // GetName implements the api2go EntityNamer interface From 9d198254a239a21e94a028722610671123b38e42 Mon Sep 17 00:00:00 2001 From: AndrewInsignares Date: Fri, 19 Mar 2021 14:22:07 -0400 Subject: [PATCH 054/116] parsing param as boolean --- core/cmd/remote_client.go | 2 +- core/web/log_controller.go | 15 +++++---------- 2 files changed, 6 insertions(+), 11 deletions(-) diff --git a/core/cmd/remote_client.go b/core/cmd/remote_client.go index 99a9b2ae421..78bf12077fe 100644 --- a/core/cmd/remote_client.go +++ b/core/cmd/remote_client.go @@ -1232,7 +1232,7 @@ func (cli *Client) SetLogSQL(c *clipkg.Context) (err error) { if err != nil { return cli.errorOut(err) } - request := web.LogPatchRequest{SqlEnabled: strconv.FormatBool(logSql)} + request := web.LogPatchRequest{SqlEnabled: &logSql} requestData, err := json.Marshal(request) if err != nil { return cli.errorOut(err) diff --git a/core/web/log_controller.go b/core/web/log_controller.go index 2a04c8a6062..43ff1da1ab8 100644 --- a/core/web/log_controller.go +++ b/core/web/log_controller.go @@ -19,7 +19,7 @@ type LogController struct { type LogPatchRequest struct { Level string `json:"level"` - SqlEnabled string `json:"sqlEnabled"` + SqlEnabled *bool `json:"sqlEnabled"` } // SetDebug sets the debug log mode for the logger @@ -30,7 +30,7 @@ func (cc *LogController) SetDebug(c *gin.Context) { return } - if request.Level == "" && request.SqlEnabled == "" { + if request.Level == "" && request.SqlEnabled == nil { jsonAPIError(c, http.StatusInternalServerError, fmt.Errorf("please set either logLevel or logSql as params in order to set the log level")) return } @@ -50,19 +50,14 @@ func (cc *LogController) SetDebug(c *gin.Context) { } } - if request.SqlEnabled != "" { - logSql, err := strconv.ParseBool(request.SqlEnabled) - if err != nil { - jsonAPIError(c, http.StatusInternalServerError, err) - return - } + if request.SqlEnabled != nil { cc.App.GetStore().Config.Set("LOG_SQL", request.SqlEnabled) - err = cc.App.GetStore().SetConfigStrValue("LogSQLStatements", request.SqlEnabled) + err := cc.App.GetStore().SetConfigStrValue("LogSQLStatements", strconv.FormatBool(*request.SqlEnabled)) if err != nil { jsonAPIError(c, http.StatusInternalServerError, err) return } - cc.App.GetStore().SetLogging(logSql) + cc.App.GetStore().SetLogging(*request.SqlEnabled) } // Set default logger with new configurations From 0cc7311d684711b9a06148a43e07dba0ad11357d Mon Sep 17 00:00:00 2001 From: AndrewInsignares Date: Fri, 19 Mar 2021 14:25:32 -0400 Subject: [PATCH 055/116] normalizing method naming for route handler --- core/web/log_controller.go | 4 ++-- core/web/router.go | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/core/web/log_controller.go b/core/web/log_controller.go index 43ff1da1ab8..6856732dd5d 100644 --- a/core/web/log_controller.go +++ b/core/web/log_controller.go @@ -22,8 +22,8 @@ type LogPatchRequest struct { SqlEnabled *bool `json:"sqlEnabled"` } -// SetDebug sets the debug log mode for the logger -func (cc *LogController) SetDebug(c *gin.Context) { +// Patch sets a log level and enables sql logging for the logger +func (cc 
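The move from a string to a *bool for sqlEnabled in the patch above is what gives the endpoint three states: with a pointer, an omitted field decodes to nil while an explicit false decodes to a non-nil pointer, whereas a bare bool would collapse both to false. A self-contained illustration:

package main

import (
	"encoding/json"
	"fmt"
)

type LogPatchRequest struct {
	Level      string `json:"level"`
	SqlEnabled *bool  `json:"sqlEnabled"`
}

func main() {
	bodies := []string{
		`{"level":"debug"}`,                    // field omitted: leave the setting alone
		`{"level":"debug","sqlEnabled":false}`, // explicit false: disable SQL logging
	}
	for _, body := range bodies {
		var req LogPatchRequest
		if err := json.Unmarshal([]byte(body), &req); err != nil {
			panic(err)
		}
		if req.SqlEnabled == nil {
			fmt.Println("sqlEnabled not provided")
		} else {
			fmt.Println("sqlEnabled =", *req.SqlEnabled)
		}
	}
}
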
*LogController) Patch(c *gin.Context) { request := &LogPatchRequest{} if err := c.ShouldBindJSON(request); err != nil { jsonAPIError(c, http.StatusUnprocessableEntity, err) diff --git a/core/web/router.go b/core/web/router.go index 2f77f3c199d..655a39619cb 100644 --- a/core/web/router.go +++ b/core/web/router.go @@ -292,7 +292,7 @@ func v2Routes(app chainlink.Application, r *gin.RouterGroup) { authv2.POST("/jobs/:ID/runs", prc.Create) lgc := LogController{app} - authv2.PATCH("/log", lgc.SetDebug) + authv2.PATCH("/log", lgc.Patch) } ping := PingController{app} From db464522b487a608992037a8d74241e42886d5a4 Mon Sep 17 00:00:00 2001 From: Ryan Hall Date: Mon, 8 Mar 2021 17:59:07 -0600 Subject: [PATCH 056/116] update RegistrySynchronizer to use eth events add grace period functionality to keeper fix bug in positioning constant calculation --- core/internal/cltest/cltest.go | 13 +- .../internal/cltest/contract_mock_receiver.go | 19 ++ core/internal/cltest/factories.go | 7 + core/internal/cltest/simulated_backend.go | 17 +- .../keeper_registry_wrapper.go | 94 ++++-- core/services/chainlink/application.go | 2 +- core/services/keeper/delegate.go | 39 ++- core/services/keeper/helpers_test.go | 16 +- core/services/keeper/integration_test.go | 59 +++- core/services/keeper/models.go | 2 +- core/services/keeper/orm.go | 60 +++- core/services/keeper/orm_test.go | 136 +++++--- .../keeper/registry_synchronizer_core.go | 140 ++++++++ .../registry_synchronizer_log_listener.go | 49 +++ .../registry_synchronizer_process_logs.go | 194 +++++++++++ ...nizer.go => registry_synchronizer_sync.go} | 189 ++++------- .../keeper/registry_synchronizer_test.go | 309 +++++++++++++++--- core/services/keeper/upkeep_executer.go | 22 +- core/services/keeper/upkeep_executer_test.go | 30 +- ..._last_run_height_column_to_keeper_table.go | 26 ++ core/store/orm/config.go | 8 + core/store/orm/schema.go | 4 +- core/utils/mailbox.go | 18 + 23 files changed, 1144 insertions(+), 309 deletions(-) create mode 100644 core/services/keeper/registry_synchronizer_core.go create mode 100644 core/services/keeper/registry_synchronizer_log_listener.go create mode 100644 core/services/keeper/registry_synchronizer_process_logs.go rename core/services/keeper/{registry_synchronizer.go => registry_synchronizer_sync.go} (54%) create mode 100644 core/store/migrations/0019_last_run_height_column_to_keeper_table.go diff --git a/core/internal/cltest/cltest.go b/core/internal/cltest/cltest.go index dce55994eea..a5c2726964a 100644 --- a/core/internal/cltest/cltest.go +++ b/core/internal/cltest/cltest.go @@ -1867,6 +1867,7 @@ func EventuallyExpectationsMet(t *testing.T, mock testifyExpectationsAsserter, t } func AssertCount(t *testing.T, store *strpkg.Store, model interface{}, expected int64) { + t.Helper() var count int64 err := store.DB.Model(model).Count(&count).Error require.NoError(t, err) @@ -1882,7 +1883,7 @@ func WaitForCount(t testing.TB, store *strpkg.Store, model interface{}, want int err = store.DB.Model(model).Count(&count).Error assert.NoError(t, err) return count - }, 5*time.Second, DBPollingInterval).Should(gomega.Equal(want)) + }, DBWaitTimeout, DBPollingInterval).Should(gomega.Equal(want)) } func AssertCountStays(t testing.TB, store *strpkg.Store, model interface{}, want int64) { @@ -1896,3 +1897,13 @@ func AssertCountStays(t testing.TB, store *strpkg.Store, model interface{}, want return count }, AsertNoActionTimeout, DBPollingInterval).Should(gomega.Equal(want)) } + +func AssertRecordEventually(t *testing.T, store *strpkg.Store, model 
interface{}, check func() bool) { + t.Helper() + g := gomega.NewGomegaWithT(t) + g.Eventually(func() bool { + err := store.DB.Find(model).Error + require.NoError(t, err, "unable to find record in DB") + return check() + }, DBWaitTimeout, DBPollingInterval).Should(gomega.BeTrue()) +} diff --git a/core/internal/cltest/contract_mock_receiver.go b/core/internal/cltest/contract_mock_receiver.go index 3d002afb4ea..afcc0a3c97a 100644 --- a/core/internal/cltest/contract_mock_receiver.go +++ b/core/internal/cltest/contract_mock_receiver.go @@ -1,6 +1,7 @@ package cltest import ( + "errors" "reflect" "testing" @@ -49,6 +50,24 @@ func (receiver contractMockReceiver) MockResponse(funcName string, responseArgs Return(encoded, nil) } +func (receiver contractMockReceiver) MockRevertResponse(funcName string) *mock.Call { + funcSig := hexutil.Encode(receiver.abi.Methods[funcName].ID) + if len(funcSig) != 10 { + receiver.t.Fatalf("Unable to find Registry contract function with name %s", funcName) + } + + return receiver.ethMock. + On( + "CallContract", + mock.Anything, + mock.MatchedBy(func(callArgs ethereum.CallMsg) bool { + return *callArgs.To == receiver.address && + hexutil.Encode(callArgs.Data)[0:10] == funcSig + }), + mock.Anything). + Return(nil, errors.New("revert")) +} + func (receiver contractMockReceiver) mustEncodeResponse(funcName string, responseArgs []interface{}) []byte { if len(responseArgs) == 0 { return []byte{} diff --git a/core/internal/cltest/factories.go b/core/internal/cltest/factories.go index 5fb9aa315e0..ff0912b5f07 100644 --- a/core/internal/cltest/factories.go +++ b/core/internal/cltest/factories.go @@ -709,6 +709,13 @@ func MustGenerateRandomKey(t testing.TB, opts ...interface{}) models.Key { return key } +func MustInsertHead(t *testing.T, store *strpkg.Store, number int64) models.Head { + h := models.NewHead(big.NewInt(number), NewHash(), NewHash(), 0) + err := store.DB.Create(&h).Error + require.NoError(t, err) + return h +} + func MustInsertV2JobSpec(t *testing.T, store *strpkg.Store, transmitterAddress common.Address) job.Job { t.Helper() diff --git a/core/internal/cltest/simulated_backend.go b/core/internal/cltest/simulated_backend.go index 311647038cc..30b8eab533b 100644 --- a/core/internal/cltest/simulated_backend.go +++ b/core/internal/cltest/simulated_backend.go @@ -269,18 +269,19 @@ func (c *SimulatedBackendClient) blockNumber(number interface{}) (blockNumber *b } func (c *SimulatedBackendClient) HeaderByNumber(ctx context.Context, n *big.Int) (*models.Head, error) { + if n == nil { + n = c.currentBlockNumber() + } header, err := c.b.HeaderByNumber(ctx, n) if err != nil { return nil, err } else if header == nil { return nil, ethereum.NotFound } - if n == nil { - n = c.currentBlockNumber() - } return &models.Head{ - Hash: NewHash(), - Number: n.Int64(), + Hash: header.Hash(), + Number: header.Number.Int64(), + ParentHash: header.ParentHash, }, nil } @@ -333,7 +334,7 @@ func (c *SimulatedBackendClient) SubscribeNewHead( case nil: channel <- nil default: - channel <- &models.Head{Number: h.Number.Int64(), Hash: NewHash()} + channel <- &models.Head{Number: h.Number.Int64(), Hash: h.Hash(), ParentHash: h.ParentHash} } case <-subscription.close: return @@ -416,8 +417,8 @@ func (c *SimulatedBackendClient) BatchCallContext(ctx context.Context, b []rpc.B } // Mine forces the simulated backend to produce a new block every 2 seconds -func Mine(backend *backends.SimulatedBackend) (stopMining func()) { - timer := time.NewTicker(2 * time.Second) +func Mine(backend 
*backends.SimulatedBackend, blockTime time.Duration) (stopMining func()) { + timer := time.NewTicker(blockTime) chStop := make(chan struct{}) wg := sync.WaitGroup{} wg.Add(1) diff --git a/core/internal/gethwrappers/generated/keeper_registry_wrapper/keeper_registry_wrapper.go b/core/internal/gethwrappers/generated/keeper_registry_wrapper/keeper_registry_wrapper.go index 7e33b508137..a43ce9f027e 100644 --- a/core/internal/gethwrappers/generated/keeper_registry_wrapper/keeper_registry_wrapper.go +++ b/core/internal/gethwrappers/generated/keeper_registry_wrapper/keeper_registry_wrapper.go @@ -14,6 +14,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/event" + "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated" ) var ( @@ -45,6 +46,7 @@ func DeployKeeperRegistry(auth *bind.TransactOpts, backend bind.ContractBackend, type KeeperRegistry struct { address common.Address + abi abi.ABI KeeperRegistryCaller KeeperRegistryTransactor KeeperRegistryFilterer @@ -91,11 +93,15 @@ type KeeperRegistryTransactorRaw struct { } func NewKeeperRegistry(address common.Address, backend bind.ContractBackend) (*KeeperRegistry, error) { + abi, err := abi.JSON(strings.NewReader(KeeperRegistryABI)) + if err != nil { + return nil, err + } contract, err := bindKeeperRegistry(address, backend, backend, backend) if err != nil { return nil, err } - return &KeeperRegistry{address: address, KeeperRegistryCaller: KeeperRegistryCaller{contract: contract}, KeeperRegistryTransactor: KeeperRegistryTransactor{contract: contract}, KeeperRegistryFilterer: KeeperRegistryFilterer{contract: contract}}, nil + return &KeeperRegistry{address: address, abi: abi, KeeperRegistryCaller: KeeperRegistryCaller{contract: contract}, KeeperRegistryTransactor: KeeperRegistryTransactor{contract: contract}, KeeperRegistryFilterer: KeeperRegistryFilterer{contract: contract}}, nil } func NewKeeperRegistryCaller(address common.Address, caller bind.ContractCaller) (*KeeperRegistryCaller, error) { @@ -2237,39 +2243,31 @@ type GetUpkeep struct { MaxValidBlocknumber uint64 } -func (_KeeperRegistry *KeeperRegistry) UnpackLog(out interface{}, event string, log types.Log) error { - return _KeeperRegistry.KeeperRegistryFilterer.contract.UnpackLog(out, event, log) -} - -func (_KeeperRegistry *KeeperRegistry) ParseLog(log types.Log) (interface{}, error) { - abi, err := abi.JSON(strings.NewReader(KeeperRegistryABI)) - if err != nil { - return nil, fmt.Errorf("could not parse ABI: " + err.Error()) - } +func (_KeeperRegistry *KeeperRegistry) ParseLog(log types.Log) (generated.AbigenLog, error) { switch log.Topics[0] { - case abi.Events["ConfigSet"].ID: + case _KeeperRegistry.abi.Events["ConfigSet"].ID: return _KeeperRegistry.ParseConfigSet(log) - case abi.Events["FundsAdded"].ID: + case _KeeperRegistry.abi.Events["FundsAdded"].ID: return _KeeperRegistry.ParseFundsAdded(log) - case abi.Events["FundsWithdrawn"].ID: + case _KeeperRegistry.abi.Events["FundsWithdrawn"].ID: return _KeeperRegistry.ParseFundsWithdrawn(log) - case abi.Events["KeepersUpdated"].ID: + case _KeeperRegistry.abi.Events["KeepersUpdated"].ID: return _KeeperRegistry.ParseKeepersUpdated(log) - case abi.Events["OwnershipTransferRequested"].ID: + case _KeeperRegistry.abi.Events["OwnershipTransferRequested"].ID: return _KeeperRegistry.ParseOwnershipTransferRequested(log) - case abi.Events["OwnershipTransferred"].ID: + case _KeeperRegistry.abi.Events["OwnershipTransferred"].ID: return 
_KeeperRegistry.ParseOwnershipTransferred(log) - case abi.Events["PayeeshipTransferRequested"].ID: + case _KeeperRegistry.abi.Events["PayeeshipTransferRequested"].ID: return _KeeperRegistry.ParsePayeeshipTransferRequested(log) - case abi.Events["PayeeshipTransferred"].ID: + case _KeeperRegistry.abi.Events["PayeeshipTransferred"].ID: return _KeeperRegistry.ParsePayeeshipTransferred(log) - case abi.Events["PaymentWithdrawn"].ID: + case _KeeperRegistry.abi.Events["PaymentWithdrawn"].ID: return _KeeperRegistry.ParsePaymentWithdrawn(log) - case abi.Events["UpkeepCanceled"].ID: + case _KeeperRegistry.abi.Events["UpkeepCanceled"].ID: return _KeeperRegistry.ParseUpkeepCanceled(log) - case abi.Events["UpkeepPerformed"].ID: + case _KeeperRegistry.abi.Events["UpkeepPerformed"].ID: return _KeeperRegistry.ParseUpkeepPerformed(log) - case abi.Events["UpkeepRegistered"].ID: + case _KeeperRegistry.abi.Events["UpkeepRegistered"].ID: return _KeeperRegistry.ParseUpkeepRegistered(log) default: @@ -2277,6 +2275,54 @@ func (_KeeperRegistry *KeeperRegistry) ParseLog(log types.Log) (interface{}, err } } +func (KeeperRegistryConfigSet) Topic() common.Hash { + return common.HexToHash("0xf68c98604b3cbc1a909e0df75315ce475fbc89bae7a6ab88b53573897f58d0d7") +} + +func (KeeperRegistryFundsAdded) Topic() common.Hash { + return common.HexToHash("0xafd24114486da8ebfc32f3626dada8863652e187461aa74d4bfa734891506203") +} + +func (KeeperRegistryFundsWithdrawn) Topic() common.Hash { + return common.HexToHash("0xf3b5906e5672f3e524854103bcafbbdba80dbdfeca2c35e116127b1060a68318") +} + +func (KeeperRegistryKeepersUpdated) Topic() common.Hash { + return common.HexToHash("0x056264c94f28bb06c99d13f0446eb96c67c215d8d707bce2655a98ddf1c0b71f") +} + +func (KeeperRegistryOwnershipTransferRequested) Topic() common.Hash { + return common.HexToHash("0xed8889f560326eb138920d842192f0eb3dd22b4f139c87a2c57538e05bae1278") +} + +func (KeeperRegistryOwnershipTransferred) Topic() common.Hash { + return common.HexToHash("0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0") +} + +func (KeeperRegistryPayeeshipTransferRequested) Topic() common.Hash { + return common.HexToHash("0x84f7c7c80bb8ed2279b4aab5f61cd05e6374073d38f46d7f32de8c30e9e38367") +} + +func (KeeperRegistryPayeeshipTransferred) Topic() common.Hash { + return common.HexToHash("0x78af32efdcad432315431e9b03d27e6cd98fb79c405fdc5af7c1714d9c0f75b3") +} + +func (KeeperRegistryPaymentWithdrawn) Topic() common.Hash { + return common.HexToHash("0x9819093176a1851202c7bcfa46845809b4e47c261866550e94ed3775d2f40698") +} + +func (KeeperRegistryUpkeepCanceled) Topic() common.Hash { + return common.HexToHash("0x91cb3bb75cfbd718bbfccc56b7f53d92d7048ef4ca39a3b7b7c6d4af1f791181") +} + +func (KeeperRegistryUpkeepPerformed) Topic() common.Hash { + return common.HexToHash("0xcaacad83e47cc45c280d487ec84184eee2fa3b54ebaa393bda7549f13da228f6") +} + +func (KeeperRegistryUpkeepRegistered) Topic() common.Hash { + return common.HexToHash("0xbae366358c023f887e791d7a62f2e4316f1026bd77f6fb49501a917b3bc5d012") +} + func (_KeeperRegistry *KeeperRegistry) Address() common.Address { return _KeeperRegistry.address } @@ -2410,9 +2456,7 @@ type KeeperRegistryInterface interface { ParseUpkeepRegistered(log types.Log) (*KeeperRegistryUpkeepRegistered, error) - UnpackLog(out interface{}, event string, log types.Log) error - - ParseLog(log types.Log) (interface{}, error) + ParseLog(log types.Log) (generated.AbigenLog, error) Address() common.Address } diff --git a/core/services/chainlink/application.go 
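The hardcoded hashes in the Topic() methods above are not arbitrary: each is the Keccak-256 digest of the event's canonical signature, which the EVM places in Topics[0] of the emitted log and which ParseLog switches on. This can be checked independently:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// Keccak-256 of the canonical event signature.
	sig := []byte("OwnershipTransferred(address,address)")
	fmt.Printf("0x%x\n", crypto.Keccak256(sig))
	// Prints 0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0,
	// the same hash returned by KeeperRegistryOwnershipTransferred.Topic() above.
}
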
b/core/services/chainlink/application.go index 01abe16fdb0..9b28fd40d68 100644 --- a/core/services/chainlink/application.go +++ b/core/services/chainlink/application.go @@ -199,7 +199,7 @@ func NewApplication(config *orm.Config, ethClient eth.Client, advisoryLocker pos pipelineRunner, store.DB, ), - job.Keeper: keeper.NewDelegate(store.DB, store.EthClient, headBroadcaster, config), + job.Keeper: keeper.NewDelegate(store.DB, store.EthClient, headBroadcaster, logBroadcaster, config), } ) diff --git a/core/services/keeper/delegate.go b/core/services/keeper/delegate.go index bac387fef1c..f8066672998 100644 --- a/core/services/keeper/delegate.go +++ b/core/services/keeper/delegate.go @@ -1,30 +1,37 @@ package keeper import ( - "time" - "github.com/pkg/errors" "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/keeper_registry_wrapper" "github.com/smartcontractkit/chainlink/core/services" "github.com/smartcontractkit/chainlink/core/services/eth" "github.com/smartcontractkit/chainlink/core/services/job" + "github.com/smartcontractkit/chainlink/core/services/log" "github.com/smartcontractkit/chainlink/core/store/orm" "gorm.io/gorm" ) type Delegate struct { + config *orm.Config + db *gorm.DB ethClient eth.Client headBroadcaster *services.HeadBroadcaster - db *gorm.DB - syncInterval time.Duration + logBroadcaster log.Broadcaster } -func NewDelegate(db *gorm.DB, ethClient eth.Client, headBroadcaster *services.HeadBroadcaster, config *orm.Config) *Delegate { +func NewDelegate( + db *gorm.DB, + ethClient eth.Client, + headBroadcaster *services.HeadBroadcaster, + logBroadcaster log.Broadcaster, + config *orm.Config, +) *Delegate { return &Delegate{ + config: config, + db: db, ethClient: ethClient, headBroadcaster: headBroadcaster, - db: db, - syncInterval: config.KeeperRegistrySyncInterval(), + logBroadcaster: logBroadcaster, } } @@ -46,8 +53,22 @@ func (d *Delegate) ServicesForSpec(spec job.Job) (services []job.Service, err er return nil, errors.Wrap(err, "unable to create keeper registry contract wrapper") } - registrySynchronizer := NewRegistrySynchronizer(spec, contract, d.db, d.syncInterval) - upkeepExecutor := NewUpkeepExecutor(spec, d.db, d.ethClient, d.headBroadcaster) + registrySynchronizer := NewRegistrySynchronizer( + spec, + contract, + d.db, + d.headBroadcaster, + d.logBroadcaster, + d.config.KeeperRegistrySyncInterval(), + d.config.KeeperMinimumRequiredConfirmations(), + ) + upkeepExecutor := NewUpkeepExecutor( + spec, + d.db, + d.ethClient, + d.headBroadcaster, + d.config.KeeperMaximumGracePeriod(), + ) return []job.Service{ registrySynchronizer, diff --git a/core/services/keeper/helpers_test.go b/core/services/keeper/helpers_test.go index 2f3b97600bd..e30d6044eba 100644 --- a/core/services/keeper/helpers_test.go +++ b/core/services/keeper/helpers_test.go @@ -1,5 +1,17 @@ package keeper -func (rs *RegistrySynchronizer) ExportedSyncRegistry() { - rs.syncRegistry() +import ( + "github.com/smartcontractkit/chainlink/core/store/models" +) + +func (rs *RegistrySynchronizer) ExportedFullSync() { + rs.fullSync() +} + +func (rs *RegistrySynchronizer) ExportedProcessLogs(head models.Head) { + rs.processLogs(head) +} + +func ExportedCalcPositioningConstant(upkeepID int64, registryAddress models.EIP55Address) (int32, error) { + return calcPositioningConstant(upkeepID, registryAddress) } diff --git a/core/services/keeper/integration_test.go b/core/services/keeper/integration_test.go index 5103f6bb72d..60d3ef70b3b 100644 --- a/core/services/keeper/integration_test.go +++ 
b/core/services/keeper/integration_test.go @@ -14,14 +14,22 @@ import ( "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/basic_upkeep_contract" "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/keeper_registry_wrapper" "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/mock_v3_aggregator_contract" + "github.com/smartcontractkit/chainlink/core/services/keeper" "github.com/smartcontractkit/chainlink/core/store/dialects" "github.com/smartcontractkit/chainlink/core/store/models" "github.com/smartcontractkit/libocr/gethwrappers/link_token_interface" "github.com/stretchr/testify/require" ) -var oneEth = big.NewInt(1000000000000000000) -var oneHunEth = big.NewInt(0).Mul(oneEth, big.NewInt(100)) +var ( + oneEth = big.NewInt(1000000000000000000) + tenEth = big.NewInt(0).Mul(oneEth, big.NewInt(10)) + oneHunEth = big.NewInt(0).Mul(oneEth, big.NewInt(100)) + + payload1 = common.Hex2Bytes("1234") + payload2 = common.Hex2Bytes("ABCD") + payload3 = common.Hex2Bytes("6789") +) func TestKeeperEthIntegration(t *testing.T) { g := gomega.NewGomegaWithT(t) @@ -48,8 +56,8 @@ func TestKeeperEthIntegration(t *testing.T) { gasLimit := goEthereumEth.DefaultConfig.Miner.GasCeil * 2 backend := backends.NewSimulatedBackend(genesisData, gasLimit) - payload1 := common.Hex2Bytes("1234") - payload2 := common.Hex2Bytes("ABCD") + stopMining := cltest.Mine(backend, 1*time.Second) // >> 2 seconds and the test gets slow, << 1 second and the app may miss heads + defer stopMining() linkAddr, _, linkToken, err := link_token_interface.DeployLinkToken(sergey, backend) require.NoError(t, err) @@ -73,19 +81,19 @@ func TestKeeperEthIntegration(t *testing.T) { require.NoError(t, err) _, err = upkeepContract.SetShouldPerformUpkeep(carrol, true) require.NoError(t, err) - backend.Commit() - _, err = registryContract.AddFunds(carrol, big.NewInt(0), oneHunEth) + _, err = registryContract.AddFunds(carrol, big.NewInt(0), tenEth) require.NoError(t, err) backend.Commit() - stopMining := cltest.Mine(backend) - defer stopMining() - // setup app config, _, cfgCleanup := cltest.BootstrapThrowawayORM(t, "keeper_eth_integration", true, true) config.Config.Dialect = dialects.PostgresWithoutLock defer cfgCleanup() - config.Set("KEEPER_REGISTRY_SYNC_INTERVAL", 2*time.Second) + config.Set("KEEPER_REGISTRY_SYNC_INTERVAL", 24*time.Hour) // disable full sync ticker for test + config.Set("BLOCK_BACKFILL_DEPTH", 0) // backfill will trigger sync on startup + config.Set("KEEPER_MINIMUM_REQUIRED_CONFIRMATIONS", 1) // disable reorg protection for this test + config.Set("KEEPER_MAXIMUM_GRACE_PERIOD", 0) // avoid waiting to re-submit for upkeeps + config.Set("ETH_HEAD_TRACKER_MAX_BUFFER_SIZE", 100) // helps prevent missed heads app, appCleanup := cltest.NewApplicationWithConfigAndKeyOnSimulatedBlockchain(t, config, backend, nodeKey) defer appCleanup() require.NoError(t, app.StartAndConnect()) @@ -115,4 +123,35 @@ func TestKeeperEthIntegration(t *testing.T) { // observe 2nd job run and received payload changes g.Eventually(receivedBytes, 20*time.Second, cltest.DBPollingInterval).Should(gomega.Equal(payload2)) + + // cancel upkeep + _, err = registryContract.CancelUpkeep(carrol, big.NewInt(0)) + require.NoError(t, err) + backend.Commit() + + cltest.WaitForCount(t, app.Store, keeper.UpkeepRegistration{}, 0) + + // add new upkeep (same target contract) + _, err = registryContract.RegisterUpkeep(steve, upkeepAddr, 2_500_000, carrol.From, []byte{}) + require.NoError(t, err) + _, err = 
upkeepContract.SetBytesToSend(carrol, payload3)
+	require.NoError(t, err)
+	_, err = upkeepContract.SetShouldPerformUpkeep(carrol, true)
+	require.NoError(t, err)
+	_, err = registryContract.AddFunds(carrol, big.NewInt(1), tenEth)
+	require.NoError(t, err)
+	backend.Commit()
+
+	// observe update
+	g.Eventually(receivedBytes, 20*time.Second, cltest.DBPollingInterval).Should(gomega.Equal(payload3))
+
+	// remove this node from keeper list
+	_, err = registryContract.SetKeepers(steve, []common.Address{nelly.From}, []common.Address{nelly.From})
+	require.NoError(t, err)
+
+	var registry keeper.Registry
+	require.NoError(t, app.Store.DB.First(&registry).Error)
+	cltest.AssertRecordEventually(t, app.Store, &registry, func() bool {
+		return registry.KeeperIndex == -1
+	})
 }
diff --git a/core/services/keeper/models.go b/core/services/keeper/models.go
index 2cfdab95189..7c04c279124 100644
--- a/core/services/keeper/models.go
+++ b/core/services/keeper/models.go
@@ -25,11 +25,11 @@ func (Registry) TableName() string {
 	return "keeper_registries"
 }
 
-// todo - upkeep
 type UpkeepRegistration struct {
 	ID         int32 `gorm:"primary_key"`
 	CheckData  []byte
 	ExecuteGas int32
+	LastRunBlockHeight int64
 	RegistryID int32
 	Registry   Registry
 	UpkeepID   int64
diff --git a/core/services/keeper/orm.go b/core/services/keeper/orm.go
index 4433d7e82a4..617e6a72255 100644
--- a/core/services/keeper/orm.go
+++ b/core/services/keeper/orm.go
@@ -33,6 +33,14 @@ func (korm ORM) Registries(ctx context.Context) (registries []Registry, _ error)
 	return registries, err
 }
 
+func (korm ORM) RegistryForJob(ctx context.Context, jobID int32) (registry Registry, _ error) {
+	err := korm.DB.
+		WithContext(ctx).
+		First(&registry, "job_id = ?", jobID).
+		Error
+	return registry, err
+}
+
 func (korm ORM) UpsertRegistry(ctx context.Context, registry *Registry) error {
 	return korm.DB.
 		WithContext(ctx).
@@ -51,40 +59,45 @@ func (korm ORM) UpsertUpkeep(ctx context.Context, registration *UpkeepRegistrati
 		WithContext(ctx).
 		Clauses(clause.OnConflict{
 			Columns:   []clause.Column{{Name: "registry_id"}, {Name: "upkeep_id"}},
-			DoUpdates: clause.AssignmentColumns([]string{"execute_gas", "check_data"}),
+			DoUpdates: clause.AssignmentColumns([]string{"execute_gas", "check_data", "positioning_constant"}),
 		}).
 		Create(registration).
 		Error
 }
 
-func (korm ORM) BatchDeleteUpkeeps(ctx context.Context, registryID int32, upkeedIDs []int64) error {
+func (korm ORM) BatchDeleteUpkeepsForJob(ctx context.Context, jobID int32, upkeepIDs []int64) error {
 	return korm.DB.
-		WithContext(ctx).
-		Where("registry_id = ? AND upkeep_id IN (?)", registryID, upkeedIDs).
-		Delete(UpkeepRegistration{}).
-		Error
+		WithContext(ctx).Exec(
+		`DELETE FROM upkeep_registrations WHERE registry_id = (
+			SELECT id from keeper_registries where job_id = ?
+		) AND upkeep_id IN (?)`,
+		jobID,
+		upkeepIDs,
+	).Error
 }
 
-func (korm ORM) DeleteRegistryByJobID(ctx context.Context, jobID int32) error {
-	return korm.DB.
-		WithContext(ctx).
-		Where("job_id = ?", jobID).
-		Delete(Registry{}). // auto deletes upkeep registrations
-		Error
-}
-
-func (korm ORM) EligibleUpkeeps(ctx context.Context, blockNumber int64) (upkeeps []UpkeepRegistration, _ error) {
+func (korm ORM) EligibleUpkeeps(
+	ctx context.Context,
+	blockNumber int64,
+	gracePeriod int64,
+) (upkeeps []UpkeepRegistration, _ error) {
+	lastValidBlockHeight := blockNumber - gracePeriod
+	if lastValidBlockHeight < 1 {
+		lastValidBlockHeight = 1 // ensure that upkeeps with last_run_block_height = 0 are always eligible
+	}
 	err := korm.DB.
 		WithContext(ctx).
Preload("Registry"). Joins("INNER JOIN keeper_registries ON keeper_registries.id = upkeep_registrations.registry_id"). Where(` ? % keeper_registries.block_count_per_turn = 0 AND + keeper_registries.num_keepers > 0 AND + upkeep_registrations.last_run_block_height < ? AND keeper_registries.keeper_index = ( upkeep_registrations.positioning_constant + (? / keeper_registries.block_count_per_turn) ) % keeper_registries.num_keepers - `, blockNumber, blockNumber). + `, blockNumber, lastValidBlockHeight, blockNumber). Find(&upkeeps). Error @@ -104,6 +117,21 @@ func (korm ORM) LowestUnsyncedID(ctx context.Context, reg Registry) (nextID int6 return nextID, err } +func (korm ORM) SetLastRunHeightForUpkeepOnJob(ctx context.Context, jobID int32, upkeepID int64, height int64) error { + return korm.DB. + WithContext(ctx).Exec( + `UPDATE upkeep_registrations + SET last_run_block_height = ? + WHERE upkeep_id = ? AND + registry_id = ( + SELECT id FROM keeper_registries WHERE job_id = ? + );`, + height, + upkeepID, + jobID, + ).Error +} + func (korm ORM) CreateEthTransactionForUpkeep(ctx context.Context, upkeep UpkeepRegistration, payload []byte) error { sqlDB, err := korm.DB.DB() if err != nil { diff --git a/core/services/keeper/orm_test.go b/core/services/keeper/orm_test.go index ee289db709a..00d54a253fa 100644 --- a/core/services/keeper/orm_test.go +++ b/core/services/keeper/orm_test.go @@ -7,7 +7,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/smartcontractkit/chainlink/core/internal/cltest" "github.com/smartcontractkit/chainlink/core/services/keeper" - "github.com/smartcontractkit/chainlink/core/services/postgres" "github.com/smartcontractkit/chainlink/core/store" "github.com/smartcontractkit/chainlink/core/store/models" "github.com/stretchr/testify/assert" @@ -32,9 +31,10 @@ func newUpkeep(registry keeper.Registry, upkeepID int64) keeper.UpkeepRegistrati } } -func ctx() context.Context { - ctx, _ := postgres.DefaultQueryCtx() - return ctx +func assertLastRunHeight(t *testing.T, store *store.Store, upkeep keeper.UpkeepRegistration, height int64) { + err := store.DB.Find(&upkeep).Error + require.NoError(t, err) + require.Equal(t, height, upkeep.LastRunBlockHeight) } func TestKeeperDB_Registries(t *testing.T) { @@ -45,7 +45,7 @@ func TestKeeperDB_Registries(t *testing.T) { cltest.MustInsertKeeperRegistry(t, store) cltest.MustInsertKeeperRegistry(t, store) - existingRegistries, err := orm.Registries(ctx()) + existingRegistries, err := orm.Registries(context.Background()) require.NoError(t, err) require.Equal(t, 2, len(existingRegistries)) } @@ -56,34 +56,42 @@ func TestKeeperDB_UpsertUpkeep(t *testing.T) { defer cleanup() registry, _ := cltest.MustInsertKeeperRegistry(t, store) - upkeep := cltest.MustInsertUpkeepForRegistry(t, store, registry) - + upkeep := keeper.UpkeepRegistration{ + UpkeepID: 0, + ExecuteGas: executeGas, + Registry: registry, + CheckData: checkData, + LastRunBlockHeight: 1, + PositioningConstant: 1, + } + require.NoError(t, store.DB.Create(&upkeep).Error) cltest.AssertCount(t, store, &keeper.UpkeepRegistration{}, 1) - var upkeepFromDB keeper.UpkeepRegistration - err := store.DB.First(&upkeepFromDB).Error - require.NoError(t, err) - require.Equal(t, executeGas, upkeepFromDB.ExecuteGas) - require.Equal(t, checkData, upkeepFromDB.CheckData) // update upkeep upkeep.ExecuteGas = 20_000 upkeep.CheckData = common.Hex2Bytes("8888") + upkeep.PositioningConstant = 2 + upkeep.LastRunBlockHeight = 2 - err = orm.UpsertUpkeep(ctx(), &upkeep) + err := 
orm.UpsertUpkeep(context.Background(), &upkeep) require.NoError(t, err) cltest.AssertCount(t, store, &keeper.UpkeepRegistration{}, 1) + + var upkeepFromDB keeper.UpkeepRegistration err = store.DB.First(&upkeepFromDB).Error require.NoError(t, err) require.Equal(t, int32(20_000), upkeepFromDB.ExecuteGas) require.Equal(t, "8888", common.Bytes2Hex(upkeepFromDB.CheckData)) + require.Equal(t, int32(2), upkeepFromDB.PositioningConstant) + require.Equal(t, int64(1), upkeepFromDB.LastRunBlockHeight) // shouldn't change on upsert } -func TestKeeperDB_BatchDelete(t *testing.T) { +func TestKeeperDB_BatchDeleteUpkeepsForJob(t *testing.T) { t.Parallel() store, orm, cleanup := setupKeeperDB(t) defer cleanup() - registry, _ := cltest.MustInsertKeeperRegistry(t, store) + registry, job := cltest.MustInsertKeeperRegistry(t, store) for i := int64(0); i < 3; i++ { cltest.MustInsertUpkeepForRegistry(t, store, registry) @@ -91,7 +99,7 @@ func TestKeeperDB_BatchDelete(t *testing.T) { cltest.AssertCount(t, store, &keeper.UpkeepRegistration{}, 3) - err := orm.BatchDeleteUpkeeps(ctx(), registry.ID, []int64{0, 2}) + err := orm.BatchDeleteUpkeepsForJob(context.Background(), job.ID, []int64{0, 2}) require.NoError(t, err) cltest.AssertCount(t, store, &keeper.UpkeepRegistration{}, 1) @@ -101,26 +109,6 @@ func TestKeeperDB_BatchDelete(t *testing.T) { require.Equal(t, int64(1), remainingUpkeep.UpkeepID) } -func TestKeeperDB_DeleteRegistryByJobID(t *testing.T) { - t.Parallel() - store, orm, cleanup := setupKeeperDB(t) - defer cleanup() - - registry, _ := cltest.MustInsertKeeperRegistry(t, store) - - for i := int64(0); i < 3; i++ { - cltest.MustInsertUpkeepForRegistry(t, store, registry) - } - - cltest.AssertCount(t, store, &keeper.UpkeepRegistration{}, 3) - - err := orm.DeleteRegistryByJobID(ctx(), registry.JobID) - require.NoError(t, err) - - cltest.AssertCount(t, store, keeper.Registry{}, 0) - cltest.AssertCount(t, store, &keeper.UpkeepRegistration{}, 0) -} - func TestKeeperDB_EligibleUpkeeps_BlockCountPerTurn(t *testing.T) { t.Parallel() store, orm, cleanup := setupKeeperDB(t) @@ -142,13 +130,13 @@ func TestKeeperDB_EligibleUpkeeps_BlockCountPerTurn(t *testing.T) { } for _, upkeep := range upkeeps { - err := orm.UpsertUpkeep(ctx(), &upkeep) + err := orm.UpsertUpkeep(context.Background(), &upkeep) require.NoError(t, err) } cltest.AssertCount(t, store, &keeper.UpkeepRegistration{}, 3) - elligibleUpkeeps, err := orm.EligibleUpkeeps(ctx(), blockheight) + elligibleUpkeeps, err := orm.EligibleUpkeeps(context.Background(), blockheight, 0) assert.NoError(t, err) assert.Len(t, elligibleUpkeeps, 2) assert.Equal(t, int64(0), elligibleUpkeeps[0].UpkeepID) @@ -163,6 +151,36 @@ func TestKeeperDB_EligibleUpkeeps_BlockCountPerTurn(t *testing.T) { assert.Equal(t, reg1.ContractAddress, elligibleUpkeeps[1].Registry.ContractAddress) } +func TestKeeperDB_EligibleUpkeeps_GracePeriod(t *testing.T) { + t.Parallel() + store, orm, cleanup := setupKeeperDB(t) + defer cleanup() + + blockheight := int64(120) + gracePeriod := int64(100) + + registry, _ := cltest.MustInsertKeeperRegistry(t, store) + upkeep1 := newUpkeep(registry, 0) + upkeep1.LastRunBlockHeight = 0 + upkeep2 := newUpkeep(registry, 1) + upkeep2.LastRunBlockHeight = 19 + upkeep3 := newUpkeep(registry, 2) + upkeep3.LastRunBlockHeight = 20 + + for _, upkeep := range [3]keeper.UpkeepRegistration{upkeep1, upkeep2, upkeep3} { + err := orm.UpsertUpkeep(context.Background(), &upkeep) + require.NoError(t, err) + } + + cltest.AssertCount(t, store, &keeper.UpkeepRegistration{}, 3) + + 
elligibleUpkeeps, err := orm.EligibleUpkeeps(context.Background(), blockheight, gracePeriod) + assert.NoError(t, err) + assert.Len(t, elligibleUpkeeps, 2) + assert.Equal(t, int64(0), elligibleUpkeeps[0].UpkeepID) + assert.Equal(t, int64(1), elligibleUpkeeps[1].UpkeepID) +} + func TestKeeperDB_EligibleUpkeeps_KeepersRotate(t *testing.T) { t.Parallel() store, orm, cleanup := setupKeeperDB(t) @@ -178,21 +196,21 @@ func TestKeeperDB_EligibleUpkeeps_KeepersRotate(t *testing.T) { // out of 5 valid block heights, with 5 keepers, we are eligible // to submit on exactly 1 of them - list1, err := orm.EligibleUpkeeps(ctx(), 20) // someone eligible + list1, err := orm.EligibleUpkeeps(context.Background(), 20, 0) // someone eligible require.NoError(t, err) - list2, err := orm.EligibleUpkeeps(ctx(), 30) // noone eligible + list2, err := orm.EligibleUpkeeps(context.Background(), 30, 0) // noone eligible require.NoError(t, err) - list3, err := orm.EligibleUpkeeps(ctx(), 40) // someone eligible + list3, err := orm.EligibleUpkeeps(context.Background(), 40, 0) // someone eligible require.NoError(t, err) - list4, err := orm.EligibleUpkeeps(ctx(), 41) // noone eligible + list4, err := orm.EligibleUpkeeps(context.Background(), 41, 0) // noone eligible require.NoError(t, err) - list5, err := orm.EligibleUpkeeps(ctx(), 60) // someone eligible + list5, err := orm.EligibleUpkeeps(context.Background(), 60, 0) // someone eligible require.NoError(t, err) - list6, err := orm.EligibleUpkeeps(ctx(), 80) // someone eligible + list6, err := orm.EligibleUpkeeps(context.Background(), 80, 0) // someone eligible require.NoError(t, err) - list7, err := orm.EligibleUpkeeps(ctx(), 99) // noone eligible + list7, err := orm.EligibleUpkeeps(context.Background(), 99, 0) // noone eligible require.NoError(t, err) - list8, err := orm.EligibleUpkeeps(ctx(), 100) // someone eligible + list8, err := orm.EligibleUpkeeps(context.Background(), 100, 0) // someone eligible require.NoError(t, err) totalEligible := len(list1) + len(list2) + len(list3) + len(list4) + len(list5) + len(list6) + len(list7) + len(list8) @@ -206,27 +224,41 @@ func TestKeeperDB_NextUpkeepID(t *testing.T) { registry, _ := cltest.MustInsertKeeperRegistry(t, store) - nextID, err := orm.LowestUnsyncedID(ctx(), registry) + nextID, err := orm.LowestUnsyncedID(context.Background(), registry) require.NoError(t, err) require.Equal(t, int64(0), nextID) upkeep := newUpkeep(registry, 0) - err = orm.UpsertUpkeep(ctx(), &upkeep) + err = orm.UpsertUpkeep(context.Background(), &upkeep) require.NoError(t, err) - nextID, err = orm.LowestUnsyncedID(ctx(), registry) + nextID, err = orm.LowestUnsyncedID(context.Background(), registry) require.NoError(t, err) require.Equal(t, int64(1), nextID) upkeep = newUpkeep(registry, 3) - err = orm.UpsertUpkeep(ctx(), &upkeep) + err = orm.UpsertUpkeep(context.Background(), &upkeep) require.NoError(t, err) - nextID, err = orm.LowestUnsyncedID(ctx(), registry) + nextID, err = orm.LowestUnsyncedID(context.Background(), registry) require.NoError(t, err) require.Equal(t, int64(4), nextID) } +func TestKeeperDB_SetLastRunHeightForUpkeepOnJob(t *testing.T) { + t.Parallel() + store, orm, cleanup := setupKeeperDB(t) + defer cleanup() + + registry, j := cltest.MustInsertKeeperRegistry(t, store) + upkeep := cltest.MustInsertUpkeepForRegistry(t, store, registry) + + orm.SetLastRunHeightForUpkeepOnJob(context.Background(), j.ID, upkeep.UpkeepID, 100) + assertLastRunHeight(t, store, upkeep, 100) + orm.SetLastRunHeightForUpkeepOnJob(context.Background(), j.ID, 
upkeep.UpkeepID, 0) + assertLastRunHeight(t, store, upkeep, 0) +} + func TestKeeperDB_CreateEthTransactionForUpkeep(t *testing.T) { t.Parallel() store, orm, cleanup := setupKeeperDB(t) @@ -238,7 +270,7 @@ func TestKeeperDB_CreateEthTransactionForUpkeep(t *testing.T) { payload := common.Hex2Bytes("1234") gasBuffer := int32(200_000) - err := orm.CreateEthTransactionForUpkeep(ctx(), upkeep, payload) + err := orm.CreateEthTransactionForUpkeep(context.Background(), upkeep, payload) require.NoError(t, err) var ethTX models.EthTx diff --git a/core/services/keeper/registry_synchronizer_core.go b/core/services/keeper/registry_synchronizer_core.go new file mode 100644 index 00000000000..998a9137733 --- /dev/null +++ b/core/services/keeper/registry_synchronizer_core.go @@ -0,0 +1,140 @@ +package keeper + +import ( + "context" + "sync" + "time" + + "github.com/pkg/errors" + + "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated" + "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/keeper_registry_wrapper" + "github.com/smartcontractkit/chainlink/core/services" + "github.com/smartcontractkit/chainlink/core/services/job" + "github.com/smartcontractkit/chainlink/core/services/log" + "github.com/smartcontractkit/chainlink/core/store/models" + "github.com/smartcontractkit/chainlink/core/utils" + "gorm.io/gorm" +) + +// MailRoom holds the log mailboxes for all the log types that keeper cares about +type MailRoom struct { + mbUpkeepCanceled *utils.Mailbox + mbSyncRegistry *utils.Mailbox + mbUpkeepPerformed *utils.Mailbox + mbUpkeepRegistered *utils.Mailbox +} + +func NewRegistrySynchronizer( + job job.Job, + contract *keeper_registry_wrapper.KeeperRegistry, + db *gorm.DB, + headBroadcaster *services.HeadBroadcaster, + logBroadcaster log.Broadcaster, + syncInterval time.Duration, + minConfirmations uint64, +) *RegistrySynchronizer { + mailRoom := MailRoom{ + mbUpkeepCanceled: utils.NewMailbox(50), + mbSyncRegistry: utils.NewMailbox(1), + mbUpkeepPerformed: utils.NewMailbox(1), + mbUpkeepRegistered: utils.NewMailbox(50), + } + return &RegistrySynchronizer{ + chHeads: make(chan models.Head, 1), + chStop: make(chan struct{}), + contract: contract, + headBroadcaster: headBroadcaster, + interval: syncInterval, + job: job, + logBroadcaster: logBroadcaster, + mailRoom: mailRoom, + minConfirmations: minConfirmations, + orm: NewORM(db), + StartStopOnce: utils.StartStopOnce{}, + wgDone: sync.WaitGroup{}, + } +} + +// RegistrySynchronizer conforms to the Service, Listener, and HeadRelayable interfaces +var _ job.Service = (*RegistrySynchronizer)(nil) +var _ log.Listener = (*RegistrySynchronizer)(nil) +var _ services.HeadBroadcastable = (*RegistrySynchronizer)(nil) + +type RegistrySynchronizer struct { + chHeads chan models.Head + chStop chan struct{} + contract *keeper_registry_wrapper.KeeperRegistry + headBroadcaster *services.HeadBroadcaster + interval time.Duration + job job.Job + logBroadcaster log.Broadcaster + mailRoom MailRoom + minConfirmations uint64 + orm ORM + wgDone sync.WaitGroup + utils.StartStopOnce +} + +func (rs *RegistrySynchronizer) Start() error { + return rs.StartOnce("RegistrySynchronizer", func() error { + rs.wgDone.Add(2) + go rs.run() + + logListenerOpts := log.ListenerOpts{ + Contract: rs.contract, + Logs: []generated.AbigenLog{ + keeper_registry_wrapper.KeeperRegistryKeepersUpdated{}, + keeper_registry_wrapper.KeeperRegistryConfigSet{}, + keeper_registry_wrapper.KeeperRegistryUpkeepCanceled{}, + 
keeper_registry_wrapper.KeeperRegistryUpkeepRegistered{}, + keeper_registry_wrapper.KeeperRegistryUpkeepPerformed{}, + }, + } + _, lbUnsubscribe := rs.logBroadcaster.Register(rs, logListenerOpts) + hbUnsubscribe := rs.headBroadcaster.Subscribe(rs) + + go func() { + defer hbUnsubscribe() + defer lbUnsubscribe() + defer rs.wgDone.Done() + <-rs.chStop + }() + return nil + }) +} + +func (rs *RegistrySynchronizer) Close() error { + if !rs.OkayToStop() { + return errors.New("RegistrySynchronizer is already stopped") + } + close(rs.chStop) + rs.wgDone.Wait() + return nil +} + +func (rs *RegistrySynchronizer) OnNewLongestChain(ctx context.Context, head models.Head) { + select { + case rs.chHeads <- head: + default: + } +} + +func (rs *RegistrySynchronizer) run() { + ticker := time.NewTicker(rs.interval) + defer rs.wgDone.Done() + defer ticker.Stop() + + rs.fullSync() + + for { + select { + case <-rs.chStop: + return + case <-ticker.C: + rs.fullSync() + case head := <-rs.chHeads: + rs.processLogs(head) + } + } +} diff --git a/core/services/keeper/registry_synchronizer_log_listener.go b/core/services/keeper/registry_synchronizer_log_listener.go new file mode 100644 index 00000000000..49cf31fde2a --- /dev/null +++ b/core/services/keeper/registry_synchronizer_log_listener.go @@ -0,0 +1,49 @@ +package keeper + +import ( + "reflect" + + "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/keeper_registry_wrapper" + "github.com/smartcontractkit/chainlink/core/logger" + "github.com/smartcontractkit/chainlink/core/services/log" + "github.com/smartcontractkit/chainlink/core/store/models" +) + +func (rs *RegistrySynchronizer) OnConnect() {} + +func (rs *RegistrySynchronizer) OnDisconnect() {} + +func (rs *RegistrySynchronizer) JobID() models.JobID { + return models.JobID{} +} + +func (rs *RegistrySynchronizer) JobIDV2() int32 { + return rs.job.ID +} + +func (rs *RegistrySynchronizer) IsV2Job() bool { + return true +} + +func (rs *RegistrySynchronizer) HandleLog(broadcast log.Broadcast) { + log := broadcast.DecodedLog() + if log == nil || reflect.ValueOf(log).IsNil() { + logger.Error("HandleLog: ignoring nil value") + return + } + + switch log := log.(type) { + case *keeper_registry_wrapper.KeeperRegistryKeepersUpdated: + rs.mailRoom.mbSyncRegistry.Deliver(broadcast) // same mailbox because same action + case *keeper_registry_wrapper.KeeperRegistryConfigSet: + rs.mailRoom.mbSyncRegistry.Deliver(broadcast) // same mailbox because same action + case *keeper_registry_wrapper.KeeperRegistryUpkeepCanceled: + rs.mailRoom.mbUpkeepCanceled.Deliver(broadcast) + case *keeper_registry_wrapper.KeeperRegistryUpkeepRegistered: + rs.mailRoom.mbUpkeepRegistered.Deliver(broadcast) + case *keeper_registry_wrapper.KeeperRegistryUpkeepPerformed: + rs.mailRoom.mbUpkeepPerformed.Deliver(broadcast) + default: + logger.Warnf("unexpected log type %T", log) + } +} diff --git a/core/services/keeper/registry_synchronizer_process_logs.go b/core/services/keeper/registry_synchronizer_process_logs.go new file mode 100644 index 00000000000..c4750599a77 --- /dev/null +++ b/core/services/keeper/registry_synchronizer_process_logs.go @@ -0,0 +1,194 @@ +package keeper + +import ( + "sync" + + "github.com/pkg/errors" + "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/keeper_registry_wrapper" + "github.com/smartcontractkit/chainlink/core/logger" + "github.com/smartcontractkit/chainlink/core/services/log" + "github.com/smartcontractkit/chainlink/core/services/postgres" + 
"github.com/smartcontractkit/chainlink/core/store/models" +) + +func (rs *RegistrySynchronizer) processLogs(head models.Head) { + wg := sync.WaitGroup{} + wg.Add(4) + go rs.handleSyncRegistryLog(head, wg.Done) + go rs.handleUpkeepCanceledLogs(head, wg.Done) + go rs.handleUpkeepRegisteredLogs(head, wg.Done) + go rs.handleUpkeepPerformedLogs(head, wg.Done) + wg.Wait() +} + +func (rs *RegistrySynchronizer) handleSyncRegistryLog(head models.Head, done func()) { + defer done() + oldEnough := isOldEnoughConstructor(head, rs.minConfirmations) + i := rs.mailRoom.mbSyncRegistry.RetrieveIf(oldEnough) + if i == nil { + return + } + broadcast, ok := i.(log.Broadcast) + if !ok { + logger.Errorf("RegistrySynchronizer: invariant violation, expected log.Broadcast but got %T", broadcast) + return + } + was, err := broadcast.WasAlreadyConsumed() + if err != nil { + logger.Warn(errors.Wrapf(err, "RegistrySynchronizer: unable to check if log was consumed, jobID: %d", rs.job.ID)) + return + } + if was { + return + } + if !head.IsInChain(broadcast.RawLog().BlockHash) { + return + } + _, err = rs.syncRegistry() + if err != nil { + logger.Error(errors.Wrapf(err, "RegistrySynchronizer: unable to sync registry, jobID: %d", rs.job.ID)) + return + } + err = broadcast.MarkConsumed() + logger.ErrorIf((errors.Wrapf(err, "RegistrySynchronizer: unable to mark log as consumed, jobID: %d", rs.job.ID))) +} + +func (rs *RegistrySynchronizer) handleUpkeepCanceledLogs(head models.Head, done func()) { + defer done() + oldEnough := isOldEnoughConstructor(head, rs.minConfirmations) + for { + i := rs.mailRoom.mbUpkeepCanceled.RetrieveIf(oldEnough) + if i == nil { + return + } + broadcast, ok := i.(log.Broadcast) + if !ok { + logger.Errorf("RegistrySynchronizer: invariant violation, expected log.Broadcast but got %T", broadcast) + continue + } + was, err := broadcast.WasAlreadyConsumed() + if err != nil { + logger.Warn(errors.Wrapf(err, "RegistrySynchronizer: unable to check if log was consumed, jobID: %d", rs.job.ID)) + continue + } + if was { + continue + } + if !head.IsInChain(broadcast.RawLog().BlockHash) { + continue + } + log, ok := broadcast.DecodedLog().(*keeper_registry_wrapper.KeeperRegistryUpkeepCanceled) + if !ok { + logger.Errorf("RegistrySynchronizer: invariant violation, expected UpkeepCanceled log but got %T", log) + continue + } + ctx, cancel := postgres.DefaultQueryCtx() + defer cancel() + err = rs.orm.BatchDeleteUpkeepsForJob(ctx, rs.job.ID, []int64{log.Id.Int64()}) + if err != nil { + logger.Error(errors.Wrapf(err, "RegistrySynchronizer: unable to batch delete upkeeps, jobID: %d", rs.job.ID)) + continue + } + err = broadcast.MarkConsumed() + logger.ErrorIf((errors.Wrapf(err, "RegistrySynchronizer: unable to mark log as consumed, jobID: %d", rs.job.ID))) + } +} + +func (rs *RegistrySynchronizer) handleUpkeepRegisteredLogs(head models.Head, done func()) { + defer done() + oldEnough := isOldEnoughConstructor(head, rs.minConfirmations) + ctx, cancel := postgres.DefaultQueryCtx() + defer cancel() + registry, err := rs.orm.RegistryForJob(ctx, rs.job.ID) + if err != nil { + logger.Error(errors.Wrapf(err, "RegistrySynchronizer: unable to find registry for job, jobID: %d", rs.job.ID)) + return + } + for { + i := rs.mailRoom.mbUpkeepRegistered.RetrieveIf(oldEnough) + if i == nil { + return + } + broadcast, ok := i.(log.Broadcast) + if !ok { + logger.Errorf("RegistrySynchronizer: invariant violation, expected log.Broadcast but got %T", broadcast) + continue + } + was, err := broadcast.WasAlreadyConsumed() + if err != nil { 
+ logger.Warn(errors.Wrapf(err, "RegistrySynchronizer: unable to check if log was consumed, jobID: %d", rs.job.ID)) + continue + } + if was { + continue + } + if !head.IsInChain(broadcast.RawLog().BlockHash) { + continue + } + log, ok := broadcast.DecodedLog().(*keeper_registry_wrapper.KeeperRegistryUpkeepRegistered) + if !ok { + logger.Errorf("RegistrySynchronizer: invariant violation, expected UpkeepRegistered log but got %T", log) + continue + } + err = rs.syncUpkeep(registry, log.Id.Int64()) + if err != nil { + logger.Error(err) + continue + } + err = broadcast.MarkConsumed() + logger.ErrorIf((errors.Wrapf(err, "RegistrySynchronizer: unable to mark log as consumed, jobID: %d", rs.job.ID))) + } +} + +func (rs *RegistrySynchronizer) handleUpkeepPerformedLogs(head models.Head, done func()) { + defer done() + oldEnough := isOldEnoughConstructor(head, rs.minConfirmations) + for { + i := rs.mailRoom.mbUpkeepPerformed.RetrieveIf(oldEnough) + if i == nil { + return + } + broadcast, ok := i.(log.Broadcast) + if !ok { + logger.Errorf("RegistrySynchronizer: invariant violation, expected log.Broadcast but got %T", broadcast) + continue + } + was, err := broadcast.WasAlreadyConsumed() + if err != nil { + logger.Warn(errors.Wrapf(err, "RegistrySynchronizer: unable to check if log was consumed, jobID: %d", rs.job.ID)) + continue + } + if was { + continue + } + if !head.IsInChain(broadcast.RawLog().BlockHash) { + continue + } + log, ok := broadcast.DecodedLog().(*keeper_registry_wrapper.KeeperRegistryUpkeepPerformed) + if !ok { + logger.Errorf("RegistrySynchronizer: invariant violation, expected UpkeepPerformed log but got %T", log) + continue + } + ctx, cancel := postgres.DefaultQueryCtx() + defer cancel() + // set last run to 0 so that keeper can resume checkUpkeep() + err = rs.orm.SetLastRunHeightForUpkeepOnJob(ctx, rs.job.ID, log.Id.Int64(), 0) + if err != nil { + logger.Error(err) + continue + } + err = broadcast.MarkConsumed() + logger.ErrorIf((errors.Wrapf(err, "RegistrySynchronizer: unable to mark log as consumed, jobID: %d", rs.job.ID))) + } +} + +func isOldEnoughConstructor(head models.Head, minConfirmations uint64) func(interface{}) bool { + return func(i interface{}) bool { + broadcast, ok := i.(log.Broadcast) + if !ok { + return true // we want to get bad data out of the queue + } + logHeight := broadcast.RawLog().BlockNumber + return (logHeight + uint64(minConfirmations) - 1) <= uint64(head.Number) + } +} diff --git a/core/services/keeper/registry_synchronizer.go b/core/services/keeper/registry_synchronizer_sync.go similarity index 54% rename from core/services/keeper/registry_synchronizer.go rename to core/services/keeper/registry_synchronizer_sync.go index 408417d34bb..e383b51e05e 100644 --- a/core/services/keeper/registry_synchronizer.go +++ b/core/services/keeper/registry_synchronizer_sync.go @@ -5,101 +5,30 @@ import ( "fmt" "math/big" "sync" - "time" "github.com/pkg/errors" - - "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/keeper_registry_wrapper" "github.com/smartcontractkit/chainlink/core/logger" - "github.com/smartcontractkit/chainlink/core/services/job" "github.com/smartcontractkit/chainlink/core/services/postgres" "github.com/smartcontractkit/chainlink/core/store/models" "github.com/smartcontractkit/chainlink/core/utils" - "gorm.io/gorm" ) +// syncUpkeepQueueSize represents the max number of upkeeps that can be synced in parallel const syncUpkeepQueueSize = 10 -func NewRegistrySynchronizer( - job job.Job, - contract 
*keeper_registry_wrapper.KeeperRegistry, - db *gorm.DB, - syncInterval time.Duration, -) *RegistrySynchronizer { - return &RegistrySynchronizer{ - contract: contract, - interval: syncInterval, - job: job, - orm: NewORM(db), - StartStopOnce: utils.StartStopOnce{}, - wgDone: sync.WaitGroup{}, - chStop: make(chan struct{}), - } -} - -// RegistrySynchronizer conforms to the job.Service interface -var _ job.Service = (*RegistrySynchronizer)(nil) - -type RegistrySynchronizer struct { - contract *keeper_registry_wrapper.KeeperRegistry - interval time.Duration - job job.Job - orm ORM - wgDone sync.WaitGroup - chStop chan struct{} - utils.StartStopOnce -} - -func (rs *RegistrySynchronizer) Start() error { - return rs.StartOnce("RegistrySynchronizer", func() error { - go rs.run() - return nil - }) -} - -func (rs *RegistrySynchronizer) Close() error { - if !rs.OkayToStop() { - return errors.New("RegistrySynchronizer is already stopped") - } - close(rs.chStop) - rs.wgDone.Wait() - return nil -} - -func (rs *RegistrySynchronizer) run() { - rs.wgDone.Add(1) - ticker := time.NewTicker(rs.interval) - defer rs.wgDone.Done() - defer ticker.Stop() - - for { - select { - case <-rs.chStop: - return - case <-ticker.C: - rs.syncRegistry() - } - } -} - -func (rs *RegistrySynchronizer) syncRegistry() { +func (rs *RegistrySynchronizer) fullSync() { contractAddress := rs.job.KeeperSpec.ContractAddress - logger.Debugf("syncing registry %s", contractAddress.Hex()) + logger.Debugf("fullSyncing registry %s", contractAddress.Hex()) var err error defer func() { - logger.ErrorIf(err, fmt.Sprintf("unable to sync registry %s", contractAddress.Hex())) + logger.ErrorIf(err, fmt.Sprintf("unable to fullSync registry %s", contractAddress.Hex())) }() - registry, err := rs.newSyncedRegistry(rs.job) + registry, err := rs.syncRegistry() if err != nil { return } - ctx, cancel := postgres.DefaultQueryCtx() - defer cancel() - if err = rs.orm.UpsertRegistry(ctx, ®istry); err != nil { - return - } if err = rs.addNewUpkeeps(registry); err != nil { return } @@ -108,6 +37,19 @@ func (rs *RegistrySynchronizer) syncRegistry() { } } +func (rs *RegistrySynchronizer) syncRegistry() (Registry, error) { + registry, err := rs.newRegistryFromChain() + if err != nil { + return Registry{}, err + } + ctx, cancel := postgres.DefaultQueryCtx() + defer cancel() + if err = rs.orm.UpsertRegistry(ctx, ®istry); err != nil { + return Registry{}, err + } + return registry, err +} + func (rs *RegistrySynchronizer) addNewUpkeeps(reg Registry) error { ctx, cancel := postgres.DefaultQueryCtx() defer cancel() @@ -126,57 +68,46 @@ func (rs *RegistrySynchronizer) addNewUpkeeps(reg Registry) error { return errors.New("RegistrySynchronizer: invariant, contract should always have at least as many upkeeps as DB") } - // batch sync registries + rs.batchSyncUpkeepsOnRegistry(reg, nextUpkeepID, countOnContract) + return nil +} + +// batchSyncUpkeepsOnRegistry syncs upkeeps at a time in parallel +// starting at upkeep ID and up to (but not including) +func (rs *RegistrySynchronizer) batchSyncUpkeepsOnRegistry(reg Registry, start, end int64) { wg := sync.WaitGroup{} - wg.Add(int(countOnContract - nextUpkeepID)) + wg.Add(int(end - start)) chSyncUpkeepQueue := make(chan struct{}, syncUpkeepQueueSize) - done := func() { + done := func() { <-chSyncUpkeepQueue; wg.Done() } + for upkeepID := start; upkeepID < end; upkeepID++ { select { case <-rs.chStop: - case <-chSyncUpkeepQueue: + return + case chSyncUpkeepQueue <- struct{}{}: + go rs.syncUpkeepWithCallback(reg, upkeepID, done) } - 
wg.Done() - } - - for upkeepID := nextUpkeepID; upkeepID < countOnContract; upkeepID++ { - chSyncUpkeepQueue <- struct{}{} - go rs.syncUpkeep(reg, upkeepID, done) } wg.Wait() - return nil } -func (rs *RegistrySynchronizer) deleteCanceledUpkeeps(reg Registry) error { - canceledBigs, err := rs.contract.GetCanceledUpkeepList(nil) +func (rs *RegistrySynchronizer) syncUpkeepWithCallback(registry Registry, upkeepID int64, doneCallback func()) { + defer doneCallback() + err := rs.syncUpkeep(registry, upkeepID) if err != nil { - return err - } - canceled := make([]int64, len(canceledBigs)) - for idx, upkeepID := range canceledBigs { - canceled[idx] = upkeepID.Int64() + logger.ErrorIf(err, fmt.Sprintf("unable to sync upkeep #%d on registry %s", upkeepID, registry.ContractAddress.Hex())) } - ctx, cancel := postgres.DefaultQueryCtx() - defer cancel() - return rs.orm.BatchDeleteUpkeeps(ctx, reg.ID, canceled) } -func (rs *RegistrySynchronizer) syncUpkeep(registry Registry, upkeepID int64, doneCallback func()) { - defer doneCallback() - - var err error - defer func() { - logger.ErrorIf(err, fmt.Sprintf("unable to sync upkeep #%d on registry %s", upkeepID, registry.ContractAddress.Hex())) - }() - +func (rs *RegistrySynchronizer) syncUpkeep(registry Registry, upkeepID int64) error { upkeepConfig, err := rs.contract.GetUpkeep(nil, big.NewInt(int64(upkeepID))) if err != nil { - return + return err } - positioningConstant, err := calcPositioningConstant(upkeepID, registry.ContractAddress, registry.NumKeepers) + positioningConstant, err := calcPositioningConstant(upkeepID, registry.ContractAddress) if err != nil { - return + return err } newUpkeep := UpkeepRegistration{ CheckData: upkeepConfig.CheckData, @@ -187,12 +118,27 @@ func (rs *RegistrySynchronizer) syncUpkeep(registry Registry, upkeepID int64, do } ctx, cancel := postgres.DefaultQueryCtx() defer cancel() - err = rs.orm.UpsertUpkeep(ctx, &newUpkeep) + return rs.orm.UpsertUpkeep(ctx, &newUpkeep) +} + +func (rs *RegistrySynchronizer) deleteCanceledUpkeeps(reg Registry) error { + canceledBigs, err := rs.contract.GetCanceledUpkeepList(nil) + if err != nil { + return err + } + canceled := make([]int64, len(canceledBigs)) + for idx, upkeepID := range canceledBigs { + canceled[idx] = upkeepID.Int64() + } + ctx, cancel := postgres.DefaultQueryCtx() + defer cancel() + return rs.orm.BatchDeleteUpkeepsForJob(ctx, rs.job.ID, canceled) } -func (rs *RegistrySynchronizer) newSyncedRegistry(job job.Job) (Registry, error) { - fromAddress := job.KeeperSpec.FromAddress - contractAddress := job.KeeperSpec.ContractAddress +// newRegistryFromChain returns a Registry stuct with fields synched from those on chain +func (rs *RegistrySynchronizer) newRegistryFromChain() (Registry, error) { + fromAddress := rs.job.KeeperSpec.FromAddress + contractAddress := rs.job.KeeperSpec.ContractAddress config, err := rs.contract.GetConfig(nil) if err != nil { return Registry{}, err @@ -208,7 +154,7 @@ func (rs *RegistrySynchronizer) newSyncedRegistry(job job.Job) (Registry, error) } } if keeperIndex == -1 { - return Registry{}, fmt.Errorf("unable to find %s in keeper list on registry %s", fromAddress.Hex(), contractAddress.Hex()) + logger.Warnf("unable to find %s in keeper list on registry %s", fromAddress.Hex(), contractAddress.Hex()) } return Registry{ @@ -216,26 +162,21 @@ func (rs *RegistrySynchronizer) newSyncedRegistry(job job.Job) (Registry, error) CheckGas: int32(config.CheckGasLimit), ContractAddress: contractAddress, FromAddress: fromAddress, - JobID: job.ID, + JobID: rs.job.ID, 
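+		// KeeperIndex may still be -1 here when fromAddress was not found in
+		// the on-chain keeper list (see the warning above); presumably such a
+		// registry simply matches no upkeeps in the eligibility query until a
+		// later sync picks up the keeper.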
KeeperIndex: keeperIndex, NumKeepers: int32(len(keeperAddresses)), }, nil } -func calcPositioningConstant(upkeepID int64, registryAddress models.EIP55Address, numKeepers int32) (int32, error) { - if numKeepers == 0 { - return 0, errors.New("cannot calc positioning constant with 0 keepers") - } - +// the positioning constant is fixed because upkeepID and registryAddress are immutable +func calcPositioningConstant(upkeepID int64, registryAddress models.EIP55Address) (int32, error) { upkeepBytes := make([]byte, binary.MaxVarintLen64) binary.PutVarint(upkeepBytes, upkeepID) bytesToHash := utils.ConcatBytes(upkeepBytes, registryAddress.Bytes()) - hash, err := utils.Keccak256(bytesToHash) + checksum, err := utils.Keccak256(bytesToHash) if err != nil { return 0, err } - hashUint := big.NewInt(0).SetBytes(hash) - constant := big.NewInt(0).Mod(hashUint, big.NewInt(int64(numKeepers))) - - return int32(constant.Int64()), nil + constant := binary.BigEndian.Uint32(checksum[:4]) + return int32(constant), nil } diff --git a/core/services/keeper/registry_synchronizer_test.go b/core/services/keeper/registry_synchronizer_test.go index 989f5e85f7f..26ce220b13e 100644 --- a/core/services/keeper/registry_synchronizer_test.go +++ b/core/services/keeper/registry_synchronizer_test.go @@ -6,26 +6,24 @@ import ( "time" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" "github.com/onsi/gomega" "github.com/smartcontractkit/chainlink/core/internal/cltest" "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/keeper_registry_wrapper" "github.com/smartcontractkit/chainlink/core/internal/mocks" + "github.com/smartcontractkit/chainlink/core/services" "github.com/smartcontractkit/chainlink/core/services/job" "github.com/smartcontractkit/chainlink/core/services/keeper" + "github.com/smartcontractkit/chainlink/core/services/log" + logmocks "github.com/smartcontractkit/chainlink/core/services/log/mocks" "github.com/smartcontractkit/chainlink/core/store" + "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" ) -const syncInterval = 3 * time.Second +const syncInterval = 1000 * time.Hour // prevents sync timer from triggering during test -var regConfig = struct { - PaymentPremiumPPB uint32 - BlockCountPerTurn *big.Int - CheckGasLimit uint32 - StalenessSeconds *big.Int - FallbackGasPrice *big.Int - FallbackLinkPrice *big.Int -}{ +var registryConfig = keeper_registry_wrapper.GetConfig{ PaymentPremiumPPB: 100, BlockCountPerTurn: big.NewInt(20), CheckGasLimit: 2_000_000, @@ -34,15 +32,7 @@ var regConfig = struct { FallbackLinkPrice: big.NewInt(1000000), } -var upkeep = struct { - Target common.Address - ExecuteGas uint32 - CheckData []byte - Balance *big.Int - LastKeeper common.Address - Admin common.Address - MaxValidBlocknumber uint64 -}{ +var upkeepConfig = keeper_registry_wrapper.GetUpkeep{ Target: cltest.NewAddress(), ExecuteGas: 2_000_000, CheckData: common.Hex2Bytes("1234"), @@ -52,60 +42,90 @@ var upkeep = struct { MaxValidBlocknumber: 1_000_000_000, } -func setupRegistrySync(t *testing.T) (*store.Store, *keeper.RegistrySynchronizer, *mocks.Client, job.Job, func()) { +func setupRegistrySync(t *testing.T) ( + *store.Store, + *keeper.RegistrySynchronizer, + *mocks.Client, + job.Job, + func(), +) { store, cleanup := cltest.NewStore(t) ethMock := new(mocks.Client) + lbMock := new(logmocks.Broadcaster) + headRelayer := services.NewHeadBroadcaster() j := cltest.MustInsertKeeperJob(t, store, cltest.NewEIP55Address(), cltest.NewEIP55Address()) - 
contractAddress := j.KeeperSpec.ContractAddress + contractAddress := j.KeeperSpec.ContractAddress.Address() contract, err := keeper_registry_wrapper.NewKeeperRegistry( - contractAddress.Address(), + contractAddress, ethMock, ) require.NoError(t, err) - synchronizer := keeper.NewRegistrySynchronizer(j, contract, store.DB, syncInterval) + lbMock.On("Register", mock.Anything, mock.MatchedBy(func(opts log.ListenerOpts) bool { + return opts.Contract.Address() == contractAddress + })).Return(true, func() {}) + + synchronizer := keeper.NewRegistrySynchronizer(j, contract, store.DB, headRelayer, lbMock, syncInterval, 1) return store, synchronizer, ethMock, j, cleanup } -func assertUpkeepIDs(t *testing.T, store *store.Store, expected []int32) { +func assertUpkeepIDs(t *testing.T, store *store.Store, expected []int64) { g := gomega.NewGomegaWithT(t) - var upkeepIDs []int32 + var upkeepIDs []int64 err := store.DB.Model(keeper.UpkeepRegistration{}).Pluck("upkeep_id", &upkeepIDs).Error require.NoError(t, err) + require.Equal(t, len(expected), len(upkeepIDs)) g.Expect(upkeepIDs).To(gomega.ContainElements(expected)) } func Test_RegistrySynchronizer_Start(t *testing.T) { - t.Parallel() - _, synchronizer, _, _, cleanup := setupRegistrySync(t) + store, synchronizer, ethMock, job, cleanup := setupRegistrySync(t) defer cleanup() + contractAddress := job.KeeperSpec.ContractAddress.Address() + fromAddress := job.KeeperSpec.FromAddress.Address() + + registryMock := cltest.NewContractMockReceiver(t, ethMock, keeper.RegistryABI, contractAddress) + canceledUpkeeps := []*big.Int{big.NewInt(1)} + registryMock.MockResponse("getConfig", registryConfig).Once() + registryMock.MockResponse("getKeeperList", []common.Address{fromAddress}).Once() + registryMock.MockResponse("getCanceledUpkeepList", canceledUpkeeps).Once() + registryMock.MockResponse("getUpkeepCount", big.NewInt(0)).Once() + err := synchronizer.Start() require.NoError(t, err) defer synchronizer.Close() + cltest.WaitForCount(t, store, keeper.Registry{}, 1) + err = synchronizer.Start() require.Error(t, err) } -func Test_RegistrySynchronizer_AddsAndRemovesUpkeeps(t *testing.T) { +func Test_RegistrySynchronizer_CalcPositioningConstant(t *testing.T) { t.Parallel() + for _, upkeepID := range []int64{0, 1, 100, 10_000} { + _, err := keeper.ExportedCalcPositioningConstant(upkeepID, cltest.NewEIP55Address()) + require.NoError(t, err) + } +} + +func Test_RegistrySynchronizer_FullSync(t *testing.T) { store, synchronizer, ethMock, job, cleanup := setupRegistrySync(t) defer cleanup() contractAddress := job.KeeperSpec.ContractAddress.Address() fromAddress := job.KeeperSpec.FromAddress.Address() - // 1st sync registryMock := cltest.NewContractMockReceiver(t, ethMock, keeper.RegistryABI, contractAddress) canceledUpkeeps := []*big.Int{big.NewInt(1)} - registryMock.MockResponse("getConfig", regConfig).Once() + registryMock.MockResponse("getConfig", registryConfig).Once() registryMock.MockResponse("getKeeperList", []common.Address{fromAddress}).Once() registryMock.MockResponse("getCanceledUpkeepList", canceledUpkeeps).Once() registryMock.MockResponse("getUpkeepCount", big.NewInt(3)).Once() - registryMock.MockResponse("getUpkeep", upkeep).Times(3) // sync all 3, then delete + registryMock.MockResponse("getUpkeep", upkeepConfig).Times(3) // sync all 3, then delete - synchronizer.ExportedSyncRegistry() + synchronizer.ExportedFullSync() cltest.AssertCount(t, store, keeper.Registry{}, 1) cltest.AssertCount(t, store, keeper.UpkeepRegistration{}, 2) @@ -119,26 +139,235 @@ func 
Test_RegistrySynchronizer_AddsAndRemovesUpkeeps(t *testing.T) { require.Equal(t, int32(20), registry.BlockCountPerTurn) require.Equal(t, int32(0), registry.KeeperIndex) require.Equal(t, int32(1), registry.NumKeepers) - require.Equal(t, upkeep.CheckData, upkeepRegistration.CheckData) - require.Equal(t, int32(upkeep.ExecuteGas), upkeepRegistration.ExecuteGas) + require.Equal(t, upkeepConfig.CheckData, upkeepRegistration.CheckData) + require.Equal(t, int32(upkeepConfig.ExecuteGas), upkeepRegistration.ExecuteGas) - assertUpkeepIDs(t, store, []int32{0, 2}) + assertUpkeepIDs(t, store, []int64{0, 2}) ethMock.AssertExpectations(t) - gomega.ContainElements() - // 2nd sync canceledUpkeeps = []*big.Int{big.NewInt(0), big.NewInt(1), big.NewInt(3)} - registryMock.MockResponse("getConfig", regConfig).Once() + registryMock.MockResponse("getConfig", registryConfig).Once() registryMock.MockResponse("getKeeperList", []common.Address{fromAddress}).Once() registryMock.MockResponse("getCanceledUpkeepList", canceledUpkeeps).Once() registryMock.MockResponse("getUpkeepCount", big.NewInt(5)).Once() - registryMock.MockResponse("getUpkeep", upkeep).Times(2) // two new upkeeps to sync + registryMock.MockResponse("getUpkeep", upkeepConfig).Times(2) // two new upkeeps to sync - synchronizer.ExportedSyncRegistry() + synchronizer.ExportedFullSync() cltest.AssertCount(t, store, keeper.Registry{}, 1) cltest.AssertCount(t, store, keeper.UpkeepRegistration{}, 2) - assertUpkeepIDs(t, store, []int32{2, 4}) + assertUpkeepIDs(t, store, []int64{2, 4}) + ethMock.AssertExpectations(t) +} + +func Test_RegistrySynchronizer_ConfigSetLog(t *testing.T) { + store, synchronizer, ethMock, job, cleanup := setupRegistrySync(t) + defer cleanup() + + contractAddress := job.KeeperSpec.ContractAddress.Address() + fromAddress := job.KeeperSpec.FromAddress.Address() + + registryMock := cltest.NewContractMockReceiver(t, ethMock, keeper.RegistryABI, contractAddress) + registryMock.MockResponse("getKeeperList", []common.Address{fromAddress}).Once() + registryMock.MockResponse("getConfig", registryConfig).Once() + registryMock.MockResponse("getCanceledUpkeepList", []*big.Int{}).Once() + registryMock.MockResponse("getUpkeepCount", big.NewInt(0)).Once() + + require.NoError(t, synchronizer.Start()) + defer synchronizer.Close() + cltest.WaitForCount(t, store, keeper.Registry{}, 1) + var registry keeper.Registry + require.NoError(t, store.DB.First(®istry).Error) + + registryConfig.BlockCountPerTurn = big.NewInt(40) // change from default + registryMock.MockResponse("getKeeperList", []common.Address{fromAddress}).Once() + registryMock.MockResponse("getConfig", registryConfig).Once() + + head := cltest.MustInsertHead(t, store, 1) + rawLog := types.Log{BlockHash: head.Hash} + log := keeper_registry_wrapper.KeeperRegistryConfigSet{} + logBroadcast := new(logmocks.Broadcast) + logBroadcast.On("DecodedLog").Return(&log) + logBroadcast.On("RawLog").Return(rawLog) + logBroadcast.On("MarkConsumed").Return(nil) + logBroadcast.On("WasAlreadyConsumed").Return(false, nil) + + // Do the thing + synchronizer.HandleLog(logBroadcast) + synchronizer.ExportedProcessLogs(head) + + cltest.AssertRecordEventually(t, store, ®istry, func() bool { + return registry.BlockCountPerTurn == 40 + }) + cltest.AssertCount(t, store, keeper.Registry{}, 1) + ethMock.AssertExpectations(t) + logBroadcast.AssertExpectations(t) +} + +func Test_RegistrySynchronizer_KeepersUpdatedLog(t *testing.T) { + store, synchronizer, ethMock, job, cleanup := setupRegistrySync(t) + defer cleanup() + + 
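+	// Start() performs an initial full sync against the mocked chain state;
+	// the test then delivers a KeepersUpdated log and expects the registry
+	// row to be re-synced so that NumKeepers goes from 1 to 2.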
contractAddress := job.KeeperSpec.ContractAddress.Address() + fromAddress := job.KeeperSpec.FromAddress.Address() + + registryMock := cltest.NewContractMockReceiver(t, ethMock, keeper.RegistryABI, contractAddress) + registryMock.MockResponse("getKeeperList", []common.Address{fromAddress}).Once() + registryMock.MockResponse("getConfig", registryConfig).Once() + registryMock.MockResponse("getCanceledUpkeepList", []*big.Int{}).Once() + registryMock.MockResponse("getUpkeepCount", big.NewInt(0)).Once() + + require.NoError(t, synchronizer.Start()) + defer synchronizer.Close() + cltest.WaitForCount(t, store, keeper.Registry{}, 1) + var registry keeper.Registry + require.NoError(t, store.DB.First(®istry).Error) + + addresses := []common.Address{fromAddress, cltest.NewAddress()} // change from default + registryMock.MockResponse("getConfig", registryConfig).Once() + registryMock.MockResponse("getKeeperList", addresses).Once() + + head := cltest.MustInsertHead(t, store, 1) + rawLog := types.Log{BlockHash: head.Hash} + log := keeper_registry_wrapper.KeeperRegistryKeepersUpdated{} + logBroadcast := new(logmocks.Broadcast) + logBroadcast.On("DecodedLog").Return(&log) + logBroadcast.On("RawLog").Return(rawLog) + logBroadcast.On("MarkConsumed").Return(nil) + logBroadcast.On("WasAlreadyConsumed").Return(false, nil) + + // Do the thing + synchronizer.HandleLog(logBroadcast) + synchronizer.ExportedProcessLogs(head) + + cltest.AssertRecordEventually(t, store, ®istry, func() bool { + return registry.NumKeepers == 2 + }) + cltest.AssertCount(t, store, keeper.Registry{}, 1) + ethMock.AssertExpectations(t) + logBroadcast.AssertExpectations(t) +} + +func Test_RegistrySynchronizer_UpkeepCanceledLog(t *testing.T) { + store, synchronizer, ethMock, job, cleanup := setupRegistrySync(t) + defer cleanup() + + contractAddress := job.KeeperSpec.ContractAddress.Address() + fromAddress := job.KeeperSpec.FromAddress.Address() + + registryMock := cltest.NewContractMockReceiver(t, ethMock, keeper.RegistryABI, contractAddress) + registryMock.MockResponse("getConfig", registryConfig).Once() + registryMock.MockResponse("getKeeperList", []common.Address{fromAddress}).Once() + registryMock.MockResponse("getCanceledUpkeepList", []*big.Int{}).Once() + registryMock.MockResponse("getUpkeepCount", big.NewInt(3)).Once() + registryMock.MockResponse("getUpkeep", upkeepConfig).Times(3) + + require.NoError(t, synchronizer.Start()) + defer synchronizer.Close() + cltest.WaitForCount(t, store, keeper.Registry{}, 1) + cltest.WaitForCount(t, store, keeper.UpkeepRegistration{}, 3) + + head := cltest.MustInsertHead(t, store, 1) + rawLog := types.Log{BlockHash: head.Hash} + log := keeper_registry_wrapper.KeeperRegistryUpkeepCanceled{Id: big.NewInt(1)} + logBroadcast := new(logmocks.Broadcast) + logBroadcast.On("DecodedLog").Return(&log) + logBroadcast.On("RawLog").Return(rawLog) + logBroadcast.On("MarkConsumed").Return(nil) + logBroadcast.On("WasAlreadyConsumed").Return(false, nil) + + // Do the thing + synchronizer.HandleLog(logBroadcast) + synchronizer.ExportedProcessLogs(head) + + cltest.WaitForCount(t, store, keeper.UpkeepRegistration{}, 2) + ethMock.AssertExpectations(t) + logBroadcast.AssertExpectations(t) +} + +func Test_RegistrySynchronizer_UpkeepRegisteredLog(t *testing.T) { + store, synchronizer, ethMock, job, cleanup := setupRegistrySync(t) + defer cleanup() + + contractAddress := job.KeeperSpec.ContractAddress.Address() + fromAddress := job.KeeperSpec.FromAddress.Address() + + registryMock := cltest.NewContractMockReceiver(t, ethMock, 
keeper.RegistryABI, contractAddress) + registryMock.MockResponse("getConfig", registryConfig).Once() + registryMock.MockResponse("getKeeperList", []common.Address{fromAddress}).Once() + registryMock.MockResponse("getCanceledUpkeepList", []*big.Int{}).Once() + registryMock.MockResponse("getUpkeepCount", big.NewInt(0)).Once() + + require.NoError(t, synchronizer.Start()) + defer synchronizer.Close() + cltest.WaitForCount(t, store, keeper.Registry{}, 1) + + registryMock.MockResponse("getUpkeep", upkeepConfig).Once() + + head := cltest.MustInsertHead(t, store, 1) + rawLog := types.Log{BlockHash: head.Hash} + log := keeper_registry_wrapper.KeeperRegistryUpkeepRegistered{Id: big.NewInt(3)} + logBroadcast := new(logmocks.Broadcast) + logBroadcast.On("DecodedLog").Return(&log) + logBroadcast.On("RawLog").Return(rawLog) + logBroadcast.On("MarkConsumed").Return(nil) + logBroadcast.On("WasAlreadyConsumed").Return(false, nil) + + // Do the thing + synchronizer.HandleLog(logBroadcast) + synchronizer.ExportedProcessLogs(head) + + cltest.WaitForCount(t, store, keeper.UpkeepRegistration{}, 1) + ethMock.AssertExpectations(t) + logBroadcast.AssertExpectations(t) +} + +func Test_RegistrySynchronizer_UpkeepPerformedLog(t *testing.T) { + g := gomega.NewGomegaWithT(t) + + store, synchronizer, ethMock, job, cleanup := setupRegistrySync(t) + defer cleanup() + + contractAddress := job.KeeperSpec.ContractAddress.Address() + fromAddress := job.KeeperSpec.FromAddress.Address() + + registryMock := cltest.NewContractMockReceiver(t, ethMock, keeper.RegistryABI, contractAddress) + registryMock.MockResponse("getConfig", registryConfig).Once() + registryMock.MockResponse("getKeeperList", []common.Address{fromAddress}).Once() + registryMock.MockResponse("getCanceledUpkeepList", []*big.Int{}).Once() + registryMock.MockResponse("getUpkeepCount", big.NewInt(1)).Once() + registryMock.MockResponse("getUpkeep", upkeepConfig).Once() + + require.NoError(t, synchronizer.Start()) + defer synchronizer.Close() + cltest.WaitForCount(t, store, keeper.Registry{}, 1) + cltest.WaitForCount(t, store, keeper.UpkeepRegistration{}, 1) + + var upkeep keeper.UpkeepRegistration + require.NoError(t, store.DB.First(&upkeep).Error) + upkeep.LastRunBlockHeight = 100 + require.NoError(t, store.DB.Save(&upkeep).Error) + + head := cltest.MustInsertHead(t, store, 1) + rawLog := types.Log{BlockHash: head.Hash} + log := keeper_registry_wrapper.KeeperRegistryUpkeepPerformed{Id: big.NewInt(0)} + logBroadcast := new(logmocks.Broadcast) + logBroadcast.On("DecodedLog").Return(&log) + logBroadcast.On("RawLog").Return(rawLog) + logBroadcast.On("MarkConsumed").Return(nil) + logBroadcast.On("WasAlreadyConsumed").Return(false, nil) + + // Do the thing + synchronizer.HandleLog(logBroadcast) + synchronizer.ExportedProcessLogs(head) + + g.Eventually(func() int64 { + err := store.DB.Find(&upkeep).Error + require.NoError(t, err) + return upkeep.LastRunBlockHeight + }, cltest.DBWaitTimeout, cltest.DBPollingInterval).Should(gomega.Equal(int64(0))) + ethMock.AssertExpectations(t) + logBroadcast.AssertExpectations(t) } diff --git a/core/services/keeper/upkeep_executer.go b/core/services/keeper/upkeep_executer.go index 9a94abfe0f0..3ff8cc14c8c 100644 --- a/core/services/keeper/upkeep_executer.go +++ b/core/services/keeper/upkeep_executer.go @@ -2,10 +2,11 @@ package keeper import ( "context" - "errors" "math/big" "sync" + "github.com/pkg/errors" + "github.com/ethereum/go-ethereum" "github.com/smartcontractkit/chainlink/core/logger" 
"github.com/smartcontractkit/chainlink/core/services" @@ -28,6 +29,7 @@ func NewUpkeepExecutor( db *gorm.DB, ethClient eth.Client, headBroadcaster *services.HeadBroadcaster, + maxGracePeriod int64, ) *UpkeepExecutor { return &UpkeepExecutor{ chStop: make(chan struct{}), @@ -36,6 +38,7 @@ func NewUpkeepExecutor( headBroadcaster: headBroadcaster, job: job, mailbox: utils.NewMailbox(1), + maxGracePeriod: maxGracePeriod, orm: NewORM(db), wgDone: sync.WaitGroup{}, StartStopOnce: utils.StartStopOnce{}, @@ -53,6 +56,7 @@ type UpkeepExecutor struct { headBroadcaster *services.HeadBroadcaster job job.Job mailbox *utils.Mailbox + maxGracePeriod int64 orm ORM wgDone sync.WaitGroup utils.StartStopOnce @@ -109,7 +113,7 @@ func (executor *UpkeepExecutor) processActiveUpkeeps() { ctx, cancel := postgres.DefaultQueryCtx() defer cancel() - activeUpkeeps, err := executor.orm.EligibleUpkeeps(ctx, head.Number) + activeUpkeeps, err := executor.orm.EligibleUpkeeps(ctx, head.Number, executor.maxGracePeriod) if err != nil { logger.Errorf("unable to load active registrations: %v", err) return @@ -120,7 +124,7 @@ func (executor *UpkeepExecutor) processActiveUpkeeps() { done := func() { <-executor.executionQueue; wg.Done() } for _, reg := range activeUpkeeps { executor.executionQueue <- struct{}{} - go executor.execute(reg, done) + go executor.execute(reg, head.Number, done) } wg.Wait() @@ -128,7 +132,7 @@ func (executor *UpkeepExecutor) processActiveUpkeeps() { // execute will call checkForUpkeep and, if it succeeds, trigger a job on the CL node // DEV: must perform contract call "manually" because abigen wrapper can only send tx -func (executor *UpkeepExecutor) execute(upkeep UpkeepRegistration, done func()) { +func (executor *UpkeepExecutor) execute(upkeep UpkeepRegistration, headNumber int64, done func()) { defer done() msg, err := constructCheckUpkeepCallMsg(upkeep) @@ -164,6 +168,16 @@ func (executor *UpkeepExecutor) execute(upkeep UpkeepRegistration, done func()) if err != nil { logger.Error(err) } + + ctxQuery, cancel = postgres.DefaultQueryCtx() + defer cancel() + ctxCombined, cancel = utils.CombinedContext(executor.chStop, ctxQuery) + defer cancel() + // DEV: this is the block that initiated the run, not the block height when broadcast nor the block + // that the tx gets confirmed in. This is fine because this grace period is just used as a fallback + // in case we miss the UpkeepPerformed log or the tx errors. It does not need to be exact. 
+ err = executor.orm.SetLastRunHeightForUpkeepOnJob(ctxCombined, executor.job.ID, upkeep.UpkeepID, headNumber) + logger.ErrorIf(err, "UpkeepExecutor: unable to setLastRunHeightForUpkeep for upkeep") } func constructCheckUpkeepCallMsg(upkeep UpkeepRegistration) (ethereum.CallMsg, error) { diff --git a/core/services/keeper/upkeep_executer_test.go b/core/services/keeper/upkeep_executer_test.go index c0fdf45fbfb..037abd44b99 100644 --- a/core/services/keeper/upkeep_executer_test.go +++ b/core/services/keeper/upkeep_executer_test.go @@ -7,7 +7,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/onsi/gomega" - "github.com/pkg/errors" "github.com/smartcontractkit/chainlink/core/internal/cltest" "github.com/smartcontractkit/chainlink/core/internal/mocks" "github.com/smartcontractkit/chainlink/core/services" @@ -24,18 +23,19 @@ func setup(t *testing.T) ( *mocks.Client, *keeper.UpkeepExecutor, keeper.Registry, + keeper.UpkeepRegistration, func(), ) { store, strCleanup := cltest.NewStore(t) ethMock := new(mocks.Client) registry, job := cltest.MustInsertKeeperRegistry(t, store) headBroadcaster := services.NewHeadBroadcaster() - executor := keeper.NewUpkeepExecutor(job, store.DB, ethMock, headBroadcaster) - cltest.MustInsertUpkeepForRegistry(t, store, registry) + executor := keeper.NewUpkeepExecutor(job, store.DB, ethMock, headBroadcaster, 0) + upkeep := cltest.MustInsertUpkeepForRegistry(t, store, registry) err := executor.Start() require.NoError(t, err) cleanup := func() { executor.Close(); strCleanup() } - return store, ethMock, executor, registry, cleanup + return store, ethMock, executor, registry, upkeep, cleanup } var checkUpkeepResponse = struct { @@ -54,7 +54,7 @@ var checkUpkeepResponse = struct { func Test_UpkeepExecutor_ErrorsIfStartedTwice(t *testing.T) { t.Parallel() - _, _, executor, _, cleanup := setup(t) + _, _, executor, _, _, cleanup := setup(t) defer cleanup() err := executor.Start() // already started in setup() @@ -63,7 +63,7 @@ func Test_UpkeepExecutor_ErrorsIfStartedTwice(t *testing.T) { func Test_UpkeepExecutor_PerformsUpkeep_Happy(t *testing.T) { t.Parallel() - store, ethMock, executor, registry, cleanup := setup(t) + store, ethMock, executor, registry, upkeep, cleanup := setup(t) defer cleanup() registryMock := cltest.NewContractMockReceiver(t, ethMock, keeper.RegistryABI, registry.ContractAddress.Address()) @@ -71,13 +71,14 @@ func Test_UpkeepExecutor_PerformsUpkeep_Happy(t *testing.T) { t.Run("runs upkeep on triggering block number", func(t *testing.T) { head := models.NewHead(big.NewInt(20), cltest.NewHash(), cltest.NewHash(), 1000) - executor.OnNewLongestChain(context.TODO(), head) + executor.OnNewLongestChain(context.Background(), head) cltest.WaitForCount(t, store, models.EthTx{}, 1) + assertLastRunHeight(t, store, upkeep, 20) }) t.Run("skips upkeep on non-triggering block number", func(t *testing.T) { head := models.NewHead(big.NewInt(21), cltest.NewHash(), cltest.NewHash(), 1000) - executor.OnNewLongestChain(context.TODO(), head) + executor.OnNewLongestChain(context.Background(), head) cltest.AssertCountStays(t, store, models.EthTx{}, 1) }) @@ -88,20 +89,19 @@ func Test_UpkeepExecutor_PerformsUpkeep_Error(t *testing.T) { t.Parallel() g := gomega.NewGomegaWithT(t) - _, ethMock, executor, _, cleanup := setup(t) + store, ethMock, executor, registry, _, cleanup := setup(t) defer cleanup() wasCalled := atomic.NewBool(false) - ethMock. - On("CallContract", mock.Anything, mock.Anything, mock.Anything). - Return(nil, errors.New("contract call revert")). 
- Run(func(args mock.Arguments) { - wasCalled.Store(true) - }) + registryMock := cltest.NewContractMockReceiver(t, ethMock, keeper.RegistryABI, registry.ContractAddress.Address()) + registryMock.MockRevertResponse("checkUpkeep").Run(func(args mock.Arguments) { + wasCalled.Store(true) + }) head := models.NewHead(big.NewInt(20), cltest.NewHash(), cltest.NewHash(), 1000) executor.OnNewLongestChain(context.TODO(), head) g.Eventually(wasCalled).Should(gomega.Equal(atomic.NewBool(true))) + cltest.AssertCountStays(t, store, models.EthTx{}, 0) ethMock.AssertExpectations(t) } diff --git a/core/store/migrations/0019_last_run_height_column_to_keeper_table.go b/core/store/migrations/0019_last_run_height_column_to_keeper_table.go new file mode 100644 index 00000000000..cb117e4a96f --- /dev/null +++ b/core/store/migrations/0019_last_run_height_column_to_keeper_table.go @@ -0,0 +1,26 @@ +package migrations + +import ( + "github.com/go-gormigrate/gormigrate/v2" + "gorm.io/gorm" +) + +const up19 = ` + ALTER TABLE upkeep_registrations ADD COLUMN last_run_block_height BIGINT NOT NULL DEFAULT 0; + ` + +const down19 = ` + ALTER TABLE upkeep_registrations DROP COLUMN last_run_block_height; + ` + +func init() { + Migrations = append(Migrations, &gormigrate.Migration{ + ID: "0019_last_run_height_column_to_keeper_table", + Migrate: func(db *gorm.DB) error { + return db.Exec(up19).Error + }, + Rollback: func(db *gorm.DB) error { + return db.Exec(down19).Error + }, + }) +} diff --git a/core/store/orm/config.go b/core/store/orm/config.go index 2074d83371f..0adf3d75735 100644 --- a/core/store/orm/config.go +++ b/core/store/orm/config.go @@ -598,6 +598,14 @@ func (c Config) KeeperRegistrySyncInterval() time.Duration { return c.getWithFallback("KeeperRegistrySyncInterval", parseDuration).(time.Duration) } +func (c Config) KeeperMinimumRequiredConfirmations() uint64 { + return c.viper.GetUint64(EnvVarName("KeeperMinimumRequiredConfirmations")) +} + +func (c Config) KeeperMaximumGracePeriod() int64 { + return c.viper.GetInt64(EnvVarName("KeeperMaximumGracePeriod")) +} + // JSONConsole enables the JSON console. 
func (c Config) JSONConsole() bool { return c.viper.GetBool(EnvVarName("JSONConsole")) diff --git a/core/store/orm/schema.go b/core/store/orm/schema.go index 20b6b56beba..60a1b300266 100644 --- a/core/store/orm/schema.go +++ b/core/store/orm/schema.go @@ -74,7 +74,9 @@ type ConfigSchema struct { JobPipelineReaperInterval time.Duration `env:"JOB_PIPELINE_REAPER_INTERVAL" default:"1h"` JobPipelineReaperThreshold time.Duration `env:"JOB_PIPELINE_REAPER_THRESHOLD" default:"168h"` JSONConsole bool `env:"JSON_CONSOLE" default:"false"` - KeeperRegistrySyncInterval time.Duration `env:"KEEPER_REGISTRY_SYNC_INTERVAL" default:"5m"` + KeeperRegistrySyncInterval time.Duration `env:"KEEPER_REGISTRY_SYNC_INTERVAL" default:"30m"` + KeeperMinimumRequiredConfirmations uint64 `env:"KEEPER_MINIMUM_REQUIRED_CONFIRMATIONS" default:"12"` + KeeperMaximumGracePeriod int64 `env:"KEEPER_MAXIMUM_GRACE_PERIOD" default:"100"` LinkContractAddress string `env:"LINK_CONTRACT_ADDRESS" default:"0x514910771AF9Ca656af840dff83E8264EcF986CA"` ExplorerURL *url.URL `env:"EXPLORER_URL"` ExplorerAccessKey string `env:"EXPLORER_ACCESS_KEY"` diff --git a/core/utils/mailbox.go b/core/utils/mailbox.go index fcc4d794d5a..d87ec0711c8 100644 --- a/core/utils/mailbox.go +++ b/core/utils/mailbox.go @@ -45,7 +45,25 @@ func (m *Mailbox) Deliver(x interface{}) { func (m *Mailbox) Retrieve() interface{} { m.mu.Lock() defer m.mu.Unlock() + return m.retrieve() +} + +// RetrieveIf returns the first item in the mailbox if it meets the provided criteria +func (m *Mailbox) RetrieveIf(conditional func(interface{}) bool) interface{} { + m.mu.Lock() + defer m.mu.Unlock() + if len(m.queue) == 0 { + return nil + } + x := m.queue[len(m.queue)-1] + if !conditional(x) { + return nil + } + return m.retrieve() +} +// DEV: not thread safe - caller must hold lock +func (m *Mailbox) retrieve() interface{} { if len(m.queue) == 0 { return nil } From dba8b165f893207224f04e745216f95670fc465d Mon Sep 17 00:00:00 2001 From: AndrewInsignares Date: Fri, 19 Mar 2021 20:06:01 -0400 Subject: [PATCH 057/116] resolving test from bool pointer change --- core/web/log_controller_test.go | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/core/web/log_controller_test.go b/core/web/log_controller_test.go index 46bd23b9153..fa4488f41b3 100644 --- a/core/web/log_controller_test.go +++ b/core/web/log_controller_test.go @@ -4,7 +4,6 @@ import ( "bytes" "encoding/json" "net/http" - "strconv" "testing" "github.com/bmizerany/assert" @@ -19,7 +18,7 @@ import ( type testCase struct { Description string logLevel string - logSql string + logSql *bool expectedLogLevel zapcore.Level expectedLogSql bool @@ -37,29 +36,31 @@ func TestLogController_SetDebug(t *testing.T) { require.NoError(t, app.Start()) client := app.NewHTTPClient() + sqlTrue := true + sqlFalse := false cases := []testCase{ { Description: "Set log level to debug", logLevel: "debug", - logSql: "", + logSql: nil, expectedLogLevel: zapcore.DebugLevel, }, { Description: "Set log level to info", logLevel: "info", - logSql: "", + logSql: nil, expectedLogLevel: zapcore.InfoLevel, }, { Description: "Set log level to info and log sql to true", logLevel: "info", - logSql: "true", + logSql: &sqlTrue, expectedLogLevel: zapcore.InfoLevel, }, { Description: "Set log level to warn and log sql to false", logLevel: "warn", - logSql: "false", + logSql: &sqlFalse, expectedLogLevel: zapcore.WarnLevel, }, } @@ -80,8 +81,8 @@ func TestLogController_SetDebug(t *testing.T) { if tc.logLevel != "" { assert.Equal(t, 
tc.logLevel, lR.Level) } - if tc.logSql != "" { - assert.Equal(t, tc.logSql, strconv.FormatBool(lR.SqlEnabled)) + if tc.logSql != nil { + assert.Equal(t, tc.logSql, &lR.SqlEnabled) } assert.Equal(t, tc.expectedLogLevel.String(), app.GetStore().Config.LogLevel().String()) }() From 438bb3750e9a2c9aec10c282c9469374edcf774a Mon Sep 17 00:00:00 2001 From: Alex Coventry Date: Mon, 15 Mar 2021 17:30:48 -0400 Subject: [PATCH 058/116] Add binance block headers to test of blockhash store --- .../test/v0.6/BlockhashStore.test.ts | 89 +++++++++++++++++++ 1 file changed, 89 insertions(+) diff --git a/evm-contracts/test/v0.6/BlockhashStore.test.ts b/evm-contracts/test/v0.6/BlockhashStore.test.ts index feb0530b9e6..ac48c454ad0 100644 --- a/evm-contracts/test/v0.6/BlockhashStore.test.ts +++ b/evm-contracts/test/v0.6/BlockhashStore.test.ts @@ -80,12 +80,101 @@ const maticBlocks: TestBlocks[] = [ }, ] +// The following headers from Binance Smart Chain were retrieved using `go run +// binance.go`, where binance.go contains +// +// package main +// +// import ( +// "context" +// "fmt" +// "log" +// "math/big" +// "math/rand" +// "strings" +// +// "github.com/ethereum/go-ethereum/ethclient" +// "github.com/ethereum/go-ethereum/rlp" +// ) +// +// var tsBlockTemplate = ` +// { +// num: %d, +// rlpHeader: ethers.utils.arrayify( +// '0x%x', +// ), +// hash: '0x%x', +// }, +// ` +// +// func main() { +// client, err := ethclient.Dial("https://bsc-dataseed.binance.org/") +// if err != nil { +// log.Fatal(err) +// } +// +// header, err := client.HeaderByNumber(context.Background(), nil) +// if err != nil { +// log.Fatal(err) +// } +// topBlockNum := header.Number.Int64() +// numBlocks := int64(4) +// if topBlockNum < numBlocks { +// log.Fatalf("need at least %d consecutive blocks", numBlocks) +// } +// targetBlock := int64(rand.Intn(int(topBlockNum - numBlocks))) +// simulatedHeadBlock := targetBlock + numBlocks - 1 +// for blockNum := targetBlock; blockNum <= simulatedHeadBlock; blockNum++ { +// header, err := client.HeaderByNumber(context.Background(), big.NewInt(blockNum)) +// if err != nil { +// log.Fatal(err) +// } +// s, err := rlp.EncodeToBytes(header) +// if err != nil { +// log.Fatalf("could not encode header: got error %s from %v", err, header) +// } +// // fmt.Printf("header for block number %d: 0x%x\n", blockNum, s) +// fmt.Printf(strings.TrimLeft(tsBlockTemplate, "\n"), blockNum, s, header.Hash()) +// } +// } +const binanceBlocks: TestBlocks[] = [ + { + num: 1875651, + rlpHeader: ethers.utils.arrayify( + 
'0xf9025da029c26248bebbe0d0acb209d13ac9337c4b5c313696c031dd63b3cd16cbdc0c21a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794b8f7166496996a7da21cf1f1b04d9b3e26a3d077a03f962867b5e86191c3280bd52c4249587e08ddfa9851cea981fb7a5721c9157aa05924ae05d17347687ba81d093aee159ccc65cefc8314b0515ef921e553df05a2a089af99a7afa586e7d67062d051df4255304bb730f6d62fdd3bdb207f1513b23bb901000100000000000000000800000000000000000000000200000000000000800000000000000200100000000000000800000000000000000000000000000000000000000000000000800000140800000008201000001000000202000000001200000000002002020000000000000000080000000000000002000000001000000000000002000000008010000000000000000002040080008400280000c00000081000400000004000000010000000020000000000000000000000000000000000000001000210200000000000000000000800000000000000000000000000002010000004000000000001000000000000000000000800020000000000000000000002831c9ec38401c9c380830789c2845f9faab1b861d883010002846765746888676f312e31332e34856c696e7578000000000000003311ee6830f31dc9116d8a59178b539d91eb6811c1d533c4a59bf77262689c552218bb1eae9cb9d6bf6e1066bea78052c8767313ace71c919d02e70760bd255401a00000000000000000000000000000000000000000000000000000000000000000880000000000000000', + ), + hash: '0xe0a935b1e37420ac1d855215bdad4730a5ffe315eda287c6c18aa86c426ede74', + }, + { + num: 1875652, + rlpHeader: ethers.utils.arrayify( + '0xf9025da0e0a935b1e37420ac1d855215bdad4730a5ffe315eda287c6c18aa86c426ede74a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794c2be4ec20253b8642161bc3f444f53679c1f3d47a0dbf2d40cf5533b65ac9b5be35cac75f7b106244ba14ee476a832c28d46a53452a04f83b8a51d3e17b6a02a0caa79acc7597996c5b8c68dba12c33095ae086089eea02fa2642645b2de17227a6c18c3fa491f54b3bdfe8ac8e04924a33a005a0e9e61b901000100000100000000000008000000000000000000040000000000000000800000000000000000000000000000000800000800000000000400000000000020000040100080000000000000000800000000209000001000000200000000801000400800002002030000000000000100080000002000000002004000011000000002000100040000000000100000000000000000040100009000300000000000000002004000004000000000000000020000002000000010000000200000800000000001000280000000000000008000000000000000800000000000020000002000041000000000000001200020001000080000002a40020040000000000000000002831c9ec48401c9c38083044b40845f9faab4b861d883010002846765746888676f312e31332e34856c696e757800000000000000cfc02687b2394922055792a8e67dad566f6690de06b229d752433b2067207b5f43b9f3c63f91cea5a79bbfc51d9132b933a706ab504038a92f37d57af2bb6c2e01a00000000000000000000000000000000000000000000000000000000000000000880000000000000000', + ), + hash: '0x629e5abcae42940e00d7b38aa7b2ecccfbab582cb7a0b2c3658c2dad8e66549d', + }, + { + num: 1875653, + rlpHeader: ethers.utils.arrayify( + 
'0xf9025da0629e5abcae42940e00d7b38aa7b2ecccfbab582cb7a0b2c3658c2dad8e66549da01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794ce2fd7544e0b2cc94692d4a704debef7bcb61328a0718e7db53041585a814d658c32c88fd550c2c6d200826020008925e0a0f7967fa000fbf842e492a47cc9786885783259a08aed71055e78216006408932515fd960a0c7ffeb2189b8fcde43733cf1958cdb1c38c44052cfbb41125382240c232a98f8b901000000000000000000000000000000000000000002000000000004000000000000000000010000000000000000000000000000000000000200000000004020200000010000000800000000208800000000201000000000000000080000000000000000002002220000000000000000080000000000000000000000001000000000100000000000080010000000000000000000040000000000000000000000000002000000000008000000004000000000000000000000200000000000000000000000000202000000000000000000000000000000000008000000000000002080001000000000000001000000000000000000080100000000000000000000000002831c9ec58401c9c38083025019845f9faab7b861d883010002846765746888676f312e31332e34856c696e7578000000000000008c3c7a5c83e930fbd9d14f83c9b3931f032f0f678919c35b8b32ca6dae9948950bfa326fae134fa234fa7b84c06bdc3f7c6d6414c2a266df1339e563be8bd9cc00a00000000000000000000000000000000000000000000000000000000000000000880000000000000000', + ), + hash: '0xae8574651adabfd0ca55e2cee0e2e639ced73ec1cc0a35debeeceee6943442a9', + }, + { + num: 1875654, + rlpHeader: ethers.utils.arrayify( + '0xf9025da0ae8574651adabfd0ca55e2cee0e2e639ced73ec1cc0a35debeeceee6943442a9a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794d6caa02bbebaebb5d7e581e4b66559e635f805ffa02df6a1173c63ec0a8acc46c818670f030aece1154b9f3bbc70f46a8427dd8dd6a0fa8835c499682d8c90759ff9ea1c291048755b967e48880a0fc21d19ec034a59a0b4e22607cb105c04156044b3f98c2cecae1553b45aa9b6044c37573791a27576b901000200000008000000000001000000000000000000000020000000000000020000000000000000000000000000000000000000000000000040000000000220000000000000000400000000001802000000201000000000000000000000000000000000002002020000000000000000080000000000000000000000001000000000000000000000100000000000000000000000040000000000000000010200200002000400000000400000000200000000000000080000000000000000000008000000000200000000000000000000000000000000000000000000000000002000001000000000000001000000000000000000000000000000000008080000000002831c9ec68401c9c3808301e575845f9faabab861d883010002846765746888676f312e31332e34856c696e757800000000000000399e73b0e963ec029e815623a414aa852508a28dd9799a1bf4e2380c8db687a46cc5b6cc20352ae21e35cfd28124a32fcd49ac8fac5b03901b3e03963e4fff5801a00000000000000000000000000000000000000000000000000000000000000000880000000000000000', + ), + hash: '0x189990455c59a5dea78071df9a2008ede292ff0a062fc5c4c6ca35fbe476f834', + }, +] + beforeAll(async () => { personas = await setup.users(provider).then((x) => x.personas) }) runBlockhashStoreTests(mainnetBlocks, 'Ethereum') runBlockhashStoreTests(maticBlocks, 'Matic') +runBlockhashStoreTests(binanceBlocks, 'Binance Smart Chain') async function runBlockhashStoreTests( blocks: TestBlocks[], From 19c48badb7b10e2f7f43ec91f0daf6c0261b19ab Mon Sep 17 00:00:00 2001 From: AndrewInsignares Date: Sat, 20 Mar 2021 10:04:17 -0400 Subject: [PATCH 059/116] remove unused test var --- core/web/log_controller_test.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/core/web/log_controller_test.go b/core/web/log_controller_test.go index fa4488f41b3..e3f8e32aae8 100644 --- a/core/web/log_controller_test.go +++ b/core/web/log_controller_test.go @@ -21,7 +21,6 @@ type testCase struct { logSql *bool expectedLogLevel zapcore.Level - 
expectedLogSql bool
 }
 
 func TestLogController_SetDebug(t *testing.T) {
@@ -79,7 +78,7 @@ func TestLogController_SetDebug(t *testing.T) {
 			lR := presenters.LogResource{}
 			require.NoError(t, cltest.ParseJSONAPIResponse(t, resp, &lR))
 			if tc.logLevel != "" {
-				assert.Equal(t, tc.logLevel, lR.Level)
+				assert.Equal(t, tc.expectedLogLevel.String(), lR.Level)
 			}
 			if tc.logSql != nil {
 				assert.Equal(t, tc.logSql, &lR.SqlEnabled)

From 27b30bbacb48e33a9f1706d9eb0b9e349ed403bf Mon Sep 17 00:00:00 2001
From: AndrewInsignares
Date: Mon, 22 Mar 2021 07:46:17 -0400
Subject: [PATCH 060/116] adding test for TestORM_SetConfigStrValue

---
 core/store/orm/orm_test.go | 29 +++++++++++++++++++++++++++++
 1 file changed, 29 insertions(+)

diff --git a/core/store/orm/orm_test.go b/core/store/orm/orm_test.go
index e0b6a32118a..47ed1fe7aee 100644
--- a/core/store/orm/orm_test.go
+++ b/core/store/orm/orm_test.go
@@ -7,6 +7,7 @@ import (
 	"math/big"
 	"os"
 	"path/filepath"
+	"strconv"
 	"testing"
 	"time"
 
@@ -1935,3 +1936,31 @@ func TestORM_GetRoundRobinAddress(t *testing.T) {
 		require.Equal(t, "no keys available", err.Error())
 	})
 }
+
+func TestORM_SetConfigStrValue(t *testing.T) {
+	t.Parallel()
+	store, cleanup := cltest.NewStore(t)
+	defer cleanup()
+
+	fieldName := "LogSQLStatements"
+	name := orm.EnvVarName(fieldName)
+	isSqlStatementEnabled := true
+	res := models.Configuration{}
+
+	// Store db config entry as true
+	err := store.SetConfigStrValue(fieldName, strconv.FormatBool(isSqlStatementEnabled))
+	require.NoError(t, err)
+
+	err = store.DB.First(&res, "name = ?", name).Error
+	require.NoError(t, err)
+	require.Equal(t, strconv.FormatBool(isSqlStatementEnabled), res.Value)
+
+	// Update db config entry as false
+	isSqlStatementEnabled = false
+	err = store.SetConfigStrValue(fieldName, strconv.FormatBool(isSqlStatementEnabled))
+	require.NoError(t, err)
+
+	err = store.DB.First(&res, "name = ?", name).Error
+	require.NoError(t, err)
+	require.Equal(t, strconv.FormatBool(isSqlStatementEnabled), res.Value)
+}

From 0a3885cff5f69a6d7dccf2d42e51e0572cdd37bb Mon Sep 17 00:00:00 2001
From: AndrewInsignares
Date: Mon, 22 Mar 2021 11:24:58 -0400
Subject: [PATCH 061/116] reverting go.sum change

---
 go.sum | 1 -
 1 file changed, 1 deletion(-)

diff --git a/go.sum b/go.sum
index 811a07975e3..7e5b0e4e4a9 100644
--- a/go.sum
+++ b/go.sum
@@ -335,7 +335,6 @@ github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2V
 github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I=
 github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=

From b3c082249bab5906be371cb32b0da92428b1b895 Mon Sep 17 00:00:00 2001
From: AndrewInsignares
Date: Mon, 22 Mar 2021 12:31:02 -0400
Subject: [PATCH 062/116] updating HTTP error codes, adding error test cases,
 lint fixes

---
 core/cmd/remote_client.go  | 6 +++---
 core/web/log_controller.go | 4 ++--
core/web/log_controller_test.go | 32 ++++++++++++++++++++++---------- go.sum | 1 + 4 files changed, 28 insertions(+), 15 deletions(-) diff --git a/core/cmd/remote_client.go b/core/cmd/remote_client.go index 78bf12077fe..ee2601ef95d 100644 --- a/core/cmd/remote_client.go +++ b/core/cmd/remote_client.go @@ -1196,7 +1196,7 @@ func normalizePassword(password string) string { // SetLogLevel sets the log level on the node func (cli *Client) SetLogLevel(c *clipkg.Context) (err error) { if !c.Bool("level") { - return cli.errorOut(errors.New("expecting a log level (debug, info, warn, error)")) + return cli.errorOut(errors.New("Expecting a log level (debug, info, warn, error)")) } logLevel := c.Args().Get(0) @@ -1209,7 +1209,7 @@ func (cli *Client) SetLogLevel(c *clipkg.Context) (err error) { buf := bytes.NewBuffer(requestData) resp, err := cli.HTTP.Patch("/v2/log", buf) if err != nil { - return cli.errorOut(errors.Wrap(err, "from toggling debug logging")) + return cli.errorOut(err) } defer func() { if cerr := resp.Body.Close(); cerr != nil { @@ -1241,7 +1241,7 @@ func (cli *Client) SetLogSQL(c *clipkg.Context) (err error) { buf := bytes.NewBuffer(requestData) resp, err := cli.HTTP.Patch("/v2/log", buf) if err != nil { - return cli.errorOut(errors.Wrap(err, "from toggling debug logging")) + return cli.errorOut(err) } defer func() { if cerr := resp.Body.Close(); cerr != nil { diff --git a/core/web/log_controller.go b/core/web/log_controller.go index 6856732dd5d..6944570d1fd 100644 --- a/core/web/log_controller.go +++ b/core/web/log_controller.go @@ -31,7 +31,7 @@ func (cc *LogController) Patch(c *gin.Context) { } if request.Level == "" && request.SqlEnabled == nil { - jsonAPIError(c, http.StatusInternalServerError, fmt.Errorf("please set either logLevel or logSql as params in order to set the log level")) + jsonAPIError(c, http.StatusBadRequest, fmt.Errorf("please set either logLevel or logSql as params in order to set the log level")) return } @@ -39,7 +39,7 @@ func (cc *LogController) Patch(c *gin.Context) { var ll zapcore.Level err := ll.UnmarshalText([]byte(request.Level)) if err != nil { - jsonAPIError(c, http.StatusInternalServerError, err) + jsonAPIError(c, http.StatusBadRequest, err) return } cc.App.GetStore().Config.Set("LOG_LEVEL", ll.String()) diff --git a/core/web/log_controller_test.go b/core/web/log_controller_test.go index e3f8e32aae8..244e685595b 100644 --- a/core/web/log_controller_test.go +++ b/core/web/log_controller_test.go @@ -20,7 +20,8 @@ type testCase struct { logLevel string logSql *bool - expectedLogLevel zapcore.Level + expectedLogLevel zapcore.Level + expectedErrorCode int } func TestLogController_SetDebug(t *testing.T) { @@ -62,20 +63,31 @@ func TestLogController_SetDebug(t *testing.T) { logSql: &sqlFalse, expectedLogLevel: zapcore.WarnLevel, }, + { + Description: "Send no params to updater", + expectedErrorCode: http.StatusBadRequest, + }, + { + Description: "Send bad log level request", + logLevel: "test", + expectedErrorCode: http.StatusBadRequest, + }, } for _, tc := range cases { - func() { - request := web.LogPatchRequest{Level: tc.logLevel, SqlEnabled: tc.logSql} + request := web.LogPatchRequest{Level: tc.logLevel, SqlEnabled: tc.logSql} - requestData, _ := json.Marshal(request) - buf := bytes.NewBuffer(requestData) + requestData, _ := json.Marshal(request) + buf := bytes.NewBuffer(requestData) - resp, cleanup := client.Patch("/v2/log", buf) - defer cleanup() - cltest.AssertServerResponse(t, resp, http.StatusOK) + resp, cleanup := client.Patch("/v2/log", buf) + 
defer cleanup() - lR := presenters.LogResource{} + lR := presenters.LogResource{} + if tc.expectedErrorCode != 0 { + cltest.AssertServerResponse(t, resp, tc.expectedErrorCode) + } else { + cltest.AssertServerResponse(t, resp, http.StatusOK) require.NoError(t, cltest.ParseJSONAPIResponse(t, resp, &lR)) if tc.logLevel != "" { assert.Equal(t, tc.expectedLogLevel.String(), lR.Level) @@ -84,6 +96,6 @@ func TestLogController_SetDebug(t *testing.T) { assert.Equal(t, tc.logSql, &lR.SqlEnabled) } assert.Equal(t, tc.expectedLogLevel.String(), app.GetStore().Config.LogLevel().String()) - }() + } } } diff --git a/go.sum b/go.sum index 7e5b0e4e4a9..811a07975e3 100644 --- a/go.sum +++ b/go.sum @@ -335,6 +335,7 @@ github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2V github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= From c48618b289cf8f94a778d610b9f9e2a580c91a6c Mon Sep 17 00:00:00 2001 From: AndrewInsignares Date: Mon, 22 Mar 2021 15:12:32 -0400 Subject: [PATCH 063/116] adding db contexts for inserts, adding GET /log endpoint for UI support --- core/cmd/remote_client_test.go | 73 +++++++++++++++++++++++++++++++++ core/store/orm/orm.go | 4 +- core/store/orm/orm_test.go | 4 +- core/web/log_controller.go | 19 +++++++-- core/web/log_controller_test.go | 32 ++++++++++++++- core/web/router.go | 1 + 6 files changed, 125 insertions(+), 8 deletions(-) diff --git a/core/cmd/remote_client_test.go b/core/cmd/remote_client_test.go index 618d1419546..849dee80fc5 100644 --- a/core/cmd/remote_client_test.go +++ b/core/cmd/remote_client_test.go @@ -8,12 +8,15 @@ import ( "fmt" "io/ioutil" "math/big" + "net/http" "os" "path/filepath" "strconv" "strings" "testing" + webPresenter "github.com/smartcontractkit/chainlink/core/web/presenters" + "github.com/smartcontractkit/chainlink/core/services/job" "github.com/smartcontractkit/chainlink/core/web" @@ -1488,3 +1491,73 @@ func TestClient_AutoLogin(t *testing.T) { err := client.ListJobsV2(cli.NewContext(nil, fs, nil)) require.NoError(t, err) } + +func TestClient_SetLogLevel(t *testing.T) { + t.Parallel() + + config, cleanup := cltest.NewConfig(t) + defer cleanup() + app, cleanup := cltest.NewApplicationWithConfig(t, config) + defer cleanup() + require.NoError(t, app.Start()) + + user := cltest.MustRandomUser() + require.NoError(t, app.Store.SaveUser(&user)) + sr := models.SessionRequest{ + Email: user.Email, + Password: cltest.Password, + } + client, _ := app.NewClientAndRenderer() + client.CookieAuthenticator = cmd.NewSessionCookieAuthenticator(app.Config.Config, &cmd.MemoryCookieStore{}) + client.HTTP = cmd.NewAuthenticatedHTTPClient(config, client.CookieAuthenticator, sr) + + // Set info level for request + infoLevel := "info" + lPr := web.LogPatchRequest{Level: infoLevel} + request, err := json.Marshal(lPr) 
+	assert.NoError(t, err)
+
+	resp, err := client.HTTP.Patch("/v2/log", bytes.NewReader(request))
+
+	var lR webPresenter.LogResource
+	require.NoError(t, cltest.ParseJSONAPIResponse(t, resp, &lR))
+	assert.NoError(t, err)
+
+	assert.Equal(t, infoLevel, lPr.Level)
+	cltest.AssertServerResponse(t, resp, http.StatusOK)
+}
+
+func TestClient_SetLogSQL(t *testing.T) {
+	t.Parallel()
+
+	config, cleanup := cltest.NewConfig(t)
+	defer cleanup()
+	app, cleanup := cltest.NewApplicationWithConfig(t, config)
+	defer cleanup()
+	require.NoError(t, app.Start())
+
+	user := cltest.MustRandomUser()
+	require.NoError(t, app.Store.SaveUser(&user))
+	sr := models.SessionRequest{
+		Email: user.Email,
+		Password: cltest.Password,
+	}
+	client, _ := app.NewClientAndRenderer()
+	client.CookieAuthenticator = cmd.NewSessionCookieAuthenticator(app.Config.Config, &cmd.MemoryCookieStore{})
+	client.HTTP = cmd.NewAuthenticatedHTTPClient(config, client.CookieAuthenticator, sr)
+
+	// Enable SQL logging for request
+	enabled := true
+	lPr := web.LogPatchRequest{SqlEnabled: &enabled}
+	request, err := json.Marshal(lPr)
+	assert.NoError(t, err)
+
+	resp, err := client.HTTP.Patch("/v2/log", bytes.NewReader(request))
+
+	var lR webPresenter.LogResource
+	require.NoError(t, cltest.ParseJSONAPIResponse(t, resp, &lR))
+	assert.NoError(t, err)
+
+	assert.Equal(t, enabled, lR.SqlEnabled)
+	cltest.AssertServerResponse(t, resp, http.StatusOK)
+}
diff --git a/core/store/orm/orm.go b/core/store/orm/orm.go
index ffa09f8295f..a118264e331 100644
--- a/core/store/orm/orm.go
+++ b/core/store/orm/orm.go
@@ -679,9 +679,9 @@ func (orm *ORM) SetConfigValue(field string, value encoding.TextMarshaler) error
 }
 
 // SetConfigStrValue sets the value for a named configuration entry
-func (orm *ORM) SetConfigStrValue(field string, value string) error {
+func (orm *ORM) SetConfigStrValue(ctx context.Context, field string, value string) error {
 	name := EnvVarName(field)
-	return orm.DB.Where(models.Configuration{Name: name}).
+	return orm.DB.WithContext(ctx).Where(models.Configuration{Name: name}).
 		Assign(models.Configuration{Name: name, Value: value}).
FirstOrCreate(&models.Configuration{}).Error } diff --git a/core/store/orm/orm_test.go b/core/store/orm/orm_test.go index 47ed1fe7aee..6dd4724bd91 100644 --- a/core/store/orm/orm_test.go +++ b/core/store/orm/orm_test.go @@ -1948,7 +1948,7 @@ func TestORM_SetConfigStrValue(t *testing.T) { res := models.Configuration{} // Store db config entry as true - err := store.SetConfigStrValue(fieldName, strconv.FormatBool(isSqlStatementEnabled)) + err := store.SetConfigStrValue(context.TODO(), fieldName, strconv.FormatBool(isSqlStatementEnabled)) require.NoError(t, err) err = store.DB.First(&res, "name = ?", name).Error @@ -1957,7 +1957,7 @@ func TestORM_SetConfigStrValue(t *testing.T) { // Update db config entry as false isSqlStatementEnabled = false - err = store.SetConfigStrValue(fieldName, strconv.FormatBool(isSqlStatementEnabled)) + err = store.SetConfigStrValue(context.TODO(), fieldName, strconv.FormatBool(isSqlStatementEnabled)) require.NoError(t, err) err = store.DB.First(&res, "name = ?", name).Error diff --git a/core/web/log_controller.go b/core/web/log_controller.go index 6944570d1fd..6144f5b410d 100644 --- a/core/web/log_controller.go +++ b/core/web/log_controller.go @@ -22,6 +22,19 @@ type LogPatchRequest struct { SqlEnabled *bool `json:"sqlEnabled"` } +// Get retrieves the current log config settings +func (cc *LogController) Get(c *gin.Context) { + response := &presenters.LogResource{ + JAID: presenters.JAID{ + ID: "log", + }, + Level: cc.App.GetStore().Config.LogLevel().String(), + SqlEnabled: cc.App.GetStore().Config.LogSQLStatements(), + } + + jsonAPIResponse(c, response, "log") +} + // Patch sets a log level and enables sql logging for the logger func (cc *LogController) Patch(c *gin.Context) { request := &LogPatchRequest{} @@ -43,7 +56,7 @@ func (cc *LogController) Patch(c *gin.Context) { return } cc.App.GetStore().Config.Set("LOG_LEVEL", ll.String()) - err = cc.App.GetStore().SetConfigStrValue("LogLevel", ll.String()) + err = cc.App.GetStore().SetConfigStrValue(c.Request.Context(), "LogLevel", ll.String()) if err != nil { jsonAPIError(c, http.StatusInternalServerError, err) return @@ -52,7 +65,7 @@ func (cc *LogController) Patch(c *gin.Context) { if request.SqlEnabled != nil { cc.App.GetStore().Config.Set("LOG_SQL", request.SqlEnabled) - err := cc.App.GetStore().SetConfigStrValue("LogSQLStatements", strconv.FormatBool(*request.SqlEnabled)) + err := cc.App.GetStore().SetConfigStrValue(c.Request.Context(), "LogSQLStatements", strconv.FormatBool(*request.SqlEnabled)) if err != nil { jsonAPIError(c, http.StatusInternalServerError, err) return @@ -61,7 +74,7 @@ func (cc *LogController) Patch(c *gin.Context) { } // Set default logger with new configurations - logger.Default = cc.App.GetStore().Config.CreateProductionLogger() + logger.SetLogger(cc.App.GetStore().Config.CreateProductionLogger()) response := &presenters.LogResource{ JAID: presenters.JAID{ diff --git a/core/web/log_controller_test.go b/core/web/log_controller_test.go index 244e685595b..5cb2101d23f 100644 --- a/core/web/log_controller_test.go +++ b/core/web/log_controller_test.go @@ -24,7 +24,37 @@ type testCase struct { expectedErrorCode int } -func TestLogController_SetDebug(t *testing.T) { +func TestLogController_GetLogConfig(t *testing.T) { + t.Parallel() + + rpcClient, gethClient, _, assertMocksCalled := cltest.NewEthMocksWithStartupAssertions(t) + defer assertMocksCalled() + app, cleanup := cltest.NewApplicationWithKey(t, + eth.NewClientWith(rpcClient, gethClient), + ) + + // Set log config values + logLevel := "warn" 
+ sqlEnabled := true + app.GetStore().Config.Set("LOG_LEVEL", logLevel) + app.GetStore().Config.Set("LOG_SQL", sqlEnabled) + + defer cleanup() + require.NoError(t, app.Start()) + client := app.NewHTTPClient() + + resp, err := client.HTTPClient.Get("/v2/log") + require.NoError(t, err) + + lR := presenters.LogResource{} + cltest.AssertServerResponse(t, resp, http.StatusOK) + require.NoError(t, cltest.ParseJSONAPIResponse(t, resp, &lR)) + + assert.Equal(t, lR.SqlEnabled, sqlEnabled) + assert.Equal(t, lR.Level, logLevel) +} + +func TestLogController_PatchLogConfig(t *testing.T) { t.Parallel() rpcClient, gethClient, _, assertMocksCalled := cltest.NewEthMocksWithStartupAssertions(t) diff --git a/core/web/router.go b/core/web/router.go index 655a39619cb..b6cfba98331 100644 --- a/core/web/router.go +++ b/core/web/router.go @@ -292,6 +292,7 @@ func v2Routes(app chainlink.Application, r *gin.RouterGroup) { authv2.POST("/jobs/:ID/runs", prc.Create) lgc := LogController{app} + authv2.GET("/log", lgc.Get) authv2.PATCH("/log", lgc.Patch) } From f6dee0bd7717e553fa26cc05eb7114bb4db29a54 Mon Sep 17 00:00:00 2001 From: AndrewInsignares Date: Mon, 22 Mar 2021 15:29:31 -0400 Subject: [PATCH 064/116] updating log level cli argument to string --- core/cmd/app.go | 2 +- core/cmd/remote_client.go | 6 +----- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/core/cmd/app.go b/core/cmd/app.go index 6f9eb3232e2..2611317243b 100644 --- a/core/cmd/app.go +++ b/core/cmd/app.go @@ -152,7 +152,7 @@ func NewApp(client *Client) *cli.App { Usage: "Set log level", Action: client.SetLogLevel, Flags: []cli.Flag{ - cli.BoolFlag{ + cli.StringFlag{ Name: "level", Usage: "set log level for node (debug||info||warn||error)", }, diff --git a/core/cmd/remote_client.go b/core/cmd/remote_client.go index ee2601ef95d..5b95ba7078a 100644 --- a/core/cmd/remote_client.go +++ b/core/cmd/remote_client.go @@ -1195,11 +1195,7 @@ func normalizePassword(password string) string { // SetLogLevel sets the log level on the node func (cli *Client) SetLogLevel(c *clipkg.Context) (err error) { - if !c.Bool("level") { - return cli.errorOut(errors.New("Expecting a log level (debug, info, warn, error)")) - } - - logLevel := c.Args().Get(0) + logLevel := c.String("level") request := web.LogPatchRequest{Level: logLevel} requestData, err := json.Marshal(request) if err != nil { From 0ae81cc8f0c6fcab57f29d60fb907e130b21a0fd Mon Sep 17 00:00:00 2001 From: AndrewInsignares Date: Mon, 22 Mar 2021 15:58:05 -0400 Subject: [PATCH 065/116] remove log level env --- tools/docker/.env | 1 - 1 file changed, 1 deletion(-) diff --git a/tools/docker/.env b/tools/docker/.env index 016faeaae5d..179da05c33c 100644 --- a/tools/docker/.env +++ b/tools/docker/.env @@ -8,7 +8,6 @@ CHAINLINK_PGPASSWORD=node ETH_CHAIN_ID=34055 ETH_URL=ws://devnet:8546 EXPLORER_URL=ws://explorer:3001 -LOG_LEVEL=info MINIMUM_CONTRACT_PAYMENT=1000000000000 MIN_INCOMING_CONFIRMATIONS=1 MIN_OUTGOING_CONFIRMATIONS=1 From dab9af0b473dcce7ad567ceea30bdf57bb28cd6f Mon Sep 17 00:00:00 2001 From: AndrewInsignares Date: Mon, 22 Mar 2021 16:06:35 -0400 Subject: [PATCH 066/116] reverting go.sum --- go.sum | 1 - 1 file changed, 1 deletion(-) diff --git a/go.sum b/go.sum index 811a07975e3..c426ac2e9ce 100644 --- a/go.sum +++ b/go.sum @@ -333,7 +333,6 @@ github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY= github.com/golang-sql/civil 
v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
 github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
-github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
 github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=

From 195da360fa0d2ff21fa0ed76a68c7db6a2d64160 Mon Sep 17 00:00:00 2001
From: AndrewInsignares
Date: Mon, 22 Mar 2021 21:54:12 -0400
Subject: [PATCH 067/116] resolving test cases for remote client, adding
 disable flag

---
 core/cmd/app.go                |  6 ++++-
 core/cmd/remote_client.go      | 10 +++++---
 core/cmd/remote_client_test.go | 45 ++++++++++++++++------------------
 3 files changed, 33 insertions(+), 28 deletions(-)

diff --git a/core/cmd/app.go b/core/cmd/app.go
index 2611317243b..5981cb409bf 100644
--- a/core/cmd/app.go
+++ b/core/cmd/app.go
@@ -165,7 +165,11 @@ func NewApp(client *Client) *cli.App {
 				Flags: []cli.Flag{
 					cli.BoolFlag{
 						Name: "enable",
-						Usage: "enable or disable sql logging",
+						Usage: "enable sql logging",
+					},
+					cli.BoolFlag{
+						Name: "disable",
+						Usage: "disable sql logging",
 					},
 				},
 			},
diff --git a/core/cmd/remote_client.go b/core/cmd/remote_client.go
index 5b95ba7078a..0f981256ea6 100644
--- a/core/cmd/remote_client.go
+++ b/core/cmd/remote_client.go
@@ -1220,11 +1220,15 @@
 // SetLogSQL enables or disables the log sql statements
 func (cli *Client) SetLogSQL(c *clipkg.Context) (err error) {
-	if !c.Bool("enable") {
-		return cli.errorOut(errors.New("Must set --enabled = (true || false)"))
+
+	// Enforces selection of --enable or --disable
+	if !c.Bool("enable") && !c.Bool("disable") {
+		return cli.errorOut(errors.New("Must set logSql --enable || --disable"))
 	}
 
-	logSql, err := strconv.ParseBool(c.Args().Get(0))
+	// Sets logSql to true || false based on the --enable flag
+	logSql := c.Bool("enable")
+
 	if err != nil {
 		return cli.errorOut(err)
 	}
diff --git a/core/cmd/remote_client_test.go b/core/cmd/remote_client_test.go
index 849dee80fc5..9899602f11a 100644
--- a/core/cmd/remote_client_test.go
+++ b/core/cmd/remote_client_test.go
@@ -8,15 +8,12 @@ import (
 	"fmt"
 	"io/ioutil"
 	"math/big"
-	"net/http"
 	"os"
 	"path/filepath"
 	"strconv"
 	"strings"
 	"testing"
 
-	webPresenter "github.com/smartcontractkit/chainlink/core/web/presenters"
-
 	"github.com/smartcontractkit/chainlink/core/services/job"
 
 	"github.com/smartcontractkit/chainlink/core/web"
@@ -1511,20 +1508,15 @@ func TestClient_SetLogLevel(t *testing.T) {
 	client.CookieAuthenticator = cmd.NewSessionCookieAuthenticator(app.Config.Config, &cmd.MemoryCookieStore{})
 	client.HTTP = cmd.NewAuthenticatedHTTPClient(config, client.CookieAuthenticator, sr)
 
-	// Set info level for request
-	infoLevel := "info"
-	lPr := web.LogPatchRequest{Level: infoLevel}
-	request, err := json.Marshal(lPr)
-	assert.NoError(t, err)
+	infoLevel := "warn"
+	set := flag.NewFlagSet("test", 0)
+	set.String("level", "warn", "")
+	c := cli.NewContext(nil, set, nil)
 
-	resp, err := client.HTTP.Patch("/v2/log", bytes.NewReader(request))
+	err := client.SetLogLevel(c)
 
-	var lR webPresenter.LogResource
-	require.NoError(t, cltest.ParseJSONAPIResponse(t, resp, &lR))
assert.NoError(t, err) - - assert.Equal(t, infoLevel, lPr.Level) - cltest.AssertServerResponse(t, resp, http.StatusOK) + assert.Equal(t, infoLevel, app.Config.LogLevel().String()) } func TestClient_SetLogSQL(t *testing.T) { @@ -1546,18 +1538,23 @@ func TestClient_SetLogSQL(t *testing.T) { client.CookieAuthenticator = cmd.NewSessionCookieAuthenticator(app.Config.Config, &cmd.MemoryCookieStore{}) client.HTTP = cmd.NewAuthenticatedHTTPClient(config, client.CookieAuthenticator, sr) - // Set info level for request - enabled := true - lPr := web.LogPatchRequest{SqlEnabled: &enabled} - request, err := json.Marshal(lPr) - assert.NoError(t, err) + sqlEnabled := true + set := flag.NewFlagSet("test", 0) + set.Bool("enable", true, "") + c := cli.NewContext(nil, set, nil) - resp, err := client.HTTP.Patch("/v2/log", bytes.NewReader(request)) + err := client.SetLogSQL(c) - var lR webPresenter.LogResource - require.NoError(t, cltest.ParseJSONAPIResponse(t, resp, &lR)) assert.NoError(t, err) + assert.Equal(t, sqlEnabled, app.Config.LogSQLStatements()) + + sqlEnabled = false + set = flag.NewFlagSet("test", 0) + set.Bool("disable", true, "") + c = cli.NewContext(nil, set, nil) + + err = client.SetLogSQL(c) - assert.Equal(t, enabled, lR.SqlEnabled) - cltest.AssertServerResponse(t, resp, http.StatusOK) + assert.NoError(t, err) + assert.Equal(t, sqlEnabled, app.Config.LogSQLStatements()) } From f6a384956a7b6b667ac61291c18f2fd3b2a7b1ec Mon Sep 17 00:00:00 2001 From: AndrewInsignares Date: Mon, 22 Mar 2021 22:40:39 -0400 Subject: [PATCH 068/116] updating test context --- core/cmd/remote_client_test.go | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/core/cmd/remote_client_test.go b/core/cmd/remote_client_test.go index 9899602f11a..35b9c802c70 100644 --- a/core/cmd/remote_client_test.go +++ b/core/cmd/remote_client_test.go @@ -1508,10 +1508,11 @@ func TestClient_SetLogLevel(t *testing.T) { client.CookieAuthenticator = cmd.NewSessionCookieAuthenticator(app.Config.Config, &cmd.MemoryCookieStore{}) client.HTTP = cmd.NewAuthenticatedHTTPClient(config, client.CookieAuthenticator, sr) + cliapp := cli.NewApp() infoLevel := "warn" - set := flag.NewFlagSet("test", 0) + set := flag.NewFlagSet("loglevel", 0) set.String("level", "warn", "") - c := cli.NewContext(nil, set, nil) + c := cli.NewContext(cliapp, set, nil) err := client.SetLogLevel(c) @@ -1538,10 +1539,11 @@ func TestClient_SetLogSQL(t *testing.T) { client.CookieAuthenticator = cmd.NewSessionCookieAuthenticator(app.Config.Config, &cmd.MemoryCookieStore{}) client.HTTP = cmd.NewAuthenticatedHTTPClient(config, client.CookieAuthenticator, sr) + cliapp := cli.NewApp() sqlEnabled := true - set := flag.NewFlagSet("test", 0) + set := flag.NewFlagSet("logsql", 0) set.Bool("enable", true, "") - c := cli.NewContext(nil, set, nil) + c := cli.NewContext(cliapp, set, nil) err := client.SetLogSQL(c) @@ -1549,9 +1551,9 @@ func TestClient_SetLogSQL(t *testing.T) { assert.Equal(t, sqlEnabled, app.Config.LogSQLStatements()) sqlEnabled = false - set = flag.NewFlagSet("test", 0) + set = flag.NewFlagSet("logsql", 0) set.Bool("disable", true, "") - c = cli.NewContext(nil, set, nil) + c = cli.NewContext(cliapp, set, nil) err = client.SetLogSQL(c) From ae93d6d35aa76cef76bdb1e62ee0be7052210fd0 Mon Sep 17 00:00:00 2001 From: AndrewInsignares Date: Mon, 22 Mar 2021 23:22:09 -0400 Subject: [PATCH 069/116] using renderers for test result --- core/cmd/remote_client_test.go | 38 +++++++++++++--------------------- 1 file changed, 14 insertions(+), 24 deletions(-) 
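
The tests in patches 067 through 070 all exercise CLI handlers by hand-building a flag set rather than parsing a real command line. A minimal, self-contained sketch of that pattern, assuming urfave/cli v1 (the cli package these files import):

	package main

	import (
		"flag"
		"fmt"

		"github.com/urfave/cli"
	)

	func main() {
		// A flag.FlagSet stands in for parsed command-line arguments.
		set := flag.NewFlagSet("logsql", 0)
		set.Bool("enable", true, "")

		// cli.NewContext wraps the flag set; a handler such as SetLogSQL
		// then reads values via c.Bool("enable"), exactly as in the tests.
		c := cli.NewContext(nil, set, nil)
		fmt.Println(c.Bool("enable")) // prints: true
	}
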
diff --git a/core/cmd/remote_client_test.go b/core/cmd/remote_client_test.go
index 35b9c802c70..f2234b35148 100644
--- a/core/cmd/remote_client_test.go
+++ b/core/cmd/remote_client_test.go
@@ -14,6 +14,8 @@ import (
 	"strings"
 	"testing"
 
+	webPresenter "github.com/smartcontractkit/chainlink/core/web/presenters"
+
 	"github.com/smartcontractkit/chainlink/core/services/job"
 
 	"github.com/smartcontractkit/chainlink/core/web"
@@ -1504,20 +1506,19 @@ func TestClient_SetLogLevel(t *testing.T) {
 		Email: user.Email,
 		Password: cltest.Password,
 	}
-	client, _ := app.NewClientAndRenderer()
+	client, r := app.NewClientAndRenderer()
 	client.CookieAuthenticator = cmd.NewSessionCookieAuthenticator(app.Config.Config, &cmd.MemoryCookieStore{})
 	client.HTTP = cmd.NewAuthenticatedHTTPClient(config, client.CookieAuthenticator, sr)
 
-	cliapp := cli.NewApp()
 	infoLevel := "warn"
 	set := flag.NewFlagSet("loglevel", 0)
 	set.String("level", "warn", "")
-	c := cli.NewContext(cliapp, set, nil)
-
-	err := client.SetLogLevel(c)
+	c := cli.NewContext(nil, set, nil)
 
-	assert.NoError(t, err)
-	assert.Equal(t, infoLevel, app.Config.LogLevel().String())
+	assert.NoError(t, client.SetLogLevel(c))
+	assert.NotNil(t, r.Renders)
+	logResource := *r.Renders[0].(*webPresenter.LogResource)
+	assert.Equal(t, infoLevel, logResource.Level)
 }
 
 func TestClient_SetLogSQL(t *testing.T) {
@@ -1535,28 +1536,17 @@ func TestClient_SetLogSQL(t *testing.T) {
 		Email: user.Email,
 		Password: cltest.Password,
 	}
-	client, _ := app.NewClientAndRenderer()
+	client, r := app.NewClientAndRenderer()
 	client.CookieAuthenticator = cmd.NewSessionCookieAuthenticator(app.Config.Config, &cmd.MemoryCookieStore{})
 	client.HTTP = cmd.NewAuthenticatedHTTPClient(config, client.CookieAuthenticator, sr)
 
-	cliapp := cli.NewApp()
 	sqlEnabled := true
 	set := flag.NewFlagSet("logsql", 0)
 	set.Bool("enable", true, "")
-	c := cli.NewContext(cliapp, set, nil)
-
-	err := client.SetLogSQL(c)
-
-	assert.NoError(t, err)
-	assert.Equal(t, sqlEnabled, app.Config.LogSQLStatements())
-
-	sqlEnabled = false
-	set = flag.NewFlagSet("logsql", 0)
-	set.Bool("disable", true, "")
-	c = cli.NewContext(cliapp, set, nil)
-
-	err = client.SetLogSQL(c)
+	c := cli.NewContext(nil, set, nil)
 
-	assert.NoError(t, err)
-	assert.Equal(t, sqlEnabled, app.Config.LogSQLStatements())
+	assert.NoError(t, client.SetLogSQL(c))
+	assert.NotNil(t, r.Renders)
+	lr1 := *r.Renders[0].(*webPresenter.LogResource)
+	assert.Equal(t, sqlEnabled, lr1.SqlEnabled)
 }

From a5be40cc9afd5bf7532d54b329969340e45b6b9f Mon Sep 17 00:00:00 2001
From: AndrewInsignares
Date: Tue, 23 Mar 2021 00:48:51 -0400
Subject: [PATCH 070/116] consolidating config log tests

---
 core/cmd/remote_client_test.go | 55 +++++++++++++--------------------
 1 file changed, 20 insertions(+), 35 deletions(-)

diff --git a/core/cmd/remote_client_test.go b/core/cmd/remote_client_test.go
index f2234b35148..193236afa85 100644
--- a/core/cmd/remote_client_test.go
+++ b/core/cmd/remote_client_test.go
@@ -14,8 +14,6 @@ import (
 	"strings"
 	"testing"
 
-	webPresenter "github.com/smartcontractkit/chainlink/core/web/presenters"
-
 	"github.com/smartcontractkit/chainlink/core/services/job"
 
 	"github.com/smartcontractkit/chainlink/core/web"
@@ -1491,7 +1489,7 @@ func TestClient_AutoLogin(t *testing.T) {
 	require.NoError(t, err)
 }
 
-func TestClient_SetLogLevel(t *testing.T) {
+func TestClient_SetLogConfig(t *testing.T) {
 	t.Parallel()
 
 	config, cleanup := cltest.NewConfig(t)
 	defer cleanup()
 	app, cleanup := cltest.NewApplicationWithConfig(t, config)
 	defer cleanup()
 	require.NoError(t, app.Start())
 
 	user := cltest.MustRandomUser()
 	require.NoError(t, app.Store.SaveUser(&user))
 	sr := models.SessionRequest{
 		Email: user.Email,
 		Password: cltest.Password,
 	}
-	client, r
:= app.NewClientAndRenderer() + client, _ := app.NewClientAndRenderer() client.CookieAuthenticator = cmd.NewSessionCookieAuthenticator(app.Config.Config, &cmd.MemoryCookieStore{}) client.HTTP = cmd.NewAuthenticatedHTTPClient(config, client.CookieAuthenticator, sr) infoLevel := "warn" set := flag.NewFlagSet("loglevel", 0) - set.String("level", "warn", "") + set.String("level", infoLevel, "") c := cli.NewContext(nil, set, nil) - assert.NoError(t, client.SetLogLevel(c)) - assert.NotNil(t, r.Renders) - logResource := *r.Renders[0].(*webPresenter.LogResource) - assert.Equal(t, infoLevel, logResource.Level) -} - -func TestClient_SetLogSQL(t *testing.T) { - t.Parallel() + err := client.SetLogLevel(c) + assert.NoError(t, err) + assert.Equal(t, infoLevel, app.Config.LogLevel().String()) - config, cleanup := cltest.NewConfig(t) - defer cleanup() - app, cleanup := cltest.NewApplicationWithConfig(t, config) - defer cleanup() - require.NoError(t, app.Start()) + sqlEnabled := true + set = flag.NewFlagSet("logsql", 0) + set.Bool("enable", sqlEnabled, "") + c = cli.NewContext(nil, set, nil) - user := cltest.MustRandomUser() - require.NoError(t, app.Store.SaveUser(&user)) - sr := models.SessionRequest{ - Email: user.Email, - Password: cltest.Password, - } - client, r := app.NewClientAndRenderer() - client.CookieAuthenticator = cmd.NewSessionCookieAuthenticator(app.Config.Config, &cmd.MemoryCookieStore{}) - client.HTTP = cmd.NewAuthenticatedHTTPClient(config, client.CookieAuthenticator, sr) + err = client.SetLogSQL(c) + assert.NoError(t, err) + assert.Equal(t, sqlEnabled, app.Config.LogSQLStatements()) - sqlEnabled := true - set := flag.NewFlagSet("logsql", 0) - set.Bool("enable", true, "") - c := cli.NewContext(nil, set, nil) + sqlEnabled = false + set = flag.NewFlagSet("logsql", 0) + set.Bool("disable", true, "") + c = cli.NewContext(nil, set, nil) - assert.NoError(t, client.SetLogSQL(c)) - assert.NotNil(t, r.Renders) - lr1 := *r.Renders[0].(*webPresenter.LogResource) - assert.Equal(t, sqlEnabled, lr1.SqlEnabled) + err = client.SetLogSQL(c) + assert.NoError(t, err) + assert.Equal(t, sqlEnabled, app.Config.LogSQLStatements()) } From faad0058e2e1f362ca16b50e7de1be64dc17ad1f Mon Sep 17 00:00:00 2001 From: Connor Stein Date: Tue, 23 Mar 2021 09:27:34 -0400 Subject: [PATCH 071/116] Pipeline run panic protection (#4103) --- .../job/job_pipeline_orm_integration_test.go | 8 +- core/services/pipeline/common.go | 5 + core/services/pipeline/models.go | 13 +++ core/services/pipeline/orm.go | 4 +- core/services/pipeline/runner.go | 97 +++++++++++++++++-- core/services/pipeline/runner_test.go | 34 +++++++ core/services/pipeline/task.panic.go | 22 +++++ 7 files changed, 170 insertions(+), 13 deletions(-) create mode 100644 core/services/pipeline/task.panic.go diff --git a/core/services/job/job_pipeline_orm_integration_test.go b/core/services/job/job_pipeline_orm_integration_test.go index b078860aa0f..0c2f7ae26cf 100644 --- a/core/services/job/job_pipeline_orm_integration_test.go +++ b/core/services/job/job_pipeline_orm_integration_test.go @@ -265,7 +265,7 @@ func TestPipelineORM_Integration(t *testing.T) { // Process the run { var anyRemaining bool - anyRemaining, err = orm.ProcessNextUnfinishedRun(context.Background(), func(_ context.Context, db *gorm.DB, spec pipeline.Spec, l logger.Logger) (trrs pipeline.TaskRunResults, err error) { + anyRemaining, err = orm.ProcessNextUnfinishedRun(context.Background(), func(_ context.Context, db *gorm.DB, spec pipeline.Spec, l logger.Logger) (trrs pipeline.TaskRunResults, retry 
bool, err error) { for dotID, result := range test.answers { var tr pipeline.TaskRun require.NoError(t, db. @@ -280,7 +280,7 @@ func TestPipelineORM_Integration(t *testing.T) { } trrs = append(trrs, trr) } - return trrs, nil + return trrs, false, nil }) require.NoError(t, err) require.True(t, anyRemaining) @@ -288,9 +288,9 @@ func TestPipelineORM_Integration(t *testing.T) { // Ensure that the ORM doesn't think there are more runs { - anyRemaining, err2 := orm.ProcessNextUnfinishedRun(context.Background(), func(_ context.Context, db *gorm.DB, spec pipeline.Spec, l logger.Logger) (pipeline.TaskRunResults, error) { + anyRemaining, err2 := orm.ProcessNextUnfinishedRun(context.Background(), func(_ context.Context, db *gorm.DB, spec pipeline.Spec, l logger.Logger) (pipeline.TaskRunResults, bool, error) { t.Fatal("this callback should never be reached") - return nil, nil + return nil, false, nil }) require.NoError(t, err2) require.False(t, anyRemaining) diff --git a/core/services/pipeline/common.go b/core/services/pipeline/common.go index 15144a1db9c..f759b5a3c43 100644 --- a/core/services/pipeline/common.go +++ b/core/services/pipeline/common.go @@ -282,6 +282,9 @@ const ( TaskTypeJSONParse TaskType = "jsonparse" TaskTypeResult TaskType = "result" TaskTypeAny TaskType = "any" + + // Testing only. + TaskTypePanic TaskType = "panic" ) const ResultTaskDotID = "__result__" @@ -299,6 +302,8 @@ func UnmarshalTaskFromMap(taskType TaskType, taskMap interface{}, dotID string, var task Task switch taskType { + case TaskTypePanic: + task = &PanicTask{BaseTask: BaseTask{dotID: dotID, nPreds: nPreds}} case TaskTypeHTTP: task = &HTTPTask{config: config, BaseTask: BaseTask{dotID: dotID, nPreds: nPreds}} case TaskTypeBridge: diff --git a/core/services/pipeline/models.go b/core/services/pipeline/models.go index 91d2a69604c..3f6d1df927b 100644 --- a/core/services/pipeline/models.go +++ b/core/services/pipeline/models.go @@ -23,6 +23,19 @@ func (Spec) TableName() string { return "pipeline_specs" } +func (s Spec) TasksInDependencyOrderWithResultTask() ([]Task, error) { + d := TaskDAG{} + err := d.UnmarshalText([]byte(s.DotDagSource)) + if err != nil { + return nil, err + } + tasks, err := d.TasksInDependencyOrderWithResultTask() + if err != nil { + return nil, err + } + return tasks, nil +} + type Run struct { ID int64 `json:"-" gorm:"primary_key"` PipelineSpecID int32 `json:"-"` diff --git a/core/services/pipeline/orm.go b/core/services/pipeline/orm.go index bd9f7e24f80..f679d486bfb 100644 --- a/core/services/pipeline/orm.go +++ b/core/services/pipeline/orm.go @@ -141,7 +141,7 @@ func (o *orm) CreateRun(ctx context.Context, jobID int32, meta map[string]interf // TODO: Remove generation of special "result" task // TODO: Remove the unique index on successor_id // https://www.pivotaltracker.com/story/show/176557536 -type ProcessRunFunc func(ctx context.Context, txdb *gorm.DB, spec Spec, l logger.Logger) (TaskRunResults, error) +type ProcessRunFunc func(ctx context.Context, txdb *gorm.DB, spec Spec, l logger.Logger) (TaskRunResults, bool, error) // ProcessNextUnfinishedRun pulls the next available unfinished run from the // database and passes it into the provided ProcessRunFunc for execution. 
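
The signature change above threads a retry flag through the run-processing callback: a ProcessRunFunc now returns (results, retry, error), where a true retry value signals that the run panicked and should be re-attempted rather than recorded as finished. The runner's ExecuteRun (shown below) consumes that flag with a backoff loop, while the ORM's processNextUnfinishedRun discards it. A minimal conforming callback, using only names from this patch, with an illustrative no-op body:

	// Satisfies the new ProcessRunFunc shape: returns no task results,
	// does not request a retry, and reports no error.
	fn := func(ctx context.Context, txdb *gorm.DB, spec pipeline.Spec, l logger.Logger) (pipeline.TaskRunResults, bool, error) {
		return nil, false, nil
	}
	anyRemaining, err := orm.ProcessNextUnfinishedRun(context.Background(), fn)
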
@@ -191,7 +191,7 @@ func (o *orm) processNextUnfinishedRun(ctx context.Context, fn ProcessRunFunc) e
 	}
 
 	logger.Infow("Pipeline run started", "runID", pRun.ID)
 
-	trrs, err := fn(ctx, tx, pRun.PipelineSpec, *logger.Default)
+	trrs, _, err := fn(ctx, tx, pRun.PipelineSpec, *logger.Default)
 	if err != nil {
 		return errors.Wrap(err, "error calling ProcessRunFunc")
 	}
diff --git a/core/services/pipeline/runner.go b/core/services/pipeline/runner.go
index 94babbf89b6..4d1bbda61c8 100644
--- a/core/services/pipeline/runner.go
+++ b/core/services/pipeline/runner.go
@@ -3,10 +3,14 @@ package pipeline
 import (
 	"context"
 	"fmt"
+	"runtime/debug"
 	"sort"
 	"sync"
 	"time"
 
+	"github.com/jpillora/backoff"
+	"gopkg.in/guregu/null.v4"
+
 	"github.com/smartcontractkit/chainlink/core/store/models"
 
 	"github.com/pkg/errors"
@@ -53,6 +57,7 @@ var (
 		},
 		[]string{"pipeline_spec_id", "task_type"},
 	)
+	ErrRunPanicked = errors.New("pipeline run panicked")
 )
 
 func NewRunner(orm ORM, config Config) *runner {
@@ -222,11 +227,81 @@ func (m *memoryTaskRun) results() (a []Result) {
 	return
 }
 
-func (r *runner) ExecuteRun(ctx context.Context, spec Spec, l logger.Logger) (trrs TaskRunResults, err error) {
-	return r.executeRun(ctx, r.orm.DB(), spec, l)
+func (r *runner) ExecuteRun(ctx context.Context, spec Spec, l logger.Logger) (TaskRunResults, error) {
+	var (
+		trrs TaskRunResults
+		err error
+		retry bool
+		i int
+		numPanicRetries = 5
+	)
+	// The minimum backoff must be shorter than the maximum.
+	b := &backoff.Backoff{
+		Min: 100 * time.Millisecond,
+		Max: 1 * time.Second,
+		Factor: 2,
+		Jitter: false,
+	}
+	for i = 0; i < numPanicRetries; i++ {
+		trrs, retry, err = r.executeRun(ctx, r.orm.DB(), spec, l)
+		if retry {
+			time.Sleep(b.Duration())
+			continue
+		} else {
+			break
+		}
+	}
+	if i == numPanicRetries {
+		return r.panickedRunResults(spec)
+	}
+	return trrs, err
 }
 
-func (r *runner) executeRun(ctx context.Context, txdb *gorm.DB, spec Spec, l logger.Logger) (TaskRunResults, error) {
+func (r *runner) panickedRunResults(spec Spec) ([]TaskRunResult, error) { + var panickedTrrs []TaskRunResult + var finalVals []interface{} + var finalErrs FinalErrors + tasks, err := spec.TasksInDependencyOrderWithResultTask() + if err != nil { + return nil, err + } + f := time.Now() + for _, task := range tasks { + if task.Type() == TaskTypeResult { + continue + } + if task.OutputTask() != nil && task.OutputTask().Type() == TaskTypeResult { + finalVals = append(finalVals, nil) + finalErrs = append(finalErrs, null.StringFrom(ErrRunPanicked.Error())) + } + panickedTrrs = append(panickedTrrs, TaskRunResult{ + Task: task, + TaskRun: TaskRun{ + CreatedAt: f, + FinishedAt: &f, + Index: task.OutputIndex(), + DotID: task.DotID(), + }, + Result: Result{Value: nil, Error: ErrRunPanicked}, + FinishedAt: time.Now(), + IsTerminal: false, + }) + } + panickedTrrs = append(panickedTrrs, TaskRunResult{ + TaskRun: TaskRun{ + CreatedAt: f, + FinishedAt: &f, + Index: 0, + DotID: ResultTaskDotID, + }, + Result: Result{Value: finalVals, Error: finalErrs}, + FinishedAt: f, + IsTerminal: true, + }) + return panickedTrrs, nil +} + +func (r *runner) executeRun(ctx context.Context, txdb *gorm.DB, spec Spec, l logger.Logger) (TaskRunResults, bool, error) { l.Debugw("Initiating tasks for pipeline run of spec", "spec", spec.ID) var ( err error @@ -237,13 +312,13 @@ func (r *runner) executeRun(ctx context.Context, txdb *gorm.DB, spec Spec, l log d := TaskDAG{} err = d.UnmarshalText([]byte(spec.DotDagSource)) if err != nil { - return trrs, err + return trrs, false, err } // Find "firsts" and work forwards tasks, err := d.TasksInDependencyOrderWithResultTask() if err != nil { - return nil, err + return nil, false, err } all := make(map[string]*memoryTaskRun) var graph []*memoryTaskRun @@ -285,10 +360,18 @@ func (r *runner) executeRun(ctx context.Context, txdb *gorm.DB, spec Spec, l log // 3. Execute tasks using "fan in" job processing var updateMu sync.Mutex var wg sync.WaitGroup + var retry bool wg.Add(len(graph)) for _, mtr := range graph { go func(m *memoryTaskRun) { - defer wg.Done() + defer func() { + if err := recover(); err != nil { + logger.Default.Errorw("goroutine panicked executing run", "panic", err, "stacktrace", string(debug.Stack())) + // No mutex needed: if any goroutine panics, we retry the run. 
+ retry = true + } + wg.Done() + }() for m != nil { m.predMu.RLock() nPredecessors := m.nPredecessors @@ -361,7 +444,7 @@ func (r *runner) executeRun(ctx context.Context, txdb *gorm.DB, spec Spec, l log runTime := time.Since(startRun) l.Debugw("Finished all tasks for pipeline run", "specID", spec.ID, "runTime", runTime) - return trrs, err + return trrs, retry, err } func (r *runner) executeTaskRun(ctx context.Context, spec Spec, task Task, taskRun TaskRun, inputs []Result, l logger.Logger) Result { diff --git a/core/services/pipeline/runner_test.go b/core/services/pipeline/runner_test.go index e1e9a77e12c..14a8b437c90 100644 --- a/core/services/pipeline/runner_test.go +++ b/core/services/pipeline/runner_test.go @@ -9,6 +9,9 @@ import ( "testing" "time" + "github.com/stretchr/testify/assert" + "gopkg.in/guregu/null.v4" + "github.com/smartcontractkit/chainlink/core/logger" "github.com/shopspring/decimal" @@ -176,3 +179,34 @@ answer1 [type=median index=0]; } } } + +func TestPanicTask_Run(t *testing.T) { + store, cleanup := cltest.NewStore(t) + defer cleanup() + orm := new(mocks.ORM) + orm.On("DB").Return(store.DB) + s := httptest.NewServer(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) { + res.WriteHeader(http.StatusOK) + res.Write([]byte(`{"result":10}`)) + })) + r := pipeline.NewRunner(orm, store.Config) + trrs, err := r.ExecuteRun(context.Background(), pipeline.Spec{ + DotDagSource: fmt.Sprintf(` +ds1 [type=http url="%s"] +ds_parse [type=jsonparse path="result"] +ds_multiply [type=multiply times=10] +ds_panic [type=panic msg="oh no"] +ds1->ds_parse->ds_multiply->ds_panic;`, s.URL), + }, *logger.Default) + require.NoError(t, err) + require.Equal(t, 5, len(trrs)) + assert.Equal(t, []interface{}{nil}, trrs.FinalResult().Values) + assert.Equal(t, pipeline.ErrRunPanicked.Error(), trrs.FinalResult().Errors[0].Error()) + for _, trr := range trrs { + if trr.IsTerminal { + continue + } + assert.Equal(t, null.NewString("pipeline run panicked", true), trr.Result.ErrorDB()) + assert.Equal(t, true, trr.Result.OutputDB().Null) + } +} diff --git a/core/services/pipeline/task.panic.go b/core/services/pipeline/task.panic.go new file mode 100644 index 00000000000..55da3b86030 --- /dev/null +++ b/core/services/pipeline/task.panic.go @@ -0,0 +1,22 @@ +package pipeline + +import "context" + +type PanicTask struct { + BaseTask `mapstructure:",squash"` + Msg string +} + +var _ Task = (*PanicTask)(nil) + +func (t *PanicTask) Type() TaskType { + return TaskTypePanic +} + +func (t *PanicTask) SetDefaults(_ map[string]string, _ TaskDAG, _ taskDAGNode) error { + return nil +} + +func (t *PanicTask) Run(_ context.Context, _ TaskRun, _ []Result) (result Result) { + panic(t.Msg) +} From f16ee079d9d83b20a05cbb4268ae0b9193d50dc9 Mon Sep 17 00:00:00 2001 From: James Kong Date: Mon, 22 Mar 2021 14:41:39 +0800 Subject: [PATCH 072/116] Refactors FMV2 polling and adds testing. 
Also cleans up logging This change is to help with adding polling jitter to FM jobs --- core/services/fluxmonitorv2/flux_monitor.go | 281 ++++-------- .../fluxmonitorv2/flux_monitor_test.go | 12 +- core/services/fluxmonitorv2/idle_timer.go | 53 --- .../services/fluxmonitorv2/idle_timer_test.go | 32 -- core/services/fluxmonitorv2/poll_manager.go | 249 +++++++---- .../fluxmonitorv2/poll_manager_test.go | 414 ++++++++++++++++++ core/services/fluxmonitorv2/poll_ticker.go | 60 --- .../fluxmonitorv2/poll_ticker_test.go | 31 -- 8 files changed, 673 insertions(+), 459 deletions(-) delete mode 100644 core/services/fluxmonitorv2/idle_timer.go delete mode 100644 core/services/fluxmonitorv2/idle_timer_test.go create mode 100644 core/services/fluxmonitorv2/poll_manager_test.go delete mode 100644 core/services/fluxmonitorv2/poll_ticker.go delete mode 100644 core/services/fluxmonitorv2/poll_ticker_test.go diff --git a/core/services/fluxmonitorv2/flux_monitor.go b/core/services/fluxmonitorv2/flux_monitor.go index d5f9e82c9de..6a0bb992cc0 100644 --- a/core/services/fluxmonitorv2/flux_monitor.go +++ b/core/services/fluxmonitorv2/flux_monitor.go @@ -27,8 +27,6 @@ import ( "gorm.io/gorm" ) -const hibernationPollPeriod = 24 * time.Hour - // FluxMonitor polls external price adapters via HTTP to check for price swings. type FluxMonitor struct { jobID int32 @@ -39,6 +37,7 @@ type FluxMonitor struct { jobORM job.ORM pipelineORM pipeline.ORM keyStore KeyStoreInterface + pollManager *PollManager paymentChecker *PaymentChecker contractSubmitter ContractSubmitter deviationChecker *DeviationChecker @@ -50,16 +49,10 @@ type FluxMonitor struct { logger *logger.Logger precision int32 - isHibernating bool connected *abool.AtomicBool backlog *utils.BoundedPriorityQueue chProcessLogs chan struct{} - pollTicker *PollTicker - hibernationTimer utils.ResettableTimer - idleTimer *IdleTimer - roundTimer utils.ResettableTimer - readyForLogs func() chStop chan struct{} waitOnStop chan struct{} @@ -73,8 +66,7 @@ func NewFluxMonitor( jobORM job.ORM, pipelineORM pipeline.ORM, keyStore KeyStoreInterface, - pollTicker *PollTicker, - idleTimer *IdleTimer, + pollManager *PollManager, paymentChecker *PaymentChecker, contractAddress common.Address, contractSubmitter ContractSubmitter, @@ -94,25 +86,19 @@ func NewFluxMonitor( jobORM: jobORM, pipelineORM: pipelineORM, keyStore: keyStore, - pollTicker: pollTicker, - idleTimer: idleTimer, + pollManager: pollManager, paymentChecker: paymentChecker, contractAddress: contractAddress, contractSubmitter: contractSubmitter, deviationChecker: deviationChecker, submissionChecker: submissionChecker, flags: flags, - - readyForLogs: readyForLogs, - logBroadcaster: logBroadcaster, - fluxAggregator: fluxAggregator, - precision: precision, - logger: fmLogger, - - hibernationTimer: utils.NewResettableTimer(), - roundTimer: utils.NewResettableTimer(), - isHibernating: false, - connected: abool.New(), + readyForLogs: readyForLogs, + logBroadcaster: logBroadcaster, + fluxAggregator: fluxAggregator, + precision: precision, + logger: fmLogger, + connected: abool.New(), backlog: utils.NewBoundedPriorityQueue(map[uint]uint{ // We want reconnecting nodes to be able to submit to a round // that hasn't hit maxAnswers yet, as well as the newest round. 
@@ -207,6 +193,17 @@ func NewFromJobSpec( ), ) + pollManager := NewPollManager( + PollManagerConfig{ + PollTickerInterval: fmSpec.PollTimerPeriod, + PollTickerDisabled: fmSpec.PollTimerDisabled, + IdleTimerPeriod: fmSpec.IdleTimerPeriod, + IdleTimerDisabled: fmSpec.IdleTimerDisabled, + HibernationPollPeriod: 24 * time.Hour, // Not currently configurable + }, + fmLogger, + ) + return NewFluxMonitor( jobSpec.ID, pipelineRun, @@ -214,8 +211,7 @@ func NewFromJobSpec( jobORM, pipelineORM, keyStore, - NewPollTicker(fmSpec.PollTimerPeriod, fmSpec.PollTimerDisabled), - NewIdleTimer(fmSpec.IdleTimerPeriod, fmSpec.IdleTimerDisabled), + pollManager, paymentChecker, fmSpec.ContractAddress.Address(), contractSubmitter, @@ -251,30 +247,25 @@ func (fm *FluxMonitor) Start() error { return nil } -func (fm *FluxMonitor) setIsHibernatingStatus() { +func (fm *FluxMonitor) IsHibernating() bool { if !fm.flags.ContractExists() { - fm.isHibernating = false - - return + return false } isFlagLowered, err := fm.flags.IsLowered(fm.contractAddress) if err != nil { - fm.logger.Errorf("unable to set hibernation status: %v", err) + fm.logger.Errorf("unable to determine hibernation status: %v", err) - fm.isHibernating = false - } else { - fm.isHibernating = !isFlagLowered + return false } + + return !isFlagLowered } // Close implements the job.Service interface. It stops this instance from // polling, cleaning up resources. func (fm *FluxMonitor) Close() error { - fm.pollTicker.Stop() - fm.hibernationTimer.Stop() - fm.idleTimer.Stop() - fm.roundTimer.Stop() + fm.pollManager.Stop() close(fm.chStop) <-fm.waitOnStop @@ -390,10 +381,14 @@ func (fm *FluxMonitor) consume() { } fm.readyForLogs() - fm.setIsHibernatingStatus() - fm.setInitialTickers() + fm.pollManager.Start(fm.IsHibernating(), fm.initialRoundState()) fm.performInitialPoll() + tickLogger := fm.logger.With( + "pollInterval", fm.pollManager.cfg.PollTickerInterval, + "idlePeriod", fm.pollManager.cfg.IdleTimerPeriod, + ) + for { select { case <-fm.chStop: @@ -402,20 +397,20 @@ func (fm *FluxMonitor) consume() { case <-fm.chProcessLogs: fm.processLogs() - case <-fm.pollTicker.Ticks(): - fm.logger.Debugw("Poll ticker fired", fm.loggerFieldsForTick()...) + case <-fm.pollManager.PollTickerTicks(): + tickLogger.Debugw("Poll ticker fired") fm.pollIfEligible(fm.deviationChecker) - case <-fm.idleTimer.Ticks(): - fm.logger.Debugw("Idle ticker fired", fm.loggerFieldsForTick()...) + case <-fm.pollManager.IdleTimerTicks(): + tickLogger.Debugw("Idle timer fired") fm.pollIfEligible(NewZeroDeviationChecker()) - case <-fm.roundTimer.Ticks(): - fm.logger.Debugw("Round timeout ticker fired", fm.loggerFieldsForTick()...) + case <-fm.pollManager.RoundTimerTicks(): + tickLogger.Debugw("Round timer fired") fm.pollIfEligible(fm.deviationChecker) - case <-fm.hibernationTimer.Ticks(): - fm.logger.Debugw("Hibernation timout ticker fired", fm.loggerFieldsForTick()...) 
+ case <-fm.pollManager.HibernationTimerTicks(): + tickLogger.Debugw("Hibernation timer fired") fm.pollIfEligible(NewZeroDeviationChecker()) } } @@ -458,30 +453,11 @@ func (fm *FluxMonitor) SetOracleAddress() error { // performInitialPoll performs the initial poll if required func (fm *FluxMonitor) performInitialPoll() { - if fm.shouldPerformInitialPoll() { + if fm.pollManager.ShouldPerformInitialPoll() { fm.pollIfEligible(fm.deviationChecker) } } -func (fm *FluxMonitor) shouldPerformInitialPoll() bool { - return !(fm.pollTicker.IsDisabled() && fm.idleTimer.IsDisabled() || fm.isHibernating) -} - -// hibernate restarts the PollingDeviationChecker in hibernation mode -func (fm *FluxMonitor) hibernate() { - fm.logger.Info("entering hibernation mode") - fm.isHibernating = true - fm.resetTickers(flux_aggregator_wrapper.OracleRoundState{}) -} - -// reactivate restarts the PollingDeviationChecker without hibernation mode -func (fm *FluxMonitor) reactivate() { - fm.logger.Info("exiting hibernation mode, reactivating contract") - fm.isHibernating = false - fm.setInitialTickers() - fm.pollIfEligible(NewZeroDeviationChecker()) -} - func (fm *FluxMonitor) processLogs() { for !fm.backlog.Empty() { maybeBroadcast := fm.backlog.Take() @@ -517,12 +493,14 @@ func (fm *FluxMonitor) processLogs() { isFlagLowered, err = fm.flags.IsLowered(fm.contractAddress) fm.logger.ErrorIf(err, "Error determining if flag is still raised") if !isFlagLowered { - fm.hibernate() + fm.pollManager.Hibernate() } err = broadcast.MarkConsumed() case *flags_wrapper.FlagsFlagLowered: - fm.reactivate() + fm.pollManager.Awaken(fm.initialRoundState()) + fm.pollIfEligible(NewZeroDeviationChecker()) + err = broadcast.MarkConsumed() default: @@ -537,21 +515,34 @@ func (fm *FluxMonitor) processLogs() { // answer. We update our view of the oracleRoundState in case this log was // generated by a chain reorg. func (fm *FluxMonitor) respondToAnswerUpdatedLog(log flux_aggregator_wrapper.FluxAggregatorAnswerUpdated) { - fm.logger.Debugw("AnswerUpdated log", fm.loggerFieldsForAnswerUpdated(log)...) + answerUpdatedLogger := fm.logger.With( + "round", log.RoundId, + "answer", log.Current.String(), + "timestamp", log.UpdatedAt.String(), + ) + + answerUpdatedLogger.Debug("AnswerUpdated log") roundState, err := fm.roundState(0) if err != nil { - logger.Errorw(fmt.Sprintf("could not fetch oracleRoundState: %v", err), fm.loggerFieldsForAnswerUpdated(log)...) + answerUpdatedLogger.Errorf("could not fetch oracleRoundState: %v", err) + return } - fm.resetTickers(roundState) + + fm.pollManager.Reset(roundState) } // The NewRound log tells us that an oracle has initiated a new round. This tells us that we // need to poll and submit an answer to the contract regardless of the deviation. func (fm *FluxMonitor) respondToNewRoundLog(log flux_aggregator_wrapper.FluxAggregatorNewRound) { - fm.logger.Debugw("NewRound log", fm.loggerFieldsForNewRound(log)...) + newRoundLogger := fm.logger.With( + "round", log.RoundId, + "startedBy", log.StartedBy.Hex(), + "startedAt", log.StartedAt.String(), + ) + newRoundLogger.Debug("NewRound log") promfm.SetBigInt(promfm.SeenRound.WithLabelValues(fmt.Sprintf("%d", fm.jobID)), log.RoundId) // @@ -600,25 +591,25 @@ func (fm *FluxMonitor) respondToNewRoundLog(log flux_aggregator_wrapper.FluxAggr logRoundID := uint32(log.RoundId.Uint64()) // We always want to reset the idle timer upon receiving a NewRound log, so we do it before any `return` statements. 
- fm.resetIdleTimer(log.StartedAt.Uint64()) + fm.pollManager.ResetIdleTimer(log.StartedAt.Uint64()) mostRecentRoundID, err := fm.orm.MostRecentFluxMonitorRoundID(fm.contractAddress) if err != nil && err != gorm.ErrRecordNotFound { - fm.logger.Errorw(fmt.Sprintf("error fetching Flux Monitor most recent round ID from DB: %v", err), fm.loggerFieldsForNewRound(log)...) + newRoundLogger.Errorf("error fetching Flux Monitor most recent round ID from DB: %v", err) return } if logRoundID < mostRecentRoundID { err = fm.orm.DeleteFluxMonitorRoundsBackThrough(fm.contractAddress, logRoundID) if err != nil { - fm.logger.Errorw(fmt.Sprintf("error deleting reorged Flux Monitor rounds from DB: %v", err), fm.loggerFieldsForNewRound(log)...) + newRoundLogger.Errorf("error deleting reorged Flux Monitor rounds from DB: %v", err) return } } roundStats, jobRunStatus, err := fm.statsAndStatusForRound(logRoundID) if err != nil { - fm.logger.Errorw(fmt.Sprintf("error determining round stats / run status for round: %v", err), fm.loggerFieldsForNewRound(log)...) + newRoundLogger.Errorf("error determining round stats / run status for round: %v", err) return } @@ -629,36 +620,37 @@ func (fm *FluxMonitor) respondToNewRoundLog(log flux_aggregator_wrapper.FluxAggr // If our previous attempt is still pending, return early and don't re-submit // If our previous attempt is already over (completed or errored), we should retry if !jobRunStatus.Finished() { - fm.logger.Debugw("Ignoring new round request: started round simultaneously with another node", fm.loggerFieldsForNewRound(log)...) + newRoundLogger.Debug("Ignoring new round request: started round simultaneously with another node") return } } // Ignore rounds we started if fm.oracleAddress == log.StartedBy { - fm.logger.Infow("Ignoring new round request: we started this round", fm.loggerFieldsForNewRound(log)...) + newRoundLogger.Info("Ignoring new round request: we started this round") return } // Ignore rounds we're not eligible for, or for which we won't be paid roundState, err := fm.roundState(logRoundID) if err != nil { - fm.logger.Errorw(fmt.Sprintf("Ignoring new round request: error fetching eligibility from contract: %v", err), fm.loggerFieldsForNewRound(log)...) + newRoundLogger.Errorf("Ignoring new round request: error fetching eligibility from contract: %v", err) return } - fm.resetTickers(roundState) + + fm.pollManager.Reset(roundState) err = fm.checkEligibilityAndAggregatorFunding(roundState) if err != nil { - fm.logger.Infow(fmt.Sprintf("Ignoring new round request: %v", err), fm.loggerFieldsForNewRound(log)...) + newRoundLogger.Infof("Ignoring new round request: %v", err) return } - logger.Infow("Responding to new round request", fm.loggerFieldsForNewRound(log)...) + newRoundLogger.Info("Responding to new round request") // Call the v2 pipeline to execute a new job run runID, answer, err := fm.pipelineRun.Execute() if err != nil { - fm.logger.Errorw(fmt.Sprintf("unable to fetch median price: %v", err), fm.loggerFieldsForNewRound(log)...) 
+ newRoundLogger.Errorf("unable to fetch median price: %v", err) return } @@ -668,12 +660,12 @@ func (fm *FluxMonitor) respondToNewRoundLog(log flux_aggregator_wrapper.FluxAggr } if roundState.PaymentAmount == nil { - fm.logger.Error("roundState.PaymentAmount shouldn't be nil") + newRoundLogger.Error("roundState.PaymentAmount shouldn't be nil") } err = fm.submitTransaction(runID, *answer, roundState.RoundId) if err != nil { - fm.logger.Errorw(fmt.Sprintf("unable to create job run: %v", err), fm.loggerFieldsForNewRound(log)...) + newRoundLogger.Errorf("unable to create job run: %v", err) return } @@ -739,7 +731,7 @@ func (fm *FluxMonitor) pollIfEligible(deviationChecker *DeviationChecker) { return } - fm.resetTickers(roundState) + fm.pollManager.Reset(roundState) l = l.With("reportableRound", roundState.RoundId) roundStats, jobRunStatus, err := fm.statsAndStatusForRound(roundState.RoundId) @@ -760,7 +752,7 @@ func (fm *FluxMonitor) pollIfEligible(deviationChecker *DeviationChecker) { // Don't submit if we're not eligible, or won't get paid err = fm.checkEligibilityAndAggregatorFunding(roundState) if err != nil { - l.Infow(fmt.Sprintf("skipping poll: %v", err)) + l.Infof("skipping poll: %v", err) return } @@ -867,85 +859,6 @@ func (fm *FluxMonitor) initialRoundState() flux_aggregator_wrapper.OracleRoundSt return latestRoundState } -func (fm *FluxMonitor) resetTickers(roundState flux_aggregator_wrapper.OracleRoundState) { - fm.resetPollTicker() - fm.resetHibernationTimer() - fm.resetIdleTimer(roundState.StartedAt) - fm.resetRoundTimer(roundStateTimesOutAt(roundState)) -} - -func (fm *FluxMonitor) setInitialTickers() { - fm.resetTickers(fm.initialRoundState()) -} - -func (fm *FluxMonitor) resetPollTicker() { - if fm.pollTicker.IsEnabled() && !fm.isHibernating { - fm.pollTicker.Resume() - } else { - fm.pollTicker.Pause() - } -} - -func (fm *FluxMonitor) resetHibernationTimer() { - if !fm.isHibernating { - fm.hibernationTimer.Stop() - } else { - fm.hibernationTimer.Reset(hibernationPollPeriod) - } -} - -func (fm *FluxMonitor) resetRoundTimer(roundTimesOutAt uint64) { - if fm.isHibernating { - fm.roundTimer.Stop() - return - } - - loggerFields := fm.loggerFields("timesOutAt", roundTimesOutAt) - - if roundTimesOutAt == 0 { - fm.roundTimer.Stop() - fm.logger.Debugw("disabling roundTimer, no active round", loggerFields...) - - } else { - timesOutAt := time.Unix(int64(roundTimesOutAt), 0) - timeUntilTimeout := time.Until(timesOutAt) - - if timeUntilTimeout <= 0 { - fm.roundTimer.Stop() - fm.logger.Debugw("roundTimer has run down; disabling", loggerFields...) - } else { - fm.roundTimer.Reset(timeUntilTimeout) - loggerFields = append(loggerFields, "value", roundTimesOutAt) - fm.logger.Debugw("updating roundState.TimesOutAt", loggerFields...) - } - } -} - -func (fm *FluxMonitor) resetIdleTimer(roundStartedAtUTC uint64) { - if fm.isHibernating || fm.idleTimer.IsDisabled() { - fm.idleTimer.Stop() - return - } else if roundStartedAtUTC == 0 { - // There is no active round, so keep using the idleTimer we already have - return - } - - startedAt := time.Unix(int64(roundStartedAtUTC), 0) - idleDeadline := startedAt.Add(fm.idleTimer.Period()) - timeUntilIdleDeadline := time.Until(idleDeadline) - loggerFields := fm.loggerFields( - "startedAt", roundStartedAtUTC, - "timeUntilIdleDeadline", timeUntilIdleDeadline, - ) - - if timeUntilIdleDeadline <= 0 { - fm.logger.Debugw("not resetting idleTimer, negative duration", loggerFields...) 
- return - } - fm.idleTimer.Reset(timeUntilIdleDeadline) - fm.logger.Debugw("resetting idleTimer", loggerFields...) -} - func (fm *FluxMonitor) submitTransaction( runID int64, answer decimal.Decimal, @@ -978,36 +891,6 @@ func (fm *FluxMonitor) submitTransaction( return nil } -func (fm *FluxMonitor) loggerFields(added ...interface{}) []interface{} { - return append(added, []interface{}{ - "pollFrequency", fm.pollTicker.Interval, - "idleDuration", fm.idleTimer.Period, - }...) -} - -func (fm *FluxMonitor) loggerFieldsForNewRound(log flux_aggregator_wrapper.FluxAggregatorNewRound) []interface{} { - return []interface{}{ - "round", log.RoundId, - "startedBy", log.StartedBy.Hex(), - "startedAt", log.StartedAt.String(), - } -} - -func (fm *FluxMonitor) loggerFieldsForAnswerUpdated(log flux_aggregator_wrapper.FluxAggregatorAnswerUpdated) []interface{} { - return []interface{}{ - "round", log.RoundId, - "answer", log.Current.String(), - "timestamp", log.UpdatedAt.String(), - } -} - -func (fm *FluxMonitor) loggerFieldsForTick() []interface{} { - return []interface{}{ - "pollPeriod", fm.pollTicker.Interval, - "idleDuration", fm.idleTimer.Period, - } -} - func (fm *FluxMonitor) statsAndStatusForRound(roundID uint32) ( FluxMonitorRoundStatsV2, pipeline.RunStatus, @@ -1029,7 +912,3 @@ func (fm *FluxMonitor) statsAndStatusForRound(roundID uint32) ( return roundStats, run.Status(), nil } - -func roundStateTimesOutAt(rs flux_aggregator_wrapper.OracleRoundState) uint64 { - return rs.StartedAt + rs.Timeout -} diff --git a/core/services/fluxmonitorv2/flux_monitor_test.go b/core/services/fluxmonitorv2/flux_monitor_test.go index 63b29f64c54..865dfdab882 100644 --- a/core/services/fluxmonitorv2/flux_monitor_test.go +++ b/core/services/fluxmonitorv2/flux_monitor_test.go @@ -164,8 +164,16 @@ func setup(t *testing.T, optionFns ...func(*setupOptions)) (*fluxmonitorv2.FluxM tm.jobORM, tm.pipelineORM, tm.keyStore, - fluxmonitorv2.NewPollTicker(time.Minute, options.pollTickerDisabled), - fluxmonitorv2.NewIdleTimer(options.idleTimerPeriod, options.idleTimerDisabled), + fluxmonitorv2.NewPollManager( + fluxmonitorv2.PollManagerConfig{ + PollTickerInterval: time.Minute, + PollTickerDisabled: options.pollTickerDisabled, + IdleTimerPeriod: options.idleTimerPeriod, + IdleTimerDisabled: options.idleTimerDisabled, + HibernationPollPeriod: 24 * time.Hour, + }, + logger.Default, + ), fluxmonitorv2.NewPaymentChecker(assets.NewLink(1), nil), contractAddress, tm.contractSubmitter, diff --git a/core/services/fluxmonitorv2/idle_timer.go b/core/services/fluxmonitorv2/idle_timer.go deleted file mode 100644 index 69b90294bb2..00000000000 --- a/core/services/fluxmonitorv2/idle_timer.go +++ /dev/null @@ -1,53 +0,0 @@ -package fluxmonitorv2 - -import ( - "time" - - "github.com/smartcontractkit/chainlink/core/utils" -) - -// IdleTimer defines a ResettableTimer which can be disabled -type IdleTimer struct { - timer utils.ResettableTimer - period time.Duration - disabled bool -} - -// NewIdleTimer constructs a new IdleTimer -func NewIdleTimer(period time.Duration, disabled bool) *IdleTimer { - return &IdleTimer{ - timer: utils.NewResettableTimer(), - period: period, - disabled: disabled, - } -} - -// Period gets the timer period -func (t *IdleTimer) Period() time.Duration { - return t.period -} - -// IsEnabled determines if the timer is enabled -func (t *IdleTimer) IsEnabled() bool { - return !t.disabled -} - -// IsDisabled determines if the timer is disabled -func (t *IdleTimer) IsDisabled() bool { - return t.disabled -} - -// Ticks ticks on 
a given interval
-func (t *IdleTimer) Ticks() <-chan time.Time {
- return t.timer.Ticks()
-}
-
-// Reset resets the timer
-func (t *IdleTimer) Reset(period time.Duration) {
- t.timer.Reset(period)
-}
-
-// Stop stops the timer permanently
-func (t *IdleTimer) Stop() {
- t.timer.Stop()
-}
diff --git a/core/services/fluxmonitorv2/idle_timer_test.go b/core/services/fluxmonitorv2/idle_timer_test.go
deleted file mode 100644
index f09b793b015..00000000000
--- a/core/services/fluxmonitorv2/idle_timer_test.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package fluxmonitorv2_test
-
-import (
- "testing"
- "time"
-
- "github.com/smartcontractkit/chainlink/core/services/fluxmonitorv2"
- "github.com/stretchr/testify/assert"
-)
-
-func TestIdleTimer_Getters(t *testing.T) {
- t.Parallel()
-
- ticker := fluxmonitorv2.NewIdleTimer(time.Second, false)
-
- t.Run("Period", func(t *testing.T) {
- assert.Equal(t, time.Second, ticker.Period())
- })
-
- t.Run("IsEnabled", func(t *testing.T) {
- assert.Equal(t, true, ticker.IsEnabled())
- })
-
- t.Run("IsDisabled", func(t *testing.T) {
- assert.Equal(t, false, ticker.IsDisabled())
- })
-}
-
-// TODO - Test the ticker functions
-func TestIdleTimer_Ticker(t *testing.T) {
- t.Skip()
-}
diff --git a/core/services/fluxmonitorv2/poll_manager.go b/core/services/fluxmonitorv2/poll_manager.go
index eaa488f11c8..ca813a9caaf 100644
--- a/core/services/fluxmonitorv2/poll_manager.go
+++ b/core/services/fluxmonitorv2/poll_manager.go
@@ -4,34 +4,60 @@ import (
 "time"

 "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/flux_aggregator_wrapper"
+ "github.com/smartcontractkit/chainlink/core/logger"
+ "github.com/smartcontractkit/chainlink/core/utils"
)

type PollManagerConfig struct {
- IsHibernating bool
- PollTickerInterval time.Duration
- PollTickerDisabled bool
- IdleTimerInterval time.Duration
- IdleTimerDisabled bool
+ IsHibernating bool
+ PollTickerInterval time.Duration
+ PollTickerDisabled bool
+ IdleTimerPeriod time.Duration
+ IdleTimerDisabled bool
+ HibernationPollPeriod time.Duration
}

// PollManager manages the tickers/timers which cause the Flux Monitor to start
-// a poll
+// a poll. It contains 4 types of tickers and timers which determine when to
+// initiate a poll:
+//
+// HibernationTimer - The PollManager can be set to hibernate, which disables all
+// other tickers/timers, and enables the hibernation timer. Upon expiry of the
+// hibernation timer, a poll is requested. When the PollManager is awakened, the
+// other tickers and timers are enabled with the current round state, and the
+// hibernation timer is disabled.
+//
+// PollTicker - The poll ticker requests a poll at a given interval defined in
+// PollManagerConfig. Disabling this through config will permanently disable
+// the ticker, even through resets.
+//
+// IdleTimer - The idle timer requests a poll after no poll has taken place
+// since the last round started and the IdleTimerPeriod has elapsed. This is
+// also known as a heartbeat.
+//
+// RoundTimer - The round timer requests a poll when the round state provided by
+// the contract has timed out.
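+//
+// A minimal consumption sketch (illustrative only; FluxMonitor.consume in this
+// change is the real call site, and the channel accessors are defined below):
+//
+//   pm := NewPollManager(PollManagerConfig{ /* ... */ }, logger.Default)
+//   pm.Start(isHibernating, roundState)
+//   for {
+//       select {
+//       case <-pm.PollTickerTicks():       // poll at the configured interval
+//       case <-pm.IdleTimerTicks():        // heartbeat poll
+//       case <-pm.RoundTimerTicks():       // active round timed out
+//       case <-pm.HibernationTimerTicks(): // infrequent poll while hibernating
+//       }
+//   }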
type PollManager struct { cfg PollManagerConfig - isHibernating bool - pollTicker *PollTicker - idleTimer *IdleTimer + hibernationTimer utils.ResettableTimer + pollTicker utils.PausableTicker + idleTimer utils.ResettableTimer + roundTimer utils.ResettableTimer + + logger *logger.Logger } // NewPollManager initializes a new PollManager -func NewPollManager(cfg PollManagerConfig) *PollManager { +func NewPollManager(cfg PollManagerConfig, logger *logger.Logger) *PollManager { return &PollManager{ - cfg: cfg, - isHibernating: cfg.IsHibernating, + cfg: cfg, + logger: logger, - pollTicker: NewPollTicker(cfg.PollTickerInterval, cfg.PollTickerDisabled), - idleTimer: NewIdleTimer(cfg.IdleTimerInterval, cfg.IdleTimerDisabled), + hibernationTimer: utils.NewResettableTimer(), + pollTicker: utils.NewPausableTicker(cfg.PollTickerInterval), + idleTimer: utils.NewResettableTimer(), + roundTimer: utils.NewResettableTimer(), } } @@ -45,99 +71,162 @@ func (pm *PollManager) IdleTimerTicks() <-chan time.Time { return pm.idleTimer.Ticks() } -func (pm *PollManager) Stop() { - pm.pollTicker.Stop() - pm.idleTimer.Stop() +// HibernationTimerTicks ticks after a given period +func (pm *PollManager) HibernationTimerTicks() <-chan time.Time { + return pm.hibernationTimer.Ticks() } -// Reset resets all tickers/timers -func (pm *PollManager) Reset(roundState flux_aggregator_wrapper.OracleRoundState) { - pm.ResetPollTicker() - pm.ResetIdleTimer(roundStateTimesOutAt(roundState)) +// RoundTimerTicks ticks after a given period +func (pm *PollManager) RoundTimerTicks() <-chan time.Time { + return pm.roundTimer.Ticks() } -// ResetPollTicker resets the poll ticker if enabled and not hibernating -func (pm *PollManager) ResetPollTicker() { - if pm.pollTicker.IsEnabled() && !pm.isHibernating { - pm.pollTicker.Resume() +// Start initializes all the timers and determines whether to go into immediate +// hibernation. +func (pm *PollManager) Start(hibernate bool, roundState flux_aggregator_wrapper.OracleRoundState) { + if hibernate { + pm.Hibernate() } else { - pm.pollTicker.Pause() + pm.Awaken(roundState) } } -func (pm *PollManager) IsPollTickerDisabled() bool { - return pm.pollTicker.IsDisabled() +// ShouldPerformInitialPoll determines whether to perform an initial poll +func (pm *PollManager) ShouldPerformInitialPoll() bool { + return (!pm.cfg.PollTickerDisabled || !pm.cfg.IdleTimerDisabled) && !pm.cfg.IsHibernating } -func (pm *PollManager) IsIdleTimerDisabled() bool { - return pm.idleTimer.IsDisabled() +// Reset resets the timers except for the hibernation timer. Will not reset if +// hibernating. 
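+//
+// While hibernating, the hibernation timer alone drives polling (see Hibernate
+// below), so Reset deliberately leaves the other tickers/timers stopped.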
+func (pm *PollManager) Reset(roundState flux_aggregator_wrapper.OracleRoundState) {
+ if !pm.cfg.IsHibernating {
+ pm.startPollTicker()
+ pm.startIdleTimer(roundState.StartedAt)
+ pm.startRoundTimer(roundStateTimesOutAt(roundState))
+ }
}

+// ResetIdleTimer resets the idle timer unless hibernating
func (pm *PollManager) ResetIdleTimer(roundStartedAtUTC uint64) {
- // Stop the timer if hibernating or disabled
- if pm.isHibernating || pm.idleTimer.IsDisabled() {
+ if !pm.cfg.IsHibernating {
+ pm.startIdleTimer(roundStartedAtUTC)
+ }
+}
+
+// Stop stops all timers/tickers
+func (pm *PollManager) Stop() {
+ pm.hibernationTimer.Stop()
+ pm.pollTicker.Destroy()
+ pm.idleTimer.Stop()
+ pm.roundTimer.Stop()
+}
+
+// Hibernate sets hibernation to true, starts the hibernation timer and stops
+// all other tickers/timers
+func (pm *PollManager) Hibernate() {
+ pm.logger.Info("entering hibernation mode")
+
+ // Start the hibernation timer
+ pm.cfg.IsHibernating = true
+ pm.hibernationTimer.Reset(pm.cfg.HibernationPollPeriod)
+
+ // Stop the other tickers
+ pm.pollTicker.Pause()
+ pm.idleTimer.Stop()
+ pm.roundTimer.Stop()
+}
+
+// Awaken sets hibernation to false, stops the hibernation timer and starts all
+// other tickers
+func (pm *PollManager) Awaken(roundState flux_aggregator_wrapper.OracleRoundState) {
+ pm.logger.Info("exiting hibernation mode, reactivating contract")
+
+ // Stop the hibernation timer
+ pm.cfg.IsHibernating = false
+ pm.hibernationTimer.Stop()
+
+ // Start the other tickers
+ pm.startPollTicker()
+ pm.startIdleTimer(roundState.StartedAt)
+ pm.startRoundTimer(roundStateTimesOutAt(roundState))
+}
+
+// startPollTicker starts the poll ticker if it is enabled
+func (pm *PollManager) startPollTicker() {
+ if pm.cfg.PollTickerDisabled {
+ pm.pollTicker.Pause()
+
+ return
+ }
+
+ pm.pollTicker.Resume()
+}
+
+// startIdleTimer starts the idle timer if it is enabled
+func (pm *PollManager) startIdleTimer(roundStartedAtUTC uint64) {
+ if pm.cfg.IdleTimerDisabled {
 pm.idleTimer.Stop()
 return
 }

- // There is no active round, so keep using the idleTimer we already have
+ // Keep using the idleTimer we already have
 if roundStartedAtUTC == 0 {
+ pm.logger.Debugw("keeping existing timer, no active round")
+
 return
 }

 startedAt := time.Unix(int64(roundStartedAtUTC), 0)
- idleDeadline := startedAt.Add(pm.idleTimer.Period())
- timeUntilIdleDeadline := time.Until(idleDeadline)
+ deadline := startedAt.Add(pm.cfg.IdleTimerPeriod)
+ deadlineDuration := time.Until(deadline)

- // loggerFields := fm.loggerFields(
- // "startedAt", roundStartedAtUTC,
- // "timeUntilIdleDeadline", timeUntilIdleDeadline,
- // )
+ log := pm.logger.With(
+ "pollFrequency", pm.cfg.PollTickerInterval,
+ "idleDuration", pm.cfg.IdleTimerPeriod,
+ "startedAt", roundStartedAtUTC,
+ "timeUntilIdleDeadline", deadlineDuration,
+ )

- if timeUntilIdleDeadline <= 0 {
- // fm.logger.Debugw("not resetting idleTimer, negative duration", loggerFields...)
+ if deadlineDuration <= 0 {
+ log.Debugw("not resetting idleTimer, negative duration")
 return
 }

- pm.idleTimer.Reset(timeUntilIdleDeadline)
- // fm.logger.Debugw("resetting idleTimer", loggerFields...)
-} - -// func (fm *FluxMonitor) resetIdleTimer(roundStartedAtUTC uint64) { -// if fm.isHibernating || fm.idleTimer.IsDisabled() { -// fm.idleTimer.Stop() -// return -// } else if roundStartedAtUTC == 0 { -// // There is no active round, so keep using the idleTimer we already have -// return -// } - -// startedAt := time.Unix(int64(roundStartedAtUTC), 0) -// idleDeadline := startedAt.Add(fm.idleTimer.Period()) -// timeUntilIdleDeadline := time.Until(idleDeadline) -// loggerFields := fm.loggerFields( -// "startedAt", roundStartedAtUTC, -// "timeUntilIdleDeadline", timeUntilIdleDeadline, -// ) - -// if timeUntilIdleDeadline <= 0 { -// fm.logger.Debugw("not resetting idleTimer, negative duration", loggerFields...) -// return -// } -// fm.idleTimer.Reset(timeUntilIdleDeadline) -// fm.logger.Debugw("resetting idleTimer", loggerFields...) -// } - -// Hibernate sets hibernation to true and resets all ticker/timers -func (pm *PollManager) Hibernate(roundState flux_aggregator_wrapper.OracleRoundState) { - pm.isHibernating = true - pm.Reset(roundState) -} - -// Awaken sets hibernation to false and resets all ticker/timers -func (pm *PollManager) Awaken(roundState flux_aggregator_wrapper.OracleRoundState) { - pm.isHibernating = false - pm.Reset(roundState) + pm.idleTimer.Reset(deadlineDuration) + log.Debugw("resetting idleTimer") +} + +// startRoundTimer starts the round timer +func (pm *PollManager) startRoundTimer(roundTimesOutAt uint64) { + log := pm.logger.With( + "pollFrequency", pm.cfg.PollTickerInterval, + "idleDuration", pm.cfg.IdleTimerPeriod, + "timesOutAt", roundTimesOutAt, + ) + + if roundTimesOutAt == 0 { + log.Debugw("disabling roundTimer, no active round") + pm.roundTimer.Stop() + + return + } + + timesOutAt := time.Unix(int64(roundTimesOutAt), 0) + timeoutDuration := time.Until(timesOutAt) + + if timeoutDuration <= 0 { + log.Debugw("roundTimer has run down; disabling") + pm.roundTimer.Stop() + + return + } + + pm.roundTimer.Reset(timeoutDuration) + log.Debugw("updating roundState.TimesOutAt", "value", roundTimesOutAt) +} + +func roundStateTimesOutAt(rs flux_aggregator_wrapper.OracleRoundState) uint64 { + return rs.StartedAt + rs.Timeout } diff --git a/core/services/fluxmonitorv2/poll_manager_test.go b/core/services/fluxmonitorv2/poll_manager_test.go new file mode 100644 index 00000000000..b829611cb8b --- /dev/null +++ b/core/services/fluxmonitorv2/poll_manager_test.go @@ -0,0 +1,414 @@ +package fluxmonitorv2_test + +import ( + "testing" + "time" + + "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/flux_aggregator_wrapper" + "github.com/smartcontractkit/chainlink/core/logger" + "github.com/smartcontractkit/chainlink/core/services/fluxmonitorv2" + "github.com/stretchr/testify/assert" +) + +var ( + pollTickerDefaultDuration = 200 * time.Millisecond + idleTickerDefaultDuration = 1 * time.Second // Setting this too low will cause the idle timer to fire before the assert +) + +func newPollManager() *fluxmonitorv2.PollManager { + return fluxmonitorv2.NewPollManager(fluxmonitorv2.PollManagerConfig{ + IsHibernating: false, + PollTickerInterval: pollTickerDefaultDuration, + PollTickerDisabled: false, + IdleTimerPeriod: idleTickerDefaultDuration, + IdleTimerDisabled: false, + HibernationPollPeriod: 24 * time.Hour, + }, logger.Default) +} + +type tickChecker struct { + pollTicked bool + idleTicked bool + roundTicked bool + hibernationTicked bool +} + +// watchTicks watches the PollManager for ticks for the waitDuration +func watchTicks(t *testing.T, pm 
*fluxmonitorv2.PollManager, waitDuration time.Duration) tickChecker { + ticks := tickChecker{ + pollTicked: false, + idleTicked: false, + roundTicked: false, + hibernationTicked: false, + } + + waitCh := time.After(waitDuration) + for { + select { + case <-pm.PollTickerTicks(): + ticks.pollTicked = true + case <-pm.IdleTimerTicks(): + ticks.idleTicked = true + case <-pm.RoundTimerTicks(): + ticks.roundTicked = true + case <-pm.HibernationTimerTicks(): + ticks.hibernationTicked = true + case <-waitCh: + waitCh = nil + } + + if waitCh == nil { + break + } + } + + return ticks +} + +func TestPollManager_PollTicker(t *testing.T) { + t.Parallel() + + pm := fluxmonitorv2.NewPollManager(fluxmonitorv2.PollManagerConfig{ + PollTickerInterval: pollTickerDefaultDuration, + PollTickerDisabled: false, + IdleTimerPeriod: idleTickerDefaultDuration, + IdleTimerDisabled: true, + HibernationPollPeriod: 24 * time.Hour, + }, logger.Default) + t.Cleanup(pm.Stop) + + pm.Start(false, flux_aggregator_wrapper.OracleRoundState{}) + + ticks := watchTicks(t, pm, 2*time.Second) + + assert.True(t, ticks.pollTicked) + assert.False(t, ticks.idleTicked) + assert.False(t, ticks.roundTicked) +} + +func TestPollManager_IdleTimer(t *testing.T) { + t.Parallel() + + pm := fluxmonitorv2.NewPollManager(fluxmonitorv2.PollManagerConfig{ + PollTickerInterval: 100 * time.Millisecond, + PollTickerDisabled: true, + IdleTimerPeriod: idleTickerDefaultDuration, + IdleTimerDisabled: false, + HibernationPollPeriod: 24 * time.Hour, + }, logger.Default) + t.Cleanup(pm.Stop) + + pm.Start(false, flux_aggregator_wrapper.OracleRoundState{ + StartedAt: uint64(time.Now().Unix()), + }) + + ticks := watchTicks(t, pm, 2*time.Second) + + assert.False(t, ticks.pollTicked) + assert.True(t, ticks.idleTicked) + assert.False(t, ticks.roundTicked) +} + +func TestPollManager_RoundTimer(t *testing.T) { + t.Parallel() + + pm := fluxmonitorv2.NewPollManager(fluxmonitorv2.PollManagerConfig{ + PollTickerInterval: pollTickerDefaultDuration, + PollTickerDisabled: true, + IdleTimerPeriod: idleTickerDefaultDuration, + IdleTimerDisabled: true, + HibernationPollPeriod: 24 * time.Hour, + }, logger.Default) + t.Cleanup(pm.Stop) + + pm.Start(false, flux_aggregator_wrapper.OracleRoundState{ + StartedAt: uint64(time.Now().Unix()), + Timeout: 1, // in seconds + }) + t.Cleanup(pm.Stop) + + ticks := watchTicks(t, pm, 2*time.Second) + + assert.False(t, ticks.pollTicked) + assert.False(t, ticks.idleTicked) + assert.True(t, ticks.roundTicked) +} + +func TestFluxMonitor_HibernationTimer(t *testing.T) { + t.Parallel() + + pm := fluxmonitorv2.NewPollManager(fluxmonitorv2.PollManagerConfig{ + PollTickerInterval: pollTickerDefaultDuration, + PollTickerDisabled: true, + IdleTimerPeriod: idleTickerDefaultDuration, + IdleTimerDisabled: true, + HibernationPollPeriod: 1 * time.Second, + }, logger.Default) + t.Cleanup(pm.Stop) + + pm.Start(true, flux_aggregator_wrapper.OracleRoundState{ + StartedAt: uint64(time.Now().Unix()), + Timeout: 1, // in seconds + }) + + ticks := watchTicks(t, pm, 2*time.Second) + + assert.True(t, ticks.hibernationTicked) +} + +func TestPollManager_HibernationOnStartThenAwaken(t *testing.T) { + t.Parallel() + + pm := fluxmonitorv2.NewPollManager(fluxmonitorv2.PollManagerConfig{ + PollTickerInterval: pollTickerDefaultDuration, + PollTickerDisabled: false, + IdleTimerPeriod: idleTickerDefaultDuration, + IdleTimerDisabled: false, + HibernationPollPeriod: 24 * time.Hour, + }, logger.Default) + t.Cleanup(pm.Stop) + + pm.Start(true, 
flux_aggregator_wrapper.OracleRoundState{ + StartedAt: uint64(time.Now().Unix()), + Timeout: 1, // in seconds + }) + + ticks := watchTicks(t, pm, 2*time.Second) + + assert.False(t, ticks.pollTicked) + assert.False(t, ticks.idleTicked) + assert.False(t, ticks.roundTicked) + + pm.Awaken(flux_aggregator_wrapper.OracleRoundState{ + StartedAt: uint64(time.Now().Unix()), + Timeout: 1, + }) + + ticks = watchTicks(t, pm, 2*time.Second) + + assert.True(t, ticks.pollTicked) + assert.True(t, ticks.idleTicked) + assert.True(t, ticks.roundTicked) +} + +func TestPollManager_AwakeOnStartThenHibernate(t *testing.T) { + t.Parallel() + + pm := newPollManager() + t.Cleanup(pm.Stop) + + pm.Start(false, flux_aggregator_wrapper.OracleRoundState{ + StartedAt: uint64(time.Now().Unix()), + Timeout: 1, + }) + + ticks := watchTicks(t, pm, 2*time.Second) + + assert.True(t, ticks.pollTicked) + assert.True(t, ticks.idleTicked) + assert.True(t, ticks.roundTicked) + + pm.Hibernate() + + ticks = watchTicks(t, pm, 2*time.Second) + + assert.False(t, ticks.pollTicked) + assert.False(t, ticks.idleTicked) + assert.False(t, ticks.roundTicked) +} + +func TestPollManager_ShouldPerformInitialPoll(t *testing.T) { + testCases := []struct { + name string + pollTickerDisabled bool + idleTimerDisabled bool + isHibernating bool + want bool + }{ + { + name: "perform poll - all enabled", + pollTickerDisabled: false, + idleTimerDisabled: false, + isHibernating: false, + want: true, + }, + { + name: "don't perform poll - hibernating", + pollTickerDisabled: false, + idleTimerDisabled: false, + isHibernating: true, + want: false, + }, + { + name: "perform poll - only pollTickerDisabled", + pollTickerDisabled: true, + idleTimerDisabled: false, + isHibernating: false, + want: true, + }, + { + name: "perform poll - only idleTimerDisabled", + pollTickerDisabled: false, + idleTimerDisabled: true, + isHibernating: false, + want: true, + }, + { + name: "don't perform poll - idleTimerDisabled and pollTimerDisabled", + pollTickerDisabled: true, + idleTimerDisabled: true, + isHibernating: false, + want: false, + }, + } + + for _, tc := range testCases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + pm := fluxmonitorv2.NewPollManager(fluxmonitorv2.PollManagerConfig{ + IsHibernating: tc.isHibernating, + HibernationPollPeriod: 24 * time.Hour, + PollTickerInterval: pollTickerDefaultDuration, + PollTickerDisabled: tc.pollTickerDisabled, + IdleTimerPeriod: idleTickerDefaultDuration, + IdleTimerDisabled: tc.idleTimerDisabled, + }, logger.Default) + t.Cleanup(pm.Stop) + + assert.Equal(t, tc.want, pm.ShouldPerformInitialPoll()) + }) + + } +} + +func TestPollManager_Stop(t *testing.T) { + t.Parallel() + + pm := newPollManager() + t.Cleanup(pm.Stop) + + pm.Start(false, flux_aggregator_wrapper.OracleRoundState{ + StartedAt: uint64(time.Now().Unix()), + Timeout: 1, + }) + t.Cleanup(pm.Stop) + + ticks := watchTicks(t, pm, 2*time.Second) + + assert.True(t, ticks.pollTicked) + assert.True(t, ticks.idleTicked) + assert.True(t, ticks.roundTicked) + + pm.Stop() + + ticks = watchTicks(t, pm, 2*time.Second) + + assert.False(t, ticks.pollTicked) + assert.False(t, ticks.idleTicked) + assert.False(t, ticks.roundTicked) +} + +func TestPollManager_ResetIdleTimer(t *testing.T) { + t.Parallel() + + pm := newPollManager() + t.Cleanup(pm.Stop) + + // Start again in awake mode + pm.Start(false, flux_aggregator_wrapper.OracleRoundState{ + StartedAt: uint64(time.Now().Unix()), + Timeout: 1, + }) + + // Idle timer fires when not hibernating + ticks := 
watchTicks(t, pm, 2*time.Second)
+ assert.True(t, ticks.idleTicked)
+
+ // Idle timer fires again after reset
+ pm.ResetIdleTimer(uint64(time.Now().Unix()) + 1) // 1 second after now
+ ticks = watchTicks(t, pm, 2*time.Second)
+ assert.True(t, ticks.idleTicked)
+}
+
+func TestPollManager_ResetIdleTimerWhenHibernating(t *testing.T) {
+ t.Parallel()
+
+ pm := newPollManager()
+ t.Cleanup(pm.Stop)
+
+ // Start in hibernation
+ pm.Start(true, flux_aggregator_wrapper.OracleRoundState{
+ StartedAt: uint64(time.Now().Unix()),
+ Timeout: 1, // in seconds
+ })
+
+ // Idle timer does not fire when hibernating
+ ticks := watchTicks(t, pm, 2*time.Second)
+ assert.False(t, ticks.idleTicked)
+
+ // Idle timer does not reset because in hibernation, so it does not fire
+ pm.ResetIdleTimer(uint64(time.Now().Unix()))
+ ticks = watchTicks(t, pm, 2*time.Second)
+ assert.False(t, ticks.idleTicked)
+}
+
+func TestPollManager_Reset(t *testing.T) {
+ t.Parallel()
+
+ pm := newPollManager()
+ t.Cleanup(pm.Stop)
+
+ // Start in awake mode
+ pm.Start(false, flux_aggregator_wrapper.OracleRoundState{
+ StartedAt: uint64(time.Now().Unix()),
+ Timeout: 1,
+ })
+
+ // Tickers/timers fire when not hibernating
+ ticks := watchTicks(t, pm, 2*time.Second)
+ assert.True(t, ticks.pollTicked)
+ assert.True(t, ticks.idleTicked)
+ assert.True(t, ticks.roundTicked)
+
+ // Tickers/timers fire again after reset
+ pm.Reset(flux_aggregator_wrapper.OracleRoundState{
+ StartedAt: uint64(time.Now().Unix()),
+ Timeout: 1,
+ })
+ ticks = watchTicks(t, pm, 2*time.Second)
+ assert.True(t, ticks.pollTicked)
+ assert.True(t, ticks.idleTicked)
+ assert.True(t, ticks.roundTicked)
+}
+
+func TestPollManager_ResetWhenHibernating(t *testing.T) {
+ t.Parallel()
+
+ pm := newPollManager()
+ t.Cleanup(pm.Stop)
+
+ // Start in hibernation
+ pm.Start(true, flux_aggregator_wrapper.OracleRoundState{
+ StartedAt: uint64(time.Now().Unix()),
+ Timeout: 1, // in seconds
+ })
+
+ // Tickers/timers do not fire when hibernating
+ ticks := watchTicks(t, pm, 2*time.Second)
+ assert.False(t, ticks.pollTicked)
+ assert.False(t, ticks.idleTicked)
+ assert.False(t, ticks.roundTicked)
+
+ // Tickers/timers do not reset while in hibernation, so they do not fire
+ pm.Reset(flux_aggregator_wrapper.OracleRoundState{
+ StartedAt: uint64(time.Now().Unix()),
+ Timeout: 1, // in seconds
+ })
+ ticks = watchTicks(t, pm, 2*time.Second)
+ assert.False(t, ticks.pollTicked)
+ assert.False(t, ticks.idleTicked)
+ assert.False(t, ticks.roundTicked)
+}
diff --git a/core/services/fluxmonitorv2/poll_ticker.go b/core/services/fluxmonitorv2/poll_ticker.go
deleted file mode 100644
index 3d72a3604c2..00000000000
--- a/core/services/fluxmonitorv2/poll_ticker.go
+++ /dev/null
@@ -1,60 +0,0 @@
-package fluxmonitorv2
-
-import (
- "time"
-
- "github.com/smartcontractkit/chainlink/core/utils"
-)
-
-// PollTicker defines a PausableTicker which can be disabled
-type PollTicker struct {
- ticker utils.PausableTicker
- interval time.Duration
- disabled bool
-}
-
-// NewPollTicker constructs a new PollTicker
-func NewPollTicker(interval time.Duration, disabled bool) *PollTicker {
- return &PollTicker{
- ticker: utils.NewPausableTicker(interval),
- interval: interval,
- disabled: disabled,
- }
-}
-
-// Interval gets the ticker interval
-func (t *PollTicker) Interval() time.Duration {
- return t.interval
-}
-
-// IsEnabled determines if the picker is enabled
-func (t *PollTicker) IsEnabled() bool {
- return !t.disabled
-}
-
-// IsDisabled determines if the picker is disabled
-func (t *PollTicker)
IsDisabled() bool { - return t.disabled -} - -// Ticks ticks on a given interval -func (t *PollTicker) Ticks() <-chan time.Time { - return t.ticker.Ticks() -} - -// Resume resumes the ticker if it is enabled -func (t *PollTicker) Resume() { - if t.IsEnabled() { - t.ticker.Resume() - } -} - -// Pause pauses the ticker -func (t *PollTicker) Pause() { - t.ticker.Pause() -} - -// Stop stops the ticker permanently -func (t *PollTicker) Stop() { - t.ticker.Destroy() -} diff --git a/core/services/fluxmonitorv2/poll_ticker_test.go b/core/services/fluxmonitorv2/poll_ticker_test.go deleted file mode 100644 index f4452b70844..00000000000 --- a/core/services/fluxmonitorv2/poll_ticker_test.go +++ /dev/null @@ -1,31 +0,0 @@ -package fluxmonitorv2_test - -import ( - "testing" - "time" - - "github.com/smartcontractkit/chainlink/core/services/fluxmonitorv2" - "github.com/stretchr/testify/assert" -) - -func TestPollTicker_Getters(t *testing.T) { - t.Parallel() - - ticker := fluxmonitorv2.NewPollTicker(time.Second, false) - - t.Run("Interval", func(t *testing.T) { - assert.Equal(t, time.Second, ticker.Interval()) - }) - - t.Run("IsEnabled", func(t *testing.T) { - assert.Equal(t, true, ticker.IsEnabled()) - }) - - t.Run("IsDisabled", func(t *testing.T) { - assert.Equal(t, false, ticker.IsDisabled()) - }) -} - -// TODO - Test the ticker functions -func TestPollTimer_Ticker(t *testing.T) { -} From 12a9ab6d3ef71e830c00b969950e8023a0ed3cd5 Mon Sep 17 00:00:00 2001 From: AndrewInsignares Date: Tue, 23 Mar 2021 11:48:04 -0400 Subject: [PATCH 073/116] adding back default log level env var, updating config set to read / set runtimeStore --- core/store/orm/config.go | 21 +++++++++++++++++++++ core/web/log_controller.go | 2 +- core/web/log_controller_test.go | 4 ++++ tools/docker/.env | 1 + 4 files changed, 27 insertions(+), 1 deletion(-) diff --git a/core/store/orm/config.go b/core/store/orm/config.go index 0adf3d75735..3c819c9ce57 100644 --- a/core/store/orm/config.go +++ b/core/store/orm/config.go @@ -769,9 +769,30 @@ func (c Config) OperatorContractAddress() common.Address { // LogLevel represents the maximum level of log messages to output. func (c Config) LogLevel() LogLevel { + if c.runtimeStore != nil { + var value LogLevel + if err := c.runtimeStore.GetConfigValue("LogLevel", &value); err != nil && !errors.Is(err, gorm.ErrRecordNotFound) { + logger.Warnw("Error while trying to fetch LogLevel.", "error", err) + } else if err == nil { + return value + } + } return c.getWithFallback("LogLevel", parseLogLevel).(LogLevel) } +// SetLogLevel saves a runtime value for the default logger level +func (c Config) SetLogLevel(value string) error { + if c.runtimeStore == nil { + return errors.New("No runtime store installed") + } + var ll LogLevel + err := ll.Set(value) + if err != nil { + return err + } + return c.runtimeStore.SetConfigValue("LogLevel", ll) +} + // LogToDisk configures disk preservation of logs. 
func (c Config) LogToDisk() bool { return c.viper.GetBool(EnvVarName("LogToDisk")) diff --git a/core/web/log_controller.go b/core/web/log_controller.go index 6144f5b410d..13fe6bafaa4 100644 --- a/core/web/log_controller.go +++ b/core/web/log_controller.go @@ -55,7 +55,7 @@ func (cc *LogController) Patch(c *gin.Context) { jsonAPIError(c, http.StatusBadRequest, err) return } - cc.App.GetStore().Config.Set("LOG_LEVEL", ll.String()) + err = cc.App.GetStore().Config.SetLogLevel(ll.String()) err = cc.App.GetStore().SetConfigStrValue(c.Request.Context(), "LogLevel", ll.String()) if err != nil { jsonAPIError(c, http.StatusInternalServerError, err) diff --git a/core/web/log_controller_test.go b/core/web/log_controller_test.go index 5cb2101d23f..115472e2107 100644 --- a/core/web/log_controller_test.go +++ b/core/web/log_controller_test.go @@ -21,6 +21,7 @@ type testCase struct { logSql *bool expectedLogLevel zapcore.Level + expectedLogSQL bool expectedErrorCode int } @@ -86,12 +87,14 @@ func TestLogController_PatchLogConfig(t *testing.T) { logLevel: "info", logSql: &sqlTrue, expectedLogLevel: zapcore.InfoLevel, + expectedLogSQL: true, }, { Description: "Set log level to warn and log sql to false", logLevel: "warn", logSql: &sqlFalse, expectedLogLevel: zapcore.WarnLevel, + expectedLogSQL: false, }, { Description: "Send no params to updater", @@ -124,6 +127,7 @@ func TestLogController_PatchLogConfig(t *testing.T) { } if tc.logSql != nil { assert.Equal(t, tc.logSql, &lR.SqlEnabled) + assert.Equal(t, &tc.expectedLogSQL, &lR.SqlEnabled) } assert.Equal(t, tc.expectedLogLevel.String(), app.GetStore().Config.LogLevel().String()) } diff --git a/tools/docker/.env b/tools/docker/.env index 179da05c33c..016faeaae5d 100644 --- a/tools/docker/.env +++ b/tools/docker/.env @@ -8,6 +8,7 @@ CHAINLINK_PGPASSWORD=node ETH_CHAIN_ID=34055 ETH_URL=ws://devnet:8546 EXPLORER_URL=ws://explorer:3001 +LOG_LEVEL=info MINIMUM_CONTRACT_PAYMENT=1000000000000 MIN_INCOMING_CONFIRMATIONS=1 MIN_OUTGOING_CONFIRMATIONS=1 From e6c00fbb9f95b4882d657618330a1dc36d60c7ea Mon Sep 17 00:00:00 2001 From: AndrewInsignares Date: Tue, 23 Mar 2021 12:48:09 -0400 Subject: [PATCH 074/116] setting env var within runtime store to always read from db, test included for getting bool value from config --- core/store/orm/config.go | 22 ++++++++++++++++++++-- core/store/orm/orm.go | 15 +++++++++++++++ core/store/orm/orm_test.go | 12 ++++++++++++ core/web/log_controller.go | 9 ++------- 4 files changed, 49 insertions(+), 9 deletions(-) diff --git a/core/store/orm/config.go b/core/store/orm/config.go index 3c819c9ce57..f2d17463314 100644 --- a/core/store/orm/config.go +++ b/core/store/orm/config.go @@ -1,6 +1,7 @@ package orm import ( + "context" "encoding/base64" "fmt" "io/ioutil" @@ -781,7 +782,7 @@ func (c Config) LogLevel() LogLevel { } // SetLogLevel saves a runtime value for the default logger level -func (c Config) SetLogLevel(value string) error { +func (c Config) SetLogLevel(ctx context.Context, value string) error { if c.runtimeStore == nil { return errors.New("No runtime store installed") } @@ -790,7 +791,7 @@ func (c Config) SetLogLevel(value string) error { if err != nil { return err } - return c.runtimeStore.SetConfigValue("LogLevel", ll) + return c.runtimeStore.SetConfigStrValue(ctx, "LogLevel", ll.String()) } // LogToDisk configures disk preservation of logs. 
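With the setter above and the getter change in the previous patch, a level set
at runtime is persisted through the runtime store and wins over the LOG_LEVEL
env var on subsequent reads. A minimal round-trip sketch, assuming a
*store.Store whose Config has had SetRuntimeStore called on it (as the ORM test
further down does):

	// Persist "debug" via the runtime store; the env var is left untouched.
	if err := store.Config.SetLogLevel(ctx, "debug"); err != nil {
		return err
	}
	// LogLevel() now reads the stored value back from the DB, falling back
	// to the LOG_LEVEL env var only when no runtime value exists.
	current := store.Config.LogLevel()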
@@ -800,9 +801,26 @@ func (c Config) LogToDisk() bool { // LogSQLStatements tells chainlink to log all SQL statements made using the default logger func (c Config) LogSQLStatements() bool { + if c.runtimeStore != nil { + logSqlStatements, err := c.runtimeStore.GetConfigBoolValue("LogSQLStatements") + if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) { + logger.Warnw("Error while trying to fetch LogSQLStatements.", "error", err) + } else if err == nil { + return *logSqlStatements + } + } return c.viper.GetBool(EnvVarName("LogSQLStatements")) } +// SetLogSQLStatements saves a runtime value for enabling/disabling logging all SQL statements on the default logger +func (c Config) SetLogSQLStatements(ctx context.Context, sqlEnabled bool) error { + if c.runtimeStore == nil { + return errors.New("No runtime store installed") + } + + return c.runtimeStore.SetConfigStrValue(ctx, "LogSQLStatements", strconv.FormatBool(sqlEnabled)) +} + // LogSQLMigrations tells chainlink to log all SQL migrations made using the default logger func (c Config) LogSQLMigrations() bool { return c.viper.GetBool(EnvVarName("LogSQLMigrations")) diff --git a/core/store/orm/orm.go b/core/store/orm/orm.go index a118264e331..f7f4da3832e 100644 --- a/core/store/orm/orm.go +++ b/core/store/orm/orm.go @@ -11,6 +11,7 @@ import ( "net/url" "os" "path/filepath" + "strconv" "strings" "sync" "time" @@ -666,6 +667,20 @@ func (orm *ORM) GetConfigValue(field string, value encoding.TextUnmarshaler) err return value.UnmarshalText([]byte(config.Value)) } +// GetConfigBoolValue returns a boolean value for a named configuration entry +func (orm *ORM) GetConfigBoolValue(field string) (*bool, error) { + name := EnvVarName(field) + config := models.Configuration{} + if err := orm.DB.First(&config, "name = ?", name).Error; err != nil { + return nil, err + } + value, err := strconv.ParseBool(config.Value) + if err != nil { + return nil, err + } + return &value, nil +} + // SetConfigValue returns the value for a named configuration entry func (orm *ORM) SetConfigValue(field string, value encoding.TextMarshaler) error { name := EnvVarName(field) diff --git a/core/store/orm/orm_test.go b/core/store/orm/orm_test.go index 6dd4724bd91..98d3d38c104 100644 --- a/core/store/orm/orm_test.go +++ b/core/store/orm/orm_test.go @@ -1964,3 +1964,15 @@ func TestORM_SetConfigStrValue(t *testing.T) { require.NoError(t, err) require.Equal(t, strconv.FormatBool(isSqlStatementEnabled), res.Value) } + +func TestORM_GetConfigBoolValue(t *testing.T) { + t.Parallel() + store, cleanup := cltest.NewStore(t) + defer cleanup() + store.Config.SetRuntimeStore(store.ORM) + + isSqlStatementEnabled := true + err := store.Config.SetLogSQLStatements(context.TODO(), isSqlStatementEnabled) + require.NoError(t, err) + assert.Equal(t, isSqlStatementEnabled, store.Config.LogSQLStatements()) +} diff --git a/core/web/log_controller.go b/core/web/log_controller.go index 13fe6bafaa4..f9bfbc81d92 100644 --- a/core/web/log_controller.go +++ b/core/web/log_controller.go @@ -3,7 +3,6 @@ package web import ( "fmt" "net/http" - "strconv" "github.com/gin-gonic/gin" "github.com/smartcontractkit/chainlink/core/logger" @@ -55,18 +54,14 @@ func (cc *LogController) Patch(c *gin.Context) { jsonAPIError(c, http.StatusBadRequest, err) return } - err = cc.App.GetStore().Config.SetLogLevel(ll.String()) - err = cc.App.GetStore().SetConfigStrValue(c.Request.Context(), "LogLevel", ll.String()) - if err != nil { + if err = cc.App.GetStore().Config.SetLogLevel(c.Request.Context(), ll.String()); err != nil { 
jsonAPIError(c, http.StatusInternalServerError, err) return } } if request.SqlEnabled != nil { - cc.App.GetStore().Config.Set("LOG_SQL", request.SqlEnabled) - err := cc.App.GetStore().SetConfigStrValue(c.Request.Context(), "LogSQLStatements", strconv.FormatBool(*request.SqlEnabled)) - if err != nil { + if err := cc.App.GetStore().Config.SetLogSQLStatements(c.Request.Context(), *request.SqlEnabled); err != nil { jsonAPIError(c, http.StatusInternalServerError, err) return } From aab594a4f90d1a9aa446382da8e30db4b5de4a2e Mon Sep 17 00:00:00 2001 From: AndrewInsignares Date: Tue, 23 Mar 2021 12:59:02 -0400 Subject: [PATCH 075/116] fix indenting --- core/cmd/renderer.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/cmd/renderer.go b/core/cmd/renderer.go index 53b8c0d75b6..1edac78ec1e 100644 --- a/core/cmd/renderer.go +++ b/core/cmd/renderer.go @@ -102,7 +102,7 @@ func (rt RendererTable) Render(v interface{}, headers ...string) error { return rt.renderJobsV2([]Job{*typed}) case *pipeline.Run: return rt.renderPipelineRun(*typed) - case *webPresenters.LogResource: + case *webPresenters.LogResource: return rt.renderLogResource(*typed) default: return fmt.Errorf("unable to render object of type %T: %v", typed, typed) From d116bc57c6c3f34e09969665b5b0f54c0c2a35da Mon Sep 17 00:00:00 2001 From: Connor Stein Date: Tue, 23 Mar 2021 16:58:37 -0400 Subject: [PATCH 076/116] Fix FM handling, remove old meta data (#4105) --- core/adapters/bridge.go | 39 ++--------- core/adapters/bridge_test.go | 55 ---------------- core/internal/features_test.go | 24 ++++--- core/services/fluxmonitor/fetchers_test.go | 16 ++--- core/services/fluxmonitor/flux_monitor.go | 66 +++++++++++-------- .../services/fluxmonitor/flux_monitor_test.go | 21 +++--- core/services/pipeline/task.bridge_test.go | 13 ++-- core/store/models/bridge_type.go | 23 +++++++ docs/CHANGELOG.md | 1 + 9 files changed, 100 insertions(+), 158 deletions(-) diff --git a/core/adapters/bridge.go b/core/adapters/bridge.go index 6a715faad97..5f0de8039f0 100644 --- a/core/adapters/bridge.go +++ b/core/adapters/bridge.go @@ -8,13 +8,11 @@ import ( "net/http" "net/url" - uuid "github.com/satori/go.uuid" "github.com/smartcontractkit/chainlink/core/store" "github.com/smartcontractkit/chainlink/core/store/models" "github.com/smartcontractkit/chainlink/core/utils" "github.com/pkg/errors" - "github.com/tidwall/gjson" ) // Bridge adapter is responsible for connecting the task pipeline to external @@ -42,31 +40,10 @@ func (ba *Bridge) Perform(input models.RunInput, store *store.Store) models.RunO } else if input.Status().PendingBridge() { return models.NewRunOutputInProgress(input.Data()) } - meta := getMeta(store, input.JobRunID()) - return ba.handleNewRun(input, meta, store) + return ba.handleNewRun(input, store) } -func getMeta(store *store.Store, jobRunID uuid.UUID) *models.JSON { - jobRun, err := store.ORM.FindJobRun(jobRunID) - if err != nil { - return nil - } else if jobRun.RunRequest.TxHash == nil || jobRun.RunRequest.BlockHash == nil { - return nil - } - meta := fmt.Sprintf(` - { - "initiator": { - "transactionHash": "%s", - "blockHash": "%s" - } - }`, - jobRun.RunRequest.TxHash.Hex(), - jobRun.RunRequest.BlockHash.Hex(), - ) - return &models.JSON{Result: gjson.Parse(meta)} -} - -func (ba *Bridge) handleNewRun(input models.RunInput, meta *models.JSON, store *store.Store) models.RunOutput { +func (ba *Bridge) handleNewRun(input models.RunInput, store *store.Store) models.RunOutput { data, err := models.Merge(input.Data(), ba.Params) if err 
!= nil { return models.NewRunOutputError(baRunResultError("handling data param", err)) @@ -82,7 +59,7 @@ func (ba *Bridge) handleNewRun(input models.RunInput, meta *models.JSON, store * // Some node operators may run external adapters on their own hardware httpConfig.AllowUnrestrictedNetworkAccess = true - body, err := ba.postToExternalAdapter(input, meta, responseURL, httpConfig) + body, err := ba.postToExternalAdapter(input, responseURL, httpConfig) if err != nil { return models.NewRunOutputError(baRunResultError("post to external adapter", err)) } @@ -120,7 +97,6 @@ func (ba *Bridge) responseToRunResult(body []byte, input models.RunInput) models func (ba *Bridge) postToExternalAdapter( input models.RunInput, - meta *models.JSON, bridgeResponseURL *url.URL, config utils.HTTPRequestConfig, ) ([]byte, error) { @@ -129,7 +105,7 @@ func (ba *Bridge) postToExternalAdapter( return nil, errors.Wrap(err, "error merging bridge params with input params") } - outgoing := bridgeOutgoing{JobRunID: input.JobRunID().String(), Data: data, Meta: meta} + outgoing := bridgeOutgoing{JobRunID: input.JobRunID().String(), Data: data} if bridgeResponseURL != nil { outgoing.ResponseURL = bridgeResponseURL.String() } @@ -169,10 +145,9 @@ func baRunResultError(str string, err error) error { } type bridgeOutgoing struct { - JobRunID string `json:"id"` - Data models.JSON `json:"data"` - Meta *models.JSON `json:"meta,omitempty"` - ResponseURL string `json:"responseURL,omitempty"` + JobRunID string `json:"id"` + Data models.JSON `json:"data"` + ResponseURL string `json:"responseURL,omitempty"` } var zeroURL = new(url.URL) diff --git a/core/adapters/bridge_test.go b/core/adapters/bridge_test.go index 5c905f73a0b..5c2a9fd1368 100644 --- a/core/adapters/bridge_test.go +++ b/core/adapters/bridge_test.go @@ -1,17 +1,13 @@ package adapters_test import ( - "encoding/hex" "fmt" "net/http" "testing" - "github.com/smartcontractkit/chainlink/core/services/eth" - uuid "github.com/satori/go.uuid" "github.com/smartcontractkit/chainlink/core/adapters" "github.com/smartcontractkit/chainlink/core/internal/cltest" - "github.com/smartcontractkit/chainlink/core/store" "github.com/smartcontractkit/chainlink/core/store/models" "github.com/stretchr/testify/assert" @@ -24,13 +20,11 @@ func TestBridge_PerformEmbedsParamsInData(t *testing.T) { store.Config.Set("BRIDGE_RESPONSE_URL", cltest.WebURL(t, "")) data := "" - meta := false token := "" mock, cleanup := cltest.NewHTTPMockServer(t, http.StatusOK, "POST", `{"pending": true}`, func(h http.Header, b string) { body := cltest.JSONFromString(t, b) data = body.Get("data").String() - meta = body.Get("meta").Exists() token = h.Get("Authorization") }, ) @@ -44,55 +38,6 @@ func TestBridge_PerformEmbedsParamsInData(t *testing.T) { result := ba.Perform(input, store) require.NoError(t, result.Error()) assert.Equal(t, `{"bodyParam":true,"result":"100"}`, data) - assert.False(t, meta) - assert.Equal(t, "Bearer "+bt.OutgoingToken, token) -} - -func setupJobRunAndStore(t *testing.T, txHash []byte, blockHash []byte) (*store.Store, uuid.UUID, func()) { - rpcClient, gethClient, _, assertMocksCalled := cltest.NewEthMocksWithStartupAssertions(t) - app, cleanup := cltest.NewApplication(t, - eth.NewClientWith(rpcClient, gethClient), - ) - app.Store.Config.Set("BRIDGE_RESPONSE_URL", cltest.WebURL(t, "")) - require.NoError(t, app.Start()) - jr := app.MustCreateJobRun(txHash, blockHash) - - return app.Store, jr.ID, func() { - assertMocksCalled() - cleanup() - } -} - -func TestBridge_IncludesMetaIfJobRunIsInDB(t 
*testing.T) { - txHashHex := "d6432b8321d9988e664f23cfce392dff8221da36a44ebb622160156dcef4abb9" - blockHashHex := "d5150a4f602af1de7ff51f02c5b55b130693596c68f00b7796ac2b0f51175675" - txHash, _ := hex.DecodeString(txHashHex) - blockHash, _ := hex.DecodeString(blockHashHex) - store, jobRunID, cleanup := setupJobRunAndStore(t, txHash, blockHash) - defer cleanup() - - data := "" - meta := "" - token := "" - mock, cleanup := cltest.NewHTTPMockServer(t, http.StatusOK, "POST", `{"pending": true}`, - func(h http.Header, b string) { - body := cltest.JSONFromString(t, b) - data = body.Get("data").String() - meta = body.Get("meta").String() - token = h.Get("Authorization") - }, - ) - defer cleanup() - - _, bt := cltest.NewBridgeType(t, "auctionBidding", mock.URL) - params := cltest.JSONFromString(t, `{"bodyParam": true}`) - ba := &adapters.Bridge{BridgeType: *bt, Params: params} - - input := cltest.NewRunInputWithResultAndJobRunID("100", jobRunID) - result := ba.Perform(input, store) - require.NoError(t, result.Error()) - assert.Equal(t, `{"bodyParam":true,"result":"100"}`, data) - assert.Equal(t, fmt.Sprintf(`{"initiator":{"transactionHash":"0x%s","blockHash":"0x%s"}}`, txHashHex, blockHashHex), meta) assert.Equal(t, "Bearer "+bt.OutgoingToken, token) } diff --git a/core/internal/features_test.go b/core/internal/features_test.go index 31903510f4a..cda2b34333d 100644 --- a/core/internal/features_test.go +++ b/core/internal/features_test.go @@ -5,7 +5,6 @@ import ( "context" "encoding/hex" "encoding/json" - "errors" "fmt" "io" "io/ioutil" @@ -820,6 +819,11 @@ func TestIntegration_FluxMonitor_Deviation(t *testing.T) { Return(cltest.MustGenericEncode([]string{"uint256"}, big.NewInt(0)), nil).Once() cltest.MockFluxAggCall(gethClient, cltest.FluxAggAddress, "maxSubmissionValue"). Return(cltest.MustGenericEncode([]string{"uint256"}, big.NewInt(10000000)), nil).Once() + cltest.MockFluxAggCall(gethClient, cltest.FluxAggAddress, "latestRoundData"). + Return(cltest.MustGenericEncode( + []string{"uint80", "int256", "uint256", "uint256", "uint80"}, + big.NewInt(2), big.NewInt(1), big.NewInt(1), big.NewInt(1), big.NewInt(1), + ), nil).Maybe() // Called 3-4 times. // Configure fake Eth Node to return 10,000 cents when FM initiates price. minPayment := app.Store.Config.MinimumContractPayment().ToInt().Uint64() @@ -831,15 +835,6 @@ func TestIntegration_FluxMonitor_Deviation(t *testing.T) { cltest.MockFluxAggCall(gethClient, cltest.FluxAggAddress, "getOracles"). Return(getOraclesResult, nil).Once() - // latestRoundData() - lrdTypes := []string{"uint80", "int256", "uint256", "uint256", "uint80"} - latestRoundDataResult, err := cltest.GenericEncode( - lrdTypes, big.NewInt(2), big.NewInt(1), big.NewInt(1), big.NewInt(1), big.NewInt(1), - ) - require.NoError(t, err) - cltest.MockFluxAggCall(gethClient, cltest.FluxAggAddress, "latestRoundData"). - Return(latestRoundDataResult, nil).Once() - // oracleRoundState() result := cltest.MakeRoundStateReturnData(2, true, 10000, 7, 0, availableFunds, minPayment, 1) cltest.MockFluxAggCall(gethClient, cltest.FluxAggAddress, "oracleRoundState"). @@ -963,6 +958,12 @@ func TestIntegration_FluxMonitor_NewRound(t *testing.T) { Return(cltest.MustGenericEncode([]string{"uint256"}, big.NewInt(0)), nil).Once() cltest.MockFluxAggCall(gethClient, cltest.FluxAggAddress, "maxSubmissionValue"). Return(cltest.MustGenericEncode([]string{"uint256"}, big.NewInt(10000000)), nil).Once() + require.NoError(t, err) + cltest.MockFluxAggCall(gethClient, cltest.FluxAggAddress, "latestRoundData"). 
+ Return(cltest.MustGenericEncode( + []string{"uint80", "int256", "uint256", "uint256", "uint80"}, + big.NewInt(2), big.NewInt(1), big.NewInt(1), big.NewInt(1), big.NewInt(1), + ), nil).Maybe() // Called 3-4 times. // Configure fake Eth Node to return 10,000 cents when FM initiates price. getOraclesResult, err := cltest.GenericEncode([]string{"address[]"}, []common.Address{}) @@ -970,9 +971,6 @@ func TestIntegration_FluxMonitor_NewRound(t *testing.T) { cltest.MockFluxAggCall(gethClient, cltest.FluxAggAddress, "getOracles"). Return(getOraclesResult, nil).Once() - cltest.MockFluxAggCall(gethClient, cltest.FluxAggAddress, "latestRoundData"). - Return(nil, errors.New("first round")).Once() - result := cltest.MakeRoundStateReturnData(2, true, 10000, 7, 0, availableFunds, minPayment, 1) cltest.MockFluxAggCall(gethClient, cltest.FluxAggAddress, "oracleRoundState"). Return(result, nil) diff --git a/core/services/fluxmonitor/fetchers_test.go b/core/services/fluxmonitor/fetchers_test.go index d5392191a4f..49f9ebd2514 100644 --- a/core/services/fluxmonitor/fetchers_test.go +++ b/core/services/fluxmonitor/fetchers_test.go @@ -3,8 +3,8 @@ package fluxmonitor import ( "context" "encoding/json" - "fmt" "io/ioutil" + "math/big" "net/http" "net/http/httptest" "net/url" @@ -16,7 +16,6 @@ import ( "github.com/stretchr/testify/require" "gopkg.in/guregu/null.v4" - "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/flux_aggregator_wrapper" "github.com/smartcontractkit/chainlink/core/store/models" "github.com/smartcontractkit/chainlink/core/utils" ) @@ -114,20 +113,13 @@ func TestHTTPFetcher_Meta(t *testing.T) { var req fetcherRequest body, _ := ioutil.ReadAll(r.Body) err := json.Unmarshal(body, &req) - fmt.Println(req.Meta) require.NoError(t, err) - require.Equal(t, false, req.Meta["EligibleToSubmit"]) - require.Equal(t, float64(0), req.Meta["OracleCount"]) - require.Equal(t, float64(7), req.Meta["RoundId"]) - require.Equal(t, float64(0), req.Meta["StartedAt"]) - require.Equal(t, float64(11), req.Meta["Timeout"]) - w.Header().Set("Content-Type", "application/json") + require.Equal(t, float64(10), req.Meta["latestAnswer"]) w.Header().Set("Content-Type", "application/json") require.NoError(t, json.NewEncoder(w).Encode(empty)) }) - roundState := flux_aggregator_wrapper.OracleRoundState{RoundId: 7, Timeout: 11} - request, err := models.MarshalToMap(&roundState) + md, err := models.BridgeMetaData(big.NewInt(10), big.NewInt(1616447984)) require.NoError(t, err) s1 := httptest.NewServer(handler) @@ -137,7 +129,7 @@ func TestHTTPFetcher_Meta(t *testing.T) { require.NoError(t, err) fetcher := newHTTPFetcher(defaultHTTPTimeout, ethUSDPairing, feedURL, 32768) - fetcher.Fetch(context.Background(), request) + fetcher.Fetch(context.Background(), md) } func TestHTTPFetcher_ErrorMessage(t *testing.T) { diff --git a/core/services/fluxmonitor/flux_monitor.go b/core/services/fluxmonitor/flux_monitor.go index 6456f44273b..7b91cb62b69 100644 --- a/core/services/fluxmonitor/flux_monitor.go +++ b/core/services/fluxmonitor/flux_monitor.go @@ -6,6 +6,7 @@ import ( "math/big" "net/url" "reflect" + "strings" "sync" "time" @@ -799,7 +800,14 @@ func (p *PollingDeviationChecker) respondToAnswerUpdatedLog(log flux_aggregator_ // The NewRound log tells us that an oracle has initiated a new round. This tells us that we // need to poll and submit an answer to the contract regardless of the deviation. 
func (p *PollingDeviationChecker) respondToNewRoundLog(log flux_aggregator_wrapper.FluxAggregatorNewRound) { - logger.Debugw("NewRound log", p.loggerFieldsForNewRound(log)...) + l := logger.Default.With( + "round", log.RoundId, + "startedBy", log.StartedBy.Hex(), + "startedAt", log.StartedAt.String(), + "contract", p.fluxAggregator.Address().Hex(), + "jobID", p.initr.JobSpecID, + ) + l.Debugw("NewRound log") promfm.SetBigInt(promfm.SeenRound.WithLabelValues(p.initr.JobSpecID.String()), log.RoundId) @@ -853,21 +861,21 @@ func (p *PollingDeviationChecker) respondToNewRoundLog(log flux_aggregator_wrapp mostRecentRoundID, err := p.store.MostRecentFluxMonitorRoundID(p.initr.Address) if err != nil && err != gorm.ErrRecordNotFound { - logger.Errorw(fmt.Sprintf("error fetching Flux Monitor most recent round ID from DB: %v", err), p.loggerFieldsForNewRound(log)...) + l.Errorw("error fetching Flux Monitor most recent round ID from DB", "err", err) return } if logRoundID < mostRecentRoundID { err = p.store.DeleteFluxMonitorRoundsBackThrough(p.initr.Address, logRoundID) if err != nil { - logger.Errorw(fmt.Sprintf("error deleting reorged Flux Monitor rounds from DB: %v", err), p.loggerFieldsForNewRound(log)...) + l.Errorw("error deleting reorged Flux Monitor rounds from DB", "err", err) return } } roundStats, jobRunStatus, err := p.statsAndStatusForRound(logRoundID) if err != nil { - logger.Errorw(fmt.Sprintf("error determining round stats / run status for round: %v", err), p.loggerFieldsForNewRound(log)...) + l.Errorw("error determining round stats / run status for round", "err", err) return } @@ -878,43 +886,49 @@ func (p *PollingDeviationChecker) respondToNewRoundLog(log flux_aggregator_wrapp // If our previous attempt is still pending, return early and don't re-submit // If our previous attempt is already over (completed or errored), we should retry if !jobRunStatus.Finished() { - logger.Debugw("Ignoring new round request: started round simultaneously with another node", p.loggerFieldsForNewRound(log)...) + l.Debugw("Ignoring new round request: started round simultaneously with another node") return } } // Ignore rounds we started if p.oracleAddress == log.StartedBy { - logger.Infow("Ignoring new round request: we started this round", p.loggerFieldsForNewRound(log)...) + l.Infow("Ignoring new round request: we started this round") return } // Ignore rounds we're not eligible for, or for which we won't be paid roundState, err := p.roundState(logRoundID) if err != nil { - logger.Errorw(fmt.Sprintf("Ignoring new round request: error fetching eligibility from contract: %v", err), p.loggerFieldsForNewRound(log)...) + l.Errorw("Ignoring new round request: error fetching eligibility from contract", "err", err) return } p.resetTickers(roundState) err = p.checkEligibilityAndAggregatorFunding(roundState) if err != nil { - logger.Infow(fmt.Sprintf("Ignoring new round request: %v", err), p.loggerFieldsForNewRound(log)...) + l.Infow("Ignoring new round request", "err", err) return } - logger.Infow("Responding to new round request", p.loggerFieldsForNewRound(log)...) + l.Infow("Responding to new round request") - request, err := models.MarshalToMap(&roundState) + lrd, err := p.fluxAggregator.LatestRoundData(nil) + if err != nil && !strings.Contains(err.Error(), "No data present") { + l.Warnw("Error reading latest round data for request meta", "err", err) + return + } + // If no data present, just send 0 for backwards compatibility. 
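A hedged sketch of the read pattern introduced above, in isolation: tolerate the aggregator's "No data present" revert (a fresh feed has no rounds yet) but fail on any other error. The aggregatorReader interface and all names below are illustrative stand-ins, not the generated contract wrapper:

package example

import (
	"fmt"
	"math/big"
	"strings"
)

type latestRound struct {
	Answer    *big.Int
	UpdatedAt *big.Int
}

type aggregatorReader interface {
	LatestRoundData() (latestRound, error)
}

func bridgeMeta(agg aggregatorReader) (map[string]interface{}, error) {
	lrd, err := agg.LatestRoundData()
	if err != nil && !strings.Contains(err.Error(), "No data present") {
		// A genuine failure: bail out rather than sending bogus metadata.
		return nil, fmt.Errorf("reading latest round data: %w", err)
	}
	// On "No data present", lrd is the zero value, so the bridge still
	// receives a well-formed (if empty) meta payload.
	return map[string]interface{}{
		"latestAnswer": lrd.Answer,
		"updatedAt":    lrd.UpdatedAt,
	}, nil
}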
+	metaDataForBridge, err := models.BridgeMetaData(lrd.Answer, lrd.UpdatedAt)
 	if err != nil {
-		logger.Warnw("Error marshalling roundState for request meta", p.loggerFieldsForNewRound(log)...)
+		l.Warnw("Error constructing metadata for bridge request", "err", err)
 		return
 	}
 
 	ctx, cancel := utils.CombinedContext(p.chStop)
 	defer cancel()
-	polledAnswer, err := p.fetcher.Fetch(ctx, request)
+	polledAnswer, err := p.fetcher.Fetch(ctx, metaDataForBridge)
 	if err != nil {
-		logger.Errorw(fmt.Sprintf("unable to fetch median price: %v", err), p.loggerFieldsForNewRound(log)...)
+		l.Errorw("unable to fetch median price", "err", err)
 		return
 	}
@@ -924,14 +938,14 @@ func (p *PollingDeviationChecker) respondToNewRoundLog(log flux_aggregator_wrapp
 	var payment assets.Link
 	if roundState.PaymentAmount == nil {
-		logger.Error("roundState.PaymentAmount shouldn't be nil")
+		l.Error("roundState.PaymentAmount shouldn't be nil")
 	} else {
 		payment = assets.Link(*roundState.PaymentAmount)
 	}
 
 	err = p.createJobRun(polledAnswer, logRoundID, &payment)
 	if err != nil {
-		logger.Errorw(fmt.Sprintf("unable to create job run: %v", err), p.loggerFieldsForNewRound(log)...)
+		l.Errorw("unable to create job run", "err", err)
 		return
 	}
 }
@@ -1035,15 +1049,21 @@ func (p *PollingDeviationChecker) pollIfEligible(thresholds DeviationThresholds)
 		return
 	}
 
-	request, err := models.MarshalToMap(&roundState)
+	lrd, err := p.fluxAggregator.LatestRoundData(nil)
+	if err != nil && !strings.Contains(err.Error(), "No data present") {
+		l.Warnw("Error reading latest round data for request meta", "err", err)
+		return
+	}
+	// If no data present, just send 0 for backwards compatibility.
+	metaDataForBridge, err := models.BridgeMetaData(lrd.Answer, lrd.UpdatedAt)
 	if err != nil {
-		l.Warnw("Error marshalling roundState for request meta")
+		l.Warnw("Error constructing metadata for bridge request", "err", err)
 		return
 	}
 
 	ctx, cancel := utils.CombinedContext(p.chStop)
 	defer cancel()
-	polledAnswer, err := p.fetcher.Fetch(ctx, request)
+	polledAnswer, err := p.fetcher.Fetch(ctx, metaDataForBridge)
 	if err != nil {
 		l.Errorw("can't fetch answer", "err", err)
 		p.store.UpsertErrorFor(p.JobID(), "Error polling")
@@ -1277,16 +1297,6 @@ func (p *PollingDeviationChecker) loggerFields(added ...interface{}) []interface
 		}...)
} -func (p *PollingDeviationChecker) loggerFieldsForNewRound(log flux_aggregator_wrapper.FluxAggregatorNewRound) []interface{} { - return []interface{}{ - "round", log.RoundId, - "startedBy", log.StartedBy.Hex(), - "startedAt", log.StartedAt.String(), - "contract", p.fluxAggregator.Address().Hex(), - "jobID", p.initr.JobSpecID, - } -} - func (p *PollingDeviationChecker) loggerFieldsForAnswerUpdated(log flux_aggregator_wrapper.FluxAggregatorAnswerUpdated) []interface{} { return []interface{}{ "round", log.RoundId, diff --git a/core/services/fluxmonitor/flux_monitor_test.go b/core/services/fluxmonitor/flux_monitor_test.go index 8ee82eaafa6..512c65e04fd 100644 --- a/core/services/fluxmonitor/flux_monitor_test.go +++ b/core/services/fluxmonitor/flux_monitor_test.go @@ -46,7 +46,7 @@ var ( } } freshContractRoundDataResponse = func() (flux_aggregator_wrapper.LatestRoundData, error) { - return flux_aggregator_wrapper.LatestRoundData{}, errors.New("unstarted") + return flux_aggregator_wrapper.LatestRoundData{}, errors.New("No data present") } ) @@ -233,6 +233,7 @@ func TestPollingDeviationChecker_PollIfEligible(t *testing.T) { OracleCount: oracleCount, } fluxAggregator.On("OracleRoundState", nilOpts, nodeAddr, uint32(0)).Return(roundState, nil).Maybe() + fluxAggregator.On("LatestRoundData", nilOpts).Return(freshContractRoundDataResponse()).Maybe() if test.expectedToPoll { fetcher.On("Fetch", mock.Anything, mock.Anything).Return(decimal.NewFromInt(answers.polledAnswer), nil) @@ -387,7 +388,7 @@ func TestPollingDeviationChecker_BuffersLogs(t *testing.T) { chSafeToFillQueue := make(chan struct{}) fluxAggregator := new(mocks.FluxAggregator) - fluxAggregator.On("LatestRoundData", nilOpts).Return(freshContractRoundDataResponse()).Once() + fluxAggregator.On("LatestRoundData", nilOpts).Return(freshContractRoundDataResponse()).Times(4) fluxAggregator.On("OracleRoundState", nilOpts, nodeAddr, uint32(1)). Return(makeRoundStateForRoundID(1), nil). 
Run(func(mock.Arguments) { @@ -503,7 +504,7 @@ func TestPollingDeviationChecker_TriggerIdleTimeThreshold(t *testing.T) { idleDurationOccured := make(chan struct{}, 3) - fluxAggregator.On("LatestRoundData", nilOpts).Return(freshContractRoundDataResponse()).Once() + fluxAggregator.On("LatestRoundData", nilOpts).Return(freshContractRoundDataResponse()).Maybe() if test.expectedToSubmit { // performInitialPoll() roundState1 := flux_aggregator_wrapper.OracleRoundState{RoundId: 1, EligibleToSubmit: false, LatestSubmission: answerBigInt, StartedAt: now()} @@ -597,7 +598,7 @@ func TestPollingDeviationChecker_RoundTimeoutCausesPoll_timesOutAtZero(t *testin answerBigInt := big.NewInt(fetchedAnswer * int64(math.Pow10(int(initr.InitiatorParams.Precision)))) logBroadcaster.On("Register", mock.Anything, mock.Anything).Return(true, func() {}) - fluxAggregator.On("LatestRoundData", nilOpts).Return(makeRoundDataForRoundID(1), nil).Once() + fluxAggregator.On("LatestRoundData", nilOpts).Return(makeRoundDataForRoundID(1), nil).Maybe() fluxAggregator.On("Address").Return(initr.Address).Maybe() roundState0 := flux_aggregator_wrapper.OracleRoundState{RoundId: 1, EligibleToSubmit: false, LatestSubmission: answerBigInt, StartedAt: now()} fluxAggregator.On("OracleRoundState", nilOpts, nodeAddr, uint32(1)).Return(roundState0, nil).Once() // initialRoundState() @@ -678,7 +679,7 @@ func TestPollingDeviationChecker_UsesPreviousRoundStateOnStartup_RoundTimeout(t fluxAggregator.On("Address").Return(initr.Address).Maybe() fluxAggregator.On("GetOracles", nilOpts).Return(oracles, nil) - fluxAggregator.On("LatestRoundData", nilOpts).Return(makeRoundDataForRoundID(1), nil).Once() + fluxAggregator.On("LatestRoundData", nilOpts).Return(makeRoundDataForRoundID(1), nil).Maybe() fluxAggregator.On("OracleRoundState", nilOpts, nodeAddr, uint32(1)).Return(flux_aggregator_wrapper.OracleRoundState{ RoundId: 1, EligibleToSubmit: false, @@ -765,7 +766,7 @@ func TestPollingDeviationChecker_UsesPreviousRoundStateOnStartup_IdleTimer(t *te fluxAggregator.On("Address").Return(initr.Address).Maybe() fluxAggregator.On("GetOracles", nilOpts).Return(oracles, nil) - fluxAggregator.On("LatestRoundData", nilOpts).Return(makeRoundDataForRoundID(1), nil).Once() + fluxAggregator.On("LatestRoundData", nilOpts).Return(makeRoundDataForRoundID(1), nil).Maybe() // first roundstate in setInitialTickers() fluxAggregator.On("OracleRoundState", nilOpts, nodeAddr, uint32(1)).Return(flux_aggregator_wrapper.OracleRoundState{ RoundId: 1, @@ -842,7 +843,7 @@ func TestPollingDeviationChecker_RoundTimeoutCausesPoll_timesOutNotZero(t *testi fluxAggregator.On("Address").Return(initr.Address).Maybe() fluxAggregator.On("GetOracles", nilOpts).Return(oracles, nil) - fluxAggregator.On("LatestRoundData", nilOpts).Return(makeRoundDataForRoundID(1), nil).Once() + fluxAggregator.On("LatestRoundData", nilOpts).Return(makeRoundDataForRoundID(1), nil).Maybe() fluxAggregator.On("OracleRoundState", nilOpts, nodeAddr, uint32(1)).Return(flux_aggregator_wrapper.OracleRoundState{ RoundId: 1, EligibleToSubmit: false, @@ -1573,6 +1574,7 @@ func TestPollingDeviationChecker_DoesNotDoubleSubmit(t *testing.T) { checker.SetOracleAddress() checker.OnConnect() + fluxAggregator.On("LatestRoundData", nilOpts).Return(freshContractRoundDataResponse()).Maybe() // Fire off the NewRound log, which the node should respond to fluxAggregator.On("OracleRoundState", nilOpts, nodeAddr, uint32(roundID)). 
 			Return(flux_aggregator_wrapper.OracleRoundState{
@@ -1667,6 +1669,7 @@ func TestPollingDeviationChecker_DoesNotDoubleSubmit(t *testing.T) {
 
 		checker.OnConnect()
 
+		fluxAggregator.On("LatestRoundData", nilOpts).Return(flux_aggregator_wrapper.LatestRoundData{Answer: big.NewInt(100), UpdatedAt: big.NewInt(1616447984)}, nil).Maybe()
 		// First, force the node to try to poll, which should result in a submission
 		fluxAggregator.On("OracleRoundState", nilOpts, nodeAddr, uint32(0)).
 			Return(flux_aggregator_wrapper.OracleRoundState{
@@ -1678,8 +1681,8 @@ func TestPollingDeviationChecker_DoesNotDoubleSubmit(t *testing.T) {
 				OracleCount:      1,
 			}, nil).
 			Once()
-		meta := utils.MustUnmarshalToMap(`{"AvailableFunds":100000, "EligibleToSubmit":true, "LatestSubmission":100, "OracleCount":1, "PaymentAmount":100, "RoundId":3, "StartedAt":0, "Timeout":0}`)
-		fetcher.On("Fetch", mock.Anything, meta).
+		md, _ := models.BridgeMetaData(big.NewInt(100), big.NewInt(1616447984))
+		fetcher.On("Fetch", mock.Anything, md).
 			Return(decimal.NewFromInt(answer), nil).
 			Once()
 		rm.On("Create", job.ID, &initr, mock.Anything, mock.Anything).
diff --git a/core/services/pipeline/task.bridge_test.go b/core/services/pipeline/task.bridge_test.go
index 1247b05d82e..0654945ad53 100644
--- a/core/services/pipeline/task.bridge_test.go
+++ b/core/services/pipeline/task.bridge_test.go
@@ -5,6 +5,7 @@ import (
 	"encoding/json"
 	"fmt"
 	"io/ioutil"
+	"math/big"
 	"net/http"
 	"net/http/httptest"
 	"net/url"
@@ -15,7 +16,6 @@ import (
 	"gopkg.in/guregu/null.v4"
 
 	"github.com/smartcontractkit/chainlink/core/internal/cltest"
-	"github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/flux_aggregator_wrapper"
 	"github.com/smartcontractkit/chainlink/core/services/pipeline"
 	"github.com/smartcontractkit/chainlink/core/store/models"
 	"github.com/smartcontractkit/chainlink/core/utils"
@@ -157,17 +157,12 @@ func TestBridgeTask_Meta(t *testing.T) {
 		body, _ := ioutil.ReadAll(r.Body)
 		err := json.Unmarshal(body, &req)
 		require.NoError(t, err)
-		require.Equal(t, false, req.Meta["eligibleToSubmit"])
-		require.Equal(t, float64(0), req.Meta["oracleCount"])
-		require.Equal(t, float64(7), req.Meta["reportableRoundID"])
-		require.Equal(t, float64(0), req.Meta["startedAt"])
-		require.Equal(t, float64(11), req.Meta["timeout"])
+		require.Equal(t, float64(10), req.Meta["latestAnswer"])
 		w.Header().Set("Content-Type", "application/json")
 		require.NoError(t, json.NewEncoder(w).Encode(empty))
 	})
 
-	roundState := flux_aggregator_wrapper.OracleRoundState{RoundId: 7, Timeout: 11}
-	request, err := models.MarshalToMap(&roundState)
+	metaDataForBridge, err := models.BridgeMetaData(big.NewInt(10), big.NewInt(1616447984))
 	require.NoError(t, err)
 
 	s1 := httptest.NewServer(handler)
@@ -188,7 +183,7 @@ func TestBridgeTask_Meta(t *testing.T) {
 
 	task.Run(context.Background(), pipeline.TaskRun{
 		PipelineRun: pipeline.Run{
-			Meta: pipeline.JSONSerializable{request, false},
+			Meta: pipeline.JSONSerializable{metaDataForBridge, false},
 		},
 	}, nil)
 }
diff --git a/core/store/models/bridge_type.go b/core/store/models/bridge_type.go
index aa6acf2e714..94ddd4381a9 100644
--- a/core/store/models/bridge_type.go
+++ b/core/store/models/bridge_type.go
@@ -2,7 +2,9 @@ package models
 
 import (
 	"crypto/subtle"
+	"encoding/json"
 	"fmt"
+	"math/big"
 	"time"
 
 	"github.com/smartcontractkit/chainlink/core/assets"
@@ -141,3 +143,24 @@ func incomingTokenHash(token, salt string) (string, error) {
 	}
 	return hash, nil
 }
+
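One detail behind the float64 assertions in these tests: the BridgeMetaData helper added just below round-trips its inputs through encoding/json into a map[string]interface{}, and encoding/json decodes every JSON number held in an interface{} as float64. A small, self-contained illustration:

package main

import (
	"encoding/json"
	"fmt"
	"math/big"
)

func main() {
	// Mirror the BridgeMetaData round trip: marshal a struct, decode into a generic map.
	in := struct {
		LatestAnswer *big.Int `json:"latestAnswer"`
	}{LatestAnswer: big.NewInt(10)}

	b, err := json.Marshal(&in)
	if err != nil {
		panic(err)
	}

	var out map[string]interface{}
	if err := json.Unmarshal(b, &out); err != nil {
		panic(err)
	}

	// JSON numbers decoded into interface{} always come back as float64,
	// so assertions must compare float64(10), not int 10 or *big.Int.
	fmt.Printf("%T %v\n", out["latestAnswer"], out["latestAnswer"]) // float64 10
}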
+// NOTE: latestAnswer and updatedAt are the only metadata fields used.
+// Currently the market closer and outlier detection adapters depend on latestAnswer.
+// https://github.com/smartcontractkit/external-adapters-js/tree/f474bd2e2de13ebe5c9dc3df36ebb7018817005e/composite/market-closure
+// https://github.com/smartcontractkit/external-adapters-js/tree/5abb8e5ec2024f724fd39122897baa63c3cd0167/composite/outlier-detection
+func BridgeMetaData(latestAnswer *big.Int, updatedAt *big.Int) (map[string]interface{}, error) {
+	type m struct {
+		LatestAnswer *big.Int `json:"latestAnswer"`
+		UpdatedAt    *big.Int `json:"updatedAt"` // A unix timestamp
+	}
+	b, err := json.Marshal(&m{LatestAnswer: latestAnswer, UpdatedAt: updatedAt})
+	if err != nil {
+		return nil, err
+	}
+	var mp map[string]interface{}
+	err = json.Unmarshal(b, &mp)
+	if err != nil {
+		return nil, err
+	}
+	return mp, nil
+}
diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index 2969ae64506..ac58c5ce01e 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -29,6 +29,7 @@ Example settings:
 ### Fixed
 - Chainlink node now automatically sets the correct nonce on startup if you are restoring from a previous backup (manual setnextnonce is no longer necessary).
+- Flux monitor jobs should now work correctly with [outlier-detection](https://github.com/smartcontractkit/external-adapters-js/tree/develop/composite/outlier-detection) and [market-closure](https://github.com/smartcontractkit/external-adapters-js/tree/develop/composite/market-closure) external adapters.
 - Performance improvements to OCR job adds. Removed the pipeline_task_specs table and
   added a new column `dot_id` to the pipeline_task_runs table which links a pipeline_task_run

From f61259d5ba792640aae491dd84419bc5ec484c53 Mon Sep 17 00:00:00 2001
From: John Barker
Date: Tue, 16 Mar 2021 12:19:59 -0600
Subject: [PATCH 077/116] Treat a missing cookie file as no cookie, not an error

---
 core/cmd/client.go | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/core/cmd/client.go b/core/cmd/client.go
index 50b821c90a4..d00268c7b82 100644
--- a/core/cmd/client.go
+++ b/core/cmd/client.go
@@ -9,6 +9,7 @@ import (
 	"io/ioutil"
 	"log"
 	"net/http"
+	"os"
 	"path"
 	"strconv"
 	"strings"
@@ -342,6 +343,9 @@ func (d DiskCookieStore) Save(cookie *http.Cookie) error {
 func (d DiskCookieStore) Retrieve() (*http.Cookie, error) {
 	b, err := ioutil.ReadFile(d.cookiePath())
 	if err != nil {
+		if os.IsNotExist(err) {
+			return nil, nil
+		}
 		return nil, multierr.Append(errors.New("unable to retrieve credentials, you must first login through the CLI"), err)
 	}
 	header := http.Header{}

From 16062fda75535e854fa2c257c0bb681233d80a10 Mon Sep 17 00:00:00 2001
From: John Barker
Date: Tue, 16 Mar 2021 12:33:20 -0600
Subject: [PATCH 078/116] Allow nil cookie if file missing

---
 core/cmd/client_test.go                 | 44 ++++++++++++-------------
 core/internal/fixtures/badcookie/cookie |  0
 2 files changed, 22 insertions(+), 22 deletions(-)
 create mode 100644 core/internal/fixtures/badcookie/cookie

diff --git a/core/cmd/client_test.go b/core/cmd/client_test.go
index c82c23cc242..3499c8a0df7 100644
--- a/core/cmd/client_test.go
+++ b/core/cmd/client_test.go
@@ -96,28 +96,28 @@ func TestDiskCookieStore_Retrieve(t *testing.T) {
 	defer cleanup()
 	config := tc.Config
 
-	tests := []struct {
-		name      string
-		rootDir   string
-		wantError bool
-	}{
-		{"missing", config.RootDir(), true},
-		{"correct fixture", "../internal/fixtures", false},
-	}
-	for _, test := range tests {
-		t.Run(test.name, func(t *testing.T) {
-			config.Set("ROOT", test.rootDir)
-			store := cmd.DiskCookieStore{Config: config}
-			cookie, err := store.Retrieve()
-			if
test.wantError { - assert.Error(t, err) - assert.Nil(t, cookie) - } else { - assert.NoError(t, err) - assert.NotNil(t, cookie) - } - }) - } + t.Run("missing cookie file", func(t *testing.T) { + store := cmd.DiskCookieStore{Config: config} + cookie, err := store.Retrieve() + assert.NoError(t, err) + assert.Nil(t, cookie) + }) + + t.Run("invalid cookie file", func(t *testing.T) { + config.Set("ROOT", "../internal/fixtures/badcookie") + store := cmd.DiskCookieStore{Config: config} + cookie, err := store.Retrieve() + assert.Error(t, err) + assert.Nil(t, cookie) + }) + + t.Run("valid cookie file", func(t *testing.T) { + config.Set("ROOT", "../internal/fixtures") + store := cmd.DiskCookieStore{Config: config} + cookie, err := store.Retrieve() + assert.NoError(t, err) + assert.NotNil(t, cookie) + }) } func TestTerminalAPIInitializer_InitializeWithoutAPIUser(t *testing.T) { diff --git a/core/internal/fixtures/badcookie/cookie b/core/internal/fixtures/badcookie/cookie new file mode 100644 index 00000000000..e69de29bb2d From 52119b23cb257048570685f31a023208507e7295 Mon Sep 17 00:00:00 2001 From: John Barker Date: Tue, 16 Mar 2021 13:15:05 -0600 Subject: [PATCH 079/116] Revert changes to RemoteLogin to restore password prompt --- core/cmd/remote_client.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/core/cmd/remote_client.go b/core/cmd/remote_client.go index a61300364a2..7d5c726e4d4 100644 --- a/core/cmd/remote_client.go +++ b/core/cmd/remote_client.go @@ -429,11 +429,7 @@ func (cli *Client) RemoveBridge(c *clipkg.Context) (err error) { // RemoteLogin creates a cookie session to run remote commands. func (cli *Client) RemoteLogin(c *clipkg.Context) error { - credentialsFile := c.String("file") - if credentialsFile == "" { - credentialsFile = cli.Config.AdminCredentialsFile() - } - sessionRequest, err := cli.buildSessionRequest(credentialsFile) + sessionRequest, err := cli.buildSessionRequest(c.String("file")) if err != nil { return cli.errorOut(err) } From 713ec0efb8f717d7230f7dfecdc868f4f6288bd0 Mon Sep 17 00:00:00 2001 From: Alex Roan Date: Wed, 24 Mar 2021 15:22:12 +0000 Subject: [PATCH 080/116] 0.7/dev contracts style --- .../src/v0.7/dev/AggregatorProxy.sol | 115 ++++++++++++++---- evm-contracts/src/v0.7/dev/ConfirmedOwner.sol | 12 +- .../src/v0.7/dev/LinkTokenReceiver.sol | 8 +- evm-contracts/src/v0.7/dev/Operator.sol | 4 +- .../src/v0.7/dev/OperatorFactory.sol | 35 +++--- .../src/v0.7/dev/OperatorForwarder.sol | 4 +- 6 files changed, 130 insertions(+), 48 deletions(-) diff --git a/evm-contracts/src/v0.7/dev/AggregatorProxy.sol b/evm-contracts/src/v0.7/dev/AggregatorProxy.sol index f3b03cee6cd..0a014f11f29 100644 --- a/evm-contracts/src/v0.7/dev/AggregatorProxy.sol +++ b/evm-contracts/src/v0.7/dev/AggregatorProxy.sol @@ -24,10 +24,20 @@ contract AggregatorProxy is AggregatorProxyInterface, ConfirmedOwner { uint256 constant private PHASE_SIZE = 16; uint256 constant private MAX_ID = 2**(PHASE_OFFSET+PHASE_SIZE) - 1; - event AggregatorProposed(address indexed current, address indexed proposed); - event AggregatorConfirmed(address indexed previous, address indexed latest); - - constructor(address aggregatorAddress) ConfirmedOwner(msg.sender) { + event AggregatorProposed( + address indexed current, + address indexed proposed + ); + event AggregatorConfirmed( + address indexed previous, + address indexed latest + ); + + constructor( + address aggregatorAddress + ) + ConfirmedOwner(msg.sender) + { setAggregator(aggregatorAddress); } @@ -44,7 +54,9 @@ contract AggregatorProxy 
is AggregatorProxyInterface, ConfirmedOwner { view virtual override - returns (int256 answer) + returns ( + int256 answer + ) { return s_currentPhase.aggregator.latestAnswer(); } @@ -62,7 +74,9 @@ contract AggregatorProxy is AggregatorProxyInterface, ConfirmedOwner { view virtual override - returns (uint256 updatedAt) + returns ( + uint256 updatedAt + ) { return s_currentPhase.aggregator.latestTimestamp(); } @@ -76,12 +90,16 @@ contract AggregatorProxy is AggregatorProxyInterface, ConfirmedOwner { * an already answered Aggregator or use the recommended getRoundData * instead which includes better verification information. */ - function getAnswer(uint256 roundId) + function getAnswer( + uint256 roundId + ) public view virtual override - returns (int256 answer) + returns ( + int256 answer + ) { if (roundId > MAX_ID) return 0; @@ -101,12 +119,16 @@ contract AggregatorProxy is AggregatorProxyInterface, ConfirmedOwner { * an already answered Aggregator or use the recommended getRoundData * instead which includes better verification information. */ - function getTimestamp(uint256 roundId) + function getTimestamp( + uint256 roundId + ) public view virtual override - returns (uint256 updatedAt) + returns ( + uint256 updatedAt + ) { if (roundId > MAX_ID) return 0; @@ -132,7 +154,9 @@ contract AggregatorProxy is AggregatorProxyInterface, ConfirmedOwner { view virtual override - returns (uint256 roundId) + returns ( + uint256 roundId + ) { Phase memory phase = s_currentPhase; // cache storage reads return addPhase(phase.id, uint64(phase.aggregator.latestRound())); @@ -163,7 +187,9 @@ contract AggregatorProxy is AggregatorProxyInterface, ConfirmedOwner { * (Only some AggregatorV3Interface implementations return meaningful values) * @dev Note that answer and updatedAt may change between queries. */ - function getRoundData(uint80 roundId) + function getRoundData( + uint80 roundId + ) public view virtual @@ -249,7 +275,9 @@ contract AggregatorProxy is AggregatorProxyInterface, ConfirmedOwner { * @return answeredInRound is the round ID of the round in which the answer * was computed. 
*/ - function proposedGetRoundData(uint80 roundId) + function proposedGetRoundData( + uint80 roundId + ) external view virtual @@ -301,7 +329,9 @@ contract AggregatorProxy is AggregatorProxyInterface, ConfirmedOwner { external view override - returns (address) + returns ( + address + ) { return address(s_currentPhase.aggregator); } @@ -313,7 +343,9 @@ contract AggregatorProxy is AggregatorProxyInterface, ConfirmedOwner { external view override - returns (uint16) + returns ( + uint16 + ) { return s_currentPhase.id; } @@ -325,7 +357,9 @@ contract AggregatorProxy is AggregatorProxyInterface, ConfirmedOwner { external view override - returns (uint8) + returns ( + uint8 + ) { return s_currentPhase.aggregator.decimals(); } @@ -338,7 +372,9 @@ contract AggregatorProxy is AggregatorProxyInterface, ConfirmedOwner { external view override - returns (uint256) + returns ( + uint256 + ) { return s_currentPhase.aggregator.version(); } @@ -350,7 +386,9 @@ contract AggregatorProxy is AggregatorProxyInterface, ConfirmedOwner { external view override - returns (string memory) + returns ( + string memory + ) { return s_currentPhase.aggregator.description(); } @@ -362,7 +400,9 @@ contract AggregatorProxy is AggregatorProxyInterface, ConfirmedOwner { external view override - returns (address) + returns ( + address + ) { return address(s_proposedAggregator); } @@ -372,11 +412,15 @@ contract AggregatorProxy is AggregatorProxyInterface, ConfirmedOwner { * * @param phaseId uint16 */ - function phaseAggregators(uint16 phaseId) + function phaseAggregators( + uint16 phaseId + ) external view override - returns (address) + returns ( + address + ) { return address(s_phaseAggregators[phaseId]); } @@ -385,7 +429,9 @@ contract AggregatorProxy is AggregatorProxyInterface, ConfirmedOwner { * @notice Allows the owner to propose a new address for the aggregator * @param aggregatorAddress The new address for the aggregator contract */ - function proposeAggregator(address aggregatorAddress) + function proposeAggregator( + address aggregatorAddress + ) external onlyOwner() { @@ -400,7 +446,9 @@ contract AggregatorProxy is AggregatorProxyInterface, ConfirmedOwner { * proposed * @param aggregatorAddress The new address for the aggregator contract */ - function confirmAggregator(address aggregatorAddress) + function confirmAggregator( + address aggregatorAddress + ) external onlyOwner() { @@ -416,7 +464,9 @@ contract AggregatorProxy is AggregatorProxyInterface, ConfirmedOwner { * Internal */ - function setAggregator(address aggregatorAddress) + function setAggregator( + address aggregatorAddress + ) internal { uint16 id = s_currentPhase.id + 1; @@ -430,7 +480,9 @@ contract AggregatorProxy is AggregatorProxyInterface, ConfirmedOwner { ) internal pure - returns (uint80) + returns ( + uint80 + ) { return uint80(uint256(phase) << PHASE_OFFSET | originalId); } @@ -440,7 +492,10 @@ contract AggregatorProxy is AggregatorProxyInterface, ConfirmedOwner { ) internal pure - returns (uint16, uint64) + returns ( + uint16, + uint64 + ) { uint16 phaseId = uint16(roundId >> PHASE_OFFSET); uint64 aggregatorRoundId = uint64(roundId); @@ -458,7 +513,13 @@ contract AggregatorProxy is AggregatorProxyInterface, ConfirmedOwner { ) internal pure - returns (uint80, int256, uint256, uint256, uint80) + returns ( + uint80, + int256, + uint256, + uint256, + uint80 + ) { return ( addPhase(phaseId, uint64(roundId)), diff --git a/evm-contracts/src/v0.7/dev/ConfirmedOwner.sol b/evm-contracts/src/v0.7/dev/ConfirmedOwner.sol index 6fabaf5ab80..deeb94af426 100644 --- 
a/evm-contracts/src/v0.7/dev/ConfirmedOwner.sol +++ b/evm-contracts/src/v0.7/dev/ConfirmedOwner.sol @@ -27,7 +27,9 @@ contract ConfirmedOwner { * @notice Allows an owner to begin transferring ownership to a new address, * pending. */ - function transferOwnership(address to) + function transferOwnership( + address to + ) external onlyOwner() { @@ -56,7 +58,13 @@ contract ConfirmedOwner { /** * @notice Get the current owner */ - function owner() public view returns (address) { + function owner() + public + view + returns ( + address + ) + { return s_owner; } diff --git a/evm-contracts/src/v0.7/dev/LinkTokenReceiver.sol b/evm-contracts/src/v0.7/dev/LinkTokenReceiver.sol index eb17e9fe5cf..6e7a99bebd9 100644 --- a/evm-contracts/src/v0.7/dev/LinkTokenReceiver.sol +++ b/evm-contracts/src/v0.7/dev/LinkTokenReceiver.sol @@ -50,7 +50,9 @@ abstract contract LinkTokenReceiver { * @dev Reverts if the given data does not begin with the `oracleRequest` function selector * @param data The data payload of the request */ - modifier permittedFunctionsForLINK(bytes memory data) { + modifier permittedFunctionsForLINK( + bytes memory data + ) { bytes4 funcSelector; assembly { // solhint-disable-next-line avoid-low-level-calls @@ -64,7 +66,9 @@ abstract contract LinkTokenReceiver { * @dev Reverts if the given payload is less than needed to create a request * @param data The request payload */ - modifier validRequestLength(bytes memory data) { + modifier validRequestLength( + bytes memory data + ) { require(data.length >= MINIMUM_REQUEST_LENGTH, "Invalid request length"); _; } diff --git a/evm-contracts/src/v0.7/dev/Operator.sol b/evm-contracts/src/v0.7/dev/Operator.sol index 7cd8b23c173..80452e902bd 100644 --- a/evm-contracts/src/v0.7/dev/Operator.sol +++ b/evm-contracts/src/v0.7/dev/Operator.sol @@ -443,7 +443,9 @@ contract Operator is public view override - returns (address) + returns ( + address + ) { return address(linkToken); } diff --git a/evm-contracts/src/v0.7/dev/OperatorFactory.sol b/evm-contracts/src/v0.7/dev/OperatorFactory.sol index 66d86f29b8a..25cfc2aee1c 100644 --- a/evm-contracts/src/v0.7/dev/OperatorFactory.sol +++ b/evm-contracts/src/v0.7/dev/OperatorFactory.sol @@ -9,22 +9,27 @@ import "./Operator.sol"; */ contract OperatorFactory { - address public link; + address public link; - event OperatorCreated(address indexed operator, address indexed owner); + event OperatorCreated( + address indexed operator, + address indexed owner + ); - /** - * @param linkAddress address - */ - constructor(address linkAddress) { - link = linkAddress; - } + /** + * @param linkAddress address + */ + constructor( + address linkAddress + ) { + link = linkAddress; + } - /** - * @notice fallback to create a new Operator contract with the msg.sender as owner - */ - fallback() external { - Operator operator = new Operator(link, msg.sender); - emit OperatorCreated(address(operator), msg.sender); - } + /** + * @notice fallback to create a new Operator contract with the msg.sender as owner + */ + fallback() external { + Operator operator = new Operator(link, msg.sender); + emit OperatorCreated(address(operator), msg.sender); + } } diff --git a/evm-contracts/src/v0.7/dev/OperatorForwarder.sol b/evm-contracts/src/v0.7/dev/OperatorForwarder.sol index 605d80b4d74..5c854c3ac69 100644 --- a/evm-contracts/src/v0.7/dev/OperatorForwarder.sol +++ b/evm-contracts/src/v0.7/dev/OperatorForwarder.sol @@ -11,7 +11,9 @@ contract OperatorForwarder { address public immutable linkAddr; - constructor(address link) { + constructor( + address 
link + ) { linkAddr = link; authorizedSender1 = ConfirmedOwner(msg.sender).owner(); address[] memory authorizedSenders = OperatorInterface(msg.sender).getAuthorizedSenders(); From 6571ed28a5c9cacbfe2c3e8d6d56f597e0428679 Mon Sep 17 00:00:00 2001 From: Alex Roan Date: Wed, 24 Mar 2021 15:57:12 +0000 Subject: [PATCH 081/116] 0.7 interfaces, tests and base contract style --- evm-contracts/src/v0.7/Chainlink.sol | 61 +++++-- evm-contracts/src/v0.7/ChainlinkClient.sol | 96 +++++++--- .../v0.7/interfaces/AggregatorInterface.sol | 58 +++++- .../interfaces/AggregatorProxyInterface.sol | 60 ++++++- .../v0.7/interfaces/AggregatorV3Interface.sol | 28 ++- .../src/v0.7/interfaces/ENSInterface.sol | 75 ++++++-- .../src/v0.7/interfaces/FlagsInterface.sol | 39 +++- .../v0.7/interfaces/LinkTokenInterface.sol | 112 ++++++++++-- .../src/v0.7/interfaces/OperatorInterface.sol | 45 ++++- .../src/v0.7/interfaces/OracleInterface.sol | 30 +++- .../src/v0.7/interfaces/PointerInterface.sol | 8 +- .../src/v0.7/interfaces/UpkeepInterface.sol | 3 +- .../v0.7/interfaces/WithdrawalInterface.sol | 12 +- .../v0.7/tests/ConfirmedOwnerTestHelper.sol | 4 +- evm-contracts/src/v0.7/tests/Consumer.sol | 43 ++++- .../src/v0.7/tests/MockCompoundOracle.sol | 4 +- .../src/v0.7/tests/MockV2Aggregator.sol | 12 +- .../src/v0.7/tests/MockV3Aggregator.sol | 16 +- .../src/v0.7/tests/MultiWordConsumer.sol | 169 +++++++++++------- .../v0.7/tests/OperatorForwarderDeployer.sol | 4 +- 20 files changed, 704 insertions(+), 175 deletions(-) diff --git a/evm-contracts/src/v0.7/Chainlink.sol b/evm-contracts/src/v0.7/Chainlink.sol index 51cf882b053..8ae05172915 100644 --- a/evm-contracts/src/v0.7/Chainlink.sol +++ b/evm-contracts/src/v0.7/Chainlink.sol @@ -35,7 +35,13 @@ library Chainlink { bytes32 jobId, address callbackAddr, bytes4 callbackFunc - ) internal pure returns (Chainlink.Request memory) { + ) + internal + pure + returns ( + Chainlink.Request memory + ) + { BufferChainlink.init(self.buf, defaultBufferSize); self.id = jobId; self.callbackAddress = callbackAddr; @@ -49,8 +55,12 @@ library Chainlink { * @param self The initialized request * @param data The CBOR data */ - function setBuffer(Request memory self, bytes memory data) - internal pure + function setBuffer( + Request memory self, + bytes memory data + ) + internal + pure { BufferChainlink.init(self.buf, data.length); BufferChainlink.append(self.buf, data); @@ -62,8 +72,13 @@ library Chainlink { * @param key The name of the key * @param value The string value to add */ - function add(Request memory self, string memory key, string memory value) - internal pure + function add( + Request memory self, + string memory key, + string memory value + ) + internal + pure { self.buf.encodeString(key); self.buf.encodeString(value); @@ -75,8 +90,13 @@ library Chainlink { * @param key The name of the key * @param value The bytes value to add */ - function addBytes(Request memory self, string memory key, bytes memory value) - internal pure + function addBytes( + Request memory self, + string memory key, + bytes memory value + ) + internal + pure { self.buf.encodeString(key); self.buf.encodeBytes(value); @@ -88,8 +108,13 @@ library Chainlink { * @param key The name of the key * @param value The int256 value to add */ - function addInt(Request memory self, string memory key, int256 value) - internal pure + function addInt( + Request memory self, + string memory key, + int256 value + ) + internal + pure { self.buf.encodeString(key); self.buf.encodeInt(value); @@ -101,8 +126,13 @@ library Chainlink { * 
@param key The name of the key * @param value The uint256 value to add */ - function addUint(Request memory self, string memory key, uint256 value) - internal pure + function addUint( + Request memory self, + string memory key, + uint256 value + ) + internal + pure { self.buf.encodeString(key); self.buf.encodeUInt(value); @@ -114,8 +144,13 @@ library Chainlink { * @param key The name of the key * @param values The array of string values to add */ - function addStringArray(Request memory self, string memory key, string[] memory values) - internal pure + function addStringArray( + Request memory self, + string memory key, + string[] memory values + ) + internal + pure { self.buf.encodeString(key); self.buf.startArray(); diff --git a/evm-contracts/src/v0.7/ChainlinkClient.sol b/evm-contracts/src/v0.7/ChainlinkClient.sol index 09c98aba80f..c3eb55b17a3 100644 --- a/evm-contracts/src/v0.7/ChainlinkClient.sol +++ b/evm-contracts/src/v0.7/ChainlinkClient.sol @@ -31,9 +31,15 @@ contract ChainlinkClient { uint256 private requestCount = 1; mapping(bytes32 => address) private pendingRequests; - event ChainlinkRequested(bytes32 indexed id); - event ChainlinkFulfilled(bytes32 indexed id); - event ChainlinkCancelled(bytes32 indexed id); + event ChainlinkRequested( + bytes32 indexed id + ); + event ChainlinkFulfilled( + bytes32 indexed id + ); + event ChainlinkCancelled( + bytes32 indexed id + ); /** * @notice Creates a request that can hold additional parameters @@ -46,7 +52,13 @@ contract ChainlinkClient { bytes32 specId, address callbackAddress, bytes4 callbackFunctionSignature - ) internal pure returns (Chainlink.Request memory) { + ) + internal + pure + returns ( + Chainlink.Request memory + ) + { Chainlink.Request memory req; return req.initialize(specId, callbackAddress, callbackFunctionSignature); } @@ -58,9 +70,14 @@ contract ChainlinkClient { * @param payment The amount of LINK to send for the request * @return requestId The request ID */ - function sendChainlinkRequest(Chainlink.Request memory req, uint256 payment) + function sendChainlinkRequest( + Chainlink.Request memory req, + uint256 payment + ) internal - returns (bytes32) + returns ( + bytes32 + ) { return sendChainlinkRequestTo(address(oracle), req, payment); } @@ -75,9 +92,15 @@ contract ChainlinkClient { * @param payment The amount of LINK to send for the request * @return requestId The request ID */ - function sendChainlinkRequestTo(address oracleAddress, Chainlink.Request memory req, uint256 payment) + function sendChainlinkRequestTo( + address oracleAddress, + Chainlink.Request memory req, + uint256 payment + ) internal - returns (bytes32 requestId) + returns ( + bytes32 requestId + ) { requestId = keccak256(abi.encodePacked(this, requestCount)); req.nonce = requestCount; @@ -117,7 +140,11 @@ contract ChainlinkClient { * @notice Sets the stored oracle address * @param oracleAddress The address of the oracle contract */ - function setChainlinkOracle(address oracleAddress) internal { + function setChainlinkOracle( + address oracleAddress + ) + internal + { oracle = ChainlinkRequestInterface(oracleAddress); } @@ -125,7 +152,11 @@ contract ChainlinkClient { * @notice Sets the LINK token address * @param linkAddress The address of the LINK token contract */ - function setChainlinkToken(address linkAddress) internal { + function setChainlinkToken( + address linkAddress + ) + internal + { link = LinkTokenInterface(linkAddress); } @@ -133,7 +164,9 @@ contract ChainlinkClient { * @notice Sets the Chainlink token address for the public * 
network as given by the Pointer contract */ - function setPublicChainlinkToken() internal { + function setPublicChainlinkToken() + internal + { setChainlinkToken(PointerInterface(LINK_TOKEN_POINTER).getAddress()); } @@ -144,7 +177,9 @@ contract ChainlinkClient { function chainlinkTokenAddress() internal view - returns (address) + returns ( + address + ) { return address(link); } @@ -156,7 +191,9 @@ contract ChainlinkClient { function chainlinkOracleAddress() internal view - returns (address) + returns ( + address + ) { return address(oracle); } @@ -167,7 +204,10 @@ contract ChainlinkClient { * @param oracleAddress The address of the oracle contract that will fulfill the request * @param requestId The request ID used for the response */ - function addChainlinkExternalRequest(address oracleAddress, bytes32 requestId) + function addChainlinkExternalRequest( + address oracleAddress, + bytes32 requestId + ) internal notPendingRequest(requestId) { @@ -180,7 +220,10 @@ contract ChainlinkClient { * @param ensAddress The address of the ENS contract * @param node The ENS node hash */ - function useChainlinkWithENS(address ensAddress, bytes32 node) + function useChainlinkWithENS( + address ensAddress, + bytes32 node + ) internal { ens = ENSInterface(ensAddress); @@ -211,10 +254,15 @@ contract ChainlinkClient { * @param dataVersion The request data version * @return The bytes payload for the `transferAndCall` method */ - function encodeRequest(Chainlink.Request memory req, uint256 dataVersion) + function encodeRequest( + Chainlink.Request memory req, + uint256 dataVersion + ) private view - returns (bytes memory) + returns ( + bytes memory + ) { return abi.encodeWithSelector( oracle.oracleRequest.selector, @@ -233,7 +281,9 @@ contract ChainlinkClient { * @dev Use if the contract developer prefers methods instead of modifiers for validation * @param requestId The request ID for fulfillment */ - function validateChainlinkCallback(bytes32 requestId) + function validateChainlinkCallback( + bytes32 requestId + ) internal recordChainlinkFulfillment(requestId) // solhint-disable-next-line no-empty-blocks @@ -244,7 +294,10 @@ contract ChainlinkClient { * Emits ChainlinkFulfilled event. 
* @param requestId The request ID for fulfillment */ - modifier recordChainlinkFulfillment(bytes32 requestId) { + modifier recordChainlinkFulfillment( + bytes32 requestId + ) + { require(msg.sender == pendingRequests[requestId], "Source must be the oracle of the request"); delete pendingRequests[requestId]; @@ -256,7 +309,10 @@ contract ChainlinkClient { * @dev Reverts if the request is already pending * @param requestId The request ID for fulfillment */ - modifier notPendingRequest(bytes32 requestId) { + modifier notPendingRequest( + bytes32 requestId + ) + { require(pendingRequests[requestId] == address(0), "Request is already pending"); _; } diff --git a/evm-contracts/src/v0.7/interfaces/AggregatorInterface.sol b/evm-contracts/src/v0.7/interfaces/AggregatorInterface.sol index 338cd9de76b..31b6fafea17 100644 --- a/evm-contracts/src/v0.7/interfaces/AggregatorInterface.sol +++ b/evm-contracts/src/v0.7/interfaces/AggregatorInterface.sol @@ -2,12 +2,54 @@ pragma solidity >=0.7.0; interface AggregatorInterface { - function latestAnswer() external view returns (int256); - function latestTimestamp() external view returns (uint256); - function latestRound() external view returns (uint256); - function getAnswer(uint256 roundId) external view returns (int256); - function getTimestamp(uint256 roundId) external view returns (uint256); - - event AnswerUpdated(int256 indexed current, uint256 indexed roundId, uint256 updatedAt); - event NewRound(uint256 indexed roundId, address indexed startedBy, uint256 startedAt); + function latestAnswer() + external + view + returns ( + int256 + ); + + function latestTimestamp() + external + view + returns ( + uint256 + ); + + function latestRound() + external + view + returns ( + uint256 + ); + + function getAnswer( + uint256 roundId + ) + external + view + returns ( + int256 + ); + + function getTimestamp( + uint256 roundId + ) + external + view + returns ( + uint256 + ); + + event AnswerUpdated( + int256 indexed current, + uint256 indexed roundId, + uint256 updatedAt + ); + + event NewRound( + uint256 indexed roundId, + address indexed startedBy, + uint256 startedAt + ); } diff --git a/evm-contracts/src/v0.7/interfaces/AggregatorProxyInterface.sol b/evm-contracts/src/v0.7/interfaces/AggregatorProxyInterface.sol index 57961062d46..725f69b6690 100644 --- a/evm-contracts/src/v0.7/interfaces/AggregatorProxyInterface.sol +++ b/evm-contracts/src/v0.7/interfaces/AggregatorProxyInterface.sol @@ -4,10 +4,58 @@ pragma solidity >=0.7.0; import "./AggregatorV2V3Interface.sol"; interface AggregatorProxyInterface is AggregatorV2V3Interface { - function phaseAggregators(uint16 phaseId) external view returns (address); - function phaseId() external view returns (uint16); - function proposedAggregator() external view returns (address); - function proposedGetRoundData(uint80 roundId) external view returns (uint80 id, int256 answer, uint256 startedAt, uint256 updatedAt, uint80 answeredInRound); - function proposedLatestRoundData() external view returns (uint80 id, int256 answer, uint256 startedAt, uint256 updatedAt, uint80 answeredInRound); - function aggregator() external view returns (address); + + function phaseAggregators( + uint16 phaseId + ) + external + view + returns ( + address + ); + + function phaseId() + external + view + returns ( + uint16 + ); + + function proposedAggregator() + external + view + returns ( + address + ); + + function proposedGetRoundData( + uint80 roundId + ) + external + view + returns ( + uint80 id, + int256 answer, + uint256 startedAt, + uint256 
updatedAt, + uint80 answeredInRound + ); + + function proposedLatestRoundData() + external + view + returns ( + uint80 id, + int256 answer, + uint256 startedAt, + uint256 updatedAt, + uint80 answeredInRound + ); + + function aggregator() + external + view + returns ( + address + ); } diff --git a/evm-contracts/src/v0.7/interfaces/AggregatorV3Interface.sol b/evm-contracts/src/v0.7/interfaces/AggregatorV3Interface.sol index ba6fa501fb7..777fbf73cfa 100644 --- a/evm-contracts/src/v0.7/interfaces/AggregatorV3Interface.sol +++ b/evm-contracts/src/v0.7/interfaces/AggregatorV3Interface.sol @@ -3,14 +3,33 @@ pragma solidity >=0.7.0; interface AggregatorV3Interface { - function decimals() external view returns (uint8); - function description() external view returns (string memory); - function version() external view returns (uint256); + function decimals() + external + view + returns ( + uint8 + ); + + function description() + external + view + returns ( + string memory + ); + + function version() + external + view + returns ( + uint256 + ); // getRoundData and latestRoundData should both raise "No data present" // if they do not have data to report, instead of returning unset values // which could be misinterpreted as actual reported values. - function getRoundData(uint80 _roundId) + function getRoundData( + uint80 _roundId + ) external view returns ( @@ -20,6 +39,7 @@ interface AggregatorV3Interface { uint256 updatedAt, uint80 answeredInRound ); + function latestRoundData() external view diff --git a/evm-contracts/src/v0.7/interfaces/ENSInterface.sol b/evm-contracts/src/v0.7/interfaces/ENSInterface.sol index 2af6f323173..47e332f0c97 100644 --- a/evm-contracts/src/v0.7/interfaces/ENSInterface.sol +++ b/evm-contracts/src/v0.7/interfaces/ENSInterface.sol @@ -4,24 +4,77 @@ pragma solidity ^0.7.0; interface ENSInterface { // Logged when the owner of a node assigns a new owner to a subnode. - event NewOwner(bytes32 indexed node, bytes32 indexed label, address owner); + event NewOwner( + bytes32 indexed node, + bytes32 indexed label, + address owner + ); // Logged when the owner of a node transfers ownership to a new account. - event Transfer(bytes32 indexed node, address owner); + event Transfer( + bytes32 indexed node, + address owner + ); // Logged when the resolver for a node changes. 
- event NewResolver(bytes32 indexed node, address resolver); + event NewResolver( + bytes32 indexed node, + address resolver + ); // Logged when the TTL of a node changes - event NewTTL(bytes32 indexed node, uint64 ttl); + event NewTTL( + bytes32 indexed node, + uint64 ttl + ); - function setSubnodeOwner(bytes32 node, bytes32 label, address owner) external; - function setResolver(bytes32 node, address resolver) external; - function setOwner(bytes32 node, address owner) external; - function setTTL(bytes32 node, uint64 ttl) external; - function owner(bytes32 node) external view returns (address); - function resolver(bytes32 node) external view returns (address); - function ttl(bytes32 node) external view returns (uint64); + function setSubnodeOwner( + bytes32 node, + bytes32 label, + address owner + ) external; + + function setResolver( + bytes32 node, + address resolver + ) external; + + function setOwner( + bytes32 node, + address owner + ) external; + + function setTTL( + bytes32 node, + uint64 ttl + ) external; + + function owner( + bytes32 node + ) + external + view + returns ( + address + ); + + function resolver( + bytes32 node + ) + external + view + returns ( + address + ); + + function ttl( + bytes32 node + ) + external + view + returns ( + uint64 + ); } diff --git a/evm-contracts/src/v0.7/interfaces/FlagsInterface.sol b/evm-contracts/src/v0.7/interfaces/FlagsInterface.sol index 1e6b4a11e3c..2eb1080a151 100644 --- a/evm-contracts/src/v0.7/interfaces/FlagsInterface.sol +++ b/evm-contracts/src/v0.7/interfaces/FlagsInterface.sol @@ -2,10 +2,37 @@ pragma solidity ^0.7.0; interface FlagsInterface { - function getFlag(address) external view returns (bool); - function getFlags(address[] calldata) external view returns (bool[] memory); - function raiseFlag(address) external; - function raiseFlags(address[] calldata) external; - function lowerFlags(address[] calldata) external; - function setRaisingAccessController(address) external; + function getFlag( + address + ) + external + view + returns ( + bool + ); + + function getFlags( + address[] calldata + ) + external + view + returns ( + bool[] memory + ); + + function raiseFlag( + address + ) external; + + function raiseFlags( + address[] calldata + ) external; + + function lowerFlags( + address[] calldata + ) external; + + function setRaisingAccessController( + address + ) external; } diff --git a/evm-contracts/src/v0.7/interfaces/LinkTokenInterface.sol b/evm-contracts/src/v0.7/interfaces/LinkTokenInterface.sol index 32bd6dc42de..874eca80549 100644 --- a/evm-contracts/src/v0.7/interfaces/LinkTokenInterface.sol +++ b/evm-contracts/src/v0.7/interfaces/LinkTokenInterface.sol @@ -2,16 +2,104 @@ pragma solidity ^0.7.0; interface LinkTokenInterface { - function allowance(address owner, address spender) external view returns (uint256 remaining); - function approve(address spender, uint256 value) external returns (bool success); - function balanceOf(address owner) external view returns (uint256 balance); - function decimals() external view returns (uint8 decimalPlaces); - function decreaseApproval(address spender, uint256 addedValue) external returns (bool success); - function increaseApproval(address spender, uint256 subtractedValue) external; - function name() external view returns (string memory tokenName); - function symbol() external view returns (string memory tokenSymbol); - function totalSupply() external view returns (uint256 totalTokensIssued); - function transfer(address to, uint256 value) external returns (bool success); - function 
transferAndCall(address to, uint256 value, bytes calldata data) external returns (bool success); - function transferFrom(address from, address to, uint256 value) external returns (bool success); + + function allowance( + address owner, + address spender + ) + external + view + returns ( + uint256 remaining + ); + + function approve( + address spender, + uint256 value + ) + external + returns ( + bool success + ); + + function balanceOf( + address owner + ) + external + view + returns ( + uint256 balance + ); + + function decimals() + external + view + returns ( + uint8 decimalPlaces + ); + + function decreaseApproval( + address spender, + uint256 addedValue + ) + external + returns ( + bool success + ); + + function increaseApproval( + address spender, + uint256 subtractedValue + ) external; + + function name() + external + view + returns ( + string memory tokenName + ); + + function symbol() + external + view + returns ( + string memory tokenSymbol + ); + + function totalSupply() + external + view + returns ( + uint256 totalTokensIssued + ); + + function transfer( + address to, + uint256 value + ) + external + returns ( + bool success + ); + + function transferAndCall( + address to, + uint256 value, + bytes calldata data + ) + external + returns ( + bool success + ); + + function transferFrom( + address from, + address to, + uint256 value + ) + external + returns ( + bool success + ); + } diff --git a/evm-contracts/src/v0.7/interfaces/OperatorInterface.sol b/evm-contracts/src/v0.7/interfaces/OperatorInterface.sol index deab83da319..2de75fece0c 100644 --- a/evm-contracts/src/v0.7/interfaces/OperatorInterface.sol +++ b/evm-contracts/src/v0.7/interfaces/OperatorInterface.sol @@ -4,6 +4,7 @@ pragma solidity ^0.7.0; import "./OracleInterface.sol"; interface OperatorInterface is OracleInterface { + function fulfillOracleRequest2( bytes32 requestId, uint256 payment, @@ -11,10 +12,42 @@ interface OperatorInterface is OracleInterface { bytes4 callbackFunctionId, uint256 expiration, bytes calldata data - ) external returns (bool); - function operatorTransferAndCall(address to, uint256 value, bytes calldata data) external returns (bool success); - function distributeFunds(address payable[] calldata receivers,uint[] calldata amounts) external payable; - function getAuthorizedSenders() external returns (address[] memory); - function setAuthorizedSenders(address[] calldata senders) external; - function getForwarders() external returns (address[] memory); + ) + external + returns ( + bool + ); + + function operatorTransferAndCall( + address to, + uint256 value, + bytes calldata data + ) + external + returns ( + bool success + ); + + function distributeFunds( + address payable[] calldata receivers, + uint[] calldata amounts + ) + external + payable; + + function getAuthorizedSenders() + external + returns ( + address[] memory + ); + + function setAuthorizedSenders( + address[] calldata senders + ) external; + + function getForwarders() + external + returns ( + address[] memory + ); } diff --git a/evm-contracts/src/v0.7/interfaces/OracleInterface.sol b/evm-contracts/src/v0.7/interfaces/OracleInterface.sol index 043fc636c52..f34f8c6f307 100644 --- a/evm-contracts/src/v0.7/interfaces/OracleInterface.sol +++ b/evm-contracts/src/v0.7/interfaces/OracleInterface.sol @@ -9,8 +9,30 @@ interface OracleInterface { bytes4 callbackFunctionId, uint256 expiration, bytes32 data - ) external returns (bool); - function isAuthorizedSender(address node) external view returns (bool); - function withdraw(address recipient, 
uint256 amount) external; - function withdrawable() external view returns (uint256); + ) + external + returns ( + bool + ); + + function isAuthorizedSender( + address node + ) + external + view + returns ( + bool + ); + + function withdraw( + address recipient, + uint256 amount + ) external; + + function withdrawable() + external + view + returns ( + uint256 + ); } diff --git a/evm-contracts/src/v0.7/interfaces/PointerInterface.sol b/evm-contracts/src/v0.7/interfaces/PointerInterface.sol index ee3d8ae9ced..284b59a5f5e 100644 --- a/evm-contracts/src/v0.7/interfaces/PointerInterface.sol +++ b/evm-contracts/src/v0.7/interfaces/PointerInterface.sol @@ -2,5 +2,11 @@ pragma solidity ^0.7.0; interface PointerInterface { - function getAddress() external view returns (address); + + function getAddress() + external + view + returns ( + address + ); } diff --git a/evm-contracts/src/v0.7/interfaces/UpkeepInterface.sol b/evm-contracts/src/v0.7/interfaces/UpkeepInterface.sol index 7473b7ab572..fe7e19473ba 100644 --- a/evm-contracts/src/v0.7/interfaces/UpkeepInterface.sol +++ b/evm-contracts/src/v0.7/interfaces/UpkeepInterface.sol @@ -13,6 +13,5 @@ interface UpkeepInterface { function performUpkeep( bytes calldata data - ) - external; + ) external; } diff --git a/evm-contracts/src/v0.7/interfaces/WithdrawalInterface.sol b/evm-contracts/src/v0.7/interfaces/WithdrawalInterface.sol index 8049850a796..631c4aeb575 100644 --- a/evm-contracts/src/v0.7/interfaces/WithdrawalInterface.sol +++ b/evm-contracts/src/v0.7/interfaces/WithdrawalInterface.sol @@ -8,10 +8,18 @@ interface WithdrawalInterface { * @param recipient is the address to send the LINK to * @param amount is the amount of LINK to send */ - function withdraw(address recipient, uint256 amount) external; + function withdraw( + address recipient, + uint256 amount + ) external; /** * @notice query the available amount of LINK to withdraw by msg.sender */ - function withdrawable() external view returns (uint256); + function withdrawable() + external + view + returns ( + uint256 + ); } diff --git a/evm-contracts/src/v0.7/tests/ConfirmedOwnerTestHelper.sol b/evm-contracts/src/v0.7/tests/ConfirmedOwnerTestHelper.sol index 5319729b1aa..bfff55ea623 100644 --- a/evm-contracts/src/v0.7/tests/ConfirmedOwnerTestHelper.sol +++ b/evm-contracts/src/v0.7/tests/ConfirmedOwnerTestHelper.sol @@ -7,7 +7,9 @@ contract ConfirmedOwnerTestHelper is ConfirmedOwner { event Here(); - constructor() ConfirmedOwner(msg.sender) {} + constructor() + ConfirmedOwner(msg.sender) + {} function modifierOnlyOwner() public diff --git a/evm-contracts/src/v0.7/tests/Consumer.sol b/evm-contracts/src/v0.7/tests/Consumer.sol index dc2a9584254..5ed45066dd1 100644 --- a/evm-contracts/src/v0.7/tests/Consumer.sol +++ b/evm-contracts/src/v0.7/tests/Consumer.sol @@ -14,17 +14,34 @@ contract Consumer is ChainlinkClient { bytes32 indexed price ); - constructor(address _link, address _oracle, bytes32 _specId) public { + constructor( + address _link, + address _oracle, + bytes32 _specId + ) + public + { setChainlinkToken(_link); setChainlinkOracle(_oracle); specId = _specId; } - function requestEthereumPrice(string memory _currency, uint256 _payment) public { + function requestEthereumPrice( + string memory _currency, + uint256 _payment + ) + public + { requestEthereumPriceByCallback(_currency, _payment, address(this)); } - function requestEthereumPriceByCallback(string memory _currency, uint256 _payment, address _callback) public { + function requestEthereumPriceByCallback( + string memory _currency, + 
uint256 _payment, + address _callback + ) + public + { Chainlink.Request memory req = buildChainlinkRequest(specId, _callback, this.fulfill.selector); req.add("get", "https://min-api.cryptocompare.com/data/price?fsym=ETH&tsyms=USD,EUR,JPY"); string[] memory path = new string[](1); @@ -40,21 +57,33 @@ contract Consumer is ChainlinkClient { uint256 _payment, bytes4 _callbackFunctionId, uint256 _expiration - ) public { + ) + public + { ChainlinkRequestInterface requested = ChainlinkRequestInterface(_oracle); requested.cancelOracleRequest(_requestId, _payment, _callbackFunctionId, _expiration); } - function withdrawLink() public { + function withdrawLink() + public + { LinkTokenInterface _link = LinkTokenInterface(chainlinkTokenAddress()); require(_link.transfer(msg.sender, _link.balanceOf(address(this))), "Unable to transfer"); } - function addExternalRequest(address _oracle, bytes32 _requestId) external { + function addExternalRequest( + address _oracle, + bytes32 _requestId + ) + external + { addChainlinkExternalRequest(_oracle, _requestId); } - function fulfill(bytes32 _requestId, bytes32 _price) + function fulfill( + bytes32 _requestId, + bytes32 _price + ) public recordChainlinkFulfillment(_requestId) { diff --git a/evm-contracts/src/v0.7/tests/MockCompoundOracle.sol b/evm-contracts/src/v0.7/tests/MockCompoundOracle.sol index f2eae0f72c9..69c34214e80 100644 --- a/evm-contracts/src/v0.7/tests/MockCompoundOracle.sol +++ b/evm-contracts/src/v0.7/tests/MockCompoundOracle.sol @@ -18,7 +18,9 @@ contract MockCompoundOracle is UniswapAnchoredView { external override view - returns (uint256) + returns ( + uint256 + ) { return s_oracleDetails[symbol].price; } diff --git a/evm-contracts/src/v0.7/tests/MockV2Aggregator.sol b/evm-contracts/src/v0.7/tests/MockV2Aggregator.sol index 6934a603c51..e9f8de2e363 100644 --- a/evm-contracts/src/v0.7/tests/MockV2Aggregator.sol +++ b/evm-contracts/src/v0.7/tests/MockV2Aggregator.sol @@ -22,13 +22,17 @@ contract MockV2Aggregator is AggregatorInterface { constructor( int256 _initialAnswer - ) public { + ) + public + { updateAnswer(_initialAnswer); } function updateAnswer( int256 _answer - ) public { + ) + public + { latestAnswer = _answer; latestTimestamp = block.timestamp; latestRound++; @@ -41,7 +45,9 @@ contract MockV2Aggregator is AggregatorInterface { int256 _answer, uint256 _timestamp, uint256 _startedAt - ) public { + ) + public + { latestRound = _roundId; latestAnswer = _answer; latestTimestamp = _timestamp; diff --git a/evm-contracts/src/v0.7/tests/MockV3Aggregator.sol b/evm-contracts/src/v0.7/tests/MockV3Aggregator.sol index d3ca6bd8178..f16e0805105 100644 --- a/evm-contracts/src/v0.7/tests/MockV3Aggregator.sol +++ b/evm-contracts/src/v0.7/tests/MockV3Aggregator.sol @@ -33,7 +33,9 @@ contract MockV3Aggregator is AggregatorV2V3Interface { function updateAnswer( int256 _answer - ) public { + ) + public + { latestAnswer = _answer; latestTimestamp = block.timestamp; latestRound++; @@ -47,7 +49,9 @@ contract MockV3Aggregator is AggregatorV2V3Interface { int256 _answer, uint256 _timestamp, uint256 _startedAt - ) public { + ) + public + { latestRound = _roundId; latestAnswer = _answer; latestTimestamp = _timestamp; @@ -56,7 +60,9 @@ contract MockV3Aggregator is AggregatorV2V3Interface { getStartedAt[latestRound] = _startedAt; } - function getRoundData(uint80 _roundId) + function getRoundData( + uint80 _roundId + ) external view override @@ -102,7 +108,9 @@ contract MockV3Aggregator is AggregatorV2V3Interface { external pure override - returns (string memory) + 
returns ( + string memory + ) { return "v0.6/tests/MockV3Aggregator.sol"; } diff --git a/evm-contracts/src/v0.7/tests/MultiWordConsumer.sol b/evm-contracts/src/v0.7/tests/MultiWordConsumer.sol index 2b1eb4db052..9e2c102394b 100644 --- a/evm-contracts/src/v0.7/tests/MultiWordConsumer.sol +++ b/evm-contracts/src/v0.7/tests/MultiWordConsumer.sol @@ -3,84 +3,127 @@ pragma solidity ^0.7.0; import "../ChainlinkClient.sol"; contract MultiWordConsumer is ChainlinkClient{ - bytes32 internal specId; - bytes public currentPrice; + bytes32 internal specId; + bytes public currentPrice; - bytes32 public usd; - bytes32 public eur; - bytes32 public jpy; + bytes32 public usd; + bytes32 public eur; + bytes32 public jpy; - event RequestFulfilled( - bytes32 indexed requestId, // User-defined ID - bytes indexed price - ); + event RequestFulfilled( + bytes32 indexed requestId, // User-defined ID + bytes indexed price + ); - event RequestMultipleFulfilled( - bytes32 indexed requestId, - bytes32 indexed usd, - bytes32 indexed eur, - bytes32 jpy - ); + event RequestMultipleFulfilled( + bytes32 indexed requestId, + bytes32 indexed usd, + bytes32 indexed eur, + bytes32 jpy + ); - constructor(address _link, address _oracle, bytes32 _specId) public { - setChainlinkToken(_link); - setChainlinkOracle(_oracle); - specId = _specId; - } + constructor( + address _link, + address _oracle, + bytes32 _specId + ) + public + { + setChainlinkToken(_link); + setChainlinkOracle(_oracle); + specId = _specId; + } - function setSpecID(bytes32 _specId) public { - specId = _specId; - } + function setSpecID( + bytes32 _specId + ) + public + { + specId = _specId; + } - function requestEthereumPrice(string memory _currency, uint256 _payment) public { - requestEthereumPriceByCallback(_currency, _payment, address(this)); - } + function requestEthereumPrice( + string memory _currency, + uint256 _payment + ) + public + { + requestEthereumPriceByCallback(_currency, _payment, address(this)); + } - function requestEthereumPriceByCallback(string memory _currency, uint256 _payment, address _callback) public { - Chainlink.Request memory req = buildChainlinkRequest(specId, _callback, this.fulfillBytes.selector); - sendChainlinkRequest(req, _payment); - } + function requestEthereumPriceByCallback( + string memory _currency, + uint256 _payment, + address _callback + ) + public + { + Chainlink.Request memory req = buildChainlinkRequest(specId, _callback, this.fulfillBytes.selector); + sendChainlinkRequest(req, _payment); + } - function requestMultipleParameters(string memory _currency, uint256 _payment) public { - Chainlink.Request memory req = buildChainlinkRequest(specId, address(this), this.fulfillMultipleParameters.selector); - sendChainlinkRequest(req, _payment); - } + function requestMultipleParameters( + string memory _currency, + uint256 _payment + ) + public + { + Chainlink.Request memory req = buildChainlinkRequest(specId, address(this), this.fulfillMultipleParameters.selector); + sendChainlinkRequest(req, _payment); + } - function cancelRequest( - address _oracle, - bytes32 _requestId, - uint256 _payment, - bytes4 _callbackFunctionId, - uint256 _expiration - ) public { - ChainlinkRequestInterface requested = ChainlinkRequestInterface(_oracle); - requested.cancelOracleRequest(_requestId, _payment, _callbackFunctionId, _expiration); - } + function cancelRequest( + address _oracle, + bytes32 _requestId, + uint256 _payment, + bytes4 _callbackFunctionId, + uint256 _expiration + ) + public + { + ChainlinkRequestInterface requested = 
ChainlinkRequestInterface(_oracle); + requested.cancelOracleRequest(_requestId, _payment, _callbackFunctionId, _expiration); + } - function withdrawLink() public { - LinkTokenInterface _link = LinkTokenInterface(chainlinkTokenAddress()); - require(_link.transfer(msg.sender, _link.balanceOf(address(this))), "Unable to transfer"); - } + function withdrawLink() + public + { + LinkTokenInterface _link = LinkTokenInterface(chainlinkTokenAddress()); + require(_link.transfer(msg.sender, _link.balanceOf(address(this))), "Unable to transfer"); + } - function addExternalRequest(address _oracle, bytes32 _requestId) external { - addChainlinkExternalRequest(_oracle, _requestId); - } + function addExternalRequest( + address _oracle, + bytes32 _requestId + ) + external + { + addChainlinkExternalRequest(_oracle, _requestId); + } - function fulfillMultipleParameters(bytes32 _requestId, bytes32 _usd, bytes32 _eur, bytes32 _jpy) + function fulfillMultipleParameters( + bytes32 _requestId, + bytes32 _usd, + bytes32 _eur, + bytes32 _jpy + ) public recordChainlinkFulfillment(_requestId) - { - emit RequestMultipleFulfilled(_requestId, _usd, _eur, _jpy); - usd = _usd; - eur = _eur; - jpy = _jpy; - } + { + emit RequestMultipleFulfilled(_requestId, _usd, _eur, _jpy); + usd = _usd; + eur = _eur; + jpy = _jpy; + } - function fulfillBytes(bytes32 _requestId, bytes memory _price) + function fulfillBytes( + bytes32 _requestId, + bytes memory _price + ) public recordChainlinkFulfillment(_requestId) - { - emit RequestFulfilled(_requestId, _price); - currentPrice = _price; - } + { + emit RequestFulfilled(_requestId, _price); + currentPrice = _price; + } } diff --git a/evm-contracts/src/v0.7/tests/OperatorForwarderDeployer.sol b/evm-contracts/src/v0.7/tests/OperatorForwarderDeployer.sol index e32ae29aa88..1efa42820ec 100644 --- a/evm-contracts/src/v0.7/tests/OperatorForwarderDeployer.sol +++ b/evm-contracts/src/v0.7/tests/OperatorForwarderDeployer.sol @@ -9,7 +9,9 @@ contract OperatorForwarderDeployer is ConfirmedOwner { address private immutable linkAddress; address[] private s_authorisedSenders; - event ForwarderDeployed(address indexed forwarder); + event ForwarderDeployed( + address indexed forwarder + ); constructor( address link, From 6d9d8204349465dce2678e44e72b8c5c6b1e25e4 Mon Sep 17 00:00:00 2001 From: Alex Roan Date: Wed, 24 Mar 2021 16:15:23 +0000 Subject: [PATCH 082/116] 0.7/vendor style --- .../src/v0.7/vendor/BufferChainlink.sol | 181 ++++++++++++++-- .../src/v0.7/vendor/CBORChainlink.sol | 200 ++++++++++++------ evm-contracts/src/v0.7/vendor/ENSResolver.sol | 10 +- .../src/v0.7/vendor/SafeMathChainlink.sol | 55 ++++- 4 files changed, 356 insertions(+), 90 deletions(-) diff --git a/evm-contracts/src/v0.7/vendor/BufferChainlink.sol b/evm-contracts/src/v0.7/vendor/BufferChainlink.sol index ef19ffc5024..f830dc2b3ae 100644 --- a/evm-contracts/src/v0.7/vendor/BufferChainlink.sol +++ b/evm-contracts/src/v0.7/vendor/BufferChainlink.sol @@ -26,7 +26,16 @@ library BufferChainlink { * @param capacity The number of bytes of space to allocate the buffer. * @return The buffer, for chaining. */ - function init(buffer memory buf, uint capacity) internal pure returns(buffer memory) { + function init( + buffer memory buf, + uint capacity + ) + internal + pure + returns( + buffer memory + ) + { if (capacity % 32 != 0) { capacity += 32 - (capacity % 32); } @@ -47,20 +56,43 @@ library BufferChainlink { * @param b The bytes object to initialize the buffer with. * @return A new buffer. 
*/ - function fromBytes(bytes memory b) internal pure returns(buffer memory) { + function fromBytes( + bytes memory b + ) + internal + pure + returns( + buffer memory + ) + { buffer memory buf; buf.buf = b; buf.capacity = b.length; return buf; } - function resize(buffer memory buf, uint capacity) private pure { + function resize( + buffer memory buf, + uint capacity + ) + private + pure + { bytes memory oldbuf = buf.buf; init(buf, capacity); append(buf, oldbuf); } - function max(uint a, uint b) private pure returns(uint) { + function max( + uint a, + uint b + ) + private + pure + returns( + uint + ) + { if (a > b) { return a; } @@ -72,7 +104,15 @@ library BufferChainlink { * @param buf The buffer to truncate. * @return The original buffer, for chaining.. */ - function truncate(buffer memory buf) internal pure returns (buffer memory) { + function truncate( + buffer memory buf + ) + internal + pure + returns ( + buffer memory + ) + { assembly { let bufptr := mload(buf) mstore(bufptr, 0) @@ -89,7 +129,18 @@ library BufferChainlink { * @param len The number of bytes to copy. * @return The original buffer, for chaining. */ - function write(buffer memory buf, uint off, bytes memory data, uint len) internal pure returns(buffer memory) { + function write( + buffer memory buf, + uint off, + bytes memory data, + uint len + ) + internal + pure + returns( + buffer memory + ) + { require(len <= data.length); if (off + len > buf.capacity) { @@ -140,7 +191,17 @@ library BufferChainlink { * @param len The number of bytes to copy. * @return The original buffer, for chaining. */ - function append(buffer memory buf, bytes memory data, uint len) internal pure returns (buffer memory) { + function append( + buffer memory buf, + bytes memory data, + uint len + ) + internal + pure + returns ( + buffer memory + ) + { return write(buf, buf.buf.length, data, len); } @@ -151,7 +212,16 @@ library BufferChainlink { * @param data The data to append. * @return The original buffer, for chaining. */ - function append(buffer memory buf, bytes memory data) internal pure returns (buffer memory) { + function append( + buffer memory buf, + bytes memory data + ) + internal + pure + returns ( + buffer memory + ) + { return write(buf, buf.buf.length, data, data.length); } @@ -163,7 +233,17 @@ library BufferChainlink { * @param data The data to append. * @return The original buffer, for chaining. */ - function writeUint8(buffer memory buf, uint off, uint8 data) internal pure returns(buffer memory) { + function writeUint8( + buffer memory buf, + uint off, + uint8 data + ) + internal + pure + returns( + buffer memory + ) + { if (off >= buf.capacity) { resize(buf, buf.capacity * 2); } @@ -191,7 +271,16 @@ library BufferChainlink { * @param data The data to append. * @return The original buffer, for chaining. */ - function appendUint8(buffer memory buf, uint8 data) internal pure returns(buffer memory) { + function appendUint8( + buffer memory buf, + uint8 data + ) + internal + pure + returns( + buffer memory + ) + { return writeUint8(buf, buf.buf.length, data); } @@ -204,7 +293,18 @@ library BufferChainlink { * @param len The number of bytes to write (left-aligned). * @return The original buffer, for chaining. 
*/ - function write(buffer memory buf, uint off, bytes32 data, uint len) private pure returns(buffer memory) { + function write( + buffer memory buf, + uint off, + bytes32 data, + uint len + ) + private + pure + returns( + buffer memory + ) + { if (len + off > buf.capacity) { resize(buf, (len + off) * 2); } @@ -234,7 +334,17 @@ library BufferChainlink { * @param data The data to append. * @return The original buffer, for chaining. */ - function writeBytes20(buffer memory buf, uint off, bytes20 data) internal pure returns (buffer memory) { + function writeBytes20( + buffer memory buf, + uint off, + bytes20 data + ) + internal + pure + returns ( + buffer memory + ) + { return write(buf, off, bytes32(data), 20); } @@ -245,7 +355,16 @@ library BufferChainlink { * @param data The data to append. * @return The original buffer, for chhaining. */ - function appendBytes20(buffer memory buf, bytes20 data) internal pure returns (buffer memory) { + function appendBytes20( + buffer memory buf, + bytes20 data + ) + internal + pure + returns ( + buffer memory + ) + { return write(buf, buf.buf.length, bytes32(data), 20); } @@ -256,7 +375,16 @@ library BufferChainlink { * @param data The data to append. * @return The original buffer, for chaining. */ - function appendBytes32(buffer memory buf, bytes32 data) internal pure returns (buffer memory) { + function appendBytes32( + buffer memory buf, + bytes32 data + ) + internal + pure + returns ( + buffer memory + ) + { return write(buf, buf.buf.length, data, 32); } @@ -269,7 +397,18 @@ library BufferChainlink { * @param len The number of bytes to write (right-aligned). * @return The original buffer, for chaining. */ - function writeInt(buffer memory buf, uint off, uint data, uint len) private pure returns(buffer memory) { + function writeInt( + buffer memory buf, + uint off, + uint data, + uint len + ) + private + pure + returns( + buffer memory + ) + { if (len + off > buf.capacity) { resize(buf, (len + off) * 2); } @@ -296,7 +435,17 @@ library BufferChainlink { * @param data The data to append. * @return The original buffer. 
*/ - function appendInt(buffer memory buf, uint data, uint len) internal pure returns(buffer memory) { + function appendInt( + buffer memory buf, + uint data, + uint len + ) + internal + pure + returns( + buffer memory + ) + { return writeInt(buf, buf.buf.length, data, len); } } diff --git a/evm-contracts/src/v0.7/vendor/CBORChainlink.sol b/evm-contracts/src/v0.7/vendor/CBORChainlink.sol index 8c2ea04f99f..5777a2e9ce7 100644 --- a/evm-contracts/src/v0.7/vendor/CBORChainlink.sol +++ b/evm-contracts/src/v0.7/vendor/CBORChainlink.sol @@ -4,87 +4,151 @@ pragma solidity >= 0.4.19; import { BufferChainlink } from "./BufferChainlink.sol"; library CBORChainlink { - using BufferChainlink for BufferChainlink.buffer; + using BufferChainlink for BufferChainlink.buffer; - uint8 private constant MAJOR_TYPE_INT = 0; - uint8 private constant MAJOR_TYPE_NEGATIVE_INT = 1; - uint8 private constant MAJOR_TYPE_BYTES = 2; - uint8 private constant MAJOR_TYPE_STRING = 3; - uint8 private constant MAJOR_TYPE_ARRAY = 4; - uint8 private constant MAJOR_TYPE_MAP = 5; - uint8 private constant MAJOR_TYPE_TAG = 6; - uint8 private constant MAJOR_TYPE_CONTENT_FREE = 7; + uint8 private constant MAJOR_TYPE_INT = 0; + uint8 private constant MAJOR_TYPE_NEGATIVE_INT = 1; + uint8 private constant MAJOR_TYPE_BYTES = 2; + uint8 private constant MAJOR_TYPE_STRING = 3; + uint8 private constant MAJOR_TYPE_ARRAY = 4; + uint8 private constant MAJOR_TYPE_MAP = 5; + uint8 private constant MAJOR_TYPE_TAG = 6; + uint8 private constant MAJOR_TYPE_CONTENT_FREE = 7; - uint8 private constant TAG_TYPE_BIGNUM = 2; - uint8 private constant TAG_TYPE_NEGATIVE_BIGNUM = 3; + uint8 private constant TAG_TYPE_BIGNUM = 2; + uint8 private constant TAG_TYPE_NEGATIVE_BIGNUM = 3; - function encodeType(BufferChainlink.buffer memory buf, uint8 major, uint value) private pure { - if(value <= 23) { - buf.appendUint8(uint8((major << 5) | value)); - } else if(value <= 0xFF) { - buf.appendUint8(uint8((major << 5) | 24)); - buf.appendInt(value, 1); - } else if(value <= 0xFFFF) { - buf.appendUint8(uint8((major << 5) | 25)); - buf.appendInt(value, 2); - } else if(value <= 0xFFFFFFFF) { - buf.appendUint8(uint8((major << 5) | 26)); - buf.appendInt(value, 4); - } else if(value <= 0xFFFFFFFFFFFFFFFF) { - buf.appendUint8(uint8((major << 5) | 27)); - buf.appendInt(value, 8); - } + function encodeType( + BufferChainlink.buffer memory buf, + uint8 major, + uint value + ) + private + pure + { + if(value <= 23) { + buf.appendUint8(uint8((major << 5) | value)); + } else if(value <= 0xFF) { + buf.appendUint8(uint8((major << 5) | 24)); + buf.appendInt(value, 1); + } else if(value <= 0xFFFF) { + buf.appendUint8(uint8((major << 5) | 25)); + buf.appendInt(value, 2); + } else if(value <= 0xFFFFFFFF) { + buf.appendUint8(uint8((major << 5) | 26)); + buf.appendInt(value, 4); + } else if(value <= 0xFFFFFFFFFFFFFFFF) { + buf.appendUint8(uint8((major << 5) | 27)); + buf.appendInt(value, 8); } + } - function encodeIndefiniteLengthType(BufferChainlink.buffer memory buf, uint8 major) private pure { - buf.appendUint8(uint8((major << 5) | 31)); - } + function encodeIndefiniteLengthType( + BufferChainlink.buffer memory buf, + uint8 major + ) + private + pure + { + buf.appendUint8(uint8((major << 5) | 31)); + } - function encodeUInt(BufferChainlink.buffer memory buf, uint value) internal pure { - encodeType(buf, MAJOR_TYPE_INT, value); - } + function encodeUInt( + BufferChainlink.buffer memory buf, + uint value + ) + internal + pure + { + encodeType(buf, MAJOR_TYPE_INT, value); + } - function 
encodeInt(BufferChainlink.buffer memory buf, int value) internal pure { - if(value < -0x10000000000000000) { - encodeSignedBigNum(buf, value); - } else if(value > 0xFFFFFFFFFFFFFFFF) { - encodeBigNum(buf, value); - } else if(value >= 0) { - encodeType(buf, MAJOR_TYPE_INT, uint(value)); - } else { - encodeType(buf, MAJOR_TYPE_NEGATIVE_INT, uint(-1 - value)); - } + function encodeInt( + BufferChainlink.buffer memory buf, + int value + ) + internal + pure + { + if(value < -0x10000000000000000) { + encodeSignedBigNum(buf, value); + } else if(value > 0xFFFFFFFFFFFFFFFF) { + encodeBigNum(buf, value); + } else if(value >= 0) { + encodeType(buf, MAJOR_TYPE_INT, uint(value)); + } else { + encodeType(buf, MAJOR_TYPE_NEGATIVE_INT, uint(-1 - value)); } + } - function encodeBytes(BufferChainlink.buffer memory buf, bytes memory value) internal pure { - encodeType(buf, MAJOR_TYPE_BYTES, value.length); - buf.append(value); - } + function encodeBytes( + BufferChainlink.buffer memory buf, + bytes memory value + ) + internal + pure + { + encodeType(buf, MAJOR_TYPE_BYTES, value.length); + buf.append(value); + } - function encodeBigNum(BufferChainlink.buffer memory buf, int value) internal pure { - buf.appendUint8(uint8((MAJOR_TYPE_TAG << 5) | TAG_TYPE_BIGNUM)); - encodeBytes(buf, abi.encode(uint(value))); - } + function encodeBigNum( + BufferChainlink.buffer memory buf, + int value + ) + internal + pure + { + buf.appendUint8(uint8((MAJOR_TYPE_TAG << 5) | TAG_TYPE_BIGNUM)); + encodeBytes(buf, abi.encode(uint(value))); + } - function encodeSignedBigNum(BufferChainlink.buffer memory buf, int input) internal pure { - buf.appendUint8(uint8((MAJOR_TYPE_TAG << 5) | TAG_TYPE_NEGATIVE_BIGNUM)); - encodeBytes(buf, abi.encode(uint(-1 - input))); - } + function encodeSignedBigNum( + BufferChainlink.buffer memory buf, + int input + ) + internal + pure + { + buf.appendUint8(uint8((MAJOR_TYPE_TAG << 5) | TAG_TYPE_NEGATIVE_BIGNUM)); + encodeBytes(buf, abi.encode(uint(-1 - input))); + } - function encodeString(BufferChainlink.buffer memory buf, string memory value) internal pure { - encodeType(buf, MAJOR_TYPE_STRING, bytes(value).length); - buf.append(bytes(value)); - } + function encodeString( + BufferChainlink.buffer memory buf, + string memory value + ) + internal + pure + { + encodeType(buf, MAJOR_TYPE_STRING, bytes(value).length); + buf.append(bytes(value)); + } - function startArray(BufferChainlink.buffer memory buf) internal pure { - encodeIndefiniteLengthType(buf, MAJOR_TYPE_ARRAY); - } + function startArray( + BufferChainlink.buffer memory buf + ) + internal + pure + { + encodeIndefiniteLengthType(buf, MAJOR_TYPE_ARRAY); + } - function startMap(BufferChainlink.buffer memory buf) internal pure { - encodeIndefiniteLengthType(buf, MAJOR_TYPE_MAP); - } + function startMap( + BufferChainlink.buffer memory buf + ) + internal + pure + { + encodeIndefiniteLengthType(buf, MAJOR_TYPE_MAP); + } - function endSequence(BufferChainlink.buffer memory buf) internal pure { - encodeIndefiniteLengthType(buf, MAJOR_TYPE_CONTENT_FREE); - } + function endSequence( + BufferChainlink.buffer memory buf + ) + internal + pure + { + encodeIndefiniteLengthType(buf, MAJOR_TYPE_CONTENT_FREE); + } } diff --git a/evm-contracts/src/v0.7/vendor/ENSResolver.sol b/evm-contracts/src/v0.7/vendor/ENSResolver.sol index d5cbc6727bf..9fd61d2bcea 100644 --- a/evm-contracts/src/v0.7/vendor/ENSResolver.sol +++ b/evm-contracts/src/v0.7/vendor/ENSResolver.sol @@ -2,5 +2,13 @@ pragma solidity ^0.7.0; abstract contract ENSResolver { - function addr(bytes32 node) 
public view virtual returns (address); + function addr( + bytes32 node + ) + public + view + virtual + returns ( + address + ); } diff --git a/evm-contracts/src/v0.7/vendor/SafeMathChainlink.sol b/evm-contracts/src/v0.7/vendor/SafeMathChainlink.sol index 3345db7c963..b80b9a68e64 100644 --- a/evm-contracts/src/v0.7/vendor/SafeMathChainlink.sol +++ b/evm-contracts/src/v0.7/vendor/SafeMathChainlink.sol @@ -24,7 +24,16 @@ library SafeMathChainlink { * Requirements: * - Addition cannot overflow. */ - function add(uint256 a, uint256 b) internal pure returns (uint256) { + function add( + uint256 a, + uint256 b + ) + internal + pure + returns ( + uint256 + ) + { uint256 c = a + b; require(c >= a, "SafeMath: addition overflow"); @@ -40,7 +49,16 @@ library SafeMathChainlink { * Requirements: * - Subtraction cannot overflow. */ - function sub(uint256 a, uint256 b) internal pure returns (uint256) { + function sub( + uint256 a, + uint256 b + ) + internal + pure + returns ( + uint256 + ) + { require(b <= a, "SafeMath: subtraction overflow"); uint256 c = a - b; @@ -56,7 +74,16 @@ library SafeMathChainlink { * Requirements: * - Multiplication cannot overflow. */ - function mul(uint256 a, uint256 b) internal pure returns (uint256) { + function mul( + uint256 a, + uint256 b + ) + internal + pure + returns ( + uint256 + ) + { // Gas optimization: this is cheaper than requiring 'a' not being zero, but the // benefit is lost if 'b' is also tested. // See: https://github.com/OpenZeppelin/openzeppelin-solidity/pull/522 @@ -81,7 +108,16 @@ library SafeMathChainlink { * Requirements: * - The divisor cannot be zero. */ - function div(uint256 a, uint256 b) internal pure returns (uint256) { + function div( + uint256 a, + uint256 b + ) + internal + pure + returns ( + uint256 + ) + { // Solidity only automatically asserts when dividing by 0 require(b > 0, "SafeMath: division by zero"); uint256 c = a / b; @@ -101,7 +137,16 @@ library SafeMathChainlink { * Requirements: * - The divisor cannot be zero. 
*/ - function mod(uint256 a, uint256 b) internal pure returns (uint256) { + function mod( + uint256 a, + uint256 b + ) + internal + pure + returns ( + uint256 + ) + { require(b != 0, "SafeMath: modulo by zero"); return a % b; } From 65a3f3d973dcbe1b6758a418fe586e5260223059 Mon Sep 17 00:00:00 2001 From: Ryan Hall Date: Tue, 23 Mar 2021 12:37:25 -0500 Subject: [PATCH 083/116] add keeper debug logging --- .../registry_synchronizer_log_listener.go | 9 ++++++++- .../registry_synchronizer_process_logs.go | 8 ++++++++ core/services/keeper/upkeep_executer.go | 20 ++++++++++++++----- 3 files changed, 31 insertions(+), 6 deletions(-) diff --git a/core/services/keeper/registry_synchronizer_log_listener.go b/core/services/keeper/registry_synchronizer_log_listener.go index 49cf31fde2a..2f73c492880 100644 --- a/core/services/keeper/registry_synchronizer_log_listener.go +++ b/core/services/keeper/registry_synchronizer_log_listener.go @@ -28,10 +28,17 @@ func (rs *RegistrySynchronizer) IsV2Job() bool { func (rs *RegistrySynchronizer) HandleLog(broadcast log.Broadcast) { log := broadcast.DecodedLog() if log == nil || reflect.ValueOf(log).IsNil() { - logger.Error("HandleLog: ignoring nil value") + logger.Errorf("RegistrySynchronizer: HandleLog: ignoring nil value, type: %T", broadcast) return } + logger.Debugw( + "RegistrySynchronizer: received log, waiting for confirmations", + "jobID", rs.job.ID, + "logType", reflect.TypeOf(log), + "txHash", broadcast.RawLog().TxHash.Hex(), + ) + switch log := log.(type) { case *keeper_registry_wrapper.KeeperRegistryKeepersUpdated: rs.mailRoom.mbSyncRegistry.Deliver(broadcast) // same mailbox because same action diff --git a/core/services/keeper/registry_synchronizer_process_logs.go b/core/services/keeper/registry_synchronizer_process_logs.go index c4750599a77..249e67c750a 100644 --- a/core/services/keeper/registry_synchronizer_process_logs.go +++ b/core/services/keeper/registry_synchronizer_process_logs.go @@ -33,6 +33,8 @@ func (rs *RegistrySynchronizer) handleSyncRegistryLog(head models.Head, done fun logger.Errorf("RegistrySynchronizer: invariant violation, expected log.Broadcast but got %T", broadcast) return } + txHash := broadcast.RawLog().TxHash.Hex() + logger.Debugw("RegistrySynchronizer: processing SyncRegistry log", "jobID", rs.job.ID, "txHash", txHash) was, err := broadcast.WasAlreadyConsumed() if err != nil { logger.Warn(errors.Wrapf(err, "RegistrySynchronizer: unable to check if log was consumed, jobID: %d", rs.job.ID)) @@ -66,6 +68,8 @@ func (rs *RegistrySynchronizer) handleUpkeepCanceledLogs(head models.Head, done logger.Errorf("RegistrySynchronizer: invariant violation, expected log.Broadcast but got %T", broadcast) continue } + txHash := broadcast.RawLog().TxHash.Hex() + logger.Debugw("RegistrySynchronizer: processing UpkeepCanceled log", "jobID", rs.job.ID, "txHash", txHash) was, err := broadcast.WasAlreadyConsumed() if err != nil { logger.Warn(errors.Wrapf(err, "RegistrySynchronizer: unable to check if log was consumed, jobID: %d", rs.job.ID)) @@ -114,6 +118,8 @@ func (rs *RegistrySynchronizer) handleUpkeepRegisteredLogs(head models.Head, don logger.Errorf("RegistrySynchronizer: invariant violation, expected log.Broadcast but got %T", broadcast) continue } + txHash := broadcast.RawLog().TxHash.Hex() + logger.Debugw("RegistrySynchronizer: processing UpkeepRegistered log", "jobID", rs.job.ID, "txHash", txHash) was, err := broadcast.WasAlreadyConsumed() if err != nil { logger.Warn(errors.Wrapf(err, "RegistrySynchronizer: unable to check if log was 
consumed, jobID: %d", rs.job.ID)) @@ -153,6 +159,8 @@ func (rs *RegistrySynchronizer) handleUpkeepPerformedLogs(head models.Head, done logger.Errorf("RegistrySynchronizer: invariant violation, expected log.Broadcast but got %T", broadcast) continue } + txHash := broadcast.RawLog().TxHash.Hex() + logger.Debugw("RegistrySynchronizer: processing UpkeepPerformed log", "jobID", rs.job.ID, "txHash", txHash) was, err := broadcast.WasAlreadyConsumed() if err != nil { logger.Warn(errors.Wrapf(err, "RegistrySynchronizer: unable to check if log was consumed, jobID: %d", rs.job.ID)) diff --git a/core/services/keeper/upkeep_executer.go b/core/services/keeper/upkeep_executer.go index 3ff8cc14c8c..e10f1f9d499 100644 --- a/core/services/keeper/upkeep_executer.go +++ b/core/services/keeper/upkeep_executer.go @@ -109,7 +109,8 @@ func (executor *UpkeepExecutor) processActiveUpkeeps() { logger.Errorf("expected `models.Head`, got %T", head) return } - logger.Debug("received new block, running checkUpkeep for keeper registrations", "blockheight", head.Number) + + logger.Debugw("UpkeepExecutor: checking active upkeeps", "blockheight", head.Number, "jobID", executor.job.ID) ctx, cancel := postgres.DefaultQueryCtx() defer cancel() @@ -135,20 +136,27 @@ func (executor *UpkeepExecutor) processActiveUpkeeps() { func (executor *UpkeepExecutor) execute(upkeep UpkeepRegistration, headNumber int64, done func()) { defer done() + logArgs := []interface{}{ + "jobID", executor.job.ID, + "blockNum", headNumber, + "registryAddress", upkeep.Registry.ContractAddress.Hex(), + "upkeepID", upkeep.UpkeepID, + } + msg, err := constructCheckUpkeepCallMsg(upkeep) if err != nil { logger.Error(err) return } - logger.Debugf("Checking upkeep on registry: %s, upkeepID %d", upkeep.Registry.ContractAddress.Hex(), upkeep.UpkeepID) + logger.Debugw("UpkeepExecutor: checking upkeep", logArgs...) ctxService, cancel := utils.ContextFromChan(executor.chStop) defer cancel() checkUpkeepResult, err := executor.ethClient.CallContract(ctxService, msg, nil) if err != nil { - logger.Debugf("checkUpkeep failed on registry: %s, upkeepID %d", upkeep.Registry.ContractAddress.Hex(), upkeep.UpkeepID) + logger.Debugw("UpkeepExecutor: checkUpkeep failed", logArgs...) return } @@ -158,7 +166,7 @@ func (executor *UpkeepExecutor) execute(upkeep UpkeepRegistration, headNumber in return } - logger.Debugf("Performing upkeep on registry: %s, upkeepID %d", upkeep.Registry.ContractAddress.Hex(), upkeep.UpkeepID) + logger.Debugw("UpkeepExecutor: performing upkeep", logArgs...) ctxQuery, _ := postgres.DefaultQueryCtx() ctxCombined, cancel := utils.CombinedContext(executor.chStop, ctxQuery) @@ -177,7 +185,9 @@ func (executor *UpkeepExecutor) execute(upkeep UpkeepRegistration, headNumber in // that the tx gets confirmed in. This is fine because this grace period is just used as a fallback // in case we miss the UpkeepPerformed log or the tx errors. It does not need to be exact. err = executor.orm.SetLastRunHeightForUpkeepOnJob(ctxCombined, executor.job.ID, upkeep.UpkeepID, headNumber) - logger.ErrorIf(err, "UpkeepExecutor: unable to setLastRunHeightForUpkeep for upkeep") + if err != nil { + logger.Errorw("UpkeepExecutor: unable to setLastRunHeightForUpkeep for upkeep", logArgs...) 
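+		// Note (editorial sketch, not part of this diff): logArgs is a flat,
+		// alternating key/value list, so every Debugw/Errorw call in this
+		// function emits the same jobID/blockNum/registryAddress/upkeepID
+		// context without rebuilding it for each log line.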
+ } } func constructCheckUpkeepCallMsg(upkeep UpkeepRegistration) (ethereum.CallMsg, error) { From e33bc22bfaf18afc2ad2274f79e61466534187e9 Mon Sep 17 00:00:00 2001 From: Sam Date: Wed, 24 Mar 2021 17:08:47 +0000 Subject: [PATCH 084/116] Remove minimum gas bump Wei limit (for RSK chain) --- core/store/orm/config.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/core/store/orm/config.go b/core/store/orm/config.go index 72882a7efcd..939e34d7f99 100644 --- a/core/store/orm/config.go +++ b/core/store/orm/config.go @@ -111,9 +111,6 @@ func (c *Config) Validate() error { ethCore.DefaultTxPoolConfig.PriceBump, ) } - if c.EthGasBumpWei().Cmp(big.NewInt(1000000000)) < 0 { - return errors.Errorf("ETH_GAS_BUMP_WEI of %s Wei may not be less than the minimum allowed value of 5 GWei", c.EthGasBumpWei().String()) - } if c.EthHeadTrackerHistoryDepth() < c.EthFinalityDepth() { return errors.New("ETH_HEAD_TRACKER_HISTORY_DEPTH must be equal to or greater than ETH_FINALITY_DEPTH") From e22ef010f0cc52132d8d271f133d90b820147ed0 Mon Sep 17 00:00:00 2001 From: Connor Stein Date: Wed, 24 Mar 2021 16:25:07 -0400 Subject: [PATCH 085/116] Remove result task type (#4110) Remove result task type. Introduced more specific RunErrors type for pipeline.Run.Errors --- core/internal/cltest/cltest.go | 4 +- core/internal/cltest/factories.go | 2 +- .../fluxmonitorv2/flux_monitor_test.go | 8 +- .../job/job_pipeline_orm_integration_test.go | 57 +++++++--- core/services/job/runner_integration_test.go | 19 +--- core/services/pipeline/common.go | 104 ++++-------------- core/services/pipeline/graph.go | 22 ---- core/services/pipeline/models.go | 61 +++++++--- core/services/pipeline/models_test.go | 54 +++++++-- core/services/pipeline/orm.go | 99 ++++++----------- core/services/pipeline/orm_test.go | 41 +++---- core/services/pipeline/runner.go | 63 +++-------- core/services/pipeline/runner_test.go | 51 +++------ core/services/pipeline/task.base.go | 38 +++++++ core/services/pipeline/task.result.go | 85 -------------- core/services/pipeline/test_helpers.go | 10 -- .../migrations/0020_remove_result_task.go | 42 +++++++ core/store/migrations/migrate_test.go | 4 +- go.sum | 2 - 19 files changed, 326 insertions(+), 440 deletions(-) create mode 100644 core/services/pipeline/task.base.go delete mode 100644 core/services/pipeline/task.result.go create mode 100644 core/store/migrations/0020_remove_result_task.go diff --git a/core/internal/cltest/cltest.go b/core/internal/cltest/cltest.go index a5c2726964a..58de042dac3 100644 --- a/core/internal/cltest/cltest.go +++ b/core/internal/cltest/cltest.go @@ -1155,9 +1155,7 @@ func WaitForPipelineComplete(t testing.TB, nodeID int, jobID int32, jo job.ORM, assert.NoError(t, err) for i := range prs { if !prs[i].Outputs.Null { - errs, err := prs[i].Errors.MarshalJSON() - assert.NoError(t, err) - if string(errs) != "[null]" { + if prs[i].Errors.HasError() { return nil } pr = prs[i] diff --git a/core/internal/cltest/factories.go b/core/internal/cltest/factories.go index ff0912b5f07..2053d7b9ec3 100644 --- a/core/internal/cltest/factories.go +++ b/core/internal/cltest/factories.go @@ -828,7 +828,7 @@ func NewRoundStateForRoundID(store *strpkg.Store, roundID uint32, latestSubmissi func MustInsertPipelineRun(t *testing.T, db *gorm.DB) pipeline.Run { run := pipeline.Run{ Outputs: pipeline.JSONSerializable{Null: true}, - Errors: pipeline.JSONSerializable{Null: true}, + Errors: pipeline.RunErrors{}, FinishedAt: nil, } require.NoError(t, db.Create(&run).Error) diff --git 
a/core/services/fluxmonitorv2/flux_monitor_test.go b/core/services/fluxmonitorv2/flux_monitor_test.go index 865dfdab882..225abe80b0f 100644 --- a/core/services/fluxmonitorv2/flux_monitor_test.go +++ b/core/services/fluxmonitorv2/flux_monitor_test.go @@ -7,6 +7,8 @@ import ( "testing" "time" + "gopkg.in/guregu/null.v4" + "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" @@ -29,7 +31,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - "gopkg.in/guregu/null.v4" ) const oracleCount uint8 = 17 @@ -318,9 +319,8 @@ func TestFluxMonitor_PollIfEligible(t *testing.T) { now := time.Now() run.FinishedAt = &now case pipeline.RunStatusErrored: - run.Errors = pipeline.JSONSerializable{ - Val: pipeline.FinalErrors{null.StringFrom("Random: String, foo")}, - Null: false, + run.Errors = []null.String{ + null.StringFrom("Random: String, foo"), } default: } diff --git a/core/services/job/job_pipeline_orm_integration_test.go b/core/services/job/job_pipeline_orm_integration_test.go index 0c2f7ae26cf..9908a46b06b 100644 --- a/core/services/job/job_pipeline_orm_integration_test.go +++ b/core/services/job/job_pipeline_orm_integration_test.go @@ -6,19 +6,21 @@ import ( "testing" "time" + "gopkg.in/guregu/null.v4" + + "github.com/pkg/errors" + "github.com/smartcontractkit/chainlink/core/logger" "github.com/smartcontractkit/chainlink/core/services/job" "github.com/smartcontractkit/chainlink/core/services/pipeline" - "github.com/pkg/errors" "github.com/shopspring/decimal" "github.com/smartcontractkit/chainlink/core/internal/cltest" "github.com/smartcontractkit/chainlink/core/services/postgres" "github.com/smartcontractkit/chainlink/core/store/models" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "gopkg.in/guregu/null.v4" "gorm.io/gorm" ) @@ -40,9 +42,6 @@ func TestPipelineORM_Integration(t *testing.T) { u, err := url.Parse("https://chain.link/voter_turnout/USA-2020") require.NoError(t, err) - result := &pipeline.ResultTask{ - BaseTask: pipeline.NewBaseTask("__result__", nil, 0, 0), - } answer1 := &pipeline.MedianTask{ BaseTask: pipeline.NewBaseTask("answer1", nil, 0, 0), } @@ -76,7 +75,17 @@ func TestPipelineORM_Integration(t *testing.T) { RequestData: pipeline.HttpRequestData{"hi": "hello"}, BaseTask: pipeline.NewBaseTask("ds2", ds2_parse, 0, 0), } - expectedTasks := []pipeline.Task{result, answer1, answer2, ds1_multiply, ds1_parse, ds1, ds2_multiply, ds2_parse, ds2} + taskMap := map[string]pipeline.Task{ + "answer1": answer1, + "answer2": answer2, + "ds1": ds1, + "ds2": ds2, + "ds1_parse": ds1_parse, + "ds2_parse": ds2_parse, + "ds1_multiply": ds1_multiply, + "ds2_multiply": ds2_multiply, + } + expectedTasks := []pipeline.Task{answer1, answer2, ds1_multiply, ds1_parse, ds1, ds2_multiply, ds2_parse, ds2} _, bridge := cltest.NewBridgeType(t, "voter_turnout", "http://blah.com") require.NoError(t, db.Create(bridge).Error) _, bridge2 := cltest.NewBridgeType(t, "election_winner", "http://blah.com") @@ -153,7 +162,7 @@ func TestPipelineORM_Integration(t *testing.T) { name string answers map[string]pipeline.Result runOutputs interface{} - runErrors interface{} + runErrors pipeline.RunErrors }{ { "all succeeded", @@ -166,10 +175,9 @@ func TestPipelineORM_Integration(t *testing.T) { "ds2_multiply": {Value: float64(6)}, "answer1": {Value: float64(7)}, "answer2": {Value: float64(8)}, - "__result__": {Value: []interface{}{float64(7), 
float64(8)}, Error: pipeline.FinalErrors{{}, {}}}, }, []interface{}{float64(7), float64(8)}, - []interface{}{nil, nil}, + []null.String{{}, {}}, }, { "all failed", @@ -182,10 +190,9 @@ func TestPipelineORM_Integration(t *testing.T) { "ds2_multiply": {Error: errors.New("fail 6")}, "answer1": {Error: errors.New("fail 7")}, "answer2": {Error: errors.New("fail 8")}, - "__result__": {Value: []interface{}{nil, nil}, Error: pipeline.FinalErrors{null.StringFrom("fail 7"), null.StringFrom("fail 8")}}, }, []interface{}{nil, nil}, - []interface{}{"fail 7", "fail 8"}, + []null.String{null.StringFrom("fail 7"), null.StringFrom("fail 8")}, }, { "some succeeded, some failed", @@ -198,10 +205,28 @@ func TestPipelineORM_Integration(t *testing.T) { "ds2_multiply": {Value: float64(4)}, "answer1": {Error: errors.New("fail 3")}, "answer2": {Value: float64(5)}, - "__result__": {Value: []interface{}{nil, float64(5)}, Error: pipeline.FinalErrors{null.StringFrom("fail 3"), {}}}, }, []interface{}{nil, float64(5)}, - []interface{}{"fail 3", nil}, + []null.String{null.StringFrom("fail 3"), {}}, + }, + { + name: "different output types", + answers: map[string]pipeline.Result{ + "ds1": {Value: float64(1)}, + "ds1_parse": {Value: float64(2)}, + "ds1_multiply": {Value: float64(3)}, + "ds2": {Value: float64(4)}, + "ds2_parse": {Value: float64(5)}, + "ds2_multiply": {Value: float64(6)}, + "answer1": {Value: map[string]interface{}{ + "a": float64(10), + }}, + "answer2": {Value: "blah"}, + }, + runOutputs: []interface{}{map[string]interface{}{ + "a": float64(10), + }, "blah"}, + runErrors: []null.String{{}, {}}, }, } @@ -274,9 +299,10 @@ func TestPipelineORM_Integration(t *testing.T) { First(&tr).Error) trr := pipeline.TaskRunResult{ ID: tr.ID, + Task: taskMap[dotID], Result: result, FinishedAt: time.Now(), - IsTerminal: dotID == "__result__", + IsTerminal: dotID == "answer1" || dotID == "answer2", } trrs = append(trrs, trr) } @@ -330,8 +356,7 @@ func TestPipelineORM_Integration(t *testing.T) { err = db.First(&pipelineRun).Error require.NoError(t, err) - require.NotNil(t, pipelineRun.Errors.Val) - require.Equal(t, test.runErrors, pipelineRun.Errors.Val) + require.Equal(t, test.runErrors, pipelineRun.Errors) require.NotNil(t, pipelineRun.Outputs.Val) require.Equal(t, test.runOutputs, pipelineRun.Outputs.Val) } diff --git a/core/services/job/runner_integration_test.go b/core/services/job/runner_integration_test.go index 6f399120ba4..0a9cfa303a8 100644 --- a/core/services/job/runner_integration_test.go +++ b/core/services/job/runner_integration_test.go @@ -114,7 +114,7 @@ func TestRunner(t *testing.T) { Where("pipeline_run_id = ?", runID). Find(&runs).Error assert.NoError(t, err) - assert.Len(t, runs, 9) + assert.Len(t, runs, 8) for _, run := range runs { if run.GetDotID() == "answer2" { @@ -133,8 +133,6 @@ func TestRunner(t *testing.T) { assert.Equal(t, "6257", run.Output.Val) } else if run.GetDotID() == "answer1" { assert.Equal(t, "6225.6", run.Output.Val) - } else if run.GetDotID() == "__result__" { - assert.Equal(t, []interface{}{"6225.6", "Hal Finney"}, run.Output.Val) } else { t.Fatalf("unknown task '%v'", run.GetDotID()) } @@ -218,7 +216,7 @@ func TestRunner(t *testing.T) { Where("pipeline_run_id = ?", runID). 
Find(&runs).Error assert.NoError(t, err) - require.Len(t, runs, 4) + require.Len(t, runs, 3) for _, run := range runs { if run.GetDotID() == "ds1" { @@ -232,9 +230,6 @@ func TestRunner(t *testing.T) { } else if run.GetDotID() == "ds1_multiply" { assert.Equal(t, "type cannot be converted to decimal.Decimal", run.Error.ValueOrZero()) assert.Nil(t, run.Output) - } else if run.GetDotID() == "__result__" { - assert.Equal(t, []interface{}{nil}, run.Output.Val) - assert.Equal(t, "[\"type \\u003cnil\\u003e cannot be converted to decimal.Decimal\"]", run.Error.ValueOrZero()) } else { t.Fatalf("unknown task '%v'", run.GetDotID()) } @@ -278,7 +273,7 @@ func TestRunner(t *testing.T) { Where("pipeline_run_id = ?", runID). Find(&runs).Error assert.NoError(t, err) - require.Len(t, runs, 4) + require.Len(t, runs, 3) for _, run := range runs { if run.GetDotID() == "ds1" { @@ -290,9 +285,6 @@ func TestRunner(t *testing.T) { } else if run.GetDotID() == "ds1_multiply" { assert.Equal(t, "could not resolve path [\"USD\"] in {\"Response\":\"Error\",\"Message\":\"You are over your rate limit please upgrade your account!\",\"HasWarning\":false,\"Type\":99,\"RateLimit\":{\"calls_made\":{\"second\":5,\"minute\":5,\"hour\":955,\"day\":10004,\"month\":15146,\"total_calls\":15152},\"max_calls\":{\"second\":20,\"minute\":300,\"hour\":3000,\"day\":10000,\"month\":75000}},\"Data\":{}}", run.Error.ValueOrZero()) assert.Nil(t, run.Output) - } else if run.GetDotID() == "__result__" { - assert.Equal(t, []interface{}{nil}, run.Output.Val) - assert.Equal(t, "[\"could not resolve path [\\\"USD\\\"] in {\\\"Response\\\":\\\"Error\\\",\\\"Message\\\":\\\"You are over your rate limit please upgrade your account!\\\",\\\"HasWarning\\\":false,\\\"Type\\\":99,\\\"RateLimit\\\":{\\\"calls_made\\\":{\\\"second\\\":5,\\\"minute\\\":5,\\\"hour\\\":955,\\\"day\\\":10004,\\\"month\\\":15146,\\\"total_calls\\\":15152},\\\"max_calls\\\":{\\\"second\\\":20,\\\"minute\\\":300,\\\"hour\\\":3000,\\\"day\\\":10000,\\\"month\\\":75000}},\\\"Data\\\":{}}\"]", run.Error.ValueOrZero()) } else { t.Fatalf("unknown task '%v'", run.GetDotID()) } @@ -336,7 +328,7 @@ func TestRunner(t *testing.T) { Where("pipeline_run_id = ?", runID). 
Find(&runs).Error assert.NoError(t, err) - require.Len(t, runs, 4) + require.Len(t, runs, 3) for _, run := range runs { if run.GetDotID() == "ds1" { @@ -348,9 +340,6 @@ func TestRunner(t *testing.T) { } else if run.GetDotID() == "ds1_multiply" { assert.Equal(t, "type cannot be converted to decimal.Decimal", run.Error.ValueOrZero()) assert.Nil(t, run.Output) - } else if run.GetDotID() == "__result__" { - assert.Equal(t, []interface{}{nil}, run.Output.Val) - assert.Equal(t, "[\"type \\u003cnil\\u003e cannot be converted to decimal.Decimal\"]", run.Error.ValueOrZero()) } else { t.Fatalf("unknown task '%v'", run.GetDotID()) } diff --git a/core/services/pipeline/common.go b/core/services/pipeline/common.go index f759b5a3c43..171e5b8ca1f 100644 --- a/core/services/pipeline/common.go +++ b/core/services/pipeline/common.go @@ -6,6 +6,7 @@ import ( "encoding/json" "net/url" "reflect" + "sort" "strconv" "strings" "sync" @@ -73,14 +74,6 @@ type Result struct { Error error } -// FinalResult is the result of a Run -// TODO: Get rid of FinalErrors and use FinalResult instead -// https://www.pivotaltracker.com/story/show/176557536 -type FinalResult struct { - Values []interface{} - Errors []error -} - // OutputDB dumps a single result output for a pipeline_run or pipeline_task_run func (result Result) OutputDB() JSONSerializable { return JSONSerializable{Val: result.Value, Null: result.Value == nil} @@ -89,21 +82,25 @@ func (result Result) OutputDB() JSONSerializable { // ErrorDB dumps a single result error for a pipeline_task_run func (result Result) ErrorDB() null.String { var errString null.String - if finalErrors, is := result.Error.(FinalErrors); is { - errString = null.StringFrom(finalErrors.Error()) - } else if result.Error != nil { + if result.Error != nil { errString = null.StringFrom(result.Error.Error()) } return errString } +// FinalResult is the result of a Run +type FinalResult struct { + Values []interface{} + Errors []error +} + // OutputsDB dumps a result output for a pipeline_run func (result FinalResult) OutputsDB() JSONSerializable { return JSONSerializable{Val: result.Values, Null: false} } // ErrorsDB dumps a result error for a pipeline_run -func (result FinalResult) ErrorsDB() JSONSerializable { +func (result FinalResult) ErrorsDB() RunErrors { errStrs := make([]null.String, len(result.Errors)) for i, err := range result.Errors { if err == nil { @@ -113,7 +110,7 @@ func (result FinalResult) ErrorsDB() JSONSerializable { } } - return JSONSerializable{Val: errStrs, Null: false} + return errStrs } // HasErrors returns true if the final result has any errors @@ -151,33 +148,17 @@ type TaskRunResult struct { type TaskRunResults []TaskRunResult // FinalResult pulls the FinalResult for the pipeline_run from the task runs -func (trrs TaskRunResults) FinalResult() (result FinalResult) { +// It needs to respect the output index of each task +func (trrs TaskRunResults) FinalResult() FinalResult { var found bool + var fr FinalResult + sort.Slice(trrs, func(i, j int) bool { + return trrs[i].Task.OutputIndex() < trrs[j].Task.OutputIndex() + }) for _, trr := range trrs { if trr.IsTerminal { - // FIXME: This is a mess because of the special `__result__` task. - // It gets much simpler and will change when the magical - // "__result__" type is removed. - // https://www.pivotaltracker.com/story/show/176557536 - values, is := trr.Result.Value.([]interface{}) - if !is { - panic("expected terminal task run result to have multiple values") - } - result.Values = append(result.Values, values...) 
- - finalErrs, is := trr.Result.Error.(FinalErrors) - if !is { - panic("expected terminal task run result to be FinalErrors") - } - errs := make([]error, len(finalErrs)) - for i, finalErr := range finalErrs { - if finalErr.IsZero() { - errs[i] = nil - } else { - errs[i] = errors.New(finalErr.ValueOrZero()) - } - } - result.Errors = append(result.Errors, errs...) + fr.Values = append(fr.Values, trr.Result.Value) + fr.Errors = append(fr.Errors, trr.Result.Error) found = true } } @@ -186,7 +167,7 @@ func (trrs TaskRunResults) FinalResult() (result FinalResult) { logger.Errorw("expected at least one task to be final", "tasks", trrs) panic("expected at least one task to be final") } - return + return fr } type RunWithResults struct { @@ -194,41 +175,6 @@ type RunWithResults struct { TaskRunResults TaskRunResults } -type BaseTask struct { - outputTask Task - dotID string `mapstructure:"-"` - nPreds int `mapstructure:"-"` - Index int32 `mapstructure:"index" json:"-" ` - Timeout time.Duration `mapstructure:"timeout"` -} - -func (t BaseTask) NPreds() int { - return t.nPreds -} - -func (t BaseTask) DotID() string { - return t.dotID -} - -func (t BaseTask) OutputIndex() int32 { - return t.Index -} - -func (t BaseTask) OutputTask() Task { - return t.outputTask -} - -func (t *BaseTask) SetOutputTask(outputTask Task) { - t.outputTask = outputTask -} - -func (t BaseTask) TaskTimeout() (time.Duration, bool) { - if t.Timeout == time.Duration(0) { - return time.Duration(0), false - } - return t.Timeout, true -} - type JSONSerializable struct { Val interface{} Null bool @@ -280,15 +226,12 @@ const ( TaskTypeMedian TaskType = "median" TaskTypeMultiply TaskType = "multiply" TaskTypeJSONParse TaskType = "jsonparse" - TaskTypeResult TaskType = "result" TaskTypeAny TaskType = "any" // Testing only. TaskTypePanic TaskType = "panic" ) -const ResultTaskDotID = "__result__" - func UnmarshalTaskFromMap(taskType TaskType, taskMap interface{}, dotID string, config Config, txdb *gorm.DB, txdbMutex *sync.Mutex, nPreds int) (_ Task, err error) { defer utils.WrapIfError(&err, "UnmarshalTaskFromMap") @@ -316,8 +259,6 @@ func UnmarshalTaskFromMap(taskType TaskType, taskMap interface{}, dotID string, task = &JSONParseTask{BaseTask: BaseTask{dotID: dotID, nPreds: nPreds}} case TaskTypeMultiply: task = &MultiplyTask{BaseTask: BaseTask{dotID: dotID, nPreds: nPreds}} - case TaskTypeResult: - task = &ResultTask{BaseTask: BaseTask{dotID: ResultTaskDotID}} default: return nil, errors.Errorf(`unknown task type: "%v"`, taskType) } @@ -380,13 +321,6 @@ func UnmarshalTaskFromMap(taskType TaskType, taskMap interface{}, dotID string, return task, nil } -func WrapResultIfError(result *Result, msg string, args ...interface{}) { - if result.Error != nil { - logger.Errorf(msg+": %+v", append(args, result.Error)...) - result.Error = errors.Wrapf(result.Error, msg, args...) 
- } -} - type HttpRequestData map[string]interface{} func (h *HttpRequestData) Scan(value interface{}) error { return json.Unmarshal(value.([]byte), h) } diff --git a/core/services/pipeline/graph.go b/core/services/pipeline/graph.go index 1982a8dfe9e..07ec70ad616 100644 --- a/core/services/pipeline/graph.go +++ b/core/services/pipeline/graph.go @@ -92,28 +92,6 @@ func (g TaskDAG) TasksInDependencyOrder() ([]Task, error) { return tasks, nil } -func (g TaskDAG) TasksInDependencyOrderWithResultTask() ([]Task, error) { - tasks, err := g.TasksInDependencyOrder() - if err != nil { - return nil, err - } - // Create the final result task that collects the answers from the pipeline's - // outputs. This is a Postgres-related performance optimization. - resultTask := ResultTask{BaseTask{dotID: ResultTaskDotID}} - resultPreds := 0 - for _, task := range tasks { - if task.DotID() == ResultTaskDotID { - return nil, errors.Errorf("%v is a reserved keyword and cannot be used in job specs", ResultTaskDotID) - } - if task.OutputTask() == nil { - task.SetOutputTask(&resultTask) - resultPreds++ - } - } - resultTask.nPreds = resultPreds - return append([]Task{&resultTask}, tasks...), nil -} - func (g TaskDAG) MinTimeout() (time.Duration, bool, error) { var minTimeout time.Duration = 1<<63 - 1 var aTimeoutSet bool diff --git a/core/services/pipeline/models.go b/core/services/pipeline/models.go index 3f6d1df927b..8c227a57c0e 100644 --- a/core/services/pipeline/models.go +++ b/core/services/pipeline/models.go @@ -1,6 +1,8 @@ package pipeline import ( + "database/sql/driver" + "encoding/json" "fmt" "strconv" "time" @@ -23,13 +25,13 @@ func (Spec) TableName() string { return "pipeline_specs" } -func (s Spec) TasksInDependencyOrderWithResultTask() ([]Task, error) { +func (s Spec) TasksInDependencyOrder() ([]Task, error) { d := TaskDAG{} err := d.UnmarshalText([]byte(s.DotDagSource)) if err != nil { return nil, err } - tasks, err := d.TasksInDependencyOrderWithResultTask() + tasks, err := d.TasksInDependencyOrder() if err != nil { return nil, err } @@ -37,11 +39,15 @@ func (s Spec) TasksInDependencyOrderWithResultTask() ([]Task, error) { } type Run struct { - ID int64 `json:"-" gorm:"primary_key"` - PipelineSpecID int32 `json:"-"` - PipelineSpec Spec `json:"pipelineSpec"` - Meta JSONSerializable `json:"meta"` - Errors JSONSerializable `json:"errors" gorm:"type:jsonb"` + ID int64 `json:"-" gorm:"primary_key"` + PipelineSpecID int32 `json:"-"` + PipelineSpec Spec `json:"pipelineSpec"` + Meta JSONSerializable `json:"meta"` + // The errors are only ever strings + // DB example: [null, null, "my error"] + Errors RunErrors `json:"errors" gorm:"type:jsonb"` + // The outputs can be anything. + // DB example: [1234, {"a": 10}, null] Outputs JSONSerializable `json:"outputs" gorm:"type:jsonb"` CreatedAt time.Time `json:"createdAt"` FinishedAt *time.Time `json:"finishedAt"` @@ -66,12 +72,12 @@ func (r *Run) SetID(value string) error { } func (r Run) HasErrors() bool { - return r.FinalErrors().HasErrors() -} - -func (r Run) FinalErrors() (f FinalErrors) { - f, _ = r.Errors.Val.(FinalErrors) - return f + for _, err := range r.Errors { + if !err.IsZero() { + return true + } + } + return false } // Status determines the status of the run. 
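Before the next hunk (which defines RunErrors and its Scan/Value/HasError methods), here is a minimal, runnable sketch — illustrative only, not part of this patch — of how the new Errors representation behaves: the JSONB value stored for a run (e.g. `[null, "boom"]`) decodes to one null.String per terminal output, index-aligned with Outputs, and a run has errors exactly when any entry is non-null.

package main

import (
	"encoding/json"
	"fmt"

	"gopkg.in/guregu/null.v4"
)

// RunErrors mirrors the type introduced in this patch: one entry per
// terminal task output, index-aligned with the run's Outputs.
type RunErrors []null.String

// HasError reports whether any output produced an error.
func (re RunErrors) HasError() bool {
	for _, e := range re {
		if !e.IsZero() {
			return true
		}
	}
	return false
}

func main() {
	// Same shape the jsonb column stores: output 0 succeeded, output 1 failed.
	var re RunErrors
	if err := json.Unmarshal([]byte(`[null, "boom"]`), &re); err != nil {
		panic(err)
	}
	fmt.Println(re.HasError()) // true
}
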
@@ -85,6 +91,35 @@ func (r *Run) Status() RunStatus { return RunStatusInProgress } +type RunErrors []null.String + +func (re *RunErrors) Scan(value interface{}) error { + if value == nil { + return nil + } + bytes, ok := value.([]byte) + if !ok { + return errors.Errorf("RunErrors#Scan received a value of type %T", value) + } + return json.Unmarshal(bytes, re) +} + +func (re RunErrors) Value() (driver.Value, error) { + if len(re) == 0 { + return nil, nil + } + return json.Marshal(re) +} + +func (re RunErrors) HasError() bool { + for _, e := range re { + if !e.IsZero() { + return true + } + } + return false +} + type TaskRun struct { ID int64 `json:"-" gorm:"primary_key"` Type TaskType `json:"type"` diff --git a/core/services/pipeline/models_test.go b/core/services/pipeline/models_test.go index 23076f6a064..39d57ffa917 100644 --- a/core/services/pipeline/models_test.go +++ b/core/services/pipeline/models_test.go @@ -1,12 +1,12 @@ package pipeline_test import ( + "errors" "testing" "time" "github.com/smartcontractkit/chainlink/core/services/pipeline" "github.com/stretchr/testify/assert" - "gopkg.in/guregu/null.v4" ) func TestRunStatus(t *testing.T) { @@ -25,6 +25,46 @@ func TestRunStatus(t *testing.T) { func TestRun_Status(t *testing.T) { now := time.Now() + var success = pipeline.TaskRunResults{ + { + Task: &pipeline.HTTPTask{}, + Result: pipeline.Result{ + Value: 10, + Error: nil, + }, + FinishedAt: time.Now(), + IsTerminal: true, + }, + { + Task: &pipeline.HTTPTask{}, + Result: pipeline.Result{ + Value: 10, + Error: nil, + }, + FinishedAt: time.Now(), + IsTerminal: true, + }, + } + var fail = pipeline.TaskRunResults{ + { + Task: &pipeline.HTTPTask{}, + Result: pipeline.Result{ + Value: nil, + Error: errors.New("fail"), + }, + FinishedAt: time.Now(), + IsTerminal: true, + }, + { + Task: &pipeline.HTTPTask{}, + Result: pipeline.Result{ + Value: nil, + Error: errors.New("fail"), + }, + FinishedAt: time.Now(), + IsTerminal: true, + }, + } testCases := []struct { name string @@ -34,7 +74,8 @@ func TestRun_Status(t *testing.T) { { name: "In Progress", run: &pipeline.Run{ - Errors: pipeline.JSONSerializable{}, + Errors: pipeline.RunErrors{}, + Outputs: pipeline.JSONSerializable{}, FinishedAt: nil, }, want: pipeline.RunStatusInProgress, @@ -42,7 +83,8 @@ func TestRun_Status(t *testing.T) { { name: "Completed", run: &pipeline.Run{ - Errors: pipeline.JSONSerializable{}, + Errors: success.FinalResult().ErrorsDB(), + Outputs: success.FinalResult().OutputsDB(), FinishedAt: &now, }, want: pipeline.RunStatusCompleted, @@ -50,10 +92,8 @@ func TestRun_Status(t *testing.T) { { name: "Error", run: &pipeline.Run{ - Errors: pipeline.JSONSerializable{ - Val: pipeline.FinalErrors{null.StringFrom("Random: String, foo")}, - Null: false, - }, + Outputs: fail.FinalResult().OutputsDB(), + Errors: fail.FinalResult().ErrorsDB(), FinishedAt: nil, }, want: pipeline.RunStatusErrored, diff --git a/core/services/pipeline/orm.go b/core/services/pipeline/orm.go index f679d486bfb..a24483859fa 100644 --- a/core/services/pipeline/orm.go +++ b/core/services/pipeline/orm.go @@ -2,7 +2,6 @@ package pipeline import ( "context" - "encoding/json" "fmt" "strings" "time" @@ -27,19 +26,19 @@ var ( type ORM interface { CreateSpec(ctx context.Context, db *gorm.DB, taskDAG TaskDAG, maxTaskTimeout models.Interval) (int32, error) - CreateRun(ctx context.Context, jobID int32, meta map[string]interface{}) (int64, error) - ProcessNextUnfinishedRun(ctx context.Context, fn ProcessRunFunc) (bool, error) - ListenForNewRuns() (postgres.Subscription, 
error) InsertFinishedRunWithResults(ctx context.Context, run Run, trrs []TaskRunResult) (runID int64, err error) - AwaitRun(ctx context.Context, runID int64) error - RunFinished(runID int64) (bool, error) - ResultsForRun(ctx context.Context, runID int64) ([]Result, error) DeleteRunsOlderThan(threshold time.Duration) error - FindBridge(name models.TaskType) (models.BridgeType, error) FindRun(id int64) (Run, error) - DB() *gorm.DB + + // Note below methods are not currently used to process runs. + CreateRun(ctx context.Context, jobID int32, meta map[string]interface{}) (int64, error) + AwaitRun(ctx context.Context, runID int64) error + ProcessNextUnfinishedRun(ctx context.Context, fn ProcessRunFunc) (bool, error) + ListenForNewRuns() (postgres.Subscription, error) + RunFinished(runID int64) (bool, error) + ResultsForRun(ctx context.Context, runID int64) ([]Result, error) } type orm struct { @@ -120,7 +119,7 @@ func (o *orm) CreateRun(ctx context.Context, jobID int32, meta map[string]interf } var trs []TaskRun - tasks, err := d.TasksInDependencyOrderWithResultTask() + tasks, err := d.TasksInDependencyOrder() if err != nil { return err } @@ -133,43 +132,17 @@ func (o *orm) CreateRun(ctx context.Context, jobID int32, meta map[string]interf }) } runID = run.ID - return tx.Create(&trs).Error + if len(trs) > 0 { + return tx.Create(&trs).Error + } + return nil }) return runID, errors.WithStack(err) } -// TODO: Remove generation of special "result" task -// TODO: Remove the unique index on successor_id -// https://www.pivotaltracker.com/story/show/176557536 type ProcessRunFunc func(ctx context.Context, txdb *gorm.DB, spec Spec, l logger.Logger) (TaskRunResults, bool, error) -// ProcessNextUnfinishedRun pulls the next available unfinished run from the -// database and passes it into the provided ProcessRunFunc for execution. -func (o *orm) ProcessNextUnfinishedRun(ctx context.Context, fn ProcessRunFunc) (anyRemaining bool, err error) { - // Passed in context cancels on (chStop || JobPipelineMaxTaskDuration) - utils.RetryWithBackoff(ctx, func() (retry bool) { - err = o.processNextUnfinishedRun(ctx, fn) - // "Record not found" errors mean that we're done with all unclaimed - // job runs. - if errors.Is(err, gorm.ErrRecordNotFound) { - anyRemaining = false - retry = false - err = nil - } else if err != nil { - retry = true - err = errors.Wrap(err, "Pipeline runner could not process job run") - logger.Error(err) - - } else { - anyRemaining = true - retry = false - } - return - }) - return anyRemaining, errors.WithStack(err) -} - -func (o *orm) processNextUnfinishedRun(ctx context.Context, fn ProcessRunFunc) error { +func (o *orm) ProcessNextUnfinishedRun(ctx context.Context, fn ProcessRunFunc) (bool, error) { // Passed in context cancels on (chStop || JobPipelineMaxTaskDuration) txContext, cancel := context.WithTimeout(context.Background(), o.config.DatabaseMaximumTxDuration()) defer cancel() @@ -187,7 +160,7 @@ func (o *orm) processNextUnfinishedRun(ctx context.Context, fn ProcessRunFunc) e }). 
First(&pRun).Error if err != nil { - return errors.Wrap(err, "error loading run associations") + return errors.Wrap(err, "error finding unfinished run") } logger.Infow("Pipeline run started", "runID", pRun.ID) @@ -229,10 +202,13 @@ func (o *orm) processNextUnfinishedRun(ctx context.Context, fn ProcessRunFunc) e return nil }) if err != nil { - return errors.Wrap(err, "while processing run") + if errors.Is(err, gorm.ErrRecordNotFound) { + return false, nil + } + return false, errors.Wrap(err, "while processing run") } logger.Infow("Pipeline run completed", "runID", pRun.ID) - return nil + return true, nil } // updateTaskRuns updates multiple task runs in one query @@ -279,8 +255,8 @@ func (o *orm) InsertFinishedRunWithResults(ctx context.Context, run Run, trrs [] if run.FinishedAt.IsZero() { return 0, errors.New("run.FinishedAt must be set") } - if run.Outputs.Val == nil || run.Errors.Val == nil { - return 0, errors.Errorf("run must have both Outputs and Errors, got Outputs: %#v, Errors: %#v", run.Outputs.Val, run.Errors.Val) + if run.Outputs.Val == nil || len(run.Errors) == 0 { + return 0, errors.Errorf("run must have both Outputs and Errors, got Outputs: %#v, Errors: %#v", run.Outputs.Val, run.Errors) } err = postgres.GormTransaction(ctx, o.db, func(tx *gorm.DB) error { @@ -377,41 +353,34 @@ func (o *orm) ResultsForRun(ctx context.Context, runID int64) ([]Result, error) var results []Result err = postgres.GormTransaction(ctx, o.db, func(tx *gorm.DB) error { - var resultTaskRun TaskRun + var run Run err = tx.Raw(` - SELECT * FROM pipeline_task_runs - WHERE pipeline_run_id = ? + SELECT * FROM pipeline_runs + WHERE id = ? AND finished_at IS NOT NULL - AND dot_id = ? -`, runID, ResultTaskDotID).Scan(&resultTaskRun). +`, runID).Scan(&run). Error if err != nil { return errors.Wrapf(err, "Pipeline runner could not fetch pipeline results (runID: %v)", runID) } var values []interface{} - var errs FinalErrors - if resultTaskRun.Output != nil && resultTaskRun.Output.Val != nil { - vals, is := resultTaskRun.Output.Val.([]interface{}) + if !run.Outputs.Null { + vals, is := run.Outputs.Val.([]interface{}) if !is { - return errors.Errorf("Pipeline runner invariant violation: result task run's output must be []interface{}, got %T", resultTaskRun.Output.Val) + return errors.Errorf("Pipeline runner invariant violation: result task run's output must be []interface{}, got %T", run.Outputs.Val) } values = vals } - if !resultTaskRun.Error.IsZero() { - err = json.Unmarshal([]byte(resultTaskRun.Error.ValueOrZero()), &errs) - if err != nil { - return errors.Errorf("Pipeline runner invariant violation: result task run's errors must be []error, got %v", resultTaskRun.Error.ValueOrZero()) - } - } - if len(values) != len(errs) { - return errors.Errorf("Pipeline runner invariant violation: result task run must have equal numbers of outputs and errors (got %v and %v)", len(values), len(errs)) + + if len(values) != len(run.Errors) { + return errors.Errorf("Pipeline runner invariant violation: result task run must have equal numbers of outputs and errors (got %v and %v)", len(values), len(run.Errors)) } results = make([]Result, len(values)) for i := range values { results[i].Value = values[i] - if !errs[i].IsZero() { - results[i].Error = errors.New(errs[i].ValueOrZero()) + if !run.Errors[i].IsZero() { + results[i].Error = errors.New(run.Errors[i].String) } } return nil diff --git a/core/services/pipeline/orm_test.go b/core/services/pipeline/orm_test.go index 310d555f03c..2cd46884278 100644 --- 
a/core/services/pipeline/orm_test.go +++ b/core/services/pipeline/orm_test.go @@ -2,14 +2,16 @@ package pipeline_test import ( "context" + "errors" "testing" "time" + "gopkg.in/guregu/null.v4" + "github.com/smartcontractkit/chainlink/core/internal/cltest" "github.com/smartcontractkit/chainlink/core/services/pipeline" "github.com/smartcontractkit/chainlink/core/services/postgres/mocks" "github.com/stretchr/testify/require" - "gopkg.in/guregu/null.v4" ) func Test_PipelineORM_CreateRun(t *testing.T) { @@ -35,12 +37,13 @@ func Test_PipelineORM_CreateRun(t *testing.T) { require.Len(t, prs, 1) require.Equal(t, runID, prs[0].ID) - require.Len(t, trs, 4) + require.Len(t, trs, 3) } func Test_PipelineORM_UpdatePipelineRun(t *testing.T) { store, cleanup := cltest.NewStore(t) defer cleanup() + db := store.DB require.NoError(t, db.Exec(`SET CONSTRAINTS pipeline_runs_pipeline_spec_id_fkey DEFERRED`).Error) @@ -53,33 +56,23 @@ func Test_PipelineORM_UpdatePipelineRun(t *testing.T) { trrs := pipeline.TaskRunResults{ pipeline.TaskRunResult{ IsTerminal: true, + Task: &pipeline.HTTPTask{BaseTask: pipeline.BaseTask{ + Index: 0, + }}, Result: pipeline.Result{ - Value: []interface{}{nil}, - Error: pipeline.FinalErrors{null.StringFrom("Random: String, foo")}, + Value: nil, + Error: errors.New("Random: String, foo"), }, FinishedAt: time.Now(), }, - } - - err := orm.UpdatePipelineRun(db, &run, trrs.FinalResult()) - require.NoError(t, err) - - require.Equal(t, []interface{}{nil}, run.Outputs.Val) - require.Equal(t, []interface{}{"Random: String, foo"}, run.Errors.Val) - require.NotNil(t, run.FinishedAt) - }) - - t.Run("saves errored run with final errors correctly", func(t *testing.T) { - run := cltest.MustInsertPipelineRun(t, db) - trrs := pipeline.TaskRunResults{ pipeline.TaskRunResult{ IsTerminal: true, + Task: &pipeline.HTTPTask{BaseTask: pipeline.BaseTask{ + Index: 1, + }}, Result: pipeline.Result{ - Value: []interface{}{1, nil}, - Error: pipeline.FinalErrors([]null.String{ - null.String{}, - null.StringFrom(`Random: String, foo`), - }), + Value: 1, + Error: nil, }, FinishedAt: time.Now(), }, @@ -88,8 +81,8 @@ func Test_PipelineORM_UpdatePipelineRun(t *testing.T) { err := orm.UpdatePipelineRun(db, &run, trrs.FinalResult()) require.NoError(t, err) - require.Equal(t, []interface{}{float64(1), nil}, run.Outputs.Val) - require.Equal(t, []interface{}{nil, "Random: String, foo"}, run.Errors.Val) + require.Equal(t, []interface{}{nil, float64(1)}, run.Outputs.Val) + require.Equal(t, pipeline.RunErrors([]null.String{null.StringFrom("Random: String, foo"), null.String{}}), run.Errors) require.NotNil(t, run.FinishedAt) }) } diff --git a/core/services/pipeline/runner.go b/core/services/pipeline/runner.go index 4d1bbda61c8..728e19bed12 100644 --- a/core/services/pipeline/runner.go +++ b/core/services/pipeline/runner.go @@ -9,8 +9,6 @@ import ( "time" "github.com/jpillora/backoff" - "gopkg.in/guregu/null.v4" - "github.com/smartcontractkit/chainlink/core/store/models" "github.com/pkg/errors" @@ -95,10 +93,7 @@ func (r *runner) Start() error { for { select { case <-newRunEvents: - _, err = r.processRun() - if err != nil { - logger.Errorf("Error processing incomplete task runs: %v", err) - } + r.processUnfinishedRuns() case <-r.chStop: return } @@ -180,17 +175,13 @@ func (r *runner) ResultsForRun(ctx context.Context, runID int64) ([]Result, erro // NOTE: This could potentially run on a different machine in the cluster than // the one that originally added the job run. 
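// processUnfinishedRuns (below) claims at most one unfinished run from the
// database via the ORM and executes it; errors are logged rather than
// returned so that the caller's select loop simply keeps polling.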
func (r *runner) processUnfinishedRuns() { - _, err := r.processRun() - if err != nil { - logger.Errorf("Error processing unfinished run: %v", err) - } -} - -func (r *runner) processRun() (anyRemaining bool, err error) { ctx, cancel := utils.CombinedContext(r.chStop, r.config.JobPipelineMaxRunDuration()) defer cancel() - return r.orm.ProcessNextUnfinishedRun(ctx, r.executeRun) + _, err := r.orm.ProcessNextUnfinishedRun(ctx, r.executeRun) + if err != nil { + logger.Errorf("Error processing unfinished run: %v", err) + } } type ( @@ -259,21 +250,12 @@ func (r *runner) ExecuteRun(ctx context.Context, spec Spec, l logger.Logger) (Ta // Generate a errored run from the spec. func (r *runner) panickedRunResults(spec Spec) ([]TaskRunResult, error) { var panickedTrrs []TaskRunResult - var finalVals []interface{} - var finalErrs FinalErrors - tasks, err := spec.TasksInDependencyOrderWithResultTask() + tasks, err := spec.TasksInDependencyOrder() if err != nil { return nil, err } f := time.Now() for _, task := range tasks { - if task.Type() == TaskTypeResult { - continue - } - if task.OutputTask() != nil && task.OutputTask().Type() == TaskTypeResult { - finalVals = append(finalVals, nil) - finalErrs = append(finalErrs, null.StringFrom(ErrRunPanicked.Error())) - } panickedTrrs = append(panickedTrrs, TaskRunResult{ Task: task, TaskRun: TaskRun{ @@ -284,20 +266,9 @@ func (r *runner) panickedRunResults(spec Spec) ([]TaskRunResult, error) { }, Result: Result{Value: nil, Error: ErrRunPanicked}, FinishedAt: time.Now(), - IsTerminal: false, + IsTerminal: task.OutputTask() == nil, }) } - panickedTrrs = append(panickedTrrs, TaskRunResult{ - TaskRun: TaskRun{ - CreatedAt: f, - FinishedAt: &f, - Index: 0, - DotID: ResultTaskDotID, - }, - Result: Result{Value: finalVals, Error: finalErrs}, - FinishedAt: f, - IsTerminal: true, - }) return panickedTrrs, nil } @@ -316,7 +287,7 @@ func (r *runner) executeRun(ctx context.Context, txdb *gorm.DB, spec Spec, l log } // Find "firsts" and work forwards - tasks, err := d.TasksInDependencyOrderWithResultTask() + tasks, err := d.TasksInDependencyOrder() if err != nil { return nil, false, err } @@ -470,18 +441,14 @@ func (r *runner) executeTaskRun(ctx context.Context, spec Spec, task Task, taskR } result := task.Run(ctx, taskRun, inputs) - if _, is := result.Error.(FinalErrors); !is && result.Error != nil { - f := append(loggerFields, "error", result.Error) - l.Warnw("Pipeline task run errored", f...) - } else { - f := append(loggerFields, "result", result.Value) - switch v := result.Value.(type) { - case []byte: - f = append(f, "resultString", fmt.Sprintf("%q", v)) - f = append(f, "resultHex", fmt.Sprintf("%x", v)) - } - l.Debugw("Pipeline task completed", f...) + loggerFields = append(loggerFields, "result value", result.Value) + loggerFields = append(loggerFields, "result error", result.Error) + switch v := result.Value.(type) { + case []byte: + loggerFields = append(loggerFields, "resultString", fmt.Sprintf("%q", v)) + loggerFields = append(loggerFields, "resultHex", fmt.Sprintf("%x", v)) } + l.Debugw("Pipeline task completed", loggerFields...) 
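	// NOTE: with the FinalErrors special case removed, every task result is
	// now logged the same way; []byte values additionally get quoted-string
	// and hex renderings in the fields above to keep binary outputs readable.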
return result } diff --git a/core/services/pipeline/runner_test.go b/core/services/pipeline/runner_test.go index 14a8b437c90..cbb3aa76797 100644 --- a/core/services/pipeline/runner_test.go +++ b/core/services/pipeline/runner_test.go @@ -23,8 +23,6 @@ import ( "github.com/stretchr/testify/require" ) -// TODO: Add a test for multiple terminal tasks after __result__ is deprecated -// https://www.pivotaltracker.com/story/show/176557536 func Test_PipelineRunner_ExecuteTaskRuns(t *testing.T) { store, cleanup := cltest.NewStore(t) defer cleanup() @@ -89,37 +87,17 @@ ds5 [type=http method="GET" url="%s" index=2] } trrs, err := r.ExecuteRun(context.Background(), spec, *logger.Default) require.NoError(t, err) - require.Len(t, trrs, len(ts)+1) // +1 for the result task - - var finalResults []pipeline.Result - for _, trr := range trrs { - if trr.IsTerminal { - finalResults = append(finalResults, trr.Result) - } - } - - require.Len(t, finalResults, 1) - result := finalResults[0] - - require.Len(t, result.Value, 3) - finalValues := result.Value.([]interface{}) - - { - // Median - finalValue := finalValues[0].(decimal.Decimal) - require.Equal(t, "9650000000000000000000", finalValue.String()) - - } - - { - // Strings 1 and 2 - require.Equal(t, "foo-index-1", finalValues[1].(string)) - require.Equal(t, "bar-index-2", finalValues[2].(string)) - } - - require.Len(t, result.Error, 3) - finalError := result.Error.(pipeline.FinalErrors) - require.False(t, finalError.HasErrors()) + require.Len(t, trrs, len(ts)) + + finalResults := trrs.FinalResult() + require.Len(t, finalResults.Values, 3) + require.Len(t, finalResults.Errors, 3) + assert.Equal(t, "9650000000000000000000", finalResults.Values[0].(decimal.Decimal).String()) + assert.Nil(t, finalResults.Errors[0]) + assert.Equal(t, "foo-index-1", finalResults.Values[1].(string)) + assert.Nil(t, finalResults.Errors[1]) + assert.Equal(t, "bar-index-2", finalResults.Values[2].(string)) + assert.Nil(t, finalResults.Errors[2]) var errorResults []pipeline.TaskRunResult for _, trr := range trrs { @@ -175,7 +153,7 @@ answer1 [type=median index=0]; require.NoError(t, err) for _, trr := range trrs { if trr.IsTerminal { - require.Equal(t, decimal.RequireFromString("1100"), trr.Result.Value.([]interface{})[0].(decimal.Decimal)) + require.Equal(t, decimal.RequireFromString("1100"), trr.Result.Value.(decimal.Decimal)) } } } @@ -199,13 +177,10 @@ ds_panic [type=panic msg="oh no"] ds1->ds_parse->ds_multiply->ds_panic;`, s.URL), }, *logger.Default) require.NoError(t, err) - require.Equal(t, 5, len(trrs)) + require.Equal(t, 4, len(trrs)) assert.Equal(t, []interface{}{nil}, trrs.FinalResult().Values) assert.Equal(t, pipeline.ErrRunPanicked.Error(), trrs.FinalResult().Errors[0].Error()) for _, trr := range trrs { - if trr.IsTerminal { - continue - } assert.Equal(t, null.NewString("pipeline run panicked", true), trr.Result.ErrorDB()) assert.Equal(t, true, trr.Result.OutputDB().Null) } diff --git a/core/services/pipeline/task.base.go b/core/services/pipeline/task.base.go new file mode 100644 index 00000000000..ccd21627305 --- /dev/null +++ b/core/services/pipeline/task.base.go @@ -0,0 +1,38 @@ +package pipeline + +import "time" + +type BaseTask struct { + outputTask Task + dotID string `mapstructure:"-"` + nPreds int `mapstructure:"-"` + Index int32 `mapstructure:"index" json:"-" ` + Timeout time.Duration `mapstructure:"timeout"` +} + +func (t BaseTask) NPreds() int { + return t.nPreds +} + +func (t BaseTask) DotID() string { + return t.dotID +} + +func (t BaseTask) OutputIndex() int32 
{ + return t.Index +} + +func (t BaseTask) OutputTask() Task { + return t.outputTask +} + +func (t *BaseTask) SetOutputTask(outputTask Task) { + t.outputTask = outputTask +} + +func (t BaseTask) TaskTimeout() (time.Duration, bool) { + if t.Timeout == time.Duration(0) { + return time.Duration(0), false + } + return t.Timeout, true +} diff --git a/core/services/pipeline/task.result.go b/core/services/pipeline/task.result.go deleted file mode 100644 index 9cf4c11f91e..00000000000 --- a/core/services/pipeline/task.result.go +++ /dev/null @@ -1,85 +0,0 @@ -package pipeline - -import ( - "context" - "database/sql/driver" - "encoding/json" - "fmt" - - "github.com/pkg/errors" - "gopkg.in/guregu/null.v4" -) - -// TODO: This task type is no longer necessary, we should deprecate/remove it. -// See: https://www.pivotaltracker.com/story/show/176557536 - -// ResultTask exists solely as a Postgres performance optimization. It's added -// automatically to the end of every pipeline, and it receives the outputs of all -// tasks that have no successor tasks. This allows the pipeline runner to detect -// when it has reached the end a given pipeline simply by checking the `successor_id` -// field, rather than having to try to SELECT all of the pipeline run's task runs, -// (which must be done from inside of a transaction, and causes lock contention -// and serialization anomaly issues). -type ResultTask struct { - BaseTask `mapstructure:",squash"` -} - -var _ Task = (*ResultTask)(nil) - -func (t *ResultTask) Type() TaskType { - return TaskTypeResult -} - -func (t *ResultTask) SetDefaults(inputValues map[string]string, g TaskDAG, self taskDAGNode) error { - return nil -} - -func (t *ResultTask) Run(_ context.Context, taskRun TaskRun, inputs []Result) Result { - values := make([]interface{}, len(inputs)) - errors := make(FinalErrors, len(inputs)) - for i, input := range inputs { - values[i] = input.Value - if input.Error != nil { - errors[i] = null.StringFrom(input.Error.Error()) - } - } - return Result{Value: values, Error: errors} -} - -// FIXME: This error/FinalErrors conflation exists solely because of the __result__ task. -// It is confusing and needs to go, making this note to remove it along with the -// special __result__ task. 
-// https://www.pivotaltracker.com/story/show/176557536 -type FinalErrors []null.String - -func (fe FinalErrors) HasErrors() bool { - for _, err := range fe { - if !err.IsZero() { - return true - } - } - return false -} - -func (fe FinalErrors) Error() string { - bs, err := json.Marshal(fe) - if err != nil { - return `["could not unmarshal final pipeline errors"]` - } - return string(bs) -} - -func (fe FinalErrors) Value() (driver.Value, error) { - return fe.Error(), nil -} - -func (fe *FinalErrors) Scan(value interface{}) error { - switch v := value.(type) { - case []byte: - return json.Unmarshal(v, fe) - case string: - return json.Unmarshal([]byte(v), fe) - default: - return errors.New(fmt.Sprintf("%s", value)) - } -} diff --git a/core/services/pipeline/test_helpers.go b/core/services/pipeline/test_helpers.go index 98f06f97408..c40241f658d 100644 --- a/core/services/pipeline/test_helpers.go +++ b/core/services/pipeline/test_helpers.go @@ -39,16 +39,6 @@ func (t *HTTPTask) HelperSetConfig(config Config) { t.config = config } -func (t ResultTask) ExportedEquals(otherTask Task) bool { - other, ok := otherTask.(*ResultTask) - if !ok { - return false - } else if t.Index != other.Index { - return false - } - return true -} - func (t MultiplyTask) ExportedEquals(otherTask Task) bool { other, ok := otherTask.(*MultiplyTask) if !ok { diff --git a/core/store/migrations/0020_remove_result_task.go b/core/store/migrations/0020_remove_result_task.go new file mode 100644 index 00000000000..7dd69411297 --- /dev/null +++ b/core/store/migrations/0020_remove_result_task.go @@ -0,0 +1,42 @@ +package migrations + +import ( + "github.com/go-gormigrate/gormigrate/v2" + "gorm.io/gorm" +) + +const ( + up20 = ` + ALTER TABLE pipeline_task_runs DROP CONSTRAINT chk_pipeline_task_run_fsm; + ALTER TABLE pipeline_task_runs + ADD CONSTRAINT chk_pipeline_task_run_fsm CHECK ( + ((finished_at IS NOT NULL) AND (num_nonnulls(output, error) != 2)) + OR + (num_nulls(finished_at, output, error) = 3) + ); + ` + down20 = ` + ALTER TABLE pipeline_task_runs DROP CONSTRAINT chk_pipeline_task_run_fsm; + ALTER TABLE pipeline_task_runs + ADD CONSTRAINT chk_pipeline_task_run_fsm CHECK ( + (((type <> 'result'::text) AND (((finished_at IS NULL) AND (error IS NULL) AND (output IS NULL)) + OR + ((finished_at IS NOT NULL) AND (NOT ((error IS NOT NULL) AND (output IS NOT NULL)))))) + OR + ((type = 'result'::text) AND (((output IS NULL) AND (error IS NULL) AND (finished_at IS NULL)) + OR + ((output IS NOT NULL) AND (error IS NOT NULL) AND (finished_at IS NOT NULL)))))); + ` +) + +func init() { + Migrations = append(Migrations, &gormigrate.Migration{ + ID: "0020_remove_result_task", + Migrate: func(db *gorm.DB) error { + return db.Exec(up20).Error + }, + Rollback: func(db *gorm.DB) error { + return db.Exec(down20).Error + }, + }) +} diff --git a/core/store/migrations/migrate_test.go b/core/store/migrations/migrate_test.go index 15104a0dadb..ca66322683a 100644 --- a/core/store/migrations/migrate_test.go +++ b/core/store/migrations/migrate_test.go @@ -212,7 +212,7 @@ func TestMigrate_PipelineTaskRunDotID(t *testing.T) { pr := pipeline.Run{ PipelineSpecID: ps.ID, Meta: pipeline.JSONSerializable{}, - Errors: pipeline.JSONSerializable{Null: true}, + Errors: pipeline.RunErrors{}, Outputs: pipeline.JSONSerializable{Null: true}, } require.NoError(t, orm.DB.Create(&pr).Error) @@ -231,7 +231,7 @@ func TestMigrate_PipelineTaskRunDotID(t *testing.T) { PipelineTaskSpecID int32 `json:"-"` } tr1 := PipelineTaskRun{ - Type: pipeline.TaskTypeResult, + Type: 
pipeline.TaskTypeAny, PipelineRunID: pr.ID, PipelineTaskSpecID: result.ID, Output: &pipeline.JSONSerializable{Null: true}, diff --git a/go.sum b/go.sum index 881ae1cea22..e7cb9d2a338 100644 --- a/go.sum +++ b/go.sum @@ -1268,8 +1268,6 @@ github.com/smartcontractkit/chainlink v0.9.5-0.20201207211610-6c7fee37d5b7/go.mo github.com/smartcontractkit/go-txdb v0.1.4-0.20210313013032-3a5ba5dff784 h1:8rMSBiLE7U01bZ2qEaLjH2e+K96nlDwM410CLa5bKzg= github.com/smartcontractkit/go-txdb v0.1.4-0.20210313013032-3a5ba5dff784/go.mod h1:DhAhxMXZpUJVGnT+p9IbzJoRKvlArO2pkHjnGX7o0n0= github.com/smartcontractkit/libocr v0.0.0-20201203233047-5d9b24f0cbb5/go.mod h1:bfdSuLnBWCkafDvPGsQ1V6nrXhg046gh227MKi4zkpc= -github.com/smartcontractkit/libocr v0.0.0-20210302210303-55a103050dc5 h1:O5Op1j4dpCAebdxoDTTrdL30wrZP1nNrvzX1GfcocsY= -github.com/smartcontractkit/libocr v0.0.0-20210302210303-55a103050dc5/go.mod h1:cm4TomvY09A1mADIHeIo1dOcOVL1EeSEqga4cmCxhl4= github.com/smartcontractkit/libocr v0.0.0-20210319202758-14aa50f869b7 h1:r6l0wcc7YwDMoobAhg4i2ZwhE7xsgd47XeU0gNSxIoc= github.com/smartcontractkit/libocr v0.0.0-20210319202758-14aa50f869b7/go.mod h1:cm4TomvY09A1mADIHeIo1dOcOVL1EeSEqga4cmCxhl4= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= From f1ea21e97d2b4745818a2e9eb740fce1cf5382f0 Mon Sep 17 00:00:00 2001 From: James Kong Date: Wed, 24 Mar 2021 00:59:47 +0800 Subject: [PATCH 086/116] UI for configuring logging --- docs/CHANGELOG.md | 2 + operator_ui/@types/core/store/models.d.ts | 13 ++ operator_ui/src/api/v2/index.ts | 2 + operator_ui/src/api/v2/logConfig.ts | 36 ++++++ operator_ui/src/components/KeyValueList.tsx | 2 +- .../Index.test.tsx} | 24 ++-- .../Configuration/{Index.js => Index.tsx} | 40 +++--- .../pages/Configuration/LoggingCard.test.tsx | 50 ++++++++ .../src/pages/Configuration/LoggingCard.tsx | 119 ++++++++++++++++++ operator_ui/src/theme.ts | 5 + .../support/factories/jsonApiLogConfig.ts | 15 +++ 11 files changed, 271 insertions(+), 37 deletions(-) create mode 100644 operator_ui/src/api/v2/logConfig.ts rename operator_ui/src/pages/{Configuration.test.js => Configuration/Index.test.tsx} (67%) rename operator_ui/src/pages/Configuration/{Index.js => Index.tsx} (70%) create mode 100644 operator_ui/src/pages/Configuration/LoggingCard.test.tsx create mode 100644 operator_ui/src/pages/Configuration/LoggingCard.tsx create mode 100644 operator_ui/support/factories/jsonApiLogConfig.ts diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md index 5255cd88d1c..77e34976700 100644 --- a/docs/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -26,6 +26,8 @@ Example settings: `DATABASE_BACKUP_MODE="lite"` and `DATABASE_BACKUP_FREQUENCY="1h"` will lead to a partial backup on node start and then again a partial backup every one hour. +- Logging can now be configured in the Operator UI. + ### Fixed - Chainlink node now automatically sets the correct nonce on startup if you are restoring from a previous backup (manual setnextnonce is no longer necessary). 
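As a usage note for the change above: the node exposes the new log configuration at /v2/log, and the Operator UI submits a plain JSON body of the form {"level":"info","sqlEnabled":false} (see the LoggingCard tests further down). The following is a minimal, illustrative Go sketch of exercising that endpoint directly; the localhost:6688 URL and the omission of session-cookie authentication are assumptions for brevity, not part of this patch.

package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// Same JSON shape the LoggingCard form submits via v2.logConfig.updateLogConfig.
	body := bytes.NewBufferString(`{"level":"debug","sqlEnabled":false}`)

	// PATCH /v2/log; a real node additionally requires an authenticated session cookie.
	req, err := http.NewRequest(http.MethodPatch, "http://localhost:6688/v2/log", body)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}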
diff --git a/operator_ui/@types/core/store/models.d.ts b/operator_ui/@types/core/store/models.d.ts
index 7d9f59d0a4a..b9ac0bc5551 100644
--- a/operator_ui/@types/core/store/models.d.ts
+++ b/operator_ui/@types/core/store/models.d.ts
@@ -539,6 +539,18 @@ declare module 'core/store/models' {
       dotDagSource: string
     }
   }
+
+  export type LogConfigLevel = 'debug' | 'info' | 'warn' | 'error'
+
+  export interface LogConfig {
+    level: LogConfigLevel
+    sqlEnabled: boolean
+  }
+
+  export interface LogConfigRequest {
+    level: LogConfigLevel
+    sqlEnabled: boolean
+  }
 }

 export interface PipelineTaskRun {
@@ -549,3 +561,4 @@ export interface PipelineTaskRun {
   dotId: string
   type: string
 }
+
diff --git a/operator_ui/src/api/v2/index.ts b/operator_ui/src/api/v2/index.ts
index acd668d097d..632135199f0 100644
--- a/operator_ui/src/api/v2/index.ts
+++ b/operator_ui/src/api/v2/index.ts
@@ -11,6 +11,7 @@ import { OcrKeys } from './ocrKeys'
 import { P2PKeys } from './p2pKeys'
 import { OcrRuns } from './ocrRuns'
 import { Jobs } from './jobs'
+import { LogConfig } from './logConfig'

 export class V2 {
   constructor(private api: Api) {}
@@ -27,4 +28,5 @@ export class V2 {
   public p2pKeys = new P2PKeys(this.api)
   public jobs = new Jobs(this.api)
   public ocrRuns = new OcrRuns(this.api)
+  public logConfig = new LogConfig(this.api)
 }
diff --git a/operator_ui/src/api/v2/logConfig.ts b/operator_ui/src/api/v2/logConfig.ts
new file mode 100644
index 00000000000..7a3d6f3c73d
--- /dev/null
+++ b/operator_ui/src/api/v2/logConfig.ts
@@ -0,0 +1,36 @@
+import * as jsonapi from 'utils/json-api-client'
+import { boundMethod } from 'autobind-decorator'
+import * as models from 'core/store/models'
+
+/**
+ * The log configuration endpoint
+ *
+ * @example "/v2/log"
+ */
+const ENDPOINT = '/v2/log'
+
+export class LogConfig {
+  constructor(private api: jsonapi.Api) {}
+
+  /**
+   * Get log configuration variables
+   */
+  @boundMethod
+  public getLogConfig(): Promise<jsonapi.ApiResponse<models.LogConfig>> {
+    return this.show()
+  }
+
+  @boundMethod
+  public updateLogConfig(
+    request: models.LogConfigRequest,
+  ): Promise<jsonapi.ApiResponse<models.LogConfig>> {
+    return this.update(request)
+  }
+
+  private show = this.api.fetchResource<{}, models.LogConfig, {}>(ENDPOINT)
+
+  private update = this.api.updateResource<
+    models.LogConfigRequest,
+    models.LogConfig
+  >(ENDPOINT)
+}
diff --git a/operator_ui/src/components/KeyValueList.tsx b/operator_ui/src/components/KeyValueList.tsx
index ada956703d2..ca359b8db5d 100644
--- a/operator_ui/src/components/KeyValueList.tsx
+++ b/operator_ui/src/components/KeyValueList.tsx
@@ -81,7 +81,7 @@ const HeadCol = ({ children }: HeadColProps) => (
 interface KeyValueListProps {
   entries: Array<Array<string>>
-  titleize: boolean
+  titleize?: boolean
   showHead: boolean
   title?: string
   error?: string
diff --git a/operator_ui/src/pages/Configuration.test.js b/operator_ui/src/pages/Configuration/Index.test.tsx
similarity index 67%
rename from operator_ui/src/pages/Configuration.test.js
rename to operator_ui/src/pages/Configuration/Index.test.tsx
index 95ea2292a63..76ea16c3289 100644
--- a/operator_ui/src/pages/Configuration.test.js
+++ b/operator_ui/src/pages/Configuration/Index.test.tsx
@@ -1,17 +1,10 @@
-/* eslint-env jest */
-import { ConnectedConfiguration as Configuration } from 'pages/Configuration/Index'
-import configurationFactory from 'factories/configuration'
 import React from 'react'
-import mountWithinStoreAndRouter from 'test-helpers/mountWithinStoreAndRouter'
+import { Route } from 'react-router-dom'
+import { mountWithProviders } from 'test-helpers/mountWithTheme'
 import syncFetch from 'test-helpers/syncFetch'
 import globPath from 'test-helpers/globPath'
+import Configuration from 'pages/Configuration/Index'
+import configurationFactory from 'factories/configuration'

 describe('pages/Configuration', () => {
   it('renders the list of configuration options', async () => {
     const configurationResponse = configurationFactory({
       BAND: 'Major Lazer',
       SINGER: 'Bob Marley',
     })
     global.fetch.getOnce(globPath('/v2/config'), configurationResponse)

-    const wrapper = mount()
-
+    const wrapper = mountWithProviders(
+      <Route component={Configuration} />,
+      {
+        initialEntries: [`/config`],
+      },
+    )
     await syncFetch(wrapper)
+
     expect(wrapper.text()).toContain('BAND')
     expect(wrapper.text()).toContain('Major Lazer')
     expect(wrapper.text()).toContain('SINGER')
diff --git a/operator_ui/src/pages/Configuration/Index.js b/operator_ui/src/pages/Configuration/Index.tsx
similarity index 70%
rename from operator_ui/src/pages/Configuration/Index.js
rename to operator_ui/src/pages/Configuration/Index.tsx
index 90e46d7dd0f..a18ef6ec593 100644
--- a/operator_ui/src/pages/Configuration/Index.js
+++ b/operator_ui/src/pages/Configuration/Index.tsx
@@ -1,3 +1,4 @@
+import React, { useEffect } from 'react'
 import { PaddedCard } from 'components/PaddedCard'
 import { KeyValueList } from 'components/KeyValueList'
 import Grid from '@material-ui/core/Grid'
@@ -5,26 +6,30 @@ import Typography from '@material-ui/core/Typography'
 import { fetchConfiguration } from 'actionCreators'
 import Content from 'components/Content'
 import DeleteJobRuns from 'pages/Configuration/DeleteJobRuns'
-import PropTypes from 'prop-types'
-import React, { useEffect } from 'react'
-import { connect } from 'react-redux'
+import { useDispatch, useSelector } from 'react-redux'
 import configurationSelector from 'selectors/configuration'
 import extractBuildInfo from 'utils/extractBuildInfo'
-import matchRouteAndMapDispatchToProps from 'utils/matchRouteAndMapDispatchToProps'
+import { LoggingCard } from './LoggingCard'

 const buildInfo = extractBuildInfo()

-export const Configuration = ({ fetchConfiguration, data }) => {
+export const Configuration = () => {
+  const dispatch = useDispatch()
+  const config = useSelector(configurationSelector)
+
   useEffect(() => {
     document.title = 'Configuration'
-    fetchConfiguration()
-  }, [fetchConfiguration])
+  })
+
+  useEffect(() => {
+    dispatch(fetchConfiguration())
+  }, [dispatch])

   return (
-
+
@@ -51,6 +56,9 @@ export const Configuration = ({ fetchConfiguration, data }) => {
+        <Grid item sm={12} md={8}>
+          <LoggingCard />
+        </Grid>
@@ -58,18 +66,4 @@ export const Configuration = ({ fetchConfiguration, data }) => {
     )
 }

-Configuration.propTypes = {
-  data: PropTypes.array.isRequired,
-}
-
-const mapStateToProps = (state) => {
-  const data = configurationSelector(state)
-  return { data }
-}
-
-export const ConnectedConfiguration = connect(
-  mapStateToProps,
-  matchRouteAndMapDispatchToProps({ fetchConfiguration }),
-)(Configuration)
-
-export default ConnectedConfiguration
+export default Configuration
diff --git a/operator_ui/src/pages/Configuration/LoggingCard.test.tsx b/operator_ui/src/pages/Configuration/LoggingCard.test.tsx
new file mode 100644
index 00000000000..9224581e7dc
--- /dev/null
+++ b/operator_ui/src/pages/Configuration/LoggingCard.test.tsx
@@ -0,0 +1,50 @@
+import React from 'react'
+import { mountWithProviders } from 'test-helpers/mountWithTheme'
+import { syncFetch } from 'test-helpers/syncFetch'
+import globPath from 'test-helpers/globPath'
+import { logConfigFactory } from 'factories/jsonApiLogConfig'
+import { LoggingCard } from './LoggingCard'
+// import { Checkbox } from '@material-ui/core'
+
+describe('pages/Configuration/LoggingCard', () => {
+  it('renders the logging configuration card', async () => {
+    const logConfig = logConfigFactory({
+      level: 'info',
+      sqlEnabled: false,
+    })
+
+    global.fetch.getOnce(globPath('/v2/log'), logConfig)
+
+    const wrapper = mountWithProviders(<LoggingCard />)
+    await syncFetch(wrapper)
+
+    expect(wrapper.find('input[name="level"]').first().props().value).toEqual(
+      'info',
+    )
+    expect(
+      wrapper.find('input[name="sqlEnabled"]').first().props().checked,
+    ).toEqual(false)
+  })
+
+  it('updates the logging configuration', async () => {
+    const logConfig = logConfigFactory({
+      level: 'info',
+      sqlEnabled: false,
+    })
+
+    global.fetch.getOnce(globPath('/v2/log'), logConfig)
+    const submit = global.fetch.patchOnce(globPath('/v2/log'), logConfig)
+
+    const wrapper = mountWithProviders(<LoggingCard />)
+    await syncFetch(wrapper)
+
+    // Cannot figure out how to change the select and checkbox inputs for submit
+
+    wrapper.find('form').simulate('submit')
+    await syncFetch(wrapper)
+
+    expect(submit.lastCall()[1].body).toEqual(
+      '{"level":"info","sqlEnabled":false}',
+    )
+  })
+})
diff --git a/operator_ui/src/pages/Configuration/LoggingCard.tsx b/operator_ui/src/pages/Configuration/LoggingCard.tsx
new file mode 100644
index 00000000000..0da19f7d288
--- /dev/null
+++ b/operator_ui/src/pages/Configuration/LoggingCard.tsx
@@ -0,0 +1,119 @@
+import React, { useEffect, useState } from 'react'
+import capitalize from 'lodash/capitalize'
+import { useDispatch } from 'react-redux'
+import { useFormik } from 'formik'
+import Button from 'components/Button'
+import Card from '@material-ui/core/Card'
+import CardContent from '@material-ui/core/CardContent'
+import CardHeader from '@material-ui/core/CardHeader'
+import Checkbox from '@material-ui/core/Checkbox'
+import FormControlLabel from '@material-ui/core/FormControlLabel'
+import FormGroup from '@material-ui/core/FormGroup'
+import MenuItem from '@material-ui/core/MenuItem'
+import TextField from '@material-ui/core/TextField'
+import * as models from 'core/store/models'
+import { v2 } from 'api'
+import { notifyError, notifySuccess } from 'actionCreators'
+import ErrorMessage from 'components/Notifications/DefaultError'
+import { useErrorHandler } from 'hooks/useErrorHandler'
+import { useLoadingPlaceholder } from 'hooks/useLoadingPlaceholder'
+
+const logLevels = ['debug', 'info', 'warn', 'error']
+
+type FormValues = {
+  level: models.LogConfigLevel
+  sqlEnabled: boolean
+}
+
+const LogConfigurationForm: React.FC<{ initialValues: FormValues }> = ({
+  initialValues,
+}) => {
+  const dispatch = useDispatch()
+  const formik = useFormik({
+    initialValues,
+    onSubmit: async (values) => {
+      try {
+        await v2.logConfig.updateLogConfig(values)
+
+        dispatch(notifySuccess(() => <>Logging Configuration Updated</>, {}))
+      } catch (e) {
+        dispatch(notifyError(ErrorMessage, e))
+      }
+    },
+  })
+
+  return (
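+    // The form below is driven entirely by formik: the level select and the
+    // sqlEnabled checkbox read from formik.values and write back through
+    // formik.handleChange; submitting PATCHes /v2/log via
+    // v2.logConfig.updateLogConfig.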
+    <form onSubmit={formik.handleSubmit}>
+      <TextField
+        id="select-level"
+        name="level"
+        label="Level"
+        select
+        value={formik.values.level}
+        onChange={formik.handleChange}
+      >
+        {logLevels.map((level) => (
+          <MenuItem key={level} value={level}>
+            {capitalize(level)}
+          </MenuItem>
+        ))}
+      </TextField>
+      <FormGroup>
+        <FormControlLabel
+          control={
+            <Checkbox
+              name="sqlEnabled"
+              checked={formik.values.sqlEnabled}
+              onChange={formik.handleChange}
+            />
+          }
+          label="Log SQL Statements"
+        />
+      </FormGroup>
+      <Button type="submit">Update</Button>
+    </form>
+ ) +} + +export const LoggingCard = () => { + const [logConfig, setLogConfig] = useState(null) + const { error, ErrorComponent, setError } = useErrorHandler() + const { LoadingPlaceholder } = useLoadingPlaceholder(!error && !logConfig) + + useEffect(() => { + async function fetch() { + try { + const res = await v2.logConfig.getLogConfig() + + setLogConfig(res.data.attributes) + } catch (e) { + setError(e) + } + } + + fetch() + }, []) + + return ( + + + + + + + {logConfig && } + + + ) +} diff --git a/operator_ui/src/theme.ts b/operator_ui/src/theme.ts index 30faa1b61c2..c88cc9fed6e 100644 --- a/operator_ui/src/theme.ts +++ b/operator_ui/src/theme.ts @@ -114,6 +114,11 @@ const mainTheme: ThemeOptions = { fontSize: '1rem', }, }, + MuiCardHeader: { + title: { + marginBottom: spacing.unit, + }, + }, }, typography: { useNextVariants: true, diff --git a/operator_ui/support/factories/jsonApiLogConfig.ts b/operator_ui/support/factories/jsonApiLogConfig.ts new file mode 100644 index 00000000000..92da6daa4db --- /dev/null +++ b/operator_ui/support/factories/jsonApiLogConfig.ts @@ -0,0 +1,15 @@ +import { ApiResponse } from 'utils/json-api-client' +import { LogConfig } from 'core/store/models' + +export function logConfigFactory(config: Partial) { + return { + data: { + id: 'log', + type: 'logs', + attributes: { + level: config.level || 'debug', + sqlEnabled: config.sqlEnabled || false, + }, + }, + } as ApiResponse +} From b833e4cdc13b1cbcb1eb4a401f2a630b431ffa49 Mon Sep 17 00:00:00 2001 From: James Kong Date: Fri, 26 Mar 2021 19:19:56 +0800 Subject: [PATCH 087/116] Add Deividas to code owners and update to test select change --- CODEOWNERS | 2 ++ .../src/pages/Configuration/LoggingCard.test.tsx | 14 +++++++++++--- .../src/pages/Configuration/LoggingCard.tsx | 2 +- 3 files changed, 14 insertions(+), 4 deletions(-) diff --git a/CODEOWNERS b/CODEOWNERS index f7ffecdd79a..2b29f48d622 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -1 +1,3 @@ core @se3000 @RyanRHall @spooktheducks @samsondav @j16r @connorwstein + +/operator-ui/ @DeividasK \ No newline at end of file diff --git a/operator_ui/src/pages/Configuration/LoggingCard.test.tsx b/operator_ui/src/pages/Configuration/LoggingCard.test.tsx index 9224581e7dc..54f3c84e853 100644 --- a/operator_ui/src/pages/Configuration/LoggingCard.test.tsx +++ b/operator_ui/src/pages/Configuration/LoggingCard.test.tsx @@ -4,7 +4,7 @@ import { syncFetch } from 'test-helpers/syncFetch' import globPath from 'test-helpers/globPath' import { logConfigFactory } from 'factories/jsonApiLogConfig' import { LoggingCard } from './LoggingCard' -// import { Checkbox } from '@material-ui/core' +import { act } from 'react-dom/test-utils' describe('pages/Configuration/LoggingCard', () => { it('renders the logging configuration card', async () => { @@ -38,13 +38,21 @@ describe('pages/Configuration/LoggingCard', () => { const wrapper = mountWithProviders() await syncFetch(wrapper) - // Cannot figure out how to change the select and checkbox inputs for submit + act(() => { + const selectInput = wrapper.find('#select-level').first() + const selectOnChange = selectInput.prop('onChange') + if (selectOnChange) { + selectOnChange({ + target: { name: 'level', value: 'debug' }, + } as any) + } + }) wrapper.find('form').simulate('submit') await syncFetch(wrapper) expect(submit.lastCall()[1].body).toEqual( - '{"level":"info","sqlEnabled":false}', + '{"level":"debug","sqlEnabled":false}', ) }) }) diff --git a/operator_ui/src/pages/Configuration/LoggingCard.tsx 
index 0da19f7d288..f1db2e3c30e 100644
--- a/operator_ui/src/pages/Configuration/LoggingCard.tsx
+++ b/operator_ui/src/pages/Configuration/LoggingCard.tsx
@@ -100,7 +100,7 @@ export const LoggingCard = () => {
     }

     fetch()
-  }, [])
+  }, [setError])

   return (

From 881c990d17cf168f3c164f9f0d1c27610467ad18 Mon Sep 17 00:00:00 2001
From: James Kong
Date: Fri, 26 Mar 2021 20:06:18 +0800
Subject: [PATCH 088/116] Fix flaky remote client test

---
 core/cmd/remote_client_test.go | 15 +--------------
 1 file changed, 1 insertion(+), 14 deletions(-)

diff --git a/core/cmd/remote_client_test.go b/core/cmd/remote_client_test.go
index 56d08b484b0..85480bab9c2 100644
--- a/core/cmd/remote_client_test.go
+++ b/core/cmd/remote_client_test.go
@@ -1309,21 +1309,8 @@ func TestClient_AutoLogin(t *testing.T) {
 func TestClient_SetLogConfig(t *testing.T) {
 	t.Parallel()

-	config, cleanup := cltest.NewConfig(t)
-	defer cleanup()
-	app, cleanup := cltest.NewApplicationWithConfig(t, config)
-	defer cleanup()
-	require.NoError(t, app.Start())
-
-	user := cltest.MustRandomUser()
-	require.NoError(t, app.Store.SaveUser(&user))
-	sr := models.SessionRequest{
-		Email:    user.Email,
-		Password: cltest.Password,
-	}
+	app := startNewApplication(t)
 	client, _ := app.NewClientAndRenderer()
-	client.CookieAuthenticator = cmd.NewSessionCookieAuthenticator(app.Config.Config, &cmd.MemoryCookieStore{})
-	client.HTTP = cmd.NewAuthenticatedHTTPClient(config, client.CookieAuthenticator, sr)

 	infoLevel := "warn"
 	set := flag.NewFlagSet("loglevel", 0)

From 9e56c80ac4f99abea645065d8321f2307004e102 Mon Sep 17 00:00:00 2001
From: Sam
Date: Fri, 26 Mar 2021 12:37:22 +0000
Subject: [PATCH 089/116] Wait for app to fully start in
 TestOCRKeysController_Delete_InvalidOCRKey

---
 core/web/ocr_keys_controller_test.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/core/web/ocr_keys_controller_test.go b/core/web/ocr_keys_controller_test.go
index 7ca771bf87a..c19e6f2d2e3 100644
--- a/core/web/ocr_keys_controller_test.go
+++ b/core/web/ocr_keys_controller_test.go
@@ -112,7 +112,7 @@ func setupOCRKeysControllerTests(t *testing.T) (cltest.HTTPClientCleaner, *offch
 	app, cleanup := cltest.NewApplication(t,
 		eth.NewClientWith(rpcClient, gethClient),
 	)
-	require.NoError(t, app.Start())
+	require.NoError(t, app.StartAndConnect())
 	client := app.NewHTTPClient()

 	OCRKeyStore := app.GetStore().OCRKeyStore

From 6abe55523c09319b69c01729180f9a2143527142 Mon Sep 17 00:00:00 2001
From: Sam
Date: Fri, 26 Mar 2021 12:53:29 +0000
Subject: [PATCH 090/116] Try setting head tracker buffer size to something
 large; attempt to fix TestIntegration_HttpRequestWithHeaders

---
 core/internal/features_test.go | 1 +
 1 file changed, 1 insertion(+)

diff --git a/core/internal/features_test.go b/core/internal/features_test.go
index cda2b34333d..2afa7c3646b 100644
--- a/core/internal/features_test.go
+++ b/core/internal/features_test.go
@@ -89,6 +89,7 @@ func TestIntegration_HttpRequestWithHeaders(t *testing.T) {
 	config, cfgCleanup := cltest.NewConfig(t)
 	defer cfgCleanup()
 	config.Set("ADMIN_CREDENTIALS_FILE", "")
+	config.Set("ETH_HEAD_TRACKER_MAX_BUFFER_SIZE", 99)

 	rpcClient, gethClient, sub, assertMocksCalled := cltest.NewEthMocks(t)
 	defer assertMocksCalled()

From 41ef633b547489952f7ca3725ebfa338a032f37d Mon Sep 17 00:00:00 2001
From: Sam
Date: Tue, 23 Mar 2021 14:27:22 +0000
Subject: [PATCH 091/116] Add EthResender which periodically retransmits
 eth_txes

One minor design irritation in the existing EthBroadcaster/EthConfirmer model is
that there is no explicit periodic resending of transactions that are still
unconfirmed. It has been observed anecdotally many times in production that
sent transactions can "disappear". Networking issues, bugs in the eth node,
mempool pressure ejecting transactions, eth nodes being switched out behind
load balancers: the list goes on. Nobody has noticed a problem up until now
because the gas bumper implicitly resends transactions at
ETH_GAS_BUMP_THRESHOLD anyway.

Now that gas bumping can be disabled, the problem has become obvious: without
bumping, some transactions can disappear and never be resent again, leading
to a stuck node.

This commit implements explicit periodic resending of unconfirmed
transactions that runs parallel to and independent of the existing gas
bumper. This has two advantages:

1. It does not rely on gas bumping settings to ensure our transactions always
   end up in the mempool
2. It continues to resend existing transactions even in the event that heads
   are delayed (new heads are no longer required to ensure that transactions
   are kept in the mempool)

This change is likely to save users a bit of gas and shorten inclusion time
for transactions in some cases, since we resend dropped transactions instead
of waiting to bump gas. It does this at the expense of placing slightly more
load on the eth node.
---
 core/internal/cltest/cltest.go                |  19 +--
 core/internal/cltest/factories.go             |  12 +-
 core/internal/cltest/simulated_backend.go     |   4 +
 core/internal/mocks/client.go                 |  14 ++
 .../bulletprooftxmanager/eth_confirmer.go     |  53 +++---
 .../eth_confirmer_test.go                     |  12 +-
 .../bulletprooftxmanager/eth_resender.go      | 158 ++++++++++++++++++
 .../bulletprooftxmanager/eth_resender_test.go |  98 +++++++++++
 .../bulletprooftxmanager/nonce_syncer.go      |   1 +
 core/services/eth/client.go                   |  22 +++
 core/services/eth/null_client.go              |   4 +
 core/services/log/broadcaster_test.go         |   2 +-
 core/store/models/eth.go                      |  10 +-
 core/store/orm/config.go                      |  10 ++
 core/store/orm/config_reader.go               |   1 +
 core/store/orm/schema.go                      |   1 +
 docs/CHANGELOG.md                             |   6 +-
 17 files changed, 379 insertions(+), 48 deletions(-)
 create mode 100644 core/services/bulletprooftxmanager/eth_resender.go
 create mode 100644 core/services/bulletprooftxmanager/eth_resender_test.go

diff --git a/core/internal/cltest/cltest.go b/core/internal/cltest/cltest.go
index 58de042dac3..f7ddec09e52 100644
--- a/core/internal/cltest/cltest.go
+++ b/core/internal/cltest/cltest.go
@@ -189,6 +189,8 @@ func NewConfig(t testing.TB) (*TestConfig, func()) {
 	config.Set("DEFAULT_HTTP_ALLOW_UNRESTRICTED_NETWORK_ACCESS", true)
 	// Disable gas updater for application tests
 	config.Set("GAS_UPDATER_ENABLED", false)
+	// Disable tx re-sending for application tests
+	config.Set("ETH_TX_RESEND_AFTER_THRESHOLD", 0)
 	return config, cleanup
 }

@@ -1835,15 +1837,11 @@ type testifyExpectationsAsserter interface {
 	AssertExpectations(t mock.TestingT) bool
 }

-type fakeT struct {
-	didFail bool
-}
+type fakeT struct{}

-func (t fakeT) Logf(format string, args ...interface{})   {}
-func (t fakeT) Errorf(format string, args ...interface{}) {}
-func (t fakeT) FailNow() {
-	t.didFail = true
-}
+func (ft fakeT) Logf(format string, args ...interface{})   {}
+func (ft fakeT) Errorf(format string, args ...interface{}) {}
+func (ft fakeT) FailNow()                                  {}

 func EventuallyExpectationsMet(t *testing.T, mock testifyExpectationsAsserter, timeout time.Duration, interval time.Duration) {
 	t.Helper()
 	chTimeout := time.After(timeout)
 	for {
 		var ft fakeT
-		mock.AssertExpectations(ft)
-		if !ft.didFail {
+		success := mock.AssertExpectations(ft)
+		if success {
 			return
 		}
 		select {
 		case <-chTimeout:
+			mock.AssertExpectations(t)
 			t.FailNow()
 		default:
 			time.Sleep(interval)
diff --git a/core/internal/cltest/factories.go b/core/internal/cltest/factories.go
index 2053d7b9ec3..78dcf868830 100644
--- a/core/internal/cltest/factories.go
+++ b/core/internal/cltest/factories.go
@@ -514,11 +514,17 @@ func NewEthTx(t *testing.T, store *strpkg.Store, fromAddress common.Address) mod
 	}
 }

-func MustInsertUnconfirmedEthTxWithBroadcastAttempt(t *testing.T, store *strpkg.Store, nonce int64, fromAddress common.Address) models.EthTx {
-	timeNow := time.Now()
+func MustInsertUnconfirmedEthTxWithBroadcastAttempt(t *testing.T, store *strpkg.Store, nonce int64, fromAddress common.Address, opts ...interface{}) models.EthTx {
+	broadcastAt := time.Now()
+	for _, opt := range opts {
+		switch v := opt.(type) {
+		case time.Time:
+			broadcastAt = v
+		}
+	}
 	etx := NewEthTx(t, store, fromAddress)

-	etx.BroadcastAt = &timeNow
+	etx.BroadcastAt = &broadcastAt
 	n := nonce
 	etx.Nonce = &n
 	etx.State = models.EthTxUnconfirmed
diff --git a/core/internal/cltest/simulated_backend.go b/core/internal/cltest/simulated_backend.go
index 30b8eab533b..6a220a42892 100644
--- a/core/internal/cltest/simulated_backend.go
+++ b/core/internal/cltest/simulated_backend.go
@@ -416,6 +416,10 @@ func (c *SimulatedBackendClient) BatchCallContext(ctx context.Context, b []rpc.B
 	return nil
 }

+func (c *SimulatedBackendClient) RoundRobinBatchCallContext(ctx context.Context, b []rpc.BatchElem) error {
+	return c.BatchCallContext(ctx, b)
+}
+
 // Mine forces the simulated backend to produce a new block every 2 seconds
 func Mine(backend *backends.SimulatedBackend, blockTime time.Duration) (stopMining func()) {
 	timer := time.NewTicker(blockTime)
diff --git a/core/internal/mocks/client.go b/core/internal/mocks/client.go
index 5449138a59a..78b148c24cd 100644
--- a/core/internal/mocks/client.go
+++ b/core/internal/mocks/client.go
@@ -389,6 +389,20 @@ func (_m *Client) PendingNonceAt(ctx context.Context, account common.Address) (u
 	return r0, r1
 }

+// RoundRobinBatchCallContext provides a mock function with given fields: ctx, b
+func (_m *Client) RoundRobinBatchCallContext(ctx context.Context, b []rpc.BatchElem) error {
+	ret := _m.Called(ctx, b)
+
+	var r0 error
+	if rf, ok := ret.Get(0).(func(context.Context, []rpc.BatchElem) error); ok {
+		r0 = rf(ctx, b)
+	} else {
+		r0 = ret.Error(0)
+	}
+
+	return r0
+}
+
 // SendRawTx provides a mock function with given fields: bytes
 func (_m *Client) SendRawTx(bytes []byte) (common.Hash, error) {
 	ret := _m.Called(bytes)
diff --git a/core/services/bulletprooftxmanager/eth_confirmer.go b/core/services/bulletprooftxmanager/eth_confirmer.go
index 3b85560a115..30a8b6c8808 100644
--- a/core/services/bulletprooftxmanager/eth_confirmer.go
+++ b/core/services/bulletprooftxmanager/eth_confirmer.go
@@ -49,9 +49,17 @@ type ethConfirmer struct {
 	ctx       context.Context
 	ctxCancel context.CancelFunc
 	chDone    chan struct{}
+
+	ethResender *EthResender
 }

 func NewEthConfirmer(store *store.Store, config orm.ConfigReader) *ethConfirmer {
+	var ethResender *EthResender
+	if config.EthTxResendAfterThreshold() > 0 {
+		ethResender = NewEthResender(store.DB, store.EthClient, defaultResenderPollInterval, config.EthTxResendAfterThreshold())
+	} else {
+		logger.Info("ethResender: Disabled")
+	}
 	context, cancel := context.WithCancel(context.Background())
 	return &ethConfirmer{
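		// Positional composite literal: fields follow the ethConfirmer struct
		// order above; the final field is the ethResender constructed earlier,
		// which is nil when ETH_TX_RESEND_AFTER_THRESHOLD is set to 0.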
utils.StartStopOnce{}, @@ -62,6 +70,7 @@ func NewEthConfirmer(store *store.Store, config orm.ConfigReader) *ethConfirmer context, cancel, make(chan struct{}), + ethResender, } } @@ -96,6 +105,9 @@ func (ec *ethConfirmer) Start() error { logger.Infow(fmt.Sprintf("EthConfirmer: Gas bumping is enabled, unconfirmed transactions will have their gas price bumped every %d blocks", ec.config.EthGasBumpThreshold()), "ethGasBumpThreshold", ec.config.EthGasBumpThreshold()) } go ec.runLoop() + if ec.ethResender != nil { + ec.ethResender.Start() + } return nil } @@ -103,6 +115,9 @@ func (ec *ethConfirmer) Close() error { if !ec.OkayToStop() { return errors.New("EthConfirmer has already been stopped") } + if ec.ethResender != nil { + ec.ethResender.Stop() + } ec.ctxCancel() <-ec.chDone return nil @@ -393,12 +408,11 @@ func (ec *ethConfirmer) saveFetchedReceipts(ctx context.Context, receipts []Rece // In this case we mark these transactions as 'confirmed_missing_receipt' to // prevent gas bumping. // -// FIXME: We should continue to attempt to resend eth_txes in this state on +// NOTE: We continue to attempt to resend eth_txes in this state on // every head to guard against the extremely rare scenario of nonce gap due to // reorg that excludes the transaction (from another wallet) that had this // nonce (until finality depth is reached, after which we make the explicit -// decision to give up). -// https://www.pivotaltracker.com/story/show/177389604 +// decision to give up). This is done in the EthResender. // // We will continue to try to fetch a receipt for these attempts until all // attempts are below the finality depth from current head. @@ -699,6 +713,7 @@ func (ec *ethConfirmer) handleInProgressAttempt(ctx context.Context, etx models. return errors.Errorf("invariant violation: expected eth_tx_attempt %v to be in_progress, it was %s", attempt.ID, attempt.State) } + now := time.Now() sendError := sendTransaction(ctx, ec.ethClient, attempt) if sendError.IsTerminallyUnderpriced() { @@ -750,20 +765,6 @@ func (ec *ethConfirmer) handleInProgressAttempt(ctx context.Context, etx models. "blockHeight", blockHeight, "id", "RPCTxFeeCapExceeded", ) - if len(etx.EthTxAttempts) > 0 { - previousAttempt := etx.EthTxAttempts[0] - sendError2 := sendTransaction(ctx, ec.ethClient, previousAttempt) - l := logger.Default - if sendError2 != nil { - l = logger.CreateLogger(l.With("err", sendError2)) - } - l.Infow("EthConfirmer: optimistic re-send of prior attempt due to exceeding eth node's RPCTxFeeCap", - "ethTxID", etx.ID, - "id", "RPCTxFeeCapExceeded", - ) - } else { - logger.Errorw("EthConfirmer: invariant violation, expected eth_tx to have 1 or more attempts", "ethTxID", etx.ID) - } return deleteInProgressAttempt(ec.store.DB, attempt) } @@ -820,12 +821,12 @@ func (ec *ethConfirmer) handleInProgressAttempt(ctx context.Context, etx models. 
"ACTION REQUIRED: Chainlink wallet with address 0x%x is OUT OF FUNDS", attempt.ID, attempt.Hash, attempt.GasPrice.String(), sendError.Error(), etx.FromAddress, ), "err", sendError) - return saveInsufficientEthAttempt(ec.store.DB, &attempt) + return saveInsufficientEthAttempt(ec.store.DB, &attempt, now) } if sendError == nil { logger.Debugw("EthConfirmer: successfully broadcast transaction", "ethTxID", etx.ID, "ethTxAttemptID", attempt.ID, "txHash", attempt.Hash.Hex()) - return saveSentAttempt(ec.store.DB, &attempt) + return saveSentAttempt(ec.store.DB, &attempt, now) } // Any other type of error is considered temporary or resolvable by the @@ -845,26 +846,32 @@ func deleteInProgressAttempt(db *gorm.DB, attempt models.EthTxAttempt) error { return errors.Wrap(db.Exec(`DELETE FROM eth_tx_attempts WHERE id = ?`, attempt.ID).Error, "deleteInProgressAttempt failed") } -func saveSentAttempt(db *gorm.DB, attempt *models.EthTxAttempt) error { +func saveSentAttempt(db *gorm.DB, attempt *models.EthTxAttempt, broadcastAt time.Time) error { if attempt.State != models.EthTxAttemptInProgress { return errors.New("expected state to be in_progress") } attempt.State = models.EthTxAttemptBroadcast return postgres.GormTransaction(context.Background(), db, func(tx *gorm.DB) error { - if err := tx.Exec(`UPDATE eth_txes SET broadcast_at = NOW() WHERE id = ? AND broadcast_at IS NULL`, attempt.EthTxID).Error; err != nil { + // In case of null broadcast_at (shouldn't happen) we don't want to + // update anyway because it indicates a state where broadcast_at makes + // no sense e.g. fatal_error + if err := tx.Exec(`UPDATE eth_txes SET broadcast_at = ? WHERE id = ? AND broadcast_at < ?`, broadcastAt, attempt.EthTxID, broadcastAt).Error; err != nil { return errors.Wrap(err, "saveSentAttempt failed") } return errors.Wrap(db.Save(attempt).Error, "saveSentAttempt failed") }) } -func saveInsufficientEthAttempt(db *gorm.DB, attempt *models.EthTxAttempt) error { +func saveInsufficientEthAttempt(db *gorm.DB, attempt *models.EthTxAttempt, broadcastAt time.Time) error { if !(attempt.State == models.EthTxAttemptInProgress || attempt.State == models.EthTxAttemptInsufficientEth) { return errors.New("expected state to be either in_progress or insufficient_eth") } attempt.State = models.EthTxAttemptInsufficientEth return postgres.GormTransaction(context.Background(), db, func(tx *gorm.DB) error { - if err := tx.Exec(`UPDATE eth_txes SET broadcast_at = NOW() WHERE id = ? AND broadcast_at IS NULL`, attempt.EthTxID).Error; err != nil { + // In case of null broadcast_at (shouldn't happen) we don't want to + // update anyway because it indicates a state where broadcast_at makes + // no sense e.g. fatal_error + if err := tx.Exec(`UPDATE eth_txes SET broadcast_at = ? WHERE id = ? 
AND broadcast_at < ?`, broadcastAt, attempt.EthTxID, broadcastAt).Error; err != nil { return errors.Wrap(err, "saveInsufficientEthAttempt failed") } return errors.Wrap(db.Save(attempt).Error, "saveInsufficientEthAttempt failed") diff --git a/core/services/bulletprooftxmanager/eth_confirmer_test.go b/core/services/bulletprooftxmanager/eth_confirmer_test.go index 7018c4cfdf6..2df50652c16 100644 --- a/core/services/bulletprooftxmanager/eth_confirmer_test.go +++ b/core/services/bulletprooftxmanager/eth_confirmer_test.go @@ -1128,7 +1128,8 @@ func TestEthConfirmer_RebroadcastWhereNecessary(t *testing.T) { require.NoError(t, ec.RebroadcastWhereNecessary(context.TODO(), keys, currentHead)) }) - etx := cltest.MustInsertUnconfirmedEthTxWithBroadcastAttempt(t, store, nonce, fromAddress) + originalBroadcastAt := time.Unix(1616509100, 0) + etx := cltest.MustInsertUnconfirmedEthTxWithBroadcastAttempt(t, store, nonce, fromAddress, originalBroadcastAt) nonce++ attempt1_1 := etx.EthTxAttempts[0] attempt1_1.BroadcastBeforeBlockNum = &oldEnough @@ -1195,7 +1196,7 @@ func TestEthConfirmer_RebroadcastWhereNecessary(t *testing.T) { ethClient = new(mocks.Client) bulletprooftxmanager.SetEthClientOnEthConfirmer(ethClient, ec) - t.Run("resubmits previous attempt and continues if bumped attempt transaction was too expensive", func(t *testing.T) { + t.Run("does nothing and continues if bumped attempt transaction was too expensive", func(t *testing.T) { ethTx := gethTypes.Transaction{} kst.On("SignTx", mock.AnythingOfType("accounts.Account"), @@ -1214,10 +1215,6 @@ func TestEthConfirmer_RebroadcastWhereNecessary(t *testing.T) { ethClient.On("SendTransaction", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool { return tx.Nonce() == uint64(*etx.Nonce) && tx.GasPrice().Int64() == int64(25000000000) })).Return(errors.New("tx fee (1.10 ether) exceeds the configured cap (1.00 ether)")).Once() - // Once for the resubmitted previous attempt - ethClient.On("SendTransaction", mock.Anything, mock.MatchedBy(func(tx *gethTypes.Transaction) bool { - return tx.Nonce() == uint64(*etx.Nonce) && tx.GasPrice().Int64() == int64(342) - })).Return(nil).Once() // Do the thing require.NoError(t, ec.RebroadcastWhereNecessary(context.TODO(), keys, currentHead)) @@ -1228,6 +1225,9 @@ func TestEthConfirmer_RebroadcastWhereNecessary(t *testing.T) { // Did not create an additional attempt require.Len(t, etx.EthTxAttempts, 1) + // broadcast_at did not change + require.Equal(t, etx.BroadcastAt.Unix(), originalBroadcastAt.Unix()) + kst.AssertExpectations(t) ethClient.AssertExpectations(t) }) diff --git a/core/services/bulletprooftxmanager/eth_resender.go b/core/services/bulletprooftxmanager/eth_resender.go new file mode 100644 index 00000000000..4f7fac5896a --- /dev/null +++ b/core/services/bulletprooftxmanager/eth_resender.go @@ -0,0 +1,158 @@ +package bulletprooftxmanager + +import ( + "context" + "fmt" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/rpc" + "github.com/lib/pq" + "github.com/pkg/errors" + "github.com/smartcontractkit/chainlink/core/logger" + "github.com/smartcontractkit/chainlink/core/services/eth" + "github.com/smartcontractkit/chainlink/core/store/models" + "github.com/smartcontractkit/chainlink/core/utils" + "gorm.io/gorm" +) + +// pollInterval is the maximum amount of time in addition to +// EthTxResendAfterThreshold that we will wait before resending an attempt +const defaultResenderPollInterval = 5 * time.Second + +// 
EthResender periodically picks up transactions that have been languishing +// unconfirmed for a configured amount of time without being sent, and sends +// their highest priced attempt again. This helps to defend against geth/parity +// silently dropping txes, or txes being ejected from the mempool. +// +// Previously we relied on the bumper to do this for us implicitly but there +// can occasionally be problems with this (e.g. abnormally long block times, or +// if gas bumping is disabled) +type EthResender struct { + db *gorm.DB + ethClient eth.Client + interval time.Duration + ageThreshold time.Duration + + chStop chan struct{} + chDone chan struct{} +} + +func NewEthResender(db *gorm.DB, ethClient eth.Client, pollInterval, ethTxResendAfterThreshold time.Duration) *EthResender { + if ethTxResendAfterThreshold == 0 { + panic("EthResender requires a non-zero threshold") + } + return &EthResender{ + db, + ethClient, + pollInterval, + ethTxResendAfterThreshold, + make(chan struct{}), + make(chan struct{}), + } +} + +func (er *EthResender) Start() { + logger.Infof("EthResender: Enabled with poll interval of %s and age threshold of %s", er.interval, er.ageThreshold) + go er.runLoop() +} + +func (er *EthResender) Stop() { + close(er.chStop) + <-er.chDone +} + +func (er *EthResender) runLoop() { + defer close(er.chDone) + + if err := er.resendUnconfirmed(); err != nil { + logger.Warnw("EthResender: failed to resend unconfirmed transactions", "err", err) + } + + ticker := time.NewTicker(utils.WithJitter(er.interval)) + defer ticker.Stop() + for { + select { + case <-er.chStop: + return + case <-ticker.C: + if err := er.resendUnconfirmed(); err != nil { + logger.Warnw("EthResender: failed to resend unconfirmed transactions", "err", err) + } + } + } +} + +func (er *EthResender) resendUnconfirmed() error { + olderThan := time.Now().Add(-er.ageThreshold) + attempts, err := FindEthTxesRequiringResend(er.db, olderThan) + if err != nil { + return errors.Wrap(err, "failed to findEthTxAttemptsRequiringReceiptFetch") + } + + if len(attempts) == 0 { + return nil + } + + logger.Debugw(fmt.Sprintf("EthResender: re-sending %d transactions that were last sent over %s ago", len(attempts), er.ageThreshold), "n", len(attempts)) + + var reqs []rpc.BatchElem + for _, attempt := range attempts { + req := rpc.BatchElem{ + Method: "eth_sendRawTransaction", + Args: []interface{}{hexutil.Encode(attempt.SignedRawTx)}, + Result: &common.Hash{}, + } + + reqs = append(reqs, req) + } + + now := time.Now() + if err := er.ethClient.RoundRobinBatchCallContext(context.Background(), reqs); err != nil { + return errors.Wrap(err, "failed to re-send transactions") + } + + var succeeded []int64 + for i, req := range reqs { + if req.Error == nil { + succeeded = append(succeeded, attempts[i].EthTxID) + } + } + + if err := er.updateBroadcastAts(now, succeeded); err != nil { + return errors.Wrap(err, "failed to update last succeeded on attempts") + } + nSuccess := len(succeeded) + nErrored := len(attempts) - nSuccess + + logger.Debugw("EthResender: completed", "nSuccess", nSuccess, "nErrored", nErrored) + + return nil +} + +// FindEthTxesRequiringResend returns the highest priced attempt for each +// eth_tx that was last sent before or at the given time +func FindEthTxesRequiringResend(db *gorm.DB, olderThan time.Time) (attempts []models.EthTxAttempt, err error) { + err = db.Raw(` +SELECT DISTINCT ON (eth_tx_id) eth_tx_attempts.* +FROM eth_tx_attempts +JOIN eth_txes ON eth_txes.id = eth_tx_attempts.eth_tx_id AND eth_txes.state IN 
('unconfirmed', 'confirmed_missing_receipt') +WHERE eth_tx_attempts.state <> 'in_progress' AND eth_txes.broadcast_at <= ? +ORDER BY eth_tx_attempts.eth_tx_id ASC, eth_txes.nonce ASC, eth_tx_attempts.gas_price DESC +`, olderThan). + Find(&attempts).Error + + return +} + +func (er *EthResender) updateBroadcastAts(now time.Time, etxIDs []int64) error { + // Deliberately do nothing on NULL broadcast_at because that indicates the + // tx has been moved into a state where broadcast_at is not relevant, e.g. + // fatally errored. + // + // Since we may have raced with the EthConfirmer (totally OK since highest + // priced transaction always wins) we only want to update broadcast_at if + // our version is later. + return er.db.Exec(`UPDATE eth_txes SET broadcast_at = ? WHERE id = ANY(?) AND broadcast_at < ?`, now, pq.Array(etxIDs), now).Error +} diff --git a/core/services/bulletprooftxmanager/eth_resender_test.go b/core/services/bulletprooftxmanager/eth_resender_test.go new file mode 100644 index 00000000000..54cad70deed --- /dev/null +++ b/core/services/bulletprooftxmanager/eth_resender_test.go @@ -0,0 +1,98 @@ +package bulletprooftxmanager_test + +import ( + "math/big" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/rpc" + "github.com/smartcontractkit/chainlink/core/internal/cltest" + "github.com/smartcontractkit/chainlink/core/internal/mocks" + "github.com/smartcontractkit/chainlink/core/services/bulletprooftxmanager" + "github.com/smartcontractkit/chainlink/core/store/models" + "github.com/smartcontractkit/chainlink/core/utils" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func Test_EthResender_FindEthTxesRequiringResend(t *testing.T) { + t.Parallel() + + store, cleanup := cltest.NewStore(t) + defer cleanup() + + key := cltest.MustInsertRandomKey(t, store.DB) + fromAddress := key.Address.Address() + + t.Run("returns nothing if there are no transactions", func(t *testing.T) { + olderThan := time.Now() + attempts, err := bulletprooftxmanager.FindEthTxesRequiringResend(store.DB, olderThan) + require.NoError(t, err) + assert.Len(t, attempts, 0) + }) + + etxs := []models.EthTx{ + cltest.MustInsertUnconfirmedEthTxWithBroadcastAttempt(t, store, 0, fromAddress, time.Unix(1616509100, 0)), + cltest.MustInsertUnconfirmedEthTxWithBroadcastAttempt(t, store, 1, fromAddress, time.Unix(1616509200, 0)), + cltest.MustInsertUnconfirmedEthTxWithBroadcastAttempt(t, store, 2, fromAddress, time.Unix(1616509300, 0)), + } + attempt1_2 := newBroadcastEthTxAttempt(t, etxs[0].ID, store) + attempt1_2.GasPrice = *utils.NewBig(big.NewInt(10)) + require.NoError(t, store.DB.Create(&attempt1_2).Error) + + attempt3_2 := newInProgressEthTxAttempt(t, etxs[2].ID, store) + attempt3_2.GasPrice = *utils.NewBig(big.NewInt(10)) + require.NoError(t, store.DB.Create(&attempt3_2).Error) + + t.Run("returns the highest price attempt for each transaction that was last broadcast before or on the given time", func(t *testing.T) { + olderThan := time.Unix(1616509200, 0) + attempts, err := bulletprooftxmanager.FindEthTxesRequiringResend(store.DB, olderThan) + require.NoError(t, err) + assert.Len(t, attempts, 2) + assert.Equal(t, attempt1_2.ID, attempts[0].ID) + assert.Equal(t, etxs[1].EthTxAttempts[0].ID, attempts[1].ID) + }) +} + +func Test_EthResender_Start(t *testing.T) { + t.Parallel() + + store, cleanup := cltest.NewStore(t) + defer cleanup() + key := cltest.MustInsertRandomKey(t, store.DB) + fromAddress := 
key.Address.Address() + + t.Run("resends transactions that have been languishing unconfirmed for too long", func(t *testing.T) { + ethClient := new(mocks.Client) + + er := bulletprooftxmanager.NewEthResender(store.DB, ethClient, 100*time.Millisecond, 1*time.Hour) + + originalBroadcastAt := time.Unix(1616509100, 0) + etx := cltest.MustInsertUnconfirmedEthTxWithBroadcastAttempt(t, store, 0, fromAddress, originalBroadcastAt) + etx2 := cltest.MustInsertUnconfirmedEthTxWithBroadcastAttempt(t, store, 1, fromAddress, originalBroadcastAt) + cltest.MustInsertUnconfirmedEthTxWithBroadcastAttempt(t, store, 2, fromAddress, time.Now().Add(1*time.Hour)) + + ethClient.On("RoundRobinBatchCallContext", mock.Anything, mock.MatchedBy(func(b []rpc.BatchElem) bool { + return len(b) == 2 && + b[0].Method == "eth_sendRawTransaction" && b[0].Args[0] == hexutil.Encode(etx.EthTxAttempts[0].SignedRawTx) && + b[1].Method == "eth_sendRawTransaction" && b[1].Args[0] == hexutil.Encode(etx2.EthTxAttempts[0].SignedRawTx) + })).Return(nil) + + func() { + er.Start() + defer er.Stop() + + cltest.EventuallyExpectationsMet(t, ethClient, 5*time.Second, 10*time.Millisecond) + }() + + err := store.DB.First(&etx).Error + require.NoError(t, err) + err = store.DB.First(&etx2).Error + require.NoError(t, err) + + assert.Greater(t, etx.BroadcastAt.Unix(), originalBroadcastAt.Unix()) + assert.Greater(t, etx2.BroadcastAt.Unix(), originalBroadcastAt.Unix()) + }) +} diff --git a/core/services/bulletprooftxmanager/nonce_syncer.go b/core/services/bulletprooftxmanager/nonce_syncer.go index a5a897f0bb8..8024066ec3d 100644 --- a/core/services/bulletprooftxmanager/nonce_syncer.go +++ b/core/services/bulletprooftxmanager/nonce_syncer.go @@ -249,6 +249,7 @@ func (s NonceSyncer) fastForwardNonceIfNecessary(ctx context.Context, address co // didn't actually broadcast the transaction, but including it // allows us to avoid changing the state machine limitations and // represents roughly the time we read the tx from the blockchain + // so we can pretty much assume it was "broadcast" at this time. ins.Etx.BroadcastAt = &now if err := dbtx.Create(&ins.Etx).Error; err != nil { return errors.Wrap(err, "NonceSyncer#fastForwardNonceIfNecessary failed to create eth_tx") diff --git a/core/services/eth/client.go b/core/services/eth/client.go index 14a6228516e..c7b34169461 100644 --- a/core/services/eth/client.go +++ b/core/services/eth/client.go @@ -6,6 +6,7 @@ import ( "net/url" "strings" "sync" + "sync/atomic" "github.com/smartcontractkit/chainlink/core/assets" "github.com/smartcontractkit/chainlink/core/logger" @@ -41,6 +42,7 @@ type Client interface { Call(result interface{}, method string, args ...interface{}) error CallContext(ctx context.Context, result interface{}, method string, args ...interface{}) error BatchCallContext(ctx context.Context, b []rpc.BatchElem) error + RoundRobinBatchCallContext(ctx context.Context, b []rpc.BatchElem) error // These methods are reimplemented due to a difference in how block header hashes are // calculated by Parity nodes running on Kovan. 
We have to return our own wrapper
@@ -94,6 +96,8 @@ type client struct {
 	SecondaryRPCClients []RPCClient
 	secondaryURLs       []url.URL
 	mocked              bool
+
+	roundRobinCount uint32
 }

 var _ Client = (*client)(nil)
@@ -369,3 +373,21 @@ func (client *client) BatchCallContext(ctx context.Context, b []rpc.BatchElem) e
 	)
 	return client.RPCClient.BatchCallContext(ctx, b)
 }
+
+// RoundRobinBatchCallContext rotates through Primary and all Secondaries, changing node on each call
+func (client *client) RoundRobinBatchCallContext(ctx context.Context, b []rpc.BatchElem) error {
+	nSecondaries := len(client.SecondaryRPCClients)
+	if nSecondaries == 0 {
+		return client.BatchCallContext(ctx, b)
+	}
+
+	// NOTE: AddUint32 returns the number after addition, so we must -1 to get the "current" count
+	count := atomic.AddUint32(&client.roundRobinCount, 1) - 1
+	// idx 0 indicates the primary, subsequent indices represent secondaries
+	rr := int(count % uint32(nSecondaries+1))
+
+	if rr == 0 {
+		return client.BatchCallContext(ctx, b)
+	}
+	return client.SecondaryRPCClients[rr-1].BatchCallContext(ctx, b)
+}

diff --git a/core/services/eth/null_client.go b/core/services/eth/null_client.go
index bca1394ab3e..58160d6cebf 100644
--- a/core/services/eth/null_client.go
+++ b/core/services/eth/null_client.go
@@ -152,3 +152,7 @@ func (nc *NullClient) CodeAt(ctx context.Context, account common.Address, blockN
 func (nc *NullClient) BatchCallContext(ctx context.Context, b []rpc.BatchElem) error {
 	return nil
 }
+
+func (nc *NullClient) RoundRobinBatchCallContext(ctx context.Context, b []rpc.BatchElem) error {
+	return nil
+}

diff --git a/core/services/log/broadcaster_test.go b/core/services/log/broadcaster_test.go
index 4347eaa5a91..fb686eda7c2 100644
--- a/core/services/log/broadcaster_test.go
+++ b/core/services/log/broadcaster_test.go
@@ -50,7 +50,7 @@ func TestBroadcaster_AwaitsInitialSubscribersOnStartup(t *testing.T) {
 	listener.On("OnConnect").Return()
 	listener.On("OnDisconnect").Return()

-	sub.On("Unsubscribe").Return()
+	sub.On("Unsubscribe").Maybe().Return()
 	sub.On("Err").Return(nil)

 	chSubscribe := make(chan struct{}, 10)

diff --git a/core/store/models/eth.go b/core/store/models/eth.go
index 73da12f512f..f1c5601b7cc 100644
--- a/core/store/models/eth.go
+++ b/core/store/models/eth.go
@@ -52,10 +52,12 @@ type EthTx struct {
 	Value          assets.Eth
 	GasLimit       uint64
 	Error          *string
-	BroadcastAt    *time.Time
-	CreatedAt      time.Time
-	State          EthTxState
-	EthTxAttempts  []EthTxAttempt `gorm:"->"`
+	// BroadcastAt is updated every time an attempt for this eth_tx is re-sent.
+	// In almost all cases it will be within a second or so of the actual send time.
+	BroadcastAt   *time.Time
+	CreatedAt     time.Time
+	State         EthTxState
+	EthTxAttempts []EthTxAttempt `gorm:"->"`
 }

 func (e EthTx) GetError() error {

diff --git a/core/store/orm/config.go b/core/store/orm/config.go
index 72882a7efcd..7f678aceed1 100644
--- a/core/store/orm/config.go
+++ b/core/store/orm/config.go
@@ -487,6 +487,16 @@ func (c Config) EthHeadTrackerMaxBufferSize() uint {
 	return uint(c.getWithFallback("EthHeadTrackerMaxBufferSize", parseUint64).(uint64))
 }

+// EthTxResendAfterThreshold controls how long the ethResender will wait before
+// re-sending the latest eth_tx_attempt. This is designed as a fallback to
+// protect against the eth nodes dropping txes (it has been anecdotally
+// observed to happen), networking issues or txes being ejected from the
+// mempool.
+// See eth_resender.go for more details +func (c Config) EthTxResendAfterThreshold() time.Duration { + return c.getWithFallback("EthTxResendAfterThreshold", parseDuration).(time.Duration) +} + // EthereumURL represents the URL of the Ethereum node to connect Chainlink to. func (c Config) EthereumURL() string { return c.viper.GetString(EnvVarName("EthereumURL")) diff --git a/core/store/orm/config_reader.go b/core/store/orm/config_reader.go index fa6665c45ea..747294b04b9 100644 --- a/core/store/orm/config_reader.go +++ b/core/store/orm/config_reader.go @@ -46,6 +46,7 @@ type ConfigReader interface { EthReceiptFetchBatchSize() uint32 EthHeadTrackerHistoryDepth() uint EthHeadTrackerMaxBufferSize() uint + EthTxResendAfterThreshold() time.Duration SetEthGasPriceDefault(value *big.Int) error EthereumURL() string EthereumSecondaryURLs() []url.URL diff --git a/core/store/orm/schema.go b/core/store/orm/schema.go index c5cf53e656c..c5093ff8c25 100644 --- a/core/store/orm/schema.go +++ b/core/store/orm/schema.go @@ -57,6 +57,7 @@ type ConfigSchema struct { EthHeadTrackerMaxBufferSize uint `env:"ETH_HEAD_TRACKER_MAX_BUFFER_SIZE" default:"3"` EthBalanceMonitorBlockDelay uint16 `env:"ETH_BALANCE_MONITOR_BLOCK_DELAY" default:"1"` EthReceiptFetchBatchSize uint32 `env:"ETH_RECEIPT_FETCH_BATCH_SIZE" default:"100"` + EthTxResendAfterThreshold time.Duration `env:"ETH_TX_RESEND_AFTER_THRESHOLD" default:"30s"` EthereumURL string `env:"ETH_URL" default:"ws://localhost:8546"` EthereumSecondaryURL string `env:"ETH_SECONDARY_URL" default:""` EthereumSecondaryURLs string `env:"ETH_SECONDARY_URLS" default:""` diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md index 5255cd88d1c..1719a7e2763 100644 --- a/docs/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -22,9 +22,13 @@ at least '1m', it enables periodic backups. Example settings: `DATABASE_BACKUP_MODE="full"` and `DATABASE_BACKUP_FREQUENCY` not set, will run a full back only at the start of the node. +`DATABASE_BACKUP_MODE="lite"` and `DATABASE_BACKUP_FREQUENCY="1h"` will lead to a partial backup on node start and then again a partial backup every one hour. +- Added periodic resending of eth transactions. This means that we no longer rely exclusively on gas bumping to resend unconfirmed transactions that got "lost" for whatever reason. This has two advantages: + 1. Chainlink no longer relies on gas bumping settings to ensure our transactions always end up in the mempool + 2. Chainlink will continue to resend existing transactions even in the event that heads are delayed. This is especially useful on chains like Arbitrum which have very long wait times between heads. -`DATABASE_BACKUP_MODE="lite"` and `DATABASE_BACKUP_FREQUENCY="1h"` will lead to a partial backup on node start and then again a partial backup every one hour. +Periodic resending can be controlled using the `ETH_TX_RESEND_AFTER_THRESHOLD` env var (default 30s). Unconfirmed transactions will be resent periodically at this interval. It is recommended to leave this at the default setting, but it can be set to any [valid duration](https://golang.org/pkg/time/#ParseDuration) or to 0 to disable periodic resending. 
### Fixed

From 3a9eec4622bae370e73c2fae2f9bc01a98d13481 Mon Sep 17 00:00:00 2001
From: Sam
Date: Fri, 26 Mar 2021 11:32:47 +0000
Subject: [PATCH 092/116] Fix SetLogConfig test

---
 core/cmd/remote_client_test.go | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/core/cmd/remote_client_test.go b/core/cmd/remote_client_test.go
index 85480bab9c2..d54f2b6d4f0 100644
--- a/core/cmd/remote_client_test.go
+++ b/core/cmd/remote_client_test.go
@@ -1312,14 +1312,14 @@ func TestClient_SetLogConfig(t *testing.T) {
 	app := startNewApplication(t)
 	client, _ := app.NewClientAndRenderer()

-	infoLevel := "warn"
+	logLevel := "warn"
 	set := flag.NewFlagSet("loglevel", 0)
-	set.String("level", infoLevel, "")
+	set.String("level", logLevel, "")
 	c := cli.NewContext(nil, set, nil)

 	err := client.SetLogLevel(c)
-	assert.NoError(t, err)
-	assert.Equal(t, infoLevel, app.Config.LogLevel().String())
+	require.NoError(t, err)
+	assert.Equal(t, logLevel, app.Config.LogLevel().String())

 	sqlEnabled := true
 	set = flag.NewFlagSet("logsql", 0)

From d9e01ab63bbc436c8a6568c2a71ef2c544cef1f3 Mon Sep 17 00:00:00 2001
From: James Kong
Date: Fri, 26 Mar 2021 16:57:39 +0800
Subject: [PATCH 093/116] View DirectRequest jobs in the jobs page

Direct Request jobs were not being shown because they were being
filtered out. This commit handles filtering for DirectRequest jobs so
they are now shown, and updates the row information for a direct
request job
---
 operator_ui/src/pages/JobsIndex/JobV2Row.tsx  |  7 ++--
 .../src/pages/JobsIndex/JobsIndex.test.tsx    |  8 +++++
 operator_ui/src/pages/JobsIndex/JobsIndex.tsx | 35 ++++++++++++++++--
 operator_ui/support/factories/jobSpecV2.ts    | 36 +++++++++++++++++++
 operator_ui/support/factories/jsonApiJobs.ts  | 17 ++++++++-
 5 files changed, 98 insertions(+), 5 deletions(-)

diff --git a/operator_ui/src/pages/JobsIndex/JobV2Row.tsx b/operator_ui/src/pages/JobsIndex/JobV2Row.tsx
index 176756055ec..51a8995841d 100644
--- a/operator_ui/src/pages/JobsIndex/JobV2Row.tsx
+++ b/operator_ui/src/pages/JobsIndex/JobV2Row.tsx
@@ -13,17 +13,18 @@ interface Props extends WithStyles {
 export const JobV2Row = withStyles(tableStyles)(({ job, classes }: Props) => {
   const createdAt = React.useMemo(() => {
     switch (job.attributes.type) {
+      case 'directrequest':
+        return job.attributes.directRequestSpec.createdAt
       case 'fluxmonitor':
         return job.attributes.fluxMonitorSpec.createdAt
       case 'offchainreporting':
         return job.attributes.offChainReportingOracleSpec.createdAt
-      case 'directrequest':
-        return job.attributes.directRequestSpec.createdAt
     }
   }, [job])

   const type = React.useMemo(() => {
     switch (job.attributes.type) {
+      case 'directrequest':
       case 'fluxmonitor':
         return 'Direct Request'
       case 'offchainreporting':
@@ -35,6 +36,8 @@ export const JobV2Row = withStyles(tableStyles)(({ job, classes }: Props) => {
   const initiator = React.useMemo(() => {
     switch (job.attributes.type) {
+      // TODO - Need to get the API to return the 'initiator' type to implement
+      // this
       case 'fluxmonitor':
         return 'fluxmonitor'
       case 'offchainreporting':

diff --git a/operator_ui/src/pages/JobsIndex/JobsIndex.test.tsx b/operator_ui/src/pages/JobsIndex/JobsIndex.test.tsx
index bd8c0f334f7..5f0157d10cc 100644
--- a/operator_ui/src/pages/JobsIndex/JobsIndex.test.tsx
+++ b/operator_ui/src/pages/JobsIndex/JobsIndex.test.tsx
@@ -3,6 +3,7 @@ import React from 'react'
 import { Route } from 'react-router-dom'
 import { jsonApiJobSpecs } from 'factories/jsonApiJobSpecs'
 import {
+  directRequestResource,
   jsonApiJobSpecsV2,
   fluxMonitorJobResource,
ocrJobResource, @@ -39,6 +40,10 @@ describe('pages/JobsIndex/JobsIndex', () => { id: '2000000', createdAt: new Date().toISOString(), }), + directRequestResource({ + id: '3000000', + createdAt: new Date().toISOString(), + }), ]), ) @@ -56,6 +61,9 @@ describe('pages/JobsIndex/JobsIndex', () => { // Flux Monitor V2 Job expect(wrapper.text()).toContain('2000000') + + // Direct Request V2 Job + expect(wrapper.text()).toContain('3000000') }) it('allows searching', async () => { diff --git a/operator_ui/src/pages/JobsIndex/JobsIndex.tsx b/operator_ui/src/pages/JobsIndex/JobsIndex.tsx index d5c4085c125..2c15c5209ab 100644 --- a/operator_ui/src/pages/JobsIndex/JobsIndex.tsx +++ b/operator_ui/src/pages/JobsIndex/JobsIndex.tsx @@ -1,4 +1,4 @@ -import React from 'react' +import React, { useEffect } from 'react' import Button from 'components/Button' import { Title } from 'components/Title' import Content from 'components/Content' @@ -53,6 +53,10 @@ function isJobSpecV2(job: CombinedJobs): job is JobSpecV2 { return job.type === JobSpecTypes.v2 } +function isDirectRequestJobSpecV2(job: JobSpecV2) { + return job.attributes.type === 'directrequest' +} + function isFluxMonitorJobSpecV2(job: JobSpecV2) { return job.attributes.type === 'fluxmonitor' } @@ -107,11 +111,19 @@ const searchIncludes = (searchParam: string) => { } export const simpleJobFilter = (search: string) => (job: CombinedJobs) => { + if (search === '') { + return true + } + if (isJobSpecV1(job)) { return matchV1Job(job, search) } if (isJobSpecV2(job)) { + if (isDirectRequestJobSpecV2(job)) { + return matchDirectRequest(job, search) + } + if (isFluxMonitorJobSpecV2(job)) { return matchFluxMonitor(job, search) } @@ -143,6 +155,25 @@ function matchV1Job(job: DirectRequest, term: string) { return dataset.some(match) } +/** + * matchDirectRequest determines whether the V2 Direct Request job matches the search + * terms. + * + * @param job {JobSpecV2} The V2 Job Spec + * @param term {string} The search term + */ +function matchDirectRequest(job: JobSpecV2, term: string) { + const match = searchIncludes(term) + + const dataset: string[] = [ + job.id, + job.attributes.name || '', + 'direct request', // Hardcoded to match the type column + ] + + return dataset.some(match) +} + /** * matchFluxMonitor determines whether the Flux Monitor job matches the search * terms. 
@@ -223,7 +254,7 @@ export const JobsIndex = ({ const jobFilter = React.useMemo(() => simpleJobFilter(search), [search]) - React.useEffect(() => { + useEffect(() => { getJobs().then(setJobs).catch(setError) }, [setError]) diff --git a/operator_ui/support/factories/jobSpecV2.ts b/operator_ui/support/factories/jobSpecV2.ts index 362c82c1c43..90bfb87fe4f 100644 --- a/operator_ui/support/factories/jobSpecV2.ts +++ b/operator_ui/support/factories/jobSpecV2.ts @@ -1,6 +1,7 @@ import { partialAsFull } from 'support/test-helpers/partialAsFull' import { JobSpecV2, + DirectRequestJobV2Spec, FluxMonitorJobV2Spec, OffChainReportingOracleJobV2Spec, } from 'core/store/models' @@ -88,3 +89,38 @@ export function fluxMonitorJobV2( }, } } + +export function directRequestJobV2( + spec: Partial = {}, + config: Partial< + { + name?: string + id?: string + maxTaskDuration?: string + } & { + dotDagSource?: string + } + > = {}, +): JobSpecV2 { + const directRequestSpec = partialAsFull< + DirectRequestJobV2Spec['directRequestSpec'] + >({ + createdAt: spec.createdAt || new Date(1600775300410).toISOString(), + }) + return { + name: config.name || 'Direct Request V2 job', + type: 'directrequest', + schemaVersion: 1, + directRequestSpec: directRequestSpec, + offChainReportingOracleSpec: null, + fluxMonitorSpec: null, + errors: [], + maxTaskDuration: '', + pipelineSpec: { + dotDagSource: + typeof config.dotDagSource === 'string' + ? config.dotDagSource + : ' fetch [type=http method=POST url="http://localhost:8001" requestData="{\\"hi\\": \\"hello\\"}"];\n parse [type=jsonparse path="data,result"];\n multiply [type=multiply times=100];\n fetch -\u003e parse -\u003e multiply;\n', + }, + } +} diff --git a/operator_ui/support/factories/jsonApiJobs.ts b/operator_ui/support/factories/jsonApiJobs.ts index 92ee354a29e..1fb22803a66 100644 --- a/operator_ui/support/factories/jsonApiJobs.ts +++ b/operator_ui/support/factories/jsonApiJobs.ts @@ -1,7 +1,7 @@ import { ApiResponse } from 'utils/json-api-client' import { ResourceObject } from 'json-api-normalizer' import { JobSpecV2 } from 'core/store/models' -import { fluxMonitorJobV2, ocrJobSpecV2 } from './jobSpecV2' +import { directRequestJobV2, fluxMonitorJobV2, ocrJobSpecV2 } from './jobSpecV2' function getRandomInt(max: number) { return Math.floor(Math.random() * Math.floor(max)) @@ -19,6 +19,21 @@ export const jsonApiJobSpecsV2 = ( } as ApiResponse } +export const directRequestResource = ( + job: Partial, +) => { + const id = job.id || getRandomInt(1_000_000).toString() + + return { + type: 'jobs', + id, + attributes: { + ...directRequestJobV2(job), + name: job.name, + }, + } as ResourceObject +} + export const ocrJobResource = ( job: Partial< JobSpecV2['offChainReportingOracleSpec'] & { id?: string; name?: string } From 72c4fb72aac862833d123ade1c6f6023181c4998 Mon Sep 17 00:00:00 2001 From: James Kong Date: Fri, 26 Mar 2021 17:04:14 +0800 Subject: [PATCH 094/116] Render the definition for a DirectRequest job --- core/web/presenters/job.go | 8 +++- operator_ui/@types/core/store/models.d.ts | 2 + .../Jobs/generateJobSpecDefinition.test.ts | 37 +++++++++++++++++++ .../pages/Jobs/generateJobSpecDefinition.ts | 31 ++++++++++++++++ operator_ui/src/pages/JobsIndex/JobV2Row.tsx | 4 +- operator_ui/support/factories/jobSpecV2.ts | 2 +- 6 files changed, 79 insertions(+), 5 deletions(-) diff --git a/core/web/presenters/job.go b/core/web/presenters/job.go index f0cf2566e52..21c10902347 100644 --- a/core/web/presenters/job.go +++ b/core/web/presenters/job.go @@ -30,6 +30,7 @@ const ( type 
DirectRequestSpec struct { ContractAddress models.EIP55Address `json:"contractAddress"` OnChainJobSpecID string `json:"onChainJobSpecId"` + Initiator string `json:"initiator"` CreatedAt time.Time `json:"createdAt"` UpdatedAt time.Time `json:"updatedAt"` } @@ -40,8 +41,11 @@ func NewDirectRequestSpec(spec *job.DirectRequestSpec) *DirectRequestSpec { return &DirectRequestSpec{ ContractAddress: spec.ContractAddress, OnChainJobSpecID: spec.OnChainJobSpecID.String(), - CreatedAt: spec.CreatedAt, - UpdatedAt: spec.UpdatedAt, + // This is hardcoded to runlog. When we support other intiators, we need + // to change this + Initiator: "runlog", + CreatedAt: spec.CreatedAt, + UpdatedAt: spec.UpdatedAt, } } diff --git a/operator_ui/@types/core/store/models.d.ts b/operator_ui/@types/core/store/models.d.ts index b9ac0bc5551..465c4785afe 100644 --- a/operator_ui/@types/core/store/models.d.ts +++ b/operator_ui/@types/core/store/models.d.ts @@ -479,6 +479,8 @@ declare module 'core/store/models' { export type DirectRequestJobV2Spec = BaseJobSpecV2 & { type: 'directrequest' directRequestSpec: { + initiator: 'runlog' + contractAddress: common.Address createdAt: time.Time } fluxMonitorSpec: null diff --git a/operator_ui/src/pages/Jobs/generateJobSpecDefinition.test.ts b/operator_ui/src/pages/Jobs/generateJobSpecDefinition.test.ts index d6a252b00c8..8e428c4317b 100644 --- a/operator_ui/src/pages/Jobs/generateJobSpecDefinition.test.ts +++ b/operator_ui/src/pages/Jobs/generateJobSpecDefinition.test.ts @@ -289,6 +289,43 @@ observationSource = """ multiply [type=multiply times=100]; fetch -> parse -> multiply; """ +` + + const output = generateTOMLDefinition(jobSpecAttributesInput) + expect(output).toEqual(expectedOutput) + }) + + it('generates a valid Direct Request definition', () => { + const jobSpecAttributesInput = { + name: 'DR Job Spec', + schemaVersion: 1, + type: 'directrequest', + fluxMonitorSpec: null, + directRequestSpec: { + initiator: 'runlog', + contractAddress: '0x3cCad4715152693fE3BC4460591e3D3Fbd071b42', + createdAt: '2021-02-19T16:00:01.115227+08:00', + }, + offChainReportingOracleSpec: null, + maxTaskDuration: '10s', + pipelineSpec: { + dotDagSource: + ' fetch [type=http method=POST url="http://localhost:8001" requestData="{\\"hi\\": \\"hello\\"}"];\n parse [type=jsonparse path="data,result"];\n multiply [type=multiply times=100];\n fetch -> parse -> multiply;\n', + }, + errors: [], + } as JobSpecV2 + + const expectedOutput = `type = "directrequest" +schemaVersion = 1 +name = "DR Job Spec" +contractAddress = "0x3cCad4715152693fE3BC4460591e3D3Fbd071b42" +maxTaskDuration = "10s" +observationSource = """ + fetch [type=http method=POST url="http://localhost:8001" requestData="{\\\\"hi\\\\": \\\\"hello\\\\"}"]; + parse [type=jsonparse path="data,result"]; + multiply [type=multiply times=100]; + fetch -> parse -> multiply; +""" ` const output = generateTOMLDefinition(jobSpecAttributesInput) diff --git a/operator_ui/src/pages/Jobs/generateJobSpecDefinition.ts b/operator_ui/src/pages/Jobs/generateJobSpecDefinition.ts index e4b870dfc1b..d035b20ee43 100644 --- a/operator_ui/src/pages/Jobs/generateJobSpecDefinition.ts +++ b/operator_ui/src/pages/Jobs/generateJobSpecDefinition.ts @@ -1,5 +1,6 @@ import { ApiResponse } from 'utils/json-api-client' import { + DirectRequestJobV2Spec, FluxMonitorJobV2Spec, JobSpec, JobSpecV2, @@ -97,6 +98,10 @@ export const generateJSONDefinition = ( export const generateTOMLDefinition = ( jobSpecAttributes: ApiResponse['data']['attributes'], ): string => { + if 
(jobSpecAttributes.type === 'directrequest') { + return generateDirectRequestDefinition(jobSpecAttributes) + } + if (jobSpecAttributes.type === 'fluxmonitor') { return generateFluxMonitorDefinition(jobSpecAttributes) } @@ -172,3 +177,29 @@ function generateFluxMonitorDefinition( format: JobSpecFormats.TOML, }) } + +function generateDirectRequestDefinition( + attrs: ApiResponse['data']['attributes'], +) { + const { + directRequestSpec, + name, + pipelineSpec, + schemaVersion, + type, + maxTaskDuration, + } = attrs + const { contractAddress } = directRequestSpec + + return stringifyJobSpec({ + value: { + type, + schemaVersion, + name, + contractAddress, + maxTaskDuration, + observationSource: pipelineSpec.dotDagSource, + }, + format: JobSpecFormats.TOML, + }) +} diff --git a/operator_ui/src/pages/JobsIndex/JobV2Row.tsx b/operator_ui/src/pages/JobsIndex/JobV2Row.tsx index 51a8995841d..fe08694d4cd 100644 --- a/operator_ui/src/pages/JobsIndex/JobV2Row.tsx +++ b/operator_ui/src/pages/JobsIndex/JobV2Row.tsx @@ -36,10 +36,10 @@ export const JobV2Row = withStyles(tableStyles)(({ job, classes }: Props) => { const initiator = React.useMemo(() => { switch (job.attributes.type) { - // TODO - Need to get the API to return the 'initiator' type to implement - // this case 'fluxmonitor': return 'fluxmonitor' + case 'directrequest': + return job.attributes.directRequestSpec.initiator case 'offchainreporting': return 'N/A' default: diff --git a/operator_ui/support/factories/jobSpecV2.ts b/operator_ui/support/factories/jobSpecV2.ts index 90bfb87fe4f..3c18413be38 100644 --- a/operator_ui/support/factories/jobSpecV2.ts +++ b/operator_ui/support/factories/jobSpecV2.ts @@ -111,7 +111,7 @@ export function directRequestJobV2( name: config.name || 'Direct Request V2 job', type: 'directrequest', schemaVersion: 1, - directRequestSpec: directRequestSpec, + directRequestSpec, offChainReportingOracleSpec: null, fluxMonitorSpec: null, errors: [], From 6be90b7902b94ee708c4a4de19db9546509a272b Mon Sep 17 00:00:00 2001 From: Connor Stein Date: Fri, 26 Mar 2021 17:05:12 -0400 Subject: [PATCH 095/116] Best effort to attach metadata FMv1 jobs (#4126) --- core/services/fluxmonitor/flux_monitor.go | 33 +++++++++++------------ 1 file changed, 16 insertions(+), 17 deletions(-) diff --git a/core/services/fluxmonitor/flux_monitor.go b/core/services/fluxmonitor/flux_monitor.go index 7b91cb62b69..3a0a71d2ec2 100644 --- a/core/services/fluxmonitor/flux_monitor.go +++ b/core/services/fluxmonitor/flux_monitor.go @@ -6,7 +6,6 @@ import ( "math/big" "net/url" "reflect" - "strings" "sync" "time" @@ -912,16 +911,16 @@ func (p *PollingDeviationChecker) respondToNewRoundLog(log flux_aggregator_wrapp l.Infow("Responding to new round request") + // Best effort to attach metadata. + var metaDataForBridge map[string]interface{} lrd, err := p.fluxAggregator.LatestRoundData(nil) - if err != nil && !strings.Contains(err.Error(), "No data present") { - l.Warnw("Error reading latest round data for request meta", "err", err) - return - } - // If no data present, just send 0 for backwards compatibility. 
- metaDataForBridge, err := models.BridgeMetaData(lrd.Answer, lrd.UpdatedAt) if err != nil { - logger.Warnw("Error marshalling roundState for request meta", "err", err) - return + l.Warnw("Couldn't read latest round data for request meta", "err", err) + } else { + metaDataForBridge, err = models.BridgeMetaData(lrd.Answer, lrd.UpdatedAt) + if err != nil { + l.Warnw("Error marshalling roundState for request meta", "err", err) + } } ctx, cancel := utils.CombinedContext(p.chStop) @@ -1049,16 +1048,16 @@ func (p *PollingDeviationChecker) pollIfEligible(thresholds DeviationThresholds) return } + // Best effort to attach metadata. + var metaDataForBridge map[string]interface{} lrd, err := p.fluxAggregator.LatestRoundData(nil) - if err != nil && !strings.Contains(err.Error(), "No data present") { - l.Warnw("Error reading latest round data for request meta", "err", err) - return - } - // If no data present, just send 0 for backwards compatibility. - metaDataForBridge, err := models.BridgeMetaData(lrd.Answer, lrd.UpdatedAt) if err != nil { - logger.Warnw("Error marshalling roundState for request meta", "err", err) - return + l.Warnw("Couldn't read latest round data for request meta", "err", err) + } else { + metaDataForBridge, err = models.BridgeMetaData(lrd.Answer, lrd.UpdatedAt) + if err != nil { + l.Warnw("Error marshalling roundState for request meta", "err", err) + } } ctx, cancel := utils.CombinedContext(p.chStop) From a7908c3efcf51b60f73ead477de0893883b4bde8 Mon Sep 17 00:00:00 2001 From: Connor Stein Date: Mon, 29 Mar 2021 11:44:13 -0400 Subject: [PATCH 096/116] Fix dependency check failure protobuf 1.3.1(#4135) --- .github/workflows/dependency-check.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/dependency-check.yml b/.github/workflows/dependency-check.yml index 2afa00ba827..6e3b7d44e09 100644 --- a/.github/workflows/dependency-check.yml +++ b/.github/workflows/dependency-check.yml @@ -20,8 +20,10 @@ jobs: - name: Check vulnerabilities uses: sonatype-nexus-community/nancy-github-action@b492e6567a301a914bd227d44bf10dcfe85438ad with: - nancyCommand: sleuth -e bba60acb-c7b5-4621-af69-f4085a8301d0,d373dc3f-aa88-483b-b501-20fe5382cc80,5def94e5-b89c-4a94-b9c6-ae0e120784c2 + nancyCommand: sleuth -e bba60acb-c7b5-4621-af69-f4085a8301d0,d373dc3f-aa88-483b-b501-20fe5382cc80,5def94e5-b89c-4a94-b9c6-ae0e120784c2,dcf6da03-f9dd-4a4e-b792-0262de36a0b1 # Check the dependency by entering the OSS index UUID here: # https://ossindex.sonatype.org/vuln/bba60acb-c7b5-4621-af69-f4085a8301d0 # # To get more detail locally run `go list -json -m all | nancy sleuth` + # dcf6da03-f9dd-4a4e-b792-0262de36a0b1 is because of gogo/protobuf@1.3.1 + # which is used by go-libp2p-core, need them to upgrade to 1.3.2 before we can remove it. 
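For context on the flux monitor change in PATCH 095 above: it replaces two early returns with a "best effort" fallback, so a failure to read LatestRoundData, or to marshal it into bridge metadata, now only logs a warning and the poll or round response proceeds with nil metadata instead of aborting. The control flow reduces to the following sketch; the types and helper names here are stand-ins for illustration, not the node's actual API.

package fluxexample

import "log"

// roundData stands in for the aggregator's LatestRoundData result.
type roundData struct {
	Answer    int64
	UpdatedAt int64
}

// aggregator stands in for the flux aggregator contract wrapper.
type aggregator interface {
	LatestRoundData() (roundData, error)
}

// bridgeMetaData approximates models.BridgeMetaData: it packages the latest
// answer and its timestamp for adapters that want request metadata.
func bridgeMetaData(answer, updatedAt int64) (map[string]interface{}, error) {
	return map[string]interface{}{"latestAnswer": answer, "updatedAt": updatedAt}, nil
}

// requestMeta attaches metadata on a best-effort basis: each failure is
// logged and nil is returned, so the caller can still submit its answer.
func requestMeta(agg aggregator) map[string]interface{} {
	lrd, err := agg.LatestRoundData()
	if err != nil {
		log.Printf("couldn't read latest round data for request meta: %v", err)
		return nil
	}
	meta, err := bridgeMetaData(lrd.Answer, lrd.UpdatedAt)
	if err != nil {
		log.Printf("error marshalling roundState for request meta: %v", err)
		return nil
	}
	return meta
}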
From faf6cf8db91752b829d202142d2ef643443fd072 Mon Sep 17 00:00:00 2001 From: Connor Stein Date: Mon, 29 Mar 2021 12:17:07 -0400 Subject: [PATCH 097/116] Bugfix/177474174 Signature corruption (#4125) --- core/services/offchainreporting/database.go | 4 +++ .../offchainreporting/database_test.go | 31 ++++++++++++++++--- docs/CHANGELOG.md | 3 ++ 3 files changed, 33 insertions(+), 5 deletions(-) diff --git a/core/services/offchainreporting/database.go b/core/services/offchainreporting/database.go index 2878f8c9793..1372ce75e6d 100644 --- a/core/services/offchainreporting/database.go +++ b/core/services/offchainreporting/database.go @@ -137,10 +137,14 @@ func (d *db) StorePendingTransmission(ctx context.Context, k ocrtypes.PendingTra median := utils.NewBig(p.Median) var rs [][]byte var ss [][]byte + // Note: p.Rs and p.Ss are of type [][32]byte. + // See last example of https://github.com/golang/go/wiki/CommonMistakes#using-reference-to-loop-iterator-variable for _, v := range p.Rs { + v := v rs = append(rs, v[:]) } for _, v := range p.Ss { + v := v ss = append(ss, v[:]) } diff --git a/core/services/offchainreporting/database_test.go b/core/services/offchainreporting/database_test.go index 0df1f409a0e..8256a244b8c 100644 --- a/core/services/offchainreporting/database_test.go +++ b/core/services/offchainreporting/database_test.go @@ -1,6 +1,7 @@ package offchainreporting_test import ( + "bytes" "context" "math/big" "testing" @@ -106,8 +107,8 @@ func Test_DB_ReadWriteConfig(t *testing.T) { sqldb, _ := store.DB.DB() config := ocrtypes.ContractConfig{ ConfigDigest: cltest.MakeConfigDigest(t), - Signers: []common.Address{cltest.NewAddress()}, - Transmitters: []common.Address{cltest.NewAddress()}, + Signers: []common.Address{cltest.NewAddress(), cltest.NewAddress()}, + Transmitters: []common.Address{cltest.NewAddress(), cltest.NewAddress()}, Threshold: uint8(35), EncodedConfigVersion: uint64(987654), Encoded: []byte{1, 2, 3, 4, 5}, @@ -164,6 +165,20 @@ func Test_DB_ReadWriteConfig(t *testing.T) { }) } +func assertPendingTransmissionEqual(t *testing.T, pt1, pt2 ocrtypes.PendingTransmission) { + require.Equal(t, pt1.Rs, pt2.Rs) + require.Equal(t, pt1.Ss, pt2.Ss) + assert.True(t, bytes.Equal(pt1.Vs[:], pt2.Vs[:])) + assert.True(t, bytes.Equal(pt1.SerializedReport[:], pt2.SerializedReport[:])) + assert.Equal(t, pt1.Median, pt2.Median) + for i := range pt1.Ss { + assert.True(t, bytes.Equal(pt1.Ss[i][:], pt2.Ss[i][:])) + } + for i := range pt1.Rs { + assert.True(t, bytes.Equal(pt1.Rs[i][:], pt2.Rs[i][:])) + } +} + func Test_DB_PendingTransmissions(t *testing.T) { store, cleanup := cltest.NewStore(t) defer cleanup() @@ -193,13 +208,16 @@ func Test_DB_PendingTransmissions(t *testing.T) { Time: time.Now(), Median: ocrtypes.Observation(big.NewInt(41)), SerializedReport: []byte{0, 2, 3}, - Rs: [][32]byte{cltest.Random32Byte()}, - Ss: [][32]byte{cltest.Random32Byte()}, + Rs: [][32]byte{cltest.Random32Byte(), cltest.Random32Byte()}, + Ss: [][32]byte{cltest.Random32Byte(), cltest.Random32Byte()}, Vs: cltest.Random32Byte(), } err := db.StorePendingTransmission(ctx, k, p) require.NoError(t, err) + m, err := db.PendingTransmissionsWithConfigDigest(ctx, configDigest) + require.NoError(t, err) + assertPendingTransmissionEqual(t, m[k], p) // Now overwrite value for k to prove that updating works p = ocrtypes.PendingTransmission{ @@ -212,6 +230,9 @@ func Test_DB_PendingTransmissions(t *testing.T) { } err = db.StorePendingTransmission(ctx, k, p) require.NoError(t, err) + m, err = 
db.PendingTransmissionsWithConfigDigest(ctx, configDigest) + require.NoError(t, err) + assertPendingTransmissionEqual(t, m[k], p) p2 := ocrtypes.PendingTransmission{ Time: time.Now(), @@ -242,7 +263,7 @@ func Test_DB_PendingTransmissions(t *testing.T) { err = db.StorePendingTransmission(ctx, kRedHerring, pRedHerring) require.NoError(t, err) - m, err := db.PendingTransmissionsWithConfigDigest(ctx, configDigest) + m, err = db.PendingTransmissionsWithConfigDigest(ctx, configDigest) require.NoError(t, err) require.Len(t, m, 2) diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md index 956b9a3d8df..4c9321f8903 100644 --- a/docs/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -35,12 +35,15 @@ Periodic resending can be controlled using the `ETH_TX_RESEND_AFTER_THRESHOLD` e ### Fixed - Chainlink node now automatically sets the correct nonce on startup if you are restoring from a previous backup (manual setnextnonce is no longer necessary). + - Flux monitor jobs should now work correctly with [outlier-detection](https://github.com/smartcontractkit/external-adapters-js/tree/develop/composite/outlier-detection) and [market-closure](https://github.com/smartcontractkit/external-adapters-js/tree/develop/composite/market-closure) external adapters. - Performance improvements to OCR job adds. Removed the pipeline_task_specs table and added a new column `dot_id` to the pipeline_task_runs table which links a pipeline_task_run to a dotID in the pipeline_spec.dot_dag_source. +- Fixed bug where node will occasionally submit an invalid OCR transmission which reverts with "address not authorized to sign". + ## [0.10.3] - 2021-03-22 ### Added From 4bafe6e1cffbe9799dc528ae8f29a99a047f6123 Mon Sep 17 00:00:00 2001 From: spooktheducks Date: Mon, 29 Mar 2021 14:36:23 -0500 Subject: [PATCH 098/116] Update libocr to c910dd9 --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index da792efda39..bb77fdaf976 100644 --- a/go.mod +++ b/go.mod @@ -44,7 +44,7 @@ require ( github.com/robfig/cron/v3 v3.0.1 github.com/satori/go.uuid v1.2.0 github.com/shopspring/decimal v1.2.0 - github.com/smartcontractkit/libocr v0.0.0-20210319202758-14aa50f869b7 + github.com/smartcontractkit/libocr v0.0.0-20210329163356-c910dd9a5eae github.com/spf13/viper v1.7.1 github.com/stretchr/testify v1.7.0 github.com/tevino/abool v0.0.0-20170917061928-9b9efcf221b5 diff --git a/go.sum b/go.sum index e7cb9d2a338..f8cde9a8257 100644 --- a/go.sum +++ b/go.sum @@ -1270,6 +1270,8 @@ github.com/smartcontractkit/go-txdb v0.1.4-0.20210313013032-3a5ba5dff784/go.mod github.com/smartcontractkit/libocr v0.0.0-20201203233047-5d9b24f0cbb5/go.mod h1:bfdSuLnBWCkafDvPGsQ1V6nrXhg046gh227MKi4zkpc= github.com/smartcontractkit/libocr v0.0.0-20210319202758-14aa50f869b7 h1:r6l0wcc7YwDMoobAhg4i2ZwhE7xsgd47XeU0gNSxIoc= github.com/smartcontractkit/libocr v0.0.0-20210319202758-14aa50f869b7/go.mod h1:cm4TomvY09A1mADIHeIo1dOcOVL1EeSEqga4cmCxhl4= +github.com/smartcontractkit/libocr v0.0.0-20210329163356-c910dd9a5eae h1:bl1wExRvG6oE3f4ScAOGzujdhkXxeGvPoU3W0BHAXUU= +github.com/smartcontractkit/libocr v0.0.0-20210329163356-c910dd9a5eae/go.mod h1:cm4TomvY09A1mADIHeIo1dOcOVL1EeSEqga4cmCxhl4= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= From 
d05975a5a254c8007288d9cb32f84b9ef0a3b9f4 Mon Sep 17 00:00:00 2001 From: connorwstein Date: Mon, 29 Mar 2021 17:25:05 -0400 Subject: [PATCH 099/116] fix test --- core/services/job/runner_integration_test.go | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/core/services/job/runner_integration_test.go b/core/services/job/runner_integration_test.go index 0a9cfa303a8..4eea3c264d9 100644 --- a/core/services/job/runner_integration_test.go +++ b/core/services/job/runner_integration_test.go @@ -621,22 +621,26 @@ ds1 -> ds1_parse; services, err := sd.ServicesForSpec(jb) require.NoError(t, err) - // Start and stop the service to generate errors. - // We expect a database timeout and a context cancellation - // error to show up as pipeline_spec_errors. + // Return an error getting the contract code. + geth.On("CodeAt", mock.Anything, mock.Anything, mock.Anything).Return(nil, errors.New("no such code")) for _, s := range services { err = s.Start() require.NoError(t, err) - err = s.Close() - require.NoError(t, err) } - var se []job.SpecError - err = db.Find(&se).Error - require.NoError(t, err) + require.Eventually(t, func() bool { + err = db.Find(&se).Error + require.NoError(t, err) + return len(se) == 1 + }, time.Second, 100*time.Millisecond) require.Len(t, se, 1) assert.Equal(t, uint(1), se[0].Occurrences) + for _, s := range services { + err = s.Close() + require.NoError(t, err) + } + // Ensure we can delete an errored _, err = jobORM.ClaimUnclaimedJobs(context.Background()) require.NoError(t, err) From 2e16f3d11f225969737a5cf67f2d073fddf3f05d Mon Sep 17 00:00:00 2001 From: "dependabot-preview[bot]" <27856297+dependabot-preview[bot]@users.noreply.github.com> Date: Mon, 29 Mar 2021 21:39:59 +0000 Subject: [PATCH 100/116] [Security] Bump y18n from 3.2.1 to 3.2.2 Bumps [y18n](https://github.com/yargs/y18n) from 3.2.1 to 3.2.2. 
**This update includes a security fix.** - [Release notes](https://github.com/yargs/y18n/releases) - [Changelog](https://github.com/yargs/y18n/blob/master/CHANGELOG.md) - [Commits](https://github.com/yargs/y18n/commits) Signed-off-by: dependabot-preview[bot] --- yarn.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/yarn.lock b/yarn.lock index 35563e99add..54788618b97 100644 --- a/yarn.lock +++ b/yarn.lock @@ -20521,9 +20521,9 @@ xtend@~2.1.1: object-keys "~0.4.0" y18n@^3.2.1: - version "3.2.1" - resolved "https://registry.npmjs.org/y18n/-/y18n-3.2.1.tgz#6d15fba884c08679c0d77e88e7759e811e07fa41" - integrity sha1-bRX7qITAhnnA136I53WegR4H+kE= + version "3.2.2" + resolved "https://registry.yarnpkg.com/y18n/-/y18n-3.2.2.tgz#85c901bd6470ce71fc4bb723ad209b70f7f28696" + integrity sha512-uGZHXkHnhF0XeeAPgnKfPv1bgKAYyVvmNL1xlKsPYZPaIHxGti2hHqvOCQv71XMsLxu1QjergkqogUnms5D3YQ== y18n@^4.0.0: version "4.0.0" From 126924a06026c56530bc04b19dbf74a7d5e8d4ff Mon Sep 17 00:00:00 2001 From: John Barker Date: Mon, 29 Mar 2021 16:29:31 -0600 Subject: [PATCH 101/116] Should only wait 3 seconds, not 20 --- core/internal/cltest/cltest.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/core/internal/cltest/cltest.go b/core/internal/cltest/cltest.go index f7ddec09e52..94b3ee458d8 100644 --- a/core/internal/cltest/cltest.go +++ b/core/internal/cltest/cltest.go @@ -967,8 +967,8 @@ const ( DBWaitTimeout = 20 * time.Second // DBPollingInterval can't be too short to avoid DOSing the test database DBPollingInterval = 100 * time.Millisecond - // AsertNoActionTimeout shouldn't be too long, or it will slow down tests - AsertNoActionTimeout = 3 * time.Second + // AssertNoActionTimeout shouldn't be too long, or it will slow down tests + AssertNoActionTimeout = 3 * time.Second ) // WaitForJobRunToComplete waits for a JobRun to reach Completed Status @@ -1137,7 +1137,7 @@ func WaitForRuns(t testing.TB, j models.JobSpec, store *strpkg.Store, want int) jrs, err = store.JobRunsFor(j.ID) assert.NoError(t, err) return jrs - }, DBWaitTimeout, DBPollingInterval).Should(gomega.HaveLen(want)) + }, AssertNoActionTimeout, DBPollingInterval).Should(gomega.HaveLen(want)) } else { g.Eventually(func() []models.JobRun { jrs, err = store.JobRunsFor(j.ID) @@ -1180,7 +1180,7 @@ func AssertRunsStays(t testing.TB, j models.JobSpec, store *strpkg.Store, want i jrs, err = store.JobRunsFor(j.ID) assert.NoError(t, err) return jrs - }, DBWaitTimeout, DBPollingInterval).Should(gomega.HaveLen(want)) + }, AssertNoActionTimeout, DBPollingInterval).Should(gomega.HaveLen(want)) return jrs } @@ -1196,7 +1196,7 @@ func AssertPipelineRunsStays(t testing.TB, pipelineSpecID int32, store *strpkg.S Find(&prs).Error assert.NoError(t, err) return prs - }, DBWaitTimeout, DBPollingInterval).Should(gomega.HaveLen(want)) + }, AssertNoActionTimeout, DBPollingInterval).Should(gomega.HaveLen(want)) return prs } @@ -1255,7 +1255,7 @@ func AssertEthTxAttemptCountStays(t testing.TB, store *strpkg.Store, want int) [ err = store.DB.Find(&txas).Error assert.NoError(t, err) return txas - }, DBWaitTimeout, DBPollingInterval).Should(gomega.HaveLen(want)) + }, AssertNoActionTimeout, DBPollingInterval).Should(gomega.HaveLen(want)) return txas } @@ -1286,7 +1286,7 @@ func AssertSyncEventCountStays( count, err := orm.CountOf(&models.SyncEvent{}) assert.NoError(t, err) return count - }, DBWaitTimeout, DBPollingInterval).Should(gomega.Equal(want)) + }, AssertNoActionTimeout, DBPollingInterval).Should(gomega.Equal(want)) } // 
ParseISO8601 given the time string it Must parse the time and return it @@ -1892,7 +1892,7 @@ func AssertCountStays(t testing.TB, store *strpkg.Store, model interface{}, want err = store.DB.Model(model).Count(&count).Error assert.NoError(t, err) return count - }, AsertNoActionTimeout, DBPollingInterval).Should(gomega.Equal(want)) + }, AssertNoActionTimeout, DBPollingInterval).Should(gomega.Equal(want)) } func AssertRecordEventually(t *testing.T, store *strpkg.Store, model interface{}, check func() bool) { From 5af4a060cffd400769f4c965de3ec574ff8ef745 Mon Sep 17 00:00:00 2001 From: Steve Ellis Date: Mon, 29 Mar 2021 21:35:40 -0400 Subject: [PATCH 102/116] ensure VRF key store gets initialized --- core/cmd/local_client_vrf.go | 37 ++++++++++++------------------------ 1 file changed, 12 insertions(+), 25 deletions(-) diff --git a/core/cmd/local_client_vrf.go b/core/cmd/local_client_vrf.go index 32f3bb70e85..97e0b63a1c4 100644 --- a/core/cmd/local_client_vrf.go +++ b/core/cmd/local_client_vrf.go @@ -6,7 +6,6 @@ import ( "io/ioutil" "os" - "github.com/smartcontractkit/chainlink/core/services/chainlink" "github.com/smartcontractkit/chainlink/core/store/dialects" "github.com/pkg/errors" @@ -26,13 +25,11 @@ func (cli *Client) CreateVRFKey(c *clipkg.Context) error { if err != nil { return err } - var vrfKeyStore *store.VRFKeyStore - _, err = cli.AppFactory.NewApplication(cli.Config, func(app chainlink.Application) { - vrfKeyStore = app.GetStore().VRFKeyStore - }) + app, err := cli.AppFactory.NewApplication(cli.Config) if err != nil { return cli.errorOut(errors.Wrap(err, "creating application")) } + vrfKeyStore := app.GetStore().VRFKeyStore key, err := vrfKeyStore.CreateKey(string(password)) if err != nil { return errors.Wrapf(err, "while creating new account") @@ -73,13 +70,11 @@ func (cli *Client) CreateAndExportWeakVRFKey(c *clipkg.Context) error { if err != nil { return err } - var vrfKeyStore *store.VRFKeyStore - _, err = cli.AppFactory.NewApplication(cli.Config, func(app chainlink.Application) { - vrfKeyStore = app.GetStore().VRFKeyStore - }) + app, err := cli.AppFactory.NewApplication(cli.Config) if err != nil { return cli.errorOut(errors.Wrap(err, "creating application")) } + vrfKeyStore := app.GetStore().VRFKeyStore key, err := vrfKeyStore.CreateWeakInMemoryEncryptedKeyXXXTestingOnly( string(password)) if err != nil { @@ -118,13 +113,11 @@ func (cli *Client) ImportVRFKey(c *clipkg.Context) error { if err != nil { return err } - var vrfKeyStore *store.VRFKeyStore - _, err = cli.AppFactory.NewApplication(cli.Config, func(app chainlink.Application) { - vrfKeyStore = app.GetStore().VRFKeyStore - }) + app, err := cli.AppFactory.NewApplication(cli.Config) if err != nil { return cli.errorOut(errors.Wrap(err, "creating application")) } + vrfKeyStore := app.GetStore().VRFKeyStore if err := vrfKeyStore.Import(keyjson, string(password)); err != nil { if err == store.MatchingVRFKeyError { fmt.Println(`The database already has an entry for that public key.`) @@ -181,13 +174,11 @@ func getKeys(cli *Client, c *clipkg.Context) (*vrfkey.EncryptedVRFKey, error) { if err != nil { return nil, err } - var vrfKeyStore *store.VRFKeyStore - _, err = cli.AppFactory.NewApplication(cli.Config, func(app chainlink.Application) { - vrfKeyStore = app.GetStore().VRFKeyStore - }) + app, err := cli.AppFactory.NewApplication(cli.Config) if err != nil { return nil, cli.errorOut(errors.Wrap(err, "creating application")) } + vrfKeyStore := app.GetStore().VRFKeyStore enckey, err := vrfKeyStore.GetSpecificKey(publicKey) if err != 
nil { return nil, errors.Wrapf(err, @@ -210,13 +201,11 @@ func (cli *Client) DeleteVRFKey(c *clipkg.Context) error { return nil } - var vrfKeyStore *store.VRFKeyStore - _, err = cli.AppFactory.NewApplication(cli.Config, func(app chainlink.Application) { - vrfKeyStore = app.GetStore().VRFKeyStore - }) + app, err := cli.AppFactory.NewApplication(cli.Config) if err != nil { return cli.errorOut(errors.Wrap(err, "creating application")) } + vrfKeyStore := app.GetStore().VRFKeyStore hardDelete := c.Bool("hard") if hardDelete { @@ -250,13 +239,11 @@ func getPublicKey(c *clipkg.Context) (vrfkey.PublicKey, error) { // ListKeys Lists the keys in the db func (cli *Client) ListKeys(c *clipkg.Context) error { - var vrfKeyStore *store.VRFKeyStore - _, err := cli.AppFactory.NewApplication(cli.Config, func(app chainlink.Application) { - vrfKeyStore = app.GetStore().VRFKeyStore - }) + app, err := cli.AppFactory.NewApplication(cli.Config) if err != nil { return cli.errorOut(errors.Wrap(err, "creating application")) } + vrfKeyStore := app.GetStore().VRFKeyStore keys, err := vrfKeyStore.ListKeys() if err != nil { return err From 46516a17891dc4637db280151de86e571bdba875 Mon Sep 17 00:00:00 2001 From: Ryan Hall Date: Sun, 28 Mar 2021 16:50:29 -0500 Subject: [PATCH 103/116] add fulfilled check to randomness adapter --- core/adapters/random.go | 44 ++++++++++++- core/adapters/random_test.go | 64 +++++++++++++++++++ .../vrf/vrf_simulate_blockchain_test.go | 5 +- core/store/models/parse_randomness_request.go | 2 + core/utils/utils.go | 9 +++ docs/CHANGELOG.md | 6 +- 6 files changed, 125 insertions(+), 5 deletions(-) diff --git a/core/adapters/random.go b/core/adapters/random.go index fa467723bb9..00810b2de4c 100644 --- a/core/adapters/random.go +++ b/core/adapters/random.go @@ -3,10 +3,12 @@ package adapters import ( "fmt" + "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/solidity_vrf_coordinator_interface" "github.com/smartcontractkit/chainlink/core/services/vrf" "github.com/smartcontractkit/chainlink/core/store" "github.com/smartcontractkit/chainlink/core/store/models" "github.com/smartcontractkit/chainlink/core/store/models/vrfkey" + "github.com/smartcontractkit/chainlink/core/utils" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" @@ -57,7 +59,8 @@ type Random struct { // // This is just a hex string because Random is instantiated by json.Unmarshal. // (See adapters.For function.) - PublicKey string `json:"publicKey"` + PublicKey string `json:"publicKey"` + CoordinatorAddress models.EIP55Address `json:"coordinatorAddress"` } // TaskType returns the type of Adapter. @@ -67,6 +70,14 @@ func (ra *Random) TaskType() models.TaskType { // Perform returns the the proof for the VRF output given seed, or an error. 
func (ra *Random) Perform(input models.RunInput, store *store.Store) models.RunOutput { + shouldFulfill, err := checkFulfillment(ra, input, store) + if err != nil { + return models.NewRunOutputError(errors.Wrapf(err, "unable to determine if fulfillment needed")) + } + if !shouldFulfill { + return models.NewRunOutputError(errors.New("randomness request already fulfilled")) + } + key, i, err := getInputs(ra, input, store) if err != nil { return models.NewRunOutputError(err) @@ -217,3 +228,34 @@ func extractHex(input models.RunInput, key string) ([]byte, error) { } return hexutil.Decode(rawValue.String()) } + +// checkFulfillment checks to see if the randomness request has already been fulfilled or not +func checkFulfillment(ra *Random, input models.RunInput, store *store.Store) (bool, error) { + if len(ra.CoordinatorAddress) == 0 { + return true, nil // only perform this check if the optional address field is present + } + + contract, err := solidity_vrf_coordinator_interface.NewVRFCoordinator( + ra.CoordinatorAddress.Address(), + store.EthClient, + ) + if err != nil { + return false, errors.Wrapf( + err, "unable to create vrf coordinator wrapper, address: %s", ra.CoordinatorAddress.Hex(), + ) + } + requestID, err := extractHex(input, "requestID") + if err != nil { + return false, err + } + requestID32 := [32]byte{} + copy(requestID32[:], requestID) + + callback, err := contract.Callbacks(nil, requestID32) + if err != nil { + return false, err + } + + // If seedAndBlockNumber is non-zero then the response has not yet been fulfilled + return !utils.IsEmpty(callback.SeedAndBlockNum[:]), nil +} diff --git a/core/adapters/random_test.go b/core/adapters/random_test.go index e55948a76de..c6e3b1896be 100644 --- a/core/adapters/random_test.go +++ b/core/adapters/random_test.go @@ -9,6 +9,9 @@ import ( "github.com/smartcontractkit/chainlink/core/adapters" "github.com/smartcontractkit/chainlink/core/internal/cltest" tvrf "github.com/smartcontractkit/chainlink/core/internal/cltest/vrf" + "github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/solidity_vrf_coordinator_interface" + "github.com/smartcontractkit/chainlink/core/internal/mocks" + "github.com/smartcontractkit/chainlink/core/services/eth" "github.com/smartcontractkit/chainlink/core/services/vrf" "github.com/smartcontractkit/chainlink/core/store/models" "github.com/smartcontractkit/chainlink/core/utils" @@ -23,6 +26,7 @@ import ( // NB: For changes to the VRF solidity code to be reflected here, "go generate" // must be run in core/services/vrf. 
func TestRandom_Perform(t *testing.T) { + t.Parallel() store, cleanup := cltest.NewStore(t) defer cleanup() publicKey := cltest.StoredVRFKey(t, store) @@ -63,3 +67,63 @@ func TestRandom_Perform(t *testing.T) { result = adapter.Perform(*input, store) require.Error(t, result.Error(), "must reject if keyHash doesn't match") } + +func TestRandom_Perform_CheckFulfillment(t *testing.T) { + t.Parallel() + store, cleanup := cltest.NewStore(t) + defer cleanup() + + ethMock := new(mocks.Client) + store.EthClient = ethMock + + publicKey := cltest.StoredVRFKey(t, store) + address := cltest.NewEIP55Address() + hash := utils.MustHash("a random string") + seed := big.NewInt(0x10) + blockNum := 10 + jsonInput, err := models.JSON{}.MultiAdd(models.KV{ + "seed": utils.Uint64ToHex(seed.Uint64()), + "keyHash": publicKey.MustHash().Hex(), + "blockHash": hash.Hex(), + "blockNum": blockNum, + "requestID": utils.AddHexPrefix(common.Bytes2Hex([]byte{1, 2, 3})), + }) + require.NoError(t, err) + input := models.NewRunInput(uuid.Nil, uuid.Nil, jsonInput, models.RunStatusUnstarted) + + abi := eth.MustGetABI(solidity_vrf_coordinator_interface.VRFCoordinatorABI) + registryMock := cltest.NewContractMockReceiver(t, ethMock, abi, address.Address()) + + for _, test := range []struct { + name string + addressParamPresent bool + seedAndBlockNumPresent bool + shouldFulfill bool + }{ + {"both missing", false, false, true}, + {"address missing, seed/block present", false, true, true}, + {"address present, seed/block missing", true, false, false}, + {"both present", true, true, true}, + } { + test := test + t.Run(test.name, func(tt *testing.T) { + adapter := adapters.Random{PublicKey: publicKey.String()} + response := solidity_vrf_coordinator_interface.Callbacks{ + CallbackContract: cltest.NewAddress(), + RandomnessFee: big.NewInt(100), + } + + if test.seedAndBlockNumPresent { + response.SeedAndBlockNum = [32]byte{1, 2, 3} + } + if test.addressParamPresent { + adapter.CoordinatorAddress = address + registryMock.MockResponse("callbacks", response).Once() + } + + result := adapter.Perform(*input, store) + require.Equal(tt, test.shouldFulfill, result.Error() == nil) + ethMock.AssertExpectations(t) + }) + } +} diff --git a/core/services/vrf/vrf_simulate_blockchain_test.go b/core/services/vrf/vrf_simulate_blockchain_test.go index d7e5858b4da..cf66f015d5a 100644 --- a/core/services/vrf/vrf_simulate_blockchain_test.go +++ b/core/services/vrf/vrf_simulate_blockchain_test.go @@ -57,8 +57,9 @@ func TestIntegration_RandomnessRequest(t *testing.T) { app.Store.VRFKeyStore.StoreInMemoryXXXTestingOnly(provingKey) j := cltest.NewJobWithRandomnessLog() - task1Params := cltest.JSONFromString(t, fmt.Sprintf(`{"PublicKey": "%s"}`, rawKey)) - task2JSON := fmt.Sprintf(`{"format": "preformatted", "address": "%s", "functionSelector": "0x5e1c1059"}`, cu.rootContractAddress.String()) + contractAddress := cu.rootContractAddress.String() + task1Params := cltest.JSONFromString(t, fmt.Sprintf(`{"publicKey": "%s", "coordinatorAddress": "%s"}`, rawKey, contractAddress)) + task2JSON := fmt.Sprintf(`{"format": "preformatted", "address": "%s", "functionSelector": "0x5e1c1059"}`, contractAddress) task2Params := cltest.JSONFromString(t, task2JSON) j.Initiators[0].Address = cu.rootContractAddress diff --git a/core/store/models/parse_randomness_request.go b/core/store/models/parse_randomness_request.go index 107c60b3fe9..34a69516811 100644 --- a/core/store/models/parse_randomness_request.go +++ b/core/store/models/parse_randomness_request.go @@ -40,6 +40,8 @@ func 
(parseRandomnessRequest) parseJSON(log Log) (js JSON, err error) { "blockHash": log.BlockHash.Hex(), // Number/height of the block in which this request appeared "blockNum": log.BlockNumber, + // requestID is identifier of the on-chain randomness request to fulfill + "requestID": parsedLog.RequestID.Hex(), }) } diff --git a/core/utils/utils.go b/core/utils/utils.go index d3513d15f73..f782fd1a463 100644 --- a/core/utils/utils.go +++ b/core/utils/utils.go @@ -170,6 +170,15 @@ func AddHexPrefix(str string) string { return str } +func IsEmpty(bytes []byte) bool { + for _, b := range bytes { + if b != 0 { + return false + } + } + return true +} + // Sleeper interface is used for tasks that need to be done on some // interval, excluding Cron, like reconnecting. type Sleeper interface { diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md index 4c9321f8903..82a5c44ba44 100644 --- a/docs/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -9,6 +9,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Added +- VRF Jobs now support an optional `coordinatorAddress` field that, when present, will tell the node to check the fulfillment status of any VRF request before attempting the fulfillment transaction. This will assist in the effort to run multiple nodes with one VRF key. + - Experimental: Add `DATABASE_BACKUP_MODE`, `DATABASE_BACKUP_FREQUENCY` and `DATABASE_BACKUP_URL` configuration variables It's now possible to configure database backups: on node start and separately, to be run at given frequency. @@ -30,7 +32,7 @@ Example settings: Periodic resending can be controlled using the `ETH_TX_RESEND_AFTER_THRESHOLD` env var (default 30s). Unconfirmed transactions will be resent periodically at this interval. It is recommended to leave this at the default setting, but it can be set to any [valid duration](https://golang.org/pkg/time/#ParseDuration) or to 0 to disable periodic resending. -- Logging can now be configured in the Operator UI. +- Logging can now be configured in the Operator UI. ### Fixed @@ -119,7 +121,7 @@ This means that the application gas price will always be updated correctly after reboot before the first transaction is ever sent, eliminating the previous scenario where the node could send underpriced or overpriced transactions for a period after a reboot, until the gas updater caught up. - + ### Changed - Bump `ORM_MAX_OPEN_CONNS` default from 10 to 20 From 25b0e476902b0434861247bcf00f0c8482448267 Mon Sep 17 00:00:00 2001 From: Peter van Mourik Date: Tue, 30 Mar 2021 15:29:09 +0200 Subject: [PATCH 104/116] Bump version and Changelog --- VERSION | 2 +- docs/CHANGELOG.md | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/VERSION b/VERSION index a3f5a8ed4d6..9b40aa6c214 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -0.10.3 +0.10.4 diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md index 82a5c44ba44..9b2b8f1118c 100644 --- a/docs/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -7,6 +7,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +## [0.10.4] - 2021-04-05 + ### Added - VRF Jobs now support an optional `coordinatorAddress` field that, when present, will tell the node to check the fulfillment status of any VRF request before attempting the fulfillment transaction. This will assist in the effort to run multiple nodes with one VRF key. 
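The check described by that changelog entry reduces to a single view call against the coordinator's public `callbacks` mapping. Below is a minimal standalone sketch of the same probe — not part of the patch series; the RPC endpoint, coordinator address, and request ID are placeholders — using the generated wrapper that `checkFulfillment` relies on:

```go
package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethclient"

	"github.com/smartcontractkit/chainlink/core/internal/gethwrappers/generated/solidity_vrf_coordinator_interface"
	"github.com/smartcontractkit/chainlink/core/utils"
)

func main() {
	// Placeholder endpoint and coordinator address, for illustration only.
	client, err := ethclient.Dial("ws://localhost:8546")
	if err != nil {
		log.Fatal(err)
	}
	coordinator, err := solidity_vrf_coordinator_interface.NewVRFCoordinator(
		common.HexToAddress("0x0000000000000000000000000000000000000001"), client)
	if err != nil {
		log.Fatal(err)
	}

	// The request ID comes from the RandomnessRequest log (the new
	// "requestID" field added to parseRandomnessRequest above).
	var requestID [32]byte
	copy(requestID[:], common.FromHex("0x010203"))

	callback, err := coordinator.Callbacks(nil, requestID)
	if err != nil {
		log.Fatal(err)
	}
	// A zeroed seedAndBlockNum means the request was already fulfilled (or was
	// never made), so a node sharing this VRF key should skip the transaction.
	fmt.Println("needs fulfillment:", !utils.IsEmpty(callback.SeedAndBlockNum[:]))
}
```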
From 448742509ff198e46e57bfb19a0af0ef15f1ccfe Mon Sep 17 00:00:00 2001 From: connorwstein Date: Tue, 30 Mar 2021 16:41:21 -0400 Subject: [PATCH 105/116] delete old result tasks --- .../migrations/0020_remove_result_task.go | 1 + core/store/migrations/migrate_test.go | 45 +++++++++++++++++++ 2 files changed, 46 insertions(+) diff --git a/core/store/migrations/0020_remove_result_task.go b/core/store/migrations/0020_remove_result_task.go index 7dd69411297..839641afbfa 100644 --- a/core/store/migrations/0020_remove_result_task.go +++ b/core/store/migrations/0020_remove_result_task.go @@ -8,6 +8,7 @@ import ( const ( up20 = ` ALTER TABLE pipeline_task_runs DROP CONSTRAINT chk_pipeline_task_run_fsm; + DELETE FROM pipeline_task_runs WHERE type = 'result'; ALTER TABLE pipeline_task_runs ADD CONSTRAINT chk_pipeline_task_run_fsm CHECK ( ((finished_at IS NOT NULL) AND (num_nonnulls(output, error) != 2)) diff --git a/core/store/migrations/migrate_test.go b/core/store/migrations/migrate_test.go index ca66322683a..3193acf637c 100644 --- a/core/store/migrations/migrate_test.go +++ b/core/store/migrations/migrate_test.go @@ -256,3 +256,48 @@ func TestMigrate_PipelineTaskRunDotID(t *testing.T) { require.NoError(t, migrations.MigrateDownFrom(orm.DB, "0016_pipeline_task_run_dot_id")) } + +func TestMigrate_RemoveResultTask(t *testing.T) { + _, orm, cleanup := cltest.BootstrapThrowawayORM(t, "migrations_result_task", false) + defer cleanup() + + require.NoError(t, migrations.MigrateUp(orm.DB, "0019_last_run_height_column_to_keeper_table")) + // Add some task specs + ps := pipeline.Spec{ + DotDagSource: "blah", + } + require.NoError(t, orm.DB.Create(&ps).Error) + // Add a pipeline run + pr := pipeline.Run{ + PipelineSpecID: ps.ID, + Meta: pipeline.JSONSerializable{}, + Errors: pipeline.RunErrors{}, + Outputs: pipeline.JSONSerializable{Null: true}, + } + require.NoError(t, orm.DB.Create(&pr).Error) + tr1 := pipeline.TaskRun{ + Type: pipeline.TaskTypeAny, + DotID: "any", + PipelineRunID: pr.ID, + Output: &pipeline.JSONSerializable{Null: true}, + Error: null.String{}, + } + require.NoError(t, orm.DB.Create(&tr1).Error) + f := time.Now() + tr2 := pipeline.TaskRun{ + Type: "result", + DotID: "result", + PipelineRunID: pr.ID, + Output: &pipeline.JSONSerializable{Val: "10"}, + Error: null.StringFrom("[null]"), + FinishedAt: &f, + } + require.NoError(t, orm.DB.Create(&tr2).Error) + + require.NoError(t, migrations.MigrateUp(orm.DB, "0020_remove_result_task")) + var ptrs []pipeline.TaskRun + require.NoError(t, orm.DB.Find(&ptrs).Error) + assert.Equal(t, 1, len(ptrs)) + + require.NoError(t, migrations.MigrateDownFrom(orm.DB, "0020_remove_result_task")) +} From 5237e1e05bb9004969caa3cb68bef0a986f4695b Mon Sep 17 00:00:00 2001 From: Ryan Hall Date: Tue, 30 Mar 2021 16:59:46 -0500 Subject: [PATCH 106/116] add job_id_topic_filter column and field --- .../0021_add_job_id_topic_filter.go | 24 +++++++++++++++++++ core/store/models/job_spec.go | 3 +++ core/store/models/job_spec_test.go | 2 ++ 3 files changed, 29 insertions(+) create mode 100644 core/store/migrations/0021_add_job_id_topic_filter.go diff --git a/core/store/migrations/0021_add_job_id_topic_filter.go b/core/store/migrations/0021_add_job_id_topic_filter.go new file mode 100644 index 00000000000..4044e988573 --- /dev/null +++ b/core/store/migrations/0021_add_job_id_topic_filter.go @@ -0,0 +1,24 @@ +package migrations + +import ( + "github.com/go-gormigrate/gormigrate/v2" + "gorm.io/gorm" +) + +const ( + up21 = `ALTER TABLE initiators ADD COLUMN job_id_topic_filter 
uuid;` + + down21 = `ALTER TABLE initiators DROP COLUMN job_id_topic_filter;` +) + +func init() { + Migrations = append(Migrations, &gormigrate.Migration{ + ID: "0021_add_job_id_topic_filter", + Migrate: func(db *gorm.DB) error { + return db.Exec(up21).Error + }, + Rollback: func(db *gorm.DB) error { + return db.Exec(down21).Error + }, + }) +} diff --git a/core/store/models/job_spec.go b/core/store/models/job_spec.go index c5fe597136e..1e09b3d64db 100644 --- a/core/store/models/job_spec.go +++ b/core/store/models/job_spec.go @@ -221,6 +221,9 @@ type InitiatorParams struct { FromBlock *utils.Big `json:"fromBlock,omitempty" gorm:"type:varchar(255)"` ToBlock *utils.Big `json:"toBlock,omitempty" gorm:"type:varchar(255)"` Topics Topics `json:"topics,omitempty"` + // JobIDTopicFilter, if present, is used in addition to the job's actual ID when filtering + // initiator logs + JobIDTopicFilter JobID `json:"jobIDTopicFilter,omitempty"` RequestData JSON `json:"requestData,omitempty" gorm:"type:text"` Feeds Feeds `json:"feeds,omitempty" gorm:"type:text"` diff --git a/core/store/models/job_spec_test.go b/core/store/models/job_spec_test.go index c1b94fd2c0c..1babf56ac25 100644 --- a/core/store/models/job_spec_test.go +++ b/core/store/models/job_spec_test.go @@ -5,6 +5,7 @@ import ( "testing" "time" + uuid "github.com/satori/go.uuid" "github.com/smartcontractkit/chainlink/core/adapters" "github.com/smartcontractkit/chainlink/core/assets" "github.com/smartcontractkit/chainlink/core/internal/cltest" @@ -116,6 +117,7 @@ func TestInitiatorParams(t *testing.T) { FromBlock: big, ToBlock: big, Topics: topics, + JobIDTopicFilter: models.JobID(uuid.NewV4()), RequestData: json, Feeds: json, Threshold: 42.42, From f1d904387e0d58297672ae940ab7c30eb09281f7 Mon Sep 17 00:00:00 2001 From: Ryan Hall Date: Tue, 30 Mar 2021 17:11:41 -0500 Subject: [PATCH 107/116] add optional jobSpecID query filtering to log-initiated jobs --- core/store/models/log_events.go | 8 +++- core/store/models/log_events_test.go | 61 ++++++++++++++++++++++++++++ 2 files changed, 68 insertions(+), 1 deletion(-) diff --git a/core/store/models/log_events.go b/core/store/models/log_events.go index 21dff01bc3e..fab6faf70e1 100644 --- a/core/store/models/log_events.go +++ b/core/store/models/log_events.go @@ -120,9 +120,15 @@ func FilterQueryFactory(i Initiator, from *big.Int, addresses ...common.Address) q.Topics = make([][]common.Hash, len(i.Topics)) copy(q.Topics, i.Topics) case initiationRequiresJobSpecID(i.Type): + jobIDFilters := JobSpecIDTopics(i.JobSpecID) + + if !utils.IsEmpty(i.InitiatorParams.JobIDTopicFilter.UUID().Bytes()) { + jobIDFilters = append(jobIDFilters, JobSpecIDTopics(i.InitiatorParams.JobIDTopicFilter)...) 
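+			// JobSpecIDTopics expands an ID into both of its log encodings (the
+			// raw bytes16 padded to 32 bytes and the hex of its ASCII string
+			// form), so each ID contributes two candidate topics to the filter.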
+		}
+
 		q.Topics = [][]common.Hash{
 			TopicsForInitiatorsWhichRequireJobSpecIDTopic[i.Type],
-			JobSpecIDTopics(i.JobSpecID),
+			jobIDFilters,
 		}
 	default:
 		return ethereum.FilterQuery{},
diff --git a/core/store/models/log_events_test.go b/core/store/models/log_events_test.go
index af1e5c33643..4811a3e69c4 100644
--- a/core/store/models/log_events_test.go
+++ b/core/store/models/log_events_test.go
@@ -295,6 +295,67 @@ func TestFilterQueryFactory_InitiatorRunLog(t *testing.T) {
 	assert.Equal(t, want, filter)
 }
 
+func TestFilterQueryFactory_InitiatorVRFLog(t *testing.T) {
+	t.Parallel()
+
+	id, err := models.NewIDFromString("4a1eb0e8df314cb894024a38991cff0f")
+	require.NoError(t, err)
+	filterID, err := models.NewIDFromString("679fd3c51581478f89f95f5e24de5e09")
+	require.NoError(t, err)
+
+	t.Run("it only uses the jobID if no additional filter present", func(tt *testing.T) {
+		i := models.Initiator{
+			Type:      models.InitiatorRandomnessLog,
+			JobSpecID: id,
+		}
+		fromBlock := big.NewInt(42)
+		filter, err := models.FilterQueryFactory(i, fromBlock)
+		assert.NoError(t, err)
+
+		want := ethereum.FilterQuery{
+			FromBlock: fromBlock.Add(fromBlock, big.NewInt(1)),
+			Topics: [][]common.Hash{
+				{
+					models.RandomnessRequestLogTopic,
+				}, {
+					common.HexToHash("0x4a1eb0e8df314cb894024a38991cff0f00000000000000000000000000000000"),
+					common.HexToHash("0x3461316562306538646633313463623839343032346133383939316366663066"),
+				},
+			},
+		}
+		assert.Equal(t, want, filter)
+	})
+
+	t.Run("it uses the optional additional jobID filter", func(tt *testing.T) {
+		i := models.Initiator{
+			Type:      models.InitiatorRandomnessLog,
+			JobSpecID: id,
+			InitiatorParams: models.InitiatorParams{
+				JobIDTopicFilter: filterID,
+			},
+		}
+		fromBlock := big.NewInt(42)
+		filter, err := models.FilterQueryFactory(i, fromBlock)
+		assert.NoError(t, err)
+
+		want := ethereum.FilterQuery{
+			FromBlock: fromBlock.Add(fromBlock, big.NewInt(1)),
+			Topics: [][]common.Hash{
+				{
+					models.RandomnessRequestLogTopic,
+				}, {
+					common.HexToHash("0x4a1eb0e8df314cb894024a38991cff0f00000000000000000000000000000000"),
+					common.HexToHash("0x3461316562306538646633313463623839343032346133383939316366663066"),
+					common.HexToHash("0x679fd3c51581478f89f95f5e24de5e0900000000000000000000000000000000"),
+					common.HexToHash("0x3637396664336335313538313437386638396639356635653234646535653039"),
+				},
+			},
+		}
+		assert.Equal(t, want, filter)
+	})
+
+}
+
 func TestRunLogEvent_ContractPayment(t *testing.T) {
 	t.Parallel()
 
From 867274d5d7bef037ea3a4468307cd8bae8c841ac Mon Sep 17 00:00:00 2001
From: connorwstein
Date: Thu, 1 Apr 2021 11:46:42 -0400
Subject: [PATCH 108/116] port backfill batching

---
 .../continuous-integration-workflow.yml |  2 +-
 core/cmd/testscript/wsconnect/main.go   |  2 +-
 core/services/subscription.go           | 46 +++++++++++------
 core/services/subscription_test.go      | 49 +++++++++++++++++++
 core/store/orm/config.go                |  4 ++
 core/store/orm/config_reader.go         |  1 +
 core/store/orm/schema.go                |  1 +
 core/utils/big.go                       |  7 +++
 tools/docker/integration.Dockerfile     |  2 +
 9 files changed, 96 insertions(+), 18 deletions(-)

diff --git a/.github/workflows/continuous-integration-workflow.yml b/.github/workflows/continuous-integration-workflow.yml
index 69f0df5d848..42db8ef5185 100644
--- a/.github/workflows/continuous-integration-workflow.yml
+++ b/.github/workflows/continuous-integration-workflow.yml
@@ -43,7 +43,7 @@ jobs:
         with:
           args: psql -v ON_ERROR_STOP=1 --username postgres -h postgres -c "CREATE USER chainlink NOSUPERUSER CREATEDB;"
       - name: Install Postgres for CLI tools
-        run: wget --quiet -O -
https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add - && echo "deb http://apt.postgresql.org/pub/repos/apt/ `lsb_release -cs`-pgdg main" |sudo tee /etc/apt/sources.list.d/pgdg.list && sudo apt update && sudo apt install -y postgresql-client-13 + run: wget --quiet -O - https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add - && wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add - && echo "deb http://apt.postgresql.org/pub/repos/apt/ `lsb_release -cs`-pgdg main" |sudo tee /etc/apt/sources.list.d/pgdg.list && sudo apt update && sudo apt install -y postgresql-client-13 - name: Cache Yarn dependencies uses: actions/cache@v2 with: diff --git a/core/cmd/testscript/wsconnect/main.go b/core/cmd/testscript/wsconnect/main.go index 802b8643c5b..e0cd6f95525 100644 --- a/core/cmd/testscript/wsconnect/main.go +++ b/core/cmd/testscript/wsconnect/main.go @@ -23,7 +23,7 @@ func main() { panicErr(err) err = c.Dial(context.Background()) panicErr(err) - sub, err := services.NewManagedSubscription(c, ethereum.FilterQuery{}, cb) + sub, err := services.NewManagedSubscription(c, ethereum.FilterQuery{}, cb, 0) panicErr(err) fmt.Println(sub) time.Sleep(30 * time.Second) diff --git a/core/services/subscription.go b/core/services/subscription.go index 879380257d9..a865d0e00e9 100644 --- a/core/services/subscription.go +++ b/core/services/subscription.go @@ -7,6 +7,8 @@ import ( "sync" "time" + "github.com/ethereum/go-ethereum/core/types" + "github.com/jpillora/backoff" "github.com/smartcontractkit/chainlink/core/logger" @@ -114,7 +116,7 @@ func NewInitiatorSubscription( callback: callback, } - managedSub, err := NewManagedSubscription(client, filter, sub.dispatchLog) + managedSub, err := NewManagedSubscription(client, filter, sub.dispatchLog, config.EthLogBackfillBatchSize()) if err != nil { return sub, errors.Wrap(err, "NewInitiatorSubscription#NewManagedSubscription") } @@ -189,19 +191,16 @@ func runJob(runManager RunManager, le models.LogRequest) { // ManagedSubscription encapsulates the connecting, backfilling, and clean up of an // ethereum node subscription. type ManagedSubscription struct { - logSubscriber eth.Client - logs chan models.Log - ethSubscription ethereum.Subscription - callback func(models.Log) + logSubscriber eth.Client + logs chan models.Log + ethSubscription ethereum.Subscription + callback func(models.Log) + backfillBatchSize uint32 } // NewManagedSubscription subscribes to the ethereum node with the passed filter // and delegates incoming logs to callback. 
-func NewManagedSubscription( - logSubscriber eth.Client, - filter ethereum.FilterQuery, - callback func(models.Log), -) (*ManagedSubscription, error) { +func NewManagedSubscription(logSubscriber eth.Client, filter ethereum.FilterQuery, callback func(models.Log), backfillBatchSize uint32) (*ManagedSubscription, error) { ctx := context.Background() logs := make(chan models.Log) es, err := logSubscriber.SubscribeFilterLogs(ctx, filter, logs) @@ -210,10 +209,11 @@ func NewManagedSubscription( } sub := &ManagedSubscription{ - logSubscriber: logSubscriber, - callback: callback, - logs: logs, - ethSubscription: es, + logSubscriber: logSubscriber, + callback: callback, + logs: logs, + ethSubscription: es, + backfillBatchSize: backfillBatchSize, } go sub.listenToLogs(filter) return sub, nil @@ -290,13 +290,27 @@ func (sub ManagedSubscription) backfillLogs(q ethereum.FilterQuery) map[string]b if q.FromBlock == nil { return backfilledSet } - - logs, err := sub.logSubscriber.FilterLogs(context.TODO(), q) + b, err := sub.logSubscriber.BlockByNumber(context.Background(), nil) if err != nil { logger.Errorw("Unable to backfill logs", "err", err, "fromBlock", q.FromBlock.String(), "toBlock", q.ToBlock.String()) return backfilledSet } + var logs []types.Log + latest := b.Number() + batchSize := int64(sub.backfillBatchSize) + for i := q.FromBlock.Int64(); i < latest.Int64(); i += batchSize { + q.FromBlock = big.NewInt(i) + to := utils.BigIntSlice{big.NewInt(i + batchSize - 1), latest} + q.ToBlock = to.Min() + batchLogs, err := sub.logSubscriber.FilterLogs(context.TODO(), q) + if err != nil { + logger.Errorw("Unable to backfill logs", "err", err, "fromBlock", q.FromBlock.String(), "toBlock", q.ToBlock.String()) + return backfilledSet + } + logs = append(logs, batchLogs...) 
+ } + for _, log := range logs { backfilledSet[log.BlockHash.String()] = true sub.callback(log) diff --git a/core/services/subscription_test.go b/core/services/subscription_test.go index b44a7a4cbec..8bf9e8f3e07 100644 --- a/core/services/subscription_test.go +++ b/core/services/subscription_test.go @@ -39,6 +39,10 @@ func TestServices_NewInitiatorSubscription_BackfillLogs(t *testing.T) { initr := job.Initiators[0] log := cltest.LogFromFixture(t, "testdata/subscription_logs.json") ethClient.On("SubscribeFilterLogs", mock.Anything, mock.Anything, mock.Anything).Maybe().Return(cltest.EmptyMockSubscription(), nil) + b := types.NewBlockWithHeader(&types.Header{ + Number: big.NewInt(2), // +1 from log + }) + ethClient.On("BlockByNumber", mock.Anything, mock.Anything).Maybe().Return(b, nil) ethClient.On("FilterLogs", mock.Anything, mock.Anything).Maybe().Return([]types.Log{log}, nil) var count int32 @@ -53,6 +57,51 @@ func TestServices_NewInitiatorSubscription_BackfillLogs(t *testing.T) { }).Should(gomega.Equal(int32(1))) } +func TestServices_NewInitiatorSubscription_BackfillLogs_BatchWindows(t *testing.T) { + t.Parallel() + + store, cleanup := cltest.NewStore(t) + defer cleanup() + ethClient := new(mocks.Client) + defer ethClient.AssertExpectations(t) + store.EthClient = ethClient + + job := cltest.NewJobWithLogInitiator() + initr := job.Initiators[0] + log := cltest.LogFromFixture(t, "testdata/subscription_logs.json") + ethClient.On("SubscribeFilterLogs", mock.Anything, mock.Anything, mock.Anything).Maybe().Return(cltest.EmptyMockSubscription(), nil) + b := types.NewBlockWithHeader(&types.Header{ + Number: big.NewInt(213), + }) + ethClient.On("BlockByNumber", mock.Anything, mock.Anything).Maybe().Return(b, nil) + ethClient.On("FilterLogs", mock.Anything, mock.Anything).Once().Return([]types.Log{log}, nil).Run(func(args mock.Arguments) { + query := args.Get(1).(ethereum.FilterQuery) + assert.Equal(t, big.NewInt(1), query.FromBlock) + assert.Equal(t, big.NewInt(100), query.ToBlock) + }) + ethClient.On("FilterLogs", mock.Anything, mock.Anything).Once().Return([]types.Log{log}, nil).Run(func(args mock.Arguments) { + query := args.Get(1).(ethereum.FilterQuery) + assert.Equal(t, big.NewInt(101), query.FromBlock) + assert.Equal(t, big.NewInt(200), query.ToBlock) + }) + ethClient.On("FilterLogs", mock.Anything, mock.Anything).Once().Return([]types.Log{log}, nil).Run(func(args mock.Arguments) { + query := args.Get(1).(ethereum.FilterQuery) + assert.Equal(t, big.NewInt(201), query.FromBlock) + assert.Equal(t, big.NewInt(213), query.ToBlock) + }) + + var count int32 + callback := func(services.RunManager, models.LogRequest) { atomic.AddInt32(&count, 1) } + fromBlock := cltest.Head(0) + jm := new(mocks.RunManager) + sub, err := services.NewInitiatorSubscription(initr, store.EthClient, jm, fromBlock.NextInt(), store.Config, callback) + assert.NoError(t, err) + defer sub.Unsubscribe() + gomega.NewGomegaWithT(t).Eventually(func() int32 { + return atomic.LoadInt32(&count) + }).Should(gomega.Equal(int32(3))) +} + func TestServices_NewInitiatorSubscription_BackfillLogs_WithNoHead(t *testing.T) { t.Parallel() diff --git a/core/store/orm/config.go b/core/store/orm/config.go index b85ab57bca5..a62000313e6 100644 --- a/core/store/orm/config.go +++ b/core/store/orm/config.go @@ -494,6 +494,10 @@ func (c Config) EthTxResendAfterThreshold() time.Duration { return c.getWithFallback("EthTxResendAfterThreshold", parseDuration).(time.Duration) } +func (c Config) EthLogBackfillBatchSize() uint32 { + return 
c.getWithFallback("EthLogBackfillBatchSize", parseUint32).(uint32) +} + // EthereumURL represents the URL of the Ethereum node to connect Chainlink to. func (c Config) EthereumURL() string { return c.viper.GetString(EnvVarName("EthereumURL")) diff --git a/core/store/orm/config_reader.go b/core/store/orm/config_reader.go index 747294b04b9..250b55eff05 100644 --- a/core/store/orm/config_reader.go +++ b/core/store/orm/config_reader.go @@ -86,4 +86,5 @@ type ConfigReader interface { SessionSecret() ([]byte, error) SessionOptions() sessions.Options TriggerFallbackDBPollInterval() time.Duration + EthLogBackfillBatchSize() uint32 } diff --git a/core/store/orm/schema.go b/core/store/orm/schema.go index c5093ff8c25..d392a30943a 100644 --- a/core/store/orm/schema.go +++ b/core/store/orm/schema.go @@ -58,6 +58,7 @@ type ConfigSchema struct { EthBalanceMonitorBlockDelay uint16 `env:"ETH_BALANCE_MONITOR_BLOCK_DELAY" default:"1"` EthReceiptFetchBatchSize uint32 `env:"ETH_RECEIPT_FETCH_BATCH_SIZE" default:"100"` EthTxResendAfterThreshold time.Duration `env:"ETH_TX_RESEND_AFTER_THRESHOLD" default:"30s"` + EthLogBackfillBatchSize uint32 `env:"ETH_LOG_BACKFILL_BATCH_SIZE" default:"100"` EthereumURL string `env:"ETH_URL" default:"ws://localhost:8546"` EthereumSecondaryURL string `env:"ETH_SECONDARY_URL" default:""` EthereumSecondaryURLs string `env:"ETH_SECONDARY_URLS" default:""` diff --git a/core/utils/big.go b/core/utils/big.go index fdc2f9b7914..9bc417b0608 100644 --- a/core/utils/big.go +++ b/core/utils/big.go @@ -163,3 +163,10 @@ func (s BigIntSlice) Max() *big.Int { tmp.Sort() return tmp[len(tmp)-1] } + +func (s BigIntSlice) Min() *big.Int { + tmp := make(BigIntSlice, len(s)) + copy(tmp, s) + tmp.Sort() + return tmp[0] +} diff --git a/tools/docker/integration.Dockerfile b/tools/docker/integration.Dockerfile index fb860bb6be6..64d570638aa 100644 --- a/tools/docker/integration.Dockerfile +++ b/tools/docker/integration.Dockerfile @@ -2,6 +2,8 @@ # THIS LINE IS AUTOGENERATED, DO NOT CHANGE MANUALLY FROM smartcontract/builder:1.0.39 +RUN wget --quiet -O - https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - 2>/dev/null + # Install docker and docker compose RUN apt-get update \ # From 2b46bd6e5fdfda67510818318e68875c07e69607 Mon Sep 17 00:00:00 2001 From: connorwstein Date: Thu, 1 Apr 2021 15:31:51 -0400 Subject: [PATCH 109/116] fix sub test --- core/services/subscription_test.go | 28 +++++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/core/services/subscription_test.go b/core/services/subscription_test.go index 8bf9e8f3e07..adaa088e666 100644 --- a/core/services/subscription_test.go +++ b/core/services/subscription_test.go @@ -40,7 +40,7 @@ func TestServices_NewInitiatorSubscription_BackfillLogs(t *testing.T) { log := cltest.LogFromFixture(t, "testdata/subscription_logs.json") ethClient.On("SubscribeFilterLogs", mock.Anything, mock.Anything, mock.Anything).Maybe().Return(cltest.EmptyMockSubscription(), nil) b := types.NewBlockWithHeader(&types.Header{ - Number: big.NewInt(2), // +1 from log + Number: big.NewInt(2), }) ethClient.On("BlockByNumber", mock.Anything, mock.Anything).Maybe().Return(b, nil) ethClient.On("FilterLogs", mock.Anything, mock.Anything).Maybe().Return([]types.Log{log}, nil) @@ -113,6 +113,10 @@ func TestServices_NewInitiatorSubscription_BackfillLogs_WithNoHead(t *testing.T) job := cltest.NewJobWithLogInitiator() initr := job.Initiators[0] + b := types.NewBlockWithHeader(&types.Header{ + Number: big.NewInt(2), + }) + 
ethClient.On("BlockByNumber", mock.Anything, mock.Anything).Maybe().Return(b, nil) ethClient.On("FilterLogs", mock.Anything, mock.Anything).Maybe().Return([]models.Log{}, nil) ethClient.On("SubscribeFilterLogs", mock.Anything, mock.Anything, mock.Anything).Return(cltest.EmptyMockSubscription(), nil) @@ -140,6 +144,10 @@ func TestServices_NewInitiatorSubscription_PreventsDoubleDispatch(t *testing.T) initr := job.Initiators[0] log := cltest.LogFromFixture(t, "testdata/subscription_logs.json") + b := types.NewBlockWithHeader(&types.Header{ + Number: big.NewInt(2), + }) + gethClient.On("BlockByNumber", mock.Anything, mock.Anything).Maybe().Return(b, nil) gethClient.On("FilterLogs", mock.Anything, mock.Anything).Maybe().Return([]models.Log{log}, nil) logsCh := cltest.MockSubscribeToLogsCh(gethClient, subMock) var count int32 @@ -242,6 +250,10 @@ func TestServices_StartJobSubscription(t *testing.T) { defer assertMocksCalled() store.EthClient = eth.NewClientWith(rpcClient, gethClient) subMock.On("Err").Return(nil) + b := types.NewBlockWithHeader(&types.Header{ + Number: big.NewInt(100), + }) + gethClient.On("BlockByNumber", mock.Anything, mock.Anything).Maybe().Return(b, nil) gethClient.On("FilterLogs", mock.Anything, mock.Anything).Maybe().Return([]models.Log{}, nil) logsCh := cltest.MockSubscribeToLogsCh(gethClient, subMock) job := cltest.NewJob() @@ -310,6 +322,10 @@ func TestServices_StartJobSubscription_RunlogNoTopicMatch(t *testing.T) { subMock.On("Err").Maybe().Return(nil) logsCh := cltest.MockSubscribeToLogsCh(gethClient, subMock) + b := types.NewBlockWithHeader(&types.Header{ + Number: big.NewInt(100), + }) + gethClient.On("BlockByNumber", mock.Anything, mock.Anything).Maybe().Return(b, nil) gethClient.On("FilterLogs", mock.Anything, mock.Anything).Maybe().Return([]models.Log{}, nil) job := cltest.NewJob() initr := models.Initiator{Type: "runlog"} @@ -374,6 +390,9 @@ func TestServices_NewInitiatorSubscription_EthLog_ReplayFromBlock(t *testing.T) job := cltest.NewJobWithLogInitiator() job.Initiators[0].InitiatorParams.FromBlock = test.initrParamFromBlock + b := types.NewBlockWithHeader(&types.Header{ + Number: big.NewInt(100), + }) expectedQuery := ethereum.FilterQuery{ FromBlock: test.wantFromBlock, Addresses: []common.Address{job.Initiators[0].InitiatorParams.Address}, @@ -382,7 +401,9 @@ func TestServices_NewInitiatorSubscription_EthLog_ReplayFromBlock(t *testing.T) log := cltest.LogFromFixture(t, "testdata/subscription_logs.json") + ethClient.On("BlockByNumber", mock.Anything, mock.Anything).Maybe().Return(b, nil) ethClient.On("SubscribeFilterLogs", mock.Anything, expectedQuery, mock.Anything).Return(cltest.EmptyMockSubscription(), nil) + expectedQuery.ToBlock = b.Number() ethClient.On("FilterLogs", mock.Anything, expectedQuery).Return([]models.Log{log}, nil) executeJobChannel := make(chan struct{}) @@ -444,7 +465,12 @@ func TestServices_NewInitiatorSubscription_RunLog_ReplayFromBlock(t *testing.T) log := receipt.Logs[3] log.Topics[1] = models.IDToTopic(job.ID) + b := types.NewBlockWithHeader(&types.Header{ + Number: big.NewInt(100), + }) + ethClient.On("BlockByNumber", mock.Anything, mock.Anything).Maybe().Return(b, nil) ethClient.On("SubscribeFilterLogs", mock.Anything, expectedQuery, mock.Anything).Return(cltest.EmptyMockSubscription(), nil) + expectedQuery.ToBlock = b.Number() ethClient.On("FilterLogs", mock.Anything, expectedQuery).Return([]models.Log{*log}, nil) executeJobChannel := make(chan struct{}) From 5c70acb7110fd210060cb86a997678fcc5ea5e29 Mon Sep 17 00:00:00 2001 From: 
Sam Date: Mon, 5 Apr 2021 10:22:53 +0100 Subject: [PATCH 110/116] Only log fatal errors from the re-sender All attempts will "error" if successful because they are known transactions. --- .../bulletprooftxmanager/eth_resender.go | 36 +++++++++++-------- .../bulletprooftxmanager/eth_resender_test.go | 7 +++- 2 files changed, 27 insertions(+), 16 deletions(-) diff --git a/core/services/bulletprooftxmanager/eth_resender.go b/core/services/bulletprooftxmanager/eth_resender.go index 4f7fac5896a..e91a2a3d124 100644 --- a/core/services/bulletprooftxmanager/eth_resender.go +++ b/core/services/bulletprooftxmanager/eth_resender.go @@ -97,15 +97,16 @@ func (er *EthResender) resendUnconfirmed() error { logger.Debugw(fmt.Sprintf("EthResender: re-sending %d transactions that were last sent over %s ago", len(attempts), er.ageThreshold), "n", len(attempts)) - var reqs []rpc.BatchElem - for _, attempt := range attempts { + reqs := make([]rpc.BatchElem, len(attempts)) + ethTxIDs := make([]int64, len(attempts)) + for i, attempt := range attempts { + ethTxIDs[i] = attempt.EthTxID req := rpc.BatchElem{ Method: "eth_sendRawTransaction", Args: []interface{}{hexutil.Encode(attempt.SignedRawTx)}, Result: &common.Hash{}, } - - reqs = append(reqs, req) + reqs[i] = req } now := time.Now() @@ -113,20 +114,11 @@ func (er *EthResender) resendUnconfirmed() error { return errors.Wrap(err, "failed to re-send transactions") } - var succeeded []int64 - for i, req := range reqs { - if req.Error == nil { - succeeded = append(succeeded, attempts[i].EthTxID) - } - } - - if err := er.updateBroadcastAts(now, succeeded); err != nil { + if err := er.updateBroadcastAts(now, ethTxIDs); err != nil { return errors.Wrap(err, "failed to update last succeeded on attempts") } - nSuccess := len(succeeded) - nErrored := len(attempts) - nSuccess - logger.Debugw("EthResender: completed", "nSuccess", nSuccess, "nErrored", nErrored) + logResendResult(reqs) return nil } @@ -156,3 +148,17 @@ func (er *EthResender) updateBroadcastAts(now time.Time, etxIDs []int64) error { // our version is later. return er.db.Exec(`UPDATE eth_txes SET broadcast_at = ? WHERE id = ANY(?) 
AND broadcast_at < ?`, now, pq.Array(etxIDs), now).Error } + +func logResendResult(reqs []rpc.BatchElem) { + var nNew int + var nFatal int + for _, req := range reqs { + serr := eth.NewSendError(req.Error) + if serr == nil { + nNew++ + } else if serr.Fatal() { + nFatal++ + } + } + logger.Debugw("EthResender: completed", "n", len(reqs), "nNew", nNew, "nFatal", nFatal) +} diff --git a/core/services/bulletprooftxmanager/eth_resender_test.go b/core/services/bulletprooftxmanager/eth_resender_test.go index 54cad70deed..0f6f40551f9 100644 --- a/core/services/bulletprooftxmanager/eth_resender_test.go +++ b/core/services/bulletprooftxmanager/eth_resender_test.go @@ -7,6 +7,7 @@ import ( "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/rpc" + "github.com/pkg/errors" "github.com/smartcontractkit/chainlink/core/internal/cltest" "github.com/smartcontractkit/chainlink/core/internal/mocks" "github.com/smartcontractkit/chainlink/core/services/bulletprooftxmanager" @@ -78,7 +79,11 @@ func Test_EthResender_Start(t *testing.T) { return len(b) == 2 && b[0].Method == "eth_sendRawTransaction" && b[0].Args[0] == hexutil.Encode(etx.EthTxAttempts[0].SignedRawTx) && b[1].Method == "eth_sendRawTransaction" && b[1].Args[0] == hexutil.Encode(etx2.EthTxAttempts[0].SignedRawTx) - })).Return(nil) + })).Return(nil).Run(func(args mock.Arguments) { + elems := args.Get(1).([]rpc.BatchElem) + // It should update BroadcastAt even if there is an error here + elems[1].Error = errors.New("kaboom") + }) func() { er.Start() From e30df6a48ed108eedf0f50d385d011c86b945ad0 Mon Sep 17 00:00:00 2001 From: Sam Date: Mon, 5 Apr 2021 10:17:11 +0100 Subject: [PATCH 111/116] Reduce transaction timeout in eth_confirmer We have seen this hit the hard limit (3600s) in prod. It would never make any sense to wait that long. Reduce the timeout here in the hopes that if it happens again at least we fail fast enough to get some useful debugging information. --- core/services/bulletprooftxmanager/eth_confirmer.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/core/services/bulletprooftxmanager/eth_confirmer.go b/core/services/bulletprooftxmanager/eth_confirmer.go index 30a8b6c8808..c85d2bdf84f 100644 --- a/core/services/bulletprooftxmanager/eth_confirmer.go +++ b/core/services/bulletprooftxmanager/eth_confirmer.go @@ -851,7 +851,9 @@ func saveSentAttempt(db *gorm.DB, attempt *models.EthTxAttempt, broadcastAt time return errors.New("expected state to be in_progress") } attempt.State = models.EthTxAttemptBroadcast - return postgres.GormTransaction(context.Background(), db, func(tx *gorm.DB) error { + ctx, cancel := postgres.DefaultQueryCtx() + defer cancel() + return postgres.GormTransaction(ctx, db, func(tx *gorm.DB) error { // In case of null broadcast_at (shouldn't happen) we don't want to // update anyway because it indicates a state where broadcast_at makes // no sense e.g. fatal_error @@ -867,7 +869,9 @@ func saveInsufficientEthAttempt(db *gorm.DB, attempt *models.EthTxAttempt, broad return errors.New("expected state to be either in_progress or insufficient_eth") } attempt.State = models.EthTxAttemptInsufficientEth - return postgres.GormTransaction(context.Background(), db, func(tx *gorm.DB) error { + ctx, cancel := postgres.DefaultQueryCtx() + defer cancel() + return postgres.GormTransaction(ctx, db, func(tx *gorm.DB) error { // In case of null broadcast_at (shouldn't happen) we don't want to // update anyway because it indicates a state where broadcast_at makes // no sense e.g. 
fatal_error From 3f9df0d676d9df2f7354657a022b01208e38be67 Mon Sep 17 00:00:00 2001 From: Steve Ellis Date: Mon, 5 Apr 2021 23:06:51 -0400 Subject: [PATCH 112/116] port over test fixes --- core/services/job_subscriber_test.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/core/services/job_subscriber_test.go b/core/services/job_subscriber_test.go index d826d28bd78..1df66360de3 100644 --- a/core/services/job_subscriber_test.go +++ b/core/services/job_subscriber_test.go @@ -6,6 +6,7 @@ import ( "sync" "testing" + "github.com/ethereum/go-ethereum/core/types" "github.com/smartcontractkit/chainlink/core/services/eth" "github.com/smartcontractkit/chainlink/core/internal/cltest" @@ -87,6 +88,10 @@ func TestJobSubscriber_AddJob_RemoveJob(t *testing.T) { _, gethClient, _, assertMocksCalled := cltest.NewEthMocks(t) defer assertMocksCalled() store.EthClient = eth.NewClientWith(nil, gethClient) + b := types.NewBlockWithHeader(&types.Header{ + Number: big.NewInt(2), + }) + gethClient.On("BlockByNumber", mock.Anything, mock.Anything).Maybe().Return(b, nil) gethClient.On("SubscribeFilterLogs", mock.Anything, mock.Anything, mock.Anything).Maybe().Return(cltest.EmptyMockSubscription(), nil) gethClient.On("FilterLogs", mock.Anything, mock.Anything).Maybe().Return([]models.Log{}, nil) @@ -149,6 +154,10 @@ func TestJobSubscriber_Connect_Disconnect(t *testing.T) { gethClient := new(mocks.GethClient) defer gethClient.AssertExpectations(t) store.EthClient = eth.NewClientWith(nil, gethClient) + b := types.NewBlockWithHeader(&types.Header{ + Number: big.NewInt(500), + }) + gethClient.On("BlockByNumber", mock.Anything, mock.Anything).Maybe().Return(b, nil) gethClient.On("SubscribeFilterLogs", mock.Anything, mock.Anything, mock.Anything).Maybe().Return(cltest.EmptyMockSubscription(), nil) gethClient.On("FilterLogs", mock.Anything, mock.Anything).Maybe().Return([]models.Log{}, nil) From 2117910bd40c7bf9696d5b06fdf31f53eacfa06c Mon Sep 17 00:00:00 2001 From: Sam Date: Mon, 5 Apr 2021 20:31:55 +0100 Subject: [PATCH 113/116] Introduce maximum time limit for EthConfirmer#ProcessHead Also disable NonceSyncer by default. 
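A compilable sketch of the guard being introduced here, with illustrative names and a simulated stall — only the 10-minute sanity limit mirrors the patch:

```go
package example

import (
	"context"
	"time"
)

// processHeadTimeout mirrors the sanity limit added in eth_confirmer.go below.
const processHeadTimeout = 10 * time.Minute

// processHead stands in for the confirmer's per-head work; the real method
// runs receipt checks, gas bumps, and re-org handling for the new head.
func processHead(ctx context.Context) error {
	select {
	case <-time.After(time.Hour): // simulate a wedged query or RPC call
		return nil
	case <-ctx.Done():
		// With the timeout in place we fail fast with a useful error instead
		// of holding the advisory lock for however long the stall lasts.
		return ctx.Err()
	}
}

// ProcessHead bounds the whole per-head cycle with a deadline, as the patch
// does via context.WithTimeout at the top of the real ProcessHead.
func ProcessHead(parent context.Context) error {
	ctx, cancel := context.WithTimeout(parent, processHeadTimeout)
	defer cancel()
	return processHead(ctx)
}
```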
--- .../bulletprooftxmanager.go | 12 +--- .../bulletprooftxmanager/eth_broadcaster.go | 12 ++-- .../eth_broadcaster_test.go | 2 + .../bulletprooftxmanager/eth_confirmer.go | 58 ++++++++++++++----- .../eth_confirmer_test.go | 2 +- .../bulletprooftxmanager/eth_resender.go | 1 + .../bulletprooftxmanager/nonce_syncer.go | 2 +- core/services/eth/client.go | 12 ++++ core/store/orm/config.go | 11 +++- core/store/orm/config_reader.go | 3 +- core/store/orm/schema.go | 3 + 11 files changed, 82 insertions(+), 36 deletions(-) diff --git a/core/services/bulletprooftxmanager/bulletprooftxmanager.go b/core/services/bulletprooftxmanager/bulletprooftxmanager.go index f9b66a307dd..6a2650850ec 100644 --- a/core/services/bulletprooftxmanager/bulletprooftxmanager.go +++ b/core/services/bulletprooftxmanager/bulletprooftxmanager.go @@ -4,7 +4,6 @@ import ( "bytes" "context" "math/big" - "time" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" @@ -26,12 +25,6 @@ import ( // For more information about the BulletproofTxManager architecture, see the design doc: // https://www.notion.so/chainlink/BulletproofTxManager-Architecture-Overview-9dc62450cd7a443ba9e7dceffa1a8d6b -const ( - // maxEthNodeRequestTime is the worst case time we will wait for a response - // from the eth node before we consider it to be an error - maxEthNodeRequestTime = 15 * time.Second -) - var ( promNumGasBumps = promauto.NewCounter(prometheus.CounterOpts{ Name: "tx_manager_num_gas_bumps", @@ -104,8 +97,9 @@ func sendTransaction(ctx context.Context, ethClient eth.Client, a models.EthTxAt return eth.NewFatalSendError(err) } - ctx, cancel := context.WithTimeout(ctx, maxEthNodeRequestTime) + ctx, cancel := eth.DefaultQueryCtx(ctx) defer cancel() + err = ethClient.SendTransaction(ctx, signedTx) err = errors.WithStack(err) @@ -135,7 +129,7 @@ func sendEmptyTransaction( if err != nil { return nil, err } - ctx, cancel := context.WithTimeout(context.Background(), maxEthNodeRequestTime) + ctx, cancel := eth.DefaultQueryCtx() defer cancel() err = ethClient.SendTransaction(ctx, signedTx) return signedTx, err diff --git a/core/services/bulletprooftxmanager/eth_broadcaster.go b/core/services/bulletprooftxmanager/eth_broadcaster.go index 02f32775580..4f9b9c7de5d 100644 --- a/core/services/bulletprooftxmanager/eth_broadcaster.go +++ b/core/services/bulletprooftxmanager/eth_broadcaster.go @@ -86,9 +86,11 @@ func (eb *ethBroadcaster) Start() error { return errors.Wrap(err, "EthBroadcaster could not start") } - syncer := NewNonceSyncer(eb.store, eb.config, eb.ethClient) - if err := syncer.SyncAll(eb.ctx); err != nil { - return errors.Wrap(err, "EthBroadcaster failed to sync with on-chain nonce") + if eb.config.EthNonceAutoSync() { + syncer := NewNonceSyncer(eb.store, eb.config, eb.ethClient) + if err := syncer.SyncAll(eb.ctx); err != nil { + return errors.Wrap(err, "EthBroadcaster failed to sync with on-chain nonce") + } } eb.wg.Add(1) @@ -265,9 +267,7 @@ func (eb *ethBroadcaster) handleInProgressEthTx(etx models.EthTx, attempt models return errors.Errorf("invariant violation: expected transaction %v to be in_progress, it was %s", etx.ID, etx.State) } - ctx, cancel := context.WithTimeout(eb.ctx, maxEthNodeRequestTime) - defer cancel() - sendError := sendTransaction(ctx, eb.ethClient, attempt) + sendError := sendTransaction(context.TODO(), eb.ethClient, attempt) if sendError.IsTooExpensive() { logger.Errorw("EthBroadcaster: transaction gas price was rejected by the eth node for being too high. 
Consider increasing your eth node's RPCTxFeeCap (it is suggested to run geth with no cap i.e. --rpc.gascap=0 --rpc.txfeecap=0)", diff --git a/core/services/bulletprooftxmanager/eth_broadcaster_test.go b/core/services/bulletprooftxmanager/eth_broadcaster_test.go index 7d7948cb97e..fd7d5d46bc4 100644 --- a/core/services/bulletprooftxmanager/eth_broadcaster_test.go +++ b/core/services/bulletprooftxmanager/eth_broadcaster_test.go @@ -228,6 +228,8 @@ func TestEthBroadcaster_AssignsNonceOnStart(t *testing.T) { config, cleanup := cltest.NewConfig(t) defer cleanup() + config.Set("ETH_NONCE_AUTO_SYNC", "true") + ethNodeNonce := uint64(22) // Insert new key to test we only update the intended one diff --git a/core/services/bulletprooftxmanager/eth_confirmer.go b/core/services/bulletprooftxmanager/eth_confirmer.go index c85d2bdf84f..bca3518930f 100644 --- a/core/services/bulletprooftxmanager/eth_confirmer.go +++ b/core/services/bulletprooftxmanager/eth_confirmer.go @@ -27,6 +27,12 @@ import ( "gorm.io/gorm" ) +const ( + // processHeadTimeout represents a sanity limit on how long ProcessHead + // should take to complete + processHeadTimeout = 10 * time.Minute +) + var ( // ErrCouldNotGetReceipt is the error string we save if we reach our finality depth for a confirmed transaction without ever getting a receipt // This most likely happened because an external wallet used the account for this nonce @@ -154,19 +160,25 @@ func (ec *ethConfirmer) runLoop() { // ProcessHead takes all required transactions for the confirmer on a new head func (ec *ethConfirmer) ProcessHead(ctx context.Context, head models.Head) error { - return ec.store.AdvisoryLocker.WithAdvisoryLock(context.TODO(), postgres.AdvisoryLockClassID_EthConfirmer, postgres.AdvisoryLockObjectID_EthConfirmer, func() error { + ctx, cancel := context.WithTimeout(ctx, processHeadTimeout) + defer cancel() + + return ec.store.AdvisoryLocker.WithAdvisoryLock(context.Background(), postgres.AdvisoryLockClassID_EthConfirmer, postgres.AdvisoryLockObjectID_EthConfirmer, func() error { return ec.processHead(ctx, head) }) } // NOTE: This SHOULD NOT be run concurrently or it could behave badly func (ec *ethConfirmer) processHead(ctx context.Context, head models.Head) error { + mark := time.Now() + + // TODO: Use a local logger? 
+ logger.Debugw("EthConfirmer: processHead", "headNum", head.Number, "time", mark, "id", "eth_confirmer") + if err := ec.SetBroadcastBeforeBlockNum(head.Number); err != nil { return errors.Wrap(err, "SetBroadcastBeforeBlockNum failed") } - mark := time.Now() - if err := ec.CheckForReceipts(ctx, head.Number); err != nil { return errors.Wrap(err, "CheckForReceipts failed") } @@ -203,7 +215,7 @@ func (ec *ethConfirmer) SetBroadcastBeforeBlockNum(blockNum int64) error { } func (ec *ethConfirmer) CheckForReceipts(ctx context.Context, blockNum int64) error { - batchSize := int(ec.config.EthReceiptFetchBatchSize()) + batchSize := int(ec.config.EthRPCDefaultBatchSize()) attempts, err := ec.findEthTxAttemptsRequiringReceiptFetch() if err != nil { @@ -229,16 +241,16 @@ func (ec *ethConfirmer) CheckForReceipts(ctx context.Context, blockNum int64) er if err != nil { return errors.Wrap(err, "batchFetchReceipts failed") } - if err := ec.saveFetchedReceipts(ctx, receipts); err != nil { + if err := ec.saveFetchedReceipts(receipts); err != nil { return errors.Wrap(err, "saveFetchedReceipts failed") } } - if err := ec.markConfirmedMissingReceipt(ctx); err != nil { + if err := ec.markConfirmedMissingReceipt(); err != nil { return errors.Wrap(err, "unable to mark eth_txes as 'confirmed_missing_receipt'") } - if err := ec.markOldTxesMissingReceiptAsErrored(ctx, blockNum); err != nil { + if err := ec.markOldTxesMissingReceiptAsErrored(blockNum); err != nil { return errors.Wrap(err, "unable to confirm buried unconfirmed eth_txes") } @@ -266,6 +278,9 @@ func (ec *ethConfirmer) batchFetchReceipts(ctx context.Context, attempts []model reqs = append(reqs, req) } + ctx, cancel := eth.DefaultQueryCtx(ctx) + defer cancel() + err = ec.ethClient.BatchCallContext(ctx, reqs) if err != nil { return nil, errors.Wrap(err, "EthConfirmer#batchFetchReceipts error fetching receipts with BatchCallContext") @@ -326,7 +341,7 @@ func (ec *ethConfirmer) batchFetchReceipts(ctx context.Context, attempts []model return } -func (ec *ethConfirmer) saveFetchedReceipts(ctx context.Context, receipts []Receipt) (err error) { +func (ec *ethConfirmer) saveFetchedReceipts(receipts []Receipt) (err error) { if len(receipts) == 0 { return nil } @@ -392,6 +407,10 @@ func (ec *ethConfirmer) saveFetchedReceipts(ctx context.Context, receipts []Rece ` stmt := fmt.Sprintf(sql, strings.Join(valueStrs, ",")) + + ctx, cancel := postgres.DefaultQueryCtx() + defer cancel() + _, err = ec.store.MustSQLDB().ExecContext(ctx, stmt, valueArgs...) return errors.Wrap(err, "saveFetchedReceipts failed to save receipts") } @@ -416,11 +435,15 @@ func (ec *ethConfirmer) saveFetchedReceipts(ctx context.Context, receipts []Rece // // We will continue to try to fetch a receipt for these attempts until all // attempts are below the finality depth from current head. 
-func (ec *ethConfirmer) markConfirmedMissingReceipt(ctx context.Context) (err error) { +func (ec *ethConfirmer) markConfirmedMissingReceipt() (err error) { d, err := ec.store.DB.DB() if err != nil { return err } + + ctx, cancel := postgres.DefaultQueryCtx() + defer cancel() + _, err = d.ExecContext(ctx, ` UPDATE eth_txes SET state = 'confirmed_missing_receipt' @@ -440,7 +463,7 @@ AND nonce < ( // // The job run will also be marked as errored in this case since we never got a // receipt and thus cannot pass on any transaction hash -func (ec *ethConfirmer) markOldTxesMissingReceiptAsErrored(ctx context.Context, blockNum int64) error { +func (ec *ethConfirmer) markOldTxesMissingReceiptAsErrored(blockNum int64) error { // cutoff is a block height // Any 'confirmed_missing_receipt' eth_tx with all attempts older than this block height will be marked as errored // We will not try to query for receipts for this transaction any more @@ -452,6 +475,10 @@ func (ec *ethConfirmer) markOldTxesMissingReceiptAsErrored(ctx context.Context, if err != nil { return err } + + ctx, cancel := postgres.DefaultQueryCtx() + defer cancel() + rows, err := d.QueryContext(ctx, ` UPDATE eth_txes SET state='fatal_error', nonce=NULL, error=$1, broadcast_at=NULL @@ -558,10 +585,8 @@ func (ec *ethConfirmer) rebroadcastWhereNecessary(ctx context.Context, address g // re-org, so multiple attempts are allowed to be in in_progress state (but // only one per eth_tx). func (ec *ethConfirmer) handleAnyInProgressAttempts(ctx context.Context, address gethCommon.Address, blockHeight int64) error { - attempts, err := getInProgressEthTxAttempts(ctx, ec.store, address) - if ctx.Err() != nil { - return nil - } else if err != nil { + attempts, err := getInProgressEthTxAttempts(ec.store, address) + if err != nil { return errors.Wrap(err, "getInProgressEthTxAttempts failed") } for _, a := range attempts { @@ -575,7 +600,10 @@ func (ec *ethConfirmer) handleAnyInProgressAttempts(ctx context.Context, address return nil } -func getInProgressEthTxAttempts(ctx context.Context, s *store.Store, address gethCommon.Address) ([]models.EthTxAttempt, error) { +func getInProgressEthTxAttempts(s *store.Store, address gethCommon.Address) ([]models.EthTxAttempt, error) { + ctx, cancel := postgres.DefaultQueryCtx() + defer cancel() + var attempts []models.EthTxAttempt err := s.DB. WithContext(ctx). 
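The hunks above keep converging on one pattern: each DB call derives its own short-lived context via `postgres.DefaultQueryCtx` instead of inheriting the potentially long-lived head-processing context. Sketched in isolation — the package, the type, and the 15-second budget are assumptions for illustration:

```go
package example

import (
	"context"
	"time"

	"gorm.io/gorm"
)

// defaultQueryCtx plays the role of postgres.DefaultQueryCtx: every query gets
// its own sanity-limited context.
func defaultQueryCtx() (context.Context, context.CancelFunc) {
	return context.WithTimeout(context.Background(), 15*time.Second)
}

type EthTxAttempt struct {
	ID    int64
	State string
}

// inProgressAttempts mirrors getInProgressEthTxAttempts after this patch: the
// query can no longer inherit (and be starved by) a caller's context.
func inProgressAttempts(db *gorm.DB) ([]EthTxAttempt, error) {
	ctx, cancel := defaultQueryCtx()
	defer cancel()
	var attempts []EthTxAttempt
	err := db.WithContext(ctx).Where("state = ?", "in_progress").Find(&attempts).Error
	return attempts, err
}
```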
diff --git a/core/services/bulletprooftxmanager/eth_confirmer_test.go b/core/services/bulletprooftxmanager/eth_confirmer_test.go index 2df50652c16..e8d01347961 100644 --- a/core/services/bulletprooftxmanager/eth_confirmer_test.go +++ b/core/services/bulletprooftxmanager/eth_confirmer_test.go @@ -493,7 +493,7 @@ func TestEthConfirmer_CheckForReceipts_batching(t *testing.T) { config, cleanup := cltest.NewConfig(t) defer cleanup() - config.Set("ETH_RECEIPT_FETCH_BATCH_SIZE", 2) + config.Set("ETH_RPC_DEFAULT_BATCH_SIZE", 2) ec := bulletprooftxmanager.NewEthConfirmer(store, config) ctx := context.Background() diff --git a/core/services/bulletprooftxmanager/eth_resender.go b/core/services/bulletprooftxmanager/eth_resender.go index e91a2a3d124..b5bf7a236c1 100644 --- a/core/services/bulletprooftxmanager/eth_resender.go +++ b/core/services/bulletprooftxmanager/eth_resender.go @@ -110,6 +110,7 @@ func (er *EthResender) resendUnconfirmed() error { } now := time.Now() + // FIXME: Needs to be split into batches of EthRPCDefaultBatchSize if err := er.ethClient.RoundRobinBatchCallContext(context.Background(), reqs); err != nil { return errors.Wrap(err, "failed to re-send transactions") } diff --git a/core/services/bulletprooftxmanager/nonce_syncer.go b/core/services/bulletprooftxmanager/nonce_syncer.go index 8024066ec3d..20b277ec4fc 100644 --- a/core/services/bulletprooftxmanager/nonce_syncer.go +++ b/core/services/bulletprooftxmanager/nonce_syncer.go @@ -264,7 +264,7 @@ func (s NonceSyncer) fastForwardNonceIfNecessary(ctx context.Context, address co } func (s NonceSyncer) pendingNonceFromEthClient(ctx context.Context, account common.Address) (nextNonce uint64, err error) { - ctx, cancel := context.WithTimeout(ctx, maxEthNodeRequestTime) + ctx, cancel := eth.DefaultQueryCtx(ctx) defer cancel() nextNonce, err = s.ethClient.PendingNonceAt(ctx, account) return nextNonce, errors.WithStack(err) diff --git a/core/services/eth/client.go b/core/services/eth/client.go index c7b34169461..bb80c8ec352 100644 --- a/core/services/eth/client.go +++ b/core/services/eth/client.go @@ -7,6 +7,7 @@ import ( "strings" "sync" "sync/atomic" + "time" "github.com/smartcontractkit/chainlink/core/assets" "github.com/smartcontractkit/chainlink/core/logger" @@ -102,6 +103,17 @@ type client struct { var _ Client = (*client)(nil) +// DefaultQueryCtx returns a context with a sensible sanity limit timeout for +// queries to the eth node +func DefaultQueryCtx(ctxs ...context.Context) (ctx context.Context, cancel context.CancelFunc) { + if len(ctxs) > 0 { + ctx = ctxs[0] + } else { + ctx = context.Background() + } + return context.WithTimeout(ctx, 15*time.Second) +} + func NewClient(rpcUrl string, secondaryRPCURLs ...url.URL) (*client, error) { parsed, err := url.ParseRequestURI(rpcUrl) if err != nil { diff --git a/core/store/orm/config.go b/core/store/orm/config.go index a62000313e6..30f2928b001 100644 --- a/core/store/orm/config.go +++ b/core/store/orm/config.go @@ -390,10 +390,10 @@ func (c Config) EthBalanceMonitorBlockDelay() uint16 { return c.getWithFallback("EthBalanceMonitorBlockDelay", parseUint16).(uint16) } -// EthReceiptFetchBatchSize controls the number of receipts fetched in each +// EthRPCDefaultBatchSize controls the number of receipts fetched in each // request in the EthConfirmer -func (c Config) EthReceiptFetchBatchSize() uint32 { - return c.viper.GetUint32(EnvVarName("EthReceiptFetchBatchSize")) +func (c Config) EthRPCDefaultBatchSize() uint32 { + return c.viper.GetUint32(EnvVarName("EthRPCDefaultBatchSize")) } // 
EthGasBumpThreshold is the number of blocks to wait before bumping gas again on unconfirmed transactions @@ -433,6 +433,11 @@ func (c Config) EthMaxUnconfirmedTransactions() uint64 { return c.getWithFallback("EthMaxUnconfirmedTransactions", parseUint64).(uint64) } +// EthNonceAutoSync enables/disables running the NonceSyncer on application start +func (c Config) EthNonceAutoSync() bool { + return c.getWithFallback("EthNonceAutoSync", parseBool).(bool) +} + // EthGasLimitDefault sets the default gas limit for outgoing transactions. func (c Config) EthGasLimitDefault() uint64 { return c.getWithFallback("EthGasLimitDefault", parseUint64).(uint64) diff --git a/core/store/orm/config_reader.go b/core/store/orm/config_reader.go index 250b55eff05..a8d5933e921 100644 --- a/core/store/orm/config_reader.go +++ b/core/store/orm/config_reader.go @@ -42,8 +42,9 @@ type ConfigReader interface { EthGasLimitDefault() uint64 EthGasPriceDefault() *big.Int EthMaxGasPriceWei() *big.Int + EthNonceAutoSync() bool EthFinalityDepth() uint - EthReceiptFetchBatchSize() uint32 + EthRPCDefaultBatchSize() uint32 EthHeadTrackerHistoryDepth() uint EthHeadTrackerMaxBufferSize() uint EthTxResendAfterThreshold() time.Duration diff --git a/core/store/orm/schema.go b/core/store/orm/schema.go index d392a30943a..cbf2d275ee0 100644 --- a/core/store/orm/schema.go +++ b/core/store/orm/schema.go @@ -14,6 +14,8 @@ import ( ) // ConfigSchema records the schema of configuration at the type level +// FIXME: NonceSyncer is temporarily disabled by default because it's buggy +// See: https://app.clubhouse.io/chainlinklabs/story/6701/noncesyncer-has-problems-we-should-disable-it-until-it-has-been-a-t-d-and-problems-below-have-been-addressed type ConfigSchema struct { AdminCredentialsFile string `env:"ADMIN_CREDENTIALS_FILE" default:"$ROOT/apicredentials"` AllowOrigins string `env:"ALLOW_ORIGINS" default:"http://localhost:3000,http://localhost:6688"` @@ -52,6 +54,7 @@ type ConfigSchema struct { EthGasPriceDefault big.Int `env:"ETH_GAS_PRICE_DEFAULT" default:"20000000000"` EthMaxGasPriceWei uint64 `env:"ETH_MAX_GAS_PRICE_WEI" default:"1500000000000"` EthMaxUnconfirmedTransactions uint64 `env:"ETH_MAX_UNCONFIRMED_TRANSACTIONS" default:"500"` + EthNonceAutoSync bool `env:"ETH_NONCE_AUTO_SYNC" default:"false"` EthFinalityDepth uint `env:"ETH_FINALITY_DEPTH" default:"50"` EthHeadTrackerHistoryDepth uint `env:"ETH_HEAD_TRACKER_HISTORY_DEPTH" default:"100"` EthHeadTrackerMaxBufferSize uint `env:"ETH_HEAD_TRACKER_MAX_BUFFER_SIZE" default:"3"` From 6880fcb0d810c518d9cc0424d53314183479779b Mon Sep 17 00:00:00 2001 From: Sam Date: Tue, 6 Apr 2021 12:17:29 +0100 Subject: [PATCH 114/116] Fix tests --- core/store/orm/schema.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/store/orm/schema.go b/core/store/orm/schema.go index cbf2d275ee0..06463597493 100644 --- a/core/store/orm/schema.go +++ b/core/store/orm/schema.go @@ -59,7 +59,7 @@ type ConfigSchema struct { EthHeadTrackerHistoryDepth uint `env:"ETH_HEAD_TRACKER_HISTORY_DEPTH" default:"100"` EthHeadTrackerMaxBufferSize uint `env:"ETH_HEAD_TRACKER_MAX_BUFFER_SIZE" default:"3"` EthBalanceMonitorBlockDelay uint16 `env:"ETH_BALANCE_MONITOR_BLOCK_DELAY" default:"1"` - EthReceiptFetchBatchSize uint32 `env:"ETH_RECEIPT_FETCH_BATCH_SIZE" default:"100"` + EthRPCDefaultBatchSize uint32 `env:"ETH_RPC_DEFAULT_BATCH_SIZE" default:"100"` EthTxResendAfterThreshold time.Duration `env:"ETH_TX_RESEND_AFTER_THRESHOLD" default:"30s"` EthLogBackfillBatchSize uint32 
`env:"ETH_LOG_BACKFILL_BATCH_SIZE" default:"100"` EthereumURL string `env:"ETH_URL" default:"ws://localhost:8546"` From f7fffa26e468abdbd58c0c501be68b1a90bc1f75 Mon Sep 17 00:00:00 2001 From: Sam Date: Thu, 8 Apr 2021 16:10:33 +0100 Subject: [PATCH 115/116] Do not uselessly save eth_tx on attempt --- .../bulletprooftxmanager/bulletprooftxmanager.go | 7 ++++--- core/services/bulletprooftxmanager/eth_broadcaster.go | 11 ++++++----- core/services/bulletprooftxmanager/eth_confirmer.go | 7 ++++--- 3 files changed, 14 insertions(+), 11 deletions(-) diff --git a/core/services/bulletprooftxmanager/bulletprooftxmanager.go b/core/services/bulletprooftxmanager/bulletprooftxmanager.go index 6a2650850ec..158c93657f2 100644 --- a/core/services/bulletprooftxmanager/bulletprooftxmanager.go +++ b/core/services/bulletprooftxmanager/bulletprooftxmanager.go @@ -20,6 +20,7 @@ import ( gethTypes "github.com/ethereum/go-ethereum/core/types" "github.com/pkg/errors" "gorm.io/gorm" + "gorm.io/gorm/clause" ) // For more information about the BulletproofTxManager architecture, see the design doc: @@ -50,7 +51,7 @@ func SendEther(s *strpkg.Store, from, to gethCommon.Address, value assets.Eth) ( GasLimit: s.Config.EthGasLimitDefault(), State: models.EthTxUnstarted, } - err = s.DB.Create(&etx).Error + err = s.DB.Omit(clause.Associations).Create(&etx).Error return etx, err } @@ -151,10 +152,10 @@ func saveReplacementInProgressAttempt(store *strpkg.Store, oldAttempt models.Eth return errors.New("expected oldAttempt to have an ID") } return store.Transaction(func(tx *gorm.DB) error { - if err := tx.Exec(`DELETE FROM eth_tx_attempts WHERE id = ? `, oldAttempt.ID).Error; err != nil { + if err := tx.Omit(clause.Associations).Exec(`DELETE FROM eth_tx_attempts WHERE id = ? `, oldAttempt.ID).Error; err != nil { return errors.Wrap(err, "saveReplacementInProgressAttempt failed") } - return errors.Wrap(tx.Create(replacementAttempt).Error, "saveReplacementInProgressAttempt failed") + return errors.Wrap(tx.Omit(clause.Associations).Create(replacementAttempt).Error, "saveReplacementInProgressAttempt failed") }) } diff --git a/core/services/bulletprooftxmanager/eth_broadcaster.go b/core/services/bulletprooftxmanager/eth_broadcaster.go index 4f9b9c7de5d..4e83e55deee 100644 --- a/core/services/bulletprooftxmanager/eth_broadcaster.go +++ b/core/services/bulletprooftxmanager/eth_broadcaster.go @@ -17,6 +17,7 @@ import ( gethCommon "github.com/ethereum/go-ethereum/common" "github.com/pkg/errors" "gorm.io/gorm" + "gorm.io/gorm/clause" ) // EthBroadcaster monitors eth_txes for transactions that need to @@ -390,10 +391,10 @@ func (eb *ethBroadcaster) saveInProgressTransaction(etx *models.EthTx, attempt * } etx.State = models.EthTxInProgress return eb.store.Transaction(func(tx *gorm.DB) error { - if err := tx.Create(attempt).Error; err != nil { + if err := tx.Omit(clause.Associations).Create(attempt).Error; err != nil { return errors.Wrap(err, "saveInProgressTransaction failed to create eth_tx_attempt") } - return errors.Wrap(tx.Save(etx).Error, "saveInProgressTransaction failed to save eth_tx") + return errors.Wrap(tx.Omit(clause.Associations).Save(etx).Error, "saveInProgressTransaction failed to save eth_tx") }) } @@ -422,10 +423,10 @@ func saveAttempt(store *store.Store, etx *models.EthTx, attempt models.EthTxAtte if err := IncrementNextNonce(tx, etx.FromAddress, *etx.Nonce); err != nil { return errors.Wrap(err, "saveUnconfirmed failed") } - if err := tx.Save(etx).Error; err != nil { + if err := tx.Omit(clause.Associations).Save(etx).Error; 
err != nil { return errors.Wrap(err, "saveUnconfirmed failed to save eth_tx") } - if err := tx.Save(&attempt).Error; err != nil { + if err := tx.Omit(clause.Associations).Save(&attempt).Error; err != nil { return errors.Wrap(err, "saveUnconfirmed failed to save eth_tx_attempt") } for _, f := range callbacks { @@ -473,7 +474,7 @@ func saveFatallyErroredTransaction(store *store.Store, etx *models.EthTx) error if err := tx.Exec(`DELETE FROM eth_tx_attempts WHERE eth_tx_id = ?`, etx.ID).Error; err != nil { return errors.Wrapf(err, "saveFatallyErroredTransaction failed to delete eth_tx_attempt with eth_tx.ID %v", etx.ID) } - return errors.Wrap(tx.Save(etx).Error, "saveFatallyErroredTransaction failed to save eth_tx") + return errors.Wrap(tx.Omit(clause.Associations).Save(etx).Error, "saveFatallyErroredTransaction failed to save eth_tx") }) } diff --git a/core/services/bulletprooftxmanager/eth_confirmer.go b/core/services/bulletprooftxmanager/eth_confirmer.go index bca3518930f..3a5e6ba47df 100644 --- a/core/services/bulletprooftxmanager/eth_confirmer.go +++ b/core/services/bulletprooftxmanager/eth_confirmer.go @@ -25,6 +25,7 @@ import ( "github.com/pkg/errors" "go.uber.org/multierr" "gorm.io/gorm" + "gorm.io/gorm/clause" ) const ( @@ -733,7 +734,7 @@ func (ec *ethConfirmer) saveInProgressAttempt(attempt *models.EthTxAttempt) erro if attempt.State != models.EthTxAttemptInProgress { return errors.New("saveInProgressAttempt failed: attempt state must be in_progress") } - return errors.Wrap(ec.store.DB.Save(attempt).Error, "saveInProgressAttempt failed") + return errors.Wrap(ec.store.DB.Omit(clause.Associations).Save(attempt).Error, "saveInProgressAttempt failed") } func (ec *ethConfirmer) handleInProgressAttempt(ctx context.Context, etx models.EthTx, attempt models.EthTxAttempt, blockHeight int64) error { @@ -888,7 +889,7 @@ func saveSentAttempt(db *gorm.DB, attempt *models.EthTxAttempt, broadcastAt time if err := tx.Exec(`UPDATE eth_txes SET broadcast_at = ? WHERE id = ? AND broadcast_at < ?`, broadcastAt, attempt.EthTxID, broadcastAt).Error; err != nil { return errors.Wrap(err, "saveSentAttempt failed") } - return errors.Wrap(db.Save(attempt).Error, "saveSentAttempt failed") + return errors.Wrap(db.Omit(clause.Associations).Save(attempt).Error, "saveSentAttempt failed") }) } @@ -906,7 +907,7 @@ func saveInsufficientEthAttempt(db *gorm.DB, attempt *models.EthTxAttempt, broad if err := tx.Exec(`UPDATE eth_txes SET broadcast_at = ? WHERE id = ? 
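
A note on the pattern this patch repeats. In GORM v2, Create and Save on a struct whose association fields are populated will also write the associated records, so every save of an eth_tx could redundantly re-save its loaded attempts (and the reverse). Chaining Omit(clause.Associations) restricts the statement to the parent row. Below is a minimal sketch of the difference, using hypothetical stand-in types rather than the real Chainlink models:

package main

import (
	"gorm.io/driver/sqlite"
	"gorm.io/gorm"
	"gorm.io/gorm/clause"
)

// Hypothetical stand-in types for illustration, not the real Chainlink models.
type EthTxAttempt struct {
	ID      int64
	EthTxID int64
}

type EthTx struct {
	ID            int64
	State         string
	EthTxAttempts []EthTxAttempt // GORM treats this as a has-many association
}

func main() {
	db, err := gorm.Open(sqlite.Open(":memory:"), &gorm.Config{})
	if err != nil {
		panic(err)
	}
	if err := db.AutoMigrate(&EthTx{}, &EthTxAttempt{}); err != nil {
		panic(err)
	}

	etx := EthTx{
		ID:            1,
		State:         "in_progress",
		EthTxAttempts: []EthTxAttempt{{ID: 1, EthTxID: 1}},
	}

	// Create inserts the eth_txes row and, by default, the populated
	// attempt rows as well.
	db.Create(&etx)

	// A later plain Save updates the eth_txes row but ALSO issues writes
	// for every attempt still loaded on the struct, even when only the
	// parent changed: that is the useless save this patch removes.
	db.Save(&etx)

	// Omit(clause.Associations) restricts the statement to the parent row;
	// attempts are persisted by their own dedicated queries.
	db.Omit(clause.Associations).Save(&etx)
}

One caveat: Omit shapes struct-based statement building (Create, Save, Updates) only, so chaining it in front of a raw Exec leaves the SQL unchanged.
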
From 07b954d142db4afaf46bfca58e12a9eeb7db0860 Mon Sep 17 00:00:00 2001
From: Sam
Date: Mon, 12 Apr 2021 15:39:49 +0100
Subject: [PATCH 116/116] Add ORDER BY to Batch query, fixing duplicates/missing jobs

---
 core/services/job_subscriber.go | 3 +++
 core/services/subscription.go | 1 +
 core/store/orm/orm.go | 4 ++--
 docs/CHANGELOG.md | 2 ++
 4 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/core/services/job_subscriber.go b/core/services/job_subscriber.go
index b45c93f0750..2633bc631b8 100644
--- a/core/services/job_subscriber.go
+++ b/core/services/job_subscriber.go
@@ -142,9 +142,12 @@ func (js *jobSubscriber) addSubscription(sub JobSubscription) {
 
 // Connect connects the jobs to the ethereum node by creating corresponding subscriptions.
 func (js *jobSubscriber) Connect(bn *models.Head) error {
+	logger.Debugw("JobSubscriber connect", "head", bn)
+
 	var merr error
 	err := js.store.Jobs(
 		func(j *models.JobSpec) bool {
+			logger.Debugw("JobSubscriber adding job", "jobSpecID", j.ID)
 			merr = multierr.Append(merr, js.AddJob(*j, bn))
 			return true
 		},
diff --git a/core/services/subscription.go b/core/services/subscription.go
index a865d0e00e9..ffdb84761eb 100644
--- a/core/services/subscription.go
+++ b/core/services/subscription.go
@@ -55,6 +55,7 @@ func StartJobSubscription(job models.JobSpec, head *models.Head, store *strpkg.S
 	}
 
 	for _, initr := range initrs {
+		logger.Debugw("Initiator subscribing", "initr", initr)
 		unsubscriber, err := NewInitiatorSubscription(initr, store.EthClient, runManager, nextHead, store.Config, ReceiveLogRequest)
 		if err == nil {
 			unsubscribers = append(unsubscribers, unsubscriber)
diff --git a/core/store/orm/orm.go b/core/store/orm/orm.go
index f7f4da3832e..e7344004ea9 100644
--- a/core/store/orm/orm.go
+++ b/core/store/orm/orm.go
@@ -579,13 +579,13 @@ func (orm *ORM) Jobs(cb func(*models.JobSpec) bool, initrTypes ...string) error
 		return err
 	}
 	return Batch(BatchSize, func(offset, limit uint) (uint, error) {
-		scope := orm.DB.Limit(int(limit)).Offset(int(offset))
+		scope := orm.DB.Order("job_specs.id asc").Limit(int(limit)).Offset(int(offset))
 		if len(initrTypes) > 0 {
 			scope = scope.Where("initiators.type IN (?)", initrTypes)
 			scope = scope.Joins("JOIN initiators ON job_specs.id = initiators.job_spec_id::uuid")
 		}
 		var ids []string
-		err := scope.Table("job_specs").Pluck("job_specs.id", &ids).Error
+		err := scope.Table("job_specs").Distinct("job_specs.id").Pluck("job_specs.id", &ids).Error
 		if err != nil {
 			return 0, err
 		}
diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index 9b2b8f1118c..b884e34b3b0 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -48,6 +48,8 @@ to a dotID in the pipeline_spec.dot_dag_source.
 - Fixed bug where node will occasionally submit an invalid OCR transmission which reverts with "address not authorized to sign".
 
+- Fixed a bug where a node would sometimes double-submit on runlog jobs, causing reverted transactions on-chain
+
 ## [0.10.3] - 2021-03-22
 
 ### Added
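
The root cause behind this fix generalizes: without an ORDER BY, SQL gives no guarantee that consecutive LIMIT/OFFSET windows partition a table, so a paging loop can return the same job twice and skip another entirely; the JOIN on initiators additionally multiplies rows for jobs with more than one initiator. Ordering on the primary key makes each window deterministic, and DISTINCT collapses the join duplicates. A sketch of the corrected paging pattern, where the batch helper and the two-table schema are simplified stand-ins for the ORM's Batch and the real job_specs/initiators tables:

package main

import (
	"fmt"

	"gorm.io/driver/sqlite"
	"gorm.io/gorm"
)

// batch is a hypothetical stand-in for the ORM's Batch helper: it pages
// through a query and stops when a page comes back short.
func batch(size uint, cb func(offset, limit uint) (uint, error)) error {
	for offset := uint(0); ; offset += size {
		n, err := cb(offset, size)
		if err != nil || n < size {
			return err
		}
	}
}

func main() {
	db, err := gorm.Open(sqlite.Open(":memory:"), &gorm.Config{})
	if err != nil {
		panic(err)
	}
	// Simplified schema: one job spec can have several initiators.
	db.Exec(`CREATE TABLE job_specs (id TEXT PRIMARY KEY)`)
	db.Exec(`CREATE TABLE initiators (job_spec_id TEXT, type TEXT)`)
	db.Exec(`INSERT INTO job_specs VALUES ('a'), ('b')`)
	db.Exec(`INSERT INTO initiators VALUES ('a','runlog'), ('a','cron'), ('b','runlog')`)

	_ = batch(1, func(offset, limit uint) (uint, error) {
		var ids []string
		// ORDER BY the primary key makes each LIMIT/OFFSET window
		// deterministic; DISTINCT collapses the row multiplication from
		// the JOIN (job 'a' matches two initiators). Without both, pages
		// can repeat or skip jobs.
		err := db.Table("job_specs").
			Joins("JOIN initiators ON job_specs.id = initiators.job_spec_id").
			Order("job_specs.id asc").
			Distinct("job_specs.id").
			Limit(int(limit)).Offset(int(offset)).
			Pluck("job_specs.id", &ids).Error
		if err != nil {
			return 0, err
		}
		for _, id := range ids {
			fmt.Println(id) // prints 'a' then 'b', each exactly once
		}
		return uint(len(ids)), nil
	})
}

Keyset pagination (WHERE job_specs.id > last seen id) would avoid rescanning skipped rows as offsets grow, but ordered OFFSET paging is the smallest change that makes the existing Batch helper correct.
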