diff --git a/.github/workflows/ci-core.yml b/.github/workflows/ci-core.yml
index 19ccf17bba4..66cefc77c3f 100644
--- a/.github/workflows/ci-core.yml
+++ b/.github/workflows/ci-core.yml
@@ -365,6 +365,7 @@ jobs:
with:
tmpfs: "true"
image-tag: "16-alpine"
+ postgres-options: "-c max_connections=1000 -c shared_buffers=2GB -c fsync=off -c synchronous_commit=off -c full_page_writes=off -c client_min_messages=warning" # Turn off some prod-protections to make postgres go brrr. https://github.com/peterldowns/pgtestdb#how-do-i-make-it-go-faster
- name: Touching core/web/assets/index.html
if: ${{ matrix.type.should-run == 'true' }}
diff --git a/.github/workflows/ci-deployments.yml b/.github/workflows/ci-deployments.yml
index daad9b233b5..3cae1f71371 100644
--- a/.github/workflows/ci-deployments.yml
+++ b/.github/workflows/ci-deployments.yml
@@ -266,6 +266,7 @@ jobs:
with:
tmpfs: "true"
image-tag: "16-alpine"
+ postgres-options: "-c max_connections=1000 -c shared_buffers=2GB -c fsync=off -c synchronous_commit=off -c full_page_writes=off -c client_min_messages=warning" # Turn off some prod-protections to make postgres go brrr. https://github.com/peterldowns/pgtestdb#how-do-i-make-it-go-faster
- name: Download Go vendor packages
run: go mod download
- name: Setup DB
diff --git a/.gitignore b/.gitignore
index 4fe556dd5b5..1a3e7907885 100644
--- a/.gitignore
+++ b/.gitignore
@@ -60,6 +60,7 @@ core/services/job/testdata/wasm/testmodule.wasm
core/services/job/testdata/wasm/testmodule.br
temp-repo
diagnose-*/
+diagnose-attempted-fixes-*.jsonl
# DB state
./db/
diff --git a/.tool-versions b/.tool-versions
index 1d732b1afa7..070681e29f2 100644
--- a/.tool-versions
+++ b/.tool-versions
@@ -1,9 +1,9 @@
-golang 1.26.2
+golang 1.26.3
mockery 2.53.0
nodejs 20.13.1
pnpm 10.6.5
postgres 15.1
helm 3.18.4
-golangci-lint 2.11.4
+golangci-lint 2.12.2
protoc 29.3
python 3.10.5
diff --git a/core/services/ocr2/plugins/llo/config/config.go b/core/services/ocr2/plugins/llo/config/config.go
index 7722a02ef49..dcf506a4b29 100644
--- a/core/services/ocr2/plugins/llo/config/config.go
+++ b/core/services/ocr2/plugins/llo/config/config.go
@@ -121,10 +121,8 @@ func (p PluginConfig) Validate() (merr error) {
if err := json.Unmarshal([]byte(p.ChannelDefinitions), &cd); err != nil {
merr = errors.Join(merr, fmt.Errorf("channelDefinitions is invalid JSON: %w", err))
}
- } else {
- if p.ChannelDefinitionsContractAddress == (common.Address{}) {
- merr = errors.Join(merr, errors.New("llo: ChannelDefinitionsContractAddress is required if ChannelDefinitions is not specified"))
- }
+ } else if p.ChannelDefinitionsContractAddress == (common.Address{}) {
+ merr = errors.Join(merr, errors.New("llo: ChannelDefinitionsContractAddress is required if ChannelDefinitions is not specified"))
}
merr = errors.Join(merr, validateKeyBundleIDs(p.KeyBundleIDs))
diff --git a/core/services/ocr2/plugins/llo/config/config_test.go b/core/services/ocr2/plugins/llo/config/config_test.go
index 0522a7f4a9f..4d488cf9a2d 100644
--- a/core/services/ocr2/plugins/llo/config/config_test.go
+++ b/core/services/ocr2/plugins/llo/config/config_test.go
@@ -112,8 +112,7 @@ func Test_Config(t *testing.T) {
assert.False(t, mc.BenchmarkMode)
err = mc.Validate()
- require.Error(t, err)
- assert.EqualError(t, err, "llo: ChannelDefinitionsContractAddress is required if ChannelDefinitions is not specified")
+ require.EqualError(t, err, "llo: ChannelDefinitionsContractAddress is required if ChannelDefinitions is not specified")
})
t.Run("with invalid values", func(t *testing.T) {
@@ -124,7 +123,7 @@ func Test_Config(t *testing.T) {
var mc PluginConfig
err := toml.Unmarshal([]byte(rawToml), &mc)
require.Error(t, err)
- assert.EqualError(t, err, `toml: cannot decode TOML string into struct field config.PluginConfig.ChannelDefinitionsContractFromBlock of type int64`)
+ require.EqualError(t, err, `toml: cannot decode TOML string into struct field config.PluginConfig.ChannelDefinitionsContractFromBlock of type int64`)
assert.False(t, mc.BenchmarkMode)
rawToml = `
diff --git a/core/services/ocr2/plugins/llo/helpers_test.go b/core/services/ocr2/plugins/llo/helpers_test.go
index d221dbdc9dc..8e21ba57fd6 100644
--- a/core/services/ocr2/plugins/llo/helpers_test.go
+++ b/core/services/ocr2/plugins/llo/helpers_test.go
@@ -62,7 +62,8 @@ type mercuryServer struct {
func startMercuryServer(t *testing.T, srv *mercuryServer, pubKeys []ed25519.PublicKey) (serverURL string) {
// Set up the grpc server
- lis, err := net.Listen("tcp", "127.0.0.1:0")
+ var lc net.ListenConfig
+ lis, err := lc.Listen(testutils.Context(t), "tcp", "127.0.0.1:0")
if err != nil {
t.Fatalf("[MAIN] failed to listen: %v", err)
}
@@ -170,44 +171,44 @@ func setupNode(
config, _ := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) {
// [JobPipeline]
- c.JobPipeline.MaxSuccessfulRuns = ptr(uint64(0))
- c.JobPipeline.VerboseLogging = ptr(true)
+ c.JobPipeline.MaxSuccessfulRuns = new(uint64(0))
+ c.JobPipeline.VerboseLogging = new(true)
// [Feature]
- c.Feature.UICSAKeys = ptr(true)
- c.Feature.LogPoller = ptr(true)
- c.Feature.FeedsManager = ptr(false)
+ c.Feature.UICSAKeys = new(true)
+ c.Feature.LogPoller = new(true)
+ c.Feature.FeedsManager = new(false)
// [OCR]
- c.OCR.Enabled = ptr(false)
+ c.OCR.Enabled = new(false)
// [OCR2]
- c.OCR2.Enabled = ptr(true)
+ c.OCR2.Enabled = new(true)
c.OCR2.ContractPollInterval = commonconfig.MustNewDuration(100 * time.Millisecond)
// [P2P]
- c.P2P.PeerID = ptr(p2pKey.PeerID())
- c.P2P.TraceLogging = ptr(true)
+ c.P2P.PeerID = new(p2pKey.PeerID())
+ c.P2P.TraceLogging = new(true)
// [P2P.V2]
- c.P2P.V2.Enabled = ptr(true)
+ c.P2P.V2.Enabled = new(true)
c.P2P.V2.AnnounceAddresses = &p2paddresses
c.P2P.V2.ListenAddresses = &p2paddresses
c.P2P.V2.DeltaDial = commonconfig.MustNewDuration(500 * time.Millisecond)
c.P2P.V2.DeltaReconcile = commonconfig.MustNewDuration(5 * time.Second)
// [Mercury]
- c.Mercury.VerboseLogging = ptr(true)
+ c.Mercury.VerboseLogging = new(true)
// [Log]
- c.Log.Level = ptr(toml.LogLevel(zapcore.DebugLevel)) // generally speaking we want debug level for logs unless overridden
+ c.Log.Level = new(toml.LogLevel(zapcore.DebugLevel)) // generally speaking we want debug level for logs unless overridden
// [CRE]
- c.CRE.UseLocalTimeProvider = ptr(true)
+ c.CRE.UseLocalTimeProvider = new(true)
// [EVM.Transactions]
for _, evmCfg := range c.EVM {
- evmCfg.Transactions.Enabled = ptr(false) // don't need txmgr
+ evmCfg.Transactions.Enabled = new(false) // don't need txmgr
}
// Optional overrides
@@ -232,8 +233,6 @@ func setupNode(
return app, p2pKey.PeerID().Raw(), csaKey.StaticSizedPublicKey(), ocr2kb, observedLogs
}
-func ptr[T any](t T) *T { return &t }
-
// receiveWithTimeout receives from the packet channel with a timeout.
// It returns the packet if a packet was received or an error if the timeout is reached
// or the channel is closed unexpectedly.
@@ -402,14 +401,18 @@ func createSingleDecimalBridge(t *testing.T, name string, i int, p decimal.Decim
ctx := testutils.Context(t)
bridge := httptest.NewServer(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) {
b, err := io.ReadAll(req.Body)
- require.NoError(t, err)
- require.JSONEq(t, `{"data":{"data":"foo"}}`, string(b))
+ if !assert.NoError(t, err) {
+ return
+ }
+ if !assert.JSONEq(t, `{"data":{"data":"foo"}}`, string(b)) {
+ return
+ }
res.WriteHeader(http.StatusOK)
val := p.String()
resp := fmt.Sprintf(`{"result": %s}`, val)
_, err = res.Write([]byte(resp))
- require.NoError(t, err)
+ assert.NoError(t, err)
}))
t.Cleanup(bridge.Close)
u, _ := url.Parse(bridge.URL)
@@ -418,7 +421,6 @@ func createSingleDecimalBridge(t *testing.T, name string, i int, p decimal.Decim
Name: bridges.BridgeName(bridgeName),
URL: models.WebURL(*u),
}))
-
return bridgeName
}
@@ -508,7 +510,7 @@ func addOCRJobsEVMPremiumLegacy(
jobIDs[i] = make(map[uint32]int32)
}
for j, strm := range streams {
- // assume that streams are native, link and additionals are quote
+ // assume that streams are native, link and additional streams are quote
if j < 2 {
var name string
if j == 0 {
diff --git a/core/services/ocr2/plugins/llo/integration_test.go b/core/services/ocr2/plugins/llo/integration_test.go
index f85d0532474..c82fa3fdf82 100644
--- a/core/services/ocr2/plugins/llo/integration_test.go
+++ b/core/services/ocr2/plugins/llo/integration_test.go
@@ -2,6 +2,7 @@ package llo_test
import (
"crypto/ed25519"
+ "crypto/sha3"
"encoding/binary"
"encoding/hex"
"encoding/json"
@@ -25,7 +26,6 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/zap/zapcore"
- "golang.org/x/crypto/sha3"
"google.golang.org/grpc/peer"
"google.golang.org/protobuf/proto"
@@ -357,9 +357,9 @@ func setProductionConfig(t *testing.T, donID uint32, steve *bind.TransactOpts, b
func setBlueGreenConfig(t *testing.T, donID uint32, steve *bind.TransactOpts, backend evmtypes.Backend, configurator *configurator.Configurator, configuratorAddress common.Address, nodes []Node, opts ...OCRConfigOption) ocr2types.ConfigDigest {
signers, _, _, onchainConfig, offchainConfigVersion, offchainConfig := generateConfig(t, opts...)
- var onchainPubKeys [][]byte
- for _, signer := range signers {
- onchainPubKeys = append(onchainPubKeys, signer)
+ onchainPubKeys := make([][]byte, len(signers))
+ for i, signer := range signers {
+ onchainPubKeys[i] = signer
}
offchainTransmitters := make([][32]byte, nNodes)
for i := range nNodes {
@@ -450,7 +450,7 @@ func testIntegrationLLOEVMPremiumLegacy(t *testing.T, offchainConfig datastreams
clientPubKeys[i] = key.PublicKey
}
- steve, backend, _, _, verifier, _, verifierProxy, _, configStore, configStoreAddress, legacyVerifier, legacyVerifierAddr, _, _ := setupBlockchain(t)
+ steve, backend, _, _, verifier, _, _, _, configStore, configStoreAddress, legacyVerifier, legacyVerifierAddr, _, _ := setupBlockchain(t)
fromBlock := 1
// Setup bootstrap
@@ -476,7 +476,7 @@ func testIntegrationLLOEVMPremiumLegacy(t *testing.T, offchainConfig datastreams
// Setup oracle nodes
oracles, nodes := setupNodes(t, nNodes, backend, clientCSAKeys, func(c *chainlink.Config) {
- c.Mercury.Transmitter.Protocol = ptr(config.MercuryTransmitterProtocolGRPC)
+ c.Mercury.Transmitter.Protocol = new(config.MercuryTransmitterProtocolGRPC)
})
chainID := testutils.SimulatedChainID
@@ -592,7 +592,8 @@ channelDefinitionsContractFromBlock = %d`, serverURL, serverPubKey, donID, confi
}
var expectedBm, expectedBid, expectedAsk *big.Int
- if feedID == quoteStreamFeedID1 { //nolint
+ //nolint:gocritic,staticcheck // switch case doesn't play nice with these types
+ if feedID == quoteStreamFeedID1 {
expectedBm = quoteStream1.baseBenchmarkPrice.Mul(multiplier).BigInt()
expectedBid = quoteStream1.baseBid.Mul(multiplier).BigInt()
expectedAsk = quoteStream1.baseAsk.Mul(multiplier).BigInt()
@@ -601,14 +602,14 @@ channelDefinitionsContractFromBlock = %d`, serverURL, serverPubKey, donID, confi
expectedBid = quoteStream2.baseBid.Mul(multiplier).BigInt()
expectedAsk = quoteStream2.baseAsk.Mul(multiplier).BigInt()
} else {
- t.Fatalf("unrecognized feedID: 0x%x", feedID)
+ require.FailNowf(t, "unrecognized feedID", "0x%s", hex.EncodeToString(feedID[:]))
}
- assert.GreaterOrEqual(t, reportElems["validFromTimestamp"].(uint32), uint32(testStartTimeStamp.Unix()))
- assert.GreaterOrEqual(t, int(reportElems["observationsTimestamp"].(uint32)), int(testStartTimeStamp.Unix()))
+ assert.GreaterOrEqual(t, int64(reportElems["validFromTimestamp"].(uint32)), testStartTimeStamp.Unix())
+ assert.GreaterOrEqual(t, int64(reportElems["observationsTimestamp"].(uint32)), testStartTimeStamp.Unix())
assert.Equal(t, "33597747607000", reportElems["nativeFee"].(*big.Int).String())
assert.Equal(t, "7547169811320755", reportElems["linkFee"].(*big.Int).String())
- assert.Equal(t, reportElems["observationsTimestamp"].(uint32)+uint32(expirationWindow), reportElems["expiresAt"].(uint32))
+ assert.Equal(t, int64(reportElems["observationsTimestamp"].(uint32))+int64(expirationWindow), int64(reportElems["expiresAt"].(uint32)))
assert.Equal(t, expectedBm.String(), reportElems["benchmarkPrice"].(*big.Int).String())
assert.Equal(t, expectedBid.String(), reportElems["bid"].(*big.Int).String())
assert.Equal(t, expectedAsk.String(), reportElems["ask"].(*big.Int).String())
@@ -629,16 +630,6 @@ channelDefinitionsContractFromBlock = %d`, serverURL, serverPubKey, donID, confi
assert.Subset(t, signerAddresses, reportSigners)
}
- // test on-chain verification
- t.Run("on-chain verification", func(t *testing.T) {
- t.Skip("SKIP - MERC-6637")
- // Disabled because it flakes, sometimes returns "execution reverted"
- // No idea why
- // https://smartcontract-it.atlassian.net/browse/MERC-6637
- _, err = verifierProxy.Verify(steve, req.req.Payload, []byte{})
- require.NoError(t, err)
- })
-
pr, ok := peer.FromContext(req.ctx)
require.True(t, ok)
t.Logf("oracle %x reported for 0x%x", pr.String(), feedID[:])
@@ -717,7 +708,7 @@ func testIntegrationLLOMultiFormats(t *testing.T, offchainConfig datastreamsllo.
// Setup oracle nodes
oracles, nodes := setupNodes(t, nNodes, backend, clientCSAKeys, func(c *chainlink.Config) {
- c.Mercury.Transmitter.Protocol = ptr(config.MercuryTransmitterProtocolGRPC)
+ c.Mercury.Transmitter.Protocol = new(config.MercuryTransmitterProtocolGRPC)
})
chainID := testutils.SimulatedChainID
@@ -1469,7 +1460,7 @@ func TestIntegration_LLO_stress_test_V1(t *testing.T) {
bootstrapCSAKey := csakey.MustNewV2XXXTestingOnly(big.NewInt(salt - 1))
bootstrapNodePort := freeport.GetOne(t)
appBootstrap, bootstrapPeerID, _, bootstrapKb, _ := setupNode(t, bootstrapNodePort, "bootstrap_llo", backend, bootstrapCSAKey, func(c *chainlink.Config) {
- c.Log.Level = ptr(logLevel)
+ c.Log.Level = new(logLevel)
})
bootstrapNode := Node{App: appBootstrap, KeyBundle: bootstrapKb}
@@ -1490,8 +1481,8 @@ func TestIntegration_LLO_stress_test_V1(t *testing.T) {
// Setup oracle nodes
oracles, nodes := setupNodes(t, nNodes, backend, clientCSAKeys, func(c *chainlink.Config) {
- c.Mercury.Transmitter.Protocol = ptr(config.MercuryTransmitterProtocolGRPC)
- c.Log.Level = ptr(logLevel)
+ c.Mercury.Transmitter.Protocol = new(config.MercuryTransmitterProtocolGRPC)
+ c.Log.Level = new(logLevel)
})
chainID := testutils.SimulatedChainID
@@ -1546,7 +1537,8 @@ channelDefinitionsContractFromBlock = %d`, serverURL, serverPubKey, donID, confi
}
// Set config on configurator
- opts := []OCRConfigOption{WithOracles(oracles)}
+ opts := make([]OCRConfigOption, 0, 1+len(ocrConfigOpts))
+ opts = append(opts, WithOracles(oracles))
opts = append(opts, ocrConfigOpts...)
blueDigest := setProductionConfig(
t, donID, steve, backend, configurator, configuratorAddress, nodes, opts...,
@@ -1694,7 +1686,7 @@ func TestIntegration_LLO_transmit_errors(t *testing.T) {
bootstrapCSAKey := csakey.MustNewV2XXXTestingOnly(big.NewInt(salt - 1))
bootstrapNodePort := freeport.GetOne(t)
appBootstrap, bootstrapPeerID, _, bootstrapKb, _ := setupNode(t, bootstrapNodePort, "bootstrap_llo", backend, bootstrapCSAKey, func(c *chainlink.Config) {
- c.Log.Level = ptr(logLevel)
+ c.Log.Level = new(logLevel)
})
bootstrapNode := Node{App: appBootstrap, KeyBundle: bootstrapKb}
@@ -1715,9 +1707,9 @@ func TestIntegration_LLO_transmit_errors(t *testing.T) {
// Setup oracle nodes
oracles, nodes := setupNodes(t, nNodes, backend, clientCSAKeys, func(c *chainlink.Config) {
- c.Mercury.Transmitter.Protocol = ptr(config.MercuryTransmitterProtocolGRPC)
- c.Mercury.Transmitter.TransmitQueueMaxSize = ptr(uint32(maxQueueSize)) // Test queue overflow
- c.Log.Level = ptr(logLevel)
+ c.Mercury.Transmitter.Protocol = new(config.MercuryTransmitterProtocolGRPC)
+ c.Mercury.Transmitter.TransmitQueueMaxSize = new(uint32(maxQueueSize)) // Test queue overflow
+ c.Log.Level = new(logLevel)
})
chainID := testutils.SimulatedChainID
@@ -1869,7 +1861,7 @@ func testIntegrationLLOBlueGreenLifecycle(t *testing.T, offchainConfig datastrea
// Setup oracle nodes
oracles, nodes := setupNodes(t, nNodes, backend, clientCSAKeys, func(c *chainlink.Config) {
- c.Mercury.Transmitter.Protocol = ptr(config.MercuryTransmitterProtocolGRPC)
+ c.Mercury.Transmitter.Protocol = new(config.MercuryTransmitterProtocolGRPC)
})
chainID := testutils.SimulatedChainID
@@ -2229,7 +2221,7 @@ func TestIntegration_LLO_channel_merging_owners_adders(t *testing.T) {
// Setup oracle nodes
oracles, nodes := setupNodes(t, nNodes, backend, clientCSAKeys, func(c *chainlink.Config) {
- c.Mercury.Transmitter.Protocol = ptr(config.MercuryTransmitterProtocolGRPC)
+ c.Mercury.Transmitter.Protocol = new(config.MercuryTransmitterProtocolGRPC)
})
chainID := testutils.SimulatedChainID
@@ -2736,7 +2728,7 @@ func TestIntegration_LLO_tombstone_stops_observations_and_reports(t *testing.T)
serverURL := startMercuryServer(t, srv, clientPubKeys)
oracles, nodes := setupNodes(t, nNodes, backend, clientCSAKeys, func(c *chainlink.Config) {
- c.Mercury.Transmitter.Protocol = ptr(config.MercuryTransmitterProtocolGRPC)
+ c.Mercury.Transmitter.Protocol = new(config.MercuryTransmitterProtocolGRPC)
})
chainID := testutils.SimulatedChainID
diff --git a/core/services/ocr2/plugins/llo/onchain_channel_definition_cache_integration_test.go b/core/services/ocr2/plugins/llo/onchain_channel_definition_cache_integration_test.go
index bc1ec5a0071..3af5d772f79 100644
--- a/core/services/ocr2/plugins/llo/onchain_channel_definition_cache_integration_test.go
+++ b/core/services/ocr2/plugins/llo/onchain_channel_definition_cache_integration_test.go
@@ -2,11 +2,13 @@ package llo_test
import (
"bytes"
+ "crypto/sha3"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io"
+ "maps"
"math/rand"
"net/http"
"strconv"
@@ -19,7 +21,6 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/zap/zapcore"
- "golang.org/x/crypto/sha3"
"github.com/smartcontractkit/chainlink-common/pkg/services/servicetest"
llotypes "github.com/smartcontractkit/chainlink-common/pkg/types/llo"
@@ -120,9 +121,7 @@ func extractChannelDefinitions(defsJSON json.RawMessage) llotypes.ChannelDefinit
}
result := make(llotypes.ChannelDefinitions)
for _, sourceDef := range sourceDefs {
- for channelID, def := range sourceDef.Definitions {
- result[channelID] = def
- }
+ maps.Copy(result, sourceDef.Definitions)
}
return result
}
diff --git a/core/services/ocr2/plugins/mercury/helpers_test.go b/core/services/ocr2/plugins/mercury/helpers_test.go
index da76f88084d..833c25b0f15 100644
--- a/core/services/ocr2/plugins/mercury/helpers_test.go
+++ b/core/services/ocr2/plugins/mercury/helpers_test.go
@@ -170,30 +170,30 @@ func setupNode(
config, _ := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) {
// [JobPipeline]
// MaxSuccessfulRuns = 0
- c.JobPipeline.MaxSuccessfulRuns = ptr(uint64(0))
- c.JobPipeline.VerboseLogging = ptr(true)
+ c.JobPipeline.MaxSuccessfulRuns = new(uint64(0))
+ c.JobPipeline.VerboseLogging = new(true)
// [Feature]
// UICSAKeys=true
// LogPoller = true
// FeedsManager = false
- c.Feature.UICSAKeys = ptr(true)
- c.Feature.LogPoller = ptr(true)
- c.Feature.FeedsManager = ptr(false)
+ c.Feature.UICSAKeys = new(true)
+ c.Feature.LogPoller = new(true)
+ c.Feature.FeedsManager = new(false)
// [OCR]
// Enabled = false
- c.OCR.Enabled = ptr(false)
+ c.OCR.Enabled = new(false)
// [OCR2]
// Enabled = true
- c.OCR2.Enabled = ptr(true)
+ c.OCR2.Enabled = new(true)
// [P2P]
// PeerID = '$PEERID'
// TraceLogging = true
- c.P2P.PeerID = ptr(p2pKey.PeerID())
- c.P2P.TraceLogging = ptr(true)
+ c.P2P.PeerID = new(p2pKey.PeerID())
+ c.P2P.TraceLogging = new(true)
// [P2P.V2]
// Enabled = true
@@ -201,7 +201,7 @@ func setupNode(
// ListenAddresses = ['127.0.0.1:17775']
// DeltaDial = 500ms
// DeltaReconcile = 5s
- c.P2P.V2.Enabled = ptr(true)
+ c.P2P.V2.Enabled = new(true)
c.P2P.V2.AnnounceAddresses = &p2paddresses
c.P2P.V2.ListenAddresses = &p2paddresses
c.P2P.V2.DeltaDial = commonconfig.MustNewDuration(500 * time.Millisecond)
diff --git a/core/services/ocr2/plugins/mercury/plugin_test.go b/core/services/ocr2/plugins/mercury/plugin_test.go
index ad4102d2ac5..0893b441e4c 100644
--- a/core/services/ocr2/plugins/mercury/plugin_test.go
+++ b/core/services/ocr2/plugins/mercury/plugin_test.go
@@ -71,11 +71,11 @@ var (
testJob = job.Job{
ID: 1,
ExternalJobID: uuid.Must(uuid.NewRandom()),
- OCR2OracleSpecID: ptr(int32(7)),
+ OCR2OracleSpecID: new(int32(7)),
OCR2OracleSpec: &job.OCR2OracleSpec{
ID: 7,
ContractID: "phony",
- FeedID: ptr(common.BytesToHash([]byte{1, 2, 3})),
+ FeedID: new(common.BytesToHash([]byte{1, 2, 3})),
Relay: relay.NetworkEVM,
ChainID: "1",
},
diff --git a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/integration_test.go b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/integration_test.go
index 0aed10b892d..a3a815685e8 100644
--- a/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/integration_test.go
+++ b/core/services/ocr2/plugins/ocr2keeper/evmregistry/v21/logprovider/integration_test.go
@@ -510,13 +510,13 @@ func ptr[T any](v T) *T { return &v }
func setupDB(t *testing.T) *sqlx.DB {
_, db := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) {
- c.Feature.LogPoller = ptr(true)
+ c.Feature.LogPoller = new(true)
- c.OCR.Enabled = ptr(false)
- c.OCR2.Enabled = ptr(true)
+ c.OCR.Enabled = new(false)
+ c.OCR2.Enabled = new(true)
- c.EVM[0].Transactions.ForwardersEnabled = ptr(true)
- c.EVM[0].GasEstimator.Mode = ptr("FixedPrice")
+ c.EVM[0].Transactions.ForwardersEnabled = new(true)
+ c.EVM[0].GasEstimator.Mode = new("FixedPrice")
})
return db
}
diff --git a/core/services/ocr2/plugins/ocr2keeper/integration_test.go b/core/services/ocr2/plugins/ocr2keeper/integration_test.go
index 251d28c0e52..f0b7f9dfbd1 100644
--- a/core/services/ocr2/plugins/ocr2keeper/integration_test.go
+++ b/core/services/ocr2/plugins/ocr2keeper/integration_test.go
@@ -119,13 +119,13 @@ func setupNode(
p2pKey := p2pkey.MustNewV2XXXTestingOnly(big.NewInt(int64(port)))
p2paddresses := []string{fmt.Sprintf("127.0.0.1:%d", port)}
cfg, _ := heavyweight.FullTestDBV2(t, func(c *chainlink.Config, s *chainlink.Secrets) {
- c.Feature.LogPoller = ptr(true)
+ c.Feature.LogPoller = new(true)
- c.OCR.Enabled = ptr(false)
- c.OCR2.Enabled = ptr(true)
+ c.OCR.Enabled = new(false)
+ c.OCR2.Enabled = new(true)
- c.P2P.PeerID = ptr(p2pKey.PeerID())
- c.P2P.V2.Enabled = ptr(true)
+ c.P2P.PeerID = new(p2pKey.PeerID())
+ c.P2P.V2.Enabled = new(true)
c.P2P.V2.DeltaDial = commonconfig.MustNewDuration(500 * time.Millisecond)
c.P2P.V2.DeltaReconcile = commonconfig.MustNewDuration(5 * time.Second)
c.P2P.V2.AnnounceAddresses = &p2paddresses
@@ -134,8 +134,8 @@ func setupNode(
c.P2P.V2.DefaultBootstrappers = &p2pV2Bootstrappers
}
- c.EVM[0].Transactions.ForwardersEnabled = ptr(true)
- c.EVM[0].GasEstimator.Mode = ptr("FixedPrice")
+ c.EVM[0].Transactions.ForwardersEnabled = new(true)
+ c.EVM[0].GasEstimator.Mode = new("FixedPrice")
s.Mercury.Credentials = map[string]toml.MercuryCredentials{
MercuryCredName: {
LegacyURL: models.MustSecretURL(mercury.URL()),
diff --git a/core/services/workflows/syncer/engine_registry.go b/core/services/workflows/syncer/engine_registry.go
index d4a45fc8ce2..968850149a5 100644
--- a/core/services/workflows/syncer/engine_registry.go
+++ b/core/services/workflows/syncer/engine_registry.go
@@ -75,7 +75,7 @@ func (r *EngineRegistry) Get(key EngineRegistryKey) (ServiceWithMetadata, bool)
func (r *EngineRegistry) GetAll() []ServiceWithMetadata {
r.mu.RLock()
defer r.mu.RUnlock()
- engines := []ServiceWithMetadata{}
+ engines := make([]ServiceWithMetadata, 0, len(r.engines))
for _, enginWithMetadata := range r.engines {
engines = append(engines, enginWithMetadata)
}
diff --git a/core/services/workflows/syncer/handler.go b/core/services/workflows/syncer/handler.go
index 386b59c463b..2c0a9b8a1f8 100644
--- a/core/services/workflows/syncer/handler.go
+++ b/core/services/workflows/syncer/handler.go
@@ -258,7 +258,7 @@ func NewEventHandler(
func (h *eventHandler) close() error {
es := h.engineRegistry.PopAll()
- cs := []io.Closer{}
+ cs := make([]io.Closer, 0, len(es)+1)
cs = append(cs, h.engineLimiters)
for _, e := range es {
cs = append(cs, e)
diff --git a/core/services/workflows/syncer/handler_test.go b/core/services/workflows/syncer/handler_test.go
index 89b696a6a5d..01a0eba6c70 100644
--- a/core/services/workflows/syncer/handler_test.go
+++ b/core/services/workflows/syncer/handler_test.go
@@ -331,10 +331,10 @@ func Test_workflowRegisteredHandler(t *testing.T) {
secretsURL := "http://example.com/secrets"
configURL := "http://example.com/config"
config := []byte("")
- wfOwner := []byte("0xOwner")
+ wfOwner := testutils.NewAddress().Bytes()
+
binary := wasmtest.CreateTestBinary(binaryCmd, true, t)
encodedBinary := []byte(base64.StdEncoding.EncodeToString(binary))
- workflowName := "workflow-name"
workflowEncryptionKey := workflowkey.MustNewXXXTestingOnly(big.NewInt(1))
defaultValidationFn := func(t *testing.T, ctx context.Context, event WorkflowRegisteredV1, h *eventHandler, s *artifacts.Store, wfOwner []byte, wfName string, wfID string, fetcher *mockFetcher) {
@@ -342,14 +342,14 @@ func Test_workflowRegisteredHandler(t *testing.T) {
require.NoError(t, err)
// Verify the record is updated in the database
- dbSpec, err := s.GetWorkflowSpec(ctx, hex.EncodeToString(wfOwner), workflowName)
+ dbSpec, err := s.GetWorkflowSpec(ctx, hex.EncodeToString(wfOwner), wfName)
require.NoError(t, err)
require.Equal(t, hex.EncodeToString(wfOwner), dbSpec.WorkflowOwner)
- require.Equal(t, workflowName, dbSpec.WorkflowName)
+ require.Equal(t, wfName, dbSpec.WorkflowName)
require.Equal(t, job.WorkflowSpecStatusActive, dbSpec.Status)
// Verify the engine is started
- engine, ok := h.engineRegistry.Get(EngineRegistryKey{Owner: wfOwner, Name: workflowName})
+ engine, ok := h.engineRegistry.Get(EngineRegistryKey{Owner: wfOwner, Name: wfName})
require.True(t, ok)
err = engine.Ready()
require.NoError(t, err)
@@ -381,12 +381,12 @@ func Test_workflowRegisteredHandler(t *testing.T) {
BinaryURL: binaryURL,
GiveBinary: binary,
WFOwner: wfOwner,
- Event: func(wfID []byte) WorkflowRegisteredV1 {
+ Event: func(wfID []byte, wfName string, wfOwner []byte) WorkflowRegisteredV1 {
return WorkflowRegisteredV1{
Status: uint8(0),
WorkflowID: [32]byte(wfID),
WorkflowOwner: wfOwner,
- WorkflowName: workflowName,
+ WorkflowName: wfName,
BinaryURL: binaryURL,
ConfigURL: configURL,
SecretsURL: secretsURL,
@@ -417,12 +417,12 @@ func Test_workflowRegisteredHandler(t *testing.T) {
BinaryURL: binaryURL,
GiveBinary: binary,
WFOwner: wfOwner,
- Event: func(wfID []byte) WorkflowRegisteredV1 {
+ Event: func(wfID []byte, wfName string, wfOwner []byte) WorkflowRegisteredV1 {
return WorkflowRegisteredV1{
Status: uint8(0),
WorkflowID: [32]byte(wfID),
WorkflowOwner: wfOwner,
- WorkflowName: workflowName,
+ WorkflowName: wfName,
BinaryURL: binaryURL,
ConfigURL: configURL,
SecretsURL: secretsURL,
@@ -446,12 +446,12 @@ func Test_workflowRegisteredHandler(t *testing.T) {
BinaryURL: binaryURL,
GiveBinary: binary,
WFOwner: wfOwner,
- Event: func(wfID []byte) WorkflowRegisteredV1 {
+ Event: func(wfID []byte, wfName string, wfOwner []byte) WorkflowRegisteredV1 {
return WorkflowRegisteredV1{
Status: uint8(0),
WorkflowID: [32]byte(wfID),
WorkflowOwner: wfOwner,
- WorkflowName: workflowName,
+ WorkflowName: wfName,
BinaryURL: binaryURL,
ConfigURL: configURL,
SecretsURL: secretsURL,
@@ -478,12 +478,12 @@ func Test_workflowRegisteredHandler(t *testing.T) {
BinaryURL: binaryURL,
GiveBinary: binary,
WFOwner: wfOwner,
- Event: func(wfID []byte) WorkflowRegisteredV1 {
+ Event: func(wfID []byte, wfName string, wfOwner []byte) WorkflowRegisteredV1 {
return WorkflowRegisteredV1{
Status: uint8(0),
WorkflowID: [32]byte(wfID),
WorkflowOwner: wfOwner,
- WorkflowName: workflowName,
+ WorkflowName: wfName,
BinaryURL: binaryURL,
ConfigURL: configURL,
SecretsURL: secretsURL,
@@ -493,7 +493,7 @@ func Test_workflowRegisteredHandler(t *testing.T) {
me := &mockEngine{}
var wfIDBytes [32]byte
copy(wfIDBytes[:], wfID)
- err := h.engineRegistry.Add(EngineRegistryKey{Owner: wfOwner, Name: workflowName}, me, wfIDBytes)
+ err := h.engineRegistry.Add(EngineRegistryKey{Owner: wfOwner, Name: wfName}, me, wfIDBytes)
require.NoError(t, err)
err = h.workflowRegisteredEvent(ctx, event)
require.NoError(t, err)
@@ -512,12 +512,12 @@ func Test_workflowRegisteredHandler(t *testing.T) {
BinaryURL: binaryURL,
GiveBinary: binary,
WFOwner: wfOwner,
- Event: func(wfID []byte) WorkflowRegisteredV1 {
+ Event: func(wfID []byte, wfName string, wfOwner []byte) WorkflowRegisteredV1 {
return WorkflowRegisteredV1{
Status: uint8(0),
WorkflowID: [32]byte(wfID),
WorkflowOwner: wfOwner,
- WorkflowName: workflowName,
+ WorkflowName: wfName,
BinaryURL: binaryURL,
ConfigURL: configURL,
SecretsURL: secretsURL,
@@ -526,11 +526,11 @@ func Test_workflowRegisteredHandler(t *testing.T) {
validationFn: func(t *testing.T, ctx context.Context, event WorkflowRegisteredV1, h *eventHandler, s *artifacts.Store, wfOwner []byte, wfName string, wfID string, fetcher *mockFetcher) {
me := &mockEngine{}
oldWfIDBytes := [32]byte{0, 1, 2, 3, 5}
- err := h.engineRegistry.Add(EngineRegistryKey{Owner: wfOwner, Name: workflowName}, me, oldWfIDBytes)
+ err := h.engineRegistry.Add(EngineRegistryKey{Owner: wfOwner, Name: wfName}, me, oldWfIDBytes)
require.NoError(t, err)
err = h.workflowRegisteredEvent(ctx, event)
require.NoError(t, err)
- engineInRegistry, ok := h.engineRegistry.Get(EngineRegistryKey{Owner: wfOwner, Name: workflowName})
+ engineInRegistry, ok := h.engineRegistry.Get(EngineRegistryKey{Owner: wfOwner, Name: wfName})
assert.True(t, ok)
require.Equal(t, engineInRegistry.WorkflowID.Hex(), wfID)
},
@@ -548,12 +548,12 @@ func Test_workflowRegisteredHandler(t *testing.T) {
BinaryURL: binaryURL,
GiveBinary: binary,
WFOwner: wfOwner,
- Event: func(wfID []byte) WorkflowRegisteredV1 {
+ Event: func(wfID []byte, wfName string, wfOwner []byte) WorkflowRegisteredV1 {
return WorkflowRegisteredV1{
Status: uint8(1),
WorkflowID: [32]byte(wfID),
WorkflowOwner: wfOwner,
- WorkflowName: workflowName,
+ WorkflowName: wfName,
BinaryURL: binaryURL,
ConfigURL: configURL,
SecretsURL: secretsURL,
@@ -566,10 +566,10 @@ func Test_workflowRegisteredHandler(t *testing.T) {
require.NoError(t, err)
// Verify the record is updated in the database
- dbSpec, err := s.GetWorkflowSpec(ctx, hex.EncodeToString(wfOwner), workflowName)
+ dbSpec, err := s.GetWorkflowSpec(ctx, hex.EncodeToString(wfOwner), wfName)
require.NoError(t, err)
require.Equal(t, hex.EncodeToString(wfOwner), dbSpec.WorkflowOwner)
- require.Equal(t, workflowName, dbSpec.WorkflowName)
+ require.Equal(t, wfName, dbSpec.WorkflowName)
require.Equal(t, job.WorkflowSpecStatusPaused, dbSpec.Status)
// Verify there is no running engine
@@ -590,12 +590,12 @@ func Test_workflowRegisteredHandler(t *testing.T) {
BinaryURL: binaryURL,
GiveBinary: binary,
WFOwner: wfOwner,
- Event: func(wfID []byte) WorkflowRegisteredV1 {
+ Event: func(wfID []byte, wfName string, wfOwner []byte) WorkflowRegisteredV1 {
return WorkflowRegisteredV1{
Status: uint8(0),
WorkflowID: [32]byte(wfID),
WorkflowOwner: wfOwner,
- WorkflowName: workflowName,
+ WorkflowName: wfName,
BinaryURL: binaryURL,
ConfigURL: configURL,
SecretsURL: secretsURL,
@@ -625,10 +625,10 @@ func Test_workflowRegisteredHandler(t *testing.T) {
require.NoError(t, err)
// Verify the record is updated in the database
- dbSpec, err := s.GetWorkflowSpec(ctx, hex.EncodeToString(wfOwner), workflowName)
+ dbSpec, err := s.GetWorkflowSpec(ctx, hex.EncodeToString(wfOwner), wfName)
require.NoError(t, err)
require.Equal(t, hex.EncodeToString(wfOwner), dbSpec.WorkflowOwner)
- require.Equal(t, workflowName, dbSpec.WorkflowName)
+ require.Equal(t, wfName, dbSpec.WorkflowName)
// This reflects the event status, not what was previously stored in the DB
require.Equal(t, job.WorkflowSpecStatusActive, dbSpec.Status)
@@ -657,12 +657,12 @@ func Test_workflowRegisteredHandler(t *testing.T) {
require.Equal(t, 0, fetcher.Calls(event.ConfigURL))
require.Equal(t, 1, fetcher.Calls(event.SecretsURL))
},
- Event: func(wfID []byte) WorkflowRegisteredV1 {
+ Event: func(wfID []byte, wfName string, wfOwner []byte) WorkflowRegisteredV1 {
return WorkflowRegisteredV1{
Status: uint8(0),
WorkflowID: [32]byte(wfID),
WorkflowOwner: wfOwner,
- WorkflowName: workflowName,
+ WorkflowName: wfName,
BinaryURL: binaryURL,
SecretsURL: secretsURL,
}
@@ -687,12 +687,12 @@ func Test_workflowRegisteredHandler(t *testing.T) {
require.Equal(t, 1, fetcher.Calls(event.ConfigURL))
require.Equal(t, 0, fetcher.Calls(event.SecretsURL))
},
- Event: func(wfID []byte) WorkflowRegisteredV1 {
+ Event: func(wfID []byte, wfName string, wfOwner []byte) WorkflowRegisteredV1 {
return WorkflowRegisteredV1{
Status: uint8(0),
WorkflowID: [32]byte(wfID),
WorkflowOwner: wfOwner,
- WorkflowName: workflowName,
+ WorkflowName: wfName,
BinaryURL: binaryURL,
ConfigURL: configURL,
}
@@ -732,12 +732,12 @@ func Test_workflowRegisteredHandler(t *testing.T) {
require.Equal(t, 0, fetcher.Calls(event.ConfigURL))
require.Equal(t, 0, fetcher.Calls(event.SecretsURL))
},
- Event: func(wfID []byte) WorkflowRegisteredV1 {
+ Event: func(wfID []byte, wfName string, wfOwner []byte) WorkflowRegisteredV1 {
return WorkflowRegisteredV1{
Status: uint8(0),
WorkflowID: [32]byte(wfID),
WorkflowOwner: wfOwner,
- WorkflowName: workflowName,
+ WorkflowName: wfName,
BinaryURL: binaryURL,
ConfigURL: configURL,
}
@@ -759,7 +759,7 @@ type testCase struct {
ConfigURL string
WFOwner []byte
fetcher *mockFetcher
- Event func([]byte) WorkflowRegisteredV1
+ Event func(wfID []byte, wfName string, wfOwner []byte) WorkflowRegisteredV1
validationFn func(t *testing.T, ctx context.Context, event WorkflowRegisteredV1, h *eventHandler, s *artifacts.Store, wfOwner []byte, wfName string, wfID string, fetcher *mockFetcher)
engineFactoryFn func(ctx context.Context, wfid string, owner string, name types.WorkflowName, config []byte, binary []byte) (services.Service, error)
}
@@ -777,17 +777,16 @@ func testRunningWorkflow(t *testing.T, tc testCase, workflowEncryptionKey workfl
binary = tc.GiveBinary
config = tc.GiveConfig
secretsURL = tc.SecretsURL
- wfOwner = tc.WFOwner
-
- fetcher = tc.fetcher
+ wfName = testutils.RandomizeName(t.Name())
)
+ wfOwner := testutils.NewAddress().Bytes()
- giveWFID, err := pkgworkflows.GenerateWorkflowID(wfOwner, "workflow-name", binary, config, secretsURL)
+ giveWFID, err := pkgworkflows.GenerateWorkflowID(wfOwner, wfName, binary, config, secretsURL)
require.NoError(t, err)
wfID := hex.EncodeToString(giveWFID[:])
- event := tc.Event(giveWFID[:])
+ event := tc.Event(giveWFID[:], wfName, wfOwner)
er := NewEngineRegistry()
opts := []func(*eventHandler){
@@ -810,14 +809,14 @@ func testRunningWorkflow(t *testing.T, tc testCase, workflowEncryptionKey workfl
require.NoError(t, err)
decrypter := newMockDecrypter()
- artifactStore := artifacts.NewStoreWithDecryptSecretsFn(lggr, orm, fetcher.FetcherFunc(), clockwork.NewFakeClock(), workflowkey.Key{}, custmsg.NewLabeler(), decrypter.decryptSecrets)
+ artifactStore := artifacts.NewStoreWithDecryptSecretsFn(lggr, orm, tc.fetcher.FetcherFunc(), clockwork.NewFakeClock(), workflowkey.Key{}, custmsg.NewLabeler(), decrypter.decryptSecrets)
donTime := dontime.NewStore(dontime.DefaultRequestTimeout)
h, err := NewEventHandler(lggr, store, registry, donTime, true, NewEngineRegistry(), emitter, limiters, featureFlags, rl, workflowLimits, artifactStore, workflowEncryptionKey, &testDonNotifier{}, opts...)
servicetest.Run(t, h)
require.NoError(t, err)
- tc.validationFn(t, ctx, event, h, artifactStore, wfOwner, "workflow-name", wfID, fetcher)
+ tc.validationFn(t, ctx, event, h, artifactStore, wfOwner, wfName, wfID, tc.fetcher)
})
}
@@ -885,11 +884,12 @@ func Test_workflowDeletedHandler(t *testing.T) {
workflowEncryptionKey := workflowkey.MustNewXXXTestingOnly(big.NewInt(1))
t.Run("success deleting existing engine and spec", func(t *testing.T) {
var (
- ctx = testutils.Context(t)
- lggr = logger.TestLogger(t)
- db = pgtest.NewSqlxDB(t)
- orm = artifacts.NewWorkflowRegistryDS(db, lggr)
- emitter = custmsg.NewLabeler()
+ ctx = testutils.Context(t)
+ lggr = logger.TestLogger(t)
+ db = pgtest.NewSqlxDB(t)
+ orm = artifacts.NewWorkflowRegistryDS(db, lggr)
+ emitter = custmsg.NewLabeler()
+ workflowName = testutils.RandomizeName(t.Name())
binary = wasmtest.CreateTestBinary(binaryCmd, true, t)
encodedBinary = []byte(base64.StdEncoding.EncodeToString(binary))
@@ -897,16 +897,16 @@ func Test_workflowDeletedHandler(t *testing.T) {
secretsURL = "http://example.com"
binaryURL = "http://example.com/binary"
configURL = "http://example.com/config"
- wfOwner = []byte("0xOwner")
-
- fetcher = newMockFetcher(map[string]mockFetchResp{
- binaryURL: {Body: encodedBinary, Err: nil},
- configURL: {Body: config, Err: nil},
- secretsURL: {Body: []byte("secrets"), Err: nil},
- })
)
+ wfOwner := testutils.NewAddress().Bytes()
+
+ fetcher := newMockFetcher(map[string]mockFetchResp{
+ binaryURL: {Body: encodedBinary, Err: nil},
+ configURL: {Body: config, Err: nil},
+ secretsURL: {Body: []byte("secrets"), Err: nil},
+ })
- giveWFID, err := pkgworkflows.GenerateWorkflowID(wfOwner, "workflow-name", binary, config, secretsURL)
+ giveWFID, err := pkgworkflows.GenerateWorkflowID(wfOwner, workflowName, binary, config, secretsURL)
require.NoError(t, err)
@@ -914,7 +914,7 @@ func Test_workflowDeletedHandler(t *testing.T) {
Status: uint8(0),
WorkflowID: giveWFID,
WorkflowOwner: wfOwner,
- WorkflowName: "workflow-name",
+ WorkflowName: workflowName,
BinaryURL: binaryURL,
ConfigURL: configURL,
SecretsURL: secretsURL,
@@ -943,10 +943,10 @@ func Test_workflowDeletedHandler(t *testing.T) {
require.NoError(t, err)
// Verify the record is updated in the database
- dbSpec, err := orm.GetWorkflowSpec(ctx, hex.EncodeToString(wfOwner), "workflow-name")
+ dbSpec, err := orm.GetWorkflowSpec(ctx, hex.EncodeToString(wfOwner), workflowName)
require.NoError(t, err)
require.Equal(t, hex.EncodeToString(wfOwner), dbSpec.WorkflowOwner)
- require.Equal(t, "workflow-name", dbSpec.WorkflowName)
+ require.Equal(t, workflowName, dbSpec.WorkflowName)
require.Equal(t, job.WorkflowSpecStatusActive, dbSpec.Status)
// Verify the engine is started
@@ -958,14 +958,14 @@ func Test_workflowDeletedHandler(t *testing.T) {
deleteEvent := WorkflowDeletedV1{
WorkflowID: giveWFID,
WorkflowOwner: wfOwner,
- WorkflowName: "workflow-name",
+ WorkflowName: workflowName,
DonID: 1,
}
err = h.workflowDeletedEvent(ctx, deleteEvent)
require.NoError(t, err)
// Verify the record is deleted in the database
- _, err = orm.GetWorkflowSpec(ctx, hex.EncodeToString(wfOwner), "workflow-name")
+ _, err = orm.GetWorkflowSpec(ctx, hex.EncodeToString(wfOwner), workflowName)
require.Error(t, err)
// Verify the engine is deleted
@@ -974,11 +974,12 @@ func Test_workflowDeletedHandler(t *testing.T) {
})
t.Run("success deleting non-existing workflow spec", func(t *testing.T) {
var (
- ctx = testutils.Context(t)
- lggr = logger.TestLogger(t)
- db = pgtest.NewSqlxDB(t)
- orm = artifacts.NewWorkflowRegistryDS(db, lggr)
- emitter = custmsg.NewLabeler()
+ ctx = testutils.Context(t)
+ lggr = logger.TestLogger(t)
+ db = pgtest.NewSqlxDB(t)
+ orm = artifacts.NewWorkflowRegistryDS(db, lggr)
+ emitter = custmsg.NewLabeler()
+ workflowName = testutils.RandomizeName(t.Name())
binary = wasmtest.CreateTestBinary(binaryCmd, true, t)
encodedBinary = []byte(base64.StdEncoding.EncodeToString(binary))
@@ -986,16 +987,16 @@ func Test_workflowDeletedHandler(t *testing.T) {
secretsURL = "http://example.com"
binaryURL = "http://example.com/binary"
configURL = "http://example.com/config"
- wfOwner = []byte("0xOwner")
-
- fetcher = newMockFetcher(map[string]mockFetchResp{
- binaryURL: {Body: encodedBinary, Err: nil},
- configURL: {Body: config, Err: nil},
- secretsURL: {Body: []byte("secrets"), Err: nil},
- })
)
+ wfOwner := testutils.NewAddress().Bytes()
+
+ fetcher := newMockFetcher(map[string]mockFetchResp{
+ binaryURL: {Body: encodedBinary, Err: nil},
+ configURL: {Body: config, Err: nil},
+ secretsURL: {Body: []byte("secrets"), Err: nil},
+ })
- giveWFID, err := pkgworkflows.GenerateWorkflowID(wfOwner, "workflow-name", binary, config, secretsURL)
+ giveWFID, err := pkgworkflows.GenerateWorkflowID(wfOwner, workflowName, binary, config, secretsURL)
require.NoError(t, err)
er := NewEngineRegistry()
@@ -1020,23 +1021,24 @@ func Test_workflowDeletedHandler(t *testing.T) {
deleteEvent := WorkflowDeletedV1{
WorkflowID: giveWFID,
WorkflowOwner: wfOwner,
- WorkflowName: "workflow-name",
+ WorkflowName: workflowName,
DonID: 1,
}
err = h.workflowDeletedEvent(ctx, deleteEvent)
require.NoError(t, err)
// Verify the record is deleted in the database
- _, err = orm.GetWorkflowSpec(ctx, hex.EncodeToString(wfOwner), "workflow-name")
+ _, err = orm.GetWorkflowSpec(ctx, hex.EncodeToString(wfOwner), workflowName)
require.Error(t, err)
})
t.Run("removes from DB before engine registry", func(t *testing.T) {
var (
- ctx = testutils.Context(t)
- lggr = logger.TestLogger(t)
- db = pgtest.NewSqlxDB(t)
- orm = artifacts.NewWorkflowRegistryDS(db, lggr)
- emitter = custmsg.NewLabeler()
+ ctx = testutils.Context(t)
+ lggr = logger.TestLogger(t)
+ db = pgtest.NewSqlxDB(t)
+ orm = artifacts.NewWorkflowRegistryDS(db, lggr)
+ emitter = custmsg.NewLabeler()
+ workflowName = testutils.RandomizeName(t.Name())
binary = wasmtest.CreateTestBinary(binaryCmd, true, t)
encodedBinary = []byte(base64.StdEncoding.EncodeToString(binary))
@@ -1044,18 +1046,18 @@ func Test_workflowDeletedHandler(t *testing.T) {
secretsURL = "http://example.com"
binaryURL = "http://example.com/binary"
configURL = "http://example.com/config"
- wfOwner = []byte("0xOwner")
-
- fetcher = newMockFetcher(map[string]mockFetchResp{
- binaryURL: {Body: encodedBinary, Err: nil},
- configURL: {Body: config, Err: nil},
- secretsURL: {Body: []byte("secrets"), Err: nil},
- })
failWith = "mocked fail DB delete"
)
+ wfOwner := testutils.NewAddress().Bytes()
- giveWFID, err := pkgworkflows.GenerateWorkflowID(wfOwner, "workflow-name", binary, config, secretsURL)
+ fetcher := newMockFetcher(map[string]mockFetchResp{
+ binaryURL: {Body: encodedBinary, Err: nil},
+ configURL: {Body: config, Err: nil},
+ secretsURL: {Body: []byte("secrets"), Err: nil},
+ })
+
+ giveWFID, err := pkgworkflows.GenerateWorkflowID(wfOwner, workflowName, binary, config, secretsURL)
require.NoError(t, err)
@@ -1063,7 +1065,7 @@ func Test_workflowDeletedHandler(t *testing.T) {
Status: uint8(0),
WorkflowID: giveWFID,
WorkflowOwner: wfOwner,
- WorkflowName: "workflow-name",
+ WorkflowName: workflowName,
BinaryURL: binaryURL,
ConfigURL: configURL,
SecretsURL: secretsURL,
@@ -1093,10 +1095,10 @@ func Test_workflowDeletedHandler(t *testing.T) {
require.NoError(t, err)
// Verify the record is updated in the database
- dbSpec, err := orm.GetWorkflowSpec(ctx, hex.EncodeToString(wfOwner), "workflow-name")
+ dbSpec, err := orm.GetWorkflowSpec(ctx, hex.EncodeToString(wfOwner), workflowName)
require.NoError(t, err)
require.Equal(t, hex.EncodeToString(wfOwner), dbSpec.WorkflowOwner)
- require.Equal(t, "workflow-name", dbSpec.WorkflowName)
+ require.Equal(t, workflowName, dbSpec.WorkflowName)
require.Equal(t, job.WorkflowSpecStatusActive, dbSpec.Status)
// Verify the engine is started
@@ -1108,14 +1110,14 @@ func Test_workflowDeletedHandler(t *testing.T) {
deleteEvent := WorkflowDeletedV1{
WorkflowID: giveWFID,
WorkflowOwner: wfOwner,
- WorkflowName: "workflow-name",
+ WorkflowName: workflowName,
DonID: 1,
}
err = h.workflowDeletedEvent(ctx, deleteEvent)
require.Error(t, err, failWith)
// Verify the record is still in the DB
- _, err = orm.GetWorkflowSpec(ctx, hex.EncodeToString(wfOwner), "workflow-name")
+ _, err = orm.GetWorkflowSpec(ctx, hex.EncodeToString(wfOwner), workflowName)
require.NoError(t, err)
// Verify the engine is still running
@@ -1128,11 +1130,12 @@ func Test_workflowPausedActivatedUpdatedHandler(t *testing.T) {
t.Parallel()
t.Run("success pausing activating and updating existing engine and spec", func(t *testing.T) {
var (
- ctx = testutils.Context(t)
- lggr = logger.TestLogger(t)
- db = pgtest.NewSqlxDB(t)
- orm = artifacts.NewWorkflowRegistryDS(db, lggr)
- emitter = custmsg.NewLabeler()
+ ctx = testutils.Context(t)
+ lggr = logger.TestLogger(t)
+ db = pgtest.NewSqlxDB(t)
+ orm = artifacts.NewWorkflowRegistryDS(db, lggr)
+ emitter = custmsg.NewLabeler()
+ workflowName = testutils.RandomizeName(t.Name())
binary = wasmtest.CreateTestBinary(binaryCmd, true, t)
encodedBinary = []byte(base64.StdEncoding.EncodeToString(binary))
@@ -1142,19 +1145,19 @@ func Test_workflowPausedActivatedUpdatedHandler(t *testing.T) {
binaryURL = "http://example.com/binary"
configURL = "http://example.com/config"
newConfigURL = "http://example.com/new-config"
- wfOwner = []byte("0xOwner")
-
- fetcher = newMockFetcher(map[string]mockFetchResp{
- binaryURL: {Body: encodedBinary, Err: nil},
- configURL: {Body: config, Err: nil},
- newConfigURL: {Body: updateConfig, Err: nil},
- secretsURL: {Body: []byte("secrets"), Err: nil},
- })
)
+ wfOwner := testutils.NewAddress().Bytes()
+
+ fetcher := newMockFetcher(map[string]mockFetchResp{
+ binaryURL: {Body: encodedBinary, Err: nil},
+ configURL: {Body: config, Err: nil},
+ newConfigURL: {Body: updateConfig, Err: nil},
+ secretsURL: {Body: []byte("secrets"), Err: nil},
+ })
- giveWFID, err := pkgworkflows.GenerateWorkflowID(wfOwner, "workflow-name", binary, config, secretsURL)
+ giveWFID, err := pkgworkflows.GenerateWorkflowID(wfOwner, workflowName, binary, config, secretsURL)
require.NoError(t, err)
- updatedWFID, err := pkgworkflows.GenerateWorkflowID(wfOwner, "workflow-name", binary, updateConfig, secretsURL)
+ updatedWFID, err := pkgworkflows.GenerateWorkflowID(wfOwner, workflowName, binary, updateConfig, secretsURL)
require.NoError(t, err)
newWFIDs := hex.EncodeToString(updatedWFID[:])
@@ -1162,7 +1165,7 @@ func Test_workflowPausedActivatedUpdatedHandler(t *testing.T) {
Status: uint8(0),
WorkflowID: giveWFID,
WorkflowOwner: wfOwner,
- WorkflowName: "workflow-name",
+ WorkflowName: workflowName,
BinaryURL: binaryURL,
ConfigURL: configURL,
SecretsURL: secretsURL,
@@ -1193,10 +1196,10 @@ func Test_workflowPausedActivatedUpdatedHandler(t *testing.T) {
require.NoError(t, err)
// Verify the record is updated in the database
- dbSpec, err := orm.GetWorkflowSpec(ctx, hex.EncodeToString(wfOwner), "workflow-name")
+ dbSpec, err := orm.GetWorkflowSpec(ctx, hex.EncodeToString(wfOwner), workflowName)
require.NoError(t, err)
require.Equal(t, hex.EncodeToString(wfOwner), dbSpec.WorkflowOwner)
- require.Equal(t, "workflow-name", dbSpec.WorkflowName)
+ require.Equal(t, workflowName, dbSpec.WorkflowName)
require.Equal(t, job.WorkflowSpecStatusActive, dbSpec.Status)
// Verify the engine is started
@@ -1209,17 +1212,17 @@ func Test_workflowPausedActivatedUpdatedHandler(t *testing.T) {
pauseEvent := WorkflowPausedV1{
WorkflowID: giveWFID,
WorkflowOwner: wfOwner,
- WorkflowName: "workflow-name",
+ WorkflowName: workflowName,
DonID: 1,
}
err = h.workflowPausedEvent(ctx, pauseEvent)
require.NoError(t, err)
// Verify the record is updated in the database
- dbSpec, err = orm.GetWorkflowSpec(ctx, hex.EncodeToString(wfOwner), "workflow-name")
+ dbSpec, err = orm.GetWorkflowSpec(ctx, hex.EncodeToString(wfOwner), workflowName)
require.NoError(t, err)
require.Equal(t, hex.EncodeToString(wfOwner), dbSpec.WorkflowOwner)
- require.Equal(t, "workflow-name", dbSpec.WorkflowName)
+ require.Equal(t, workflowName, dbSpec.WorkflowName)
require.Equal(t, job.WorkflowSpecStatusPaused, dbSpec.Status)
// Verify the engine is removed
@@ -1231,7 +1234,7 @@ func Test_workflowPausedActivatedUpdatedHandler(t *testing.T) {
OldWorkflowID: giveWFID,
NewWorkflowID: updatedWFID,
WorkflowOwner: wfOwner,
- WorkflowName: "workflow-name",
+ WorkflowName: workflowName,
BinaryURL: binaryURL,
ConfigURL: newConfigURL,
SecretsURL: secretsURL,
@@ -1241,10 +1244,10 @@ func Test_workflowPausedActivatedUpdatedHandler(t *testing.T) {
require.NoError(t, err)
// Verify the record is updated in the database
- dbSpec, err = orm.GetWorkflowSpec(ctx, hex.EncodeToString(wfOwner), "workflow-name")
+ dbSpec, err = orm.GetWorkflowSpec(ctx, hex.EncodeToString(wfOwner), workflowName)
require.NoError(t, err)
require.Equal(t, hex.EncodeToString(wfOwner), dbSpec.WorkflowOwner)
- require.Equal(t, "workflow-name", dbSpec.WorkflowName)
+ require.Equal(t, workflowName, dbSpec.WorkflowName)
require.Equal(t, job.WorkflowSpecStatusActive, dbSpec.Status)
require.Equal(t, newWFIDs, dbSpec.WorkflowID)
require.Equal(t, newConfigURL, dbSpec.ConfigURL)
@@ -1311,11 +1314,12 @@ func TestEngineFactoryFn_SuccessfulCreation(t *testing.T) {
require.NoError(t, err)
servicetest.Run(t, eventHandler)
secretsURL := "http://example.com/secrets"
- wfOwner := "1234567890123456789012345678901234567890"
+ wfOwnerBytes := testutils.NewAddress().Bytes()
+ wfOwner := hex.EncodeToString(wfOwnerBytes)
t.Run("DAG workflow", func(t *testing.T) {
binary := wasmtest.CreateTestBinary(binaryCmd, true, t)
- workflowID, err := pkgworkflows.GenerateWorkflowID([]byte(wfOwner), "workflow-name", binary, config, secretsURL)
+ workflowID, err := pkgworkflows.GenerateWorkflowID(wfOwnerBytes, testutils.RandomizeName(t.Name()), binary, config, secretsURL)
require.NoError(t, err)
engine, err := eventHandler.engineFactoryFn(ctx, hex.EncodeToString(workflowID[:]), wfOwner, workflowName, config, binary)
require.NoError(t, err)
@@ -1324,7 +1328,7 @@ func TestEngineFactoryFn_SuccessfulCreation(t *testing.T) {
t.Run("NoDAG workflow", func(t *testing.T) {
binary := wasmtest.CreateTestBinary(noDagBinaryCmd, true, t)
- workflowID, err := pkgworkflows.GenerateWorkflowID([]byte(wfOwner), "workflow-name", binary, config, secretsURL)
+ workflowID, err := pkgworkflows.GenerateWorkflowID(wfOwnerBytes, testutils.RandomizeName(t.Name()), binary, config, secretsURL)
require.NoError(t, err)
engine, err := eventHandler.engineFactoryFn(ctx, hex.EncodeToString(workflowID[:]), wfOwner, workflowName, config, binary)
require.NoError(t, err)
diff --git a/core/services/workflows/syncer/v2/engine_registry.go b/core/services/workflows/syncer/v2/engine_registry.go
index ead8ffa1606..afd712afe2a 100644
--- a/core/services/workflows/syncer/v2/engine_registry.go
+++ b/core/services/workflows/syncer/v2/engine_registry.go
@@ -68,7 +68,7 @@ func (r *EngineRegistry) Get(workflowID types.WorkflowID) (ServiceWithMetadata,
func (r *EngineRegistry) GetAll() []ServiceWithMetadata {
r.mu.RLock()
defer r.mu.RUnlock()
- engines := []ServiceWithMetadata{}
+ engines := make([]ServiceWithMetadata, 0, len(r.engines))
for workflowID, entry := range r.engines {
engines = append(engines, ServiceWithMetadata{
WorkflowID: workflowID,
@@ -124,7 +124,7 @@ func (r *EngineRegistry) Pop(workflowID types.WorkflowID) (ServiceWithMetadata,
func (r *EngineRegistry) PopAll() []ServiceWithMetadata {
r.mu.Lock()
defer r.mu.Unlock()
- engines := []ServiceWithMetadata{}
+ engines := make([]ServiceWithMetadata, 0, len(r.engines))
for workflowID, entry := range r.engines {
engines = append(engines, ServiceWithMetadata{
WorkflowID: workflowID,
diff --git a/core/services/workflows/syncer/v2/fetcher_test.go b/core/services/workflows/syncer/v2/fetcher_test.go
index d1195e8c5a7..f4937db5735 100644
--- a/core/services/workflows/syncer/v2/fetcher_test.go
+++ b/core/services/workflows/syncer/v2/fetcher_test.go
@@ -274,11 +274,11 @@ func TestNewFetcherService(t *testing.T) {
// Connector handler never makes a connection to a gateway and the context expires.
t.Run("NOK-request_context_deadline_exceeded", func(t *testing.T) {
connector := gcmocks.NewGatewayConnector(t)
- wrapper := newConnectorWrapper(connector)
+ connWrapper := newConnectorWrapper(connector)
connector.EXPECT().AddHandler(matches.AnyContext, []string{ghcapabilities.MethodWorkflowSyncer}, mock.Anything).Return(nil)
connector.EXPECT().GatewayIDs(matches.AnyContext).Return([]string{"gateway1", "gateway2"}, nil)
- fetcher := NewFetcherService(lggr, wrapper, storageService, gateway.WithFixedStart())
+ fetcher := NewFetcherService(lggr, connWrapper, storageService, gateway.WithFixedStart())
require.NoError(t, fetcher.Start(ctx))
defer fetcher.Close()
@@ -302,11 +302,11 @@ func TestNewFetcherService(t *testing.T) {
// Connector handler cycles to next available gateway after first connection fails.
t.Run("OK-connector_handler_awaits_working_gateway", func(t *testing.T) {
connector := gcmocks.NewGatewayConnector(t)
- wrapper := newConnectorWrapper(connector)
+ connWrapper := newConnectorWrapper(connector)
connector.EXPECT().AddHandler(matches.AnyContext, []string{ghcapabilities.MethodWorkflowSyncer}, mock.Anything).Return(nil)
connector.EXPECT().GatewayIDs(matches.AnyContext).Return([]string{"gateway1", "gateway2"}, nil)
- fetcher := NewFetcherService(lggr, wrapper, storageService, gateway.WithFixedStart())
+ fetcher := NewFetcherService(lggr, connWrapper, storageService, gateway.WithFixedStart())
require.NoError(t, fetcher.Start(ctx))
defer fetcher.Close()
diff --git a/core/services/workflows/syncer/v2/grpc_workflow_source.go b/core/services/workflows/syncer/v2/grpc_workflow_source.go
index 6da52f74b01..50ee4b6d9f0 100644
--- a/core/services/workflows/syncer/v2/grpc_workflow_source.go
+++ b/core/services/workflows/syncer/v2/grpc_workflow_source.go
@@ -277,12 +277,9 @@ func (g *GRPCWorkflowSource) calculateBackoff(attempt int) time.Duration {
// Apply jitter (0.5 to 1.5 multiplier) - math/rand/v2 is auto-seeded and concurrent-safe
jitter := 0.5 + rand.Float64() //nolint:gosec // G404: weak random is fine for retry jitter
- backoff = time.Duration(float64(backoff) * jitter)
-
- // Cap at max delay
- if backoff > g.retryMaxDelay {
- backoff = g.retryMaxDelay
- }
+ backoff = min(
+ // Cap at max delay
+ time.Duration(float64(backoff)*jitter), g.retryMaxDelay)
return backoff
}
diff --git a/core/services/workflows/syncer/v2/grpc_workflow_source_test.go b/core/services/workflows/syncer/v2/grpc_workflow_source_test.go
index f7dc76ef265..70cf092d32e 100644
--- a/core/services/workflows/syncer/v2/grpc_workflow_source_test.go
+++ b/core/services/workflows/syncer/v2/grpc_workflow_source_test.go
@@ -66,10 +66,7 @@ func (m *mockGRPCClient) ListWorkflowMetadata(_ context.Context, _ []string, off
return []*pb.WorkflowMetadata{}, false, nil
}
- end := start + int(limit)
- if end > len(m.allWorkflows) {
- end = len(m.allWorkflows)
- }
+ end := min(start+int(limit), len(m.allWorkflows))
hasMore := end < len(m.allWorkflows)
return m.allWorkflows[start:end], hasMore, nil
diff --git a/core/services/workflows/syncer/v2/handler.go b/core/services/workflows/syncer/v2/handler.go
index ddddcff8e2f..c9e5e6daa47 100644
--- a/core/services/workflows/syncer/v2/handler.go
+++ b/core/services/workflows/syncer/v2/handler.go
@@ -288,7 +288,7 @@ func NewEventHandler(
func (h *eventHandler) close() error {
es := h.engineRegistry.PopAll()
- cs := []io.Closer{}
+ cs := make([]io.Closer, 0, len(es)+1)
cs = append(cs, h.engineLimiters)
for _, e := range es {
cs = append(cs, e)
diff --git a/core/services/workflows/syncer/v2/handler_test.go b/core/services/workflows/syncer/v2/handler_test.go
index bdb82b0682a..d0e53db99d2 100644
--- a/core/services/workflows/syncer/v2/handler_test.go
+++ b/core/services/workflows/syncer/v2/handler_test.go
@@ -13,13 +13,12 @@ import (
"time"
"github.com/jonboulle/clockwork"
- "google.golang.org/grpc"
- "google.golang.org/grpc/credentials/insecure"
-
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"go.opentelemetry.io/otel/trace/noop"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials/insecure"
"google.golang.org/protobuf/proto"
"github.com/smartcontractkit/chainlink-common/pkg/beholder/beholdertest"
@@ -217,10 +216,10 @@ func Test_workflowRegisteredHandler(t *testing.T) {
return "http://example.com/" + wfID + "/config"
}
config := []byte("")
- wfOwner := []byte("0xOwner")
+ wfOwner := testutils.NewAddress().Bytes()
+
binary := wasmtest.CreateTestBinary(binaryCmd, true, t)
encodedBinary := []byte(base64.StdEncoding.EncodeToString(binary))
- workflowName := "workflow-name"
workflowTag := "workflow-tag"
signedURLParameter := "?auth=abc123"
@@ -271,13 +270,13 @@ func Test_workflowRegisteredHandler(t *testing.T) {
BinaryURLFactory: binaryURLFactory,
GiveBinary: binary,
WFOwner: wfOwner,
- Event: func(wfID []byte) WorkflowRegisteredEvent {
+ Event: func(wfID []byte, wfName string, wfOwner []byte) WorkflowRegisteredEvent {
wfIDString := hex.EncodeToString(wfID)
return WorkflowRegisteredEvent{
Status: WorkflowStatusActive,
WorkflowID: [32]byte(wfID),
WorkflowOwner: wfOwner,
- WorkflowName: workflowName,
+ WorkflowName: wfName,
WorkflowTag: workflowTag,
BinaryURL: binaryURLFactory(wfIDString),
ConfigURL: configURLFactory(wfIDString),
@@ -316,12 +315,12 @@ func Test_workflowRegisteredHandler(t *testing.T) {
BinaryURLFactory: binaryURLFactory,
GiveBinary: binary,
WFOwner: wfOwner,
- Event: func(wfID []byte) WorkflowRegisteredEvent {
+ Event: func(wfID []byte, wfName string, wfOwner []byte) WorkflowRegisteredEvent {
return WorkflowRegisteredEvent{
Status: WorkflowStatusActive,
WorkflowID: [32]byte(wfID),
WorkflowOwner: wfOwner,
- WorkflowName: workflowName,
+ WorkflowName: wfName,
WorkflowTag: workflowTag,
BinaryURL: binaryURLFactory(hex.EncodeToString(wfID)),
ConfigURL: configURLFactory(hex.EncodeToString(wfID)),
@@ -353,12 +352,12 @@ func Test_workflowRegisteredHandler(t *testing.T) {
BinaryURLFactory: binaryURLFactory,
GiveBinary: binary,
WFOwner: wfOwner,
- Event: func(wfID []byte) WorkflowRegisteredEvent {
+ Event: func(wfID []byte, wfName string, wfOwner []byte) WorkflowRegisteredEvent {
return WorkflowRegisteredEvent{
Status: WorkflowStatusActive,
WorkflowID: [32]byte(wfID),
WorkflowOwner: wfOwner,
- WorkflowName: workflowName,
+ WorkflowName: wfName,
WorkflowTag: workflowTag,
BinaryURL: binaryURLFactory(hex.EncodeToString(wfID)),
ConfigURL: configURLFactory(hex.EncodeToString(wfID)),
@@ -390,12 +389,12 @@ func Test_workflowRegisteredHandler(t *testing.T) {
BinaryURLFactory: binaryURLFactory,
GiveBinary: binary,
WFOwner: wfOwner,
- Event: func(wfID []byte) WorkflowRegisteredEvent {
+ Event: func(wfID []byte, wfName string, wfOwner []byte) WorkflowRegisteredEvent {
return WorkflowRegisteredEvent{
Status: WorkflowStatusActive,
WorkflowID: [32]byte(wfID),
WorkflowOwner: wfOwner,
- WorkflowName: workflowName,
+ WorkflowName: wfName,
WorkflowTag: workflowTag,
BinaryURL: binaryURLFactory(hex.EncodeToString(wfID)),
ConfigURL: configURLFactory(hex.EncodeToString(wfID)),
@@ -427,12 +426,12 @@ func Test_workflowRegisteredHandler(t *testing.T) {
BinaryURLFactory: binaryURLFactory,
GiveBinary: binary,
WFOwner: wfOwner,
- Event: func(wfID []byte) WorkflowRegisteredEvent {
+ Event: func(wfID []byte, wfName string, wfOwner []byte) WorkflowRegisteredEvent {
return WorkflowRegisteredEvent{
Status: WorkflowStatusActive,
WorkflowID: [32]byte(wfID),
WorkflowOwner: wfOwner,
- WorkflowName: workflowName,
+ WorkflowName: wfName,
WorkflowTag: workflowTag,
BinaryURL: binaryURLFactory(hex.EncodeToString(wfID)),
ConfigURL: configURLFactory(hex.EncodeToString(wfID)),
@@ -469,12 +468,12 @@ func Test_workflowRegisteredHandler(t *testing.T) {
BinaryURLFactory: binaryURLFactory,
GiveBinary: binary,
WFOwner: wfOwner,
- Event: func(wfID []byte) WorkflowRegisteredEvent {
+ Event: func(wfID []byte, wfName string, wfOwner []byte) WorkflowRegisteredEvent {
return WorkflowRegisteredEvent{
Status: WorkflowStatusPaused,
WorkflowID: [32]byte(wfID),
WorkflowOwner: wfOwner,
- WorkflowName: workflowName,
+ WorkflowName: wfName,
WorkflowTag: workflowTag,
BinaryURL: binaryURLFactory(hex.EncodeToString(wfID)),
ConfigURL: configURLFactory(hex.EncodeToString(wfID)),
@@ -490,7 +489,7 @@ func Test_workflowRegisteredHandler(t *testing.T) {
dbSpec, err := s.GetWorkflowSpec(ctx, wfID.Hex())
require.NoError(t, err)
require.Equal(t, hex.EncodeToString(wfOwner), dbSpec.WorkflowOwner)
- require.Equal(t, workflowName, dbSpec.WorkflowName)
+ require.Equal(t, wfName, dbSpec.WorkflowName)
require.Equal(t, job.WorkflowSpecStatusPaused, dbSpec.Status)
// Verify there is no running engine
@@ -516,12 +515,12 @@ func Test_workflowRegisteredHandler(t *testing.T) {
BinaryURLFactory: binaryURLFactory,
GiveBinary: binary,
WFOwner: wfOwner,
- Event: func(wfID []byte) WorkflowRegisteredEvent {
+ Event: func(wfID []byte, wfName string, wfOwner []byte) WorkflowRegisteredEvent {
return WorkflowRegisteredEvent{
Status: WorkflowStatusActive,
WorkflowID: [32]byte(wfID),
WorkflowOwner: wfOwner,
- WorkflowName: workflowName,
+ WorkflowName: wfName,
WorkflowTag: workflowTag,
BinaryURL: binaryURLFactory(hex.EncodeToString(wfID)),
ConfigURL: configURLFactory(hex.EncodeToString(wfID)),
@@ -553,7 +552,7 @@ func Test_workflowRegisteredHandler(t *testing.T) {
dbSpec, err := s.GetWorkflowSpec(ctx, wfID.Hex())
require.NoError(t, err)
require.Equal(t, hex.EncodeToString(wfOwner), dbSpec.WorkflowOwner)
- require.Equal(t, workflowName, dbSpec.WorkflowName)
+ require.Equal(t, wfName, dbSpec.WorkflowName)
// This reflects the event status, not what was previously stored in the DB
require.Equal(t, job.WorkflowSpecStatusActive, dbSpec.Status)
@@ -585,12 +584,12 @@ func Test_workflowRegisteredHandler(t *testing.T) {
require.Equal(t, 1, fetcher.Calls(binaryURL+signedURLParameter))
require.Equal(t, 0, fetcher.Calls(configURL+signedURLParameter))
},
- Event: func(wfID []byte) WorkflowRegisteredEvent {
+ Event: func(wfID []byte, wfName string, wfOwner []byte) WorkflowRegisteredEvent {
return WorkflowRegisteredEvent{
Status: WorkflowStatusActive,
WorkflowID: [32]byte(wfID),
WorkflowOwner: wfOwner,
- WorkflowName: workflowName,
+ WorkflowName: wfName,
WorkflowTag: workflowTag,
BinaryURL: binaryURLFactory(hex.EncodeToString(wfID)),
}
@@ -638,12 +637,12 @@ func Test_workflowRegisteredHandler(t *testing.T) {
require.Equal(t, 0, fetcher.Calls(binaryURL+signedURLParameter))
require.Equal(t, 0, fetcher.Calls(configURL+signedURLParameter))
},
- Event: func(wfID []byte) WorkflowRegisteredEvent {
+ Event: func(wfID []byte, wfName string, wfOwner []byte) WorkflowRegisteredEvent {
return WorkflowRegisteredEvent{
Status: WorkflowStatusActive,
WorkflowID: [32]byte(wfID),
WorkflowOwner: wfOwner,
- WorkflowName: workflowName,
+ WorkflowName: wfName,
BinaryURL: binaryURLFactory(hex.EncodeToString(wfID)),
ConfigURL: configURLFactory(hex.EncodeToString(wfID)),
}
@@ -669,11 +668,12 @@ func Test_workflowRegisteredHandler_confidentialRouting(t *testing.T) {
binary = wasmtest.CreateTestBinary(binaryCmd, true, t)
encodedBinary = []byte(base64.StdEncoding.EncodeToString(binary))
config = []byte("")
- wfOwner = []byte("0xOwner")
+ workflowName = testutils.RandomizeName(t.Name())
workflowEncryptionKey = workflowkey.MustNewXXXTestingOnly(big.NewInt(1))
)
+ wfOwner := testutils.NewAddress().Bytes()
- giveWFID, err := pkgworkflows.GenerateWorkflowID(wfOwner, "workflow-name", binary, config, "")
+ giveWFID, err := pkgworkflows.GenerateWorkflowID(wfOwner, workflowName, binary, config, "")
require.NoError(t, err)
wfIDString := hex.EncodeToString(giveWFID[:])
@@ -728,7 +728,7 @@ func Test_workflowRegisteredHandler_confidentialRouting(t *testing.T) {
Status: WorkflowStatusActive,
WorkflowID: giveWFID,
WorkflowOwner: wfOwner,
- WorkflowName: "workflow-name",
+ WorkflowName: workflowName,
WorkflowTag: "workflow-tag",
BinaryURL: binaryURL,
ConfigURL: configURL,
@@ -765,11 +765,12 @@ func Test_workflowRegisteredHandler_confidentialRouting(t *testing.T) {
binary = wasmtest.CreateTestBinary(binaryCmd, true, t)
encodedBinary = []byte(base64.StdEncoding.EncodeToString(binary))
config = []byte("")
- wfOwner = []byte("0xOwner")
+ workflowName = testutils.RandomizeName(t.Name())
workflowEncryptionKey = workflowkey.MustNewXXXTestingOnly(big.NewInt(1))
)
+ wfOwner := testutils.NewAddress().Bytes()
- giveWFID, err := pkgworkflows.GenerateWorkflowID(wfOwner, "workflow-name", binary, config, "")
+ giveWFID, err := pkgworkflows.GenerateWorkflowID(wfOwner, workflowName, binary, config, "")
require.NoError(t, err)
wfIDString := hex.EncodeToString(giveWFID[:])
@@ -822,7 +823,7 @@ func Test_workflowRegisteredHandler_confidentialRouting(t *testing.T) {
Status: WorkflowStatusActive,
WorkflowID: giveWFID,
WorkflowOwner: wfOwner,
- WorkflowName: "workflow-name",
+ WorkflowName: workflowName,
WorkflowTag: "workflow-tag",
BinaryURL: binaryURL,
ConfigURL: configURL,
@@ -852,11 +853,12 @@ func Test_workflowRegisteredHandler_confidentialRouting(t *testing.T) {
binary = wasmtest.CreateTestBinary(binaryCmd, true, t)
encodedBinary = []byte(base64.StdEncoding.EncodeToString(binary))
config = []byte("")
- wfOwner = []byte("0xOwner")
+ workflowName = testutils.RandomizeName(t.Name())
workflowEncryptionKey = workflowkey.MustNewXXXTestingOnly(big.NewInt(1))
)
+ wfOwner := testutils.NewAddress().Bytes()
- giveWFID, err := pkgworkflows.GenerateWorkflowID(wfOwner, "workflow-name", binary, config, "")
+ giveWFID, err := pkgworkflows.GenerateWorkflowID(wfOwner, workflowName, binary, config, "")
require.NoError(t, err)
wfIDString := hex.EncodeToString(giveWFID[:])
@@ -899,7 +901,7 @@ func Test_workflowRegisteredHandler_confidentialRouting(t *testing.T) {
Status: WorkflowStatusActive,
WorkflowID: giveWFID,
WorkflowOwner: wfOwner,
- WorkflowName: "workflow-name",
+ WorkflowName: workflowName,
WorkflowTag: "workflow-tag",
BinaryURL: binaryURL,
ConfigURL: configURL,
@@ -922,7 +924,7 @@ type testCase struct {
ConfigURLFactory func(string) string
WFOwner []byte
fetcherFactory func(wfID []byte) *mockFetcher
- Event func(wfID []byte) WorkflowRegisteredEvent
+ Event func(wfID []byte, wfName string, wfOwner []byte) WorkflowRegisteredEvent
validationFn func(t *testing.T, ctx context.Context, event WorkflowRegisteredEvent, h *eventHandler, s *artifacts.Store, wfOwner []byte, wfName string, wfID types.WorkflowID, fetcher *mockFetcher, binaryURL string, configURL string)
engineFactoryFn func(ctx context.Context, wfid string, owner string, name types.WorkflowName, tag string, config []byte, binary []byte, initDone chan<- error) (services.Service, error)
}
@@ -940,16 +942,17 @@ func testRunningWorkflow(t *testing.T, tc testCase) {
binary = tc.GiveBinary
config = tc.GiveConfig
- wfOwner = tc.WFOwner
+ workflowName = testutils.RandomizeName(t.Name())
workflowEncryptionKey = workflowkey.MustNewXXXTestingOnly(big.NewInt(1))
fetcherFactory = tc.fetcherFactory
)
+ wfOwner := testutils.NewAddress().Bytes()
- giveWFID, err := pkgworkflows.GenerateWorkflowID(wfOwner, "workflow-name", binary, config, "")
+ giveWFID, err := pkgworkflows.GenerateWorkflowID(wfOwner, workflowName, binary, config, "")
require.NoError(t, err)
- event := tc.Event(giveWFID[:])
+ event := tc.Event(giveWFID[:], workflowName, wfOwner)
er := NewEngineRegistry()
opts := []func(*eventHandler){
@@ -980,7 +983,7 @@ func testRunningWorkflow(t *testing.T, tc testCase) {
servicetest.Run(t, h)
ctx = contexts.WithCRE(ctx, contexts.CRE{Owner: hex.EncodeToString(wfOwner), Workflow: hex.EncodeToString(giveWFID[:])})
- tc.validationFn(t, ctx, event, h, artifactStore, wfOwner, "workflow-name", giveWFID, fetcher, tc.BinaryURLFactory(hex.EncodeToString(giveWFID[:])), tc.ConfigURLFactory(hex.EncodeToString(giveWFID[:])))
+ tc.validationFn(t, ctx, event, h, artifactStore, wfOwner, workflowName, giveWFID, fetcher, tc.BinaryURLFactory(hex.EncodeToString(giveWFID[:])), tc.ConfigURLFactory(hex.EncodeToString(giveWFID[:])))
})
}
@@ -1067,12 +1070,13 @@ func Test_workflowDeletedHandler(t *testing.T) {
binary = wasmtest.CreateTestBinary(binaryCmd, true, t)
encodedBinary = []byte(base64.StdEncoding.EncodeToString(binary))
config = []byte("")
+ workflowName = testutils.RandomizeName(t.Name())
- wfOwner = []byte("0xOwner")
workflowEncryptionKey = workflowkey.MustNewXXXTestingOnly(big.NewInt(1))
)
+ wfOwner := testutils.NewAddress().Bytes()
- giveWFID, err := pkgworkflows.GenerateWorkflowID(wfOwner, "workflow-name", binary, config, "")
+ giveWFID, err := pkgworkflows.GenerateWorkflowID(wfOwner, workflowName, binary, config, "")
require.NoError(t, err)
wfIDString := hex.EncodeToString(giveWFID[:])
@@ -1096,7 +1100,7 @@ func Test_workflowDeletedHandler(t *testing.T) {
Status: WorkflowStatusActive,
WorkflowID: giveWFID,
WorkflowOwner: wfOwner,
- WorkflowName: "workflow-name",
+ WorkflowName: workflowName,
WorkflowTag: "workflow-tag",
BinaryURL: binaryURL,
ConfigURL: configURL,
@@ -1131,7 +1135,7 @@ func Test_workflowDeletedHandler(t *testing.T) {
dbSpec, err := orm.GetWorkflowSpec(ctx, types.WorkflowID(giveWFID).Hex())
require.NoError(t, err)
require.Equal(t, hex.EncodeToString(wfOwner), dbSpec.WorkflowOwner)
- require.Equal(t, "workflow-name", dbSpec.WorkflowName)
+ require.Equal(t, workflowName, dbSpec.WorkflowName)
require.Equal(t, job.WorkflowSpecStatusActive, dbSpec.Status)
// Verify the engine is started
@@ -1166,13 +1170,14 @@ func Test_workflowDeletedHandler(t *testing.T) {
binary = wasmtest.CreateTestBinary(binaryCmd, true, t)
config = []byte("")
- wfOwner = []byte("0xOwner")
+ workflowName = testutils.RandomizeName(t.Name())
workflowEncryptionKey = workflowkey.MustNewXXXTestingOnly(big.NewInt(1))
-
- fetcher = newMockFetcher(map[string]mockFetchResp{})
)
+ wfOwner := testutils.NewAddress().Bytes()
- giveWFID, err := pkgworkflows.GenerateWorkflowID(wfOwner, "workflow-name", binary, config, "")
+ fetcher := newMockFetcher(map[string]mockFetchResp{})
+
+ giveWFID, err := pkgworkflows.GenerateWorkflowID(wfOwner, workflowName, binary, config, "")
require.NoError(t, err)
er := NewEngineRegistry()
@@ -1216,13 +1221,15 @@ func Test_workflowDeletedHandler(t *testing.T) {
binary = wasmtest.CreateTestBinary(binaryCmd, true, t)
encodedBinary = []byte(base64.StdEncoding.EncodeToString(binary))
config = []byte("")
- wfOwner = []byte("0xOwner")
+ workflowName = testutils.RandomizeName(t.Name())
workflowEncryptionKey = workflowkey.MustNewXXXTestingOnly(big.NewInt(1))
failWith = "mocked fail DB delete"
)
+ wfOwner := testutils.NewAddress().Bytes()
+
+ giveWFID, err := pkgworkflows.GenerateWorkflowID(wfOwner, workflowName, binary, config, "")
- giveWFID, err := pkgworkflows.GenerateWorkflowID(wfOwner, "workflow-name", binary, config, "")
require.NoError(t, err)
wfIDString := hex.EncodeToString(giveWFID[:])
@@ -1244,7 +1251,7 @@ func Test_workflowDeletedHandler(t *testing.T) {
Status: WorkflowStatusActive,
WorkflowID: giveWFID,
WorkflowOwner: wfOwner,
- WorkflowName: "workflow-name",
+ WorkflowName: workflowName,
WorkflowTag: "workflow-tag",
BinaryURL: binaryURL,
ConfigURL: configURL,
@@ -1281,7 +1288,7 @@ func Test_workflowDeletedHandler(t *testing.T) {
dbSpec, err := orm.GetWorkflowSpec(ctx, types.WorkflowID(giveWFID).Hex())
require.NoError(t, err)
require.Equal(t, hex.EncodeToString(wfOwner), dbSpec.WorkflowOwner)
- require.Equal(t, "workflow-name", dbSpec.WorkflowName)
+ require.Equal(t, workflowName, dbSpec.WorkflowName)
require.Equal(t, job.WorkflowSpecStatusActive, dbSpec.Status)
// Verify the engine is started
@@ -1452,17 +1459,20 @@ func Test_Handler_OrganizationID(t *testing.T) {
linkingURL := lis.Addr().String()
var (
- lggr = logger.TestLogger(t)
- lf = limits.Factory{Logger: lggr}
- mockORM = mocks.NewORM(t)
- binary = wasmtest.CreateTestBinary(binaryCmd, true, t)
- encodedBinary = []byte(base64.StdEncoding.EncodeToString(binary))
- config = []byte("")
- wfOwner = []byte("0xOwner")
+ lggr = logger.TestLogger(t)
+ lf = limits.Factory{Logger: lggr}
+ mockORM = mocks.NewORM(t)
+ binary = wasmtest.CreateTestBinary(binaryCmd, true, t)
+ encodedBinary = []byte(base64.StdEncoding.EncodeToString(binary))
+ config = []byte("")
+ workflowName = testutils.RandomizeName(t.Name())
+
workflowEncryptionKey = workflowkey.MustNewXXXTestingOnly(big.NewInt(1))
)
+ wfOwner := testutils.NewAddress().Bytes()
+
+ giveWFID, err := pkgworkflows.GenerateWorkflowID(wfOwner, workflowName, binary, config, "")
- giveWFID, err := pkgworkflows.GenerateWorkflowID(wfOwner, "workflow-name", binary, config, "")
require.NoError(t, err)
wfIDString := hex.EncodeToString(giveWFID[:])
@@ -1526,12 +1536,11 @@ func Test_Handler_OrganizationID(t *testing.T) {
Status: WorkflowStatusActive,
WorkflowID: giveWFID,
WorkflowOwner: wfOwner,
- WorkflowName: "workflow-name",
+ WorkflowName: workflowName,
WorkflowTag: "workflow-tag",
BinaryURL: "http://example.com/" + wfIDString + "/binary",
ConfigURL: "http://example.com/" + wfIDString + "/config",
}
-
// Convert to WorkflowActivatedEvent and call through Handle method to test the full flow
activatedEvent := WorkflowActivatedEvent(event)
err = h.Handle(ctx, Event{
@@ -1571,8 +1580,9 @@ func Test_Handler_OrganizationID(t *testing.T) {
spec := &job.WorkflowSpec{
WorkflowID: hex.EncodeToString(giveWFID[:]),
WorkflowOwner: hex.EncodeToString(wfOwner),
- WorkflowName: "workflow-name",
+ WorkflowName: workflowName,
}
+
mockDeleteORM.EXPECT().GetWorkflowSpec(mock.Anything, types.WorkflowID(giveWFID).Hex()).Return(spec, nil)
mockDeleteORM.EXPECT().DeleteWorkflowSpec(mock.Anything, types.WorkflowID(giveWFID).Hex()).Return(nil)
diff --git a/core/services/workflows/syncer/v2/workflow_registry.go b/core/services/workflows/syncer/v2/workflow_registry.go
index f6b29753832..23a4aaa9f64 100644
--- a/core/services/workflows/syncer/v2/workflow_registry.go
+++ b/core/services/workflows/syncer/v2/workflow_registry.go
@@ -180,9 +180,9 @@ func WithAdditionalSources(sources []AdditionalSourceConfig) Option {
for _, src := range sources {
// Detect source type by URL scheme
- if strings.HasPrefix(src.URL, "file://") {
+ if after, ok := strings.CutPrefix(src.URL, "file://"); ok {
// File source - extract path from file:// URL
- filePath := strings.TrimPrefix(src.URL, "file://")
+ filePath := after
fileSource, err := NewFileWorkflowSourceWithPath(wr.lggr, src.Name, filePath)
if err != nil {
wr.lggr.Errorw("Failed to create file workflow source",
@@ -343,9 +343,7 @@ func (w *workflowRegistry) Start(_ context.Context) error {
ctx, cancel := w.stopCh.NewCtx()
initDoneCh := make(chan struct{})
- w.wg.Add(1)
- go func() {
- defer w.wg.Done()
+ w.wg.Go(func() {
defer w.lggr.Debugw("Successfully set ContractReader")
defer close(initDoneCh)
@@ -367,11 +365,9 @@ func (w *workflowRegistry) Start(_ context.Context) error {
w.contractReader = reader
}
}
- }()
+ })
- w.wg.Add(1)
- go func() {
- defer w.wg.Done()
+ w.wg.Go(func() {
defer cancel()
// Start goroutines to gather changes from Workflow Registry contract
select {
@@ -386,11 +382,9 @@ func (w *workflowRegistry) Start(_ context.Context) error {
return
}
w.syncUsingReconciliationStrategy(ctx)
- }()
+ })
- w.wg.Add(1)
- go func() {
- defer w.wg.Done()
+ w.wg.Go(func() {
defer cancel()
// Start goroutines to gather allowlisted requests from Workflow Registry contract
select {
@@ -399,7 +393,7 @@ func (w *workflowRegistry) Start(_ context.Context) error {
return
}
w.syncAllowlistedRequests(ctx)
- }()
+ })
return w.handler.Start(ctx)
})
@@ -1068,7 +1062,7 @@ func (w *workflowRegistry) getAllowlistedRequests(ctx context.Context, contractR
ctx, readIdentifier, primitives.Unconfirmed, params, &response,
)
if err != nil {
- return []workflow_registry_wrapper_v2.WorkflowRegistryOwnerAllowlistedRequest{}, w.lastSeenAllowlistedRequestsCount, &types.Head{Height: "0"}, errors.New("failed to get lastest value with head data. error: " + err.Error())
+ return []workflow_registry_wrapper_v2.WorkflowRegistryOwnerAllowlistedRequest{}, w.lastSeenAllowlistedRequestsCount, &types.Head{Height: "0"}, errors.New("failed to get latest value with head data. error: " + err.Error())
}
w.lggr.Debugw("contract call response",
diff --git a/core/services/workflows/syncer/v2/workflow_syncer_v2_test.go b/core/services/workflows/syncer/v2/workflow_syncer_v2_test.go
index ba5bc360cc6..5fe68383007 100644
--- a/core/services/workflows/syncer/v2/workflow_syncer_v2_test.go
+++ b/core/services/workflows/syncer/v2/workflow_syncer_v2_test.go
@@ -67,7 +67,7 @@ func Test_InitialStateSyncV2(t *testing.T) {
// Add requests to ensure we go above the MaxResultsPerQuery
activeAllowlistedRequestsCount := int(MaxResultsPerQuery + 1)
expiryTimestamp := time.Now().Add(24 * time.Hour)
- for i := 0; i < activeAllowlistedRequestsCount; i++ {
+ for i := range activeAllowlistedRequestsCount {
createSecretsRequestParams, marshalErr := json.Marshal(vaultcommon.CreateSecretsRequest{
EncryptedSecrets: []*vaultcommon.EncryptedSecret{
{
@@ -911,7 +911,7 @@ func allowlistRequest(
// Prepare the ABI arguments, in the exact order as expected by the Solidity contract.
func prepareABIArguments() (*abi.Arguments, error) {
- arguments := abi.Arguments{}
+ arguments := make(abi.Arguments, 0, 7)
uint8Type, err := abi.NewType("uint8", "", nil)
if err != nil {
diff --git a/core/services/workflows/syncer/versioning/contracts.go b/core/services/workflows/syncer/versioning/contracts.go
index 468587bf07f..df0dca3bbc2 100644
--- a/core/services/workflows/syncer/versioning/contracts.go
+++ b/core/services/workflows/syncer/versioning/contracts.go
@@ -19,8 +19,8 @@ import (
type ContractType string
const (
- ContractName = "TypeAndVersion"
- MethodName = "typeAndVersion"
+ ContractName = "TypeAndVersion"
+ MethodName = "typeAndVersion"
)
var (
diff --git a/core/services/workflows/syncer/workflow_registry.go b/core/services/workflows/syncer/workflow_registry.go
index fce7f2dad86..7aedce77d72 100644
--- a/core/services/workflows/syncer/workflow_registry.go
+++ b/core/services/workflows/syncer/workflow_registry.go
@@ -223,9 +223,7 @@ func NewWorkflowRegistry(
func (w *workflowRegistry) Start(_ context.Context) error {
return w.StartOnce(w.Name(), func() error {
ctx, cancel := w.stopCh.NewCtx()
- w.wg.Add(1)
- go func() {
- defer w.wg.Done()
+ w.wg.Go(func() {
defer cancel()
w.lggr.Debugw("Waiting for DON...")
@@ -248,7 +246,7 @@ func (w *workflowRegistry) Start(_ context.Context) error {
case SyncStrategyReconciliation:
w.syncUsingReconciliationStrategy(ctx, don, reader)
}
- }()
+ })
return w.handler.Start(ctx)
})
@@ -663,11 +661,9 @@ func (w *workflowRegistry) syncUsingReconciliationStrategy(ctx context.Context,
ets := []WorkflowRegistryEventType{
ForceUpdateSecretsEvent,
}
- w.wg.Add(1)
- go func() {
- defer w.wg.Done()
+ w.wg.Go(func() {
w.readRegistryEventsLoop(ctx, ets, don, reader, loadWorkflowsHead.Height)
- }()
+ })
ticker := w.getTicker()
pendingEvents := map[string]*reconciliationEvent{}
@@ -842,7 +838,7 @@ func (w *workflowRegistry) getWorkflowMetadata(ctx context.Context, don capabili
var workflows GetWorkflowMetadataListByDONReturnVal
headAtLastRead, err = contractReader.GetLatestValueWithHeadData(ctx, readIdentifier, primitives.Finalized, params, &workflows)
if err != nil {
- return []GetWorkflowMetadata{}, &types.Head{Height: "0"}, fmt.Errorf("failed to get lastest value with head data %w", err)
+ return []GetWorkflowMetadata{}, &types.Head{Height: "0"}, fmt.Errorf("failed to get latest value with head data %w", err)
}
allWorkflows = append(allWorkflows, workflows.WorkflowMetadataList...)
@@ -862,7 +858,7 @@ func (w *workflowRegistry) getWorkflowMetadata(ctx context.Context, don capabili
func toWorkflowRegistryEventResponse(
log types.Sequence,
evt WorkflowRegistryEventType,
- lggr logger.Logger,
+ _ logger.Logger,
) (workflowRegistryEvent, error) {
resp := workflowRegistryEvent{
Cursor: log.Cursor,
diff --git a/tools/test/.agents/skills/chainlink-test-diagnosis/SKILL.md b/tools/test/.agents/skills/chainlink-test-diagnosis/SKILL.md
deleted file mode 100644
index e7c941301e9..00000000000
--- a/tools/test/.agents/skills/chainlink-test-diagnosis/SKILL.md
+++ /dev/null
@@ -1,158 +0,0 @@
----
-name: chainlink-test-diagnosis
-description: >-
- Diagnoses and fixes unstable Chainlink Go tests (flakes, races, timeouts, deadlocks,
- slow runs). Use for non-deterministic failures, CI-only instability, or test runtime.
- Do NOT use for deterministic failures, routine runs, or full-suite CI prep.
----
-
-
-- DO NOT use this skill if the user already has a known fix (apply it directly).
-- DO NOT use for deterministic first-run failures (use normal debug).
-- DO NOT use for full-suite CI prep (use `make new_test` or `make new_gotestsum` instead).
-- ONLY run tests in these packages without explicit user approval: `core/`, `deployment/`. Warn the user if running outside these.
-- DO NOT modify the test's core goal to make it pass.
-- DO NOT remove tests/assertions unless replacing with better ones or deleting confirmed dead code.
-- DO NOT modify package-wide helpers (`testutils`) to fix localized tests.
-- IF Postgres sandbox error occurs (`operation not permitted`), ask the user to run the command or approve unsandboxed execution.
-- For runs expected >2m: Execute in background. Perform a single 30s crash check, then suspend task and wait for the report.json system notification. DO NOT poll.
-
-
-
-When summarizing context, strictly maintain state in this format:
-
-## [TestName]
-Failure: [suspected failure reasons]
-SuspectedFix: [the fix you've implemented or want to try]
-NextStep: [the next step for diagnosing/fixing/verifying the test]
-
-
-## Initialization
-1. Verify target scope (test, package, or issue). If unknown, prompt user.
-2. Formulate initial hypothesis: flake, timeout, slow, panic, deadlock, or race.
-3. Run bounded diagnosis (`--fail-fast` or low `--iterations`).
-
-
-Base Command: `go -C tools/test run . diagnose [harness_flags] -- [go_test_flags] ./path`
-- ALWAYS use `--ai-output` before the `--`.
-- Harness flags (before `--`): `--iterations N`, `--fail-fast-on=(timeout|slow)`, `--parallel-iterations N`
-- Go test flags (after `--`): `--run '^TestName$'`, `--timeout 10m`, `--race`
-- Help: `go -C tools/test run . diagnose -h`
-- Shuffle test order: `go test -shuffle=on -count=50 -failfast ./path/to/package`
-- CPU/Memory load: `go test -cpu=1,2,4 -count=20 -failfast ./path/to/package`
-- Lint check: `golangci-lint run ./ --fix`
-
-
-## Execution & Analysis
-- **Postgres:** Serial diagnose restores DB between iterations. Parallel gives each worker an ephemeral DB. Neither resets between tests *within* one iteration.
-- **Report Analysis:** Read `/report.json` using `jq`. Top-level buckets: `flakes`, `failures`, `timeouts`, `slow`. Harness and `go test` invocation: `jq .run` (argv, iteration count, fail-fast, shuffle, etc.).
-- **Narrowing:** If many tests flag, look for similarities in their failures. If found, present that to the user and ask if they want to continue with that assumption. If not, try to focus on the most problematic test.
-- **Profiles:** When logs/report are insufficient, use standard `go test` profile flags (`-race`, `-cpuprofile`, `-trace`, etc.). View with `go tool pprof` or `go tool trace`.
-
-
-/
-|-- iteration-n.log.jsonl # DO NOT READ unless absolutely necessary; full log outputs, long and messy
-|-- postgres-state-n.md # Final state of postgres DB after test iteration. Read if diagnosing DB-based errors or hangs.
-|-- report.json # Read this; summary of full `diagnose` run (include `jq .run` for go test args and harness flags)
-|-- report.csv # DO NOT READ; human readable csv
-|-- logs/ # Extracted individual test logs
-|---- pkg_TestName_iter-n.log # Logs for individual slow/failing test
-
-
-
-When reading log files from the `logs/` directory or `iteration-n.log.jsonl`, you MUST spawn a sub-agent to read from the end up.
-The sub-agent MUST output ONLY valid JSON matching this exact structure, with no markdown, no explanations, and no yapping:
-{
- "logs_read": ["log_path_1.log", "log_path_2.log"],
- "failure_diagnosis": [
- {
- "possible_reason": "explanation",
- "evidence": "reasoning and evidence"
- }
- ]
-}
-
-
-## Playbook & General Fixes
-Lead with your hypothesis before writing code. Show contextual diffs, do not describe fixes abstractly.
-
-1. **Check Known Patterns:** See `` below for common flaky test patterns and fixes in this repo. Try them first.
-2. **Isolate (Pass alone, fail in package):** Cross-test dependency. Missing `t.Cleanup`, global state (`var` singletons, loggers), or shared mock servers. Fix by moving state to per-test constructors or using `t.Cleanup`.
-3. **Order (Shuffle changes pass rate):** Same as isolation. Fix cross-test leakage. Capture failing seed and provide to user.
-4. **Race:** Triggers on weird stack traces or nil pointers. Use `-race`. Fix with `sync.Mutex`, `atomic.*`, or narrow shared fields.
-5. **Timeout:** Check logs for blocking (chan receive, `Wait`, `testutils.WaitTimeout`). Use `synctest` to improve tests relying on channels.
-6. **Slow:** Compare `p50` vs `max_elapsed`. Look for `time.Sleep` or coarse polling loops. Replace with `require.eventually` or channel sync. Simulated chains are frequent offenders.
-7. **Resources:** If failing under load/CI only, DB connections might be exhausted by `t.Parallel()`. Use separate schema/user per test.
-
-
-
-
- The dominant flake pattern in simulated-chain tests that enable `Feature.LogPoller = true`. Error message contains `"failed to retrieve log value pointer of block N: not found"` and the stack trace points to a `FilterXxx` call that immediately follows a `backend.Commit()`. Note: Raw geth bindings do NOT have this race, only interface types backed by LogPoller.
-
-
-
- For one-shot events where you only need a value emitted at creation (e.g. `SubscriptionCreated`, `RequestSent`): parse the tx receipt directly instead of calling `FilterXxx`.
- ```go
- // AFTER (deterministic):
- tx, err := coordinator.CreateSubscription(auth)
- require.NoError(t, err)
- backend.Commit()
- receipt, err := backend.Client().TransactionReceipt(ctx, tx.Hash())
- require.NoError(t, err)
- require.Equal(t, uint64(1), receipt.Status)
- var subID *big.Int
- for _, log := range receipt.Logs {
- if log.Address != coordinatorAddress {
- continue
- }
- // SubscriptionCreated(uint64 indexed subId, address owner): Topics[1] = subId
- subID = new(big.Int).SetBytes(log.Topics[1].Bytes())
- break
- }
- require.NotNil(t, subID, "no SubscriptionCreated log in receipt")
- ```
-
-
-
- For diagnostic/verification filters called inside a polling loop: a transient LogPoller error must not crash the test — it should retry.
- ```go
- // AFTER (retries):
- require.Eventually(t, func() bool {
- // LogPoller may not have indexed the latest block yet; skip and retry.
- it, err := coordinator.FilterRandomWordsForced(nil, ids, subs, addrs)
- if err == nil {
- for it.Next() {
- require.Equal(t, expected, it.Event.Field)
- }
- }
- return utils.IsEmpty(commitment[:])
- }, timeout, tick)
- ```
-
-
-
- If `require.Eventually` commits new blocks on each iteration, compute the reference block number inside the closure so it doesn't become stale.
- ```go
- // AFTER (dynamic):
- require.Eventually(t, func() bool {
- backend.Commit()
- tip, err := backend.Client().HeaderByNumber(ctx, nil)
- if err != nil || tip == nil || tip.Number.Uint64() < 256 {
- return false
- }
- _, err = bhsContract.GetBlockhash(nil, new(big.Int).SetUint64(tip.Number.Uint64()-256))
- return err == nil
- }, testutils.WaitTimeoutCustom(t, 5*time.Minute), time.Second)
- ```
-
-
-
-
-
- Under 5+ parallel test workers, TXM broadcasts transactions asynchronously. A heartbeat/fulfillment tx may be logged as "sent" by the service but not yet in the mempool when the next `backend.Commit()` fires. Test detects service as active, but stored block is `N+1` or later than the fixed reference.
-
-
- Use the dynamic reference fix (`fix_c_dynamic_reference` from LogPoller Timing Race) so the check tracks wherever the tx actually lands.
-
-
-
\ No newline at end of file
diff --git a/tools/test/.agents/skills/fix-chainlink-tests/SKILL.md b/tools/test/.agents/skills/fix-chainlink-tests/SKILL.md
new file mode 100644
index 00000000000..0911015e663
--- /dev/null
+++ b/tools/test/.agents/skills/fix-chainlink-tests/SKILL.md
@@ -0,0 +1,114 @@
+---
+name: fix-chainlink-tests
+description: >-
+ Diagnoses and fixes unstable Chainlink Go tests (flakes, races, timeouts, deadlocks,
+ slow runs). Use for non-deterministic failures or slow tests.
+ Do NOT use for deterministic failures, routine runs, or full-suite CI prep.
+---
+
+
+- DO NOT use this skill if the user already has a known fix (apply it directly).
+- DO NOT use for deterministic first-run failures (use normal debug).
+- DO NOT use for full-suite CI prep (use `make new_test` or `make new_gotestsum` instead).
+- ONLY run tests in these packages without explicit user approval: `core/`, `deployment/`. Warn the user if running outside these.
+- DO NOT modify the test's core goal to make it pass.
+- DO NOT remove tests/assertions unless replacing with better ones or deleting confirmed dead code.
+- DO NOT modify package-wide helpers (`testutils`) to fix localized tests.
+- DO NOT use plain `go test` commands. Only use `go -C tools/test run . diagnose`. Use `--iterations 1` for a single run.
+- For `diagnose` runs expected >2m: Execute in background. Perform a single 30s crash check, then suspend task and wait for the report.json system notification. DO NOT poll.
+
+
+## Initialization
+1. Verify target scope (test, package, or issue). If unknown, prompt user.
+2. Formulate initial hypothesis: flake, timeout, slow, panic, deadlock, or race.
+3. Run bounded diagnosis (`--fail-fast` or low `--iterations`).
+
+
+Base Command: `go -C tools/test run . diagnose [harness_flags] -- [go_test_flags] ./path`
+- ALWAYS use `--ai-output` before the `--`.
+- Harness flags (before `--`): `--iterations N`, `--fail-fast-on=(timeout|slow)`, `--parallel-iterations N`
+- Go test flags (after `--`): `--run '^TestName$'`, `--timeout 10m`, `--race`
+- Help: `go -C tools/test run . diagnose -h`
+- Lint check: `golangci-lint run ./ --fix`
+
+
+
+1. If user doesn't have recent results, run `diagnose` command with min 5 iterations to gather initial info. If issues due to sandbox, STOP, give user command to run and have them report results.
+2. If no issues, ask the user if they want to verify with more iterations. If not, end and output final report of findings, fixes, and lessons learned.
+3. If issues detected, focus on the ones the user wants to fix.
+4. If a `diagnose-attempted-fixes-[test/package]-[flake/broken/timeout/slow].jsonl` file exists, read it to see previous fix attempts and findings.
+5. Form a hypothesis on the cause of the issues
+6. Implement a fix
+7. Output the hypothesis and attempted fix, plus reasons why you think it would work.
+8. Run a `diagnose` loop and read the `report.json` file using jq to see if the fix works.
+ Append to `diagnose-attempted-fixes-[test/package]-[flake/broken/timeout/slow].jsonl` file in this json format:
+ ```json
+ {"timestamp": "[current_timestamp]", "model": "[current-model] (e.g. `claude-sonnet-4.6/high`, `gemini-3.1-pro`)", "hypothesis": "Your original hypothesis for the issue", "experiment": "A concise summary of what you tried. Include small code snippets if helpful", "result": "Did it fix it or not? If not, give concise reason why", "next": "Next steps to attempt"}
+ ```
+9. GOTO 2
+
+IF at any time the user interrupts or interjects during this loop, pick it up again where you left off, unless explicitly told otherwise.
+
+
+
+* Chainlink nodes are blockchain oracles. Read the [README.md](/README.md)
+* All tests share a single postgres DB. Each `diagnose` loop creates a new one.
+
+
+
+Lead with your hypothesis before writing code. Show contextual diffs, do not describe fixes abstractly. List of common approaches and diagnoses:
+
+1. **Check Known Patterns:** See `` below for common flaky test patterns and fixes in this repo. If they apply to the situation attempt them first.
+2. **Narrowing:** If many tests flag, look for similarities in their failures. If found, present that to the user and ask if they want to continue with assumption of relation. If not, try to focus on the most problematic test.
+3. **Isolate (Pass alone, fail in package):** Cross-test dependency. Missing `t.Cleanup`, global state (`var` singletons, loggers), or shared mock servers. Fix by moving state to per-test constructors or using `t.Cleanup`.
+4. **Order (Shuffle changes pass rate):** Same as isolation. Fix cross-test leakage. Capture failing seed and provide to user.
+5. **Race:** Triggers on weird stack traces or nil pointers. Use `-race`. Fix with `sync.Mutex`, `atomic.*`, or narrow shared fields.
+6. **Timeout:** Check logs for blocking (chan receive, `Wait`, `testutils.WaitTimeout`). Use `synctest` to improve tests relying on channels.
+7. **Slow:** Compare `p50` vs `max_elapsed`. Look for `time.Sleep` or coarse polling loops. Replace with `require.Eventually` or channel sync. Simulated chains are frequent offenders.
+8. **Resources:** If failing under load/CI only, check CPU and Memory usage. When logs/report are insufficient, use standard `go test` profile flags (`-race`, `-cpuprofile`, `-trace`, etc.). View with `go tool pprof` or `go tool trace`.
+
+
+
+Files in the `references/flaky-patterns/` dir.
+- [filter.md](./references/flaky-patterns/filter.md): Tests using `Filter` functions to validate on-chain events. Usually LogPoller based tests.
+- [sql-lockout.md](./references/flaky-patterns/sql-lockout.md): `failed to create ...: ERROR: canceling statement due to lock timeout (SQLSTATE 55P03)`
+
+
+
+When summarizing/compacting/compressing context, strictly maintain a reference to the `diagnose-attempted-fixes-[test/package]-[flake/broken/timeout/slow].jsonl` you're using for this session.
+
+
+
+- **GOCACHE permissions issues**: `[build failed]\n open .../Library/Caches/...` This is caused by some sandbox environments. If you cannot exit the sandbox to fix this, STOP. DO NOT attempt to create a new cache. Ask the user to run the command instead and give you results so you can continue.
+- **Postgres sandbox error**: `operation not permitted` connecting to postgres. Sandbox issues. If you cannot exit the sandbox to fix this, STOP. Ask the user to run the command instead and give you results so you can continue.
+
+
+
+[resultsDir]/
+|-- iteration-n.log.jsonl # DO NOT READ unless absolutely necessary; full log outputs, long and messy
+|-- postgres-state-n.md # Final state of tests' postgres DB after iteration. Read if diagnosing DB-based errors or hangs.
+|-- report.json # Read this; summary of full `diagnose` run (include `jq .run` for go test args and harness flags)
+|-- report.csv # DO NOT READ; human readable csv
+|-- logs/ # Extracted individual test logs
+|---- pkg_TestName_iter-n.log # Logs for individual slow/failing tests, read this as needed
+
+
+
+When reading log files from the `logs/` directory or `iteration-n.log.jsonl`, you MUST spawn a specialist `LogAnalyzer` sub-agent.
+
+You MUST configure the sub-agent with these exact initialization parameters:
+1. System Prompt: "You are a headless, read-only log parser. Your sole purpose is to read Go test logs from the end up. Each log file contains logs from `chainlink` nodes, plus test-specific logs. Read the logs and construct possible reasons why the test [input reason we're investigating]. You do not converse. You output raw JSON and nothing else."
+2. Allowed Tools: File read/grep tools ONLY. Revoke all execution, write, and web search capabilities.
+3. Temperature: 0.0
+
+The sub-agent MUST output ONLY valid JSON matching this exact structure. DO NOT wrap the output in markdown code blocks. Output raw JSON only, with no explanations and no yapping:
+{
+ "logs_read": ["log_path_1.log", "log_path_2.log"],
+ "failure_diagnosis": [
+ {
+ "possible_reason": "explanation",
+ "evidence": "specific logs/log lines"
+ }
+ ]
+}
+
diff --git a/tools/test/.agents/skills/fix-chainlink-tests/eval/real-fix-shas.json b/tools/test/.agents/skills/fix-chainlink-tests/eval/real-fix-shas.json
new file mode 100644
index 00000000000..288f725d698
--- /dev/null
+++ b/tools/test/.agents/skills/fix-chainlink-tests/eval/real-fix-shas.json
@@ -0,0 +1,7 @@
+[
+ {
+ "sha": "c98d9e6822d101584549e1d2aa849029990c90a3",
+ "pr_url": "https://github.com/smartcontractkit/chainlink/pull/22324",
+ "package": "core/services/vrf/v2"
+ }
+]
diff --git a/tools/test/.agents/skills/fix-chainlink-tests/references/flaky-patterns/filter.md b/tools/test/.agents/skills/fix-chainlink-tests/references/flaky-patterns/filter.md
new file mode 100644
index 00000000000..6cdc941c2aa
--- /dev/null
+++ b/tools/test/.agents/skills/fix-chainlink-tests/references/flaky-patterns/filter.md
@@ -0,0 +1,65 @@
+
+The dominant flake pattern appears in simulated-chain tests that enable `Feature.LogPoller = true`. The error message contains `"failed to retrieve log value pointer of block N: not found"` and the stack trace points to a `FilterXxx` call that immediately follows a `backend.Commit()`. Note: Raw geth bindings do NOT have this race, only interface types backed by LogPoller.
+
+
+
+
+For one-shot events where you only need a value emitted at creation (e.g. `SubscriptionCreated`, `RequestSent`): parse the tx receipt directly instead of calling `FilterXxx`.
+```go
+// AFTER (deterministic):
+tx, err := coordinator.CreateSubscription(auth)
+require.NoError(t, err)
+backend.Commit()
+receipt, err := backend.Client().TransactionReceipt(ctx, tx.Hash())
+require.NoError(t, err)
+require.Equal(t, uint64(1), receipt.Status)
+var subID *big.Int
+for _, log := range receipt.Logs {
+ if log.Address != coordinatorAddress {
+ continue
+ }
+ // SubscriptionCreated(uint64 indexed subId, address owner): Topics[1] = subId
+ subID = new(big.Int).SetBytes(log.Topics[1].Bytes())
+ break
+}
+require.NotNil(t, subID, "no SubscriptionCreated log in receipt")
+```
+
+
+
+
+
+For diagnostic/verification filters called inside a polling loop: a transient LogPoller error must not crash the test — it should retry.
+```go
+// AFTER (retries):
+require.Eventually(t, func() bool {
+ // LogPoller may not have indexed the latest block yet; skip and retry.
+ it, err := coordinator.FilterRandomWordsForced(nil, ids, subs, addrs)
+ if err == nil {
+ for it.Next() {
+ require.Equal(t, expected, it.Event.Field)
+ }
+ }
+ return utils.IsEmpty(commitment[:])
+}, timeout, tick)
+```
+
+
+
+
+
+If `require.Eventually` commits new blocks on each iteration, compute the reference block number inside the closure so it doesn't become stale.
+```go
+// AFTER (dynamic):
+require.Eventually(t, func() bool {
+ backend.Commit()
+ tip, err := backend.Client().HeaderByNumber(ctx, nil)
+ if err != nil || tip == nil || tip.Number.Uint64() < 256 {
+ return false
+ }
+ _, err = bhsContract.GetBlockhash(nil, new(big.Int).SetUint64(tip.Number.Uint64()-256))
+ return err == nil
+}, testutils.WaitTimeoutCustom(t, 5*time.Minute), time.Second)
+```
+
+
\ No newline at end of file
diff --git a/tools/test/.agents/skills/fix-chainlink-tests/references/flaky-patterns/sql-lockout.md b/tools/test/.agents/skills/fix-chainlink-tests/references/flaky-patterns/sql-lockout.md
new file mode 100644
index 00000000000..a60026af7af
--- /dev/null
+++ b/tools/test/.agents/skills/fix-chainlink-tests/references/flaky-patterns/sql-lockout.md
@@ -0,0 +1,22 @@
+
+SQL lock errors.
+`failed to create ...: ERROR: canceling statement due to lock timeout (SQLSTATE 55P03)`
+
+
+
+Randomize database keys that must be unique, so concurrent iterations never collide on the same row.
+Avoid:
+```go
+// Collision: multiple iterations of this test use the same ID
+owner := Keccak256([]byte(t.Name()))[:20]
+name := t.Name()
+```
+
+Prefer:
+```go
+// Isolation: every iteration/process gets a unique row
+owner := testutils.NewAddress().Bytes()
+name := testutils.RandomizeName(t.Name())
+```
+
+
diff --git a/tools/test/AGENTS.md b/tools/test/AGENTS.md
index 0a65e04610b..915239188d3 100644
--- a/tools/test/AGENTS.md
+++ b/tools/test/AGENTS.md
@@ -3,7 +3,7 @@ A test runner harness for the /chainlink repo.
- Provide a single, easy command to setup and run tests in /chainlink repo, eliminating `make` command chaining.
- Enable automatically re-running tests and analyzing results to catch and diagnose flakes and slow tests
-- Provide an AI skill for the process in `.agents/skills/chainlink-test-diagnosis/SKILL.md` (under `tools/test/`)
+- Provide an AI skill for the process in `.agents/skills/fix-chainlink-tests/SKILL.md` (under `tools/test/`)
diff --git a/tools/test/internal/config/config.go b/tools/test/internal/config/config.go
index 4e6d6bceda8..09d295b75ba 100644
--- a/tools/test/internal/config/config.go
+++ b/tools/test/internal/config/config.go
@@ -50,7 +50,7 @@ func NormalizeFailFastOn(values []string) ([]string, error) {
var out []string
seen := make(map[string]struct{})
for _, value := range values {
- for _, part := range strings.Split(value, ",") {
+ for part := range strings.SplitSeq(value, ",") {
category := strings.ToLower(strings.TrimSpace(part))
if category == "" {
return nil, errors.New(`--fail-fast-on must contain only "any", "failure", "timeout", or "slow"; got ""`)
diff --git a/tools/test/internal/db/db.go b/tools/test/internal/db/db.go
index f0fe1c511e3..809c2b483ea 100644
--- a/tools/test/internal/db/db.go
+++ b/tools/test/internal/db/db.go
@@ -104,12 +104,24 @@ func ensure(ctx context.Context, conf *config.App, out *output.Printer, setGloba
}
}()
+ // Turned off some prod-protections to make postgres go brrr
+ // https://github.com/peterldowns/pgtestdb#how-do-i-make-it-go-faster
c, err := postgres.Run(ctx,
fmt.Sprintf("docker.io/postgres:%s-alpine", conf.PostgresVersion),
postgres.WithDatabase("chainlink_test"),
postgres.WithUsername("postgres"),
postgres.WithPassword("postgres"),
- testcontainers.WithCmdArgs("-c", "max_connections=1000"),
+ testcontainers.WithCmdArgs(
+ "-c", "max_connections=1000",
+ "-c", "shared_buffers=128MB",
+ "-c", "fsync=off",
+ "-c", "synchronous_commit=off",
+ "-c", "full_page_writes=off",
+ "-c", "client_min_messages=warning",
+ ),
+ testcontainers.WithTmpfs(map[string]string{
+ "/var/lib/postgresql/data": "rw",
+ }),
testcontainers.WithWaitStrategy(
wait.ForLog("database system is ready to accept connections").
WithOccurrence(2).
@@ -375,15 +387,18 @@ func (h *Handle) DumpDiagnostics(ctx context.Context, dir string, iteration int)
exitCode, out, execErr := h.container.Exec(ctx,
[]string{"psql", "-U", "postgres", "-d", "chainlink_test", "-P", "pager=off", "-c", q.sql},
)
- switch {
- case execErr != nil:
- fmt.Fprintf(f, "error: %v\n", execErr)
- case exitCode != 0:
+ if execErr != nil {
+ fmt.Fprintf(f, "error: %v\n```\n\n", execErr)
+ continue
+ }
+ if exitCode != 0 {
fmt.Fprintf(f, "psql exit %d\n", exitCode)
}
- _, err = io.Copy(f, out)
- if err != nil {
- return fmt.Errorf("copy output: %w", err)
+ if out != nil {
+ _, err = io.Copy(f, out)
+ if err != nil {
+ return fmt.Errorf("copy output: %w", err)
+ }
}
fmt.Fprint(f, "```\n\n")
}
diff --git a/tools/test/internal/output/output.go b/tools/test/internal/output/output.go
index f00ecb5f3aa..e9d34afe81c 100644
--- a/tools/test/internal/output/output.go
+++ b/tools/test/internal/output/output.go
@@ -45,6 +45,24 @@ func New(aiOutput bool, stdout, stderr io.Writer, stderrFD uintptr) *Printer {
}
}
+// NewForTest returns a Printer with explicit live-inline behavior for unit tests.
+// Production code should use New/NewFromApp (TTY detection on stderr).
+func NewForTest(aiOutput bool, stdout, stderr io.Writer, liveInline bool) *Printer {
+ if stdout == nil {
+ stdout = io.Discard
+ }
+ if stderr == nil {
+ stderr = io.Discard
+ }
+ li := liveInline && !aiOutput
+ return &Printer{
+ aiOutput: aiOutput,
+ stdout: stdout,
+ stderr: stderr,
+ liveInline: li,
+ }
+}
+
// NewFromApp uses os.Stdout/os.Stderr and stderr's TTY bit from conf.AIOutput.
func NewFromApp(conf *config.App) *Printer {
return New(conf.AIOutput, os.Stdout, os.Stderr, os.Stderr.Fd())
diff --git a/tools/test/internal/output/output_test.go b/tools/test/internal/output/output_test.go
index b580d76398e..bdd6997d917 100644
--- a/tools/test/internal/output/output_test.go
+++ b/tools/test/internal/output/output_test.go
@@ -1,6 +1,7 @@
package output
import (
+ "io"
"strings"
"testing"
@@ -36,6 +37,19 @@ func TestNew_liveInline_requiresTTYAndHuman(t *testing.T) {
require.False(t, pAI.LiveInlineProgress())
}
+func TestNewForTest_liveInline(t *testing.T) {
+ t.Parallel()
+ var stderr strings.Builder
+ p := NewForTest(false, io.Discard, &stderr, true)
+ require.True(t, p.LiveInlineProgress())
+ p.ClearInline()
+ require.Equal(t, "\r\u001b[K", stderr.String())
+
+ var err2 strings.Builder
+ pAI := NewForTest(true, io.Discard, &err2, true)
+ require.False(t, pAI.LiveInlineProgress())
+}
+
func TestSparseStdoutln_onlyWhenAI(t *testing.T) {
t.Parallel()
var out, err strings.Builder
diff --git a/tools/test/internal/repo/repo.go b/tools/test/internal/repo/repo.go
index 1381da88187..a02a3f0938e 100644
--- a/tools/test/internal/repo/repo.go
+++ b/tools/test/internal/repo/repo.go
@@ -41,7 +41,7 @@ func RootFrom(dir string) (string, error) {
// modulePathFromGoMod returns the module path from the first `module` directive,
// skipping leading comments and blank lines (go.mod may legally start with either).
func modulePathFromGoMod(data string) (path string, ok bool) {
- for _, raw := range strings.Split(data, "\n") {
+ for raw := range strings.SplitSeq(data, "\n") {
line := strings.TrimSpace(raw)
if line == "" || strings.HasPrefix(line, "//") {
continue
diff --git a/tools/test/internal/runner/analyze.go b/tools/test/internal/runner/analyze.go
index db5457c0d06..184041d9c66 100644
--- a/tools/test/internal/runner/analyze.go
+++ b/tools/test/internal/runner/analyze.go
@@ -2,6 +2,7 @@ package runner
import (
"bufio"
+ "cmp"
"crypto/sha256"
"encoding/csv"
"encoding/json"
@@ -28,11 +29,17 @@ const timeoutPanic = "panic: test timed out"
// TestEvent mirrors cmd/internal/test2json's TestEvent; only fields we need.
type TestEvent struct {
- Action string `json:"Action"`
- Package string `json:"Package"`
- Test string `json:"Test"`
- Elapsed float64 `json:"Elapsed"`
- Output string `json:"Output"`
+ Action string `json:"Action"`
+ Package string `json:"Package"`
+ Test string `json:"Test"`
+ Elapsed float64 `json:"Elapsed"`
+ Output string `json:"Output"`
+ FailedBuild string `json:"FailedBuild,omitempty"`
+}
+
+// iterationScanMeta collects signals during scan that are not represented in aggregates only.
+type iterationScanMeta struct {
+ sawFailedBuild bool
}
type testKey struct {
@@ -105,6 +112,8 @@ type RunMeta struct {
FailFast bool `json:"fail_fast,omitempty"`
FailFastOn []string `json:"fail_fast_on,omitempty"`
Shuffle bool `json:"shuffle,omitempty"`
+ PostgresVersion string `json:"postgres_version,omitempty"`
+ HasDatabase bool `json:"has_database"`
}
// ReportSummary holds aggregate flake and slow rates for the full diagnose run.
@@ -154,7 +163,7 @@ type LogMap map[testKey]map[int]string
func Analyze(iterations []io.Reader, slowThreshold time.Duration) (*Report, LogMap, error) {
aggs := make(map[testKey]*aggregate)
for i, r := range iterations {
- if err := scanIterationJSONL(r, i, aggs); err != nil {
+ if err := scanIterationJSONL(r, i, aggs, nil); err != nil {
return nil, nil, err
}
}
@@ -183,7 +192,8 @@ func (a *aggregate) recordElapsed(iterIdx int, d time.Duration) {
}
// scanIterationJSONL merges one iteration's JSONL stream into aggs at iterIdx.
-func scanIterationJSONL(r io.Reader, iterIdx int, aggs map[testKey]*aggregate) error {
+// meta may be nil; when set, records e.g. compile/build failure from FailedBuild on fail events.
+func scanIterationJSONL(r io.Reader, iterIdx int, aggs map[testKey]*aggregate, meta *iterationScanMeta) error {
reader := bufio.NewReaderSize(r, 1024*1024)
for {
line, err := reader.ReadBytes('\n')
@@ -204,6 +214,9 @@ func scanIterationJSONL(r io.Reader, iterIdx int, aggs map[testKey]*aggregate) e
d := seconds(ev.Elapsed)
a.recordElapsed(iterIdx, d)
case "fail":
+ if meta != nil && ev.FailedBuild != "" {
+ meta.sawFailedBuild = true
+ }
a.fails++
a.iterations[iterIdx] = struct{}{}
a.failedIters[iterIdx] = true
@@ -248,6 +261,9 @@ func buildReportFromAggs(aggs map[testKey]*aggregate, numIterations int, slowThr
SlowThreshold: slowThreshold,
}
+ var pkgEntries []TestEntry
+ var testsByPkg = make(map[string][]TestEntry)
+
for key, a := range aggs {
minE, p50 := stats(a.elapseds)
base := TestEntry{
@@ -265,6 +281,10 @@ func buildReportFromAggs(aggs map[testKey]*aggregate, numIterations int, slowThr
TimeoutIters: sortedBoolMapKeys(a.timeoutIters),
SlowIters: slowIterations(a.elapsedByIter, slowThreshold),
}
+ if key.Test == "" {
+ pkgEntries = append(pkgEntries, base)
+ }
+
switch {
case a.timedOut:
rep.Timeouts = append(rep.Timeouts, base)
@@ -277,15 +297,51 @@ func buildReportFromAggs(aggs map[testKey]*aggregate, numIterations int, slowThr
} else {
rep.Failures = append(rep.Failures, base)
}
- case a.passes > 0 && a.fails > 0:
+ case key.Test != "" && a.passes > 0 && a.fails > 0:
rep.Flakes = append(rep.Flakes, base)
- case a.fails > 0 && a.passes == 0:
+ case key.Test != "" && a.fails > 0 && a.passes == 0:
rep.Failures = append(rep.Failures, base)
}
- if !a.timedOut && key.Test != "" && slowThreshold > 0 && a.maxElapsed > slowThreshold {
- rep.Slow = append(rep.Slow, base)
+
+ if key.Test != "" && !a.timedOut && slowThreshold > 0 && a.maxElapsed > slowThreshold {
+ testsByPkg[key.Package] = append(testsByPkg[key.Package], base)
+ }
+ }
+
+ // Always include top 10 slowest packages, plus any package that has slow tests.
+ slices.SortFunc(pkgEntries, func(a, b TestEntry) int {
+ return cmp.Or(
+ cmp.Compare(b.MaxElapsed, a.MaxElapsed),
+ strings.Compare(a.Package, b.Package),
+ )
+ })
+
+ slowMerged := make(map[string]bool)
+ seenPkg := make(map[string]bool)
+ for i, pkg := range pkgEntries {
+ if i < 10 && !pkgAggregateExcludedFromSlowReports(pkg) {
+ rep.Slow = append(rep.Slow, pkg)
+ seenPkg[pkg.Package] = true
+ }
+ if slowTests, ok := testsByPkg[pkg.Package]; ok {
+ rep.Slow = append(rep.Slow, slowTests...)
+ slowMerged[pkg.Package] = true
+ if !seenPkg[pkg.Package] && !pkgAggregateExcludedFromSlowReports(pkg) {
+ rep.Slow = append(rep.Slow, pkg)
+ seenPkg[pkg.Package] = true
+ }
+ }
+ }
+ var orphanSlowPkgs []string
+ for pkgName := range testsByPkg {
+ if !slowMerged[pkgName] {
+ orphanSlowPkgs = append(orphanSlowPkgs, pkgName)
}
}
+ sort.Strings(orphanSlowPkgs)
+ for _, pkgName := range orphanSlowPkgs {
+ rep.Slow = append(rep.Slow, testsByPkg[pkgName]...)
+ }
sortEntries(rep.Flakes)
sortEntries(rep.Failures)
@@ -322,7 +378,7 @@ func buildReportFromAggs(aggs map[testKey]*aggregate, numIterations int, slowThr
}
}
summaries := make([]IterationSummary, numIterations)
- for i := 0; i < numIterations; i++ {
+ for i := range numIterations {
s := IterationSummary{Index: i}
switch {
case iterTimedOut[i]:
@@ -369,7 +425,12 @@ func buildReportSummary(rep *Report, aggs map[testKey]*aggregate, slowThreshold
iterWithFlakeFail[i] = struct{}{}
}
}
- slowCount := len(rep.Slow)
+ slowCount := 0
+ for _, e := range rep.Slow {
+ if e.Test != "" {
+ slowCount++
+ }
+ }
s := &ReportSummary{
DistinctNamedTests: distinct,
@@ -407,6 +468,7 @@ type IterationDigest struct {
FailTests int // len(IterationSummaries[0].FailingTests)
SlowTests int // tests over slow threshold
TimeoutTests int // len(Timeouts) for this iteration
+ BuildFailure bool // compile/build failed or heuristic package-level fail with no named tests run
}
// countNamedTestsRanInAggs counts distinct non-empty test keys that recorded
@@ -428,7 +490,8 @@ func countNamedTestsRanInAggs(aggs map[testKey]*aggregate) int {
// It uses the same scan + report pipeline as Analyze for one iteration (no redundant Analyze wrapper).
func DigestIterationJSONL(r io.Reader, slowThreshold time.Duration) (IterationDigest, error) {
aggs := make(map[testKey]*aggregate)
- if err := scanIterationJSONL(r, 0, aggs); err != nil {
+ var meta iterationScanMeta
+ if err := scanIterationJSONL(r, 0, aggs, &meta); err != nil {
return IterationDigest{}, err
}
reattributeTimeouts(aggs, newAggregate)
@@ -436,6 +499,8 @@ func DigestIterationJSONL(r io.Reader, slowThreshold time.Duration) (IterationDi
rep, _ := buildReportFromAggs(aggs, 1, slowThreshold)
d := iterationDigestFromReport(rep)
d.RanTests = ran
+ d.BuildFailure = meta.sawFailedBuild ||
+ (d.Result == "fail" && d.RanTests == 0 && d.FailTests > 0)
return d, nil
}
@@ -715,7 +780,8 @@ func flaggedRows(rep *Report) []csvRow {
// PrintSummary writes a human-readable summary: headings and tests grouped by
// package under a common path prefix (tree). Broken/Flaky/Slow test lines use
-// red / yellow / grey; package path rows are muted.
+// red / yellow / grey; package path rows are muted. The Overall block uses green
+// when a metric is clean, orange for slow prevalence, and red for broken or flaky counts.
// Broken and Timeout entries are sorted alphabetically by package then test.
// Flaky entries are sorted by fails/runs (desc), then fails (desc), then name.
// Slow entries are sorted by max runtime (desc), then name.
@@ -773,7 +839,7 @@ func PrintSummary(w io.Writer, rep *Report) {
}
return slow[i].Test < slow[j].Test
})
- printSummarySectionTree(w, "Slow", n, slow, termstyle.Muted, termstyle.Muted, formatSlowTestLine)
+ printSummarySectionTree(w, "Slow Tests & Top Packages", n, slow, termstyle.Muted, termstyle.Muted, formatSlowTestLine)
}
printOverallStats(w, rep)
@@ -798,22 +864,48 @@ func printOverallStats(w io.Writer, rep *Report) {
fmt.Fprintln(w, termstyle.Muted.Render(line))
}
if s.DistinctNamedTests > 0 {
+ brokenN := 0
+ for _, f := range rep.Failures {
+ if f.Test != "" {
+ brokenN++
+ }
+ }
+ pctBroken := float64(brokenN) / float64(s.DistinctNamedTests) * 100
+ line := fmt.Sprintf(" Broken tests: %d/%d (%.1f%%)", brokenN, s.DistinctNamedTests, pctBroken)
+ if brokenN > 0 {
+ fmt.Fprintln(w, termstyle.Bad.Render(line))
+ } else {
+ fmt.Fprintln(w, termstyle.OK.Render(line))
+ }
+
pct := 0.0
if s.FlakePrevalence != nil {
pct = *s.FlakePrevalence * 100
}
- line := fmt.Sprintf(" Flaky tests: %d/%d (%.1f%%)", s.FlakeNamedCount, s.DistinctNamedTests, pct)
- fmt.Fprintln(w, termstyle.Muted.Render(line))
+ line = fmt.Sprintf(" Flaky tests: %d/%d (%.1f%%)", s.FlakeNamedCount, s.DistinctNamedTests, pct)
+ if s.FlakeNamedCount > 0 {
+ fmt.Fprintln(w, termstyle.Bad.Render(line))
+ } else {
+ fmt.Fprintln(w, termstyle.OK.Render(line))
+ }
}
if len(rep.Flakes) > 0 && s.FlakeIterationFailRate != nil {
pct := *s.FlakeIterationFailRate * 100
line := fmt.Sprintf(" Flaky Iterations: %d/%d (%.1f%%)", s.FlakeFailingIterations, s.FlakeIterationTotal, pct)
- fmt.Fprintln(w, termstyle.Muted.Render(line))
+ if s.FlakeFailingIterations > 0 {
+ fmt.Fprintln(w, termstyle.Bad.Render(line))
+ } else {
+ fmt.Fprintln(w, termstyle.OK.Render(line))
+ }
}
if rep.SlowThreshold > 0 && s.DistinctNamedTests > 0 && s.SlowPrevalence != nil {
pct := *s.SlowPrevalence * 100
line := fmt.Sprintf(" Slow tests: %d/%d (%.1f%%)", s.SlowCount, s.DistinctNamedTests, pct)
- fmt.Fprintln(w, termstyle.Muted.Render(line))
+ if s.SlowCount > 0 {
+ fmt.Fprintln(w, termstyle.Accent.Render(line))
+ } else {
+ fmt.Fprintln(w, termstyle.OK.Render(line))
+ }
}
fmt.Fprintln(w)
}
@@ -848,10 +940,11 @@ func formatTimeoutTestLine(e TestEntry) string {
}
func formatSlowTestLine(e TestEntry) string {
+ dur := termstyle.Accent.Render(e.MaxElapsed.Round(time.Millisecond).String())
if e.Test == "" {
- return fmt.Sprintf("%s %s", e.Package, e.MaxElapsed.Round(time.Millisecond))
+ return fmt.Sprintf("%s %s", e.Package, dur)
}
- return fmt.Sprintf("%s %s", e.Test, e.MaxElapsed.Round(time.Millisecond))
+ return fmt.Sprintf("%s %s", e.Test, dur)
}
// pipeBranch returns a tree prefix: depth 1 -> "|-- ", depth 2 -> "|---- ", etc.
@@ -1101,6 +1194,21 @@ func sortedBoolMapKeys(m map[int]bool) []int {
return keys
}
+// pkgAggregateExcludedFromSlowReports is true for package-level aggregates that
+// belong only in Failures or Timeouts, not in rep.Slow package ranking.
+func pkgAggregateExcludedFromSlowReports(e TestEntry) bool {
+ if e.Test != "" {
+ return false
+ }
+ if e.Timeouts > 0 {
+ return true
+ }
+ if e.Fails > 0 && e.Successes == 0 {
+ return true
+ }
+ return false
+}
+
func slowIterations(elapsedByIter map[int]time.Duration, threshold time.Duration) []int {
if threshold <= 0 {
return nil
diff --git a/tools/test/internal/runner/analyze_test.go b/tools/test/internal/runner/analyze_test.go
index 7526fdf6fa0..1dabbdb35ef 100644
--- a/tools/test/internal/runner/analyze_test.go
+++ b/tools/test/internal/runner/analyze_test.go
@@ -3,6 +3,7 @@ package runner
import (
"bufio"
"encoding/json"
+ "fmt"
"io"
"os"
"path/filepath"
@@ -12,6 +13,8 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+
+ "github.com/smartcontractkit/chainlink/v2/tools/test/internal/termstyle"
)
func readers(iters ...string) []io.Reader {
@@ -176,6 +179,7 @@ func TestDigestIterationJSONL(t *testing.T) {
assert.Equal(t, 0, d.FailTests)
assert.Equal(t, 0, d.SlowTests)
assert.Equal(t, 0, d.TimeoutTests)
+ assert.False(t, d.BuildFailure)
})
t.Run("slow test", func(t *testing.T) {
@@ -198,6 +202,16 @@ func TestDigestIterationJSONL(t *testing.T) {
assert.Equal(t, "fail", d.Result)
assert.Equal(t, 0, d.RanTests)
assert.Equal(t, 1, d.FailTests)
+ assert.True(t, d.BuildFailure)
+ })
+
+ t.Run("failed_build_field", func(t *testing.T) {
+ t.Parallel()
+ jsonl := `{"Action":"fail","Package":"example.com/badpkg","Elapsed":0,"FailedBuild":"example.com/badpkg.test"}` + "\n"
+ d, err := DigestIterationJSONL(strings.NewReader(jsonl), 30*time.Second)
+ require.NoError(t, err)
+ assert.Equal(t, "fail", d.Result)
+ assert.True(t, d.BuildFailure)
})
t.Run("timeout", func(t *testing.T) {
@@ -593,7 +607,7 @@ func TestPrintSummaryOverallContains(t *testing.T) {
require.NoError(t, err)
return rep
},
- needle: []string{"Overall", "Flaky tests:", "Flaky Iterations: 1/2 (50.0%)", "Slow tests:"},
+ needle: []string{"Overall", "Broken tests:", "Flaky tests:", "Flaky Iterations: 1/2 (50.0%)", "Slow tests:"},
},
{
name: "iteration_wall_clock_runtimes",
@@ -605,7 +619,7 @@ func TestPrintSummaryOverallContains(t *testing.T) {
fillIterationRuntimeSummary(rep)
return rep
},
- needle: []string{"Overall", "Iteration runtimes:", "min=5s"},
+ needle: []string{"Overall", "Iteration runtimes:", "min=5s", "Broken tests:"},
},
}
for _, tc := range tests {
@@ -621,6 +635,61 @@ func TestPrintSummaryOverallContains(t *testing.T) {
}
}
+func TestPrintSummaryOverall_usesSeverityColors(t *testing.T) {
+ t.Parallel()
+ rep, _, err := Analyze(readers(
+ `{"Action":"fail","Package":"pkg/foo","Test":"TestX","Elapsed":0.5}`,
+ `{"Action":"pass","Package":"pkg/foo","Test":"TestX","Elapsed":0.4}`,
+ ), 30*time.Second)
+ require.NoError(t, err)
+ require.NotNil(t, rep.Summary)
+ s := rep.Summary
+
+ var buf strings.Builder
+ PrintSummary(&buf, rep)
+ out := buf.String()
+
+ brokenN := len(rep.Failures)
+ pctBroken := float64(brokenN) / float64(s.DistinctNamedTests) * 100
+ brokenLine := fmt.Sprintf(" Broken tests: %d/%d (%.1f%%)", brokenN, s.DistinctNamedTests, pctBroken)
+ if brokenN > 0 {
+ assert.Contains(t, out, termstyle.Bad.Render(brokenLine))
+ } else {
+ assert.Contains(t, out, termstyle.OK.Render(brokenLine))
+ }
+
+ pctFlake := 0.0
+ if s.FlakePrevalence != nil {
+ pctFlake = *s.FlakePrevalence * 100
+ }
+ flakyLine := fmt.Sprintf(" Flaky tests: %d/%d (%.1f%%)", s.FlakeNamedCount, s.DistinctNamedTests, pctFlake)
+ if s.FlakeNamedCount > 0 {
+ assert.Contains(t, out, termstyle.Bad.Render(flakyLine))
+ } else {
+ assert.Contains(t, out, termstyle.OK.Render(flakyLine))
+ }
+
+ if len(rep.Flakes) > 0 && s.FlakeIterationFailRate != nil {
+ pctFI := *s.FlakeIterationFailRate * 100
+ fiLine := fmt.Sprintf(" Flaky Iterations: %d/%d (%.1f%%)", s.FlakeFailingIterations, s.FlakeIterationTotal, pctFI)
+ if s.FlakeFailingIterations > 0 {
+ assert.Contains(t, out, termstyle.Bad.Render(fiLine))
+ } else {
+ assert.Contains(t, out, termstyle.OK.Render(fiLine))
+ }
+ }
+
+ if rep.SlowThreshold > 0 && s.DistinctNamedTests > 0 && s.SlowPrevalence != nil {
+ pctSlow := *s.SlowPrevalence * 100
+ slowLine := fmt.Sprintf(" Slow tests: %d/%d (%.1f%%)", s.SlowCount, s.DistinctNamedTests, pctSlow)
+ if s.SlowCount > 0 {
+ assert.Contains(t, out, termstyle.Accent.Render(slowLine))
+ } else {
+ assert.Contains(t, out, termstyle.OK.Render(slowLine))
+ }
+ }
+}
+
func publicTestEntries(entries []TestEntry) []TestEntry {
out := append([]TestEntry(nil), entries...)
for i := range out {
@@ -1058,3 +1127,28 @@ func TestMarshalAISummaryJSON(t *testing.T) {
})
}
}
+
+func TestAnalyzeSlowTestsNoDuplication(t *testing.T) {
+ t.Parallel()
+ iter := `{"Action":"pass","Package":"pkg/slow","Test":"TestSlow","Elapsed":10.0}
+{"Action":"pass","Package":"pkg/slow","Elapsed":10.0}
+`
+ rep, _, err := Analyze([]io.Reader{strings.NewReader(iter)}, 1*time.Second)
+ require.NoError(t, err)
+
+ pkgSlowCount := 0
+ testSlowCount := 0
+ for _, s := range rep.Slow {
+ if s.Package == "pkg/slow" {
+ switch s.Test {
+ case "":
+ pkgSlowCount++
+ case "TestSlow":
+ testSlowCount++
+ }
+ }
+ }
+
+ assert.Equal(t, 1, pkgSlowCount, "Package should appear exactly once in slow reports")
+ assert.Equal(t, 1, testSlowCount, "Slow test should appear exactly once")
+}
diff --git a/tools/test/internal/runner/diagnose_progress.go b/tools/test/internal/runner/diagnose_progress.go
index 61ca346ef76..37eb460ca20 100644
--- a/tools/test/internal/runner/diagnose_progress.go
+++ b/tools/test/internal/runner/diagnose_progress.go
@@ -18,15 +18,55 @@ import (
const chainlinkModulePrefix = "github.com/smartcontractkit/chainlink/v2"
-// packagePatternsFromEnd returns trailing non-flag arguments. This matches the usual
-// `go test [flags] [packages]` layout (package patterns last).
+// testBinaryTwoArgSuffixFlags are test-binary flags that consume the following argv token.
+// When scanning backwards from the end, a token immediately after one of these is skipped
+// so package patterns can appear before `-run TestName` (valid `go test` ordering).
+var testBinaryTwoArgSuffixFlags = map[string]bool{
+ "-run": true,
+ "-bench": true,
+ "-skip": true,
+ "-fuzz": true,
+}
+
+func singleArgTestBinaryFlagPrefix(arg string) (prefix string, ok bool) {
+ for _, p := range []string{"-run=", "-bench=", "-skip=", "-fuzz="} {
+ if strings.HasPrefix(arg, p) {
+ return p, true
+ }
+ }
+ return "", false
+}
+
+func looksLikeGoPackagePattern(arg string) bool {
+ return strings.Contains(arg, ".") ||
+ strings.Contains(arg, "/") ||
+ strings.Contains(arg, "...")
+}
+
+// packagePatternsFromEnd returns trailing arguments that look like package patterns.
+// It scans backward from the end of goTestFlagsBeforeArgs(args), skipping `-run`,
+// `-bench`, `-skip`, and `-fuzz` and their values so `./pkg -run TestName` still
+// yields `./pkg`. This matches the usual `go test [flags] [packages]` layout and
+// also package-first ordering with test flags after packages.
func packagePatternsFromEnd(args []string) []string {
+ args = goTestFlagsBeforeArgs(args)
var pkgs []string
for i := len(args) - 1; i >= 0; i-- {
- if strings.HasPrefix(args[i], "-") {
+ arg := args[i]
+ if _, ok := singleArgTestBinaryFlagPrefix(arg); ok {
+ continue
+ }
+ if strings.HasPrefix(arg, "-") {
+ break
+ }
+ if i >= 1 && testBinaryTwoArgSuffixFlags[args[i-1]] {
+ i--
+ continue
+ }
+ if !looksLikeGoPackagePattern(arg) {
break
}
- pkgs = append(pkgs, args[i])
+ pkgs = append(pkgs, arg)
}
slices.Reverse(pkgs)
return pkgs
@@ -178,16 +218,10 @@ func (p *parallelDiagnoseProgress) renderSnapshot(now time.Time) (completed, tot
defer p.mu.Unlock()
completed = p.completed
total = p.totalIterations
- poolElapsed = now.Sub(p.poolStartedAt)
- if poolElapsed < 0 {
- poolElapsed = 0
- }
+ poolElapsed = max(now.Sub(p.poolStartedAt), 0)
poolElapsed = poolElapsed.Round(time.Second)
for iter, pr := range p.active {
- elapsed := now.Sub(pr.startedAt)
- if elapsed < 0 {
- elapsed = 0
- }
+ elapsed := max(now.Sub(pr.startedAt), 0)
actives = append(actives, activeIterElapsed{iteration: iter, elapsed: elapsed.Round(time.Second)})
}
slices.SortFunc(actives, func(a, b activeIterElapsed) int {
@@ -256,10 +290,7 @@ func renderDiagnoseProgressLine(w io.Writer, iteration, iterations int, iterElap
iterBracket := fmt.Sprintf("iter %d/%d (%s)", iteration, iterations, iterElapsed.Round(time.Second).String())
line := progressBracket(termstyle.Label.Render(iterBracket))
if !diagnoseRunStart.IsZero() {
- runEl := now.Sub(diagnoseRunStart)
- if runEl < 0 {
- runEl = 0
- }
+ runEl := max(now.Sub(diagnoseRunStart), 0)
line += " " + progressBracket(termstyle.Muted.Render(runEl.Round(time.Second).String()))
}
fmt.Fprint(w, "\r\033[K")
diff --git a/tools/test/internal/runner/diagnose_progress_test.go b/tools/test/internal/runner/diagnose_progress_test.go
index a7bd2d58081..a2e8a5db95d 100644
--- a/tools/test/internal/runner/diagnose_progress_test.go
+++ b/tools/test/internal/runner/diagnose_progress_test.go
@@ -1,11 +1,14 @@
package runner
import (
+ "io"
"strings"
"testing"
"time"
"github.com/stretchr/testify/require"
+
+ "github.com/smartcontractkit/chainlink/v2/tools/test/internal/output"
)
func TestDiagnoseProgress_onTestJSONLine_packageTerminal(t *testing.T) {
@@ -123,3 +126,35 @@ func TestRenderParallelDiagnoseProgressLine(t *testing.T) {
require.NotContains(t, got, "·")
require.NotContains(t, got, "core/")
}
+
+// Simulates: worker goroutine redraws the next iteration's \r line before the
+// receiver prints the previous iteration's table row (unbuffered channel
+// completion order). Without ClearInline, Fprintln appends to the progress line.
+func TestDiagnoseDigestAfterProgressNeedsClear_mergedWithoutClear(t *testing.T) {
+ t.Parallel()
+ var stderr strings.Builder
+ out := output.NewForTest(false, io.Discard, &stderr, true)
+ runStart := time.Date(2020, 1, 1, 11, 0, 0, 0, time.UTC)
+ now := time.Date(2020, 1, 1, 12, 0, 0, 0, time.UTC)
+ renderDiagnoseProgressLine(out.HumanStderrWriter(), 5, 20, 2*time.Second, runStart, now, true)
+ out.HumanStderr(" 4 fail")
+ got := stderr.String()
+ require.Contains(t, got, "iter 5/20")
+ require.Contains(t, got, " 4 fail")
+ idxRow := strings.Index(got, " 4")
+ require.Positive(t, idxRow)
+ require.NotContains(t, got[:idxRow], "\n", "digest must not start on a new line (glitch)")
+}
+
+func TestDiagnoseDigestAfterProgressNeedsClear_clearedBeforeHumanStderr(t *testing.T) {
+ t.Parallel()
+ var stderr strings.Builder
+ out := output.NewForTest(false, io.Discard, &stderr, true)
+ runStart := time.Date(2020, 1, 1, 11, 0, 0, 0, time.UTC)
+ now := time.Date(2020, 1, 1, 12, 0, 0, 0, time.UTC)
+ renderDiagnoseProgressLine(out.HumanStderrWriter(), 5, 20, 2*time.Second, runStart, now, true)
+ out.ClearInline()
+ out.HumanStderr(" 4 fail")
+ got := stderr.String()
+ require.Contains(t, got, "\r\u001b[K 4 fail\n")
+}
diff --git a/tools/test/internal/runner/diagnose_results_dir.go b/tools/test/internal/runner/diagnose_results_dir.go
index 0b5d1528fdd..cd24389cf4b 100644
--- a/tools/test/internal/runner/diagnose_results_dir.go
+++ b/tools/test/internal/runner/diagnose_results_dir.go
@@ -11,6 +11,28 @@ const (
maxDiagnoseResultsBasename = 220
)
+// parseGoTestRunPattern returns the last `-run` pattern from go test-style argv
+// (before `-args`), mirroring how Go applies repeated `-run` flags.
+func parseGoTestRunPattern(goTestArgs []string) string {
+ args := goTestFlagsBeforeArgs(goTestArgs)
+ var last string
+ for i := 0; i < len(args); i++ {
+ a := args[i]
+ if after, ok := strings.CutPrefix(a, "-run="); ok {
+ last = strings.TrimSpace(after)
+ continue
+ }
+ if a == "-run" {
+ if i+1 >= len(args) {
+ continue
+ }
+ i++
+ last = strings.TrimSpace(args[i])
+ }
+ }
+ return last
+}
+
// diagnoseResultsDirName returns a repo-root-relative directory basename for
// diagnose output: diagnose--. Full argv and harness
// flags live in report.json under the run key (see RunMeta).
@@ -18,6 +40,9 @@ func diagnoseResultsDirName(goTestArgs []string, now time.Time) string {
tsPart := now.Format("20060102150405")
target := guessPackagePatternForSlug(goTestArgs)
slug := diagnoseTargetSlug(target)
+ if run := parseGoTestRunPattern(goTestArgs); run != "" {
+ slug += "__run_" + sanitizeDirToken(run)
+ }
tail := "-" + tsPart
avail := max(maxDiagnoseResultsBasename-len(diagnoseResultsNamePrefix)-len(tail), 1)
slug = truncateUTF8MaxBytes(slug, avail)
diff --git a/tools/test/internal/runner/runner.go b/tools/test/internal/runner/runner.go
index 0eceb8f19c5..7626943f7cf 100644
--- a/tools/test/internal/runner/runner.go
+++ b/tools/test/internal/runner/runner.go
@@ -7,6 +7,7 @@ import (
"errors"
"fmt"
"io"
+ "math"
"math/rand/v2"
"os"
"os/exec"
@@ -22,9 +23,29 @@ import (
"github.com/smartcontractkit/chainlink/v2/tools/test/internal/termstyle"
)
+// failFastReasonBuildFailure is used when go test reports a compile/build failure
+// (FailedBuild or package-level fail with no named tests run); diagnose stops immediately.
+const failFastReasonBuildFailure = "build-failure"
+
type diagnoseIterationResource = db.Resource
-type diagnoseIterationRunner func(ctx context.Context, conf *config.App, out *output.Printer, resultsDir string, goTestArgs []string, iteration int, shuffleSeed int64, env []string, liveProgress bool, parallelProgress *parallelDiagnoseProgress, diagnoseRunStart time.Time) error
+// diagnoseIterationParams is per-iteration state for diagnose (see diagnoseIterationRunner;
+// context is passed separately to avoid storing context in a struct).
+type diagnoseIterationParams struct {
+ Conf *config.App
+ Out *output.Printer
+ ResultsDir string
+ GoTestArgs []string
+ Iteration int
+ ShuffleSeed int64
+ Env []string
+ LiveProgress bool
+ ParallelProgress *parallelDiagnoseProgress
+ DiagnoseRunStart time.Time
+ SerialProgressMu *sync.Mutex
+}
+
+type diagnoseIterationRunner func(ctx context.Context, p diagnoseIterationParams) error
type diagnoseRunHooks struct {
runIteration diagnoseIterationRunner
@@ -32,12 +53,13 @@ type diagnoseRunHooks struct {
}
type diagnoseRunState struct {
- completed int
- failedFast bool
- failedFastReason string
- iterDurations []time.Duration
- shuffleSeeds map[int]int64
- liveProgress bool
+ completed int
+ failedFast bool
+ failedFastReason string
+ failedFastIteration int // 0-based diagnose iteration index; -1 if unset
+ iterDurations []time.Duration
+ shuffleSeeds map[int]int64
+ liveProgress bool
}
// GoTest runs `go test` with the given args (repo root as working directory).
@@ -72,8 +94,8 @@ func Gotestsum(ctx context.Context, conf *config.App, args []string) error {
// With --ai-output, stdout is three lines: the results directory path, the
// path to report.json, and one line of JSON (the report's summary object, or
// the JSON keyword null when there is no summary).
-// Test iteration failures do not stop later runs (unless --fail-fast); they are
-// reflected in report.json. Diagnose returns a non-nil error for setup failures
+// Test iteration failures do not stop later runs (unless --fail-fast); compile/build
+// failures stop immediately. Results are reflected in report.json. Diagnose returns a non-nil error for setup failures
// (e.g. mkdir, database reset), analyze/write report failures, or ctx errors
// bubbling from dependencies — not for failing tests alone.
// resources supplies the prepared per-worker database state. Each resource runs
@@ -90,6 +112,9 @@ func Diagnose(ctx context.Context, conf *config.App, out *output.Printer, goTest
return err
}
printDiagnoseResultsDirHeader(out, resultsDir)
+ if err := printDiagnoseRunTimeEstimate(out, conf, goTestArgs, len(resources)); err != nil {
+ return err
+ }
state, runErr := runDiagnoseIterations(ctx, conf, out, resultsDir, goTestArgs, resources, diagnoseRunHooks{})
if runErr != nil {
@@ -106,19 +131,41 @@ func Diagnose(ctx context.Context, conf *config.App, out *output.Printer, goTest
}
if state.failedFast && !out.AIOutput() {
- msg := "--fail-fast set, stopping early"
- if state.failedFastReason != "" {
- msg = fmt.Sprintf("fail-fast matched %s, stopping early", state.failedFastReason)
+ if state.failedFastReason == failFastReasonBuildFailure && state.failedFastIteration >= 0 {
+ iter := state.failedFastIteration + 1
+ out.HumanStderr(termstyle.Bad.Render(
+ fmt.Sprintf("Build failed — stopping diagnose run (iteration %d/%d).", iter, conf.Iterations)))
+ } else {
+ msg := "--fail-fast set, stopping early"
+ if state.failedFastReason != "" {
+ msg = fmt.Sprintf("fail-fast matched %s, stopping early", state.failedFastReason)
+ }
+ out.HumanStderr(termstyle.Accent.Render(msg))
}
- out.HumanStderr(termstyle.Accent.Render(msg))
}
- printDiagnoseAnalyzing(out, state.liveProgress)
- report, logs, analyzeErr := AnalyzeResults(resultsDir, conf.SlowThreshold)
+ stopAnalyzing := startDiagnoseAnalyzingProgress(out, state.liveProgress)
+ var report *Report
+ var logs LogMap
+ var analyzeErr error
+ defer func() { stopAnalyzing(analyzeErr) }()
+ report, logs, analyzeErr = AnalyzeResults(resultsDir, conf.SlowThreshold)
if analyzeErr != nil {
out.Stderrf("analyze results: %v\n", analyzeErr)
return analyzeErr
}
+ if out.AIOutput() && state.failedFastReason == failFastReasonBuildFailure && state.failedFastIteration >= 0 {
+ var pkgs []string
+ if report != nil {
+ for _, sum := range report.IterationSummaries {
+ if sum.Index == state.failedFastIteration {
+ pkgs = append(pkgs, sum.FailingTests...)
+ break
+ }
+ }
+ }
+ out.Stderrf("bf_stop iter=%d pkgs=%s\n", state.failedFastIteration+1, strings.Join(pkgs, ","))
+ }
if report != nil {
for i, d := range state.iterDurations {
if i >= len(report.IterationSummaries) {
@@ -130,7 +177,7 @@ func Diagnose(ctx context.Context, conf *config.App, out *output.Printer, goTest
}
}
finished := time.Now()
- report.Run = newRunMeta(conf, goTestArgs, resultsDir, start, &finished)
+ report.Run = newRunMeta(conf, goTestArgs, resultsDir, start, &finished, len(resources) > 0)
fillIterationRuntimeSummary(report)
}
if err := WriteLogFiles(resultsDir, report, logs); err != nil {
@@ -159,8 +206,8 @@ func Diagnose(ctx context.Context, conf *config.App, out *output.Printer, goTest
}
out.HumanStderr(
- termstyle.Label.Render("diagnose complete") +
- termstyle.Muted.Render(fmt.Sprintf(" (%s)", time.Since(start))))
+ termstyle.Label.Render("diagnosis complete") + " " +
+ termstyle.Muted.Render("["+formatDiagnoseWallClock(time.Since(start))+"]"))
if report != nil {
PrintSummary(out.HumanStderrWriter(), report)
}
@@ -203,10 +250,7 @@ func EffectiveParallelIterations(conf *config.App) int {
if conf == nil {
return 1
}
- parallel := conf.ParallelIterations
- if parallel < 1 {
- parallel = 1
- }
+ parallel := max(conf.ParallelIterations, 1)
if conf.Iterations > 0 && parallel > conf.Iterations {
parallel = conf.Iterations
}
@@ -257,7 +301,8 @@ func runDiagnoseIterations(ctx context.Context, conf *config.App, out *output.Pr
}
resources = resources[:parallel]
state := diagnoseRunState{
- iterDurations: make([]time.Duration, conf.Iterations),
+ iterDurations: make([]time.Duration, conf.Iterations),
+ failedFastIteration: -1,
}
if conf.Shuffle {
state.shuffleSeeds = make(map[int]int64)
@@ -303,10 +348,15 @@ func runDiagnoseIterations(ctx context.Context, conf *config.App, out *output.Pr
progressTickWG.Wait()
}()
+ var serialProgressMu *sync.Mutex
+ if parallel == 1 && state.liveProgress {
+ serialProgressMu = new(sync.Mutex)
+ }
+
var wg sync.WaitGroup
for _, resource := range resources {
wg.Go(func() {
- executeSingleIteration(runCtx, conf, out, resultsDir, goTestArgs, resource, hooks, parallel, parallelProgress, diagnoseRunStart, jobs, results, cancel)
+ executeSingleIteration(runCtx, conf, out, resultsDir, goTestArgs, resource, hooks, parallel, parallelProgress, diagnoseRunStart, serialProgressMu, jobs, results, cancel)
})
}
@@ -346,7 +396,17 @@ func runDiagnoseIterations(ctx context.Context, conf *config.App, out *output.Pr
printDiagnoseIterationDigest(out, result.iteration, conf.Iterations, conf, resultsDir, result.duration)
})
} else {
- printDiagnoseIterationDigest(out, result.iteration, conf.Iterations, conf, resultsDir, result.duration)
+ // Serial TTY: worker may redraw the next iteration's \r line before this
+ // loop runs. Hold the same lock as redraw so ClearInline+Fprintln is not
+ // interleaved with another progress draw (which would leave the cursor at EOL).
+ if serialProgressMu != nil {
+ serialProgressMu.Lock()
+ out.ClearInline()
+ printDiagnoseIterationDigest(out, result.iteration, conf.Iterations, conf, resultsDir, result.duration)
+ serialProgressMu.Unlock()
+ } else {
+ printDiagnoseIterationDigest(out, result.iteration, conf.Iterations, conf, resultsDir, result.duration)
+ }
}
if result.dumpErr != nil && !out.AIOutput() {
out.Stderrf("postgres state dump iteration %d: %v\n", result.iteration, result.dumpErr)
@@ -356,12 +416,15 @@ func runDiagnoseIterations(ctx context.Context, conf *config.App, out *output.Pr
if state.failedFastReason == "" {
state.failedFastReason = result.failReason
}
+ if state.failedFastIteration < 0 {
+ state.failedFastIteration = result.iteration
+ }
}
}
return state, firstErr
}
-func executeSingleIteration(runCtx context.Context, conf *config.App, out *output.Printer, resultsDir string, goTestArgs []string, resource diagnoseIterationResource, hooks diagnoseRunHooks, parallel int, parallelProgress *parallelDiagnoseProgress, diagnoseRunStart time.Time, jobs <-chan int, results chan<- diagnoseIterationResult, cancel context.CancelFunc) {
+func executeSingleIteration(runCtx context.Context, conf *config.App, out *output.Printer, resultsDir string, goTestArgs []string, resource diagnoseIterationResource, hooks diagnoseRunHooks, parallel int, parallelProgress *parallelDiagnoseProgress, diagnoseRunStart time.Time, serialProgressMu *sync.Mutex, jobs <-chan int, results chan<- diagnoseIterationResult, cancel context.CancelFunc) {
used := false
for iteration := range jobs {
if runCtx.Err() != nil {
@@ -386,7 +449,19 @@ func executeSingleIteration(runCtx context.Context, conf *config.App, out *outpu
seed = hooks.seed()
}
iterStart := time.Now()
- iterErr := hooks.runIteration(runCtx, conf, out, resultsDir, goTestArgs, iteration, seed, resource.Env, parallel == 1, parallelProgress, diagnoseRunStart)
+ iterErr := hooks.runIteration(runCtx, diagnoseIterationParams{
+ Conf: conf,
+ Out: out,
+ ResultsDir: resultsDir,
+ GoTestArgs: goTestArgs,
+ Iteration: iteration,
+ ShuffleSeed: seed,
+ Env: resource.Env,
+ LiveProgress: parallel == 1,
+ ParallelProgress: parallelProgress,
+ DiagnoseRunStart: diagnoseRunStart,
+ SerialProgressMu: serialProgressMu,
+ })
iterDur := time.Since(iterStart)
var dumpErr error
if resource.DumpDiagnostics != nil {
@@ -428,20 +503,32 @@ func shouldFailFastIteration(conf *config.App, resultsDir string, iteration int,
if conf == nil {
return false, ""
}
- if iterErr != nil && conf.FailFast {
- return true, "failure"
- }
- if len(conf.FailFastOn) == 0 {
+ if iterErr == nil && !conf.FailFast && len(conf.FailFastOn) == 0 {
return false, ""
}
jsonPath := filepath.Join(resultsDir, fmt.Sprintf("iteration-%d.log.jsonl", iteration))
f, err := os.Open(jsonPath)
if err != nil {
+ if iterErr != nil && conf.FailFast {
+ return true, "failure"
+ }
return false, ""
}
defer f.Close()
- d, err := DigestIterationJSONL(f, conf.SlowThreshold)
- if err != nil {
+ d, digestErr := DigestIterationJSONL(f, conf.SlowThreshold)
+ if digestErr != nil {
+ if iterErr != nil && conf.FailFast {
+ return true, "failure"
+ }
+ return false, ""
+ }
+ if d.BuildFailure {
+ return true, failFastReasonBuildFailure
+ }
+ if iterErr != nil && conf.FailFast {
+ return true, "failure"
+ }
+ if len(conf.FailFastOn) == 0 {
return false, ""
}
return failFastDigestMatch(d, conf.FailFastOn)
@@ -520,6 +607,137 @@ func parseDiagnoseGoTestCount(goTestArgs []string) (set bool, n int, err error)
return set, n, nil
}
+// defaultGoTestTimeout matches `go help testflag` when -timeout is omitted.
+const defaultGoTestTimeout = 10 * time.Minute
+
+// parseGoTestTimeout returns the last -timeout in the go test flag section (before -args).
+// If the value parses to 0, disabled is true (timeout disabled for the test binary).
+// If no -timeout appears, set is false and callers should assume defaultGoTestTimeout for estimates.
+func parseGoTestTimeout(goTestArgs []string) (set bool, d time.Duration, disabled bool, err error) {
+ args := goTestFlagsBeforeArgs(goTestArgs)
+ for i := 0; i < len(args); i++ {
+ a := args[i]
+ if after, ok := strings.CutPrefix(a, "-timeout="); ok {
+ v := strings.TrimSpace(after)
+ if v == "" {
+ return false, 0, false, errors.New("invalid go test arguments: -timeout= requires a duration")
+ }
+ dur, e := time.ParseDuration(v)
+ if e != nil {
+ return false, 0, false, fmt.Errorf("invalid -timeout value %q: %w", v, e)
+ }
+ set = true
+ if dur == 0 {
+ disabled = true
+ } else {
+ disabled = false
+ d = dur
+ }
+ continue
+ }
+ if a == "-timeout" {
+ if i+1 >= len(args) {
+ return false, 0, false, errors.New("invalid go test arguments: -timeout must be followed by a duration")
+ }
+ i++
+ v := strings.TrimSpace(args[i])
+ dur, e := time.ParseDuration(v)
+ if e != nil {
+ return false, 0, false, fmt.Errorf("invalid -timeout value %q: %w", args[i], e)
+ }
+ set = true
+ if dur == 0 {
+ disabled = true
+ } else {
+ disabled = false
+ d = dur
+ }
+ }
+ }
+ return set, d, disabled, nil
+}
+
+// diagnoseIterationWaves returns ceil(iterations/workers) for scheduling diagnose iterations.
+func diagnoseIterationWaves(iterations, workers int) int {
+ w := max(workers, 1)
+ if iterations < 1 {
+ return 0
+ }
+ return (iterations + w - 1) / w
+}
+
+// diagnoseWallUpperBoundDetails holds the inputs used for the human-facing estimate line.
+type diagnoseWallUpperBoundDetails struct {
+ Bound time.Duration
+ Workers int
+ Waves int
+ PerInv time.Duration
+ UsedDefault bool // true when -timeout was omitted (10m assumed)
+}
+
+// diagnoseWallUpperBound returns worst-case wall clock if each go test invocation runs
+// for the full per-invocation test timeout (Go default 10m when -timeout is unset).
+// ok is false when -timeout=0 disables the test binary timeout (no finite bound).
+func diagnoseWallUpperBound(conf *config.App, goTestArgs []string, resourceCount int) (diag diagnoseWallUpperBoundDetails, ok bool, err error) {
+ if conf == nil {
+ return diagnoseWallUpperBoundDetails{}, false, errors.New("config is nil")
+ }
+ set, d, disabled, err := parseGoTestTimeout(goTestArgs)
+ if err != nil {
+ return diagnoseWallUpperBoundDetails{}, false, err
+ }
+ if set && disabled {
+ return diagnoseWallUpperBoundDetails{}, false, nil
+ }
+ perInv := defaultGoTestTimeout
+ usedDefault := !set
+ if set && !disabled {
+ perInv = d
+ usedDefault = false
+ }
+ parallel := EffectiveParallelIterations(conf)
+ if resourceCount > 0 && resourceCount < parallel {
+ parallel = resourceCount
+ }
+ if parallel < 1 {
+ parallel = 1
+ }
+ waves := diagnoseIterationWaves(conf.Iterations, parallel)
+ bound := time.Duration(waves) * perInv
+ return diagnoseWallUpperBoundDetails{
+ Bound: bound,
+ Workers: parallel,
+ Waves: waves,
+ PerInv: perInv,
+ UsedDefault: usedDefault,
+ }, true, nil
+}
+
+func printDiagnoseRunTimeEstimate(out *output.Printer, conf *config.App, goTestArgs []string, resourceCount int) error {
+ if out == nil || conf == nil {
+ return nil
+ }
+ diag, ok, err := diagnoseWallUpperBound(conf, goTestArgs, resourceCount)
+ if err != nil {
+ return err
+ }
+ if out.AIOutput() {
+ if !ok {
+ out.Stderrf("lpr_s:inf\n")
+ return nil
+ }
+ sec := max(diag.Bound.Round(time.Second)/time.Second, 0)
+ out.Stderrf("lpr_s:%d\n", sec)
+ return nil
+ }
+ est := "∞"
+ if ok {
+ est = formatDiagnoseWallClock(diag.Bound)
+ }
+ out.HumanStderr(termstyle.Muted.Render("Longest Possible Runtime: " + est))
+ return nil
+}
+
// WarnDiagnoseGoTestCount prints hints when the user sets -count on go test, and
// returns an error if -count values in the go test flag section are malformed.
func WarnDiagnoseGoTestCount(w io.Writer, goTestArgs []string) error {
@@ -588,14 +806,102 @@ func printDiagnoseResultsDirHeader(out *output.Printer, resultsDir string) {
out.HumanStderr(termstyle.Muted.Render("results directory: ") + termstyle.Label.Render(resultsDir))
}
-func printDiagnoseAnalyzing(out *output.Printer, afterLiveProgress bool) {
+// formatDiagnoseWallClock formats total wall time for human diagnose footers (0.1s resolution, seconds show two decimals when fractional).
+func formatDiagnoseWallClock(d time.Duration) string {
+ if d < 0 {
+ d = 0
+ }
+ d = d.Round(100 * time.Millisecond)
+ if d == 0 {
+ return "0s"
+ }
+ h := d / time.Hour
+ d -= h * time.Hour
+ mins := d / time.Minute
+ d -= mins * time.Minute
+ s := float64(d) / float64(time.Second)
+ secStr := formatDiagnoseSecondsFragment(s)
+ if h > 0 {
+ return fmt.Sprintf("%dh%dm%s", h, mins, secStr)
+ }
+ if mins > 0 {
+ return fmt.Sprintf("%dm%s", mins, secStr)
+ }
+ return secStr
+}
+
+func formatDiagnoseSecondsFragment(s float64) string {
+ cs := max(int(math.Round(s*100)), 0)
+ w := cs / 100
+ f := cs % 100
+ if f == 0 {
+ return fmt.Sprintf("%ds", w)
+ }
+ return fmt.Sprintf("%d.%02ds", w, f)
+}
+
+// startDiagnoseAnalyzingProgress prints a live "analyzing [duration]" line and returns stop.
+// Call stop once analysis is done (and defer stop as well — it is idempotent) so the final
+// "analyzing [duration] ✅" line is written before diagnosis complete, not at process exit.
+func startDiagnoseAnalyzingProgress(out *output.Printer, afterLiveProgress bool) (stop func(error)) {
if out.AIOutput() {
- return
+ return func(error) {}
}
if afterLiveProgress {
_, _ = fmt.Fprint(out.HumanStderrWriter(), "\r\033[K\n")
}
- out.HumanStderr("analyzing...")
+
+ analyzeStart := time.Now()
+ var once sync.Once
+ finalize := func(live bool, err error) {
+ elapsed := max(time.Since(analyzeStart).Round(time.Second), 0)
+ mark := termstyle.OK.Render("✅")
+ if err != nil {
+ mark = termstyle.Bad.Render("❌")
+ }
+ line := termstyle.Label.Render("analyzing") + " " + termstyle.Muted.Render("["+elapsed.String()+"]") + " " + mark
+ if live {
+ _, _ = fmt.Fprint(out.HumanStderrWriter(), "\r\033[K"+line+"\n")
+ return
+ }
+ out.HumanStderr(line)
+ }
+
+ if !out.LiveInlineProgress() {
+ return func(err error) {
+ once.Do(func() { finalize(false, err) })
+ }
+ }
+
+ renderProgress := func() {
+ elapsed := max(time.Since(analyzeStart).Round(time.Second), 0)
+ line := termstyle.Label.Render("analyzing") + " " + termstyle.Muted.Render("["+elapsed.String()+"]")
+ _, _ = fmt.Fprint(out.HumanStderrWriter(), "\r\033[K"+line)
+ }
+ renderProgress()
+
+ done := make(chan struct{})
+ var wg sync.WaitGroup
+ wg.Go(func() {
+ tick := time.NewTicker(250 * time.Millisecond)
+ defer tick.Stop()
+ for {
+ select {
+ case <-done:
+ return
+ case <-tick.C:
+ renderProgress()
+ }
+ }
+ })
+
+ return func(err error) {
+ once.Do(func() {
+ close(done)
+ wg.Wait()
+ finalize(true, err)
+ })
+ }
}
func printDiagnoseIterationDigest(out *output.Printer, iterationIdx0, totalIters int, conf *config.App, resultsDir string, iterDur time.Duration) {
@@ -632,10 +938,7 @@ func formatIterationDigestAI(iter, total int, d IterationDigest, dur time.Durati
case "timeout":
rs = "t"
}
- sec := int(dur.Round(time.Second) / time.Second)
- if sec < 0 {
- sec = 0
- }
+ sec := max(int(dur.Round(time.Second)/time.Second), 0)
return fmt.Sprintf("d %d/%d %s %ds r%d f%d t%d s%d", iter, total, rs, sec, d.RanTests, d.FailTests, d.TimeoutTests, d.SlowTests)
}
@@ -670,7 +973,14 @@ func (sw *syncedWriter) Write(p []byte) (int, error) {
return sw.w.Write(p)
}
-func diagnoseIteration(ctx context.Context, conf *config.App, out *output.Printer, resultsDir string, goTestArgs []string, iteration int, shuffleSeed int64, env []string, liveProgress bool, parallelProgress *parallelDiagnoseProgress, diagnoseRunStart time.Time) error {
+func diagnoseIteration(ctx context.Context, p diagnoseIterationParams) error {
+ conf, out := p.Conf, p.Out
+ resultsDir, goTestArgs := p.ResultsDir, p.GoTestArgs
+ iteration, shuffleSeed := p.Iteration, p.ShuffleSeed
+ env := p.Env
+ liveProgress, parallelProgress := p.LiveProgress, p.ParallelProgress
+ diagnoseRunStart, serialProgressMu := p.DiagnoseRunStart, p.SerialProgressMu
+
start := time.Now()
jsonPath := filepath.Join(resultsDir, fmt.Sprintf("iteration-%d.log.jsonl", iteration))
resultsFile, err := os.Create(jsonPath)
@@ -722,6 +1032,10 @@ func diagnoseIteration(ctx context.Context, conf *config.App, out *output.Printe
}
redraw := func(liveInline bool) {
+ if serialProgressMu != nil {
+ serialProgressMu.Lock()
+ defer serialProgressMu.Unlock()
+ }
renderDiagnoseProgressLine(out.HumanStderrWriter(), iter, iters, time.Since(start), diagnoseRunStart, time.Now(), liveInline)
}
@@ -788,7 +1102,7 @@ func diagnoseIteration(ctx context.Context, conf *config.App, out *output.Printe
return runErr
}
-func newRunMeta(conf *config.App, goTestArgs []string, resultsDir string, started time.Time, finished *time.Time) *RunMeta {
+func newRunMeta(conf *config.App, goTestArgs []string, resultsDir string, started time.Time, finished *time.Time, hasDatabase bool) *RunMeta {
if conf == nil {
return nil
}
@@ -803,10 +1117,7 @@ func newRunMeta(conf *config.App, goTestArgs []string, resultsDir string, starte
if n, err := config.NormalizeFailFastOn(conf.FailFastOn); err == nil && len(n) > 0 {
ffo = n
}
- par := conf.ParallelIterations
- if par < 1 {
- par = 1
- }
+ par := max(conf.ParallelIterations, 1)
var fin *time.Time
if finished != nil {
t := finished.UTC()
@@ -824,5 +1135,7 @@ func newRunMeta(conf *config.App, goTestArgs []string, resultsDir string, starte
FailFast: conf.FailFast,
FailFastOn: ffo,
Shuffle: conf.Shuffle,
+ PostgresVersion: conf.PostgresVersion,
+ HasDatabase: hasDatabase,
}
}
diff --git a/tools/test/internal/runner/runner_test.go b/tools/test/internal/runner/runner_test.go
index e2ea2eb4871..b81edb6911b 100644
--- a/tools/test/internal/runner/runner_test.go
+++ b/tools/test/internal/runner/runner_test.go
@@ -110,14 +110,43 @@ func TestDiagnoseHumanModeFooterShowsReportJSONPath(t *testing.T) {
assert.NotContains(t, out, "results in ")
}
-func TestPrintDiagnoseAnalyzingStartsNewLineAfterLiveProgress(t *testing.T) {
+func TestStartDiagnoseAnalyzingProgress_startsNewLineAfterLiveProgress(t *testing.T) {
t.Parallel()
var stderr strings.Builder
out := output.New(false, io.Discard, &stderr, output.SkipFD)
- printDiagnoseAnalyzing(out, true)
+ stop := startDiagnoseAnalyzingProgress(out, true)
+ stop(nil)
- assert.Equal(t, "\r\u001b[K\nanalyzing...\n", stderr.String())
+ plain := stripANSI(stderr.String())
+ assert.Contains(t, plain, "analyzing [0s]")
+ assert.Contains(t, plain, "✅")
+ assert.True(t, strings.HasPrefix(stderr.String(), "\r\u001b[K\n"))
+}
+
+func TestStartDiagnoseAnalyzingProgress_liveInline_updatesDuration(t *testing.T) {
+ t.Parallel()
+ var stderr strings.Builder
+ out := output.NewForTest(false, io.Discard, &stderr, true)
+
+ stop := startDiagnoseAnalyzingProgress(out, false)
+ time.Sleep(300 * time.Millisecond)
+ stop(nil)
+
+ got := stderr.String()
+ assert.Contains(t, got, "analyzing")
+ assert.NotContains(t, got, "analyzing...")
+ assert.Regexp(t, `\[[0-9]+s\]`, got)
+ assert.Contains(t, stripANSI(got), "✅")
+ assert.Contains(t, got, "\r\u001b[K")
+ assert.True(t, strings.HasSuffix(got, "\n"))
+}
+
+func TestFormatDiagnoseWallClock(t *testing.T) {
+ t.Parallel()
+ assert.Equal(t, "0s", formatDiagnoseWallClock(0))
+ assert.Equal(t, "50m4.60s", formatDiagnoseWallClock(50*time.Minute+4*time.Second+607421833*time.Nanosecond))
+ assert.Equal(t, "1h0m0s", formatDiagnoseWallClock(time.Hour))
}
func TestParseDiagnoseGoTestCount(t *testing.T) {
@@ -193,6 +222,182 @@ func TestWarnDiagnoseGoTestCount(t *testing.T) {
})
}
+func TestParseGoTestTimeout(t *testing.T) {
+ t.Parallel()
+
+ tests := []struct {
+ name string
+ args []string
+ wantSet bool
+ wantDur time.Duration
+ disabled bool
+ wantErr bool
+ }{
+ {
+ name: "equals form",
+ args: []string{"-timeout=5m", "./pkg"},
+ wantSet: true,
+ wantDur: 5 * time.Minute,
+ },
+ {
+ name: "separate value",
+ args: []string{"-timeout", "10m", "./pkg"},
+ wantSet: true,
+ wantDur: 10 * time.Minute,
+ },
+ {
+ name: "missing uses unset",
+ args: []string{"-race", "./pkg"},
+ wantSet: false,
+ },
+ {
+ name: "zero disables",
+ args: []string{"-timeout=0", "./pkg"},
+ wantSet: true,
+ disabled: true,
+ },
+ {
+ name: "zero separate disables",
+ args: []string{"-timeout", "0", "./pkg"},
+ wantSet: true,
+ disabled: true,
+ },
+ {
+ name: "after args ignored",
+ args: []string{"-args", "-timeout", "1ns", "./pkg"},
+ wantSet: false,
+ },
+ {
+ name: "last wins toward longer",
+ args: []string{"-timeout=1m", "-timeout=2m", "./pkg"},
+ wantSet: true,
+ wantDur: 2 * time.Minute,
+ },
+ {
+ name: "last wins clears disable",
+ args: []string{"-timeout=0", "-timeout=5m", "./pkg"},
+ wantSet: true,
+ wantDur: 5 * time.Minute,
+ },
+ {
+ name: "last wins toward disable",
+ args: []string{"-timeout=5m", "-timeout=0", "./pkg"},
+ wantSet: true,
+ disabled: true,
+ },
+ {
+ name: "empty equals",
+ args: []string{"-timeout=", "./pkg"},
+ wantErr: true,
+ },
+ {
+ name: "missing value after flag",
+ args: []string{"-timeout"},
+ wantErr: true,
+ },
+ {
+ name: "invalid duration",
+ args: []string{"-timeout=notaduration", "./pkg"},
+ wantErr: true,
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ t.Parallel()
+ set, d, disabled, err := parseGoTestTimeout(tc.args)
+ if tc.wantErr {
+ require.Error(t, err)
+ return
+ }
+ require.NoError(t, err)
+ assert.Equal(t, tc.wantSet, set, "set")
+ assert.Equal(t, tc.disabled, disabled, "disabled")
+ if !tc.wantSet {
+ assert.Equal(t, time.Duration(0), d)
+ return
+ }
+ if tc.disabled {
+ return
+ }
+ assert.Equal(t, tc.wantDur, d)
+ })
+ }
+}
+
+func TestDiagnoseIterationWaves(t *testing.T) {
+ t.Parallel()
+ assert.Equal(t, 0, diagnoseIterationWaves(0, 3))
+ assert.Equal(t, 10, diagnoseIterationWaves(10, 1))
+ assert.Equal(t, 4, diagnoseIterationWaves(10, 3))
+ assert.Equal(t, 3, diagnoseIterationWaves(10, 4))
+}
+
+func TestDiagnoseWallUpperBound(t *testing.T) {
+ t.Parallel()
+
+ t.Run("parallel waves and bound", func(t *testing.T) {
+ t.Parallel()
+ diag, ok, err := diagnoseWallUpperBound(
+ &config.App{Iterations: 10, ParallelIterations: 3},
+ []string{"-timeout=15m", "./pkg"},
+ 3,
+ )
+ require.NoError(t, err)
+ require.True(t, ok)
+ assert.Equal(t, 4, diag.Waves)
+ assert.Equal(t, 3, diag.Workers)
+ assert.Equal(t, 15*time.Minute, diag.PerInv)
+ assert.False(t, diag.UsedDefault)
+ assert.Equal(t, 60*time.Minute, diag.Bound)
+ })
+
+ t.Run("resource count narrows workers", func(t *testing.T) {
+ t.Parallel()
+ diag, ok, err := diagnoseWallUpperBound(
+ &config.App{Iterations: 4, ParallelIterations: 4},
+ []string{"-timeout=5m", "./pkg"},
+ 2,
+ )
+ require.NoError(t, err)
+ require.True(t, ok)
+ assert.Equal(t, 2, diag.Workers)
+ assert.Equal(t, 2, diag.Waves)
+ assert.Equal(t, 10*time.Minute, diag.Bound)
+ })
+
+ t.Run("default timeout when unset", func(t *testing.T) {
+ t.Parallel()
+ diag, ok, err := diagnoseWallUpperBound(
+ &config.App{Iterations: 2, ParallelIterations: 1},
+ []string{"./pkg"},
+ 0,
+ )
+ require.NoError(t, err)
+ require.True(t, ok)
+ assert.True(t, diag.UsedDefault)
+ assert.Equal(t, defaultGoTestTimeout, diag.PerInv)
+ assert.Equal(t, 2*defaultGoTestTimeout, diag.Bound)
+ })
+
+ t.Run("timeout zero disables bound", func(t *testing.T) {
+ t.Parallel()
+ _, ok, err := diagnoseWallUpperBound(
+ &config.App{Iterations: 5, ParallelIterations: 1},
+ []string{"-timeout=0", "./pkg"},
+ 0,
+ )
+ require.NoError(t, err)
+ assert.False(t, ok)
+ })
+
+ t.Run("nil config errors", func(t *testing.T) {
+ t.Parallel()
+ _, _, err := diagnoseWallUpperBound(nil, []string{"./pkg"}, 0)
+ require.Error(t, err)
+ })
+}
+
func TestBuildDiagnoseArgs(t *testing.T) {
t.Parallel()
@@ -302,7 +507,7 @@ func TestDiagnoseResultsDirName(t *testing.T) {
{
name: "flags before package",
goTestArgs: []string{"-race", "-run=^TestFoo$", "./pkg"},
- want: diagnoseResultsNamePrefix + "pkg-20240601123045",
+ want: diagnoseResultsNamePrefix + "pkg__run__TestFoo_-20240601123045",
},
{
name: "single package",
@@ -314,6 +519,16 @@ func TestDiagnoseResultsDirName(t *testing.T) {
goTestArgs: []string{"./a"},
want: diagnoseResultsNamePrefix + "a-20240601123045",
},
+ {
+ name: "package then run flag",
+ goTestArgs: []string{"./core/services/foo", "-run", "TestBar"},
+ want: diagnoseResultsNamePrefix + "core_services_foo__run_TestBar-20240601123045",
+ },
+ {
+ name: "run flag then package",
+ goTestArgs: []string{"-run", "TestBar", "./core/services/foo"},
+ want: diagnoseResultsNamePrefix + "core_services_foo__run_TestBar-20240601123045",
+ },
}
for _, tc := range tests {
@@ -332,7 +547,7 @@ func TestDiagnoseResultsDirNameLongRunAndPath(t *testing.T) {
goTestArgs := []string{"-run=" + longRun, "./p"}
got := diagnoseResultsDirName(goTestArgs, diagnoseResultsDirNameAt)
assert.LessOrEqual(t, len(got), maxDiagnoseResultsBasename)
- assert.Regexp(t, `diagnose-p-20240601123045`, got)
+ assert.Regexp(t, `diagnose-.+-20240601123045`, got)
longTarget := "./" + strings.Repeat("seg/", 60) + "z"
goTestArgs2 := []string{longTarget}
@@ -424,6 +639,10 @@ func TestPackagePatternsFromEnd(t *testing.T) {
t.Parallel()
assert.Equal(t, []string{"./core/...", "./foo"}, packagePatternsFromEnd([]string{"-race", "-timeout=5m", "./core/...", "./foo"}))
assert.Nil(t, packagePatternsFromEnd([]string{"-v", "-race"}))
+ assert.Equal(t, []string{"./core/..."}, packagePatternsFromEnd([]string{"-timeout", "10m", "./core/..."}))
+ assert.Nil(t, packagePatternsFromEnd([]string{"-timeout", "10m"}))
+ assert.Equal(t, []string{"./pkg"}, packagePatternsFromEnd([]string{"./pkg", "-run", "TestName"}))
+ assert.Equal(t, []string{"./pkg"}, packagePatternsFromEnd([]string{"-run", "TestName", "./pkg"}))
}
func TestRunDiagnoseIterationsRunsInParallelWithWorkerIsolation(t *testing.T) {
@@ -477,9 +696,9 @@ func TestRunDiagnoseIterationsRunsInParallelWithWorkerIsolation(t *testing.T) {
},
}
hooks := diagnoseRunHooks{
- runIteration: func(_ context.Context, _ *config.App, _ *output.Printer, dir string, _ []string, iteration int, _ int64, env []string, liveProgress bool, parallelProgress *parallelDiagnoseProgress, _ time.Time) error {
- require.False(t, liveProgress)
- require.Nil(t, parallelProgress)
+ runIteration: func(_ context.Context, p diagnoseIterationParams) error {
+ require.False(t, p.LiveProgress)
+ require.Nil(t, p.ParallelProgress)
nowActive := atomic.AddInt32(&active, 1)
for {
seen := atomic.LoadInt32(&maxActive)
@@ -488,10 +707,10 @@ func TestRunDiagnoseIterationsRunsInParallelWithWorkerIsolation(t *testing.T) {
}
}
mu.Lock()
- envByIter[iteration] = append([]string(nil), env...)
+ envByIter[p.Iteration] = append([]string(nil), p.Env...)
mu.Unlock()
time.Sleep(25 * time.Millisecond)
- err := os.WriteFile(filepath.Join(dir, "iteration-"+strconv.Itoa(iteration)+".log.jsonl"), []byte(`{"Action":"pass","Package":"p","Test":"T","Elapsed":0.01}`+"\n"), 0600)
+ err := os.WriteFile(filepath.Join(p.ResultsDir, "iteration-"+strconv.Itoa(p.Iteration)+".log.jsonl"), []byte(`{"Action":"pass","Package":"p","Test":"T","Elapsed":0.01}`+"\n"), 0600)
atomic.AddInt32(&active, -1)
return err
},
@@ -525,12 +744,12 @@ func TestRunDiagnoseIterationsFailFastCancelsNewWork(t *testing.T) {
var mu sync.Mutex
started := make(map[int]struct{})
hooks := diagnoseRunHooks{
- runIteration: func(ctx context.Context, _ *config.App, _ *output.Printer, dir string, _ []string, iteration int, _ int64, _ []string, _ bool, _ *parallelDiagnoseProgress, _ time.Time) error {
+ runIteration: func(ctx context.Context, p diagnoseIterationParams) error {
mu.Lock()
- started[iteration] = struct{}{}
+ started[p.Iteration] = struct{}{}
mu.Unlock()
- if iteration == 0 {
- require.NoError(t, os.WriteFile(filepath.Join(dir, "iteration-0.log.jsonl"), []byte(`{"Action":"fail","Package":"p","Test":"T","Elapsed":0.01}`+"\n"), 0600))
+ if p.Iteration == 0 {
+ require.NoError(t, os.WriteFile(filepath.Join(p.ResultsDir, "iteration-0.log.jsonl"), []byte(`{"Action":"fail","Package":"p","Test":"T","Elapsed":0.01}`+"\n"), 0600))
return errors.New("test failed")
}
<-ctx.Done()
@@ -544,6 +763,132 @@ func TestRunDiagnoseIterationsFailFastCancelsNewWork(t *testing.T) {
assert.LessOrEqual(t, len(started), conf.ParallelIterations)
}
+func TestRunDiagnoseIterationsStopsOnBuildFailure(t *testing.T) {
+ t.Parallel()
+
+ tests := []struct {
+ name string
+ iterationJSON string
+ }{
+ {
+ name: "package_level_fail_heuristic",
+ iterationJSON: `{"Action":"fail","Package":"pkg/build","Elapsed":0.0}`,
+ },
+ {
+ name: "failed_build_field",
+ iterationJSON: `{"Action":"fail","Package":"example.com/badpkg","Elapsed":0,"FailedBuild":"example.com/badpkg.test"}`,
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ t.Parallel()
+ resultsDir := t.TempDir()
+ conf := &config.App{
+ RepoRoot: t.TempDir(),
+ AIOutput: true,
+ Iterations: 5,
+ // Build failure stops even without --fail-fast or --fail-fast-on.
+ FailFast: false,
+ }
+ out := output.New(true, io.Discard, io.Discard, output.SkipFD)
+ hooks := diagnoseRunHooks{
+ runIteration: func(_ context.Context, p diagnoseIterationParams) error {
+ payload := tc.iterationJSON + "\n"
+ require.NoError(t, os.WriteFile(filepath.Join(p.ResultsDir, "iteration-"+strconv.Itoa(p.Iteration)+".log.jsonl"), []byte(payload), 0600))
+ return errors.New("exit status 1")
+ },
+ }
+
+ state, err := runDiagnoseIterations(context.Background(), conf, out, resultsDir, []string{"./pkg"}, []diagnoseIterationResource{{}}, hooks)
+ require.NoError(t, err)
+ assert.Equal(t, 1, state.completed)
+ assert.True(t, state.failedFast)
+ assert.Equal(t, failFastReasonBuildFailure, state.failedFastReason)
+ assert.Equal(t, 0, state.failedFastIteration)
+ })
+ }
+}
+
+// Stress serial live \r progress vs digest printing: the hook redraws on a tight
+// ticker like diagnoseIteration while the results consumer prints table rows.
+// Without a shared mutex, scheduling can merge progress and digest on one line.
+func TestRunDiagnoseIterations_serialLiveProgressMutex_noMergedProgressAndTableLines(t *testing.T) {
+ t.Parallel()
+ var stderr strings.Builder
+ out := output.NewForTest(false, io.Discard, &stderr, true)
+ require.True(t, out.LiveInlineProgress())
+
+ resultsDir := t.TempDir()
+ conf := &config.App{
+ RepoRoot: t.TempDir(),
+ AIOutput: false,
+ Iterations: 60,
+ ParallelIterations: 1,
+ }
+ jsonl := `{"Action":"pass","Package":"p"}` + "\n"
+
+ hooks := diagnoseRunHooks{
+ runIteration: func(ctx context.Context, p diagnoseIterationParams) error {
+ require.True(t, p.LiveProgress)
+ require.NotNil(t, p.SerialProgressMu)
+ iter, iters := p.Iteration+1, conf.Iterations
+ iterStart := time.Now()
+ tickDone := make(chan struct{})
+ var wgTick sync.WaitGroup
+ wgTick.Go(func() {
+ tick := time.NewTicker(120 * time.Microsecond)
+ defer tick.Stop()
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case <-tickDone:
+ return
+ case <-tick.C:
+ p.SerialProgressMu.Lock()
+ renderDiagnoseProgressLine(p.Out.HumanStderrWriter(), iter, iters, time.Since(iterStart), p.DiagnoseRunStart, time.Now(), true)
+ p.SerialProgressMu.Unlock()
+ }
+ }
+ })
+ time.Sleep(3 * time.Millisecond)
+ close(tickDone)
+ wgTick.Wait()
+ path := filepath.Join(p.ResultsDir, "iteration-"+strconv.Itoa(p.Iteration)+".log.jsonl")
+ return os.WriteFile(path, []byte(jsonl), 0o600)
+ },
+ }
+
+ state, err := runDiagnoseIterations(context.Background(), conf, out, resultsDir, []string{"./pkg"}, []diagnoseIterationResource{{}}, hooks)
+ require.NoError(t, err)
+ require.Equal(t, conf.Iterations, state.completed)
+
+ for line := range strings.SplitSeq(stderr.String(), "\n") {
+ plain := stripANSI(ttySegmentAfterLastCR(line))
+ if plain == "" {
+ continue
+ }
+ if strings.Contains(plain, "Iter") && strings.Contains(plain, "Result") {
+ continue
+ }
+ if strings.Contains(plain, "iter ") && strings.Contains(plain, "/") &&
+ (strings.Contains(plain, " pass") || strings.Contains(plain, " fail") || strings.Contains(plain, " timeout")) {
+ t.Fatalf("merged live progress with table row (serial mutex regression):\n%q", plain)
+ }
+ }
+}
+
+// ttySegmentAfterLastCR returns the portion of a single stderr line after the final
+// carriage return, matching how a terminal shows the line when the buffer records
+// many \r redraws before one newline.
+func ttySegmentAfterLastCR(s string) string {
+ if i := strings.LastIndex(s, "\r"); i >= 0 {
+ return s[i+1:]
+ }
+ return s
+}
+
func TestRunDiagnoseIterationsFailFastOnCategories(t *testing.T) {
t.Parallel()
@@ -607,13 +952,13 @@ func TestRunDiagnoseIterationsFailFastOnCategories(t *testing.T) {
}
out := output.New(true, io.Discard, io.Discard, output.SkipFD)
hooks := diagnoseRunHooks{
- runIteration: func(_ context.Context, _ *config.App, _ *output.Printer, dir string, _ []string, iteration int, _ int64, _ []string, _ bool, _ *parallelDiagnoseProgress, _ time.Time) error {
- return os.WriteFile(filepath.Join(dir, "iteration-"+strconv.Itoa(iteration)+".log.jsonl"), []byte(tc.iterationJSON+"\n"), 0600)
+ runIteration: func(_ context.Context, p diagnoseIterationParams) error {
+ return os.WriteFile(filepath.Join(p.ResultsDir, "iteration-"+strconv.Itoa(p.Iteration)+".log.jsonl"), []byte(tc.iterationJSON+"\n"), 0600)
},
}
if tc.iterErr != nil {
- hooks.runIteration = func(_ context.Context, _ *config.App, _ *output.Printer, dir string, _ []string, iteration int, _ int64, _ []string, _ bool, _ *parallelDiagnoseProgress, _ time.Time) error {
- require.NoError(t, os.WriteFile(filepath.Join(dir, "iteration-"+strconv.Itoa(iteration)+".log.jsonl"), []byte(tc.iterationJSON+"\n"), 0600))
+ hooks.runIteration = func(_ context.Context, p diagnoseIterationParams) error {
+ require.NoError(t, os.WriteFile(filepath.Join(p.ResultsDir, "iteration-"+strconv.Itoa(p.Iteration)+".log.jsonl"), []byte(tc.iterationJSON+"\n"), 0600))
return tc.iterErr
}
}
@@ -633,3 +978,21 @@ func TestFormatIterationDigestAI(t *testing.T) {
}
assert.Equal(t, "d 7/100 p 90s r126 f0 t0 s6", formatIterationDigestAI(7, 100, d, 90*time.Second))
}
+
+func TestShouldFailFastIterationOptimization(t *testing.T) {
+ t.Parallel()
+
+ tmp := t.TempDir()
+ conf := &config.App{
+ FailFast: false,
+ FailFastOn: nil,
+ }
+
+ failed, reason := shouldFailFastIteration(conf, filepath.Join(tmp, "non-existent"), 1, nil)
+ assert.False(t, failed)
+ assert.Empty(t, reason)
+
+ failed, reason = shouldFailFastIteration(conf, filepath.Join(tmp, "non-existent"), 1, os.ErrNotExist)
+ assert.False(t, failed)
+ assert.Empty(t, reason)
+}