Remove t.Parallel from common/cluster tests
This change trades execution time for (hopefully) a bit more stability
in the cluster tests. We see that on constrained CI systems these tests
time out. When they time out, there are many, many goroutines stuck in
t.Parallel waiting for other tests to complete. These goroutines make it
harder to see where the true failures are.

So, let's try running each of these tests serially; by doing this, local
execution goes from about 8s to 18s. In the scope of test runs that take
tens of minutes, this seems reasonable.

Change-Id: Icaea901bf03a6e15e6f9c32da2d7bedbdf7cdb38
Signed-off-by: Matthew Sykes <sykesmat@us.ibm.com>
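
For context on why the stuck goroutines appear, here is a minimal, standalone sketch contrasting the two styles; it is not taken from the Fabric code, and the package name, test names, and sleeps are placeholders. A test that calls t.Parallel is suspended inside that call until the sequential tests in the package have finished, so a single hung test leaves every suspended sibling blocked in t.Parallel when the run times out.

```go
package demo

import (
	"testing"
	"time"
)

// Parallel style (what this commit removes): t.Parallel suspends the test
// and lets the runner move on to the next one; all suspended tests resume
// together once the sequential pass is done. A single hung test therefore
// leaves its siblings blocked inside t.Parallel in a timeout's stack dump.
func TestParallelStyle(t *testing.T) {
	t.Parallel()
	time.Sleep(10 * time.Millisecond) // stand-in for real cluster work
}

// Serial style (what this change adopts): the body runs immediately and the
// next test does not start until this one returns, so total runtime grows
// but a hang points straight at the test that caused it.
func TestSerialStyle(t *testing.T) {
	time.Sleep(10 * time.Millisecond) // stand-in for real cluster work
}
```

Running `go test -v` on a file like this prints `=== PAUSE TestParallelStyle` when the parallel test is suspended and `=== CONT TestParallelStyle` when it resumes, while the serial test simply runs to completion in place; that strictly sequential behaviour is what the change buys at the cost of roughly ten seconds of local wall-clock time.
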
sykesm authored and ale-linux committed Jan 9, 2020
1 parent 83dacb4 commit f101cab
Showing 5 changed files with 0 additions and 48 deletions.
22 changes: 0 additions & 22 deletions orderer/common/cluster/comm_test.go
@@ -287,8 +287,6 @@ func newTestNode(t *testing.T) *clusterNode {
}

func TestSendBigMessage(t *testing.T) {
t.Parallel()

// Scenario: Basic test that spawns 5 nodes and sends a big message
// from one of the nodes to the others.
// A receiver node's Step() server side method (which calls Recv)
@@ -383,7 +381,6 @@ func TestSendBigMessage(t *testing.T) {
}

func TestBlockingSend(t *testing.T) {
t.Parallel()
// Scenario: Basic test that spawns 2 nodes and sends from the first node
// to the second node, three SubmitRequests, or three consensus requests.
// SubmitRequests should block, but consensus requests should not.
@@ -488,7 +485,6 @@ func TestBlockingSend(t *testing.T) {
}

func TestBasic(t *testing.T) {
t.Parallel()
// Scenario: Basic test that spawns 2 nodes and sends each other
// messages that are expected to be echoed back

@@ -506,7 +502,6 @@ func TestBasic(t *testing.T) {
}

func TestUnavailableHosts(t *testing.T) {
t.Parallel()
// Scenario: A node is configured to connect
// to a host that is down
node1 := newTestNode(t)
@@ -531,8 +526,6 @@ func TestUnavailableHosts(t *testing.T) {
}

func TestStreamAbort(t *testing.T) {
t.Parallel()

// Scenarios: node 1 is connected to node 2 in 2 channels,
// and the consumer of the communication calls receive.
// The two sub-scenarios happen:
@@ -617,7 +610,6 @@ func testStreamAbort(t *testing.T, node2 *clusterNode, newMembership []cluster.R
}

func TestDoubleReconfigure(t *testing.T) {
t.Parallel()
// Scenario: Basic test that spawns 2 nodes
// and configures node 1 twice, and checks that
// the remote stub for node 1 wasn't re-created in the second
@@ -641,13 +633,11 @@ func TestDoubleReconfigure(t *testing.T) {
}

func TestInvalidChannel(t *testing.T) {
t.Parallel()
// Scenario: node 1 it ordered to send a message on a channel
// that doesn't exist, and also receives a message, but
// the channel cannot be extracted from the message.

t.Run("channel doesn't exist", func(t *testing.T) {
t.Parallel()
node1 := newTestNode(t)
defer node1.stop()

@@ -656,7 +646,6 @@ func TestInvalidChannel(t *testing.T) {
})

t.Run("channel cannot be extracted", func(t *testing.T) {
t.Parallel()
node1 := newTestNode(t)
defer node1.stop()

@@ -686,7 +675,6 @@ func TestInvalidChannel(t *testing.T) {
}

func TestAbortRPC(t *testing.T) {
t.Parallel()
// Scenarios:
// (I) The node calls an RPC, and calls Abort() on the remote context
// in parallel. The RPC should return even though the server-side call hasn't finished.
@@ -771,7 +759,6 @@ func testAbort(t *testing.T, abortFunc func(*cluster.RemoteContext), rpcTimeout
}

func TestNoTLSCertificate(t *testing.T) {
t.Parallel()
// Scenario: The node is sent a message by another node that doesn't
// connect with mutual TLS, thus doesn't provide a TLS certificate
node1 := newTestNode(t)
@@ -808,7 +795,6 @@ func TestNoTLSCertificate(t *testing.T) {
}

func TestReconnect(t *testing.T) {
t.Parallel()
// Scenario: node 1 and node 2 are connected,
// and node 2 is taken offline.
// Node 1 tries to send a message to node 2 but fails,
@@ -860,7 +846,6 @@ func TestReconnect(t *testing.T) {
}

func TestRenewCertificates(t *testing.T) {
t.Parallel()
// Scenario: node 1 and node 2 are connected,
// and the certificates are renewed for both nodes
// at the same time.
@@ -916,7 +901,6 @@ func TestRenewCertificates(t *testing.T) {
}

func TestMembershipReconfiguration(t *testing.T) {
t.Parallel()
// Scenario: node 1 and node 2 are started up
// and node 2 is configured to know about node 1,
// without node1 knowing about node 2.
@@ -972,7 +956,6 @@ func TestMembershipReconfiguration(t *testing.T) {
}

func TestShutdown(t *testing.T) {
t.Parallel()
// Scenario: node 1 is shut down and as a result, can't
// send messages to anyone, nor can it be reconfigured

@@ -1015,7 +998,6 @@ func TestShutdown(t *testing.T) {
}

func TestMultiChannelConfig(t *testing.T) {
t.Parallel()
// Scenario: node 1 is knows node 2 only in channel "foo"
// and knows node 3 only in channel "bar".
// Messages that are received, are routed according to their corresponding channels
@@ -1093,7 +1075,6 @@ func TestMultiChannelConfig(t *testing.T) {
}

func TestConnectionFailure(t *testing.T) {
t.Parallel()
// Scenario: node 1 fails to connect to node 2.

node1 := newTestNode(t)
@@ -1145,8 +1126,6 @@ func (tm *testMetrics) initialize() {
}

func TestMetrics(t *testing.T) {
t.Parallel()

for _, testCase := range []struct {
name string
runTest func(node1, node2 *clusterNode, testMetrics *testMetrics)
@@ -1300,7 +1279,6 @@ func TestMetrics(t *testing.T) {
}

func TestCertExpirationWarningEgress(t *testing.T) {
t.Parallel()
// Scenario: Ensures that when certificates are due to expire,
// a warning is logged to the log.

2 changes: 0 additions & 2 deletions orderer/common/cluster/connections_test.go
@@ -19,7 +19,6 @@ import (
)

func TestConcurrentConnections(t *testing.T) {
t.Parallel()
// Scenario: Have 100 goroutines try to create a connection together at the same time,
// wait until one of them succeeds, and then wait until they all return,
// and also ensure they all return the same connection reference
@@ -60,7 +59,6 @@ func (cms *connectionMapperSpy) Lookup(cert []byte) (*grpc.ClientConn, bool) {
}

func TestConcurrentLookupMiss(t *testing.T) {
t.Parallel()
// Scenario: 2 concurrent connection attempts are made,
// and the first 2 Lookup operations are delayed,
// which makes the connection store attempt to connect
4 changes: 0 additions & 4 deletions orderer/common/cluster/rpc_test.go
@@ -27,7 +27,6 @@ import (
)

func TestRPCChangeDestination(t *testing.T) {
t.Parallel()
// We send a Submit() to 2 different nodes - 1 and 2.
// The first invocation of Submit() establishes a stream with node 1
// and the second establishes a stream with node 2.
@@ -92,7 +91,6 @@ func TestRPCChangeDestination(t *testing.T) {
}

func TestSend(t *testing.T) {
t.Parallel()
submitRequest := &orderer.SubmitRequest{Channel: "mychannel"}
submitResponse := &orderer.StepResponse{
Payload: &orderer.StepResponse_SubmitRes{
@@ -258,8 +256,6 @@ func TestRPCGarbageCollection(t *testing.T) {
// remote node.
// The first stream should be cleaned from the mapping.

t.Parallel()

comm := &mocks.Communicator{}
client := &mocks.ClusterClient{}
stream := &mocks.StepClient{}
8 changes: 0 additions & 8 deletions orderer/common/cluster/service_test.go
@@ -59,7 +59,6 @@ var (
)

func TestStep(t *testing.T) {
t.Parallel()
dispatcher := &mocks.Dispatcher{}

svc := &cluster.Service{
@@ -93,7 +92,6 @@ func TestStep(t *testing.T) {
}

func TestSubmitSuccess(t *testing.T) {
t.Parallel()
dispatcher := &mocks.Dispatcher{}

stream := &mocks.StepStream{}
@@ -142,7 +140,6 @@ func (t tuple) asArray() []interface{} {
}

func TestSubmitFailure(t *testing.T) {
t.Parallel()
oops := errors.New("oops")
testCases := []struct {
name string
@@ -196,8 +193,6 @@ func TestSubmitFailure(t *testing.T) {
}

func TestIngresStreamsMetrics(t *testing.T) {
t.Parallel()

dispatcher := &mocks.Dispatcher{}
dispatcher.On("DispatchConsensus", mock.Anything, mock.Anything).Return(nil)

@@ -233,7 +228,6 @@ func TestIngresStreamsMetrics(t *testing.T) {
}

func TestServiceGRPC(t *testing.T) {
t.Parallel()
// Check that Service correctly implements the gRPC interface
srv, err := comm.NewGRPCServer("127.0.0.1:0", comm.ServerConfig{})
assert.NoError(t, err)
@@ -244,8 +238,6 @@ func TestServiceGRPC(t *testing.T) {
}

func TestExpirationWarningIngress(t *testing.T) {
t.Parallel()

ca, err := tlsgen.NewCA()
assert.NoError(t, err)

12 changes: 0 additions & 12 deletions orderer/common/cluster/util_test.go
@@ -54,7 +54,6 @@ type policyManager interface {
}

func TestParallelStubActivation(t *testing.T) {
t.Parallel()
// Scenario: Activate the stub from different goroutines in parallel.
stub := &cluster.Stub{}
var wg sync.WaitGroup
@@ -84,7 +83,6 @@ func TestParallelStubActivation(t *testing.T) {
}

func TestDialerCustomKeepAliveOptions(t *testing.T) {
t.Parallel()
ca, err := tlsgen.NewCA()
assert.NoError(t, err)

@@ -110,8 +108,6 @@ func TestDialerCustomKeepAliveOptions(t *testing.T) {
}

func TestPredicateDialerUpdateRootCAs(t *testing.T) {
t.Parallel()

node1 := newTestNode(t)
defer node1.stop()

@@ -146,7 +142,6 @@ func TestPredicateDialerUpdateRootCAs(t *testing.T) {
}

func TestDialerBadConfig(t *testing.T) {
t.Parallel()
emptyCertificate := []byte("-----BEGIN CERTIFICATE-----\n-----END CERTIFICATE-----")
dialer := &cluster.PredicateDialer{
Config: comm.ClientConfig{
@@ -163,7 +158,6 @@ func TestDialerBadConfig(t *testing.T) {
}

func TestDERtoPEM(t *testing.T) {
t.Parallel()
ca, err := tlsgen.NewCA()
assert.NoError(t, err)
keyPair, err := ca.NewServerCertKeyPair("localhost")
@@ -172,7 +166,6 @@ func TestDERtoPEM(t *testing.T) {
}

func TestStandardDialer(t *testing.T) {
t.Parallel()
emptyCertificate := []byte("-----BEGIN CERTIFICATE-----\n-----END CERTIFICATE-----")
certPool := [][]byte{emptyCertificate}
config := comm.ClientConfig{SecOpts: comm.SecureOptions{UseTLS: true, ServerRootCAs: certPool}}
@@ -763,7 +756,6 @@ func TestConfigFromBlockBadInput(t *testing.T) {
}

func TestBlockValidationPolicyVerifier(t *testing.T) {
t.Parallel()
config := genesisconfig.Load(genesisconfig.SampleInsecureSoloProfile, configtest.GetDevConfigDir())
group, err := encoder.NewChannelGroup(config)
assert.NoError(t, err)
@@ -841,7 +833,6 @@ func TestBlockValidationPolicyVerifier(t *testing.T) {
}

func TestBlockVerifierAssembler(t *testing.T) {
t.Parallel()
config := genesisconfig.Load(genesisconfig.SampleInsecureSoloProfile, configtest.GetDevConfigDir())
group, err := encoder.NewChannelGroup(config)
assert.NoError(t, err)
@@ -934,8 +925,6 @@ func TestLastConfigBlock(t *testing.T) {
}

func TestVerificationRegistryRegisterVerifier(t *testing.T) {
t.Parallel()

blockBytes, err := ioutil.ReadFile("testdata/mychannel.block")
assert.NoError(t, err)

@@ -975,7 +964,6 @@ func TestVerificationRegistryRegisterVerifier(t *testing.T) {
}

func TestVerificationRegistry(t *testing.T) {
t.Parallel()
blockBytes, err := ioutil.ReadFile("testdata/mychannel.block")
assert.NoError(t, err)

