Skip to content

Commit

Permalink
Run BDD Compose files intelligently
Browse files Browse the repository at this point in the history
When bringing up containers with docker compose during BDD testing
a time.sleep call is not very efficient. This commit replaces this
with an intelligent probing of the containers to determine when they
are ready. It determines this by checking the output of the netstat
command on each container for listening or connected TCP connections.
Once an active connection is detected on all containers or the timeout
period elapses, execution continues or aborts respectively.

In combination with change 419, this change makes the BDD tests complete
approximately 4 minutes faster.

Signed-off-by: Julian Carrivick <cjulian@au1.ibm.com>
Change-Id: I2c89f360d04a8e5e5daeaa2aa5027e5c191a453c
  • Loading branch information
juliancarrivick-ibm committed Aug 17, 2016
1 parent c9a0166 commit 66cc54b
Show file tree
Hide file tree
Showing 6 changed files with 201 additions and 91 deletions.
25 changes: 16 additions & 9 deletions bddtests/peer_basic.feature
Original file line number Diff line number Diff line change
Expand Up @@ -1161,36 +1161,43 @@ Scenario: chaincode example02 with 4 peers, two stopped
| a | 100 | b | 200 |
Then I should have received a chaincode name
Then I wait up to "60" seconds for transaction to be committed to peers:
| vp0 | vp1 | vp2 |
| vp0 | vp1 | vp2 | vp3 |

When I query chaincode "example2" function name "query" with value "a" on peers:
| vp0 | vp1 | vp2 | vp3 |
| vp0 | vp1 | vp2 | vp3 |
Then I should get a JSON response from peers with "result.message" = "100"
| vp0 | vp1 | vp2 | vp3 |
| vp0 | vp1 | vp2 | vp3 |

Given I stop peers:
| vp2 | vp3 |
| vp2 | vp3 |

When I invoke chaincode "example2" function name "invoke" on "vp0"
|arg1|arg2|arg3|
| a | b | 10 |
Then I should have received a transactionID

Given I start peers:
| vp3 |
And I wait "15" seconds
| vp3 |

# Make sure vp3 catches up first
Then I wait up to "60" seconds for transaction to be committed to peers:
| vp0 | vp1 | vp3 |
When I query chaincode "example2" function name "query" with value "a" on peers:
| vp0 | vp1 | vp3 |
Then I should get a JSON response from peers with "result.message" = "90"
| vp0 | vp1 | vp3 |

When I invoke chaincode "example2" function name "invoke" on "vp0" "9" times
|arg1|arg2|arg3|
| a | b | 10 |
Then I should have received a transactionID
Then I wait up to "60" seconds for transaction to be committed to peers:
| vp0 | vp1 | vp3 |
| vp0 | vp1 | vp3 |

When I query chaincode "example2" function name "query" with value "a" on peers:
| vp0 | vp1 | vp3 |
| vp0 | vp1 | vp3 |
Then I should get a JSON response from peers with "result.message" = "0"
| vp0 | vp1 | vp3 |
| vp0 | vp1 | vp3 |

@issue_1874b
#@doNotDecompose
Expand Down
170 changes: 170 additions & 0 deletions bddtests/steps/bdd_compose_util.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,170 @@
#
# Copyright IBM Corp. 2016 All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import os
import time
import re

from bdd_test_util import cli_call

REST_PORT = "7050"

class ContainerData:
    """Metadata for one docker-compose container: name, IP address, env, and compose service."""

    def __init__(self, containerName, ipAddress, envFromInspect, composeService):
        self.containerName = containerName    # full docker container name (prefix + service + suffix)
        self.ipAddress = ipAddress            # IP from `docker inspect .NetworkSettings.IPAddress`
        self.envFromInspect = envFromInspect  # list of "KEY=value" strings from Config.Env
        self.composeService = composeService  # docker-compose service name (e.g. "vp0")

    def getEnv(self, key):
        """Return the first env entry starting with `key`, with that prefix stripped.

        `key` is matched as a plain prefix; callers typically include the
        trailing "=" (e.g. "CORE_PEER_ID=").  Raises Exception when no env
        entry matches.
        """
        envValue = None
        for val in self.envFromInspect:
            if val.startswith(key):
                envValue = val[len(key):]
                break
        # `is None` (identity), not `== None`: an empty-string value is a valid hit.
        if envValue is None:
            raise Exception("ENV key not found ({0}) for container ({1})".format(key, self.containerName))
        return envValue

def getDockerComposeFileArgsFromYamlFile(compose_yaml):
    """Expand a whitespace-separated list of compose files into `-f <file>` argument pairs."""
    return [token
            for composeFile in compose_yaml.split()
            for token in ("-f", composeFile)]

def parseComposeOutput(context):
    """Parse `docker-compose up` output and record ContainerData on the behave context.

    Container names are read from context.compose_error (compose writes its
    status lines to stderr).  Each container is then `docker inspect`-ed for
    its IP address, environment, and compose service label, and the results
    are merged into context.compose_containers.
    """
    # Compose prefixes container names with the basename of the working directory.
    containerNamePrefix = os.path.basename(os.getcwd()) + "_"
    containerNames = []
    for l in context.compose_error.splitlines():
        tokens = l.split()
        print(tokens)
        if 1 < len(tokens):
            # The second token is the container (or bare service) name in compose output.
            thisContainer = tokens[1]
            if containerNamePrefix not in thisContainer:
                # Bare service name: expand to the full "<prefix><service>_1" container name.
                thisContainer = containerNamePrefix + thisContainer + "_1"
            if thisContainer not in containerNames:
                containerNames.append(thisContainer)

    print("Containers started: ")
    print(containerNames)
    # Now get the Network Address for each name, and set the ContainerData onto the context.
    containerDataList = []
    for containerName in containerNames:
        output, error, returncode = \
            cli_call(context, ["docker", "inspect", "--format", "{{ .NetworkSettings.IPAddress }}", containerName], expect_success=True)
        print("container {0} has address = {1}".format(containerName, output.splitlines()[0]))
        ipAddress = output.splitlines()[0]

        # Get the environment array.  [1:-1] drops the first and last characters
        # (the surrounding brackets of docker's rendered list) before splitting.
        output, error, returncode = \
            cli_call(context, ["docker", "inspect", "--format", "{{ .Config.Env }}", containerName], expect_success=True)
        env = output.splitlines()[0][1:-1].split()

        # Get the Labels to access the com.docker.compose.service value.
        # [4:-1] strips the leading "map[" and trailing "]" of the rendered map;
        # [27:] strips the 27-character "com.docker.compose.service:" key prefix.
        output, error, returncode = \
            cli_call(context, ["docker", "inspect", "--format", "{{ .Config.Labels }}", containerName], expect_success=True)
        labels = output.splitlines()[0][4:-1].split()
        dockerComposeService = [composeService[27:] for composeService in labels if composeService.startswith("com.docker.compose.service:")][0]
        print("dockerComposeService = {0}".format(dockerComposeService))
        print("container {0} has env = {1}".format(containerName, env))
        containerDataList.append(ContainerData(containerName, ipAddress, env, dockerComposeService))
    # Merge the newly discovered containers with any already recorded on the context.
    newContainerDataList = []
    if "compose_containers" in context:
        # Start from the existing list so previously started containers are preserved.
        newContainerDataList = context.compose_containers
    newContainerDataList = newContainerDataList + containerDataList

    setattr(context, "compose_containers", newContainerDataList)
    print("")

def allContainersAreReadyWithinTimeout(context, timeout):
    """Wait until every container on the context is ready, or `timeout` seconds pass.

    Returns True when all containers become ready before the deadline,
    False as soon as any single container times out.
    """
    deadline = time.time() + timeout
    readableDeadline = time.strftime("%X", time.localtime(deadline))
    print("All containers should be up by {}".format(readableDeadline))

    # all() short-circuits, so we stop probing as soon as one container times out.
    everyContainerReady = all(containerIsReadyByTimestamp(container, deadline)
                              for container in context.compose_containers)

    if everyContainerReady:
        print("All containers in ready state, ready to proceed")
    return everyContainerReady

def containerIsReadyByTimestamp(container, timeoutTimestamp):
    """Poll `container` once per second until it is ready or the deadline passes.

    Returns True when the container becomes ready, False on timeout.
    """
    while True:
        if containerIsReady(container):
            print("{} now available".format(container.containerName))
            return True
        if timestampExceeded(timeoutTimestamp):
            print("Timed out waiting for {}".format(container.containerName))
            return False
        print("{} not ready, waiting...".format(container.containerName))
        time.sleep(1)

def timestampExceeded(timeoutTimestamp):
    """True once the wall clock has moved past `timeoutTimestamp` (epoch seconds)."""
    return timeoutTimestamp < time.time()

def containerIsNotReady(container):
    """Negation of containerIsReady, for readable polling loops."""
    if containerIsReady(container):
        return False
    return True

def containerIsReady(container):
    """A container is ready when it shows live TCP activity and, if it is a
    peer, also answers on its REST port.  Short-circuits: the REST probe only
    runs when the TCP check passes."""
    return tcpPortsAreReady(container) and restPortRespondsIfContainerIsPeer(container)

def tcpPortsAreReady(container):
    """True when netstat inside the container shows at least one TCP socket
    in the LISTEN or ESTABLISHED state."""
    netstatLines = getContainerNetstatOutput(container.containerName).splitlines()

    if any(re.search("ESTABLISHED|LISTEN", netstatLine) for netstatLine in netstatLines):
        return True

    print("No TCP connections are ready in container {}".format(container.containerName))
    return False

def getContainerNetstatOutput(containerName):
    """Run `netstat -atun` inside the named container and return its stdout.

    expect_success=False: the container may not have netstat yet (or at all),
    in which case the output is simply empty/unusable and callers treat the
    container as not ready.
    """
    stdout, _, _ = cli_call(None,
                            ["docker", "exec", containerName, "netstat", "-atun"],
                            expect_success=False)
    return stdout

def restPortRespondsIfContainerIsPeer(container):
    """Non-peer containers pass trivially; peers must answer an HTTP request
    on the REST port (via curl inside the container)."""
    if not containerIsPeer(container):
        return True

    containerName = container.containerName
    probe = ["docker", "exec", containerName, "curl", "localhost:" + REST_PORT]
    stdout, stderr, returnCode = cli_call(None, probe, expect_success=False)

    if returnCode != 0:
        print("Connection to REST Port on {} failed".format(containerName))

    return returnCode == 0

def containerIsPeer(container):
    """Heuristic: a container is a peer iff something inside it is LISTENing
    on the REST port.

    Fix: the previous check, `re.search(REST_PORT, line)`, matched the digits
    "7050" anywhere in the netstat line — e.g. inside port 17050 or any other
    digit run — producing false positives.  The pattern below only matches the
    REST port when it appears as a complete `:<port>` field.
    """
    restPortPattern = re.compile(r":" + REST_PORT + r"\b")
    netstatOutput = getContainerNetstatOutput(container.containerName)

    for line in netstatOutput.splitlines():
        if "LISTEN" in line and restPortPattern.search(line):
            return True

    return False
87 changes: 10 additions & 77 deletions bddtests/steps/peer_basic_impl.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,75 +26,11 @@
import sys, requests, json

import bdd_test_util
import bdd_compose_util

CORE_REST_PORT = 7050
JSONRPC_VERSION = "2.0"

class ContainerData:
def __init__(self, containerName, ipAddress, envFromInspect, composeService):
self.containerName = containerName
self.ipAddress = ipAddress
self.envFromInspect = envFromInspect
self.composeService = composeService

def getEnv(self, key):
envValue = None
for val in self.envFromInspect:
if val.startswith(key):
envValue = val[len(key):]
break
if envValue == None:
raise Exception("ENV key not found ({0}) for container ({1})".format(key, self.containerName))
return envValue

def parseComposeOutput(context):
"""Parses the compose output results and set appropriate values into context. Merges existing with newly composed."""
# Use the prefix to get the container name
containerNamePrefix = os.path.basename(os.getcwd()) + "_"
containerNames = []
for l in context.compose_error.splitlines():
tokens = l.split()
print(tokens)
if 1 < len(tokens):
thisContainer = tokens[1]
if containerNamePrefix not in thisContainer:
thisContainer = containerNamePrefix + thisContainer + "_1"
if thisContainer not in containerNames:
containerNames.append(thisContainer)

print("Containers started: ")
print(containerNames)
# Now get the Network Address for each name, and set the ContainerData onto the context.
containerDataList = []
for containerName in containerNames:
output, error, returncode = \
bdd_test_util.cli_call(context, ["docker", "inspect", "--format", "{{ .NetworkSettings.IPAddress }}", containerName], expect_success=True)
print("container {0} has address = {1}".format(containerName, output.splitlines()[0]))
ipAddress = output.splitlines()[0]

# Get the environment array
output, error, returncode = \
bdd_test_util.cli_call(context, ["docker", "inspect", "--format", "{{ .Config.Env }}", containerName], expect_success=True)
env = output.splitlines()[0][1:-1].split()

# Get the Labels to access the com.docker.compose.service value
output, error, returncode = \
bdd_test_util.cli_call(context, ["docker", "inspect", "--format", "{{ .Config.Labels }}", containerName], expect_success=True)
labels = output.splitlines()[0][4:-1].split()
dockerComposeService = [composeService[27:] for composeService in labels if composeService.startswith("com.docker.compose.service:")][0]
print("dockerComposeService = {0}".format(dockerComposeService))
print("container {0} has env = {1}".format(containerName, env))
containerDataList.append(ContainerData(containerName, ipAddress, env, dockerComposeService))
# Now merge the new containerData info with existing
newContainerDataList = []
if "compose_containers" in context:
# Need to merge I new list
newContainerDataList = context.compose_containers
newContainerDataList = newContainerDataList + containerDataList

setattr(context, "compose_containers", newContainerDataList)
print("")

def buildUrl(context, ipAddress, path):
schema = "http"
if 'TLS' in context.tags:
Expand All @@ -104,22 +40,19 @@ def buildUrl(context, ipAddress, path):
def currentTime():
    # Wall-clock time as "HH:MM:SS", used in log/progress messages.
    return time.strftime("%H:%M:%S")

def getDockerComposeFileArgsFromYamlFile(compose_yaml):
parts = compose_yaml.split()
args = []
for part in parts:
args = args + ["-f"] + [part]
return args

@given(u'we compose "{composeYamlFile}"')
def step_impl(context, composeYamlFile):
context.compose_yaml = composeYamlFile
fileArgsToDockerCompose = getDockerComposeFileArgsFromYamlFile(context.compose_yaml)
fileArgsToDockerCompose = bdd_compose_util.getDockerComposeFileArgsFromYamlFile(context.compose_yaml)
context.compose_output, context.compose_error, context.compose_returncode = \
bdd_test_util.cli_call(context, ["docker-compose"] + fileArgsToDockerCompose + ["up","--force-recreate", "-d"], expect_success=True)
assert context.compose_returncode == 0, "docker-compose failed to bring up {0}".format(composeYamlFile)
parseComposeOutput(context)
time.sleep(10) # Should be replaced with a definitive interlock guaranteeing that all peers/membersrvc are ready

bdd_compose_util.parseComposeOutput(context)

timeoutSeconds = 15
assert bdd_compose_util.allContainersAreReadyWithinTimeout(context, timeoutSeconds), \
"Containers did not come up within {} seconds, aborting".format(timeoutSeconds)

@when(u'requesting "{path}" from "{containerName}"')
def step_impl(context, path, containerName):
Expand Down Expand Up @@ -805,7 +738,7 @@ def compose_op(context, op):
assert 'table' in context, "table (of peers) not found in context"
assert 'compose_yaml' in context, "compose_yaml not found in context"

fileArgsToDockerCompose = getDockerComposeFileArgsFromYamlFile(context.compose_yaml)
fileArgsToDockerCompose = bdd_compose_util.getDockerComposeFileArgsFromYamlFile(context.compose_yaml)
services = context.table.headings
# Loop through services and start/stop them, and modify the container data list if successful.
for service in services:
Expand All @@ -815,7 +748,7 @@ def compose_op(context, op):
if op == "stop" or op == "pause":
context.compose_containers = [containerData for containerData in context.compose_containers if containerData.composeService != service]
else:
parseComposeOutput(context)
bdd_compose_util.parseComposeOutput(context)
print("After {0}ing, the container service list is = {1}".format(op, [containerData.composeService for containerData in context.compose_containers]))

def to_bytes(strlist):
Expand Down
2 changes: 1 addition & 1 deletion tools/dbutility/bddtests/environment.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
def before_feature(context, feature):
print("\nRunning go build")
cmd = ["go", "build", "../dump_db_stats.go"]
test_util.cli_call(context, cmd, expect_success=True)
test_util.cli_call(cmd, expect_success=True)
print("go build complete")

def after_feature(context, feature):
Expand Down
6 changes: 3 additions & 3 deletions tools/dbutility/bddtests/steps/test.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
import os
import shutil

import test_util
from test_util import cli_call

@given(u'I create a dir "{dirPath}"')
def step_impl(context, dirPath):
Expand All @@ -14,14 +14,14 @@ def step_impl(contxt, dirPath):
@when(u'I execute utility with no flag')
def step_impl(context):
cmd = ["./dump_db_stats"]
context.output, context.error, context.returncode = test_util.cli_call(context, cmd, expect_success=False)
context.output, context.error, context.returncode = cli_call(cmd, expect_success=False)

@when(u'I execute utility with flag "{flag}" and path "{path}"')
def step_impl(context, flag, path):
cmd = ["./dump_db_stats"]
cmd.append(flag)
cmd.append(path)
context.output, context.error, context.returncode = test_util.cli_call(context, cmd, expect_success=False)
context.output, context.error, context.returncode = cli_call(cmd, expect_success=False)

@then(u'I should get a process exit code "{expectedReturncode}"')
def step_impl(context, expectedReturncode):
Expand Down
2 changes: 1 addition & 1 deletion tools/dbutility/bddtests/test_util.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
import subprocess

def cli_call(context, arg_list, expect_success=True):
def cli_call(arg_list, expect_success=True):
p = subprocess.Popen(arg_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, error = p.communicate()
if p.returncode != 0:
Expand Down

0 comments on commit 66cc54b

Please sign in to comment.