diff --git a/bddtests/.behaverc b/bddtests/.behaverc
index 5a1ddc883cb..d3f1cda0802 100644
--- a/bddtests/.behaverc
+++ b/bddtests/.behaverc
@@ -6,3 +6,4 @@ tags=~@issue_767
  ~@issue_1565
  ~@issue_RBAC_TCERT_With_Attributes
  ~@sdk
+ ~@TLS
diff --git a/bddtests/docker-compose-1-empty.yml b/bddtests/docker-compose-1-empty.yml
index 75a1edf9070..90d26ceec15 100644
--- a/bddtests/docker-compose-1-empty.yml
+++ b/bddtests/docker-compose-1-empty.yml
@@ -1,3 +1,4 @@
 empty:
   image: hyperledger/fabric-src
-  command: bash -c "sleep inf"
+  # TCP Listen on a port to satisfy the container 'ready' condition
+  command: nc -k -l 50000
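The "empty" service above now keeps a TCP port open (nc -k -l 50000) purely so that the generic container-readiness probe introduced later in this change has a socket to detect. A minimal sketch of that style of probe, assuming docker and netstat are available inside the container and using an illustrative container name; the real implementation is tcpPortsAreReady()/getContainerNetstatOutput() in bdd_compose_util.py below:

import re, subprocess

def container_has_open_tcp_port(container_name="bddtests_empty_1"):
    # List all TCP/UDP sockets inside the container; any LISTEN or ESTABLISHED
    # entry (for example the `nc -k -l 50000` listener) counts as "ready".
    netstat = subprocess.check_output(
        ["docker", "exec", container_name, "netstat", "-atun"]).decode()
    return any(re.search("ESTABLISHED|LISTEN", line) for line in netstat.splitlines())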
diff --git a/bddtests/peer_basic.feature b/bddtests/peer_basic.feature
index 2d88bf93575..b786718d3fd 100644
--- a/bddtests/peer_basic.feature
+++ b/bddtests/peer_basic.feature
@@ -628,19 +628,16 @@ Feature: Network of Peers
       | docker-compose-4-consensus-noops.yml  |   60   |
 
 
-#    @doNotDecompose
-#    @wip
-  Scenario: basic startup of 3 validating peers
-    Given we compose "docker-compose-3.yml"
-    When requesting "/chain" from "vp0"
-    Then I should get a JSON response with "height" = "1"
+  Scenario: basic startup of 3 validating peers
+      Given we compose "docker-compose-3.yml"
+      When requesting "/chain" from "vp0"
+      Then I should get a JSON response with "height" = "1"
 
-  @TLS
-#  @doNotDecompose
-  Scenario: basic startup of 2 validating peers using TLS
-    Given we compose "docker-compose-2-tls-basic.yml"
-    When requesting "/chain" from "vp0"
-    Then I should get a JSON response with "height" = "1"
+  @TLS
+  Scenario: basic startup of 2 validating peers using TLS
+      Given we compose "docker-compose-2-tls-basic.yml"
+      When requesting "/chain" from "vp0"
+      Then I should get a JSON response with "height" = "1"
 
   Scenario Outline: 4 peers and 1 membersrvc, consensus still works if one backup replica fails
 
@@ -895,7 +892,8 @@ Feature: Network of Peers
 
 
   @issue_1091
-  Scenario Outline: chaincode example02 with 4 peers and 1 membersrvc, issue #1019 (out of date peer)
+  @doNotDecompose
+  Scenario Outline: chaincode example02 with 4 peers and 1 membersrvc, issue #1091 (out of date peer)
       Given we compose "<ComposeFile>"
       And I register with CA supplying username "binhn" and secret "7avZQLwcUe9q" on peers:
 
@@ -938,22 +936,24 @@ Feature: Network of Peers
 
       # Now start vp3 again
       Given I start peers:
            | vp3  |
-      And I wait "15" seconds
 
-      # Invoke 8 more txs, this will trigger a state transfer, but it cannot complete
+      # Invoke some more txs, this will trigger a state transfer, but it cannot complete
       When I invoke chaincode "example2" function name "invoke" on "vp0" "8" times
           |arg1|arg2|arg3|
           | a  | b  | 10 |
       Then I should have received a transactionID
       Then I wait up to "<WaitTime>" seconds for transaction to be committed to peers:
            | vp0  | vp1 | vp2 |
-      # wait a bit to make sure the state is invalid on vp3
-      Then I wait "20" seconds
       When I query chaincode "example2" function name "query" with value "a" on peers:
            | vp0  | vp1 | vp2 |
-      Then I should get a JSON response from peers with "result.message" = "21"
            | vp0  | vp1 | vp2 |
-      When I unconditionally query chaincode "example2" function name "query" with value "a" on peers:
+      Then I should get a JSON response from peers with "result.message" = "21"
+
+      # Force VP3 to attempt to sync with the rest of the peers
+      When I invoke chaincode "example2" function name "invoke" on "vp3"
+          |arg1|arg2|arg3|
+          | a  | b  | 10 |
+      And I unconditionally query chaincode "example2" function name "query" with value "a" on peers:
            | vp3  |
       Then I should get a JSON response from peers with "error.data" = "Error when querying chaincode: Error: state may be inconsistent, cannot query"
            | vp3  |
@@ -1159,15 +1159,15 @@ Scenario: chaincode example02 with 4 peers, two stopped
 
           | a |  100  | b |  200  |
       Then I should have received a chaincode name
       Then I wait up to "60" seconds for transaction to be committed to peers:
-           | vp0  | vp1 | vp2 |
+            | vp0  | vp1 | vp2 | vp3 |
       When I query chaincode "example2" function name "query" with value "a" on peers:
-           | vp0  | vp1 | vp2 | vp3 |
+            | vp0  | vp1 | vp2 | vp3 |
       Then I should get a JSON response from peers with "result.message" = "100"
-           | vp0  | vp1 | vp2 | vp3 |
+            | vp0  | vp1 | vp2 | vp3 |
 
       Given I stop peers:
-           | vp2  | vp3 |
+            | vp2  | vp3  |
 
       When I invoke chaincode "example2" function name "invoke" on "vp0"
           |arg1|arg2|arg3|
@@ -1175,20 +1175,27 @@ Scenario: chaincode example02 with 4 peers, two stopped
       Then I should have received a transactionID
 
       Given I start peers:
-           | vp3  |
-      And I wait "15" seconds
+            | vp3  |
+
+      # Make sure vp3 catches up first
+      Then I wait up to "60" seconds for transaction to be committed to peers:
+            | vp0  | vp1 | vp3 |
+      When I query chaincode "example2" function name "query" with value "a" on peers:
+            | vp0  | vp1 | vp3 |
+      Then I should get a JSON response from peers with "result.message" = "90"
+            | vp0  | vp1 | vp3 |
 
       When I invoke chaincode "example2" function name "invoke" on "vp0" "9" times
           |arg1|arg2|arg3|
           | a  | b  | 10 |
       Then I should have received a transactionID
       Then I wait up to "60" seconds for transaction to be committed to peers:
-           | vp0  | vp1 | vp3 |
+            | vp0  | vp1 | vp3 |
       When I query chaincode "example2" function name "query" with value "a" on peers:
-           | vp0  | vp1 | vp3 |
+            | vp0  | vp1 | vp3 |
       Then I should get a JSON response from peers with "result.message" = "0"
-           | vp0  | vp1 | vp3 |
+            | vp0  | vp1 | vp3 |
 
   @issue_1874b
   #@doNotDecompose
diff --git a/bddtests/steps/bdd_compose_util.py b/bddtests/steps/bdd_compose_util.py
new file mode 100644
index 00000000000..f1f33df9ccb
--- /dev/null
+++ b/bddtests/steps/bdd_compose_util.py
@@ -0,0 +1,238 @@
+#
+# Copyright IBM Corp. 2016 All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import os, time, re, requests
+
+from bdd_rest_util import buildUrl, CORE_REST_PORT
+from bdd_json_util import getAttributeFromJSON
+from bdd_test_util import cli_call, bdd_log
+
+class ContainerData:
+    def __init__(self, containerName, ipAddress, envFromInspect, composeService):
+        self.containerName = containerName
+        self.ipAddress = ipAddress
+        self.envFromInspect = envFromInspect
+        self.composeService = composeService
+
+    def getEnv(self, key):
+        envValue = None
+        for val in self.envFromInspect:
+            if val.startswith(key):
+                envValue = val[len(key):]
+                break
+        if envValue == None:
+            raise Exception("ENV key not found ({0}) for container ({1})".format(key, self.containerName))
+        return envValue
+
+    def __str__(self):
+        return "{} - {}".format(self.containerName, self.ipAddress)
+
+    def __repr__(self):
+        return self.__str__()
+
+def getDockerComposeFileArgsFromYamlFile(compose_yaml):
+    parts = compose_yaml.split()
+    args = []
+    for part in parts:
+        args = args + ["-f"] + [part]
+    return args
+
+def parseComposeOutput(context):
+    """Parses the compose output results and set appropriate values into context. Merges existing with newly composed."""
+    # Use the prefix to get the container name
+    containerNamePrefix = os.path.basename(os.getcwd()) + "_"
+    containerNames = []
+    for l in context.compose_error.splitlines():
+        tokens = l.split()
+        bdd_log(tokens)
+        if 1 < len(tokens):
+            thisContainer = tokens[1]
+            if containerNamePrefix not in thisContainer:
+                thisContainer = containerNamePrefix + thisContainer + "_1"
+            if thisContainer not in containerNames:
+                containerNames.append(thisContainer)
+
+    bdd_log("Containers started: ")
+    bdd_log(containerNames)
+    # Now get the Network Address for each name, and set the ContainerData onto the context.
+    containerDataList = []
+    for containerName in containerNames:
+        output, error, returncode = \
+            cli_call(["docker", "inspect", "--format", "{{ .NetworkSettings.IPAddress }}", containerName], expect_success=True)
+        bdd_log("container {0} has address = {1}".format(containerName, output.splitlines()[0]))
+        ipAddress = output.splitlines()[0]
+
+        # Get the environment array
+        output, error, returncode = \
+            cli_call(["docker", "inspect", "--format", "{{ .Config.Env }}", containerName], expect_success=True)
+        env = output.splitlines()[0][1:-1].split()
+
+        # Get the Labels to access the com.docker.compose.service value
+        output, error, returncode = \
+            cli_call(["docker", "inspect", "--format", "{{ .Config.Labels }}", containerName], expect_success=True)
+        labels = output.splitlines()[0][4:-1].split()
+        dockerComposeService = [composeService[27:] for composeService in labels if composeService.startswith("com.docker.compose.service:")][0]
+        bdd_log("dockerComposeService = {0}".format(dockerComposeService))
+        bdd_log("container {0} has env = {1}".format(containerName, env))
+        containerDataList.append(ContainerData(containerName, ipAddress, env, dockerComposeService))
+    # Now merge the new containerData info with existing
+    newContainerDataList = []
+    if "compose_containers" in context:
+        # Need to merge I new list
+        newContainerDataList = context.compose_containers
+    newContainerDataList = newContainerDataList + containerDataList
+
+    setattr(context, "compose_containers", newContainerDataList)
+    bdd_log("")
+
+def allContainersAreReadyWithinTimeout(context, timeout):
+    timeoutTimestamp = time.time() + timeout
+    formattedTime = time.strftime("%X", time.localtime(timeoutTimestamp))
+    bdd_log("All containers should be up by {}".format(formattedTime))
+
+    allContainers = context.compose_containers
+
+    for container in allContainers:
+        if not containerIsInitializedByTimestamp(container, timeoutTimestamp):
+            return False
+
+    peersAreReady = peersAreReadyByTimestamp(context, allContainers, timeoutTimestamp)
+
+    if peersAreReady:
+        bdd_log("All containers in ready state, ready to proceed")
+
+    return peersAreReady
+
+def containerIsInitializedByTimestamp(container, timeoutTimestamp):
+    while containerIsNotInitialized(container):
+        if timestampExceeded(timeoutTimestamp):
+            bdd_log("Timed out waiting for {} to initialize".format(container.containerName))
+            return False
+
+        bdd_log("{} not initialized, waiting...".format(container.containerName))
+        time.sleep(1)
+
+    bdd_log("{} now available".format(container.containerName))
+    return True
+
+def timestampExceeded(timeoutTimestamp):
+    return time.time() > timeoutTimestamp
+
+def containerIsNotInitialized(container):
+    return not containerIsInitialized(container)
+
+def containerIsInitialized(container):
+    isReady = tcpPortsAreReady(container)
+    isReady = isReady and restPortRespondsIfContainerIsPeer(container)
+
+    return isReady
+
+def tcpPortsAreReady(container):
+    netstatOutput = getContainerNetstatOutput(container.containerName)
+
+    for line in netstatOutput.splitlines():
+        if re.search("ESTABLISHED|LISTEN", line):
+            return True
+
+    bdd_log("No TCP connections are ready in container {}".format(container.containerName))
+    return False
+
+def getContainerNetstatOutput(containerName):
+    command = ["docker", "exec", containerName, "netstat", "-atun"]
+    stdout, stderr, returnCode = cli_call(command, expect_success=False)
+
+    return stdout
+
+def restPortRespondsIfContainerIsPeer(container):
+    containerName = container.containerName
+    command = ["docker", "exec", containerName, "curl", "localhost:{}".format(CORE_REST_PORT)]
+
+    if containerIsPeer(container):
+        stdout, stderr, returnCode = cli_call(command, expect_success=False)
+
+        if returnCode != 0:
+            bdd_log("Connection to REST Port on {} failed".format(containerName))
+
+        return returnCode == 0
+
+    return True
+
+def peersAreReadyByTimestamp(context, containers, timeoutTimestamp):
+    peers = getPeerContainers(containers)
+    bdd_log("Detected Peers: {}".format(peers))
+
+    for peer in peers:
+        if not peerIsReadyByTimestamp(context, peer, peers, timeoutTimestamp):
+            return False
+
+    return True
+
+def getPeerContainers(containers):
+    peers = []
+
+    for container in containers:
+        if containerIsPeer(container):
+            peers.append(container)
+
+    return peers
+
+def containerIsPeer(container):
+    # This is not an ideal way of detecting whether a container is a peer or not since
+    # we are depending on the name of the container. Another way of detecting peers
+    # is to determine if the container is listening on the REST port. However, this method
+    # may run before the listening port is ready. Hence, as long as the current
+    # convention of vp[0-9] is adhered to, this function will be good enough.
+    return re.search("vp[0-9]+", container.containerName, re.IGNORECASE)
+
+def peerIsReadyByTimestamp(context, peerContainer, allPeerContainers, timeoutTimestamp):
+    while peerIsNotReady(context, peerContainer, allPeerContainers):
+        if timestampExceeded(timeoutTimestamp):
+            bdd_log("Timed out waiting for peer {}".format(peerContainer.containerName))
+            return False
+
+        bdd_log("Peer {} not ready, waiting...".format(peerContainer.containerName))
+        time.sleep(1)
+
+    bdd_log("Peer {} now available".format(peerContainer.containerName))
+    return True
+
+def peerIsNotReady(context, thisPeer, allPeers):
+    return not peerIsReady(context, thisPeer, allPeers)
+
+def peerIsReady(context, thisPeer, allPeers):
+    connectedPeers = getConnectedPeersFromPeer(context, thisPeer)
+
+    if connectedPeers is None:
+        return False
+
+    numPeers = len(allPeers)
+    numConnectedPeers = len(connectedPeers)
+
+    if numPeers != numConnectedPeers:
+        bdd_log("Expected {} peers, got {}".format(numPeers, numConnectedPeers))
+        bdd_log("Connected Peers: {}".format(connectedPeers))
+        bdd_log("Expected Peers: {}".format(allPeers))
+
+    return numPeers == numConnectedPeers
+
+def getConnectedPeersFromPeer(context, thisPeer):
+    url = buildUrl(context, thisPeer.ipAddress, "/network/peers")
+    response = requests.get(url, headers={'Accept': 'application/json'}, verify=False)
+
+    if response.status_code != 200:
+        return None
+
+    return getAttributeFromJSON("peers", response.json(), "There should be a peer json attribute")
\ No newline at end of file
diff --git a/bddtests/steps/bdd_json_util.py b/bddtests/steps/bdd_json_util.py
new file mode 100644
index 00000000000..44fc4cdd097
--- /dev/null
+++ b/bddtests/steps/bdd_json_util.py
@@ -0,0 +1,24 @@
+#
+# Copyright IBM Corp. 2016 All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+def getAttributeFromJSON(attribute, jsonObject, msg):
+    return getHierarchyAttributesFromJSON(attribute.split("."), jsonObject, msg)
+
+def getHierarchyAttributesFromJSON(attributes, jsonObject, msg):
+    if len(attributes) > 0:
+        assert attributes[0] in jsonObject, msg
+        return getHierarchyAttributesFromJSON(attributes[1:], jsonObject[attributes[0]], msg)
+    return jsonObject
\ No newline at end of file
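getAttributeFromJSON() walks a dotted attribute path and asserts that every level exists, which is how step expectations such as "result.message" = "100" are resolved against REST responses. A small usage sketch with an illustrative payload:

from bdd_json_util import getAttributeFromJSON

# Hypothetical response body, shaped like the chaincode query replies checked in peer_basic.feature.
response = {"result": {"status": "OK", "message": "100"}}

message = getAttributeFromJSON("result.message", response,
                               "There should be a result.message attribute")
assert message == "100"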
diff --git a/bddtests/steps/bdd_rest_util.py b/bddtests/steps/bdd_rest_util.py
new file mode 100644
index 00000000000..0bde0de8e26
--- /dev/null
+++ b/bddtests/steps/bdd_rest_util.py
@@ -0,0 +1,23 @@
+#
+# Copyright IBM Corp. 2016 All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+CORE_REST_PORT = "7050"
+
+def buildUrl(context, ipAddress, path):
+    schema = "http"
+    if 'TLS' in context.tags:
+        schema = "https"
+    return "{0}://{1}:{2}{3}".format(schema, ipAddress, CORE_REST_PORT, path)
\ No newline at end of file
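buildUrl() chooses the URL scheme from the @TLS tag on the running scenario, so the same step definitions work for both plain and TLS compositions. A sketch of how it resolves a request target; the context object here is a stand-in for behave's context and the IP address is illustrative:

import requests
from bdd_rest_util import buildUrl

class FakeContext(object):
    tags = {"TLS"}  # stand-in for behave's context; only .tags is consulted

url = buildUrl(FakeContext(), "172.17.0.2", "/chain")  # -> https://172.17.0.2:7050/chain
response = requests.get(url, verify=False)  # the test network uses self-signed certificates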
diff --git a/bddtests/steps/bdd_test_util.py b/bddtests/steps/bdd_test_util.py
index b7f6758f1b0..e48af5dbfac 100644
--- a/bddtests/steps/bdd_test_util.py
+++ b/bddtests/steps/bdd_test_util.py
@@ -16,6 +16,7 @@
 
 import os
 import re
+import time
 import subprocess
 
 def cli_call(arg_list, expect_success=True):
@@ -117,3 +118,9 @@ def getContainerDataValuesFromContext(context, aliases, callback):
 def start_background_process(context, program_name, arg_list):
     p = subprocess.Popen(arg_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
     setattr(context, program_name, p)
+
+def bdd_log(msg):
+    print("{} - {}".format(currentTime(), msg))
+
+def currentTime():
+    return time.strftime("%H:%M:%S")
\ No newline at end of file
diff --git a/bddtests/steps/peer_basic_impl.py b/bddtests/steps/peer_basic_impl.py
index 431d47d9a05..9fc8da70b7b 100644
--- a/bddtests/steps/peer_basic_impl.py
+++ b/bddtests/steps/peer_basic_impl.py
@@ -25,101 +25,27 @@
 
 import sys, requests, json
 
+import bdd_compose_util
 import bdd_test_util
+from bdd_test_util import currentTime
+from bdd_rest_util import buildUrl
+from bdd_json_util import getAttributeFromJSON
 
-CORE_REST_PORT = 7050
 JSONRPC_VERSION = "2.0"
 
-class ContainerData:
-    def __init__(self, containerName, ipAddress, envFromInspect, composeService):
-        self.containerName = containerName
-        self.ipAddress = ipAddress
-        self.envFromInspect = envFromInspect
-        self.composeService = composeService
-
-    def getEnv(self, key):
-        envValue = None
-        for val in self.envFromInspect:
-            if val.startswith(key):
-                envValue = val[len(key):]
-                break
-        if envValue == None:
-            raise Exception("ENV key not found ({0}) for container ({1})".format(key, self.containerName))
-        return envValue
-
-def parseComposeOutput(context):
-    """Parses the compose output results and set appropriate values into context. Merges existing with newly composed."""
-    # Use the prefix to get the container name
-    containerNamePrefix = os.path.basename(os.getcwd()) + "_"
-    containerNames = []
-    for l in context.compose_error.splitlines():
-        tokens = l.split()
-        print(tokens)
-        if 1 < len(tokens):
-            thisContainer = tokens[1]
-            if containerNamePrefix not in thisContainer:
-                thisContainer = containerNamePrefix + thisContainer + "_1"
-            if thisContainer not in containerNames:
-                containerNames.append(thisContainer)
-
-    print("Containers started: ")
-    print(containerNames)
-    # Now get the Network Address for each name, and set the ContainerData onto the context.
-    containerDataList = []
-    for containerName in containerNames:
-        output, error, returncode = \
-            bdd_test_util.cli_call(["docker", "inspect", "--format", "{{ .NetworkSettings.IPAddress }}", containerName], expect_success=True)
-        print("container {0} has address = {1}".format(containerName, output.splitlines()[0]))
-        ipAddress = output.splitlines()[0]
-
-        # Get the environment array
-        output, error, returncode = \
-            bdd_test_util.cli_call(["docker", "inspect", "--format", "{{ .Config.Env }}", containerName], expect_success=True)
-        env = output.splitlines()[0][1:-1].split()
-
-        # Get the Labels to access the com.docker.compose.service value
-        output, error, returncode = \
-            bdd_test_util.cli_call(["docker", "inspect", "--format", "{{ .Config.Labels }}", containerName], expect_success=True)
-        labels = output.splitlines()[0][4:-1].split()
-        dockerComposeService = [composeService[27:] for composeService in labels if composeService.startswith("com.docker.compose.service:")][0]
-        print("dockerComposeService = {0}".format(dockerComposeService))
-        print("container {0} has env = {1}".format(containerName, env))
-        containerDataList.append(ContainerData(containerName, ipAddress, env, dockerComposeService))
-    # Now merge the new containerData info with existing
-    newContainerDataList = []
-    if "compose_containers" in context:
-        # Need to merge I new list
-        newContainerDataList = context.compose_containers
-    newContainerDataList = newContainerDataList + containerDataList
-
-    setattr(context, "compose_containers", newContainerDataList)
-    print("")
-
-def buildUrl(context, ipAddress, path):
-    schema = "http"
-    if 'TLS' in context.tags:
-        schema = "https"
-    return "{0}://{1}:{2}{3}".format(schema, ipAddress, CORE_REST_PORT, path)
-
-def currentTime():
-    return time.strftime("%H:%M:%S")
-
-def getDockerComposeFileArgsFromYamlFile(compose_yaml):
-    parts = compose_yaml.split()
-    args = []
-    for part in parts:
-        args = args + ["-f"] + [part]
-    return args
-
 @given(u'we compose "{composeYamlFile}"')
 def step_impl(context, composeYamlFile):
     context.compose_yaml = composeYamlFile
-    fileArgsToDockerCompose = getDockerComposeFileArgsFromYamlFile(context.compose_yaml)
+    fileArgsToDockerCompose = bdd_compose_util.getDockerComposeFileArgsFromYamlFile(context.compose_yaml)
     context.compose_output, context.compose_error, context.compose_returncode = \
         bdd_test_util.cli_call(["docker-compose"] + fileArgsToDockerCompose + ["up","--force-recreate", "-d"], expect_success=True)
     assert context.compose_returncode == 0, "docker-compose failed to bring up {0}".format(composeYamlFile)
-    parseComposeOutput(context)
-    time.sleep(10)              # Should be replaced with a definitive interlock guaranteeing that all peers/membersrvc are ready
+
+    bdd_compose_util.parseComposeOutput(context)
+
+    timeoutSeconds = 15
+    assert bdd_compose_util.allContainersAreReadyWithinTimeout(context, timeoutSeconds), \
+        "Containers did not come up within {} seconds, aborting".format(timeoutSeconds)
 
 @when(u'requesting "{path}" from "{containerName}"')
 def step_impl(context, path, containerName):
@@ -143,15 +69,6 @@ def step_impl(context, attribute):
     except AssertionError:
         print("Attribute not found as was expected.")
 
-def getAttributeFromJSON(attribute, jsonObject, msg):
-    return getHierarchyAttributesFromJSON(attribute.split("."), jsonObject, msg)
-
-def getHierarchyAttributesFromJSON(attributes, jsonObject, msg):
-    if len(attributes) > 0:
-        assert attributes[0] in jsonObject, msg
-        return getHierarchyAttributesFromJSON(attributes[1:], jsonObject[attributes[0]], msg)
-    return jsonObject
-
 def formatStringToCompare(value):
     # double quotes are replaced by simple quotes because is not possible escape double quotes in the attribute parameters.
     return str(value).replace("\"", "'")
 
@@ -807,7 +724,7 @@ def compose_op(context, op):
     assert 'table' in context, "table (of peers) not found in context"
     assert 'compose_yaml' in context, "compose_yaml not found in context"
 
-    fileArgsToDockerCompose = getDockerComposeFileArgsFromYamlFile(context.compose_yaml)
+    fileArgsToDockerCompose = bdd_compose_util.getDockerComposeFileArgsFromYamlFile(context.compose_yaml)
     services = context.table.headings
     # Loop through services and start/stop them, and modify the container data list if successful.
     for service in services:
@@ -817,7 +734,7 @@ def compose_op(context, op):
         if op == "stop" or op == "pause":
            context.compose_containers = [containerData for containerData in context.compose_containers if containerData.composeService != service]
         else:
-            parseComposeOutput(context)
+            bdd_compose_util.parseComposeOutput(context)
         print("After {0}ing, the container service list is = {1}".format(op, [containerData.composeService for containerData in context.compose_containers]))
 
 def to_bytes(strlist):
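Taken together, the compose step now waits on an explicit readiness interlock instead of the old fixed time.sleep(10): every container must show an open TCP port, every vp* peer must answer on the REST port, and each peer must report the full peer set at /network/peers. A condensed sketch of the flow the updated @given step follows, wrapped in a helper purely for illustration (the 15-second timeout matches peer_basic_impl.py):

import bdd_compose_util, bdd_test_util

def bring_up(context, compose_yaml="docker-compose-3.yml"):
    file_args = bdd_compose_util.getDockerComposeFileArgsFromYamlFile(compose_yaml)
    context.compose_output, context.compose_error, context.compose_returncode = \
        bdd_test_util.cli_call(["docker-compose"] + file_args + ["up", "--force-recreate", "-d"],
                               expect_success=True)

    # Record the started containers on the behave context...
    bdd_compose_util.parseComposeOutput(context)

    # ...then poll until every container and peer is ready, or fail fast with a clear message.
    assert bdd_compose_util.allContainersAreReadyWithinTimeout(context, 15), \
        "Containers did not come up within 15 seconds, aborting"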