From 039acbea908609caeaa5c80acded609281450856 Mon Sep 17 00:00:00 2001
From: Michael Suo
Date: Fri, 11 Oct 2019 23:14:10 -0700
Subject: [PATCH] Revert D17757197: Add CI builds

Test Plan: revert-hammer

Differential Revision: D17757197

Original commit changeset: e0522e159387

fbshipit-source-id: 10c20ff703676635afcb17ea36b0b48cd3688b7c
---
 .circleci/cimodel/data/pytorch_build_data.py  | 26 -------
 .../cimodel/data/pytorch_build_definitions.py |  8 +-
 .circleci/config.yml                          | 75 +------------------
 .../verbatim-sources/pytorch-job-specs.yml    | 19 +----
 4 files changed, 7 insertions(+), 121 deletions(-)

diff --git a/.circleci/cimodel/data/pytorch_build_data.py b/.circleci/cimodel/data/pytorch_build_data.py
index 8074ac774ee20..1d3ee8f051771 100644
--- a/.circleci/cimodel/data/pytorch_build_data.py
+++ b/.circleci/cimodel/data/pytorch_build_data.py
@@ -15,8 +15,6 @@
         XImportant("3.6"),
         ("3.6", [
             ("namedtensor", [XImportant(True)]),
-            ("parallel_tbb", [XImportant(True)]),
-            ("parallel_native", [XImportant(True)]),
         ]),
     ]),
     # TODO: bring back libtorch test
@@ -27,8 +25,6 @@
         XImportant("3.6"),  # This is actually the ASAN build
         ("3.6", [
            ("namedtensor", [XImportant(True)]),  # ASAN
-            ("parallel_tbb", [XImportant(True)]),
-            ("parallel_native", [XImportant(True)]),
         ]),
     ]),
     ("7", [
@@ -137,8 +133,6 @@ def child_constructor(self):
             "xla": XlaConfigNode,
             "namedtensor": NamedTensorConfigNode,
             "libtorch": LibTorchConfigNode,
-            "parallel_tbb": ParallelTBBConfigNode,
-            "parallel_native": ParallelNativeConfigNode,
             "important": ImportantConfigNode,
             "android_abi": AndroidAbiConfigNode,
         }
@@ -176,26 +170,6 @@ def init2(self, node_name):
     def child_constructor(self):
         return ImportantConfigNode
 
-class ParallelTBBConfigNode(TreeConfigNode):
-    def modify_label(self, label):
-        return "PARALLELTBB=" + str(label)
-
-    def init2(self, node_name):
-        self.props["parallel_backend"] = "paralleltbb"
-
-    def child_constructor(self):
-        return ImportantConfigNode
-
-class ParallelNativeConfigNode(TreeConfigNode):
-    def modify_label(self, label):
-        return "PARALLELNATIVE=" + str(label)
-
-    def init2(self, node_name):
-        self.props["parallel_backend"] = "parallelnative"
-
-    def child_constructor(self):
-        return ImportantConfigNode
-
 
 class AndroidAbiConfigNode(TreeConfigNode):
     def init2(self, node_name):
diff --git a/.circleci/cimodel/data/pytorch_build_definitions.py b/.circleci/cimodel/data/pytorch_build_definitions.py
index 4f10f6ac939fe..9455170f485ca 100644
--- a/.circleci/cimodel/data/pytorch_build_definitions.py
+++ b/.circleci/cimodel/data/pytorch_build_definitions.py
@@ -34,7 +34,6 @@ class Conf:
     is_namedtensor: bool = False
     is_libtorch: bool = False
     is_important: bool = False
-    parallel_backend: Optional[str] = None
 
     # TODO: Eliminate the special casing for docker paths
     # In the short term, we *will* need to support special casing as docker images are merged for caffe2 and pytorch
@@ -51,8 +50,6 @@ def get_parms(self, for_docker):
             leading.append("namedtensor")
         if self.is_libtorch and not for_docker:
             leading.append("libtorch")
-        if self.parallel_backend is not None and not for_docker:
-            leading.append(self.parallel_backend)
 
         cuda_parms = []
         if self.cuda_version:
@@ -231,7 +228,6 @@ def instantiate_configs():
         is_namedtensor = fc.find_prop("is_namedtensor") or False
         is_libtorch = fc.find_prop("is_libtorch") or False
         is_important = fc.find_prop("is_important") or False
-        parallel_backend = fc.find_prop("parallel_backend") or None
 
         gpu_resource = None
         if cuda_version and cuda_version != "10":
@@ -249,7 +245,6 @@
             is_namedtensor=is_namedtensor,
             is_libtorch=is_libtorch,
             is_important=is_important,
-            parallel_backend=parallel_backend,
         )
 
         if cuda_version == "9" and python_version == "3.6" and not is_libtorch:
@@ -258,8 +253,7 @@
         if (compiler_name == "gcc"
                 and compiler_version == "5.4"
                 and not is_namedtensor
-                and not is_libtorch
-                and parallel_backend is None):
+                and not is_libtorch):
             bc_breaking_check = Conf(
                 "backward-compatibility-check",
                 [],
diff --git a/.circleci/config.yml b/.circleci/config.yml
index 96a0663e29ee4..e1fe1ff9423a5 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -337,14 +337,7 @@ jobs:
             NAMED_FLAG="export BUILD_NAMEDTENSOR=1"
           fi
 
-          if [[ ${BUILD_ENVIRONMENT} == *"paralleltbb"* ]]; then
-            export PARALLEL_FLAGS="export ATEN_THREADING=TBB USE_TBB=1 "
-          elif [[ ${BUILD_ENVIRONMENT} == *"parallelnative"* ]]; then
-            export PARALLEL_FLAGS="export ATEN_THREADING=NATIVE "
-          fi
-          echo "Parallel backend flags: "${PARALLEL_FLAGS}
-
-          export COMMAND='((echo "export BUILD_ENVIRONMENT=${BUILD_ENVIRONMENT}" && echo '"$NAMED_FLAG"' && echo '"$PARALLEL_FLAGS"' && echo "source ./workspace/env" && echo "sudo chown -R jenkins workspace && cd workspace && .jenkins/pytorch/build.sh") | docker exec -u jenkins -i "$id" bash) 2>&1'
+          export COMMAND='((echo "export BUILD_ENVIRONMENT=${BUILD_ENVIRONMENT}" && echo '"$NAMED_FLAG"' && echo "source ./workspace/env" && echo "sudo chown -R jenkins workspace && cd workspace && .jenkins/pytorch/build.sh") | docker exec -u jenkins -i "$id" bash) 2>&1'
           echo ${COMMAND} > ./command.sh && unbuffer bash ./command.sh | ts
 
           # Push intermediate Docker image for next phase to use
@@ -397,12 +390,6 @@ jobs:
           else
             export COMMIT_DOCKER_IMAGE=$output_image
           fi
-          if [[ ${BUILD_ENVIRONMENT} == *"paralleltbb"* ]]; then
-            export PARALLEL_FLAGS="export ATEN_THREADING=TBB USE_TBB=1 "
-          elif [[ ${BUILD_ENVIRONMENT} == *"parallelnative"* ]]; then
-            export PARALLEL_FLAGS="export ATEN_THREADING=NATIVE "
-          fi
-          echo "Parallel backend flags: "${PARALLEL_FLAGS}
           echo "DOCKER_IMAGE: "${COMMIT_DOCKER_IMAGE}
           time docker pull ${COMMIT_DOCKER_IMAGE} >/dev/null
           if [ -n "${USE_CUDA_DOCKER_RUNTIME}" ]; then
@@ -411,9 +398,9 @@ jobs:
             export id=$(docker run -t -d -w /var/lib/jenkins ${COMMIT_DOCKER_IMAGE})
           fi
           if [[ ${BUILD_ENVIRONMENT} == *"multigpu"* ]]; then
-            export COMMAND='((echo "export BUILD_ENVIRONMENT=${BUILD_ENVIRONMENT}" && echo "${NAMED_FLAG}" && echo "${PARALLEL_FLAGS}" && echo "source ./workspace/env" && echo "sudo chown -R jenkins workspace && cd workspace && .jenkins/pytorch/multigpu-test.sh") | docker exec -u jenkins -i "$id" bash) 2>&1'
+            export COMMAND='((echo "export BUILD_ENVIRONMENT=${BUILD_ENVIRONMENT}" && echo "${NAMED_FLAG}" && echo "source ./workspace/env" && echo "sudo chown -R jenkins workspace && cd workspace && .jenkins/pytorch/multigpu-test.sh") | docker exec -u jenkins -i "$id" bash) 2>&1'
           else
-            export COMMAND='((echo "export BUILD_ENVIRONMENT=${BUILD_ENVIRONMENT}" && echo "${NAMED_FLAG}" && echo "${PARALLEL_FLAGS}" && echo "source ./workspace/env" && echo "sudo chown -R jenkins workspace && cd workspace && .jenkins/pytorch/test.sh") | docker exec -u jenkins -i "$id" bash) 2>&1'
+            export COMMAND='((echo "export BUILD_ENVIRONMENT=${BUILD_ENVIRONMENT}" && echo "${NAMED_FLAG}" && echo "source ./workspace/env" && echo "sudo chown -R jenkins workspace && cd workspace && .jenkins/pytorch/test.sh") | docker exec -u jenkins -i "$id" bash) 2>&1'
           fi
           echo ${COMMAND} > ./command.sh && unbuffer bash ./command.sh | ts
   caffe2_linux_build:
@@ -1667,34 +1654,6 @@ workflows:
           build_environment: "pytorch-namedtensor-linux-xenial-py3.6-gcc5.4-test"
           docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3.6-gcc5.4:347"
           resource_class: large
-      - pytorch_linux_build:
-          name: pytorch_paralleltbb_linux_xenial_py3_6_gcc5_4_build
-          requires:
-            - setup
-          build_environment: "pytorch-paralleltbb-linux-xenial-py3.6-gcc5.4-build"
-          docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3.6-gcc5.4:347"
-      - pytorch_linux_test:
-          name: pytorch_paralleltbb_linux_xenial_py3_6_gcc5_4_test
-          requires:
-            - setup
-            - pytorch_paralleltbb_linux_xenial_py3_6_gcc5_4_build
-          build_environment: "pytorch-paralleltbb-linux-xenial-py3.6-gcc5.4-test"
-          docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3.6-gcc5.4:347"
-          resource_class: large
-      - pytorch_linux_build:
-          name: pytorch_parallelnative_linux_xenial_py3_6_gcc5_4_build
-          requires:
-            - setup
-          build_environment: "pytorch-parallelnative-linux-xenial-py3.6-gcc5.4-build"
-          docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3.6-gcc5.4:347"
-      - pytorch_linux_test:
-          name: pytorch_parallelnative_linux_xenial_py3_6_gcc5_4_test
-          requires:
-            - setup
-            - pytorch_parallelnative_linux_xenial_py3_6_gcc5_4_build
-          build_environment: "pytorch-parallelnative-linux-xenial-py3.6-gcc5.4-test"
-          docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3.6-gcc5.4:347"
-          resource_class: large
       - pytorch_linux_build:
           name: pytorch_linux_xenial_py3_6_gcc7_build
           requires:
@@ -1747,34 +1706,6 @@ workflows:
           build_environment: "pytorch-namedtensor-linux-xenial-py3-clang5-asan-test"
           docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3-clang5-asan:347"
           resource_class: large
-      - pytorch_linux_build:
-          name: pytorch_paralleltbb_linux_xenial_py3_clang5_asan_build
-          requires:
-            - setup
-          build_environment: "pytorch-paralleltbb-linux-xenial-py3-clang5-asan-build"
-          docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3-clang5-asan:347"
-      - pytorch_linux_test:
-          name: pytorch_paralleltbb_linux_xenial_py3_clang5_asan_test
-          requires:
-            - setup
-            - pytorch_paralleltbb_linux_xenial_py3_clang5_asan_build
-          build_environment: "pytorch-paralleltbb-linux-xenial-py3-clang5-asan-test"
-          docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3-clang5-asan:347"
-          resource_class: large
-      - pytorch_linux_build:
-          name: pytorch_parallelnative_linux_xenial_py3_clang5_asan_build
-          requires:
-            - setup
-          build_environment: "pytorch-parallelnative-linux-xenial-py3-clang5-asan-build"
-          docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3-clang5-asan:347"
-      - pytorch_linux_test:
-          name: pytorch_parallelnative_linux_xenial_py3_clang5_asan_test
-          requires:
-            - setup
-            - pytorch_parallelnative_linux_xenial_py3_clang5_asan_build
-          build_environment: "pytorch-parallelnative-linux-xenial-py3-clang5-asan-test"
-          docker_image: "308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/pytorch-linux-xenial-py3-clang5-asan:347"
-          resource_class: large
       - pytorch_linux_build:
           name: pytorch_xla_linux_xenial_py3_6_clang7_build
           requires:
diff --git a/.circleci/verbatim-sources/pytorch-job-specs.yml b/.circleci/verbatim-sources/pytorch-job-specs.yml
index 110f36abefa36..9af4593aa0f66 100644
--- a/.circleci/verbatim-sources/pytorch-job-specs.yml
+++ b/.circleci/verbatim-sources/pytorch-job-specs.yml
@@ -49,14 +49,7 @@ jobs:
             NAMED_FLAG="export BUILD_NAMEDTENSOR=1"
           fi
 
-          if [[ ${BUILD_ENVIRONMENT} == *"paralleltbb"* ]]; then
-            export PARALLEL_FLAGS="export ATEN_THREADING=TBB USE_TBB=1 "
-          elif [[ ${BUILD_ENVIRONMENT} == *"parallelnative"* ]]; then
-            export PARALLEL_FLAGS="export ATEN_THREADING=NATIVE "
-          fi
-          echo "Parallel backend flags: "${PARALLEL_FLAGS}
-
-          export COMMAND='((echo "export BUILD_ENVIRONMENT=${BUILD_ENVIRONMENT}" && echo '"$NAMED_FLAG"' && echo '"$PARALLEL_FLAGS"' && echo "source ./workspace/env" && echo "sudo chown -R jenkins workspace && cd workspace && .jenkins/pytorch/build.sh") | docker exec -u jenkins -i "$id" bash) 2>&1'
+          export COMMAND='((echo "export BUILD_ENVIRONMENT=${BUILD_ENVIRONMENT}" && echo '"$NAMED_FLAG"' && echo "source ./workspace/env" && echo "sudo chown -R jenkins workspace && cd workspace && .jenkins/pytorch/build.sh") | docker exec -u jenkins -i "$id" bash) 2>&1'
           echo ${COMMAND} > ./command.sh && unbuffer bash ./command.sh | ts
 
           # Push intermediate Docker image for next phase to use
@@ -109,12 +102,6 @@ jobs:
           else
             export COMMIT_DOCKER_IMAGE=$output_image
           fi
-          if [[ ${BUILD_ENVIRONMENT} == *"paralleltbb"* ]]; then
-            export PARALLEL_FLAGS="export ATEN_THREADING=TBB USE_TBB=1 "
-          elif [[ ${BUILD_ENVIRONMENT} == *"parallelnative"* ]]; then
-            export PARALLEL_FLAGS="export ATEN_THREADING=NATIVE "
-          fi
-          echo "Parallel backend flags: "${PARALLEL_FLAGS}
           echo "DOCKER_IMAGE: "${COMMIT_DOCKER_IMAGE}
           time docker pull ${COMMIT_DOCKER_IMAGE} >/dev/null
           if [ -n "${USE_CUDA_DOCKER_RUNTIME}" ]; then
@@ -123,8 +110,8 @@ jobs:
             export id=$(docker run -t -d -w /var/lib/jenkins ${COMMIT_DOCKER_IMAGE})
           fi
           if [[ ${BUILD_ENVIRONMENT} == *"multigpu"* ]]; then
-            export COMMAND='((echo "export BUILD_ENVIRONMENT=${BUILD_ENVIRONMENT}" && echo "${NAMED_FLAG}" && echo "${PARALLEL_FLAGS}" && echo "source ./workspace/env" && echo "sudo chown -R jenkins workspace && cd workspace && .jenkins/pytorch/multigpu-test.sh") | docker exec -u jenkins -i "$id" bash) 2>&1'
+            export COMMAND='((echo "export BUILD_ENVIRONMENT=${BUILD_ENVIRONMENT}" && echo "${NAMED_FLAG}" && echo "source ./workspace/env" && echo "sudo chown -R jenkins workspace && cd workspace && .jenkins/pytorch/multigpu-test.sh") | docker exec -u jenkins -i "$id" bash) 2>&1'
           else
-            export COMMAND='((echo "export BUILD_ENVIRONMENT=${BUILD_ENVIRONMENT}" && echo "${NAMED_FLAG}" && echo "${PARALLEL_FLAGS}" && echo "source ./workspace/env" && echo "sudo chown -R jenkins workspace && cd workspace && .jenkins/pytorch/test.sh") | docker exec -u jenkins -i "$id" bash) 2>&1'
+            export COMMAND='((echo "export BUILD_ENVIRONMENT=${BUILD_ENVIRONMENT}" && echo "${NAMED_FLAG}" && echo "source ./workspace/env" && echo "sudo chown -R jenkins workspace && cd workspace && .jenkins/pytorch/test.sh") | docker exec -u jenkins -i "$id" bash) 2>&1'
           fi
           echo ${COMMAND} > ./command.sh && unbuffer bash ./command.sh | ts
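
For reference, below is a minimal sketch (not part of this patch) of the config-node pattern that the removed ParallelTBBConfigNode/ParallelNativeConfigNode classes followed. The TreeConfigNode base class here is a simplified, hypothetical stand-in; the real class in .circleci/cimodel/data/pytorch_build_data.py has a richer constructor, and real nodes are also registered in the child_constructor() dispatch dict shown in the first hunk.

# Sketch only, assuming a simplified TreeConfigNode; the real base class is
# in .circleci/cimodel/data/pytorch_build_data.py.
class TreeConfigNode:
    def __init__(self, node_name=None):
        self.props = {}
        self.init2(node_name)

    def init2(self, node_name):
        pass


class ParallelTBBConfigNode(TreeConfigNode):
    # Mirrors the removed node: it labels the config and records a
    # "parallel_backend" prop, which Conf.get_parms() folded into build
    # names such as "pytorch-paralleltbb-linux-xenial-py3.6-gcc5.4-build".
    def modify_label(self, label):
        return "PARALLELTBB=" + str(label)

    def init2(self, node_name):
        self.props["parallel_backend"] = "paralleltbb"


if __name__ == "__main__":
    node = ParallelTBBConfigNode(True)
    print(node.modify_label(True))  # PARALLELTBB=True
    print(node.props)               # {'parallel_backend': 'paralleltbb'}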