Revive static_runtime_benchmark build and test #87660

Closed
13 changes: 2 additions & 11 deletions .jenkins/pytorch/build.sh
@@ -182,17 +182,8 @@ if [[ "${BUILD_ENVIRONMENT}" == *linux-focal-py3.7-gcc7-build* ]]; then
export USE_GLOO_WITH_OPENSSL=ON
fi

-# TODO: Remove after xenial->focal migration
-if [[ "${BUILD_ENVIRONMENT}" == pytorch-linux-xenial-py3* ]]; then
-  if [[ "${BUILD_ENVIRONMENT}" != *android* && "${BUILD_ENVIRONMENT}" != *cuda* ]]; then
-    export BUILD_STATIC_RUNTIME_BENCHMARK=ON
-  fi
-fi
-
-if [[ "${BUILD_ENVIRONMENT}" == pytorch-linux-focal-py3* ]]; then
-  if [[ "${BUILD_ENVIRONMENT}" != *android* && "${BUILD_ENVIRONMENT}" != *cuda* ]]; then
-    export BUILD_STATIC_RUNTIME_BENCHMARK=ON
-  fi
+if [[ "${BUILD_ENVIRONMENT}" != *android* && "${BUILD_ENVIRONMENT}" != *cuda* ]]; then
+  export BUILD_STATIC_RUNTIME_BENCHMARK=ON
fi

if [[ "$BUILD_ENVIRONMENT" == *-bazel-* ]]; then
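
Why the old guards were dead code: as the TODO removed from test.sh below notes, BUILD_ENVIRONMENT values no longer start with pytorch-, so neither the pytorch-linux-xenial-py3* nor the pytorch-linux-focal-py3* prefix match could ever succeed, and BUILD_STATIC_RUNTIME_BENCHMARK stayed unset. A minimal sketch of the before/after behavior, using linux-focal-py3.7-gcc7-build (the value from the hunk header above) as an assumed example:

BUILD_ENVIRONMENT="linux-focal-py3.7-gcc7-build"    # assumed example value
if [[ "${BUILD_ENVIRONMENT}" == pytorch-linux-xenial-py3* ]]; then
  echo "old guard matched"                          # never reached: the pytorch- prefix is gone
fi
if [[ "${BUILD_ENVIRONMENT}" != *android* && "${BUILD_ENVIRONMENT}" != *cuda* ]]; then
  export BUILD_STATIC_RUNTIME_BENCHMARK=ON          # new guard: fires for any non-android, non-cuda build
fi
echo "${BUILD_STATIC_RUNTIME_BENCHMARK:-unset}"     # prints ON
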
9 changes: 3 additions & 6 deletions .jenkins/pytorch/test.sh
@@ -403,12 +403,9 @@ test_libtorch() {
OMP_NUM_THREADS=2 TORCH_CPP_TEST_MNIST_PATH="test/cpp/api/mnist" "$TORCH_BIN_DIR"/test_api --gtest_filter='-IMethodTest.*' --gtest_output=xml:$TEST_REPORTS_DIR/test_api.xml
"$TORCH_BIN_DIR"/test_tensorexpr --gtest_output=xml:$TEST_REPORTS_DIR/test_tensorexpr.xml

-  # TODO: this condition is never (BUILD_ENVIRONMENT doesn't start with pytorch-), need to fix this.
-  if [[ "${BUILD_ENVIRONMENT}" == pytorch-linux-xenial-py3* ]]; then
-    if [[ "${BUILD_ENVIRONMENT}" != *android* && "${BUILD_ENVIRONMENT}" != *cuda* && "${BUILD_ENVIRONMENT}" != *asan* ]]; then
-      # TODO: Consider to run static_runtime_test from $TORCH_BIN_DIR (may need modify build script)
-      "$BUILD_BIN_DIR"/static_runtime_test --gtest_output=xml:$TEST_REPORTS_DIR/static_runtime_test.xml
-    fi
+  if [[ "${BUILD_ENVIRONMENT}" != *android* && "${BUILD_ENVIRONMENT}" != *cuda* && "${BUILD_ENVIRONMENT}" != *asan* ]]; then
+    # TODO: Consider to run static_runtime_test from $TORCH_BIN_DIR (may need modify build script)
+    "$BUILD_BIN_DIR"/static_runtime_test --gtest_output=xml:$TEST_REPORTS_DIR/static_runtime_test.xml
fi
assert_git_not_dirty
fi
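
A rough way to exercise the revived test step locally; the BUILD_BIN_DIR and TEST_REPORTS_DIR values below are assumptions for a plain checkout, not necessarily what CI exports, and the binary only exists when the tree was built with BUILD_STATIC_RUNTIME_BENCHMARK=ON:

BUILD_BIN_DIR="build/bin"                 # assumed build output directory
TEST_REPORTS_DIR="test/test-reports/cpp"  # assumed report directory
mkdir -p "$TEST_REPORTS_DIR"
"$BUILD_BIN_DIR"/static_runtime_test \
  --gtest_output=xml:"$TEST_REPORTS_DIR"/static_runtime_test.xml
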
2 changes: 0 additions & 2 deletions benchmarks/static_runtime/test_generated_ops.cc
@@ -7841,7 +7841,6 @@ TEST(StaticRuntime, autogen_diagonal) {
auto offset0 = 0;
auto dim10 = 2;
auto dim20 = 1;
-auto dim00 = 1;
std::vector<IValue> args{self0, offset0, dim10, dim20};
testStaticRuntime(script, args);
}
@@ -7859,7 +7858,6 @@ TEST(StaticRuntime, autogen_linalg_diagonal) {
auto offset0 = 0;
auto dim10 = 2;
auto dim20 = 1;
-auto dim00 = 1;
std::vector<IValue> args{A0, offset0, dim10, dim20};
testStaticRuntime(script, args);
}
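
In both generated tests the dropped dim00 local is never placed into the args vector, so once these files compile again it would presumably trip -Wunused-variable (and fail a -Werror build); removing it is straightforward cleanup.
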
7 changes: 0 additions & 7 deletions benchmarks/static_runtime/test_static_module.cc
@@ -77,13 +77,6 @@ const auto sigmoid_inplace_script = R"JIT(
return (a)
)JIT";

-const auto sigmoid_out_script = R"JIT(
-  def forward(self, inp: Tensor):
-    a = inp + inp
-    b = torch.sigmoid(inp, out=a).clone()
-    return (b)
-)JIT";

} // namespace

// Test that StaticModule::value_group groups values of the graph into
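
Similarly, the removed sigmoid_out_script constant appears to have no remaining users in this file, so it is dropped rather than left as dead code in the anonymous namespace now that these sources build in CI again.
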
13 changes: 7 additions & 6 deletions benchmarks/static_runtime/test_utils.cc
@@ -124,7 +124,7 @@ void compareTensorLists(
const bool use_allclose,
const bool use_equalnan) {
EXPECT_TRUE(l.size() == r.size());
-for (int i = 0; i < l.size(); ++i) {
+for (auto i : c10::irange(l.size())) {
ASSERT_TRUE(l[i].isTensor());
ASSERT_TRUE(r[i].isTensor());
VLOG(2) << "expect " << i << ": \n" << l[i] << std::endl;
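
The c10::irange form iterates with the container's own size type, so the loop no longer compares a signed int against l.size(); that signed/unsigned comparison would warn under -Wsign-compare, and c10::irange is the prevailing loop idiom elsewhere in the codebase.
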
@@ -298,11 +298,12 @@ void testStaticRuntime(
// 1st run: collect allocation profiles (args)
// 2nd run: exercise memory planner and resizing with args2
// 3rd run: run with args again
-  StaticModuleOptions opts{
-      .enable_out_variant = enable_out_variant,
-      .optimize_memory = enable_out_variant,
-      .manage_output_tensors = manage_output_tensors,
-      .enable_tensorexpr_fusion = enable_tensorexpr_fusion};
+  StaticModuleOptions opts;
+  opts.enable_out_variant = enable_out_variant;
+  opts.optimize_memory = enable_out_variant;
+  opts.manage_output_tensors = manage_output_tensors;
+  opts.enable_tensorexpr_fusion = enable_tensorexpr_fusion;

auto smodule = test_context->makeStaticModule(opts);
StaticRuntime runtime(smodule);
auto actual = runtime(args, {});
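
Designated initializers for aggregates are a C++20 feature (available only as a compiler extension before that), so spelling the options out as plain member assignments presumably keeps this helper building cleanly across the toolchains that now compile it.
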
2 changes: 1 addition & 1 deletion torch/csrc/jit/runtime/static/passes.h
@@ -21,7 +21,7 @@ TORCH_API void ReplacePermuteWithCopy(
std::shared_ptr<torch::jit::Graph>& graph,
bool outputs_are_immutable = true);

-void ReplaceWithMaybeCopy(
+TORCH_API void ReplaceWithMaybeCopy(
std::shared_ptr<torch::jit::Graph>& graph,
bool outputs_are_immutable = true);

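
TORCH_API is the export/visibility macro used throughout these headers; without it, ReplaceWithMaybeCopy is not exported the way the neighboring ReplacePermuteWithCopy is, so a separately linked binary such as static_runtime_test would presumably fail to resolve the symbol. Adding the macro brings the declaration in line with the rest of passes.h.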