From 79e4dd005644a81afc967f586e8c8dc920e76bd3 Mon Sep 17 00:00:00 2001 From: Max Hniebergall Date: Mon, 9 Sep 2024 16:59:54 -0400 Subject: [PATCH 01/10] Change result writing to allow for a single value --- bin/pytorch_inference/CResultWriter.cc | 3 +++ bin/pytorch_inference/CResultWriter.h | 16 ++++++++++++++++ bin/pytorch_inference/Main.cc | 11 +++++++++-- 3 files changed, 28 insertions(+), 2 deletions(-) diff --git a/bin/pytorch_inference/CResultWriter.cc b/bin/pytorch_inference/CResultWriter.cc index b4ca0baeb0..34389dad44 100644 --- a/bin/pytorch_inference/CResultWriter.cc +++ b/bin/pytorch_inference/CResultWriter.cc @@ -136,6 +136,9 @@ std::string CResultWriter::createInnerResult(const ::torch::Tensor& results) { case 2: this->writePrediction<2>(results, jsonWriter); break; + case 1: + this->writePrediction<1>(results, jsonWriter); + break; default: { std::ostringstream ss; ss << "Cannot convert results tensor of size [" << sizes << ']'; diff --git a/bin/pytorch_inference/CResultWriter.h b/bin/pytorch_inference/CResultWriter.h index 037a2769f5..aeca5ea19f 100644 --- a/bin/pytorch_inference/CResultWriter.h +++ b/bin/pytorch_inference/CResultWriter.h @@ -191,6 +191,22 @@ class CResultWriter : public TStringBufWriter { jsonWriter.onObjectEnd(); } + //! Write a 2D inference result + template + void writeInferenceResults(const ::torch::TensorAccessor& accessor, + TStringBufWriter& jsonWriter) { + + jsonWriter.onKey(RESULT); + jsonWriter.onObjectBegin(); + jsonWriter.onKey(INFERENCE); + // The Java side requires a 3D array, so wrap the 2D result in an + // extra outer array. 
+ jsonWriter.onArrayBegin(); + this->writeTensor(accessor, jsonWriter); + jsonWriter.onArrayEnd(); + jsonWriter.onObjectEnd(); + } + private: core::CJsonOutputStreamWrapper m_WrappedOutputStream; }; diff --git a/bin/pytorch_inference/Main.cc b/bin/pytorch_inference/Main.cc index 662810a48c..224dd01215 100644 --- a/bin/pytorch_inference/Main.cc +++ b/bin/pytorch_inference/Main.cc @@ -69,17 +69,24 @@ torch::Tensor infer(torch::jit::script::Module& module_, } auto output = module_.forward(inputs); + LOG_ERROR(<< "output" << output); if (output.isTuple()) { // For transformers the result tensor is the first element in a tuple. all.push_back(output.toTuple()->elements()[0].toTensor()); } else { - all.push_back(output.toTensor()); + auto outputTensor = output.toTensor(); + auto sizes = outputTensor.sizes(); + if(sizeof(sizes)> 1){ + all.push_back(outputTensor.reshape((1, 1))); + } else { + all.push_back(outputTensor); + } } inputs.clear(); } - return at::cat(all, 0); + return at::cat(all); } bool handleRequest(ml::torch::CCommandParser::CRequestCacheInterface& cache, From 195457b66239bbfe9f60c77dfd9bb96c8e25d537 Mon Sep 17 00:00:00 2001 From: Max Hniebergall Date: Mon, 9 Sep 2024 17:02:52 -0400 Subject: [PATCH 02/10] change to debug logging --- bin/pytorch_inference/Main.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/pytorch_inference/Main.cc b/bin/pytorch_inference/Main.cc index 224dd01215..94197a7487 100644 --- a/bin/pytorch_inference/Main.cc +++ b/bin/pytorch_inference/Main.cc @@ -69,7 +69,7 @@ torch::Tensor infer(torch::jit::script::Module& module_, } auto output = module_.forward(inputs); - LOG_ERROR(<< "output" << output); + LOG_DEBUG(<< "output" << output); if (output.isTuple()) { // For transformers the result tensor is the first element in a tuple. 
all.push_back(output.toTuple()->elements()[0].toTensor()); From b18b63a52ba1581499c43ad1b648e4da313ee46c Mon Sep 17 00:00:00 2001 From: Max Hniebergall Date: Tue, 10 Sep 2024 09:48:09 -0400 Subject: [PATCH 03/10] Wrap output in extra array to make 3D. Improve debug logging formatting --- bin/pytorch_inference/CResultWriter.h | 8 +++++--- bin/pytorch_inference/Main.cc | 4 ++-- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/bin/pytorch_inference/CResultWriter.h b/bin/pytorch_inference/CResultWriter.h index aeca5ea19f..8d809dc9df 100644 --- a/bin/pytorch_inference/CResultWriter.h +++ b/bin/pytorch_inference/CResultWriter.h @@ -191,7 +191,7 @@ class CResultWriter : public TStringBufWriter { jsonWriter.onObjectEnd(); } - //! Write a 2D inference result + //! Write a 1D inference result template void writeInferenceResults(const ::torch::TensorAccessor& accessor, TStringBufWriter& jsonWriter) { @@ -199,11 +199,13 @@ class CResultWriter : public TStringBufWriter { jsonWriter.onKey(RESULT); jsonWriter.onObjectBegin(); jsonWriter.onKey(INFERENCE); - // The Java side requires a 3D array, so wrap the 2D result in an - // extra outer array. + // The Java side requires a 3D array, so wrap the 1D result in an + // extra outer array twice. + jsonWriter.onArrayBegin(); jsonWriter.onArrayBegin(); this->writeTensor(accessor, jsonWriter); jsonWriter.onArrayEnd(); + jsonWriter.onArrayEnd(); jsonWriter.onObjectEnd(); } diff --git a/bin/pytorch_inference/Main.cc b/bin/pytorch_inference/Main.cc index 94197a7487..865bedb2a7 100644 --- a/bin/pytorch_inference/Main.cc +++ b/bin/pytorch_inference/Main.cc @@ -69,7 +69,7 @@ torch::Tensor infer(torch::jit::script::Module& module_, } auto output = module_.forward(inputs); - LOG_DEBUG(<< "output" << output); + LOG_DEBUG(<< "output_" << i << ": " << output); if (output.isTuple()) { // For transformers the result tensor is the first element in a tuple. 
all.push_back(output.toTuple()->elements()[0].toTensor()); @@ -86,7 +86,7 @@ torch::Tensor infer(torch::jit::script::Module& module_, inputs.clear(); } - return at::cat(all); + return at::cat(all, 0); } bool handleRequest(ml::torch::CCommandParser::CRequestCacheInterface& cache, From 0252f11cb9701f38f78e6725a6c2c4b2dced169c Mon Sep 17 00:00:00 2001 From: Max Hniebergall Date: Tue, 10 Sep 2024 10:22:57 -0400 Subject: [PATCH 04/10] Added unit test --- bin/pytorch_inference/unittest/CResultWriterTest.cc | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/bin/pytorch_inference/unittest/CResultWriterTest.cc b/bin/pytorch_inference/unittest/CResultWriterTest.cc index 99333db8c3..97b99038a2 100644 --- a/bin/pytorch_inference/unittest/CResultWriterTest.cc +++ b/bin/pytorch_inference/unittest/CResultWriterTest.cc @@ -80,6 +80,18 @@ BOOST_AUTO_TEST_CASE(testCreateInnerInferenceResult) { BOOST_REQUIRE_EQUAL(expected, innerPortion); } +BOOST_AUTO_TEST_CASE(testCreateInnerInferenceResultFor1DimensionalResult) { + std::ostringstream output; + ml::torch::CResultWriter resultWriter{output}; + ::torch::Tensor tensor{::torch::ones({1})}; + std::string innerPortion{resultWriter.createInnerResult(tensor)}; + std::string expected = "\"result\":{\"inference\":" + "[[[1]]]}"; + LOG_INFO(<< "expected: " << expected); + LOG_INFO(<< "actual: " << innerPortion); + BOOST_REQUIRE_EQUAL(expected, innerPortion); +} + BOOST_AUTO_TEST_CASE(testWrapAndWriteInferenceResult) { std::string innerPortion{ "\"result\":{\"inference\":" From bd80556a18af9358645a1ec2a58510f006b91c10 Mon Sep 17 00:00:00 2001 From: Max Hniebergall Date: Tue, 10 Sep 2024 10:26:53 -0400 Subject: [PATCH 05/10] Add changelog --- docs/CHANGELOG.asciidoc | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/docs/CHANGELOG.asciidoc b/docs/CHANGELOG.asciidoc index 6f98ce1ff8..147c9ba861 100644 --- a/docs/CHANGELOG.asciidoc +++ b/docs/CHANGELOG.asciidoc @@ -35,12 +35,20 @@ * Allow the user to force a detector to shift 
time series state by a specific amount. (See {ml-pull}2695[#2695].) +=== Bug Fixes + +* Allow for pytorch_inference results to include zero-dimensional tensors. + == {es} version 8.15.2 === Enhancements * Update the Pytorch library to version 2.3.1. (See {ml-pull}2688[#2688].) +=== Bug Fixes + +* Allow for pytorch_inference results to include zero-dimensional tensors. + == {es} version 8.15.1 == {es} version 8.15.0 From 4d9a587a96575b8e4c00a6ad9d90c728d097e4fd Mon Sep 17 00:00:00 2001 From: Max Hniebergall Date: Tue, 10 Sep 2024 10:51:19 -0400 Subject: [PATCH 06/10] formatting --- bin/pytorch_inference/Main.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/pytorch_inference/Main.cc b/bin/pytorch_inference/Main.cc index 865bedb2a7..9687c85a2f 100644 --- a/bin/pytorch_inference/Main.cc +++ b/bin/pytorch_inference/Main.cc @@ -76,7 +76,7 @@ torch::Tensor infer(torch::jit::script::Module& module_, } else { auto outputTensor = output.toTensor(); auto sizes = outputTensor.sizes(); - if(sizeof(sizes)> 1){ + if (sizeof(sizes) > 1) { all.push_back(outputTensor.reshape((1, 1))); } else { all.push_back(outputTensor); From 81249b492fea2e6e4b2b7d006740bc6929f9d354 Mon Sep 17 00:00:00 2001 From: Max Hniebergall Date: Tue, 10 Sep 2024 14:43:43 -0400 Subject: [PATCH 07/10] Fix reshaping of output tensor to only reshape zero-dimensional tensors --- bin/pytorch_inference/Main.cc | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/bin/pytorch_inference/Main.cc b/bin/pytorch_inference/Main.cc index 9687c85a2f..0be63c83f6 100644 --- a/bin/pytorch_inference/Main.cc +++ b/bin/pytorch_inference/Main.cc @@ -75,9 +75,8 @@ torch::Tensor infer(torch::jit::script::Module& module_, all.push_back(output.toTuple()->elements()[0].toTensor()); } else { auto outputTensor = output.toTensor(); - auto sizes = outputTensor.sizes(); - if (sizeof(sizes) > 1) { - all.push_back(outputTensor.reshape((1, 1))); + if (outputTensor.dim() == 0) { // If the output is a 
scaler, we need to reshape it into a 1D tensor + all.push_back(outputTensor.reshape({1, 1})); } else { all.push_back(outputTensor); } From eb806ab30b196391e8aba94e607ab54943906597 Mon Sep 17 00:00:00 2001 From: Max Hniebergall Date: Tue, 10 Sep 2024 16:26:16 -0400 Subject: [PATCH 08/10] Use std::move --- bin/pytorch_inference/Main.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bin/pytorch_inference/Main.cc b/bin/pytorch_inference/Main.cc index 0be63c83f6..a1c35a3f38 100644 --- a/bin/pytorch_inference/Main.cc +++ b/bin/pytorch_inference/Main.cc @@ -76,9 +76,9 @@ torch::Tensor infer(torch::jit::script::Module& module_, } else { auto outputTensor = output.toTensor(); if (outputTensor.dim() == 0) { // If the output is a scaler, we need to reshape it into a 1D tensor - all.push_back(outputTensor.reshape({1, 1})); + all.push_back(std::move(outputTensor.reshape({1, 1}))); } else { - all.push_back(outputTensor); + all.push_back(std::move(outputTensor)); } } From 73fe5c76e3e4bbf8d5e79006a3289f73f1251ae4 Mon Sep 17 00:00:00 2001 From: Max Hniebergall Date: Thu, 12 Sep 2024 14:10:18 -0400 Subject: [PATCH 09/10] Backport fix by removing debug line; also remove std::move for reshape --- bin/pytorch_inference/Main.cc | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/bin/pytorch_inference/Main.cc b/bin/pytorch_inference/Main.cc index a1c35a3f38..92db0aacec 100644 --- a/bin/pytorch_inference/Main.cc +++ b/bin/pytorch_inference/Main.cc @@ -69,14 +69,13 @@ torch::Tensor infer(torch::jit::script::Module& module_, } auto output = module_.forward(inputs); - LOG_DEBUG(<< "output_" << i << ": " << output); if (output.isTuple()) { // For transformers the result tensor is the first element in a tuple. 
all.push_back(output.toTuple()->elements()[0].toTensor()); } else { auto outputTensor = output.toTensor(); if (outputTensor.dim() == 0) { // If the output is a scaler, we need to reshape it into a 1D tensor - all.push_back(std::move(outputTensor.reshape({1, 1}))); + all.push_back(outputTensor.reshape({1, 1})); } else { all.push_back(std::move(outputTensor)); } From 6990faeb4089419f377f10f997972515932a5a3d Mon Sep 17 00:00:00 2001 From: Max Hniebergall <137079448+maxhniebergall@users.noreply.github.com> Date: Fri, 13 Sep 2024 09:34:35 -0400 Subject: [PATCH 10/10] Update CHANGELOG.asciidoc --- docs/CHANGELOG.asciidoc | 4 ---- 1 file changed, 4 deletions(-) diff --git a/docs/CHANGELOG.asciidoc b/docs/CHANGELOG.asciidoc index 147c9ba861..9c1d528166 100644 --- a/docs/CHANGELOG.asciidoc +++ b/docs/CHANGELOG.asciidoc @@ -45,10 +45,6 @@ * Update the Pytorch library to version 2.3.1. (See {ml-pull}2688[#2688].) -=== Bug Fixes - -* Allow for pytorch_inference results to include zero-dimensional tensors. - == {es} version 8.15.1 == {es} version 8.15.0