
Commit

clean code
kexinzhao committed May 10, 2018
1 parent cbf502e commit aa2635f
Showing 2 changed files with 1 addition and 14 deletions.
11 changes: 1 addition & 10 deletions paddle/fluid/operators/load_op.cc
@@ -12,7 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <fstream>
#include <iostream>

#include "paddle/fluid/framework/data_type_transform.h"
#include "paddle/fluid/framework/op_registry.h"
@@ -53,30 +52,22 @@ class LoadOp : public framework::OperatorBase {
    auto in_dtype = framework::ToDataType(tensor->type());
    auto out_dtype = load_as_fp16 ? framework::proto::VarType::FP16 : in_dtype;

    std::cout << "In load op: " << std::endl;
    std::cout << "before conversion block" << std::endl;

    if (in_dtype != out_dtype) {
      // convert to float16 tensor
      auto in_kernel_type = framework::OpKernelType(in_dtype, place);
      auto out_kernel_type = framework::OpKernelType(out_dtype, place);
      framework::LoDTensor fp16_tensor;
      // copy LoD info to the new tensor
      fp16_tensor.set_lod(tensor->lod());
      std::cout << "before TransDataType" << std::endl;
      framework::TransDataType(in_kernel_type, out_kernel_type, *tensor,
                               &fp16_tensor);
      std::cout << "after TransDataType" << std::endl;

      // reset output tensor
      out_var->Clear();
      tensor = out_var->GetMutable<framework::LoDTensor>();
      tensor->set_lod(fp16_tensor.lod());
      std::cout << "before TransDataType" << std::endl;
      tensor->ShareDataWith(fp16_tensor);
      std::cout << "after TransDataType" << std::endl;
    }

    std::cout << "Out of load op: " << std::endl;
  }
};

4 changes: 0 additions & 4 deletions paddle/fluid/operators/save_load_op_test.cc
@@ -127,17 +127,13 @@ TEST(LoadFP16Op, CPU) {
  auto load_var = scope.Var("out_var");
  auto load_op = paddle::framework::OpRegistry::CreateOp(
      "load", {}, {{"Out", {"out_var"}}}, attrs);
  LOG(INFO) << "before load op run";
  load_op->Run(scope, place);
  LOG(INFO) << "after load op run";

  auto target = load_var->Get<paddle::framework::LoDTensor>();
  paddle::platform::float16* actual = target.data<paddle::platform::float16>();
  LOG(INFO) << "after target->data";
  for (int64_t i = 0; i < tensor->numel(); ++i) {
    EXPECT_EQ(expect[i], static_cast<float>(actual[i]));
  }
  LOG(INFO) << "after expect equal";

  auto& actual_lod = target.lod();
  EXPECT_EQ(expect_lod.size(), actual_lod.size());
