Remove unused arguments from formal list.
test=develop
Xreki committed Nov 14, 2019
1 parent 82e5670 commit 9a7799f
Showing 2 changed files with 10 additions and 7 deletions.
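In fusion_group, an OperationExpression refers to its operand tensors by integer id, and DistilInputIds collects those ids to build the formal parameter list of the generated kernel. This commit treats a negative id as an "unused operand" sentinel: such operands are skipped, so they no longer appear as formal parameters. A hypothetical before/after sketch of the effect on a generated signature (parameter names and order are illustrative assumptions, not the generator's actual output):

// Illustrative only: parameter names/order are assumptions.
//
// Before: an operand the expression never reads (id 3 below) still became
// a formal parameter, and the caller had to supply a placeholder for it:
//   __global__ void elementwise_grad_kernel_0(int n, float* t0, float* t1,
//                                             float* t2, float* t3, ...);
//
// After: ids < 0 are skipped when the formal list is distilled, so the
// dead operand vanishes from both the signature and the launch arguments:
//   __global__ void elementwise_grad_kernel_0(int n, float* t0, float* t1,
//                                             float* t2, ...);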
paddle/fluid/framework/ir/fusion_group/code_generator.cc (4 additions, 1 deletion)
@@ -14,6 +14,7 @@ limitations under the License. */
 
 #include "paddle/fluid/framework/ir/fusion_group/code_generator.h"
 #include <sstream>
+#include <unordered_set>
 #include "paddle/fluid/framework/ir/fusion_group/code_generator_helper.h"
 #include "paddle/fluid/framework/ir/fusion_group/operation.h"

@@ -106,7 +107,9 @@ std::set<int> CodeGenerator::DistilInputIds(
   // Use std::set to remove the repeated ids and get an ordered list.
   for (size_t i = 0; i < expressions.size(); i++) {
     for (auto id : expressions[i].GetInputIds()) {
-      input_ids.insert(id);
+      if (id >= 0) {
+        input_ids.insert(id);
+      }
     }
   }
   return input_ids;
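Taken on its own, the new DistilInputIds logic is a small filtering idiom: a std::set collapses duplicate ids and yields them in ascending order, and the negative sentinel is dropped. A minimal standalone sketch, with plain nested vectors standing in for the expressions and their GetInputIds():

#include <iostream>
#include <set>
#include <vector>

int main() {
  // Each inner vector plays the role of one expression's GetInputIds();
  // a negative id marks an operand the generated code never reads.
  std::vector<std::vector<int>> expression_input_ids = {{2, -1, 7},
                                                        {0, 1, 2, 6}};

  std::set<int> input_ids;  // std::set de-duplicates and orders the ids
  for (const auto& ids : expression_input_ids) {
    for (int id : ids) {
      if (id >= 0) {  // skip the "unused operand" sentinel
        input_ids.insert(id);
      }
    }
  }

  for (int id : input_ids) {
    std::cout << id << " ";  // prints: 0 1 2 6 7
  }
  std::cout << "\n";
  return 0;
}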
paddle/fluid/framework/ir/fusion_group/code_generator_tester.cc (6 additions, 6 deletions)
@@ -91,7 +91,9 @@ void CheckOutput(const std::vector<OperationExpression>& expressions,
                  const std::vector<int> output_ids_of_subgraph, int i) {
   std::vector<float> var(cpu_tensors.size());
   for (auto id : input_ids_of_subgraph) {
-    var[id] = cpu_tensors[id].data<float>()[i];
+    if (id >= 0) {
+      var[id] = cpu_tensors[id].data<float>()[i];
+    }
   }
 
   for (auto expression : expressions) {
@@ -182,10 +184,8 @@ void TestMainImpl(std::string func_name, std::string code_str,
       gpu_tensors[id].mutable_data<float>(cpu_tensors[id].dims(), place);
       fusion_group::SetupRandomCPUTensor<float>(&cpu_tensors[id]);
       TensorCopySync(cpu_tensors[id], place, &gpu_tensors[id]);
-    } else {
-      gpu_ptrs[id] = nullptr;
+      args.push_back(&gpu_ptrs[id]);
     }
-    args.push_back(&gpu_ptrs[id]);
   }
 
   for (auto id : output_ids) {
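The TestMainImpl change keeps the launch arguments aligned with the shrunken formal list: before, a nullptr placeholder was pushed for the unused operand; now nothing is pushed, because the corresponding parameter no longer exists. A simplified sketch of that invariant, assuming the usual pointer-array style of kernel argument passing (the helper below is hypothetical, not Paddle code):

#include <vector>

// Hypothetical helper (not Paddle code) showing the invariant TestMainImpl
// now relies on: the launch-argument vector must match the generated
// kernel's formal parameter list one-to-one, in order.
void BuildLaunchArgs(const std::vector<int>& input_ids,
                     std::vector<float*>& gpu_ptrs, int& n,
                     std::vector<void*>* args) {
  args->push_back(&n);  // first formal parameter: the element count
  for (int id : input_ids) {
    if (id >= 0) {
      // ... allocate the device buffer and set gpu_ptrs[id] here ...
      args->push_back(&gpu_ptrs[id]);  // one arg per surviving parameter
    }
    // id < 0: the generated kernel no longer has a parameter for this
    // operand, so pushing anything (even a nullptr placeholder, as the
    // old code did) would misalign every argument after it.
  }
}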
@@ -283,7 +283,7 @@ TEST(code_generator, elementwise_grad) {
   // t3 = relu(t2)
   // t2' = relu_grad(t2, t3, t3')
   // t0', t1' = elementwise_mul_grad(t0, t1, t2, t2')
-  fusion_group::OperationExpression exp1("relu_grad", {2, 3, 7}, {6});
+  fusion_group::OperationExpression exp1("relu_grad", {2, -1, 7}, {6});
   fusion_group::OperationExpression exp2("elementwise_mul_grad", {0, 1, 2, 6},
                                          {4, 5});
   std::vector<fusion_group::OperationExpression> expressions = {exp1, exp2};
@@ -300,7 +300,7 @@ TEST(code_generator, elementwise_grad) {
   // Op(relu_grad), inputs:{2,3,7}, outputs:{6}
   // Op(elementwise_mul_grad), inputs:{0,1,2,6}, outputs:{4,5}
   int n = cpu_tensors[0].numel();
-  std::vector<int> input_ids = {0, 1, 2, 3, 7};
+  std::vector<int> input_ids = {0, 1, 2, -1, 7};
   std::vector<int> output_ids = {4, 5, 6};
   TestMain("elementwise_grad_kernel_0", expressions, cpu_tensors, n, input_ids,
            output_ids);
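Reading the two hunks above together: the operand being dropped is id 3 (t3, relu's forward output in the comment block), since exp1's inputs go from {2, 3, 7} to {2, -1, 7} and the expected input_ids drop id 3 the same way. A comment-form restatement (the kernel's exact parameter order is an assumption):

// Before: input_ids = {0, 1, 2, 3, 7}
//   -> launch passes pointers for t0, t1, t2, t3, t7 (t3 never read)
// After:  input_ids = {0, 1, 2, -1, 7}
//   -> launch passes pointers for t0, t1, t2, t7 only
// Note: the "inputs:{2,3,7}" comment retained as context in the hunk above
// reflects the pre-change numbering.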
