Feature Request: C++ gradient for LRN #13987

Merged: 10 commits, Nov 30, 2017
12 changes: 12 additions & 0 deletions tensorflow/cc/gradients/nn_grad.cc
@@ -196,6 +196,18 @@ Status MaxPoolGradV2Helper(const Scope& scope, const Operation& op,
}
REGISTER_GRADIENT_OP("MaxPoolV2", MaxPoolGradV2Helper);

Status LRNGradHelper(const Scope& scope, const Operation& op,
                     const std::vector<Output>& grad_inputs,
                     std::vector<Output>* grad_outputs) {
  internal::LRNGrad::Attrs grad_attrs;

  // LRNGrad takes the incoming gradient, the forward op's input, and the
  // forward op's output.
  auto dx = internal::LRNGrad(scope, grad_inputs[0], op.input(0), op.output(0),
                              grad_attrs);
  grad_outputs->push_back(dx);
  return scope.status();
}
REGISTER_GRADIENT_OP("LRN", LRNGradHelper);

} // anonymous namespace
} // namespace ops
} // namespace tensorflow
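
For context, here is a minimal sketch (not part of this diff) of how the newly registered gradient is consumed: AddSymbolicGradients looks up the "LRN" entry in the gradient registry and invokes LRNGradHelper during backprop. AddSymbolicGradients, Placeholder, and LRN are existing TensorFlow C++ APIs; the wrapper function itself is illustrative.

#include "tensorflow/cc/framework/gradients.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/standard_ops.h"

tensorflow::Status LRNGradientExample() {
  using namespace tensorflow;
  using namespace tensorflow::ops;

  Scope scope = Scope::NewRootScope();
  auto x = Placeholder(scope, DT_FLOAT, Placeholder::Shape({1, 1, 2, 1}));
  auto y = LRN(scope, x);

  // Requesting dy/dx routes through the gradient registry entry added above.
  std::vector<Output> grads;
  TF_RETURN_IF_ERROR(AddSymbolicGradients(scope, {y}, {x}, &grads));
  return scope.status();
}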
7 changes: 7 additions & 0 deletions tensorflow/cc/gradients/nn_grad_test.cc
@@ -191,5 +191,12 @@ TEST_F(NNGradTest, MaxPoolGradV2Helper) {
RunTest(x, x_init_value, y, y_shape);
}

// Verifies the LRN gradient against a numerically computed one.
TEST_F(NNGradTest, LRN) {
  TensorShape x_shape({1, 1, 2, 1});
  auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
  auto y = LRN(scope_, x);
  RunTest(x, x_shape, y, x_shape);
}
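
As with the other tests in this file, RunTest builds the symbolic gradient via AddSymbolicGradients and compares it against a numerically estimated gradient (via the gradient checker's ComputeGradientError), failing if the maximum error exceeds a small tolerance, so LRNGradHelper is checked against finite differences.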

} // namespace
} // namespace tensorflow