[caffe2] Add operator schema for FP16SparseNorm (#46300)
Summary:
Fixes regression introduced by #45551
Also fixes signed-unsigned comparison warnings in test/cpp/tensorexpr/test_train_impl.cpp

Pull Request resolved: #46300

Reviewed By: walterddr

Differential Revision: D24294821

Pulled By: malfet

fbshipit-source-id: 16bffa71ec0d2d38208855223a3c5efb18414ab5
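
For context: in caffe2, a registered operator type is expected to carry a matching OPERATOR_SCHEMA declaration, and the regression here was that Float16SparseNormalize lost its schema. A minimal sketch of how the gap can be detected at runtime (the helper name is hypothetical, not part of this PR):

    #include <string>

    #include "caffe2/core/operator_schema.h"

    // Hypothetical helper, for illustration only: OpSchemaRegistry::Schema()
    // returns nullptr when no OPERATOR_SCHEMA(...) declaration exists for the
    // type, which is the state this commit fixes for Float16SparseNormalize.
    bool HasSchema(const std::string& op_type) {
      return caffe2::OpSchemaRegistry::Schema(op_type) != nullptr;
    }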
malfet authored and facebook-github-bot committed Oct 14, 2020
1 parent f89498f commit 1fcec6e
Showing 2 changed files with 32 additions and 6 deletions.
32 changes: 29 additions & 3 deletions caffe2/operators/sparse_normalize_op.cc
@@ -109,9 +109,6 @@ bool SparseNormalizeOp<c10::Half, CPUContext>::DoRunWithType() {
}

REGISTER_CPU_OPERATOR(SparseNormalize, SparseNormalizeOp<float, CPUContext>);
REGISTER_CPU_OPERATOR(
Float16SparseNormalize,
SparseNormalizeOp<c10::Half, CPUContext>);
OPERATOR_SCHEMA(SparseNormalize)
.NumInputs(2, 3)
.NumOutputs(1)
@@ -139,4 +136,33 @@ Given a sparse matrix, apply max_norm or constant_norm sparse regularization.
)DOC");

SHOULD_NOT_DO_GRADIENT(SparseNormalize);

REGISTER_CPU_OPERATOR(Float16SparseNormalize, SparseNormalizeOp<c10::Half, CPUContext>);
OPERATOR_SCHEMA(Float16SparseNormalize)
.NumInputs(2, 3)
.NumOutputs(1)
.Input(0, "param", "Parameters to be normalized")
.Input(1, "indices", "Sparse indices")
.Input(
2,
"grad",
"Gradient computed (optional - not used, this argument is for backwards compatibility)")
.Output(0, "output_param", "Normalized parameters")
.EnforceOneToOneInplace()
.Arg(
"use_max_norm",
"A bool variable to control whether to use max norm \
or constant norm. When use_max_norm = false, constant norm is used so that \
all the embedding vectors are scaled to have an L2 norm equal to A \
(see below argument norm=A). If use_max_norm = true, max norm is used so that \
the embedding is scaled to have an L2 norm no larger than A. If an embedding's \
norm is less than A originally, the embedding is left unchanged. \
The default is True.")
.Arg("norm", "L2 norm of the embedding. The default is 1.0.")
.SetDoc(R"DOC(
Given a sparse matrix, apply max_norm or constant_norm sparse regularization.
)DOC");

SHOULD_NOT_DO_GRADIENT(Float16SparseNormalize);
} // namespace caffe2
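
As a usage sketch of the schema added above (assumed helpers from caffe2/utils/proto_utils.h; the blob names are placeholders, not from this PR), the single output aliases the first input because of EnforceOneToOneInplace():

    #include "caffe2/core/operator.h"
    #include "caffe2/utils/proto_utils.h"

    // Sketch: build an OperatorDef matching the Float16SparseNormalize
    // schema: two inputs, one in-place output, and the two declared args.
    caffe2::OperatorDef MakeFp16SparseNormalizeDef() {
      return caffe2::CreateOperatorDef(
          "Float16SparseNormalize",
          /*name=*/"",
          /*inputs=*/{"param", "indices"},
          /*outputs=*/{"param"}, // in-place, per EnforceOneToOneInplace()
          /*args=*/
          {caffe2::MakeArgument<bool>("use_max_norm", true),
           caffe2::MakeArgument<float>("norm", 1.0f)});
    }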
6 changes: 3 additions & 3 deletions test/cpp/tensorexpr/test_train_impl.cpp
@@ -169,7 +169,7 @@ VTensor* grad(VTensor* y, VTensor* x, VTensor* j) {
TORCH_CHECK(g, ss.str());
}
auto g_outs = g(op->inputs, grad_inputs);
for (auto i = 0; i < g_outs.size(); ++i) {
for (auto i = 0U; i < g_outs.size(); ++i) {
auto input = op->inputs[i];
if (need_grad.find(input) != need_grad.end()) {
if (grad_map.find(input) != grad_map.end()) {
@@ -198,7 +198,7 @@ VOp::VOp(
VGraph* graph_)
: inputs(inputs_), graph(graph_) {
method = &VMethod::get(name);
for (auto i = 0; i < num_outputs; ++i) {
for (auto i = 0U; i < num_outputs; ++i) {
outputs.emplace_back(graph->create_tensor({}));
outputs.back()->op = this;
}
@@ -497,7 +497,7 @@ to_tensorexpr(const VGraph& graph, std::vector<VTensor*> outputs) {
}
auto outs = vop->method->lower(inps, vop->inputs, vbindings);
TORCH_CHECK(outs.size() == vop->outputs.size());
for (auto i = 0; i < outs.size(); ++i) {
for (auto i = 0U; i < outs.size(); ++i) {
bindings[vop->outputs[i]] = outs[i];
}
}
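
The loop-counter changes above silence -Wsign-compare: "auto i = 0" deduces int, which is then compared against the unsigned size_t returned by size(). A standalone illustration (not from this PR):

    #include <vector>

    void Example(const std::vector<int>& v) {
      // "auto i = 0U" deduces unsigned int, so the comparison against
      // v.size() (a size_t) is unsigned-vs-unsigned and no longer warns.
      for (auto i = 0U; i < v.size(); ++i) {
        (void)v[i]; // use the element
      }
    }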