LayerNormInt8QuantizeFakeNNPI fix to match ICEREF. (#47140)
Summary:
Pull Request resolved: #47140

LayerNorm + Int8Quantize fix to match ICEREF.

(Note: this ignores all push blocking failures!)

Test Plan:
buck test --debug //caffe2/caffe2/contrib/fakelowp/test:test_layernorm_nnpi_fp16nnpi -- test_fused_ln_quantize --print-passing-details

https://internalfb.com/intern/testinfra/testrun/7881299371969005

Reviewed By: hyuen

Differential Revision: D24659904

fbshipit-source-id: 026d1a1f69a68eca662a39752af5ab0756bace2d
venkatacrc authored and facebook-github-bot committed Nov 1, 2020
1 parent 19ede75 commit 1cc1da5
Showing 2 changed files with 1 addition and 7 deletions.
caffe2/contrib/fakelowp/layernorm_fp16_fake_op.h (3 changes: 1 addition & 2 deletions)
@@ -189,7 +189,7 @@ class LayerNormFakeFp16Op final : public Operator<CPUContext> {
     int Nout = X.numel();
 
     std::vector<float> inv_scalev(Nout, inv_scale);
-    std::vector<float> offsetv(Nout, Y_offset - 128.0);
+    std::vector<float> offsetv(Nout, Y_offset);
     uint8_t* Y_uint8_data = Y_int8->t.template mutable_data<uint8_t>();
 
     fake_fp16::fma_fp16(Nout, Y_fp16.data(), inv_scalev.data(), offsetv.data());
@@ -200,7 +200,6 @@ class LayerNormFakeFp16Op final : public Operator<CPUContext> {
     for (int i = 0; i < Nout; i++) {
       float halfRes = offsetv[i];
      halfRes = round(halfRes);
-      halfRes = halfRes + 128.0;
      if (std::isinf(halfRes)) {
        if (halfRes > 0) {
          halfRes = qmax;
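The hunk above removes the -128.0 pre-shift on the offset vector together with the +128.0 added back after rounding, so the fused multiply-add, round, and clip now operate directly on Y_offset, which is what brings the output in line with ICEREF. Below is a minimal sketch of the two orderings, using plain float and hypothetical names (quantize_old, quantize_new); the real operator runs these steps through the fake_fp16 helpers, so this is an illustration of the arithmetic, not the operator code.

// Illustrative sketch only, not the operator code: the real op runs the
// multiply-add and rounding through fake_fp16 to emulate fp16 hardware.
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>

constexpr float kQmin = 0.0f;   // uint8 quantization range used by the op
constexpr float kQmax = 255.0f;

// Old path: offset pre-shifted by -128, rounded, then shifted back.
uint8_t quantize_old(float y, float inv_scale, float y_offset) {
  float r = y * inv_scale + (y_offset - 128.0f);
  r = std::round(r) + 128.0f;
  return static_cast<uint8_t>(std::min(std::max(r, kQmin), kQmax));
}

// New path (this commit): offset applied directly, then rounded and clipped.
uint8_t quantize_new(float y, float inv_scale, float y_offset) {
  float r = y * inv_scale + y_offset;
  r = std::round(r);
  return static_cast<uint8_t>(std::min(std::max(r, kQmin), kQmax));
}

int main() {
  // For this input both paths agree (139 vs 139); they can diverge on
  // halfway rounding cases and once the intermediate multiply-add is forced
  // through fp16, which is what the fake operator emulates.
  unsigned old_q = quantize_old(1.23f, 7.5f, 130.0f);
  unsigned new_q = quantize_new(1.23f, 7.5f, 130.0f);
  printf("%u vs %u\n", old_q, new_q);
  return 0;
}

For most inputs the two orderings give the same result; they can diverge on halfway rounding cases and when the intermediate multiply-add is evaluated in fp16, which is presumably why the shift had to be dropped to match ICEREF bit-for-bit.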
caffe2/contrib/fakelowp/test/test_layernorm_nnpi_fp16.py (5 changes: 0 additions & 5 deletions)
@@ -1,8 +1,3 @@
-
-
-
-
-
 import numpy as np
 import caffe2.python.fakelowp.init_shared_libs # noqa
 from caffe2.proto import caffe2_pb2
