Update on "[quant][graphmode][fx] Scope support for call_method in Qu…
Browse files Browse the repository at this point in the history
…antizationTracer"


Summary:
Previously we did not set the qconfig for call_method nodes correctly, since doing so requires knowing the node's scope: the path and type of the module whose forward graph contains the node. This PR modifies the QuantizationTracer to record that scope information and build a map from each call_method Node to (module_path, module_type), which is then used when constructing the qconfig_map.
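
As a rough illustration of the approach (a minimal sketch under assumptions, not the code in this PR; ScopeRecordingTracer, node_to_scope, and _current_scope are invented names), an FX tracer can track which module's forward is currently being traced and tag every call_method node it creates with that scope:

import torch.fx as fx

class ScopeRecordingTracer(fx.Tracer):
    """Sketch only: record, for every call_method node created while tracing
    some module's forward, that module's path and type."""

    def __init__(self):
        super().__init__()
        self.node_to_scope = {}            # node name -> (module_path, module_type)
        self._current_scope = ("", None)   # start at the root module

    def call_module(self, m, forward, args, kwargs):
        # Push the scope of the module whose forward is about to be traced,
        # and restore the enclosing scope afterwards.
        saved = self._current_scope
        self._current_scope = (self.path_of_module(m), type(m))
        try:
            return super().call_module(m, forward, args, kwargs)
        finally:
            self._current_scope = saved

    def create_node(self, kind, target, args, kwargs, name=None, type_expr=None):
        node = super().create_node(kind, target, args, kwargs, name, type_expr)
        if kind == "call_method":
            self.node_to_scope[node.name] = self._current_scope
        return node

# Usage (model is any torch.nn.Module):
#   tracer = ScopeRecordingTracer()
#   graph = tracer.trace(model)
#   tracer.node_to_scope  # consulted when building qconfig_map

The recorded map can then be consulted when assigning qconfigs, so a call_method node inherits the qconfig of the module it was traced from.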

Test Plan:
python test/test_quantization.py TestQuantizeFx.test_qconfig_for_call_method
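
For context, here is a hypothetical sketch of the situation that test exercises (the actual test case may differ; the qconfig_dict layout follows the FX quantization API of this commit's era): a Tensor method called inside a submodule's forward should pick up that submodule's qconfig, not its parent's.

import torch
from torch.quantization import get_default_qconfig
from torch.quantization.quantize_fx import prepare_fx

class Sub(torch.nn.Module):
    def forward(self, x):
        # Traces to a call_method node; with the scope map it is matched to
        # the qconfig of "sub" rather than the global one.
        return x.transpose(1, 2)

class M(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.sub = Sub()
        self.linear = torch.nn.Linear(4, 4)

    def forward(self, x):
        return self.linear(self.sub(x))

# Quantize everything except "sub"; the transpose inside Sub.forward should
# then be left unquantized instead of inheriting the global qconfig.
qconfig_dict = {"": get_default_qconfig("fbgemm"), "module_name": [("sub", None)]}
prepared = prepare_fx(M().eval(), qconfig_dict)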
Reviewers:

Subscribers:

Tasks:

Tags:

Differential Revision: [D25818132](https://our.internmc.facebook.com/intern/diff/D25818132)

[ghstack-poisoned]
jerryzh168 committed Jan 9, 2021
2 parents de3d47a + 49bb0a3 commit 3e9560f
Showing 155 changed files with 5,597 additions and 3,279 deletions.
CMakeLists.txt: 4 changes (2 additions & 2 deletions)
@@ -207,7 +207,7 @@ cmake_dependent_option(
USE_VALGRIND "Use Valgrind. Only available on Linux." ON
"LINUX" OFF)
option(USE_VULKAN "Use Vulkan GPU backend" OFF)
-option(USE_VULKAN_FP16_INFERENCE "Vulkan - Use fp16 inference even on fp32 tensors" ON)
+option(USE_VULKAN_FP16_INFERENCE "Vulkan - Use fp16 inference even on fp32 tensors" OFF)
option(USE_VULKAN_RELAXED_PRECISION "Vulkan - Use relaxed precision math in the kernels (mediump)" OFF)
option(USE_VULKAN_SHADERC_RUNTIME "Vulkan - Use runtime shader compilation (needs libshaderc)" OFF)
option(USE_VULKAN_WRAPPER "Vulkan - Dynamically load Vulkan functions" ON)
@@ -318,7 +318,7 @@ set(OP_DEPENDENCY "" CACHE STRING
# symbol lookup error: miniconda3/envs/pytorch-py3.7/lib/libmkl_intel_lp64.so: undefined symbol: mkl_blas_dsyrk
# https://software.intel.com/en-us/articles/symbol-lookup-error-when-linking-intel-mkl-with-gcc-on-ubuntu
if(LINUX)
-set(CMAKE_SHARED_LINKER_FLAGS "-Wl,--no-as-needed ${CMAKE_SHARED_LINKER_FLAGS}")
+set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,--no-as-needed")
endif()

if(MSVC)
Dockerfile: 2 changes (2 additions & 0 deletions)
@@ -59,6 +59,7 @@ RUN /opt/conda/bin/conda install -c "${INSTALL_CHANNEL}" -y python=${PYTHON_VERS
RUN /opt/conda/bin/pip install torchelastic

FROM ${BASE_IMAGE} as official
+ARG PYTORCH_VERSION
LABEL com.nvidia.volumes.needed="nvidia_driver"
RUN --mount=type=cache,id=apt-final,target=/var/cache/apt \
apt-get update && apt-get install -y --no-install-recommends \
@@ -71,6 +72,7 @@ ENV PATH /opt/conda/bin:$PATH
ENV NVIDIA_VISIBLE_DEVICES all
ENV NVIDIA_DRIVER_CAPABILITIES compute,utility
ENV LD_LIBRARY_PATH /usr/local/nvidia/lib:/usr/local/nvidia/lib64
+ENV PYTORCH_VERSION ${PYTORCH_VERSION}
WORKDIR /workspace

FROM official as dev
android/pytorch_android/generate_test_torchscripts.py: 60 changes (20 additions & 40 deletions)
@@ -20,101 +20,85 @@ def forward(self, input):
return None

@torch.jit.script_method
-def eqBool(self, input):
-# type: (bool) -> bool
+def eqBool(self, input: bool) -> bool:
return input

@torch.jit.script_method
-def eqInt(self, input):
-# type: (int) -> int
+def eqInt(self, input: int) -> int:
return input

@torch.jit.script_method
-def eqFloat(self, input):
-# type: (float) -> float
+def eqFloat(self, input: float) -> float:
return input

@torch.jit.script_method
-def eqStr(self, input):
-# type: (str) -> str
+def eqStr(self, input: str) -> str:
return input

@torch.jit.script_method
-def eqTensor(self, input):
-# type: (Tensor) -> Tensor
+def eqTensor(self, input: Tensor) -> Tensor:
return input

@torch.jit.script_method
-def eqDictStrKeyIntValue(self, input):
-# type: (Dict[str, int]) -> Dict[str, int]
+def eqDictStrKeyIntValue(self, input: Dict[str, int]) -> Dict[str, int]:
return input

@torch.jit.script_method
-def eqDictIntKeyIntValue(self, input):
-# type: (Dict[int, int]) -> Dict[int, int]
+def eqDictIntKeyIntValue(self, input: Dict[int, int]) -> Dict[int, int]:
return input

@torch.jit.script_method
-def eqDictFloatKeyIntValue(self, input):
-# type: (Dict[float, int]) -> Dict[float, int]
+def eqDictFloatKeyIntValue(self, input: Dict[float, int]) -> Dict[float, int]:
return input

@torch.jit.script_method
-def listIntSumReturnTuple(self, input):
-# type: (List[int]) -> Tuple[List[int], int]
+def listIntSumReturnTuple(self, input: List[int]) -> Tuple[List[int], int]:
sum = 0
for x in input:
sum += x
return (input, sum)

@torch.jit.script_method
-def listBoolConjunction(self, input):
-# type: (List[bool]) -> bool
+def listBoolConjunction(self, input: List[bool]) -> bool:
res = True
for x in input:
res = res and x
return res

@torch.jit.script_method
-def listBoolDisjunction(self, input):
-# type: (List[bool]) -> bool
+def listBoolDisjunction(self, input: List[bool]) -> bool:
res = False
for x in input:
res = res or x
return res

@torch.jit.script_method
-def tupleIntSumReturnTuple(self, input):
-# type: (Tuple[int, int, int]) -> Tuple[Tuple[int, int, int], int]
+def tupleIntSumReturnTuple(self, input: Tuple[int, int, int]) -> Tuple[Tuple[int, int, int], int]:
sum = 0
for x in input:
sum += x
return (input, sum)

@torch.jit.script_method
-def optionalIntIsNone(self, input):
-# type: (Optional[int]) -> bool
+def optionalIntIsNone(self, input: Optional[int]) -> bool:
return input is None

@torch.jit.script_method
-def intEq0None(self, input):
-# type: (int) -> Optional[int]
+def intEq0None(self, input: int) -> Optional[int]:
if input == 0:
return None
return input

@torch.jit.script_method
-def str3Concat(self, input):
-# type: (str) -> str
+def str3Concat(self, input: str) -> str:
return input + input + input

@torch.jit.script_method
def newEmptyShapeWithItem(self, input):
return torch.tensor([int(input.item())])[0]

@torch.jit.script_method
-def testAliasWithOffset(self):
-# type: () -> List[Tensor]
+def testAliasWithOffset(self) -> List[Tensor]:
x = torch.tensor([100, 200])
a = [x[0], x[1]]
return a
@@ -128,8 +112,7 @@ def testNonContiguous(self):
return x

@torch.jit.script_method
-def conv2d(self, x, w, toChannelsLast):
-# type: (Tensor, Tensor, bool) -> Tensor
+def conv2d(self, x: Tensor, w: Tensor, toChannelsLast: bool) -> Tensor:
r = torch.nn.functional.conv2d(x, w)
if (toChannelsLast):
r = r.contiguous(memory_format=torch.channels_last)
@@ -138,18 +121,15 @@ def conv2d(self, x, w, toChannelsLast):
return r

@torch.jit.script_method
-def contiguous(self, x):
-# type: (Tensor) -> Tensor
+def contiguous(self, x: Tensor) -> Tensor:
return x.contiguous()

@torch.jit.script_method
-def contiguousChannelsLast(self, x):
-# type: (Tensor) -> Tensor
+def contiguousChannelsLast(self, x: Tensor) -> Tensor:
return x.contiguous(memory_format=torch.channels_last)

@torch.jit.script_method
-def contiguousChannelsLast3d(self, x):
-# type: (Tensor) -> Tensor
+def contiguousChannelsLast3d(self, x: Tensor) -> Tensor:
return x.contiguous(memory_format=torch.channels_last_3d)

scriptAndSave(Test(), "test.pt")
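
Every hunk in this file (and in the mirrored test_asset.jit below) makes the same mechanical change: MyPy-style # type: comments are replaced by Python 3 annotations, which TorchScript accepts equally. Schematically, using one method from this file:

import torch

class Example(torch.jit.ScriptModule):
    # Old style, removed in this diff:
    #
    #     @torch.jit.script_method
    #     def eqBool(self, input):
    #         # type: (bool) -> bool
    #         return input

    # New style: the same TorchScript signature via Python 3 annotations.
    @torch.jit.script_method
    def eqBool(self, input: bool) -> bool:
        return input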
android/pytorch_android/test_asset.jit: 60 changes (20 additions & 40 deletions)
@@ -1,85 +1,69 @@
def forward(self, input):
return None

-def eqBool(self, input):
-# type: (bool) -> bool
+def eqBool(self, input: bool) -> bool:
return input

-def eqInt(self, input):
-# type: (int) -> int
+def eqInt(self, input: int) -> int:
return input

-def eqFloat(self, input):
-# type: (float) -> float
+def eqFloat(self, input: float) -> float:
return input

-def eqStr(self, input):
-# type: (str) -> str
+def eqStr(self, input: str) -> str:
return input

-def eqTensor(self, input):
-# type: (Tensor) -> Tensor
+def eqTensor(self, input: Tensor) -> Tensor:
return input

-def eqDictStrKeyIntValue(self, input):
-# type: (Dict[str, int]) -> Dict[str, int]
+def eqDictStrKeyIntValue(self, input: Dict[str, int]) -> Dict[str, int]:
return input

-def eqDictIntKeyIntValue(self, input):
-# type: (Dict[int, int]) -> Dict[int, int]
+def eqDictIntKeyIntValue(self, input: Dict[int, int]) -> Dict[int, int]:
return input

-def eqDictFloatKeyIntValue(self, input):
-# type: (Dict[float, int]) -> Dict[float, int]
+def eqDictFloatKeyIntValue(self, input: Dict[float, int]) -> Dict[float, int]:
return input

-def listIntSumReturnTuple(self, input):
-# type: (List[int]) -> Tuple[List[int], int]
+def listIntSumReturnTuple(self, input: List[int]) -> Tuple[List[int], int]:
sum = 0
for x in input:
sum += x
return (input, sum)

-def listBoolConjunction(self, input):
-# type: (List[bool]) -> bool
+def listBoolConjunction(self, input: List[bool]) -> bool:
res = True
for x in input:
res = res and x
return res

-def listBoolDisjunction(self, input):
-# type: (List[bool]) -> bool
+def listBoolDisjunction(self, input: List[bool]) -> bool:
res = False
for x in input:
res = res or x
return res

-def tupleIntSumReturnTuple(self, input):
-# type: (Tuple[int, int, int]) -> Tuple[Tuple[int, int, int], int]
+def tupleIntSumReturnTuple(self, input: Tuple[int, int, int]) -> Tuple[Tuple[int, int, int], int]:
sum = 0
for x in input:
sum += x
return (input, sum)

-def optionalIntIsNone(self, input):
-# type: (Optional[int]) -> bool
+def optionalIntIsNone(self, input: Optional[int]) -> bool:
return input is None

-def intEq0None(self, input):
-# type: (int) -> Optional[int]
+def intEq0None(self, input: int) -> Optional[int]:
if input == 0:
return None
return input

-def str3Concat(self, input):
-# type: (str) -> str
+def str3Concat(self, input: str) -> str:
return input + input + input

def newEmptyShapeWithItem(self, input):
return torch.tensor([int(input.item())])[0]

-def testAliasWithOffset(self):
-# type: () -> List[Tensor]
+def testAliasWithOffset(self) -> List[Tensor]:
x = torch.tensor([100, 200])
a = [x[0], x[1]]
return a
@@ -91,8 +75,7 @@ def testNonContiguous(self):
assert x[1] == 300
return x

-def conv2d(self, x, w, toChannelsLast):
-# type: (Tensor, Tensor, bool) -> Tensor
+def conv2d(self, x: Tensor, w: Tensor, toChannelsLast: bool) -> Tensor:
r = torch.conv2d(x, w)
if (toChannelsLast):
# memory_format=torch.channels_last
@@ -101,16 +84,13 @@ def conv2d(self, x, w, toChannelsLast):
r = r.contiguous()
return r

-def contiguous(self, x):
-# type: (Tensor) -> Tensor
+def contiguous(self, x: Tensor) -> Tensor:
return x.contiguous()

-def contiguousChannelsLast(self, x):
-# type: (Tensor) -> Tensor
+def contiguousChannelsLast(self, x: Tensor) -> Tensor:
# memory_format=torch.channels_last
return x.contiguous(memory_format=2)

-def contiguousChannelsLast3d(self, x):
-# type: (Tensor) -> Tensor
+def contiguousChannelsLast3d(self, x: Tensor) -> Tensor:
# memory_format=torch.channels_last_3d
return x.contiguous(memory_format=3)
aten/src/ATen/BatchedTensorImpl.cpp: 11 changes (4 additions & 7 deletions)
@@ -20,22 +20,19 @@ BatchedTensorImpl::BatchedTensorImpl(Tensor value, BatchDims bdims)
const auto public_dims = value_.dim() - bdims_.size();
const auto value_sizes = value_.sizes();
const auto value_strides = value_.strides();
-sizes_.clear();
-sizes_.reserve(public_dims);
-strides_.clear();
-strides_.reserve(public_dims);
+sizes_and_strides_.resize(public_dims);
for (int64_t dim = 0; dim < public_dims; dim++) {
auto actual_dim = actualDim(dim, /*wrap_dim=*/false);
-sizes_.push_back(value_sizes.at(actual_dim));
-strides_.push_back(value_strides.at(actual_dim));
+sizes_and_strides_.size_at_unchecked(dim) = value_sizes.at(actual_dim);
+sizes_and_strides_.stride_at_unchecked(dim) = value_strides.at(actual_dim);
}
refresh_numel();
refresh_contiguous();
}

int64_t BatchedTensorImpl::actualDim(int64_t dim, bool wrap_dim) const {
if (wrap_dim) {
-const auto ndim = sizes_.size();
+const auto ndim = sizes_and_strides_.size();
dim = maybe_wrap_dim(dim, ndim);
}
auto is_bdim = createBatchDimBitset(bdims_);
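For intuition, here is a hypothetical Python rendering of what the constructor loop above computes (the function name and example values are invented): the public view of a batched tensor gathers its sizes and strides from the non-batch ("actual") dims.

def public_sizes_strides(value_sizes, value_strides, batch_dims):
    # Public dim i maps to the i-th non-batch physical dim, mirroring actualDim.
    non_batch = [d for d in range(len(value_sizes)) if d not in batch_dims]
    sizes = [value_sizes[d] for d in non_batch]
    strides = [value_strides[d] for d in non_batch]
    return sizes, strides

# A (B=3, 4, 5) tensor with dim 0 as a batch dim presents publicly as (4, 5):
print(public_sizes_strides([3, 4, 5], [20, 5, 1], {0}))  # ([4, 5], [5, 1])
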
aten/src/ATen/OpaqueTensorImpl.h: 6 changes (3 additions & 3 deletions)
@@ -28,7 +28,7 @@ struct TORCH_API OpaqueTensorImpl : public TensorImpl {
bool is_non_overlapping_and_dense = true)
: TensorImpl(key_set, data_type, device),
opaque_handle_(std::move(opaque_handle)) {
-sizes_ = sizes.vec();
+sizes_and_strides_.set_sizes(sizes);
refresh_numel();
is_non_overlapping_and_dense_ = is_non_overlapping_and_dense;
}
@@ -86,7 +86,7 @@ struct TORCH_API OpaqueTensorImpl : public TensorImpl {
const c10::VariableVersion& version_counter,
bool allow_tensor_metadata_change) const override {
auto impl = c10::make_intrusive<OpaqueTensorImpl<OpaqueHandle>>(
-key_set(), dtype(), device(), opaque_handle_, sizes_);
+key_set(), dtype(), device(), opaque_handle_, sizes_and_strides_.sizes_arrayref());
copy_tensor_metadata(
/*src_opaque_impl=*/this,
/*dest_opaque_impl=*/impl.get(),
@@ -106,7 +106,7 @@ struct TORCH_API OpaqueTensorImpl : public TensorImpl {
c10::VariableVersion&& version_counter,
bool allow_tensor_metadata_change) const override {
auto impl = c10::make_intrusive<OpaqueTensorImpl<OpaqueHandle>>(
-key_set(), dtype(), device(), opaque_handle_, sizes_);
+key_set(), dtype(), device(), opaque_handle_, sizes_and_strides_.sizes_arrayref());
copy_tensor_metadata(
/*src_opaque_impl=*/this,
/*dest_opaque_impl=*/impl.get(),
aten/src/ATen/SparseTensorImpl.cpp: 1 change (1 addition & 0 deletions)
@@ -70,6 +70,7 @@ void SparseTensorImpl::set_storage_offset(int64_t storage_offset) {
}

int64_t SparseTensorImpl::dim() const {
+TORCH_INTERNAL_ASSERT_DEBUG_ONLY(sparse_dim_ + dense_dim_ == TensorImpl::dim());
return sparse_dim_ + dense_dim_;
}
bool SparseTensorImpl::has_storage() const {
