Fix static initialization issue for static build (pytorch#90133)
Fixes pytorch#83255

Code comes from pytorch#83258 after fixing merge conflicts.

Pull Request resolved: pytorch#90133
Approved by: https://github.com/soumith, https://github.com/malfet
ppwwyyxx authored and kulinseth committed Dec 9, 2022
1 parent a655da7 commit f790592
Showing 8 changed files with 22 additions and 0 deletions.
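All eight files apply the same one-line pattern. register_linear_params() and register_conv_params<kSpatialDim>() register the packed-parameter custom classes, and previously they were triggered only by file-scope static initializers; in a fully static link, the linker can discard an object file that nothing references, silently dropping those initializers. Calling the registration functions from inside the TORCH_LIBRARY_IMPL blocks, which a static build does keep, forces the reference. A minimal self-contained sketch of the idea, using hypothetical names (register_params, register_quantized_ops) rather than PyTorch's actual code:

// Minimal sketch of the pattern, not PyTorch's actual code; the names
// register_params and register_quantized_ops are hypothetical.
#include <iostream>

// Registration function for a packed-params class. Returning an int lets it
// also seed a file-scope static initializer
// ("static int dummy = register_params();") under dynamic linking, but a
// static link may drop the whole translation unit holding that initializer.
int register_params() {
  // A function-local static keeps repeated calls idempotent, so every
  // operator-registration block can call it safely.
  static const int once = []() {
    std::cout << "packed-params class registered\n";
    return 0;
  }();
  return once;
}

// Analogue of a TORCH_LIBRARY_IMPL block: this code is referenced by the
// operator registry, so the linker must keep it. Calling register_params()
// here creates a hard reference that survives static linking.
void register_quantized_ops() {
  register_params();
  // m.impl("quantized::linear", ...) would follow here.
}

int main() {
  register_quantized_ops();
  register_quantized_ops(); // second call is a no-op
}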
1 change: 1 addition & 0 deletions aten/src/ATen/native/ao_sparse/quantized/cpu/qlinear.cpp
@@ -247,6 +247,7 @@ class QLinearInt8 final {
 };
 
 TORCH_LIBRARY_IMPL(sparse, QuantizedCPU, m) {
+  register_linear_params();
   m.impl(
       TORCH_SELECTIVE_NAME("sparse::qlinear"),
       TORCH_FN(QLinearInt8<false>::run));
1 change: 1 addition & 0 deletions aten/src/ATen/native/ao_sparse/quantized/cpu/qlinear_prepack.cpp
@@ -240,6 +240,7 @@ class QLinearPackWeightInt8 final {
 };
 
 TORCH_LIBRARY_IMPL(sparse, QuantizedCPU, m) {
+  register_linear_params();
   m.impl(
       TORCH_SELECTIVE_NAME("sparse::qlinear_prepack"),
       TORCH_FN(QLinearPackWeightInt8::run));
1 change: 1 addition & 0 deletions aten/src/ATen/native/ao_sparse/quantized/cpu/qlinear_unpack.cpp
@@ -133,6 +133,7 @@ class QLinearUnpackWeightInt8 final {
 };
 
 TORCH_LIBRARY_IMPL(sparse, CatchAll, m) {
+  register_linear_params();
   m.impl(
       TORCH_SELECTIVE_NAME("sparse::qlinear_unpack"),
       TORCH_FN(QLinearUnpackWeightInt8::run));
2 changes: 2 additions & 0 deletions aten/src/ATen/native/quantized/cpu/qlinear.cpp
@@ -955,12 +955,14 @@ class QLinearInt8FusedQDQ final {
 };
 
 TORCH_LIBRARY_IMPL(quantized, QuantizedCPU, m) {
+  register_linear_params();
   m.impl(TORCH_SELECTIVE_NAME("quantized::linear"), TORCH_FN(QLinearInt8<false>::run));
   m.impl(TORCH_SELECTIVE_NAME("quantized::linear_relu"), TORCH_FN(QLinearInt8<true>::run));
   m.impl(TORCH_SELECTIVE_NAME("quantized::linear_leaky_relu"), TORCH_FN(QLinearLeakyReluInt8::run));
 }
 
 TORCH_LIBRARY_IMPL(_quantized, QuantizedCPU, m) {
+  register_linear_params();
   m.impl(TORCH_SELECTIVE_NAME("_quantized::linear"), TORCH_FN(QLinearInt8<false>::run));
 }

2 changes: 2 additions & 0 deletions aten/src/ATen/native/quantized/cpu/qlinear_dynamic.cpp
@@ -662,6 +662,7 @@ class QLinearDynamicFp16 final {
 };
 
 TORCH_LIBRARY_IMPL(quantized, CPU, m) {
+  register_linear_params();
   m.impl(
       TORCH_SELECTIVE_NAME("quantized::linear_dynamic"),
       TORCH_FN(QLinearDynamicInt8<false>::run));
@@ -677,6 +678,7 @@ TORCH_LIBRARY_IMPL(quantized, CPU, m) {
 }
 
 TORCH_LIBRARY_IMPL(_quantized, CPU, m) {
+  register_linear_params();
   m.impl(
       TORCH_SELECTIVE_NAME("_quantized::linear_dynamic"),
       TORCH_FN(QLinearDynamicInt8<false>::run));

4 changes: 4 additions & 0 deletions aten/src/ATen/native/quantized/cpu/qlinear_prepack.cpp
@@ -381,20 +381,24 @@ class QLinearPackWeightFp16Legacy final {
 };
 
 TORCH_LIBRARY_IMPL(quantized, QuantizedCPU, m) {
+  register_linear_params();
   m.impl(TORCH_SELECTIVE_NAME("quantized::linear_prepack"), TORCH_FN(QLinearPackWeightInt8::run));
   m.impl(TORCH_SELECTIVE_NAME("quantized::linear_prepack_legacy"), TORCH_FN(QLinearPackWeightInt8Legacy::run));
 }
 
 TORCH_LIBRARY_IMPL(quantized, CPU, m) {
+  register_linear_params();
   m.impl(TORCH_SELECTIVE_NAME("quantized::linear_prepack_fp16"), TORCH_FN(QLinearPackWeightFp16::run));
   m.impl(TORCH_SELECTIVE_NAME("quantized::linear_prepack_fp16_legacy"), TORCH_FN(QLinearPackWeightFp16Legacy::run));
 }
 
 TORCH_LIBRARY_IMPL(_quantized, QuantizedCPU, m) {
+  register_linear_params();
   m.impl(TORCH_SELECTIVE_NAME("_quantized::linear_prepack"), TORCH_FN(QLinearPackWeightInt8::run));
 }
 
 TORCH_LIBRARY_IMPL(_quantized, CPU, m) {
+  register_linear_params();
   m.impl(TORCH_SELECTIVE_NAME("_quantized::linear_prepack_fp16"), TORCH_FN(QLinearPackWeightFp16::run));
   m.impl(TORCH_SELECTIVE_NAME("_quantized::linear_prepack_fp16_legacy"), TORCH_FN(QLinearPackWeightFp16Legacy::run));
 }

8 changes: 8 additions & 0 deletions aten/src/ATen/native/quantized/qconv_unpack.cpp
@@ -28,6 +28,12 @@ and /cudnn/ConvUnpackImpl.cpp, for cudnn.
 #include <ATen/ops/from_blob.h>
 #endif
 
+template <int kSpatialDim = 2>
+int register_conv_params();
+
+extern template int register_conv_params<2>();
+extern template int register_conv_params<3>();
+
 
 namespace at {
 namespace native {
@@ -192,6 +198,8 @@ unpack_quantized_prepacked_sizes_conv2d(const IValue& ivalue) {
 }
 
 TORCH_LIBRARY_IMPL(quantized, CatchAll, m) {
+  register_conv_params<2>();
+  register_conv_params<3>();
   // conv_unpack is deprecated, please use conv2d_unpack for 2D conv.
   m.impl(TORCH_SELECTIVE_NAME("quantized::conv_unpack"), TORCH_FN(QConvUnpackWeightsInt8<2>::run));
   // We use conv2d_unpack to be consistent with conv3d_unpack
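One detail specific to qconv_unpack.cpp: register_conv_params is a function template, so the file pairs a plain declaration with extern template declarations, which promise that explicit instantiations for kSpatialDim 2 and 3 exist in another translation unit and stop this file from attempting (and failing) to instantiate a definition it cannot see. A compilable sketch of that split, with hypothetical file names in the comments:

// conv_params.cpp (hypothetical): owns the definition and exports explicit
// instantiations for the two supported spatial dimensions.
template <int kSpatialDim>
int register_conv_params() {
  // ... register the kSpatialDim-specific packed-params class here ...
  return kSpatialDim;
}
template int register_conv_params<2>();
template int register_conv_params<3>();

// qconv_unpack.cpp (the file above) would then contain only:
//
//   template <int kSpatialDim = 2>
//   int register_conv_params();
//   extern template int register_conv_params<2>();
//   extern template int register_conv_params<3>();
//
// so calls such as register_conv_params<2>() compile against the
// declaration and link against the instantiations above.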
3 changes: 3 additions & 0 deletions aten/src/ATen/native/quantized/qlinear_unpack.cpp
@@ -13,6 +13,8 @@ and /cudnn/linear_unpack_impl.cpp, for cudnn.
 #include <torch/custom_class.h>
 #include <torch/library.h>
 
+int register_linear_params();
+
 namespace at {
 namespace native {
 namespace {
@@ -68,6 +70,7 @@ TORCH_LIBRARY_IMPL(quantized, CPU, m) {
 }
 
 TORCH_LIBRARY_IMPL(quantized, CatchAll, m) {
+  register_linear_params();
   m.impl(TORCH_SELECTIVE_NAME("quantized::linear_unpack"), TORCH_FN(QLinearUnpackWeightInt8::run));
   m.impl(TORCH_SELECTIVE_NAME("quantized::linear_unpack_fp16"), TORCH_FN(QLinearUnpackWeightFp16::run));
 }
