Update on "[FX][2/2] Make docstrings pretty when rendered"
Differential Revision: [D25351588](https://our.internmc.facebook.com/intern/diff/D25351588)

[ghstack-poisoned]
James Reed committed Dec 5, 2020
2 parents 809bf74 + a782908 commit 21881ff
Showing 66 changed files with 1,215 additions and 1,080 deletions.
2 changes: 1 addition & 1 deletion .circleci/cimodel/data/dimensions.py
@@ -8,8 +8,8 @@
 ]
 
 ROCM_VERSIONS = [
-    "3.8",
     "3.9",
+    "3.10",
 ]
 
 ROCM_VERSION_LABELS = ["rocm" + v for v in ROCM_VERSIONS]
208 changes: 104 additions & 104 deletions .circleci/config.yml

Large diffs are not rendered by default.

1 change: 1 addition & 0 deletions .jenkins/pytorch/multigpu-test.sh
@@ -17,6 +17,7 @@ fi
 
 python tools/download_mnist.py --quiet -d test/cpp/api/mnist
 OMP_NUM_THREADS=2 TORCH_CPP_TEST_MNIST_PATH="test/cpp/api/mnist" build/bin/test_api
+time python test/run_test.py --verbose -i distributed/test_jit_c10d
 time python test/run_test.py --verbose -i distributed/test_distributed_fork
 time python test/run_test.py --verbose -i distributed/test_c10d
 time python test/run_test.py --verbose -i distributed/test_c10d_spawn
4 changes: 2 additions & 2 deletions aten/src/ATen/BatchingRegistrations.cpp
@@ -941,8 +941,8 @@ Tensor new_empty_strided_batching_rule(
       size.size(), ") must match dimensionality of strides (",
       stride.size(), ")");
   auto storage_size = native::storage_size_for(size, stride);
-  for (int64_t idx = 0; idx < physical_strides.size(); ++idx) {
-    physical_strides[idx] *= storage_size;
+  for (auto& physical_stride : physical_strides) {
+    physical_stride *= storage_size;
   }
 
   // physical_strides = [B1 * B2 * S, B2 * S, S] + strides
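
The loop rewrite above swaps an explicit index for a range-based for that mutates each element through a reference, which also sidesteps the signed/unsigned comparison against size(). A minimal standalone sketch of the idiom (the values are illustrative, not taken from the kernel):

#include <cstdint>
#include <iostream>
#include <vector>

int main() {
  std::vector<int64_t> physical_strides = {12, 4, 1};
  const int64_t storage_size = 10;

  // Index-based version (as before the change): comparing an int64_t index
  // with size(), which returns an unsigned size_t, draws -Wsign-compare.
  //   for (int64_t idx = 0; idx < physical_strides.size(); ++idx) {
  //     physical_strides[idx] *= storage_size;
  //   }

  // Range-based version: the mutable reference writes back into the vector
  // and no index comparison is needed at all.
  for (auto& physical_stride : physical_strides) {
    physical_stride *= storage_size;
  }

  for (auto s : physical_strides) {
    std::cout << s << ' ';  // prints: 120 40 10
  }
  std::cout << '\n';
  return 0;
}
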
4 changes: 2 additions & 2 deletions aten/src/ATen/NamedTensorUtils.cpp
@@ -264,11 +264,11 @@ static std::vector<Dimname> compute_dot_product_outnames(
   }
   std::vector<Dimname> outnames(num_outnames, Dimname::wildcard());
   int64_t index = 0;
-  for (int64_t j = 0; j < tensor_names.size(); ++j) {
+  for (size_t j = 0; j < tensor_names.size(); ++j) {
     if (j == tensor_dotted_dim) continue;
     outnames[index++] = tensor_names[j];
   }
-  for (int64_t j = 0; j < other_names.size(); ++j) {
+  for (size_t j = 0; j < other_names.size(); ++j) {
     if (j == other_dotted_dim) continue;
     outnames[index++] = other_names[j];
   }
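
Where the loop body still needs the index (here j is compared against the dotted dimension and used to subscript the name lists), the counter is instead declared with the unsigned type that size() returns. A small self-contained sketch of that pattern, with hypothetical names:

#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> tensor_names = {"batch", "channel", "feature"};
  std::size_t tensor_dotted_dim = 1;  // dimension consumed by the dot product

  // vector::size() returns std::size_t, so the counter is size_t as well;
  // an int64_t counter here would trigger a -Wsign-compare warning.
  for (std::size_t j = 0; j < tensor_names.size(); ++j) {
    if (j == tensor_dotted_dim) continue;  // skip the contracted dimension
    std::cout << tensor_names[j] << '\n';  // prints "batch" then "feature"
  }
  return 0;
}
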
39 changes: 37 additions & 2 deletions aten/src/ATen/OpaqueTensorImpl.h
@@ -86,14 +86,34 @@ struct CAFFE2_API OpaqueTensorImpl : public TensorImpl {
     auto impl = c10::make_intrusive<OpaqueTensorImpl<OpaqueHandle>>(
         key_set(), dtype(), device(), opaque_handle_, sizes_);
     copy_tensor_metadata(
-        /*src_impl=*/this,
-        /*dest_impl=*/impl.get(),
+        /*src_opaque_impl=*/this,
+        /*dest_opaque_impl=*/impl.get(),
         /*version_counter=*/version_counter,
         /*allow_tensor_metadata_change=*/allow_tensor_metadata_change);
     impl->refresh_numel();
     return impl;
   }
 
+  /**
+   * Return a TensorImpl that is a shallow-copy of this TensorImpl.
+   *
+   * For usage of `version_counter` and `allow_tensor_metadata_change`,
+   * see NOTE [ TensorImpl Shallow-Copying ].
+   */
+  c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
+      c10::VariableVersion&& version_counter,
+      bool allow_tensor_metadata_change) const override {
+    auto impl = c10::make_intrusive<OpaqueTensorImpl<OpaqueHandle>>(
+        key_set(), dtype(), device(), opaque_handle_, sizes_);
+    copy_tensor_metadata(
+        /*src_opaque_impl=*/this,
+        /*dest_opaque_impl=*/impl.get(),
+        /*version_counter=*/std::move(version_counter),
+        /*allow_tensor_metadata_change=*/allow_tensor_metadata_change);
+    impl->refresh_numel();
+    return impl;
+  }
+
   /**
    * Shallow-copies data from another TensorImpl into this TensorImpl.
    *
@@ -143,6 +163,21 @@
     dest_opaque_impl->opaque_handle_ = src_opaque_impl->opaque_handle_;
   }
 
+  static void copy_tensor_metadata(
+      const OpaqueTensorImpl<OpaqueHandle>* src_opaque_impl,
+      OpaqueTensorImpl<OpaqueHandle>* dest_opaque_impl,
+      c10::VariableVersion&& version_counter,
+      bool allow_tensor_metadata_change) {
+    TensorImpl::copy_tensor_metadata(
+        src_opaque_impl,
+        dest_opaque_impl,
+        std::move(version_counter),
+        allow_tensor_metadata_change);
+
+    // OpaqueTensorImpl-specific fields.
+    dest_opaque_impl->opaque_handle_ = src_opaque_impl->opaque_handle_;
+  }
+
  private:
   OpaqueHandle opaque_handle_;
 };
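
The new shallow_copy_and_detach and copy_tensor_metadata overloads take the version counter by rvalue reference, so a caller that is finished with its VariableVersion can move it into the detached copy instead of copying it. A freestanding sketch of the same two-overload pattern, using made-up stand-in types rather than the ATen classes:

#include <iostream>
#include <memory>
#include <utility>

// Stand-in for c10::VariableVersion: copying it is assumed to be the
// operation the rvalue overload is there to avoid.
struct VersionCounter {
  std::shared_ptr<int> block = std::make_shared<int>(0);
};

struct Impl {
  VersionCounter version;

  // Lvalue overload: the caller keeps its counter, so we copy it.
  std::unique_ptr<Impl> shallow_copy_and_detach(
      const VersionCounter& version_counter) const {
    auto copy = std::make_unique<Impl>();
    copy->version = version_counter;  // copy-assign: shares the counter block
    return copy;
  }

  // Rvalue overload: the caller is done with its counter, so we move it in
  // and skip the copy (mirrors the std::move in the ATen code above).
  std::unique_ptr<Impl> shallow_copy_and_detach(
      VersionCounter&& version_counter) const {
    auto copy = std::make_unique<Impl>();
    copy->version = std::move(version_counter);
    return copy;
  }
};

int main() {
  Impl a;
  VersionCounter vc;
  auto copied = a.shallow_copy_and_detach(vc);             // picks the const& overload
  auto moved  = a.shallow_copy_and_detach(std::move(vc));  // picks the && overload
  std::cout << std::boolalpha
            << (copied != nullptr && moved != nullptr) << '\n';
  return 0;
}
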
19 changes: 19 additions & 0 deletions aten/src/ATen/SparseTensorImpl.h
@@ -200,6 +200,25 @@ struct CAFFE2_API SparseTensorImpl : public TensorImpl {
     return impl;
   }
 
+  /**
+   * Return a TensorImpl that is a shallow-copy of this TensorImpl.
+   *
+   * For usage of `version_counter` and `allow_tensor_metadata_change`,
+   * see NOTE [ TensorImpl Shallow-Copying ].
+   */
+  c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
+      c10::VariableVersion&& version_counter,
+      bool allow_tensor_metadata_change) const override {
+    auto impl = c10::make_intrusive<SparseTensorImpl>(key_set(), dtype());
+    copy_tensor_metadata(
+        /*src_impl=*/this,
+        /*dest_impl=*/impl.get(),
+        /*version_counter=*/std::move(version_counter),
+        /*allow_tensor_metadata_change=*/allow_tensor_metadata_change);
+    impl->refresh_numel();
+    return impl;
+  }
+
   /**
    * Shallow-copies data from another TensorImpl into this TensorImpl.
    *
4 changes: 2 additions & 2 deletions aten/src/ATen/TensorIterator.cpp
@@ -939,8 +939,8 @@ TensorIterator TensorIterator::reduce_op(Tensor& out1, Tensor& out2, const Tenso
 }
 
 void TensorIteratorBase::populate_operands(TensorIteratorConfig& config) {
-  for (int i = 0; i < config.tensors_.size(); i++) {
-    operands_.emplace_back(std::move(config.tensors_[i]));
+  for (auto& tensor: config.tensors_) {
+    operands_.emplace_back(std::move(tensor));
   }
   num_outputs_ = config.num_outputs_;
 }
4 changes: 2 additions & 2 deletions aten/src/ATen/TensorNames.cpp
@@ -61,10 +61,10 @@ TensorNames::TensorNames(ArrayRef<Dimname> names, int64_t start, int64_t end) {
 }
 
 TensorNames& TensorNames::unifyFromRightInplace(const TensorNames& other, const char* op_name) {
-  int64_t size_diff = std::labs(names_.size() - other.names_.size());
+  size_t size_diff = std::labs(names_.size() - other.names_.size());
 
   if (names_.size() > other.names_.size()) {
-    for (int64_t idx = size_diff; idx < names_.size(); ++idx) {
+    for (size_t idx = size_diff; idx < names_.size(); ++idx) {
       names_[idx] = names_[idx].unify(other.names_[idx - size_diff], op_name);
     }
   } else {
2 changes: 1 addition & 1 deletion aten/src/ATen/core/ivalue_inl.h
@@ -409,7 +409,7 @@ struct C10_EXPORT ivalue::Future : c10::intrusive_ptr_target {
       [fut](std::function<IValue(void)> cb) {
         try {
           fut->markCompleted(cb());
-        } catch (std::exception& e) {
+        } catch (std::exception&) {
           fut->setError(std::current_exception());
         }
       },
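
Dropping the name from the catch parameter silences an unused-variable warning; the handler still reaches the in-flight exception through std::current_exception(). A compact standalone sketch:

#include <exception>
#include <iostream>
#include <stdexcept>

int main() {
  std::exception_ptr err;
  try {
    throw std::runtime_error("boom");
  } catch (std::exception&) {        // unnamed: the caught object is never read
    err = std::current_exception();  // capture the in-flight exception instead
  }

  // Rethrow later, e.g. on the consumer side of a future.
  try {
    if (err) std::rethrow_exception(err);
  } catch (const std::exception& e) {
    std::cout << e.what() << '\n';  // prints: boom
  }
  return 0;
}
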
8 changes: 4 additions & 4 deletions aten/src/ATen/native/Convolution.cpp
@@ -177,13 +177,13 @@ auto ConvParams::needs_64bit_indexing_no_split(const at::Tensor& input, const at
   int64_t outsize = 1;
   if (transposed) {
     std::vector<int64_t> o = conv_input_size(input.sizes(), weight.sizes(), padding, output_padding, stride, dilation, groups);
-    for (int64_t i = 1; i < o.size(); i++) {
-      outsize *= o[i];
+    for (const auto& e: o) {
+      outsize *= e;
     }
   } else {
     std::vector<int64_t> o = conv_output_size(input.sizes(), weight.sizes(), padding, stride, dilation);
-    for (int64_t i = 1; i < o.size(); i++) {
-      outsize *= o[i];
+    for (const auto& e: o) {
+      outsize *= e;
     }
   }
   return outsize > int_max;
2 changes: 1 addition & 1 deletion aten/src/ATen/native/ForeachOpsKernels.cpp
@@ -201,7 +201,7 @@ std::vector<Tensor> foreach_tensor_##NAME##_slow(TensorList tensors1, TensorList
   \
   std::vector<Tensor> result; \
   result.reserve(tensors1.size()); \
-  for (int i = 0; i < tensors1.size(); i++) { \
+  for (size_t i = 0; i < tensors1.size(); i++) { \
     result.emplace_back(at::NAME(tensors1[i], tensors2[i])); \
   } \
   \
24 changes: 4 additions & 20 deletions aten/src/ATen/native/SpectralOps.cpp
@@ -645,46 +645,30 @@ void _cufft_clear_plan_cache(int64_t device_index) {
   detail::getCUDAHooks().cuFFTClearPlanCache(device_index);
 }
 
-Tensor fft(const Tensor& self, const int64_t signal_ndim, const bool normalized) {
-  TORCH_WARN_ONCE(
-    "The function torch.fft is deprecated and will be removed in PyTorch 1.8. "
-    "Use the new torch.fft module functions, instead, by importing torch.fft "
-    "and calling torch.fft.fft or torch.fft.fftn.");
+static Tensor fft(const Tensor& self, const int64_t signal_ndim, const bool normalized) {
   return _fft(self, signal_ndim, /* complex_input */ true,
               /* complex_output */ true, /* inverse */ false, {},
               normalized ? fft_norm_mode::by_root_n : fft_norm_mode::none,
               /* onesided */ false);
 }
 
-Tensor ifft(const Tensor& self, const int64_t signal_ndim, const bool normalized) {
-  TORCH_WARN_ONCE(
-    "The function torch.ifft is deprecated and will be removed in a future "
-    "PyTorch release. Use the new torch.fft module functions, instead, by "
-    "importing torch.fft and calling torch.fft.ifft or torch.fft.ifftn.");
+static Tensor ifft(const Tensor& self, const int64_t signal_ndim, const bool normalized) {
   return _fft(self, signal_ndim, /* complex_input */ true,
               /* complex_output */ true, /* inverse */ true, {},
               normalized ? fft_norm_mode::by_root_n : fft_norm_mode::by_n,
               /* onesided */ false);
 }
 
-Tensor rfft(const Tensor& self, const int64_t signal_ndim, const bool normalized,
+static Tensor rfft(const Tensor& self, const int64_t signal_ndim, const bool normalized,
             const bool onesided) {
-  TORCH_WARN_ONCE(
-    "The function torch.rfft is deprecated and will be removed in a future "
-    "PyTorch release. Use the new torch.fft module functions, instead, by "
-    "importing torch.fft and calling torch.fft.fft or torch.fft.rfft.");
   return _fft(self, signal_ndim, /* complex_input */ false,
               /* complex_output */ true, /* inverse */ false, {},
               normalized ? fft_norm_mode::by_root_n : fft_norm_mode::none,
               onesided);
 }
 
-Tensor irfft(const Tensor& self, const int64_t signal_ndim, const bool normalized,
+static Tensor irfft(const Tensor& self, const int64_t signal_ndim, const bool normalized,
              const bool onesided, IntArrayRef signal_sizes) {
-  TORCH_WARN_ONCE(
-    "The function torch.irfft is deprecated and will be removed in a future "
-    "PyTorch release. Use the new torch.fft module functions, instead, by "
-    "importing torch.fft and calling torch.fft.ifft or torch.fft.irfft.");
   return _fft(self, signal_ndim, /* complex_input */ true,
               /* complex_output */ false, /* inverse */ true, signal_sizes,
               normalized ? fft_norm_mode::by_root_n : fft_norm_mode::by_n,
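
These wrappers keep their bodies but are marked static; with the corresponding entries removed from native_functions.yaml (next file), they appear to be referenced only from within SpectralOps.cpp, and static at namespace scope gives a function internal linkage. A minimal illustration with a hypothetical helper:

#include <iostream>

// `static` on a namespace-scope function gives it internal linkage: the
// symbol is invisible outside this translation unit, so the compiler can
// flag it if it ever becomes unused and nothing else can link against it.
static int twice(int x) {
  return 2 * x;
}

int main() {
  std::cout << twice(21) << '\n';  // prints 42
  return 0;
}
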
16 changes: 0 additions & 16 deletions aten/src/ATen/native/native_functions.yaml
@@ -2047,18 +2047,6 @@
   dispatch:
     CPU, CUDA: native_group_norm_backward
 
-- func: ifft(Tensor self, int signal_ndim, bool normalized=False) -> Tensor
-  use_c10_dispatcher: full
-  variants: function, method
-
-- func: rfft(Tensor self, int signal_ndim, bool normalized=False, bool onesided=True) -> Tensor
-  use_c10_dispatcher: full
-  variants: function, method
-
-- func: irfft(Tensor self, int signal_ndim, bool normalized=False, bool onesided=True, int[] signal_sizes=[]) -> Tensor
-  use_c10_dispatcher: full
-  variants: function, method
-
 - func: _fft_with_size(Tensor self, int signal_ndim, bool complex_input, bool complex_output, bool inverse, int[] checked_signal_sizes, bool normalized, bool onesided, int[] output_sizes) -> Tensor
   use_c10_dispatcher: full
   variants: function
@@ -9396,10 +9384,6 @@
   use_c10_dispatcher: full
   variants: function
 
-- func: fft(Tensor self, int signal_ndim, bool normalized=False) -> Tensor
-  use_c10_dispatcher: full
-  variants: function, method
-
 ## Functions for linear algebra and the torch.linalg namespace
 # Note [linalg namespace binding]
 # Functions in the linalg python module should have their names start with
21 changes: 21 additions & 0 deletions aten/src/ATen/quantized/QTensorImpl.h
@@ -51,6 +51,27 @@ struct CAFFE2_API QTensorImpl : public c10::TensorImpl {
     return impl;
   }
 
+  /**
+   * Return a TensorImpl that is a shallow-copy of this TensorImpl.
+   *
+   * For usage of `version_counter` and `allow_tensor_metadata_change`,
+   * see NOTE [ TensorImpl Shallow-Copying ].
+   */
+  c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
+      c10::VariableVersion&& version_counter,
+      bool allow_tensor_metadata_change) const override {
+    auto impl = c10::make_intrusive<QTensorImpl>(
+        Storage(storage()), key_set(), data_type_, quantizer_);
+    copy_tensor_metadata(
+        /*src_impl=*/this,
+        /*dest_impl=*/impl.get(),
+        /*version_counter=*/std::move(version_counter),
+        /*allow_tensor_metadata_change=*/allow_tensor_metadata_change);
+    impl->refresh_numel();
+    impl->refresh_contiguous();
+    return impl;
+  }
+
   /**
    * Shallow-copies data from another TensorImpl into this TensorImpl.
    *
