Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Update on "Factor out numerical logic"
This change is similar to #54049 in that it helps us factor out some code that can be used in both fast and slow versions of gradcheck. - `compute_gradient` and `compute_numerical_jacobian_cols` have fewer responsibilities: - compute_numerical_jacobian_cols essentially only handles the complexity of complex derivatives - compute_gradient handles only finite differencing (and doesn't worry about different layouts and indexing into the input tensor) - we have two stages again where we first compute the columns separately, then combine them [ghstack-poisoned]
- Loading branch information
Showing
96 changed files
with
2,050 additions
and
1,380 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,44 @@ | ||
name: Label PRs & Issues

on:
  issues:
    types: [opened, edited]
  pull_request_target:
    types: [edited, opened, synchronize, reopened]

jobs:
  auto-label-rocm:
    runs-on: ubuntu-18.04
    steps:
      - name: Retrieve information
        id: vars
        # SECURITY: pull_request_target runs with a write-scoped GITHUB_TOKEN.
        # Untrusted event fields (PR/issue titles) must never be interpolated
        # directly into the script body with ${{ }} — a crafted title could
        # inject shell commands. Pass them through `env:` instead, so they are
        # expanded by the shell as plain data.
        env:
          PR_TITLE: ${{ github.event.pull_request.title }}
          PR_NUMBER: ${{ github.event.pull_request.number }}
          ISSUE_TITLE: ${{ github.event.issue.title }}
          ISSUE_NO: ${{ github.event.issue.number }}
        run: |
          set -eux
          # pull_request payloads carry a PR number; issue payloads don't.
          if [[ -n "${PR_NUMBER}" ]]; then
            TITLE="${PR_TITLE}"
            ISSUE_NUMBER="${PR_NUMBER}"
          else
            TITLE="${ISSUE_TITLE}"
            ISSUE_NUMBER="${ISSUE_NO}"
          fi
          echo ::set-output name=TITLE::"${TITLE}"
          echo ::set-output name=ISSUE_NUMBER::"${ISSUE_NUMBER}"
          echo ::set-output name=OWNER::"${{ github.repository_owner }}"
          echo ::set-output name=REPO::"${{ github.event.repository.name }}"
      - name: Auto-label ROCm
        # Adds the "ROCm" label when the (lowercased) title mentions rocm.
        run: |
          set -eux
          if [[ "${TITLE,,}" == *rocm* ]]; then
            curl \
              -X POST \
              -H "Authorization: token ${GITHUB_TOKEN}" \
              "https://api.github.com/repos/${OWNER}/${REPO}/issues/${ISSUE_NUMBER}/labels" \
              -d '{"labels":["ROCm"]}'
          fi
        env:
          GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
          TITLE: "${{ steps.vars.outputs.TITLE }}"
          ISSUE_NUMBER: "${{ steps.vars.outputs.ISSUE_NUMBER }}"
          OWNER: "${{ steps.vars.outputs.OWNER }}"
          REPO: "${{ steps.vars.outputs.REPO }}"
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,61 @@ | ||
#pragma once | ||
|
||
#include <c10/core/TensorOptions.h> | ||
|
||
/* | ||
* [Note: hacky wrapper removal for optional tensor] | ||
* | ||
* The kernel implementation takes an optional tensor marked in the schema as | ||
* Tensor? but the C++ function takes Tensor instead of the optional<Tensor> | ||
* expected by the dispatcher. | ||
* | ||
* To remove the hacky wrapper, the C++ function is changed to take | ||
* optional<Tensor> and unwrap the Tensor value at the beginning of | ||
* the function, e.g.: | ||
* > const Tensor& weight = | ||
 * >     c10::value_or_else(weight_opt, [] { return Tensor(); });
* | ||
 * We may want to make the kernel handle optional directly without going through
* the creation of a default constructed tensor. | ||
*/ | ||
|
||
/* | ||
* [Note: hacky wrapper removal for TensorOptions] | ||
* | ||
* The kernel implementation takes a TensorOptions argument but the dispatcher | ||
* expects separate arguments for dtype, layout, device, pin_memory. | ||
* | ||
* To remove the hacky wrapper, the kernel implementation is changed to take | ||
* the 4 arguments (dtype, layout, device, pin_memory), and assemble the | ||
* TensorOptions value at the beginning of the function, e.g.: | ||
* > TensorOptions options = TensorOptions().dtype(dtype).layout(layout) | ||
* > .device(device).pinned_memory(pin_memory); | ||
* | ||
 * We may want to make the kernel handle these parameters directly without going
* through the creation of a TensorOptions value. | ||
*/ | ||
|
||
namespace c10 { | ||
namespace impl { | ||
|
||
inline c10::optional<MemoryFormat> | ||
check_tensor_options_and_extract_memory_format( | ||
const TensorOptions& options, | ||
c10::optional<MemoryFormat> memory_format) { | ||
TORCH_CHECK( | ||
options.requires_grad_opt() == c10::nullopt || | ||
options.requires_grad_opt().value() == false, | ||
"Operators taking TensorOptions cannot take a TensorOptions with " | ||
"options.requires_grad set as true. This isn't implemented yet."); | ||
TORCH_CHECK( | ||
!(options.has_memory_format() && memory_format.has_value()), | ||
"Cannot set memory_format both in TensorOptions and explicit argument; please delete " | ||
"the redundant setter."); | ||
if (memory_format.has_value()) { | ||
return memory_format; | ||
} else { | ||
return options.memory_format_opt(); | ||
} | ||
} | ||
} // namespace impl | ||
} // namespace c10 |
Oops, something went wrong.