Fix: Bad autograd side effects from printing #51364

Closed
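Context for the change: `collect_next_edges` used to return an empty edge list whenever grad mode was disabled. Any `grad_fn` access under `no_grad` after an in-place update to a view would therefore rebuild the view's autograd graph with no next edges, breaking a later `backward()`; printing a tensor takes the same `no_grad` path internally, which is where the PR title comes from. A minimal sketch of the failure mode, adapted from the new test below (the `device` argument is dropped, and `print(c)` is a stand-in for any `grad_fn` access under `no_grad`):

import torch

# In-place update through a view of a non-leaf tensor marks its
# autograd graph for a lazy rebuild.
a = torch.ones(3, 1, requires_grad=True)
b = a * 2
c = b.view_as(b)
c[0][0] = 3

# Any grad_fn access under no_grad (printing does this internally)
# triggers the rebuild; before this fix the rebuilt node collected
# no next edges, so the backward() below would misbehave.
with torch.no_grad():
    print(c)

c.sum().backward()  # should now propagate back to a
print(a.grad)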
13 changes: 13 additions & 0 deletions test/test_autograd.py
@@ -7355,6 +7355,19 @@ def test_inplace_view_of_view(self, device):
         x.sum().backward()
         self.assertEqual(root.grad.tolist(), [[1, 2], [1, 1]])
 
+    def test_inplace_view_then_no_grad(self, device):
+        # Perform an in-place operation on a view of a non-leaf variable.
+        a = torch.ones(3, 1, device=device, requires_grad=True)
+        b = a * 2
+        c = b.view_as(b)
+        c[0][0] = 3
+
+        # Force a graph update with grad disabled.
+        with torch.no_grad():
+            c.grad_fn
+
+        c.sum().backward()
+
     def test_inplace_view_gradcheck(self, device):
         # gradcheck modifications to views
         a = torch.randn(4, 4, device=device, requires_grad=True)
2 changes: 1 addition & 1 deletion torch/csrc/autograd/custom_function.h
@@ -212,7 +212,7 @@ auto Function<T>::apply(Args&&... args) -> std::enable_if_t<std::is_same<X,T>::v
   extract_vars(node->is_variable_input_, input_vars, args...);
 
   bool is_executable = GradMode::is_enabled() && any_variable_requires_grad(input_vars);
-  auto next_edges = collect_next_edges(input_vars);
+  auto next_edges = (is_executable ? collect_next_edges(input_vars) : edge_list());
   node->set_ctx_grad_fn(node);
   node->set_next_edges(std::move(next_edges));
   node->clear_input_metadata();
2 changes: 0 additions & 2 deletions torch/csrc/autograd/function.h
@@ -504,8 +504,6 @@ inline bool any_variable_requires_grad(const variable_list& variables) {
 /// Return the next edges of all the given variables, or tuples of variables.
 template <typename... Variables>
 edge_list collect_next_edges(Variables&&... variables) {
-  if (!GradMode::is_enabled())
-    return {};
   detail::MakeNextFunctionList make;
   make.apply(std::forward<Variables>(variables)...);
   return std::move(make.next_edges);
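Note: with this hunk, `collect_next_edges` no longer silently returns an empty list under `no_grad`; each call site that wants that behavior now gates on `GradMode::is_enabled()` or its own `is_executable` flag, as the other three hunks in this PR do.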
2 changes: 1 addition & 1 deletion torch/csrc/autograd/functions/utils.cpp
@@ -22,7 +22,7 @@ variable_list wrap_outputs(const variable_list& inputs, tensor_list&& outputs,
       }
     }
   } else {
-    auto grad_fn = ctr(collect_next_edges(inputs));
+    auto grad_fn = ctr(GradMode::is_enabled() ? collect_next_edges(inputs) : edge_list());
     for (auto& output : outputs) {
       if (output.defined()) {
         auto variable = autograd::make_variable(output, /*requires_grad=*/false);
2 changes: 1 addition & 1 deletion torch/csrc/autograd/python_function.cpp
@@ -487,7 +487,7 @@ std::pair<UnpackedInput, InputFlags> unpack_input(PyObject *args) {
   }
 
   flags.is_executable = GradMode::is_enabled() && any_variable_requires_grad(unpacked.input_vars);
-  flags.next_edges = collect_next_edges(unpacked.input_vars);
+  flags.next_edges = (flags.is_executable ? collect_next_edges(unpacked.input_vars) : edge_list());
   return std::make_pair(std::move(unpacked), std::move(flags));
 }