8 changes: 4 additions & 4 deletions scripts/release_notes/classifier.py
@@ -286,12 +286,12 @@ def balance_dataset(dataset: List):
return dataset
title, files, author, category = zip(*dataset)
category = [common.categories.index(cat) for cat in category]
inpt_data = list(zip(title, files, author))
input_data = list(zip(title, files, author))
from imblearn.over_sampling import RandomOverSampler

# from imblearn.under_sampling import RandomUnderSampler
rus = RandomOverSampler(random_state=42)
X, y = rus.fit_resample(inpt_data, category)
X, y = rus.fit_resample(input_data, category)
merged = list(zip(X, y))
merged = random.sample(merged, k=2 * len(dataset))
X, y = zip(*merged)
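Note (illustrative, not part of this diff): a minimal sketch of the oversampling pattern balance_dataset uses, with toy numeric features and labels standing in for the real (title, files, author) tuples and categories:

# Oversample the minority class so every label is equally represented,
# then shuffle a fixed-size sample, mirroring balance_dataset above.
import random
from imblearn.over_sampling import RandomOverSampler

features = [[0], [1], [2], [3], [4], [5]]  # toy stand-in for (title, files, author)
labels = [0, 0, 0, 0, 1, 1]                # imbalanced categories

ros = RandomOverSampler(random_state=42)
X_res, y_res = ros.fit_resample(features, labels)  # both classes now have 4 samples
merged = random.sample(list(zip(X_res, y_res)), k=len(features))
X_bal, y_bal = zip(*merged)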
@@ -372,8 +372,8 @@ def train(save_path: Path, data_folder: Path, regen_data: bool, resample: bool):

with torch.no_grad():
commit_classifier.eval()
val_inpts, val_targets = val_batch
val_output = commit_classifier(val_inpts)
val_inputs, val_targets = val_batch
val_output = commit_classifier(val_inputs)
val_preds = torch.argmax(val_output, dim=1)
val_acc = torch.sum(val_preds == val_targets).item() / len(val_preds)
print(f"Final Validation accuracy is {val_acc}")
4 changes: 2 additions & 2 deletions test/distributed/fsdp/test_fsdp_comm_hooks.py
@@ -134,7 +134,7 @@ def test_default_communication_hook_behavior(
"""
out_dim = self.world_size
net = torch.nn.Linear(1, out_dim, bias=False)
inpt = torch.tensor([self.rank]).float().cuda(self.rank)
input_ = torch.tensor([self.rank]).float().cuda(self.rank)

net_default_hook = FSDP(
net,
@@ -149,7 +149,7 @@ def test_default_communication_hook_behavior(
for _ in range(4):
# Clear gradients
net_default_hook.zero_grad()
loss = net_default_hook(inpt).sum()
loss = net_default_hook(input_).sum()
loss.backward()

# For each worker, the gradient on the weight should be worker_rank.
6 changes: 3 additions & 3 deletions test/functorch/test_aotdispatch.py
@@ -1624,13 +1624,13 @@ def f(a):
self.verify_aot_autograd(f, inp, test_mutation=True)

def test_input_mutation_batchnorm(self):
def f(inpt, weight, bias, running_mean, running_var):
def f(input_, weight, bias, running_mean, running_var):
# This is additionally a good test, because the input tensors that we mutate
# are *also* saved for backwards.
# This tests that what we save for the backward is actually cloned inputs,
# and not the original inputs that got mutated.
return torch._native_batch_norm_legit(
inpt, weight, bias, running_mean, running_var, True, 0.5, 1e-5
input_, weight, bias, running_mean, running_var, True, 0.5, 1e-5
)

def create_inp(req_grad):
@@ -5798,7 +5798,7 @@ def forward(self, primals_1, primals_2, primals_3):

# Important pieces of the graph:
# - 4 total dense outputs.
# This corresponds to the fact that each user fwd inpt (a, b)
# This corresponds to the fact that each user fwd input (a, b)
# will get a gradient that is a TwoTensor subclass,
# so (mul_2, mul_3) will be wrapped into a.grad
# and (div_1, div_2) will be wrapped into b.grad
60 changes: 31 additions & 29 deletions test/functorch/test_eager_transforms.py
@@ -4627,28 +4627,30 @@ def normalize_devices(fx_g):

@markDynamoStrictTest
class TestFunctionalize(TestCase):
def _check_functionalize_correctness(self, f, inpt, *, skip_vmap=False):
inpt1 = inpt.clone()
inpt2 = inpt.clone()
inpt3 = inpt.clone()
def _check_functionalize_correctness(self, f, input_, *, skip_vmap=False):
input1 = input_.clone()
input2 = input_.clone()
input3 = input_.clone()

expected_outputs = f(inpt1)
expected_outputs = f(input1)
if skip_vmap:
actual_outputs = functionalize(f)(inpt2)
actual_outputs = functionalize(f)(input2)
else:
actual_outputs = vmap(functionalize(f))(inpt2.unsqueeze(0))[0].squeeze()
actual_outputs = vmap(functionalize(f))(input2.unsqueeze(0))[0].squeeze()
# Right now the flavor of functionalize that also removes view ops
# isn't being used with vmap
# That's because {view}_copy ops don't have batching rules yet
# (although we should probably fix that)
actual_outputs_view_copy = functionalize(f, remove="mutations_and_views")(inpt3)
actual_outputs_view_copy = functionalize(f, remove="mutations_and_views")(
input3
)
# Check that outputs are the same
self.assertEqual(actual_outputs, expected_outputs)
self.assertEqual(actual_outputs_view_copy, expected_outputs)

# Inputs might have been mutated by f: check that they were mutated properly
self.assertEqual(inpt1, inpt2)
self.assertEqual(inpt1, inpt3)
self.assertEqual(input1, input2)
self.assertEqual(input1, input3)
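Note (illustrative, outside this PR): a minimal sketch of the property this helper checks, namely that functionalize(f) reproduces both the outputs of f and the mutations f makes to its inputs; the function g and the tensors here are hypothetical:

import torch
from torch.func import functionalize

def g(x):
    y = x.view(-1)
    y.add_(1)      # mutates x through a view
    return x * 2

a1 = torch.zeros(2, 2)
a2 = torch.zeros(2, 2)
out1 = g(a1)                    # eager reference
out2 = functionalize(g)(a2)     # mutation-free execution, same semantics
assert torch.equal(out1, out2)  # outputs agree
assert torch.equal(a1, a2)      # the input mutation is replayed onto a2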

def test_simple_view(self, device):
def f(x: torch.Tensor) -> torch.Tensor:
@@ -4718,12 +4720,12 @@ def test_functionalize_opt_tensor_list(self, device):
def f(x: torch.Tensor, indices: torch.Tensor) -> torch.Tensor:
return x[indices]

inpta = torch.ones(4, device=device)
inptb = torch.arange(2, device=device)
out1 = f(inpta, inptb)
out2 = functionalize(f)(inpta, inptb)
input_a = torch.ones(4, device=device)
input_b = torch.arange(2, device=device)
out1 = f(input_a, input_b)
out2 = functionalize(f)(input_a, input_b)
self.assertEqual(out1, out2)
out = make_fx(functionalize(f))(inpta, inptb)
out = make_fx(functionalize(f))(input_a, input_b)
self.assertExpectedInline(
(out.code),
"""\
@@ -4745,12 +4747,12 @@ def f(x: torch.Tensor) -> torch.Tensor:
y.add_(tmp)
return z.sum()

inpt1 = torch.ones(4, 2, device=device)
inpt2 = torch.ones(4, 2, device=device)
out1 = grad(f)(inpt1)
out2 = grad(functionalize(f))(inpt2)
input1 = torch.ones(4, 2, device=device)
input2 = torch.ones(4, 2, device=device)
out1 = grad(f)(input1)
out2 = grad(functionalize(f))(input2)
self.assertEqual(out1, out2)
self.assertEqual(inpt1, inpt2)
self.assertEqual(input1, input2)

@unittest.skipIf(IS_FBCODE, "fails in fbcode")
def test_vmap_functionalize_jvp(self, device):
@@ -4835,9 +4837,9 @@ def forward(self, x_1) -> torch.Tensor:
)

def test_functionalize_fx_out_op(self, device):
def f(inpt: torch.Tensor) -> torch.Tensor:
def f(inp: torch.Tensor) -> torch.Tensor:
out = torch.empty((), dtype=torch.float32)
torch.add(inpt, inpt, out=out)
torch.add(inp, inp, out=out)
out_view = out.view(4)
out_view.add_(1)
return out
@@ -4851,9 +4853,9 @@ def f(inpt: torch.Tensor) -> torch.Tensor:



def forward(self, inpt_1) -> torch.Tensor:
def forward(self, inp_1) -> torch.Tensor:
empty = torch.ops.aten.empty.memory_format([], dtype = torch.float32, device = 'cpu', pin_memory = False); empty = None
add = torch.ops.aten.add.Tensor(inpt_1, inpt_1); inpt_1 = None
add = torch.ops.aten.add.Tensor(inp_1, inp_1); inp_1 = None
view_copy = torch.ops.aten.view_copy.default(add, [4]); view_copy = None
view_copy_1 = torch.ops.aten.view_copy.default(add, [4]); add = None
add_1 = torch.ops.aten.add.Tensor(view_copy_1, 1); view_copy_1 = None
@@ -4864,12 +4866,12 @@ def forward(self, inpt_1) -> torch.Tensor:
)

def test_functionalize_fx_multi_out_op(self, device):
def f(inpt: torch.Tensor) -> torch.Tensor:
def f(inp: torch.Tensor) -> torch.Tensor:
mins = torch.empty(4, dtype=torch.float32)
maxs = torch.empty(2, 2, dtype=torch.float32)
maxs_view = maxs.view(4)
inpt_view = inpt.view(2, 4)
torch.aminmax(inpt_view, dim=0, out=(mins, maxs_view))
input_view = inp.view(2, 4)
torch.aminmax(input_view, dim=0, out=(mins, maxs_view))
return (maxs, mins)

fn = make_fx(functionalize(f, remove="mutations_and_views"))
@@ -4881,11 +4883,11 @@ def f(inpt: torch.Tensor) -> torch.Tensor:



def forward(self, inpt_1) -> torch.Tensor:
def forward(self, inp_1) -> torch.Tensor:
empty = torch.ops.aten.empty.memory_format([4], dtype = torch.float32, device = 'cpu', pin_memory = False); empty = None
empty_1 = torch.ops.aten.empty.memory_format([2, 2], dtype = torch.float32, device = 'cpu', pin_memory = False)
view_copy = torch.ops.aten.view_copy.default(empty_1, [4]); empty_1 = view_copy = None
view_copy_1 = torch.ops.aten.view_copy.default(inpt_1, [2, 4]); inpt_1 = None
view_copy_1 = torch.ops.aten.view_copy.default(inp_1, [2, 4]); inp_1 = None
aminmax = torch.ops.aten.aminmax.default(view_copy_1, dim = 0); view_copy_1 = None
getitem = aminmax[0]
getitem_1 = aminmax[1]; aminmax = None
8 changes: 4 additions & 4 deletions test/functorch/test_memory_efficient_fusion.py
@@ -281,13 +281,13 @@ def test_hash_with_numbers(self):
if torch._dynamo.is_compiling():
self.skipTest("Unsupported if test run is compiled")

def f(inpt, osize):
size = inpt.shape[-1]
def f(input_, osize):
size = input_.shape[-1]
s1 = size - 1
s2 = size - 1.0
scale = s2 / (osize - 1.0)
inpt = torch.clamp(inpt, 0, s1)
return scale * inpt
input_ = torch.clamp(input_, 0, s1)
return scale * input_

# Fetch dynamic graph
gms = []
6 changes: 3 additions & 3 deletions test/inductor/test_cuda_repro.py
@@ -502,12 +502,12 @@ def foo(x):

foo_opt = torch.compile(foo, backend="inductor")

inpt = torch.randn(10, 10, device="cuda", requires_grad=True)
input_ = torch.randn(10, 10, device="cuda", requires_grad=True)
# TODO: this is broken, fix later
# out = foo_opt(inpt)
# out = foo_opt(input_)
# out.add_(2)

out_ref = foo(inpt)
out_ref = foo(input_)
out_ref.add_(2)
# self.assertEqual(out_ref, out)

4 changes: 2 additions & 2 deletions test/inductor/test_padding.py
@@ -174,7 +174,7 @@ def test_LinearAndSoftmax_both_shapes(self, bias=True):
Compare the perf with good and bad shape.
"""
m_bad_shape = LinearAndSoftmax(vocab_size=30523, bias=bias)
inptus_bad_shape = m_bad_shape.get_example_inputs()
inputs_bad_shape = m_bad_shape.get_example_inputs()
m_good_shape = LinearAndSoftmax(vocab_size=30528, bias=bias)
inputs_good_shape = m_good_shape.get_example_inputs()

@@ -185,7 +185,7 @@ def test_LinearAndSoftmax_both_shapes(self, bias=True):
lambda: forward_and_backward_pass(m_good_shape_opt, inputs_good_shape)
)
latency_bad_shape = benchmarker.benchmark_gpu(
lambda: forward_and_backward_pass(m_bad_shape_opt, inptus_bad_shape)
lambda: forward_and_backward_pass(m_bad_shape_opt, inputs_bad_shape)
)
print(
f"Latency for good shape v.s. bad shape: {latency_good_shape:.3f}ms v.s. {latency_bad_shape:.3f}ms"
60 changes: 31 additions & 29 deletions test/test_functionalization.py
@@ -61,14 +61,14 @@ def wrapped(*inputs):
flat_inputs = pytree.tree_leaves(inputs)
flat_inputs_functional = pytree.tree_leaves(inputs_functional)

for inpt, input_functional in zip(flat_inputs, flat_inputs_functional):
for input_, input_functional in zip(flat_inputs, flat_inputs_functional):
torch._sync(input_functional)
inpt_new = torch._from_functional_tensor(input_functional)
if inpt_new is not inpt and not skip_input_mutations:
input_new = torch._from_functional_tensor(input_functional)
if input_new is not input_ and not skip_input_mutations:
# Existing deficiency in functionalize():
# we don't correctly mutate input metadata (yet?)
if inpt_new.shape == inpt.shape:
inpt.copy_(inpt_new)
if input_new.shape == input_.shape:
input_.copy_(input_new)
tree_map_only(torch.Tensor, torch._sync, out)
out_unwrapped = tree_map_only(
torch.Tensor, torch._from_functional_tensor, out
@@ -84,24 +84,24 @@ def wrapped(*inputs):
class TestFunctionalization(TestCase):
crossref = False

def get_logs(self, func, *inpts, reapply_views=False, run_reinplace=False):
inpts_clone = tree_map_only(torch.Tensor, torch.clone, inpts)
def get_logs(self, func, *inputs, reapply_views=False, run_reinplace=False):
inputs_clone = tree_map_only(torch.Tensor, torch.clone, inputs)
traced_f = make_fx(
_functionalize(func, reapply_views=reapply_views, crossref=self.crossref)
)(*inpts)
)(*inputs)
if run_reinplace:
traced_f = reinplace(traced_f, *inpts_clone)
traced_f = reinplace(traced_f, *inputs_clone)
return traced_f.code

def assert_functionalization(
self, func, *inpts, reapply_views=False, mutated_input_metadata=False
self, func, *inputs, reapply_views=False, mutated_input_metadata=False
):
clones1 = tree_map_only(torch.Tensor, torch.clone, inpts)
clones2 = tree_map_only(torch.Tensor, torch.clone, inpts)
clones3 = tree_map_only(torch.Tensor, torch.clone, inpts)
clones1 = tree_map_only(torch.Tensor, torch.clone, inputs)
clones2 = tree_map_only(torch.Tensor, torch.clone, inputs)
clones3 = tree_map_only(torch.Tensor, torch.clone, inputs)

# Compare outputs (and mutated inputs), with and without functionalization.
out_ref = func(*inpts)
out_ref = func(*inputs)
out_functional = _functionalize(
func, reapply_views=reapply_views, crossref=self.crossref
)(*clones1)
@@ -120,16 +120,16 @@ def assert_functionalization(
# functionalize() deficiency: input metadata mutations aren't propagated properly,
# so we just need to skip checks here for the tests that exercise that.
if not mutated_input_metadata:
flat_inpts = pytree.tree_leaves(inpts)
flat_inputs = pytree.tree_leaves(inputs)
flat_clones1 = pytree.tree_leaves(clones1)
flat_clones3 = pytree.tree_leaves(clones3)
for inpt, input_clone, input_clone3 in zip(
flat_inpts, flat_clones1, flat_clones3
for input_, input_clone, input_clone3 in zip(
flat_inputs, flat_clones1, flat_clones3
):
self.assertEqual(
inpt, input_clone
input_, input_clone
) # input mutations should still occur
self.assertEqual(inpt, input_clone3)
self.assertEqual(input_, input_clone3)

# Handle tests with multi-tensor outputs
if isinstance(out_ref, tuple):
@@ -425,10 +425,10 @@ def f(x):
z.add_(1)
return y

inpt = torch.arange(3, dtype=torch.float32)
self.assert_functionalization(f, inpt)
input_ = torch.arange(3, dtype=torch.float32)
self.assert_functionalization(f, input_)

logs = self.get_logs(f, inpt)
logs = self.get_logs(f, input_)
self.assertExpectedInline(
logs,
"""\
Expand All @@ -446,7 +446,9 @@ def forward(self, arg0_1):
""",
)

reinplaced_logs = self.get_logs(f, inpt, reapply_views=True, run_reinplace=True)
reinplaced_logs = self.get_logs(
f, input_, reapply_views=True, run_reinplace=True
)
self.assertExpectedInline(
reinplaced_logs,
"""\
@@ -1213,16 +1215,16 @@ def f(t, y):
out_1 = torch.ones(1)
return torch.add(t, y, out=out_1)

inpt1, inpt2 = torch.tensor([1]), torch.tensor([1])
inpt1_func, inpt2_func = (
torch._to_functional_tensor(inpt1),
torch._to_functional_tensor(inpt2),
input1, input2 = torch.tensor([1]), torch.tensor([1])
input1_func, input2_func = (
torch._to_functional_tensor(input1),
torch._to_functional_tensor(input2),
)

out_ref = f(inpt1, inpt2)
out_ref = f(input1, input2)
torch._enable_functionalization(reapply_views=True)
try:
out_functional = f(inpt1_func, inpt2_func)
out_functional = f(input1_func, input2_func)
finally:
torch._disable_functionalization()
self.assertEqual(out_ref, torch._from_functional_tensor(out_functional))