2 changes: 1 addition & 1 deletion test/common_utils.py
@@ -141,7 +141,7 @@ def shell(command, cwd=None):
torch.half]

def fake_empty_like(*args, **kwargs):
-if 'memory_format' not in kwargs:
+if 'memory_format' not in kwargs and not args[0].is_sparse:
kwargs['memory_format'] = torch.contiguous_format
return torch.empty_like(*args, **kwargs)

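Note on the guard above: memory_format only applies to strided (dense) tensors, so the wrapper has to fall through for sparse inputs. A minimal sketch of the two paths, assuming torch.empty_like rejects an explicit memory_format on sparse tensors:

import torch

dense = torch.randn(2, 3)
sparse = dense.to_sparse()

# Dense tensors have strides, so forcing a contiguous layout is fine.
torch.empty_like(dense, memory_format=torch.contiguous_format)

# Sparse tensors have no strides; passing memory_format here is expected to raise,
# which is why fake_empty_like skips the kwarg when args[0].is_sparse.
torch.empty_like(sparse)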
8 changes: 4 additions & 4 deletions test/test_cuda.py
@@ -760,10 +760,10 @@ def test_manual_seed(self):
torch.cuda.manual_seed(2)
self.assertEqual(torch.cuda.initial_seed(), 2)
x.uniform_()
-a = torch.bernoulli(torch.full_like(x, 0.5))
+a = torch.bernoulli(torch.full_like(x, 0.5, memory_format=torch.preserve_format))
torch.cuda.manual_seed(2)
y = x.clone().uniform_()
-b = torch.bernoulli(torch.full_like(x, 0.5))
+b = torch.bernoulli(torch.full_like(x, 0.5, memory_format=torch.preserve_format))
self.assertEqual(x, y)
self.assertEqual(a, b)
self.assertEqual(torch.cuda.initial_seed(), 2)
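For reference, torch.preserve_format keeps the layout of the source tensor, while torch.contiguous_format forces a row-major result. A small sketch of the difference (not part of this test):

import torch

x = torch.randn(2, 3, 4, 5).contiguous(memory_format=torch.channels_last)

kept = torch.full_like(x, 0.5, memory_format=torch.preserve_format)
flat = torch.full_like(x, 0.5, memory_format=torch.contiguous_format)

print(kept.is_contiguous(memory_format=torch.channels_last))  # True: source layout preserved
print(flat.is_contiguous())                                    # True: forced row-major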
@@ -1860,7 +1860,7 @@ def backward(ctx, grad):
output = MultiplyInStream.apply(x)
output.sum().backward()

-self.assertEqual(x.grad, torch.ones_like(x) * 2)
+self.assertEqual(x.grad, torch.ones_like(x, memory_format=torch.preserve_format) * 2)
self.assertEqual(torch.cuda.current_stream(), default_stream)

def test_streaming_backwards_multiple_streams(self):
@@ -1895,7 +1895,7 @@ def accum_hook(grad):
model = StreamModel().cuda()
model(x).sum().backward()

-self.assertEqual(x.grad, torch.ones_like(x) * 5)
+self.assertEqual(x.grad, torch.ones_like(x, memory_format=torch.preserve_format) * 5)

@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_cuda_init_race(self):
2 changes: 1 addition & 1 deletion test/test_indexing.py
@@ -184,7 +184,7 @@ def test_index_setitem_bools_slices(self, device):
for a in tensors:
# prefix with a 1,1, to ensure we are compatible with numpy which cuts off prefix 1s
# (some of these ops already prefix a 1 to the size)
-neg_ones = torch.ones_like(a) * -1
+neg_ones = torch.ones_like(a, memory_format=torch.preserve_format) * -1
neg_ones_expanded = neg_ones.unsqueeze(0).unsqueeze(0)
a[True] = neg_ones_expanded
self.assertEqual(a, neg_ones)
20 changes: 12 additions & 8 deletions test/test_jit.py
@@ -40,6 +40,8 @@
import torch.nn.functional as F
from torch.quantization import QConfig
from torch.quantization._quantize_script import ConvPackedParams, LinearPackedParams
+from fake_operators import fake_empty_like, fake_rand_like, fake_randint_like, fake_randn_like, fake_ones_like, fake_zeros_like, fake_full_like


# Testing utils
import jit_utils
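The fake_* helpers imported here are presumably thin shims in the same spirit as fake_empty_like in common_utils.py, supplying an explicit memory_format before dispatching to the real factory. A sketch of the pattern for one of them (the actual fake_operators module may differ):

import torch

def fake_zeros_like(*args, **kwargs):
    # Sketch only: fill in an explicit default so eager calls line up with the scripted schema.
    kwargs.setdefault('memory_format', torch.contiguous_format)
    return torch.zeros_like(*args, **kwargs)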
@@ -395,7 +397,7 @@ def make_decision(flag, x):
if flag:
return x
else:
-return torch.zeros_like(x)
+return torch.zeros_like(x, memory_format=torch.contiguous_format)
x = torch.neg(x)
return make_decision(flag, x)

@@ -7145,10 +7147,10 @@ def s(t, to_str, non_blocking=None, device=None, cuda=None):
def test_copy_behavior(t, non_blocking=False):
self.assertIs(t, s(t, 't.to(t, non_blocking=non_blocking)', non_blocking))
self.assertIs(t, s(t, 't.to(t.dtype, non_blocking=non_blocking)', non_blocking))
-self.assertIs(t, s(t, 't.to(torch.empty_like(t), non_blocking=non_blocking)', non_blocking))
+self.assertIs(t, s(t, 't.to(torch.empty_like(t, memory_format=torch.contiguous_format), non_blocking=non_blocking)', non_blocking))
self.assertIsNot(t, s(t, 't.to(t, non_blocking=non_blocking, copy=True)', non_blocking))
self.assertIsNot(t, s(t, 't.to(t.dtype, non_blocking=non_blocking, copy=True)', non_blocking))
-self.assertIsNot(t, s(t, 't.to(torch.empty_like(t), non_blocking=non_blocking, copy=True)', non_blocking))
+self.assertIsNot(t, s(t, 't.to(torch.empty_like(t, memory_format=torch.contiguous_format), non_blocking=non_blocking, copy=True)', non_blocking))

devices = [t.device]
if t.device.type == 'cuda':
@@ -8073,9 +8075,9 @@ def forward(self, input):
d = m2.sub2.a.mm(input)
ref = a + b + m2.bias + m2.sub.weight + a + c + d
self.assertEqual(ref, m2.forward(input))
-m2.weight = nn.Parameter(torch.zeros_like(m2.weight))
-m2.bias = nn.Parameter(torch.zeros_like(m2.bias))
-m2.sub.weight = nn.Parameter(torch.zeros_like(m2.sub.weight))
+m2.weight = nn.Parameter(fake_zeros_like(m2.weight))
+m2.bias = nn.Parameter(fake_zeros_like(m2.bias))
+m2.sub.weight = nn.Parameter(fake_zeros_like(m2.sub.weight))
m2.sub2.a.data.zero_()
self.assertEqual(torch.zeros(2, 2), m2.forward(torch.randn(3, 2)))

@@ -13225,7 +13227,7 @@ def forward(self, x):
fb = FooBar()
fb.linear1.weight = torch.nn.Parameter(
torch.tensor([[-150, 100], [100, -150]], dtype=torch.float), requires_grad=False)
-fb.linear1.bias = torch.nn.Parameter(torch.zeros_like(fb.linear1.bias), requires_grad=False)
+fb.linear1.bias = torch.nn.Parameter(fake_zeros_like(fb.linear1.bias), requires_grad=False)

x = (torch.rand(1, K1).float() - 0.5) / 10.0
value = torch.tensor([[100, -150]], dtype=torch.float)
@@ -15661,7 +15663,7 @@ def forward(self,
result = torch.to(torch.fill_(_1, 5), dtype=6, layout=0, device=torch.device("cpu"),
non_blocking=False, copy=False)
result2 = torch.rand([10], dtype=6, layout=0, device=torch.device("cpu"))
-result3 = torch.rand_like(result2, dtype=6, layout=0, device=torch.device("cpu"), memory_format=torch.contiguous_format)
+result3 = torch.rand_like(result2, dtype=6, layout=0, device=torch.device("cpu"), memory_format=1)
_2 = torch.add(torch.add(result, result2, alpha=1), result3, alpha=1)
return _2
''',
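The expected serialization above now prints memory_format as a plain integer rather than the torch.contiguous_format symbol, apparently because the serializer records the enum by its underlying int value. One way to inspect what the scripted graph records (a sketch; the exact integer mapping is an implementation detail):

import torch

@torch.jit.script
def f(x):
    return torch.rand_like(x, memory_format=torch.contiguous_format)

# The graph dump shows the memory_format argument as an integer constant.
print(f.graph)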
@@ -17105,6 +17107,8 @@ def test_docs(self):
add_nn_module_test(**test)

if __name__ == '__main__':
+# import os
+# input(os.getpid())
run_tests()
if not PY2:
import test_jit_py3
4 changes: 2 additions & 2 deletions test/test_jit_fuser.py
@@ -781,7 +781,7 @@ def __init__(self):

@torch.jit.script_method
def create(self, x):
-return x * x + x + torch.rand_like(x)
+return x * x + x + torch.rand_like(x, memory_format=torch.contiguous_format)

x = torch.zeros([3, 4, 5], dtype=torch.float, device='cuda')
m = M()
@@ -822,7 +822,7 @@ def fn_test_erf(x):
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
def test_rand_broadcast_cuda(self):
def fn_test_rand(x, y):
-r = torch.rand_like(y)
+r = torch.rand_like(y, memory_format=torch.contiguous_format)
return r * x + x

x = torch.randn(4, 4, dtype=torch.float, device='cuda')
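The fuser tests now pass memory_format to rand_like explicitly; for reference, the same call pattern in a standalone scripted function looks roughly like this (a sketch, independent of the fuser harness):

import torch

@torch.jit.script
def add_noise(x):
    # rand_like with an explicit memory_format, matching the updated tests above
    return x * x + x + torch.rand_like(x, memory_format=torch.contiguous_format)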
12 changes: 6 additions & 6 deletions test/test_namedtensor.py
@@ -923,12 +923,12 @@ def fn_method_and_inplace(name, *args, **kwargs):
method('narrow', 0, 0, 1),

# creation functions
-fn('empty_like'),
-fn('zeros_like'),
-fn('ones_like'),
-fn('full_like', 3.14),
-fn('rand_like'),
-fn('randn_like'),
+fn('empty_like', memory_format=torch.preserve_format),
+fn('zeros_like', memory_format=torch.preserve_format),
+fn('ones_like', memory_format=torch.preserve_format),
+fn('full_like', 3.14, memory_format=torch.preserve_format),
+fn('rand_like', memory_format=torch.preserve_format),
+fn('randn_like', memory_format=torch.preserve_format),

# bernoulli variants
method('bernoulli_', 0.5),
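Each fn(...) entry above feeds the name-propagation check for the corresponding factory; stripped of the test harness, the check amounts to roughly the following (a sketch):

import torch

x = torch.randn(2, 3, names=('N', 'C'))
out = torch.empty_like(x, memory_format=torch.preserve_format)
assert out.names == ('N', 'C')  # the *_like factory should carry the names over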