Issue report: torchvision test suite failures on Windows (Python 3.7.4). The C++ model tests fail with `RuntimeError: undefined Tensor`, and several dataset/util tests fail with Windows `PermissionError` (file in use / access denied) during temp-file cleanup. Full test log follows:
Test log:
============================= test session starts =============================
platform win32 -- Python 3.7.4, pytest-5.0.1, py-1.8.0, pluggy-0.12.0
rootdir: C:\w\2\s\packaging\windows\vision
collected 187 items
test\test_backbone_utils.py .. [ 1%]
test\test_cpp_models.py FFFFF..FFFF........FFFFFFFFFF.. [ 17%]
test\test_datasets.py ..F...... [ 22%]
test\test_datasets_transforms.py .. [ 23%]
test\test_datasets_utils.py .....FFF. [ 28%]
test\test_datasets_video_utils.py ..FFss [ 31%]
test\test_io.py .FFFFF [ 34%]
test\test_models.py ................................................ [ 60%]
test\test_ops.py ..s..s.s.s.s.s.s.s.s [ 71%]
test\test_transforms.py ..........sss................................... [ 96%]
.. [ 97%]
test\test_utils.py ..FF [100%]
================================== FAILURES ===================================
_____________________________ Tester.test_alexnet _____________________________
self = <test_cpp_models.Tester testMethod=test_alexnet>
def test_alexnet(self):
> process_model(models.alexnet(self.pretrained), self.image, _C_tests.forward_alexnet, 'Alexnet')
test\test_cpp_models.py:43:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
model = AlexNet(
(features): Sequential(
(0): Conv2d(3, 64, kernel_size=(11, 11), stride=(4, 4), padding=(2, 2))
(1)...ures=4096, bias=True)
(5): ReLU(inplace=True)
(6): Linear(in_features=4096, out_features=1000, bias=True)
)
)
tensor = tensor([[[[0.0902, 0.1098, 0.1216, ..., 0.2824, 0.2314, 0.2392],
[0.0980, 0.0863, 0.1020, ..., 0.3333, 0.3...20, 0.1059, 0.0980, ..., 0.0667, 0.0784, 0.0706],
[0.1059, 0.0941, 0.0980, ..., 0.0588, 0.0667, 0.0667]]]])
func = <built-in method forward_alexnet of PyCapsule object at 0x000000323C5E9450>
name = 'Alexnet'
def process_model(model, tensor, func, name):
model.eval()
traced_script_module = torch.jit.trace(model, tensor)
traced_script_module.save("model.pt")
py_output = model.forward(tensor)
> cpp_output = func("model.pt", tensor)
E RuntimeError: undefined Tensor (infer_is_variable at C:\w\2\s\packaging\windows\conda\envs\py37\lib\site-packages\torch\include\ATen/Functions.h:1149)
E (no backtrace available)
test\test_cpp_models.py:16: RuntimeError
___________________________ Tester.test_densenet121 ___________________________
self = <test_cpp_models.Tester testMethod=test_densenet121>
def test_densenet121(self):
> process_model(models.densenet121(self.pretrained), self.image, _C_tests.forward_densenet121, 'Densenet121')
test\test_cpp_models.py:105:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
model = DenseNet(
(features): Sequential(
(conv0): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias....1, affine=True, track_running_stats=True)
)
(classifier): Linear(in_features=1024, out_features=1000, bias=True)
)
tensor = tensor([[[[0.0902, 0.1098, 0.1216, ..., 0.2824, 0.2314, 0.2392],
[0.0980, 0.0863, 0.1020, ..., 0.3333, 0.3...20, 0.1059, 0.0980, ..., 0.0667, 0.0784, 0.0706],
[0.1059, 0.0941, 0.0980, ..., 0.0588, 0.0667, 0.0667]]]])
func = <built-in method forward_densenet121 of PyCapsule object at 0x000000323C6005A0>
name = 'Densenet121'
def process_model(model, tensor, func, name):
model.eval()
traced_script_module = torch.jit.trace(model, tensor)
traced_script_module.save("model.pt")
py_output = model.forward(tensor)
> cpp_output = func("model.pt", tensor)
E RuntimeError: undefined Tensor (infer_is_variable at C:\w\2\s\packaging\windows\conda\envs\py37\lib\site-packages\torch\include\ATen/Functions.h:1149)
E (no backtrace available)
test\test_cpp_models.py:16: RuntimeError
___________________________ Tester.test_densenet161 ___________________________
self = <test_cpp_models.Tester testMethod=test_densenet161>
def test_densenet161(self):
> process_model(models.densenet161(self.pretrained), self.image, _C_tests.forward_densenet161, 'Densenet161')
test\test_cpp_models.py:114:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
model = DenseNet(
(features): Sequential(
(conv0): Conv2d(3, 96, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias....1, affine=True, track_running_stats=True)
)
(classifier): Linear(in_features=2208, out_features=1000, bias=True)
)
tensor = tensor([[[[0.0902, 0.1098, 0.1216, ..., 0.2824, 0.2314, 0.2392],
[0.0980, 0.0863, 0.1020, ..., 0.3333, 0.3...20, 0.1059, 0.0980, ..., 0.0667, 0.0784, 0.0706],
[0.1059, 0.0941, 0.0980, ..., 0.0588, 0.0667, 0.0667]]]])
func = <built-in method forward_densenet161 of PyCapsule object at 0x000000323C600BD0>
name = 'Densenet161'
def process_model(model, tensor, func, name):
model.eval()
traced_script_module = torch.jit.trace(model, tensor)
traced_script_module.save("model.pt")
py_output = model.forward(tensor)
> cpp_output = func("model.pt", tensor)
E RuntimeError: undefined Tensor (infer_is_variable at C:\w\2\s\packaging\windows\conda\envs\py37\lib\site-packages\torch\include\ATen/Functions.h:1149)
E (no backtrace available)
test\test_cpp_models.py:16: RuntimeError
___________________________ Tester.test_densenet169 ___________________________
self = <test_cpp_models.Tester testMethod=test_densenet169>
def test_densenet169(self):
> process_model(models.densenet169(self.pretrained), self.image, _C_tests.forward_densenet169, 'Densenet169')
test\test_cpp_models.py:108:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
model = DenseNet(
(features): Sequential(
(conv0): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias....1, affine=True, track_running_stats=True)
)
(classifier): Linear(in_features=1664, out_features=1000, bias=True)
)
tensor = tensor([[[[0.0902, 0.1098, 0.1216, ..., 0.2824, 0.2314, 0.2392],
[0.0980, 0.0863, 0.1020, ..., 0.3333, 0.3...20, 0.1059, 0.0980, ..., 0.0667, 0.0784, 0.0706],
[0.1059, 0.0941, 0.0980, ..., 0.0588, 0.0667, 0.0667]]]])
func = <built-in method forward_densenet169 of PyCapsule object at 0x000000323C6008D0>
name = 'Densenet169'
def process_model(model, tensor, func, name):
model.eval()
traced_script_module = torch.jit.trace(model, tensor)
traced_script_module.save("model.pt")
py_output = model.forward(tensor)
> cpp_output = func("model.pt", tensor)
E RuntimeError: undefined Tensor (infer_is_variable at C:\w\2\s\packaging\windows\conda\envs\py37\lib\site-packages\torch\include\ATen/Functions.h:1149)
E (no backtrace available)
test\test_cpp_models.py:16: RuntimeError
___________________________ Tester.test_densenet201 ___________________________
self = <test_cpp_models.Tester testMethod=test_densenet201>
def test_densenet201(self):
> process_model(models.densenet201(self.pretrained), self.image, _C_tests.forward_densenet201, 'Densenet201')
test\test_cpp_models.py:111:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
model = DenseNet(
(features): Sequential(
(conv0): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias....1, affine=True, track_running_stats=True)
)
(classifier): Linear(in_features=1920, out_features=1000, bias=True)
)
tensor = tensor([[[[0.0902, 0.1098, 0.1216, ..., 0.2824, 0.2314, 0.2392],
[0.0980, 0.0863, 0.1020, ..., 0.3333, 0.3...20, 0.1059, 0.0980, ..., 0.0667, 0.0784, 0.0706],
[0.1059, 0.0941, 0.0980, ..., 0.0588, 0.0667, 0.0667]]]])
func = <built-in method forward_densenet201 of PyCapsule object at 0x000000323C600B70>
name = 'Densenet201'
def process_model(model, tensor, func, name):
model.eval()
traced_script_module = torch.jit.trace(model, tensor)
traced_script_module.save("model.pt")
py_output = model.forward(tensor)
> cpp_output = func("model.pt", tensor)
E RuntimeError: undefined Tensor (infer_is_variable at C:\w\2\s\packaging\windows\conda\envs\py37\lib\site-packages\torch\include\ATen/Functions.h:1149)
E (no backtrace available)
test\test_cpp_models.py:16: RuntimeError
___________________________ Tester.test_mnasnet0_5 ____________________________
self = <test_cpp_models.Tester testMethod=test_mnasnet0_5>
def test_mnasnet0_5(self):
> process_model(models.mnasnet0_5(self.pretrained), self.image, _C_tests.forward_mnasnet0_5, 'MNASNet0_5')
test\test_cpp_models.py:123:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
model = MNASNet(
(layers): Sequential(
(0): Conv2d(3, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)...Sequential(
(0): Dropout(p=0.2, inplace=True)
(1): Linear(in_features=1280, out_features=1000, bias=True)
)
)
tensor = tensor([[[[0.0902, 0.1098, 0.1216, ..., 0.2824, 0.2314, 0.2392],
[0.0980, 0.0863, 0.1020, ..., 0.3333, 0.3...20, 0.1059, 0.0980, ..., 0.0667, 0.0784, 0.0706],
[0.1059, 0.0941, 0.0980, ..., 0.0588, 0.0667, 0.0667]]]])
func = <built-in method forward_mnasnet0_5 of PyCapsule object at 0x000000323C6009F0>
name = 'MNASNet0_5'
def process_model(model, tensor, func, name):
model.eval()
traced_script_module = torch.jit.trace(model, tensor)
traced_script_module.save("model.pt")
py_output = model.forward(tensor)
> cpp_output = func("model.pt", tensor)
E RuntimeError: undefined Tensor (infer_is_variable at C:\w\1\s\windows\pytorch\build\aten\src\ATen/Functions.h:1149)
E (no backtrace available)
test\test_cpp_models.py:16: RuntimeError
___________________________ Tester.test_mnasnet0_75 ___________________________
self = <test_cpp_models.Tester testMethod=test_mnasnet0_75>
def test_mnasnet0_75(self):
> process_model(models.mnasnet0_75(self.pretrained), self.image, _C_tests.forward_mnasnet0_75, 'MNASNet0_75')
test\test_cpp_models.py:126:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
model = MNASNet(
(layers): Sequential(
(0): Conv2d(3, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)...Sequential(
(0): Dropout(p=0.2, inplace=True)
(1): Linear(in_features=1280, out_features=1000, bias=True)
)
)
tensor = tensor([[[[0.0902, 0.1098, 0.1216, ..., 0.2824, 0.2314, 0.2392],
[0.0980, 0.0863, 0.1020, ..., 0.3333, 0.3...20, 0.1059, 0.0980, ..., 0.0667, 0.0784, 0.0706],
[0.1059, 0.0941, 0.0980, ..., 0.0588, 0.0667, 0.0667]]]])
func = <built-in method forward_mnasnet0_75 of PyCapsule object at 0x000000323C600A80>
name = 'MNASNet0_75'
def process_model(model, tensor, func, name):
model.eval()
traced_script_module = torch.jit.trace(model, tensor)
traced_script_module.save("model.pt")
py_output = model.forward(tensor)
> cpp_output = func("model.pt", tensor)
E RuntimeError: undefined Tensor (infer_is_variable at C:\w\1\s\windows\pytorch\build\aten\src\ATen/Functions.h:1149)
E (no backtrace available)
test\test_cpp_models.py:16: RuntimeError
___________________________ Tester.test_mnasnet1_0 ____________________________
self = <test_cpp_models.Tester testMethod=test_mnasnet1_0>
def test_mnasnet1_0(self):
> process_model(models.mnasnet1_0(self.pretrained), self.image, _C_tests.forward_mnasnet1_0, 'MNASNet1_0')
test\test_cpp_models.py:129:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
model = MNASNet(
(layers): Sequential(
(0): Conv2d(3, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)...Sequential(
(0): Dropout(p=0.2, inplace=True)
(1): Linear(in_features=1280, out_features=1000, bias=True)
)
)
tensor = tensor([[[[0.0902, 0.1098, 0.1216, ..., 0.2824, 0.2314, 0.2392],
[0.0980, 0.0863, 0.1020, ..., 0.3333, 0.3...20, 0.1059, 0.0980, ..., 0.0667, 0.0784, 0.0706],
[0.1059, 0.0941, 0.0980, ..., 0.0588, 0.0667, 0.0667]]]])
func = <built-in method forward_mnasnet1_0 of PyCapsule object at 0x000000323C8A9570>
name = 'MNASNet1_0'
def process_model(model, tensor, func, name):
model.eval()
traced_script_module = torch.jit.trace(model, tensor)
traced_script_module.save("model.pt")
py_output = model.forward(tensor)
> cpp_output = func("model.pt", tensor)
E RuntimeError: undefined Tensor (infer_is_variable at C:\w\1\s\windows\pytorch\build\aten\src\ATen/Functions.h:1149)
E (no backtrace available)
test\test_cpp_models.py:16: RuntimeError
___________________________ Tester.test_mnasnet1_3 ____________________________
self = <test_cpp_models.Tester testMethod=test_mnasnet1_3>
def test_mnasnet1_3(self):
> process_model(models.mnasnet1_3(self.pretrained), self.image, _C_tests.forward_mnasnet1_3, 'MNASNet1_3')
test\test_cpp_models.py:132:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
model = MNASNet(
(layers): Sequential(
(0): Conv2d(3, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)...Sequential(
(0): Dropout(p=0.2, inplace=True)
(1): Linear(in_features=1280, out_features=1000, bias=True)
)
)
tensor = tensor([[[[0.0902, 0.1098, 0.1216, ..., 0.2824, 0.2314, 0.2392],
[0.0980, 0.0863, 0.1020, ..., 0.3333, 0.3...20, 0.1059, 0.0980, ..., 0.0667, 0.0784, 0.0706],
[0.1059, 0.0941, 0.0980, ..., 0.0588, 0.0667, 0.0667]]]])
func = <built-in method forward_mnasnet1_3 of PyCapsule object at 0x000000323C8A9060>
name = 'MNASNet1_3'
def process_model(model, tensor, func, name):
model.eval()
traced_script_module = torch.jit.trace(model, tensor)
traced_script_module.save("model.pt")
py_output = model.forward(tensor)
> cpp_output = func("model.pt", tensor)
E RuntimeError: undefined Tensor (infer_is_variable at C:\w\1\s\windows\pytorch\build\aten\src\ATen/Functions.h:1149)
E (no backtrace available)
test\test_cpp_models.py:16: RuntimeError
__________________________ Tester.test_squeezenet1_0 __________________________
self = <test_cpp_models.Tester testMethod=test_squeezenet1_0>
def test_squeezenet1_0(self):
process_model(models.squeezenet1_0(self.pretrained), self.image,
> _C_tests.forward_squeezenet1_0, 'Squeezenet1.0')
test\test_cpp_models.py:98:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
model = SqueezeNet(
(features): Sequential(
(0): Conv2d(3, 96, kernel_size=(7, 7), stride=(2, 2))
(1): ReLU(inplace=...00, kernel_size=(1, 1), stride=(1, 1))
(2): ReLU(inplace=True)
(3): AdaptiveAvgPool2d(output_size=(1, 1))
)
)
tensor = tensor([[[[0.0902, 0.1098, 0.1216, ..., 0.2824, 0.2314, 0.2392],
[0.0980, 0.0863, 0.1020, ..., 0.3333, 0.3...20, 0.1059, 0.0980, ..., 0.0667, 0.0784, 0.0706],
[0.1059, 0.0941, 0.0980, ..., 0.0588, 0.0667, 0.0667]]]])
func = <built-in method forward_squeezenet1_0 of PyCapsule object at 0x000000323C600F60>
name = 'Squeezenet1.0'
def process_model(model, tensor, func, name):
model.eval()
traced_script_module = torch.jit.trace(model, tensor)
traced_script_module.save("model.pt")
py_output = model.forward(tensor)
> cpp_output = func("model.pt", tensor)
E RuntimeError: undefined Tensor (infer_is_variable at C:\w\2\s\packaging\windows\conda\envs\py37\lib\site-packages\torch\include\ATen/Functions.h:1149)
E (no backtrace available)
test\test_cpp_models.py:16: RuntimeError
__________________________ Tester.test_squeezenet1_1 __________________________
self = <test_cpp_models.Tester testMethod=test_squeezenet1_1>
def test_squeezenet1_1(self):
process_model(models.squeezenet1_1(self.pretrained), self.image,
> _C_tests.forward_squeezenet1_1, 'Squeezenet1.1')
test\test_cpp_models.py:102:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
model = SqueezeNet(
(features): Sequential(
(0): Conv2d(3, 64, kernel_size=(3, 3), stride=(2, 2))
(1): ReLU(inplace=...00, kernel_size=(1, 1), stride=(1, 1))
(2): ReLU(inplace=True)
(3): AdaptiveAvgPool2d(output_size=(1, 1))
)
)
tensor = tensor([[[[0.0902, 0.1098, 0.1216, ..., 0.2824, 0.2314, 0.2392],
[0.0980, 0.0863, 0.1020, ..., 0.3333, 0.3...20, 0.1059, 0.0980, ..., 0.0667, 0.0784, 0.0706],
[0.1059, 0.0941, 0.0980, ..., 0.0588, 0.0667, 0.0667]]]])
func = <built-in method forward_squeezenet1_1 of PyCapsule object at 0x000000323C600D50>
name = 'Squeezenet1.1'
def process_model(model, tensor, func, name):
model.eval()
traced_script_module = torch.jit.trace(model, tensor)
traced_script_module.save("model.pt")
py_output = model.forward(tensor)
> cpp_output = func("model.pt", tensor)
E RuntimeError: undefined Tensor (infer_is_variable at C:\w\2\s\packaging\windows\conda\envs\py37\lib\site-packages\torch\include\ATen/Functions.h:1149)
E (no backtrace available)
test\test_cpp_models.py:16: RuntimeError
______________________________ Tester.test_vgg11 ______________________________
self = <test_cpp_models.Tester testMethod=test_vgg11>
def test_vgg11(self):
> process_model(models.vgg11(self.pretrained), self.image, _C_tests.forward_vgg11, 'VGG11')
test\test_cpp_models.py:46:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
model = VGG(
(features): Sequential(
(0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(1): ReLU...lace=True)
(5): Dropout(p=0.5, inplace=False)
(6): Linear(in_features=4096, out_features=1000, bias=True)
)
)
tensor = tensor([[[[0.0902, 0.1098, 0.1216, ..., 0.2824, 0.2314, 0.2392],
[0.0980, 0.0863, 0.1020, ..., 0.3333, 0.3...20, 0.1059, 0.0980, ..., 0.0667, 0.0784, 0.0706],
[0.1059, 0.0941, 0.0980, ..., 0.0588, 0.0667, 0.0667]]]])
func = <built-in method forward_vgg11 of PyCapsule object at 0x000000323C5E97B0>
name = 'VGG11'
def process_model(model, tensor, func, name):
model.eval()
traced_script_module = torch.jit.trace(model, tensor)
traced_script_module.save("model.pt")
py_output = model.forward(tensor)
> cpp_output = func("model.pt", tensor)
E RuntimeError: undefined Tensor (infer_is_variable at C:\w\2\s\packaging\windows\conda\envs\py37\lib\site-packages\torch\include\ATen/Functions.h:1149)
E (no backtrace available)
test\test_cpp_models.py:16: RuntimeError
____________________________ Tester.test_vgg11_bn _____________________________
self = <test_cpp_models.Tester testMethod=test_vgg11_bn>
def test_vgg11_bn(self):
> process_model(models.vgg11_bn(self.pretrained), self.image, _C_tests.forward_vgg11bn, 'VGG11BN')
test\test_cpp_models.py:58:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
model = VGG(
(features): Sequential(
(0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(1): Batc...lace=True)
(5): Dropout(p=0.5, inplace=False)
(6): Linear(in_features=4096, out_features=1000, bias=True)
)
)
tensor = tensor([[[[0.0902, 0.1098, 0.1216, ..., 0.2824, 0.2314, 0.2392],
[0.0980, 0.0863, 0.1020, ..., 0.3333, 0.3...20, 0.1059, 0.0980, ..., 0.0667, 0.0784, 0.0706],
[0.1059, 0.0941, 0.0980, ..., 0.0588, 0.0667, 0.0667]]]])
func = <built-in method forward_vgg11bn of PyCapsule object at 0x000000323C5B5F30>
name = 'VGG11BN'
def process_model(model, tensor, func, name):
model.eval()
traced_script_module = torch.jit.trace(model, tensor)
traced_script_module.save("model.pt")
py_output = model.forward(tensor)
> cpp_output = func("model.pt", tensor)
E RuntimeError: undefined Tensor (infer_is_variable at C:\w\2\s\packaging\windows\conda\envs\py37\lib\site-packages\torch\include\ATen/Functions.h:1149)
E (no backtrace available)
test\test_cpp_models.py:16: RuntimeError
______________________________ Tester.test_vgg13 ______________________________
self = <test_cpp_models.Tester testMethod=test_vgg13>
def test_vgg13(self):
> process_model(models.vgg13(self.pretrained), self.image, _C_tests.forward_vgg13, 'VGG13')
test\test_cpp_models.py:49:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
model = VGG(
(features): Sequential(
(0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(1): ReLU...lace=True)
(5): Dropout(p=0.5, inplace=False)
(6): Linear(in_features=4096, out_features=1000, bias=True)
)
)
tensor = tensor([[[[0.0902, 0.1098, 0.1216, ..., 0.2824, 0.2314, 0.2392],
[0.0980, 0.0863, 0.1020, ..., 0.3333, 0.3...20, 0.1059, 0.0980, ..., 0.0667, 0.0784, 0.0706],
[0.1059, 0.0941, 0.0980, ..., 0.0588, 0.0667, 0.0667]]]])
func = <built-in method forward_vgg13 of PyCapsule object at 0x000000323C5E97E0>
name = 'VGG13'
def process_model(model, tensor, func, name):
model.eval()
traced_script_module = torch.jit.trace(model, tensor)
traced_script_module.save("model.pt")
py_output = model.forward(tensor)
> cpp_output = func("model.pt", tensor)
E RuntimeError: undefined Tensor (infer_is_variable at C:\w\1\s\windows\pytorch\build\aten\src\ATen/Functions.h:1149)
E (no backtrace available)
test\test_cpp_models.py:16: RuntimeError
____________________________ Tester.test_vgg13_bn _____________________________
self = <test_cpp_models.Tester testMethod=test_vgg13_bn>
def test_vgg13_bn(self):
> process_model(models.vgg13_bn(self.pretrained), self.image, _C_tests.forward_vgg13bn, 'VGG13BN')
test\test_cpp_models.py:61:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
model = VGG(
(features): Sequential(
(0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(1): Batc...lace=True)
(5): Dropout(p=0.5, inplace=False)
(6): Linear(in_features=4096, out_features=1000, bias=True)
)
)
tensor = tensor([[[[0.0902, 0.1098, 0.1216, ..., 0.2824, 0.2314, 0.2392],
[0.0980, 0.0863, 0.1020, ..., 0.3333, 0.3...20, 0.1059, 0.0980, ..., 0.0667, 0.0784, 0.0706],
[0.1059, 0.0941, 0.0980, ..., 0.0588, 0.0667, 0.0667]]]])
func = <built-in method forward_vgg13bn of PyCapsule object at 0x000000323C5B5E40>
name = 'VGG13BN'
def process_model(model, tensor, func, name):
model.eval()
traced_script_module = torch.jit.trace(model, tensor)
traced_script_module.save("model.pt")
py_output = model.forward(tensor)
> cpp_output = func("model.pt", tensor)
E RuntimeError: undefined Tensor (infer_is_variable at C:\w\1\s\windows\pytorch\build\aten\src\ATen/Functions.h:1149)
E (no backtrace available)
test\test_cpp_models.py:16: RuntimeError
______________________________ Tester.test_vgg16 ______________________________
self = <test_cpp_models.Tester testMethod=test_vgg16>
def test_vgg16(self):
> process_model(models.vgg16(self.pretrained), self.image, _C_tests.forward_vgg16, 'VGG16')
test\test_cpp_models.py:52:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
model = VGG(
(features): Sequential(
(0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(1): ReLU...lace=True)
(5): Dropout(p=0.5, inplace=False)
(6): Linear(in_features=4096, out_features=1000, bias=True)
)
)
tensor = tensor([[[[0.0902, 0.1098, 0.1216, ..., 0.2824, 0.2314, 0.2392],
[0.0980, 0.0863, 0.1020, ..., 0.3333, 0.3...20, 0.1059, 0.0980, ..., 0.0667, 0.0784, 0.0706],
[0.1059, 0.0941, 0.0980, ..., 0.0588, 0.0667, 0.0667]]]])
func = <built-in method forward_vgg16 of PyCapsule object at 0x000000323C5E9990>
name = 'VGG16'
def process_model(model, tensor, func, name):
model.eval()
traced_script_module = torch.jit.trace(model, tensor)
traced_script_module.save("model.pt")
py_output = model.forward(tensor)
> cpp_output = func("model.pt", tensor)
E RuntimeError: undefined Tensor (infer_is_variable at C:\w\1\s\windows\pytorch\build\aten\src\ATen/Functions.h:1149)
E (no backtrace available)
test\test_cpp_models.py:16: RuntimeError
____________________________ Tester.test_vgg16_bn _____________________________
self = <test_cpp_models.Tester testMethod=test_vgg16_bn>
def test_vgg16_bn(self):
> process_model(models.vgg16_bn(self.pretrained), self.image, _C_tests.forward_vgg16bn, 'VGG16BN')
test\test_cpp_models.py:64:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
model = VGG(
(features): Sequential(
(0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(1): Batc...lace=True)
(5): Dropout(p=0.5, inplace=False)
(6): Linear(in_features=4096, out_features=1000, bias=True)
)
)
tensor = tensor([[[[0.0902, 0.1098, 0.1216, ..., 0.2824, 0.2314, 0.2392],
[0.0980, 0.0863, 0.1020, ..., 0.3333, 0.3...20, 0.1059, 0.0980, ..., 0.0667, 0.0784, 0.0706],
[0.1059, 0.0941, 0.0980, ..., 0.0588, 0.0667, 0.0667]]]])
func = <built-in method forward_vgg16bn of PyCapsule object at 0x000000323C5B5EA0>
name = 'VGG16BN'
def process_model(model, tensor, func, name):
model.eval()
traced_script_module = torch.jit.trace(model, tensor)
traced_script_module.save("model.pt")
py_output = model.forward(tensor)
> cpp_output = func("model.pt", tensor)
E RuntimeError: undefined Tensor (infer_is_variable at C:\w\1\s\windows\pytorch\build\aten\src\ATen/Functions.h:1149)
E (no backtrace available)
test\test_cpp_models.py:16: RuntimeError
______________________________ Tester.test_vgg19 ______________________________
self = <test_cpp_models.Tester testMethod=test_vgg19>
def test_vgg19(self):
> process_model(models.vgg19(self.pretrained), self.image, _C_tests.forward_vgg19, 'VGG19')
test\test_cpp_models.py:55:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
model = VGG(
(features): Sequential(
(0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(1): ReLU...lace=True)
(5): Dropout(p=0.5, inplace=False)
(6): Linear(in_features=4096, out_features=1000, bias=True)
)
)
tensor = tensor([[[[0.0902, 0.1098, 0.1216, ..., 0.2824, 0.2314, 0.2392],
[0.0980, 0.0863, 0.1020, ..., 0.3333, 0.3...20, 0.1059, 0.0980, ..., 0.0667, 0.0784, 0.0706],
[0.1059, 0.0941, 0.0980, ..., 0.0588, 0.0667, 0.0667]]]])
func = <built-in method forward_vgg19 of PyCapsule object at 0x000000323C5B5ED0>
name = 'VGG19'
def process_model(model, tensor, func, name):
model.eval()
traced_script_module = torch.jit.trace(model, tensor)
traced_script_module.save("model.pt")
py_output = model.forward(tensor)
> cpp_output = func("model.pt", tensor)
E RuntimeError: undefined Tensor (infer_is_variable at C:\w\1\s\windows\pytorch\build\aten\src\ATen/Functions.h:1149)
E (no backtrace available)
test\test_cpp_models.py:16: RuntimeError
____________________________ Tester.test_vgg19_bn _____________________________
self = <test_cpp_models.Tester testMethod=test_vgg19_bn>
def test_vgg19_bn(self):
> process_model(models.vgg19_bn(self.pretrained), self.image, _C_tests.forward_vgg19bn, 'VGG19BN')
test\test_cpp_models.py:67:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
model = VGG(
(features): Sequential(
(0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(1): Batc...lace=True)
(5): Dropout(p=0.5, inplace=False)
(6): Linear(in_features=4096, out_features=1000, bias=True)
)
)
tensor = tensor([[[[0.0902, 0.1098, 0.1216, ..., 0.2824, 0.2314, 0.2392],
[0.0980, 0.0863, 0.1020, ..., 0.3333, 0.3...20, 0.1059, 0.0980, ..., 0.0667, 0.0784, 0.0706],
[0.1059, 0.0941, 0.0980, ..., 0.0588, 0.0667, 0.0667]]]])
func = <built-in method forward_vgg19bn of PyCapsule object at 0x000000323C5B5C90>
name = 'VGG19BN'
def process_model(model, tensor, func, name):
model.eval()
traced_script_module = torch.jit.trace(model, tensor)
traced_script_module.save("model.pt")
py_output = model.forward(tensor)
> cpp_output = func("model.pt", tensor)
E RuntimeError: undefined Tensor (infer_is_variable at C:\w\1\s\windows\pytorch\build\aten\src\ATen/Functions.h:1149)
E (no backtrace available)
test\test_cpp_models.py:16: RuntimeError
___________________________ Tester.test_cityscapes ____________________________
self = <test_datasets.Tester testMethod=test_cityscapes>
def test_cityscapes(self):
with cityscapes_root() as root:
for mode in ['coarse', 'fine']:
if mode == 'coarse':
splits = ['train', 'train_extra', 'val']
else:
splits = ['train', 'val', 'test']
for split in splits:
for target_type in ['semantic', 'instance']:
dataset = torchvision.datasets.Cityscapes(root, split=split,
target_type=target_type, mode=mode)
self.generic_segmentation_dataset_test(dataset, num_images=2)
color_dataset = torchvision.datasets.Cityscapes(root, split=split,
target_type='color', mode=mode)
color_img, color_target = color_dataset[0]
self.assertTrue(isinstance(color_img, PIL.Image.Image))
self.assertTrue(np.array(color_target).shape[2] == 4)
polygon_dataset = torchvision.datasets.Cityscapes(root, split=split,
target_type='polygon', mode=mode)
polygon_img, polygon_target = polygon_dataset[0]
self.assertTrue(isinstance(polygon_img, PIL.Image.Image))
self.assertTrue(isinstance(polygon_target, dict))
self.assertTrue(isinstance(polygon_target['imgHeight'], int))
self.assertTrue(isinstance(polygon_target['objects'], list))
# Test multiple target types
targets_combo = ['semantic', 'polygon', 'color']
multiple_types_dataset = torchvision.datasets.Cityscapes(root, split=split,
target_type=targets_combo,
mode=mode)
output = multiple_types_dataset[0]
self.assertTrue(isinstance(output, tuple))
self.assertTrue(len(output) == 2)
self.assertTrue(isinstance(output[0], PIL.Image.Image))
self.assertTrue(isinstance(output[1], tuple))
self.assertTrue(len(output[1]) == 3)
self.assertTrue(isinstance(output[1][0], PIL.Image.Image)) # semantic
self.assertTrue(isinstance(output[1][1], dict)) # polygon
> self.assertTrue(isinstance(output[1][2], PIL.Image.Image)) # color
test\test_datasets.py:195:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
..\conda\envs\py37\lib\contextlib.py:119: in __exit__
next(self.gen)
test\fakedata_generation.py:243: in cityscapes_root
yield tmp_dir
..\conda\envs\py37\lib\contextlib.py:119: in __exit__
next(self.gen)
test\common_utils.py:16: in get_tmp_dir
shutil.rmtree(tmp_dir)
..\conda\envs\py37\lib\shutil.py:516: in rmtree
return _rmtree_unsafe(path, onerror)
..\conda\envs\py37\lib\shutil.py:395: in _rmtree_unsafe
_rmtree_unsafe(fullname, onerror)
..\conda\envs\py37\lib\shutil.py:395: in _rmtree_unsafe
_rmtree_unsafe(fullname, onerror)
..\conda\envs\py37\lib\shutil.py:395: in _rmtree_unsafe
_rmtree_unsafe(fullname, onerror)
..\conda\envs\py37\lib\shutil.py:400: in _rmtree_unsafe
onerror(os.unlink, fullname, sys.exc_info())
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
path = 'C:\\Users\\ADMINI~1\\AppData\\Local\\Temp\\tmp5etnebcf\\gtFine\\test\\bochum'
onerror = <function rmtree.<locals>.onerror at 0x000000323F3BFDC8>
def _rmtree_unsafe(path, onerror):
try:
with os.scandir(path) as scandir_it:
entries = list(scandir_it)
except OSError:
onerror(os.scandir, path, sys.exc_info())
entries = []
for entry in entries:
fullname = entry.path
try:
is_dir = entry.is_dir(follow_symlinks=False)
except OSError:
is_dir = False
if is_dir:
try:
if entry.is_symlink():
# This can only happen if someone replaces
# a directory with a symlink after the call to
# os.scandir or entry.is_dir above.
raise OSError("Cannot call rmtree on a symbolic link")
except OSError:
onerror(os.path.islink, fullname, sys.exc_info())
continue
_rmtree_unsafe(fullname, onerror)
else:
try:
> os.unlink(fullname)
E PermissionError: [WinError 32] The process cannot access the file because it is being used by another process: 'C:\\Users\\ADMINI~1\\AppData\\Local\\Temp\\tmp5etnebcf\\gtFine\\test\\bochum\\bochum_000000_000000_gtFine_color.png'
..\conda\envs\py37\lib\shutil.py:398: PermissionError
__________________________ Tester.test_extract_gzip ___________________________
self = <test_datasets_utils.Tester testMethod=test_extract_gzip>
def test_extract_gzip(self):
with get_tmp_dir() as temp_dir:
with tempfile.NamedTemporaryFile(suffix='.gz') as f:
> with gzip.GzipFile(f.name, 'wb') as zf:
test\test_datasets_utils.py:101:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <[AttributeError("'GzipFile' object has no attribute 'fileobj'") raised in repr()] GzipFile object at 0x32007d28c8>
filename = 'C:\\Users\\ADMINI~1\\AppData\\Local\\Temp\\tmpc1wq6shu.gz'
mode = 'wb', compresslevel = 9, fileobj = None, mtime = None
def __init__(self, filename=None, mode=None,
compresslevel=9, fileobj=None, mtime=None):
"""Constructor for the GzipFile class.
At least one of fileobj and filename must be given a
non-trivial value.
The new class instance is based on fileobj, which can be a regular
file, an io.BytesIO object, or any other object which simulates a file.
It defaults to None, in which case filename is opened to provide
a file object.
When fileobj is not None, the filename argument is only used to be
included in the gzip file header, which may include the original
filename of the uncompressed file. It defaults to the filename of
fileobj, if discernible; otherwise, it defaults to the empty string,
and in this case the original filename is not included in the header.
The mode argument can be any of 'r', 'rb', 'a', 'ab', 'w', 'wb', 'x', or
'xb' depending on whether the file will be read or written. The default
is the mode of fileobj if discernible; otherwise, the default is 'rb'.
A mode of 'r' is equivalent to one of 'rb', and similarly for 'w' and
'wb', 'a' and 'ab', and 'x' and 'xb'.
The compresslevel argument is an integer from 0 to 9 controlling the
level of compression; 1 is fastest and produces the least compression,
and 9 is slowest and produces the most compression. 0 is no compression
at all. The default is 9.
The mtime argument is an optional numeric timestamp to be written
to the last modification time field in the stream when compressing.
If omitted or None, the current time is used.
"""
if mode and ('t' in mode or 'U' in mode):
raise ValueError("Invalid mode: {!r}".format(mode))
if mode and 'b' not in mode:
mode += 'b'
if fileobj is None:
> fileobj = self.myfileobj = builtins.open(filename, mode or 'rb')
E PermissionError: [Errno 13] Permission denied: 'C:\\Users\\ADMINI~1\\AppData\\Local\\Temp\\tmpc1wq6shu.gz'
..\conda\envs\py37\lib\gzip.py:163: PermissionError
___________________________ Tester.test_extract_tar ___________________________
self = <test_datasets_utils.Tester testMethod=test_extract_tar>
def test_extract_tar(self):
for ext, mode in zip(['.tar', '.tar.gz'], ['w', 'w:gz']):
with get_tmp_dir() as temp_dir:
with tempfile.NamedTemporaryFile() as bf:
bf.write("this is the content".encode())
bf.seek(0)
with tempfile.NamedTemporaryFile(suffix=ext) as f:
> with tarfile.open(f.name, mode=mode) as zf:
test\test_datasets_utils.py:90:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
..\conda\envs\py37\lib\tarfile.py:1611: in open
return cls.taropen(name, mode, fileobj, **kwargs)
..\conda\envs\py37\lib\tarfile.py:1621: in taropen
return cls(name, mode, fileobj, **kwargs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <tarfile.TarFile object at 0x0000003200A91AC8>
name = 'C:\\Users\\ADMINI~1\\AppData\\Local\\Temp\\tmplby3znrd.tar', mode = 'w'
fileobj = None, format = None, tarinfo = None, dereference = None
ignore_zeros = None, encoding = None, errors = 'surrogateescape'
pax_headers = None, debug = None, errorlevel = None, copybufsize = None
def __init__(self, name=None, mode="r", fileobj=None, format=None,
tarinfo=None, dereference=None, ignore_zeros=None, encoding=None,
errors="surrogateescape", pax_headers=None, debug=None,
errorlevel=None, copybufsize=None):
"""Open an (uncompressed) tar archive `name'. `mode' is either 'r' to
read from an existing archive, 'a' to append data to an existing
file or 'w' to create a new file overwriting an existing one. `mode'
defaults to 'r'.
If `fileobj' is given, it is used for reading or writing data. If it
can be determined, `mode' is overridden by `fileobj's mode.
`fileobj' is not closed, when TarFile is closed.
"""
modes = {"r": "rb", "a": "r+b", "w": "wb", "x": "xb"}
if mode not in modes:
raise ValueError("mode must be 'r', 'a', 'w' or 'x'")
self.mode = mode
self._mode = modes[mode]
if not fileobj:
if self.mode == "a" and not os.path.exists(name):
# Create nonexistent files in append mode.
self.mode = "w"
self._mode = "wb"
> fileobj = bltn_open(name, self._mode)
E PermissionError: [Errno 13] Permission denied: 'C:\\Users\\ADMINI~1\\AppData\\Local\\Temp\\tmplby3znrd.tar'
..\conda\envs\py37\lib\tarfile.py:1436: PermissionError
___________________________ Tester.test_extract_zip ___________________________
self = <test_datasets_utils.Tester testMethod=test_extract_zip>
def test_extract_zip(self):
with get_tmp_dir() as temp_dir:
with tempfile.NamedTemporaryFile(suffix='.zip') as f:
with zipfile.ZipFile(f, 'w') as zf:
zf.writestr('file.tst', 'this is the content')
> utils.extract_archive(f.name, temp_dir)
test\test_datasets_utils.py:77:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
..\conda\envs\py37\lib\site-packages\torchvision\datasets\utils.py:231: in extract_archive
with zipfile.ZipFile(from_path, 'r') as z:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <zipfile.ZipFile [closed]>
file = 'C:\\Users\\ADMINI~1\\AppData\\Local\\Temp\\tmpiwmc4x4z.zip', mode = 'r'
compression = 0, allowZip64 = True, compresslevel = None
def __init__(self, file, mode="r", compression=ZIP_STORED, allowZip64=True,
compresslevel=None):
"""Open the ZIP file with mode read 'r', write 'w', exclusive create 'x',
or append 'a'."""
if mode not in ('r', 'w', 'x', 'a'):
raise ValueError("ZipFile requires mode 'r', 'w', 'x', or 'a'")
_check_compression(compression)
self._allowZip64 = allowZip64
self._didModify = False
self.debug = 0 # Level of printing: 0 through 3
self.NameToInfo = {} # Find file info given name
self.filelist = [] # List of ZipInfo instances for archive
self.compression = compression # Method of compression
self.compresslevel = compresslevel
self.mode = mode
self.pwd = None
self._comment = b''
# Check if we were passed a file-like object
if isinstance(file, os.PathLike):
file = os.fspath(file)
if isinstance(file, str):
# No, it's a filename
self._filePassed = 0
self.filename = file
modeDict = {'r' : 'rb', 'w': 'w+b', 'x': 'x+b', 'a' : 'r+b',
'r+b': 'w+b', 'w+b': 'wb', 'x+b': 'xb'}
filemode = modeDict[mode]
while True:
try:
> self.fp = io.open(file, filemode)
E PermissionError: [Errno 13] Permission denied: 'C:\\Users\\ADMINI~1\\AppData\\Local\\Temp\\tmpiwmc4x4z.zip'
..\conda\envs\py37\lib\zipfile.py:1207: PermissionError
___________________________ Tester.test_video_clips ___________________________
self = <test_datasets_video_utils.Tester testMethod=test_video_clips>
def test_video_clips(self):
with get_list_of_videos(num_videos=3) as video_list:
> video_clips = VideoClips(video_list, 5, 5)
test\test_datasets_video_utils.py:62:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
..\conda\envs\py37\lib\site-packages\torchvision\datasets\video_utils.py:55: in __init__
self._compute_frame_pts()
..\conda\envs\py37\lib\site-packages\torchvision\datasets\video_utils.py:84: in _compute_frame_pts
for batch in dl:
..\conda\envs\py37\lib\site-packages\torch\utils\data\dataloader.py:278: in __iter__
return _MultiProcessingDataLoaderIter(self)
..\conda\envs\py37\lib\site-packages\torch\utils\data\dataloader.py:682: in __init__
w.start()
..\conda\envs\py37\lib\multiprocessing\process.py:112: in start
self._popen = self._Popen(self)
..\conda\envs\py37\lib\multiprocessing\context.py:223: in _Popen
return _default_context.get_context().Process._Popen(process_obj)
..\conda\envs\py37\lib\multiprocessing\context.py:322: in _Popen
return Popen(process_obj)
..\conda\envs\py37\lib\multiprocessing\popen_spawn_win32.py:89: in __init__
reduction.dump(process_obj, to_child)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
obj = <Process(Process-1, initial daemon)>, file = <_io.BufferedWriter name=10>
protocol = None
def dump(obj, file, protocol=None):
'''Replacement for pickle.dump() using ForkingPickler.'''
> ForkingPickler(file, protocol).dump(obj)
E AttributeError: Can't pickle local object 'VideoClips._compute_frame_pts.<locals>.DS'
..\conda\envs\py37\lib\multiprocessing\reduction.py:60: AttributeError
---------------------------- Captured stderr call -----------------------------
_____________________ Tester.test_video_clips_custom_fps ______________________
self = <test_datasets_video_utils.Tester testMethod=test_video_clips_custom_fps>
def test_video_clips_custom_fps(self):
with get_list_of_videos(num_videos=3, sizes=[12, 12, 12], fps=[3, 4, 6]) as video_list:
num_frames = 4
for fps in [1, 3, 4, 10]:
> video_clips = VideoClips(video_list, num_frames, num_frames, fps)
test\test_datasets_video_utils.py:117:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
..\conda\envs\py37\lib\site-packages\torchvision\datasets\video_utils.py:55: in __init__
self._compute_frame_pts()
..\conda\envs\py37\lib\site-packages\torchvision\datasets\video_utils.py:84: in _compute_frame_pts
for batch in dl:
..\conda\envs\py37\lib\site-packages\torch\utils\data\dataloader.py:278: in __iter__
return _MultiProcessingDataLoaderIter(self)
..\conda\envs\py37\lib\site-packages\torch\utils\data\dataloader.py:682: in __init__
w.start()
..\conda\envs\py37\lib\multiprocessing\process.py:112: in start
self._popen = self._Popen(self)
..\conda\envs\py37\lib\multiprocessing\context.py:223: in _Popen
return _default_context.get_context().Process._Popen(process_obj)
..\conda\envs\py37\lib\multiprocessing\context.py:322: in _Popen
return Popen(process_obj)
..\conda\envs\py37\lib\multiprocessing\popen_spawn_win32.py:89: in __init__
reduction.dump(process_obj, to_child)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
obj = <Process(Process-2, initial daemon)>, file = <_io.BufferedWriter name=10>
protocol = None
def dump(obj, file, protocol=None):
'''Replacement for pickle.dump() using ForkingPickler.'''
> ForkingPickler(file, protocol).dump(obj)
E AttributeError: Can't pickle local object 'VideoClips._compute_frame_pts.<locals>.DS'
..\conda\envs\py37\lib\multiprocessing\reduction.py:60: AttributeError
---------------------------- Captured stderr call -----------------------------
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "c:\w\2\s\packaging\windows\conda\envs\py37\lib\multiprocessing\spawn.py", line 105, in spawn_main
exitcode = _main(fd)
File "c:\w\2\s\packaging\windows\conda\envs\py37\lib\multiprocessing\spawn.py", line 115, in _main
self = reduction.pickle.load(from_parent)
EOFError: Ran out of input
-------------------------- Captured stderr teardown ---------------------------
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "c:\w\2\s\packaging\windows\conda\envs\py37\lib\multiprocessing\spawn.py", line 105, in spawn_main
exitcode = _main(fd)
File "c:\w\2\s\packaging\windows\conda\envs\py37\lib\multiprocessing\spawn.py", line 115, in _main
self = reduction.pickle.load(from_parent)
EOFError: Ran out of input
_______________________ Tester.test_read_partial_video ________________________
self = <test_io.Tester testMethod=test_read_partial_video>
def test_read_partial_video(self):
> with temp_video(10, 300, 300, 5, lossless=True) as (f_name, data):
test\test_io.py:84:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
..\conda\envs\py37\lib\contextlib.py:112: in __enter__
return next(self.gen)
test\test_io.py:51: in temp_video
io.write_video(f.name, data, fps=fps, video_codec=video_codec, options=options)
..\conda\envs\py37\lib\site-packages\torchvision\io\video.py:55: in write_video
container.mux(packet)
av/container/output.pyx:198: in av.container.output.OutputContainer.mux
???
av/container/output.pyx:204: in av.container.output.OutputContainer.mux_one
???
av/container/output.pyx:166: in av.container.output.OutputContainer.start_encoding
???
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
> ???
E av.AVError: [Errno 13] Permission denied
av/utils.pyx:109: AVError
___________________ Tester.test_read_partial_video_bframes ____________________
self = <test_io.Tester testMethod=test_read_partial_video_bframes>
def test_read_partial_video_bframes(self):
# do not use lossless encoding, to test the presence of B-frames
options = {'bframes': '16', 'keyint': '10', 'min-keyint': '4'}
> with temp_video(100, 300, 300, 5, options=options) as (f_name, data):
test\test_io.py:100:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
..\conda\envs\py37\lib\contextlib.py:112: in __enter__
return next(self.gen)
test\test_io.py:51: in temp_video
io.write_video(f.name, data, fps=fps, video_codec=video_codec, options=options)
..\conda\envs\py37\lib\site-packages\torchvision\io\video.py:55: in write_video
container.mux(packet)
av/container/output.pyx:198: in av.container.output.OutputContainer.mux
???
av/container/output.pyx:204: in av.container.output.OutputContainer.mux_one
???
av/container/output.pyx:166: in av.container.output.OutputContainer.start_encoding
???
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
> ???
E av.AVError: [Errno 13] Permission denied
av/utils.pyx:109: AVError
_________________________ Tester.test_read_timestamps _________________________
self = <test_io.Tester testMethod=test_read_timestamps>
def test_read_timestamps(self):
> with temp_video(10, 300, 300, 5) as (f_name, data):
test\test_io.py:69:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
..\conda\envs\py37\lib\contextlib.py:112: in __enter__
return next(self.gen)
test\test_io.py:51: in temp_video
io.write_video(f.name, data, fps=fps, video_codec=video_codec, options=options)
..\conda\envs\py37\lib\site-packages\torchvision\io\video.py:59: in write_video
container.mux(packet)
av/container/output.pyx:198: in av.container.output.OutputContainer.mux
???
av/container/output.pyx:204: in av.container.output.OutputContainer.mux_one
???
av/container/output.pyx:166: in av.container.output.OutputContainer.start_encoding
???
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
> ???
E av.AVError: [Errno 13] Permission denied
av/utils.pyx:109: AVError
___________________ Tester.test_read_timestamps_from_packet ___________________
self = <test_io.Tester testMethod=test_read_timestamps_from_packet>
def test_read_timestamps_from_packet(self):
> with temp_video(10, 300, 300, 5, video_codec='mpeg4') as (f_name, data):
test\test_io.py:129:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
..\conda\envs\py37\lib\contextlib.py:112: in __enter__
return next(self.gen)
test\test_io.py:51: in temp_video
io.write_video(f.name, data, fps=fps, video_codec=video_codec, options=options)
..\conda\envs\py37\lib\site-packages\torchvision\io\video.py:55: in write_video
container.mux(packet)
av/container/output.pyx:198: in av.container.output.OutputContainer.mux
???
av/container/output.pyx:204: in av.container.output.OutputContainer.mux_one
???
av/container/output.pyx:166: in av.container.output.OutputContainer.start_encoding
???
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
> ???
E av.AVError: [Errno 13] Permission denied
av/utils.pyx:109: AVError
________________________ Tester.test_write_read_video _________________________
self = <test_io.Tester testMethod=test_write_read_video>
def test_write_read_video(self):
> with temp_video(10, 300, 300, 5, lossless=True) as (f_name, data):
test\test_io.py:62:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
..\conda\envs\py37\lib\contextlib.py:112: in __enter__
return next(self.gen)
test\test_io.py:51: in temp_video
io.write_video(f.name, data, fps=fps, video_codec=video_codec, options=options)
..\conda\envs\py37\lib\site-packages\torchvision\io\video.py:55: in write_video
container.mux(packet)
av/container/output.pyx:198: in av.container.output.OutputContainer.mux
???
av/container/output.pyx:204: in av.container.output.OutputContainer.mux_one
???
av/container/output.pyx:166: in av.container.output.OutputContainer.start_encoding
???
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
> ???
E av.AVError: [Errno 13] Permission denied
av/utils.pyx:109: AVError
___________________________ Tester.test_save_image ____________________________
self = <test_utils.Tester testMethod=test_save_image>
def test_save_image(self):
with tempfile.NamedTemporaryFile(suffix='.png') as f:
t = torch.rand(2, 3, 64, 64)
> utils.save_image(t, f.name)
test\test_utils.py:43:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
..\conda\envs\py37\lib\site-packages\torchvision\utils.py:105: in save_image
im.save(filename)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <PIL.Image.Image image mode=RGB size=134x68 at 0x323F3755C8>
fp = 'C:\\Users\\ADMINI~1\\AppData\\Local\\Temp\\tmpm0s9rq8o.png'
format = 'PNG', params = {}
filename = 'C:\\Users\\ADMINI~1\\AppData\\Local\\Temp\\tmpm0s9rq8o.png'
open_fp = True, save_all = False, ext = '.png'
save_handler = <function _save at 0x000000323CA1FA68>
def save(self, fp, format=None, **params):
"""
Saves this image under the given filename. If no format is
specified, the format to use is determined from the filename
extension, if possible.
Keyword options can be used to provide additional instructions
to the writer. If a writer doesn't recognise an option, it is
silently ignored. The available options are described in the
:doc:`image format documentation
<../handbook/image-file-formats>` for each writer.
You can use a file object instead of a filename. In this case,
you must always specify the format. The file object must
implement the ``seek``, ``tell``, and ``write``
methods, and be opened in binary mode.
:param fp: A filename (string), pathlib.Path object or file object.
:param format: Optional format override. If omitted, the
format to use is determined from the filename extension.
If a file object was used instead of a filename, this
parameter should always be used.
:param params: Extra parameters to the image writer.
:returns: None
:exception ValueError: If the output format could not be determined
from the file name. Use the format option to solve this.
:exception IOError: If the file could not be written. The file
may have been created, and may contain partial data.
"""
filename = ""
open_fp = False
if isPath(fp):
filename = fp
open_fp = True
elif HAS_PATHLIB and isinstance(fp, Path):
filename = str(fp)
open_fp = True
if not filename and hasattr(fp, "name") and isPath(fp.name):
# only set the name for metadata purposes
filename = fp.name
# may mutate self!
self._ensure_mutable()
save_all = params.pop("save_all", False)
self.encoderinfo = params
self.encoderconfig = ()
preinit()
ext = os.path.splitext(filename)[1].lower()
if not format:
if ext not in EXTENSION:
init()
try:
format = EXTENSION[ext]
except KeyError:
raise ValueError("unknown file extension: {}".format(ext))
if format.upper() not in SAVE:
init()
if save_all:
save_handler = SAVE_ALL[format.upper()]
else:
save_handler = SAVE[format.upper()]
if open_fp:
if params.get("append", False):
fp = builtins.open(filename, "r+b")
else:
# Open also for reading ("+"), because TIFF save_all
# writer needs to go back and edit the written data.
> fp = builtins.open(filename, "w+b")
E PermissionError: [Errno 13] Permission denied: 'C:\\Users\\ADMINI~1\\AppData\\Local\\Temp\\tmpm0s9rq8o.png'
..\conda\envs\py37\lib\site-packages\PIL\Image.py:2085: PermissionError
_____________________ Tester.test_save_image_single_pixel _____________________
self = <test_utils.Tester testMethod=test_save_image_single_pixel>
def test_save_image_single_pixel(self):
with tempfile.NamedTemporaryFile(suffix='.png') as f:
t = torch.rand(1, 3, 1, 1)
> utils.save_image(t, f.name)
test\test_utils.py:49:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
..\conda\envs\py37\lib\site-packages\torchvision\utils.py:105: in save_image
im.save(filename)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <PIL.Image.Image image mode=RGB size=1x1 at 0x323F678748>
fp = 'C:\\Users\\ADMINI~1\\AppData\\Local\\Temp\\tmpn5hd8b_0.png'
format = 'PNG', params = {}
filename = 'C:\\Users\\ADMINI~1\\AppData\\Local\\Temp\\tmpn5hd8b_0.png'
open_fp = True, save_all = False, ext = '.png'
save_handler = <function _save at 0x000000323CA1FA68>
def save(self, fp, format=None, **params):
"""
Saves this image under the given filename. If no format is
specified, the format to use is determined from the filename
extension, if possible.
Keyword options can be used to provide additional instructions
to the writer. If a writer doesn't recognise an option, it is
silently ignored. The available options are described in the
:doc:`image format documentation
<../handbook/image-file-formats>` for each writer.
You can use a file object instead of a filename. In this case,
you must always specify the format. The file object must
implement the ``seek``, ``tell``, and ``write``
methods, and be opened in binary mode.
:param fp: A filename (string), pathlib.Path object or file object.
:param format: Optional format override. If omitted, the
format to use is determined from the filename extension.
If a file object was used instead of a filename, this
parameter should always be used.
:param params: Extra parameters to the image writer.
:returns: None
:exception ValueError: If the output format could not be determined
from the file name. Use the format option to solve this.
:exception IOError: If the file could not be written. The file
may have been created, and may contain partial data.
"""
filename = ""
open_fp = False
if isPath(fp):
filename = fp
open_fp = True
elif HAS_PATHLIB and isinstance(fp, Path):
filename = str(fp)
open_fp = True
if not filename and hasattr(fp, "name") and isPath(fp.name):
# only set the name for metadata purposes
filename = fp.name
# may mutate self!
self._ensure_mutable()
save_all = params.pop("save_all", False)
self.encoderinfo = params
self.encoderconfig = ()
preinit()
ext = os.path.splitext(filename)[1].lower()
if not format:
if ext not in EXTENSION:
init()
try:
format = EXTENSION[ext]
except KeyError:
raise ValueError("unknown file extension: {}".format(ext))
if format.upper() not in SAVE:
init()
if save_all:
save_handler = SAVE_ALL[format.upper()]
else:
save_handler = SAVE[format.upper()]
if open_fp:
if params.get("append", False):
fp = builtins.open(filename, "r+b")
else:
# Open also for reading ("+"), because TIFF save_all
# writer needs to go back and edit the written data.
> fp = builtins.open(filename, "w+b")
E PermissionError: [Errno 13] Permission denied: 'C:\\Users\\ADMINI~1\\AppData\\Local\\Temp\\tmpn5hd8b_0.png'
..\conda\envs\py37\lib\site-packages\PIL\Image.py:2085: PermissionError
============================== warnings summary ===============================
c:\w\2\s\packaging\windows\conda\envs\py37\lib\site-packages\torchvision\datasets\lsun.py:8
c:\w\2\s\packaging\windows\conda\envs\py37\lib\site-packages\torchvision\datasets\lsun.py:8: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working
from collections import Iterable
c:\w\2\s\packaging\windows\conda\envs\py37\lib\site-packages\av\container\__init__.py:1
c:\w\2\s\packaging\windows\conda\envs\py37\lib\site-packages\av\container\__init__.py:1: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working
from .core import Container, open
test/test_datasets.py::Tester::test_imagenet
c:\w\2\s\packaging\windows\conda\envs\py37\lib\importlib\_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject
return f(*args, **kwds)
test/test_transforms.py::Tester::test_randomperspective
c:\w\2\s\packaging\windows\conda\envs\py37\lib\site-packages\torchvision\transforms\functional.py:440: UserWarning: torch.gels is deprecated in favour of torch.lstsq and will be removed in the next release. Please use torch.lstsq instead.
res = torch.gels(B, A)[0]
-- Docs: https://docs.pytest.org/en/latest/warnings.html
======= 32 failed, 141 passed, 14 skipped, 4 warnings in 407.68 seconds =======
cc @peterjc123 @nbcsm @guyang3532 @maxluk @gunandrose4u @smartcat2010 @mszhanyi