123 changes: 66 additions & 57 deletions tests/cpu/test_mlp.py
@@ -1,63 +1,72 @@
import torch
import math
import random
import unittest
import time

from functools import reduce
import torch
import intel_pytorch_extension as ipex

import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.nn import Parameter
import torch.nn.functional as F
from torch.autograd import gradcheck
from torch.autograd.gradcheck import gradgradcheck
from torch._six import inf, nan

from common_utils import TestCase, iter_indices, TEST_NUMPY, TEST_SCIPY, TEST_MKL, \
    TEST_LIBROSA, run_tests, download_file, skipIfNoLapack, suppress_warnings, \
    IS_WINDOWS, PY3, NO_MULTIPROCESSING_SPAWN, do_test_dtypes, do_test_empty_full, \
    IS_SANDCASTLE, load_tests, brute_pdist, brute_cdist, slowTest, \
    skipCUDANonDefaultStreamIf, skipCUDAMemoryLeakCheckIf

K = 1   # 128
C = 16  # 64
MB = 28

def get_rand_seed():
    return int(time.time() * 1000000000)

def _ipex_linear(random_seed, data_type = torch.float32):
    torch.manual_seed(random_seed)
    fc = ipex.IpexMLPLinear(C, K).to(data_type)
    return fc

def _cpu_linear(random_seed, data_type = torch.float32):
    torch.manual_seed(random_seed)
    fc = torch.nn.Linear(C, K).to(data_type)
    return fc

def _run_mlp(random_seed, fc_module, data_type = torch.float32):
    torch.manual_seed(random_seed)
    # Make x1 a leaf tensor in the target dtype so x1.grad is populated by backward().
    x1 = torch.randn(MB, C).to(data_type).requires_grad_(True)
    y1 = fc_module(x1)
    z1 = y1.mean()
    z1.backward()
    return x1.grad, fc_module.weight.grad, fc_module.bias.grad

for data_type in [torch.float32, torch.bfloat16]:
    seed = get_rand_seed()
    ipex_fc = _ipex_linear(seed, data_type)
    cpu_fc = _cpu_linear(seed, data_type)

    rtol = 1e-5
    atol = rtol
    if data_type == torch.bfloat16:
        rtol = 1e-2
        atol = rtol

    seed = get_rand_seed()
    input_grad_ipex, weight_grad_ipex, bias_grad_ipex = _run_mlp(seed, ipex_fc, data_type)
    input_grad_cpu, weight_grad_cpu, bias_grad_cpu = _run_mlp(seed, cpu_fc, data_type)

    if input_grad_ipex is None:
        if input_grad_cpu is not None:
            print("##################### {} MLP input grad FAIL".format(str(data_type)))
        else:
            print("##################### {} MLP input grad PASS".format(str(data_type)))
    else:
        if not input_grad_ipex.to(torch.float32).allclose(input_grad_cpu.to(torch.float32), rtol=rtol, atol=atol):
            print("##################### {} MLP input grad FAIL".format(str(data_type)))
        else:
            print("##################### {} MLP input grad PASS".format(str(data_type)))

    if not weight_grad_ipex.to(torch.float32).allclose(weight_grad_cpu.to(torch.float32), rtol=rtol, atol=atol):
        print("##################### {} MLP weight grad FAIL".format(str(data_type)))
    else:
        print("##################### {} MLP weight grad PASS".format(str(data_type)))

    if not bias_grad_ipex.to(torch.float32).allclose(bias_grad_cpu.to(torch.float32), rtol=rtol, atol=atol):
        print("##################### {} MLP bias grad FAIL".format(str(data_type)))
    else:
        print("##################### {} MLP bias grad PASS".format(str(data_type)))

class TestMLPCases(TestCase):
    def get_rand_seed(self):
        return int(time.time() * 1000000000)

    def _ipex_linear(self, random_seed, data_type = torch.float32):
        torch.manual_seed(random_seed)
        fc = ipex.IpexMLPLinear(C, K).to(data_type)
        return fc

    def _cpu_linear(self, random_seed, data_type = torch.float32):
        torch.manual_seed(random_seed)
        fc = torch.nn.Linear(C, K).to(data_type)
        return fc

    def _run_mlp(self, random_seed, fc_module, data_type = torch.float32):
        torch.manual_seed(random_seed)
        # Make x1 a leaf tensor in the target dtype so x1.grad is populated by backward().
        x1 = torch.randn(MB, C).to(data_type).requires_grad_(True)
        y1 = fc_module(x1)
        z1 = y1.mean()
        z1.backward()
        return x1.grad, fc_module.weight.grad, fc_module.bias.grad

    def test_mlp(self):
        for data_type in [torch.float32, torch.bfloat16]:
            prec = 1e-5
            if data_type == torch.bfloat16:
                prec = 1.2e-2
            seed = self.get_rand_seed()
            ipex_fc = self._ipex_linear(seed, data_type)
            cpu_fc = self._cpu_linear(seed, data_type)

            seed = self.get_rand_seed()
            input_grad_ipex, weight_grad_ipex, bias_grad_ipex = self._run_mlp(seed, ipex_fc, data_type)
            input_grad_cpu, weight_grad_cpu, bias_grad_cpu = self._run_mlp(seed, cpu_fc, data_type)

            if input_grad_ipex is None:
                self.assertTrue(input_grad_cpu is None)
            else:
                self.assertEqual(input_grad_ipex.to(torch.float32), input_grad_cpu.to(torch.float32), prec)
            self.assertEqual(weight_grad_ipex.to(torch.float32), weight_grad_cpu.to(torch.float32), prec)
            self.assertEqual(bias_grad_ipex.to(torch.float32), bias_grad_cpu.to(torch.float32), prec)

if __name__ == '__main__':
    test = unittest.main()
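Note on the tolerances: float32 comparisons use 1e-5, while bfloat16 is loosened to roughly 1e-2 because a single float32-to-bfloat16 cast already carries a relative rounding error of up to 2 ** -8 (about 0.4%), and the error grows as gradients are accumulated through the matmul. A standalone sketch of that bound (plain PyTorch, no IPEX required; the 1000-element tensor is just illustrative):

import torch

# One float32 -> bfloat16 round trip: relative error is bounded by 2 ** -8
# under round-to-nearest, which is why the test loosens prec for bfloat16.
x = torch.randn(1000)
rel_err = ((x.to(torch.bfloat16).to(torch.float32) - x).abs() / x.abs()).max()
print(rel_err.item())  # a few times 1e-3, comfortably under the 1.2e-2 prec
assert rel_err <= 2 ** -8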
1 change: 1 addition & 0 deletions tests/cpu/test_rn50_cpu_ops.py
@@ -827,6 +827,7 @@ def test_avg_pool3d_with_zero_divisor(self):
        self.assertRaisesRegex(RuntimeError, "divisor must be not zero",
                               lambda: torch.nn.functional.avg_pool3d(torch.zeros(3, 3, 3, 3), (2, 2, 2), divisor_override=0))

@unittest.skip("oneDNN does not support this case")
def test_max_pool_nan(self):
for adaptive in ['', 'adaptive_']:
for num_dim in [1, 2, 3]:
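The skipped test_max_pool_nan asserts that NaN values in the input surface in the pooled output; stock PyTorch's CPU kernels propagate NaN through max pooling, but the oneDNN-backed implementation makes no such guarantee, hence the skip. A minimal sketch of the native behavior (plain PyTorch, 1-D case only):

import torch
import torch.nn.functional as F

# A NaN inside a pooling window wins the max on the native CPU backend.
x = torch.tensor([[[1.0, float('nan'), 2.0, 3.0]]])
out = F.max_pool1d(x, kernel_size=2)
print(out)  # tensor([[[nan, 3.]]])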
2 changes: 2 additions & 0 deletions tests/cpu/test_torch.py
@@ -10265,6 +10265,7 @@ def test_unfold_scalars(self, device):
        self.assertEqual(torch.empty(0, device=device), x.unfold(0, 0, 2))
        self.assertEqual(torch.tensor([0.5], device=device), x.unfold(0, 1, 1))

    @unittest.skipIf(SKIP_TEST_CASE_FOR_DPCPP_STORAGE, "IPEX does not support copy")
    def test_copy_all_dtypes_and_devices(self, device):
        from copy import copy
        ipex.get_auto_optimization()
@@ -12834,6 +12835,7 @@ def transformation_fn(tensor, **kwargs):
            self._test_memory_format_transformations(
                device, get_generator(mf, shape), transformation_fn, mf, default_is_preserve=True)

    @unittest.skipIf(SKIP_TEST_CASE_FOR_DPCPP_STORAGE, "IPEX feature limitation")
    def test_memory_format_clone(self, device):
        def get_generator(memory_format, shape):
            def input_generator_fn(device):
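test_memory_format_clone, skipped here under DPCPP storage, verifies in stock PyTorch that clone() keeps a tensor's memory format. A minimal sketch, assuming a dense 4-D channels_last tensor (the shape is illustrative):

import torch

# preserve_format (also clone()'s default) keeps the channels_last layout
# and, for dense non-overlapping tensors, the exact strides.
x = torch.randn(2, 3, 4, 4).contiguous(memory_format=torch.channels_last)
y = x.clone(memory_format=torch.preserve_format)
assert y.is_contiguous(memory_format=torch.channels_last)
assert y.stride() == x.stride()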