patch python test for bfloat16 (#1724)
* patch python test
shmsong committed May 23, 2022
1 parent 8fbd0b1 commit fade8da
Showing 1 changed file with 3 additions and 2 deletions.
test/test_jit_cuda_fuser.py: 3 additions & 2 deletions
@@ -598,7 +598,9 @@ def t(x: torch.Tensor, y: torch.Tensor):
             # bfloat16 kernels instead of eager mode
             # implementation, since mismatch in cast
             # adds excessive noise.
-            o = t(x.to(torch.float64), y.to(torch.float64)).to(torch.bfloat16)
+            o = t(x.to(torch.float64), y.to(torch.float64))
+            if o.dtype.is_floating_point:
+                o = o.to(torch.bfloat16)
         else:
             o = t(x, y)

@@ -921,7 +923,6 @@ def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
                 self.assertEqual(o.dtype, jit_o.dtype)
                 if test_value:
                     self.assertEqual(o, jit_o)
-                print(t_jit.graph_for(x, y))
                 self.assertGraphContains(t_jit.graph_for(x, y), FUSION_GUARD)
         except Exception as e:
             print("failing test for op: ", operation.__name__)
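The substance of the patch is the first hunk: the eager-mode reference for bfloat16 inputs is now computed in float64 and cast back to bfloat16 only when the result is itself a floating-point tensor, so operations with non-floating outputs (comparisons returning bool, for instance) no longer go through an unwanted cast. A minimal sketch of that pattern follows; the helper name reference_output and the standalone usage are illustrative, not the test's actual code:

import torch

def reference_output(op, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    if x.dtype == torch.bfloat16:
        # Compute the reference in double precision so intermediate
        # bfloat16 rounding does not pollute the comparison.
        o = op(x.to(torch.float64), y.to(torch.float64))
        # Cast back only when the result is floating point; comparison
        # ops, for example, produce bool tensors that should stay bool.
        if o.dtype.is_floating_point:
            o = o.to(torch.bfloat16)
        return o
    return op(x, y)

x = torch.randn(4).to(torch.bfloat16)
y = torch.randn(4).to(torch.bfloat16)
assert reference_output(torch.add, x, y).dtype == torch.bfloat16
assert reference_output(torch.eq, x, y).dtype == torch.bool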
