Commit ece8e5c
Fixing Sputnik GPU tests.
PiperOrigin-RevId: 362984949
tgale96 authored and Copybara-Service committed Mar 15, 2021
1 parent ac6315f commit ece8e5c
Showing 1 changed file with 23 additions and 23 deletions.
sgk/sparse/ops/op_api_test.py (46 changes: 23 additions & 23 deletions)
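Every change below swaps the `use_gpu` argument of `self.test_session` for `force_gpu`. In `tf.test.TestCase`, `use_gpu=True` only permits soft placement, so on a machine without a GPU (or for an op without a GPU kernel) the test silently runs on CPU; `force_gpu=True` pins all ops to `/device:GPU:0` and the test fails if they cannot run there. The sketch below is a minimal illustration of that difference, not code from this commit; the `PlacementTest` class and its test names are hypothetical.

import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()


class PlacementTest(tf.test.TestCase):
  # Hypothetical test class for illustration; not part of op_api_test.py.

  def testSoftPlacement(self):
    # use_gpu=True allows GPU placement but falls back to CPU silently,
    # so a "GPU test" can pass without exercising any GPU kernel.
    with self.test_session(use_gpu=True) as sess:
      self.assertAllClose(sess.run(tf.add(1.0, 2.0)), 3.0)

  def testHardPlacement(self):
    # force_gpu=True pins ops to /device:GPU:0; the test errors out
    # instead of passing vacuously when GPU execution is unavailable.
    with self.test_session(force_gpu=True) as sess:
      self.assertAllClose(sess.run(tf.add(1.0, 2.0)), 3.0)


if __name__ == "__main__":
  tf.test.main()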
@@ -52,7 +52,7 @@
 class SpmmTest(op_test.TestCase):
 
   @parameterized.parameters(*_BINARY_ARGUMENTS)
-  def testSpmm(self, m, k, n, sparsity, use_gpu):
+  def testSpmm(self, m, k, n, sparsity, force_gpu):
     # Helpers to set up the matrices.
     connector = connectors.Uniform(sparsity)
     initializer = initializers.Uniform()
@@ -67,13 +67,13 @@ def testSpmm(self, m, k, n, sparsity, use_gpu):
     output = ops.spmm(lhs, rhs)
 
     # Execute the op and compare the results.
-    with self.test_session(use_gpu=use_gpu) as sess:
+    with self.test_session(force_gpu=force_gpu) as sess:
       sess.run(tf.global_variables_initializer())
       self.assertAllClose(
           sess.run(output), np.dot(lhs_np, rhs_np), atol=1e-03, rtol=1e-05)
 
   @parameterized.parameters(*_BINARY_GRADIENT_ARGUMENTS)
-  def testSpmmGradient(self, m, k, n, sparsity, use_gpu):
+  def testSpmmGradient(self, m, k, n, sparsity, force_gpu):
     # Helpers to set up the matrices.
     connector = connectors.Uniform(sparsity)
     initializer = initializers.Uniform()
@@ -86,7 +86,7 @@ def testSpmmGradient(self, m, k, n, sparsity, use_gpu):
     rhs = tf.Variable(rhs_np, dtype=tf.float32)
     output = ops.spmm(lhs, rhs)
 
-    with self.test_session(use_gpu=use_gpu) as sess:
+    with self.test_session(force_gpu=force_gpu) as sess:
       sess.run(tf.global_variables_initializer())
       error = tf.test.compute_gradient_error(
           [lhs.values, rhs], [lhs.values.shape.as_list(), [k, n]], output,
@@ -96,7 +96,7 @@ def testSpmmGradient(self, m, k, n, sparsity, use_gpu):
   @parameterized.parameters((2, 4, 4, 4, 0.0, True), (2, 4, 4, 4, 0.0, False),
                             (8, 512, 512, 512, 0.8, True),
                             (8, 512, 512, 512, 0.8, False))
-  def testSpmm_Replicated(self, r, m, k, n, sparsity, use_gpu):
+  def testSpmm_Replicated(self, r, m, k, n, sparsity, force_gpu):
     # Helpers to set up the matrices.
     connector = connectors.Uniform(sparsity, round_to=4)
     initializer = initializers.Uniform()
@@ -115,15 +115,15 @@ def testSpmm_Replicated(self, r, m, k, n, sparsity, use_gpu):
     output = ops.replicated_spmm(lhs, topology, rhs)
 
     # Execute the op and compare the results.
-    with self.test_session(use_gpu=use_gpu) as sess:
+    with self.test_session(force_gpu=force_gpu) as sess:
       sess.run(tf.global_variables_initializer())
       out = sess.run(output)
       for i in range(r):
         expected_out = np.dot(lhs_np[i, :, :], rhs_np[i, :, :])
         self.assertAllClose(out[i, :], expected_out, atol=1e-03, rtol=1e-05)
 
   @parameterized.parameters(*_BINARY_ARGUMENTS)
-  def testSpmm_Fused(self, m, k, n, sparsity, use_gpu):
+  def testSpmm_Fused(self, m, k, n, sparsity, force_gpu):
     # Helpers to set up the matrices.
     connector = connectors.Uniform(sparsity)
     initializer = initializers.Uniform()
@@ -140,7 +140,7 @@ def testSpmm_Fused(self, m, k, n, sparsity, use_gpu):
     output = ops.fused_spmm(lhs, rhs, bias)
 
     # Execute the op and compare the results.
-    with self.test_session(use_gpu=use_gpu) as sess:
+    with self.test_session(force_gpu=force_gpu) as sess:
       sess.run(tf.global_variables_initializer())
       self.assertAllClose(
           sess.run(output),
@@ -152,7 +152,7 @@ def testSpmm_Fused(self, m, k, n, sparsity, use_gpu):
 class SddmmTest(op_test.TestCase):
 
   @parameterized.parameters(*_BINARY_ARGUMENTS)
-  def testSddmm(self, m, k, n, sparsity, use_gpu):
+  def testSddmm(self, m, k, n, sparsity, force_gpu):
     # Helpers to set up the matrices.
     connector = connectors.Uniform(sparsity)
     initializer = initializers.Uniform()
@@ -169,7 +169,7 @@ def testSddmm(self, m, k, n, sparsity, use_gpu):
     output = ops.sddmm(lhs, rhs, output_topology, transpose_rhs=True)
 
     # Execute the op and compare the results.
-    with self.test_session(use_gpu=use_gpu) as sess:
+    with self.test_session(force_gpu=force_gpu) as sess:
       sess.run(tf.global_variables_initializer())
       expected_output = self.dense_to_scipy(
           output_np * np.dot(lhs_np, np.transpose(rhs_np)))
@@ -183,7 +183,7 @@ def testSddmm(self, m, k, n, sparsity, use_gpu):
   @parameterized.parameters((2, 4, 4, 4, 0.0, True), (2, 4, 4, 4, 0.0, False),
                             (8, 512, 512, 512, 0.8, True),
                             (8, 512, 512, 512, 0.8, False))
-  def testSddmm_Replicated(self, r, m, k, n, sparsity, use_gpu):
+  def testSddmm_Replicated(self, r, m, k, n, sparsity, force_gpu):
     # Helpers to set up the matrices.
     connector = connectors.Uniform(sparsity)
     initializer = initializers.Uniform()
@@ -201,7 +201,7 @@ def testSddmm_Replicated(self, r, m, k, n, sparsity, use_gpu):
     output = ops.replicated_sddmm(lhs, rhs, output_topology, transpose_rhs=True)
 
     # Execute the op and compare the results.
-    with self.test_session(use_gpu=use_gpu) as sess:
+    with self.test_session(force_gpu=force_gpu) as sess:
       sess.run(tf.global_variables_initializer())
 
       # Run the replicated sddmm.
@@ -217,7 +217,7 @@ def testSddmm_Replicated(self, r, m, k, n, sparsity, use_gpu):
           actual_output, expected_output, atol=1e-03, rtol=1e-05)
 
   @parameterized.parameters(*_BINARY_GRADIENT_ARGUMENTS)
-  def testSddmmGradient(self, m, k, n, sparsity, use_gpu):
+  def testSddmmGradient(self, m, k, n, sparsity, force_gpu):
     # Helpers to set up the matrices.
     connector = connectors.Uniform(sparsity)
     initializer = initializers.Uniform()
@@ -234,7 +234,7 @@ def testSddmmGradient(self, m, k, n, sparsity, use_gpu):
     output = ops.sddmm(lhs, rhs, output_topology, transpose_rhs=True)
 
     # Execute the op and compare the results.
-    with self.test_session(use_gpu=use_gpu) as sess:
+    with self.test_session(force_gpu=force_gpu) as sess:
       sess.run(tf.global_variables_initializer())
       error = tf.test.compute_gradient_error([lhs, rhs], [[m, k], [n, k]],
                                              output.values,
@@ -245,7 +245,7 @@ def testSddmmGradient(self, m, k, n, sparsity, use_gpu):
 @parameterized.parameters(*_UNARY_ARGUMENTS)
 class TransposeTest(op_test.TestCase):
 
-  def testTranspose(self, m, n, sparsity, use_gpu):
+  def testTranspose(self, m, n, sparsity, force_gpu):
     # Helpers to set up the matrices.
     connector = connectors.Uniform(sparsity)
     initializer = initializers.Uniform()
@@ -258,7 +258,7 @@ def testTranspose(self, m, n, sparsity, use_gpu):
     output = ops.transpose(matrix)
 
     # Execute the op and compare the results.
-    with self.test_session(use_gpu=use_gpu) as sess:
+    with self.test_session(force_gpu=force_gpu) as sess:
       sess.run(tf.global_variables_initializer())
       expected_output = self.dense_to_scipy(np.transpose(matrix_np))
       actual_output = self.sparse_to_scipy(
@@ -272,7 +272,7 @@ def testTranspose(self, m, n, sparsity, use_gpu):
 @parameterized.parameters(*_UNARY_ARGUMENTS)
 class Csr2IdxTest(op_test.TestCase):
 
-  def testCsr2Idx(self, m, n, sparsity, use_gpu):
+  def testCsr2Idx(self, m, n, sparsity, force_gpu):
     # Helpers to set up the matrices.
     connector = connectors.Uniform(sparsity)
     initializer = initializers.Uniform()
@@ -285,7 +285,7 @@ def testCsr2Idx(self, m, n, sparsity, use_gpu):
     output = ops.csr2idx(matrix)
 
     # Execute the op and compare the results.
-    with self.test_session(use_gpu=use_gpu) as sess:
+    with self.test_session(force_gpu=force_gpu) as sess:
       sess.run(tf.global_variables_initializer())
 
       # Calculate the linear indices in numpy.
@@ -310,7 +310,7 @@ def testSparseSoftmax(self, m, n, sparsity):
     matrix = sparse_matrix.SparseMatrix("input", matrix=matrix_np)
     output = ops.sparse_softmax(matrix)
 
-    with self.test_session(use_gpu=True) as sess:
+    with self.test_session(force_gpu=True) as sess:
      sess.run(tf.global_variables_initializer())
 
       # Zero terms should not contribute to the softmax.
@@ -347,7 +347,7 @@ def testSparseSoftmax_Replicated(self, r, m, n, sparsity):
         np.reshape(matrix_np[matrix_np != 0], [r, -1]), dtype=tf.float32)
     output = ops.replicated_sparse_softmax(values, topology)
 
-    with self.test_session(use_gpu=True) as sess:
+    with self.test_session(force_gpu=True) as sess:
       sess.run(tf.global_variables_initializer())
       v, ro, ci = sess.run(
           [output, topology.row_offsets, topology.column_indices])
@@ -384,7 +384,7 @@ def testFusedSoftmax(self, m, n):
     matrix = tf.Variable(matrix_np, dtype=tf.float32)
     output = ops.fused_softmax(matrix)
 
-    with self.test_session(use_gpu=True) as sess:
+    with self.test_session(force_gpu=True) as sess:
       sess.run(tf.global_variables_initializer())
 
       def softmax(x):
@@ -421,7 +421,7 @@ def testDepthwiseConv2d(self, batch_size, in_channels, height, width):
         data_format="NCHW")
 
     # Execute the op and compare the results.
-    with self.test_session(use_gpu=True) as sess:
+    with self.test_session(force_gpu=True) as sess:
       sess.run(tf.global_variables_initializer())
       out_np, expected_out_np = sess.run([out, expected_out])
       self.assertAllClose(out_np, expected_out_np)
@@ -449,7 +449,7 @@ def testFusedDepthwiseConv2d(self, batch_size, in_channels, height, width):
     expected_out = tf.nn.relu(expected_out)
 
     # Execute the op and compare the results.
-    with self.test_session(use_gpu=True) as sess:
+    with self.test_session(force_gpu=True) as sess:
       sess.run(tf.global_variables_initializer())
       out_np, expected_out_np = sess.run([out, expected_out])
       self.assertAllClose(out_np, expected_out_np)
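Assuming TensorFlow's documented `test_session` semantics, the `False` cases in the parameterizations (for example `(8, 512, 512, 512, 0.8, False)`) behave as before, pinning ops to the CPU; only the `True` cases change, from permitting a silent CPU fallback to requiring GPU execution, which is what makes these GPU tests meaningful.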
