Skip to content

Commit

Permalink
Add tests to variables_test.py
Browse files Browse the repository at this point in the history
  • Loading branch information
Faisal-Alsrheed committed Sep 22, 2023
1 parent 52f240a commit e2ab5dc
Showing 1 changed file with 222 additions and 0 deletions.
222 changes: 222 additions & 0 deletions keras_core/backend/common/variables_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@
from keras_core.backend.common.variables import KerasVariable
from keras_core.backend.common.variables import standardize_shape
from keras_core.testing import test_case
from keras_core.ops.core import convert_to_tensor


class VariablesTest(test_case.TestCase):
Expand Down Expand Up @@ -125,3 +126,224 @@ def test_autocast_scope_with_non_float_dtype(self):
"`AutocastScope` can only be used with a floating-point",
):
_ = AutocastScope("int32")

def test_variable_initialization_with_non_callable(self):
    """A plain array (non-callable) initializer becomes the value directly."""
    var = backend.Variable(initializer=np.ones((2, 2)))
    self.assertAllClose(var.value, np.ones((2, 2)))

def test_variable_path_creation(self):
    """With no surrounding name scope, the path equals the variable name."""
    var = backend.Variable(initializer=np.ones((2, 2)), name="test_var")
    self.assertEqual(var.path, "test_var")

def test_variable_initialization_with_non_trainable(self):
    """`trainable=False` passed at construction is reflected on the variable."""
    var = backend.Variable(initializer=np.ones((2, 2)), trainable=False)
    self.assertFalse(var.trainable)

def test_variable_initialization_with_dtype(self):
    """An explicit dtype is kept on the variable and its backing value."""
    var = backend.Variable(initializer=np.ones((2, 2)), dtype="int32")
    self.assertEqual(var.dtype, "int32")
    # The backend-native value must carry the same (standardized) dtype.
    self.assertEqual(backend.standardize_dtype(var.value.dtype), "int32")

def test_variable_initialization_without_shape(self):
    """A callable initializer with no `shape=` must raise ValueError."""
    expected_message = (
        "When creating a Variable from an initializer, the `shape` "
    )
    with self.assertRaisesRegex(ValueError, expected_message):
        backend.Variable(initializer=initializers.RandomNormal())

def test_deferred_initialize_already_initialized(self):
    """Re-running deferred initialization on a live variable is an error."""
    var = backend.Variable(initializer=np.ones((2, 2)))
    expected_message = f"Variable {var.path} is already initialized."
    with self.assertRaisesRegex(ValueError, expected_message):
        var._deferred_initialize()

def test_deferred_initialize_within_stateless_scope(self):
    """Deferred initialization is forbidden inside a StatelessScope."""
    with backend.StatelessScope():
        # Inside the scope, construction is deferred: no value yet.
        var = backend.Variable(
            initializer=initializers.RandomNormal(), shape=(2, 2)
        )
        expected_message = (
            "You are attempting to initialize a variable "
            "while in a stateless scope. This is disallowed."
        )
        with self.assertRaisesRegex(ValueError, expected_message):
            var._deferred_initialize()

def test_variable_as_boolean(self):
    """Truth-testing a (multi-element) variable is ambiguous: TypeError."""
    var = backend.Variable(initializer=np.ones((2, 2)))
    with self.assertRaises(TypeError):
        bool(var)

def test_variable_negation(self):
    """Unary minus negates every element."""
    var = backend.Variable(initializer=np.array([-1, 2]))
    self.assertAllClose(-var, np.array([1, -2]))

def test_variable_pos(self):
    """Unary plus (`__pos__`) returns the variable's values unchanged.

    Bug fix: the original body did `pos_v = v`, a plain rebinding that
    never invokes the unary-plus operator, so `__pos__` was untested.
    `+v` is required to actually dispatch to it.
    """
    v = backend.Variable(initializer=np.array([-1, 2]))
    pos_v = +v  # dispatches to __pos__
    self.assertAllClose(pos_v, np.array([-1, 2]))

def test_variable_abs(self):
    """`abs()` applies element-wise absolute value."""
    var = backend.Variable(initializer=np.array([-1, 2]))
    self.assertAllClose(abs(var), np.array([1, 2]))

def test_variable_invert(self):
    """Bitwise NOT on int32 values: ~0 == -1 and ~(-1) == 0."""
    var = backend.Variable(initializer=np.array([0, -1]), dtype="int32")
    self.assertAllClose(~var, np.array([-1, 0]))

def test_variable_lt_tensor(self):
    """Element-wise `<` between two variables."""
    left = backend.Variable(initializer=np.array([1, 2, 3]))
    right = backend.Variable(initializer=np.array([1, 3, 2]))
    result = left < right
    self.assertAllClose(result.numpy(), np.array([False, True, False]))

def test_variable_lt_scalar(self):
    """`<` against a Python scalar broadcasts element-wise."""
    var = backend.Variable(initializer=np.array([1, 2, 3]))
    result = var < 3
    self.assertAllClose(result.numpy(), np.array([True, True, False]))

def test_variable_lt_numpy_array(self):
    """`<` against a NumPy array compares element-wise."""
    var = backend.Variable(initializer=np.array([1, 2, 3]))
    other = np.array([2, 2, 2])
    result = var < other
    self.assertAllClose(result.numpy(), np.array([True, False, False]))

def test_variable_ge_tensor(self):
    """Element-wise `>=` between two variables."""
    left = backend.Variable(initializer=np.array([1, 2, 3]))
    right = backend.Variable(initializer=np.array([1, 3, 2]))
    result = left >= right
    self.assertAllClose(result.numpy(), np.array([True, False, True]))

def test_variable_ge_scalar(self):
    """`>=` against a Python scalar broadcasts element-wise."""
    var = backend.Variable(initializer=np.array([1, 2, 3]))
    result = var >= 2
    self.assertAllClose(result.numpy(), np.array([False, True, True]))

def test_variable_ge_numpy_array(self):
    """`>=` against a NumPy array compares element-wise."""
    var = backend.Variable(initializer=np.array([1, 2, 3]))
    other = np.array([2, 2, 2])
    result = var >= other
    self.assertAllClose(result.numpy(), np.array([False, True, True]))

def test_variable_rsub_scalar(self):
    """scalar - variable dispatches to `__rsub__`."""
    var = backend.Variable(initializer=np.array([1, 2, 3]))
    result = 2 - var
    self.assertAllClose(result.numpy(), np.array([1, 0, -1]))

def test_variable_div_scalar(self):
    """variable / scalar dispatches to `__truediv__`."""
    var = backend.Variable(initializer=np.array([2, 4, 8]))
    result = var / 2
    self.assertAllClose(result.numpy(), np.array([1, 2, 4]))

def test_variable_rdiv_scalar(self):
    """scalar / variable dispatches to `__rtruediv__`."""
    var = backend.Variable(initializer=np.array([2, 4, 8]))
    result = 16 / var
    self.assertAllClose(result.numpy(), np.array([8, 4, 2]))

def test_variable_div_numpy_array(self):
    """variable / NumPy array dispatches to `__truediv__`.

    Bug fix: the original body computed `arr / v`, which exercises
    `__rtruediv__` and merely duplicated
    `test_variable_rdiv_numpy_array`. To test `__truediv__` the
    variable must be the left operand, with expectations updated
    accordingly.
    """
    v = backend.Variable(initializer=np.array([2, 4, 8]))
    arr = np.array([2, 8, 16])

    div_result = v / arr
    self.assertAllClose(div_result, np.array([1.0, 0.5, 0.5]))

def test_variable_rdiv_numpy_array(self):
    """NumPy array / variable dispatches to `__rtruediv__`."""
    var = backend.Variable(initializer=np.array([2, 4, 8]))
    other = np.array([16, 32, 64])
    result = other / var
    self.assertAllClose(result, np.array([8, 8, 8]))

def test_variable_rsub_numpy_array(self):
    """NumPy array - variable dispatches to `__rsub__`."""
    var = backend.Variable(initializer=np.array([1, 2, 3]))
    other = np.array([2, 2, 2])
    result = other - var
    self.assertAllClose(result, np.array([1, 0, -1]))

def test_variable_rtruediv(self):
    """scalar / variable dispatches to `__rtruediv__`."""
    var = backend.Variable(initializer=np.array([2, 4, 8]))
    out = 16 / var
    self.assertAllClose(out.numpy(), np.array([8, 4, 2]))

def test_variable_floordiv(self):
    """variable // NumPy array dispatches to `__floordiv__`."""
    var = backend.Variable(initializer=np.array([3, 4, 6]))
    out = var // np.array([2, 3, 6])
    self.assertAllClose(out.numpy(), np.array([1, 1, 1]))

def test_variable_rfloordiv(self):
    """NumPy array // variable dispatches to `__rfloordiv__`."""
    var = backend.Variable(initializer=np.array([3, 4, 6]))
    out = np.array([9, 12, 18]) // var
    self.assertAllClose(out.numpy(), np.array([3, 3, 3]))

def test_variable_rfloordiv_scalar(self):
    """scalar // variable dispatches to `__rfloordiv__`.

    Bug fix: this method was previously a second definition of
    `test_variable_rfloordiv`, which shadowed the earlier one so that
    only one of the two ever ran. Renamed, and repurposed to cover the
    previously untested scalar-on-the-left case.
    """
    v = backend.Variable(initializer=np.array([3, 4, 6]))
    result = 12 // v
    self.assertAllClose(result, np.array([4, 3, 2]))

def test_variable_mod_scalar(self):
    """variable % scalar dispatches to `__mod__`."""
    var = backend.Variable(initializer=np.array([2, 4, 8]))
    result = var % 3
    self.assertAllClose(result.numpy(), np.array([2, 1, 2]))

def test_variable_rmod_scalar(self):
    """scalar % variable dispatches to `__rmod__`."""
    var = backend.Variable(initializer=np.array([3, 5, 7]))
    result = 10 % var
    self.assertAllClose(result.numpy(), np.array([1, 0, 3]))

def test_variable_pow_scalar(self):
    """variable ** scalar dispatches to `__pow__`."""
    var = backend.Variable(initializer=np.array([2, 3, 4]))
    result = var**2
    self.assertAllClose(result.numpy(), np.array([4, 9, 16]))

def test_variable_rpow_scalar(self):
    """scalar ** variable dispatches to `__rpow__`."""
    var = backend.Variable(initializer=np.array([2, 3, 4]))
    result = 3**var
    self.assertAllClose(result.numpy(), np.array([9, 27, 81]))

def test_variable_matmul(self):
    """variable @ NumPy matrix dispatches to `__matmul__`."""
    var = backend.Variable(initializer=np.array([[2, 3], [4, 5]]))
    rhs = np.array([[1, 2], [3, 4]])
    product = var @ rhs
    expected = np.array([[11, 16], [19, 28]])
    self.assertAllClose(product.numpy(), expected)

def test_variable_rmatmul(self):
    """NumPy matrix @ variable dispatches to `__rmatmul__`."""
    var = backend.Variable(initializer=np.array([[2, 3], [4, 5]]))
    lhs = np.array([[1, 2], [3, 4]])
    product = lhs @ var
    self.assertAllClose(product, np.array([[10, 13], [22, 29]]))

def test_variable_and(self):
    """variable & tensor computes the element-wise bitwise AND."""
    var = backend.Variable(
        initializer=np.array([1, 0, 1, 0], dtype=np.int32), dtype="int32"
    )
    mask = convert_to_tensor(
        np.array([1, 1, 0, 1], dtype=np.int32), dtype="int32"
    )
    result = var & mask
    self.assertAllClose(result.numpy(), np.array([1, 0, 0, 0]))

def test_variable_rand(self):
    """tensor & variable dispatches to `__rand__` (bitwise AND)."""
    var = backend.Variable(
        initializer=np.array([1, 0, 1, 0], dtype=np.int32), dtype="int32"
    )
    mask = convert_to_tensor(
        np.array([1, 1, 0, 1], dtype=np.int32), dtype="int32"
    )
    result = mask & var
    self.assertAllClose(result.numpy(), np.array([1, 0, 0, 0]))

0 comments on commit e2ab5dc

Please sign in to comment.