Avoid calling Model.predict() on small numpy arrays in loops
This is inefficient and can lead to memory leaks.
See https://keras.io/api/models/model_training_apis/#predict-method and
tensorflow/tensorflow#44711

The issue even leads to a crash in the test suite on GitHub for Keras 3.0
(possibly also because of the TensorFlow version used).
nhuet authored and ducoffeM committed Jan 17, 2024
1 parent 89e3f3e commit be24e47
Showing 5 changed files with 41 additions and 16 deletions.
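For context, here is a minimal sketch of the pattern this commit removes and its replacement. The toy model and loop below are illustrative only (not taken from the diff) and assume the TensorFlow backend, where model outputs expose `.numpy()`:

import numpy as np
import keras

inp = keras.Input(shape=(1,))
model = keras.Model(inp, keras.layers.Dense(1)(inp))
samples = [np.random.rand(1, 1).astype("float32") for _ in range(100)]

# Anti-pattern: predict() spins up its batching machinery on every call,
# which is slow for tiny inputs and has been reported to leak memory in loops.
outputs_slow = [model.predict(x, verbose=0) for x in samples]

# Replacement: call the model directly and convert the tensor back to numpy.
outputs_fast = [model(x).numpy() for x in samples]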
25 changes: 25 additions & 0 deletions tests/conftest.py
@@ -178,6 +178,31 @@ def is_method_mode_compatible(method, mode):

    function = ModelNumpyFromKerasTensors

+    @staticmethod
+    def predict_on_small_numpy(
+        model: Model, x: Union[np.ndarray, List[np.ndarray]]
+    ) -> Union[np.ndarray, List[np.ndarray]]:
+        """Make predictions for a model directly on small numpy arrays.
+
+        Avoids `model.predict()`, which is not designed for small arrays
+        and is known to leak memory when called in a loop.
+        See https://keras.io/api/models/model_training_apis/#predict-method and
+        https://github.com/tensorflow/tensorflow/issues/44711
+
+        Args:
+            model: Keras model to call.
+            x: input array, or list of input arrays for multi-input models.
+
+        Returns:
+            Model output(s) converted to numpy arrays.
+        """
+        # A direct call returns backend tensors; convert them back to numpy.
+        output_tensors = model(x)
+        if isinstance(output_tensors, list):
+            return [output.numpy() for output in output_tensors]
+        else:
+            return output_tensors.numpy()

    @staticmethod
    def get_standard_values_1d_box(n, dc_decomp=True, grad_bounds=False, nb=100):
        """A set of functions with their monotonic decomposition for testing the activations"""
16 changes: 8 additions & 8 deletions tests/test_clone.py
@@ -70,13 +70,13 @@ def test_convert_1D(n, method, mode, floatx, decimal, helpers):

    #  keras model and output of reference
    ref_nn = helpers.toy_network_tutorial(dtype=keras_config.floatx())
-    output_ref_ = ref_nn.predict(input_ref_)
+    output_ref_ = helpers.predict_on_small_numpy(ref_nn, input_ref_)

    # decomon conversion
    decomon_model = clone(ref_nn, method=method, final_ibp=ibp, final_affine=affine)

    #  decomon outputs
-    outputs_ = decomon_model.predict(input_decomon_)
+    outputs_ = helpers.predict_on_small_numpy(decomon_model, input_decomon_)

    #  check bounds consistency
    helpers.assert_decomon_model_output_properties_box(
@@ -258,13 +258,13 @@ def test_clone_full_deellip_model_forward(method, mode, helpers):
        k_coef_lip=1.0,
        name="hkr_model",
    )
-    output_ref_ = ref_nn.predict(input_ref_reshaped_)
+    output_ref_ = helpers.predict_on_small_numpy(ref_nn, input_ref_reshaped_)

    # decomon conversion
    decomon_model = clone(ref_nn, method=method, final_ibp=ibp, final_affine=affine)

    #  decomon outputs
-    outputs_ = decomon_model.predict(input_decomon_)
+    outputs_ = helpers.predict_on_small_numpy(decomon_model, input_decomon_)

    #  check bounds consistency
    z_, u_c_, w_u_, b_u_, l_c_, w_l_, b_l_, h_, g_ = helpers.get_full_outputs_from_outputs_for_mode(
@@ -305,7 +305,7 @@ def test_convert_toy_models_1d(toy_model_1d, method, mode, helpers):

    #  keras model and output of reference
    ref_nn = toy_model_1d
-    output_ref_ = ref_nn.predict(input_ref_)
+    output_ref_ = helpers.predict_on_small_numpy(ref_nn, input_ref_)

    # decomon conversion
    if (get_direction(method) == FeedDirection.BACKWARD) and has_merge_layers(ref_nn):
@@ -317,7 +317,7 @@ def test_convert_toy_models_1d(toy_model_1d, method, mode, helpers):
    decomon_model = clone(ref_nn, method=method, final_ibp=ibp, final_affine=affine)

    #  decomon outputs
-    outputs_ = decomon_model.predict(input_decomon_)
+    outputs_ = helpers.predict_on_small_numpy(decomon_model, input_decomon_)

    #  check bounds consistency
    helpers.assert_decomon_model_output_properties_box(
@@ -363,13 +363,13 @@ def test_convert_cnn(method, mode, helpers):
    #  keras model and output of reference
    image_data_shape = input_ref_.shape[1:]  # image shape: before flattening
    ref_nn = helpers.toy_struct_cnn(dtype=keras_config.floatx(), image_data_shape=image_data_shape)
-    output_ref_ = ref_nn.predict(input_ref_reshaped_)
+    output_ref_ = helpers.predict_on_small_numpy(ref_nn, input_ref_reshaped_)

    # decomon conversion
    decomon_model = clone(ref_nn, method=method, final_ibp=ibp, final_affine=affine)

    #  decomon outputs
-    outputs_ = decomon_model.predict(input_decomon_)
+    outputs_ = helpers.predict_on_small_numpy(decomon_model, input_decomon_)

    #  check bounds consistency
    z_, u_c_, w_u_, b_u_, l_c_, w_l_, b_l_, h_, g_ = helpers.get_full_outputs_from_outputs_for_mode(
2 changes: 1 addition & 1 deletion tests/test_clone_backward.py
@@ -27,7 +27,7 @@ def test_convert_backward_1D(n, mode, floatx, decimal, helpers):

    # keras model and output of reference
    ref_nn = helpers.toy_network_tutorial(dtype=keras_config.floatx())
-    output_ref_ = ref_nn.predict(input_ref_)
+    output_ref_ = helpers.predict_on_small_numpy(ref_nn, input_ref_)

    # convert to functional
    ref_nn = ensure_functional_model(ref_nn)
2 changes: 1 addition & 1 deletion tests/test_clone_forward.py
@@ -25,7 +25,7 @@ def test_convert_forward_1D(n, mode, floatx, decimal, helpers):

    # keras model and output of reference
    ref_nn = helpers.toy_network_tutorial(dtype=keras_config.floatx())
-    output_ref_ = ref_nn.predict(input_ref_)
+    output_ref_ = helpers.predict_on_small_numpy(ref_nn, input_ref_)

    # decomon conversion
    _, outputs, _, _ = convert_forward(ref_nn, ibp=ibp, affine=affine, shared=True, input_tensors=input_tensors)
12 changes: 6 additions & 6 deletions tests/test_wrapper.py
@@ -42,7 +42,7 @@ def test_get_upper_1d_box(toy_model_1d, n, method, mode, helpers):
    affine = get_affine(mode)
    backward_model = clone(toy_model_1d, method=method, ibp=ibp, affine=affine, mode=mode)
    upper = get_upper_box(backward_model, z[:, 0], z[:, 1])
-    y_ref = toy_model_1d.predict(y)
+    y_ref = helpers.predict_on_small_numpy(toy_model_1d, y)

    try:
        assert (upper - y_ref).min() + 1e-6 >= 0.0
@@ -63,7 +63,7 @@ def test_get_lower_1d_box(toy_model_1d, n, method, mode, helpers):
    affine = get_affine(mode)
    backward_model = clone(toy_model_1d, method=method, final_ibp=ibp, final_affine=affine)
    lower = get_lower_box(backward_model, z[:, 0], z[:, 1])
-    y_ref = toy_model_1d.predict(y)
+    y_ref = helpers.predict_on_small_numpy(toy_model_1d, y)

    try:
        assert (y_ref - lower).min() + 1e-6 >= 0.0
@@ -84,7 +84,7 @@ def test_get_range_1d_box(toy_model_1d, n, method, mode, helpers):
    affine = get_affine(mode)
    backward_model = clone(toy_model_1d, method=method, final_ibp=ibp, final_affine=affine)
    upper, lower = get_range_box(backward_model, z[:, 0], z[:, 1])
-    y_ref = toy_model_1d.predict(y)
+    y_ref = helpers.predict_on_small_numpy(toy_model_1d, y)

    try:
        assert (upper - y_ref).min() + 1e-6 >= 0.0
@@ -105,7 +105,7 @@ def test_get_upper_multid_box(toy_model_multid, odd, method, mode, helpers):
    affine = get_affine(mode)
    backward_model = clone(toy_model_multid, method=method, final_ibp=ibp, final_affine=affine)
    upper = get_upper_box(backward_model, z[:, 0], z[:, 1])
-    y_ref = toy_model_multid.predict(y)
+    y_ref = helpers.predict_on_small_numpy(toy_model_multid, y)

    assert (upper - y_ref).min() + 1e-6 >= 0.0

@@ -122,7 +122,7 @@ def test_get_lower_multid_box(toy_model_multid, odd, method, mode, helpers):
    affine = get_affine(mode)
    backward_model = clone(toy_model_multid, method=method, final_ibp=ibp, final_affine=affine)
    lower = get_lower_box(backward_model, z[:, 0], z[:, 1])
-    y_ref = toy_model_multid.predict(y)
+    y_ref = helpers.predict_on_small_numpy(toy_model_multid, y)

    assert (y_ref - lower).min() + 1e-6 >= 0.0

@@ -139,7 +139,7 @@ def test_get_range_multid_box(toy_model_multid, odd, method, mode, helpers):
    affine = get_affine(mode)
    backward_model = clone(toy_model_multid, method=method, final_ibp=ibp, final_affine=affine)
    upper, lower = get_range_box(backward_model, z[:, 0], z[:, 1])
-    y_ref = toy_model_multid.predict(y)
+    y_ref = helpers.predict_on_small_numpy(toy_model_multid, y)

    assert (upper - y_ref).min() + 1e-6 >= 0.0
    assert (y_ref - lower).min() + 1e-6 >= 0.0
