
Commit

fix tests
elad-c committed Apr 17, 2024
1 parent a1fd469 commit 6cbbb63
Showing 25 changed files with 102 additions and 56 deletions.
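Summary of the recurring change: get_op_quantization_configs() now returns a fourth value, the constants-quantization config, and the TP-model generator helpers take a matching const_config argument. A minimal sketch of the updated call pattern, assuming the helper names from the hunks below are already imported (import paths are not shown in this diff):

# Sketch of the updated call pattern introduced by this commit (illustrative only).
# get_op_quantization_configs() now returns four values instead of three.
base_config, op_cfg_list, default_config, const_config = get_op_quantization_configs()

# Generator helpers now receive the constants-quantization config explicitly.
tp_model = generate_tp_model(default_config=default_config,
                             base_config=base_config,
                             mixed_precision_cfg_list=op_cfg_list,
                             const_config=const_config,
                             name="function_test")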
@@ -13,6 +13,7 @@
# limitations under the License.
# ==============================================================================

from typing import Dict
import numpy as np
from sklearn.cluster import KMeans

@@ -37,7 +38,7 @@ def lut_kmeans_tensor(tensor_data: np.ndarray,
n_iter: int = 10,
min_threshold: float = MIN_THRESHOLD,
quant_error_method: qc.QuantizationErrorMethod = None,
is_symmetric=False) -> dict:
is_symmetric: bool = False) -> Dict:
"""
The quantizer first finds the closest max value per channel of tensor_data.
Now, we divide tensor_data with the threshold vector per channel. In addition, we scale the result to the range
@@ -94,7 +95,7 @@ def lut_kmeans_histogram(bins: np.ndarray,
constrained: bool = True,
n_iter: int = 20,
min_threshold: float = MIN_THRESHOLD,
quant_error_method: qc.QuantizationErrorMethod = qc.QuantizationErrorMethod.MSE) -> dict:
quant_error_method: qc.QuantizationErrorMethod = qc.QuantizationErrorMethod.MSE) -> Dict:
"""
Finds quantization cluster points for non-uniform activation quantization.
The quantizer first finds the closest power-of-two number to the max value of the given histogram,
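For orientation, the two docstrings above describe the LUT k-means approach: pick a threshold near the data's max (per channel for tensors, a power of two for histograms), scale the values into a fixed range, and cluster the scaled values so the cluster centers form the lookup table. A minimal sketch of that idea with the same numpy/sklearn imports; the function name, cluster count, and scaling constant are assumptions for illustration, not MCT's actual implementation:

import numpy as np
from sklearn.cluster import KMeans

def lut_kmeans_sketch(tensor_data: np.ndarray, n_bits: int = 4, channel_axis: int = -1) -> dict:
    """Illustrative sketch of LUT k-means quantization; not MCT's implementation."""
    # Per-channel threshold: max absolute value over all axes except the channel axis.
    reduce_axes = tuple(i for i in range(tensor_data.ndim) if i != channel_axis % tensor_data.ndim)
    thresholds = np.maximum(np.max(np.abs(tensor_data), axis=reduce_axes, keepdims=True), 1e-16)

    # Divide by the per-channel threshold and scale into a fixed range before clustering
    # (the exact target range is an assumption here).
    scaled = tensor_data / thresholds * (2 ** (n_bits - 1))

    # Cluster the scaled values; the cluster centers become the lookup-table entries.
    kmeans = KMeans(n_clusters=2 ** n_bits, n_init=10).fit(scaled.reshape(-1, 1))

    return {'lut_values': kmeans.cluster_centers_.flatten(),
            'threshold': thresholds.squeeze()}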
14 changes: 11 additions & 3 deletions tests/common_tests/helpers/generate_test_tp_model.py
@@ -30,7 +30,7 @@


def generate_test_tp_model(edit_params_dict, name=""):
base_config, op_cfg_list, default_config = get_op_quantization_configs()
base_config, op_cfg_list, default_config, const_config = get_op_quantization_configs()

# separate weights attribute parameters from the requested param to edit
weights_params_names = [name for name in tp.AttributeQuantizationConfig.__init__.__code__.co_varnames if name != 'self']
@@ -45,6 +45,9 @@ def generate_test_tp_model(edit_params_dict, name=""):
attr_weights_configs_mapping[KERNEL_ATTR].clone_and_edit(**weights_params)
updated_config = base_config.clone_and_edit(attr_weights_configs_mapping=attr_weights_configs_mapping,
**rest_params)
default_weights_config = const_config.default_weight_attr_config.clone_and_edit(**weights_params)
updated_const_config = const_config.clone_and_edit(default_weight_attr_config=default_weights_config,
**rest_params)

# For the default config, we only update the non-weights attributes argument, since the behaviour for the weights
# quantization is supposed to remain the default defined behavior
@@ -57,10 +60,12 @@ def generate_test_tp_model(edit_params_dict, name=""):
return generate_tp_model(default_config=updated_default_config,
base_config=updated_config,
mixed_precision_cfg_list=op_cfg_list,
const_config=updated_const_config,
name=name)


def generate_mixed_precision_test_tp_model(base_cfg, default_config, mp_bitwidth_candidates_list, name=""):
def generate_mixed_precision_test_tp_model(base_cfg, default_config, mp_bitwidth_candidates_list,
const_config, name=""):
mp_op_cfg_list = []
for weights_n_bits, activation_n_bits in mp_bitwidth_candidates_list:
candidate_cfg = base_cfg.clone_and_edit(attr_to_edit={KERNEL_ATTR: {WEIGHTS_N_BITS: weights_n_bits}},
@@ -71,10 +76,12 @@ def generate_mixed_precision_test_tp_model(base_cfg, default_config, mp_bitwidth
return generate_tp_model(default_config=default_config,
base_config=base_cfg,
mixed_precision_cfg_list=mp_op_cfg_list,
const_config=const_config,
name=name)


def generate_tp_model_with_activation_mp(base_cfg, default_config, mp_bitwidth_candidates_list, name="activation_mp_model"):
def generate_tp_model_with_activation_mp(base_cfg, default_config, mp_bitwidth_candidates_list,
const_config, name="activation_mp_model"):
mp_op_cfg_list = []
for weights_n_bits, activation_n_bits in mp_bitwidth_candidates_list:

@@ -90,6 +97,7 @@ def generate_tp_model_with_activation_mp(base_cfg, default_config, mp_bitwidth_c
base_tp_model = generate_tp_model(default_config=default_config,
base_config=base_cfg,
mixed_precision_cfg_list=mp_op_cfg_list,
const_config=const_config,
name=name)

mixed_precision_configuration_options = tp.QuantizationConfigOptions(mp_op_cfg_list,
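The new lines in this helper mirror the existing base-config handling: weight-attribute edits from edit_params_dict are applied to the constants config's default_weight_attr_config, and the remaining non-weights edits are applied to the constants config itself, so tests that tweak bit-widths exercise constants quantization consistently. A hypothetical call, assuming the parameter names used elsewhere in these tests (weights_n_bits, activation_n_bits):

# Hypothetical usage of the updated helper: weight-attribute edits now propagate to both
# the kernel attribute config and the constants config's default weight attribute config.
tp_model = generate_test_tp_model({'weights_n_bits': 4, 'activation_n_bits': 8},
                                  name="4bit_weights_test")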
4 changes: 2 additions & 2 deletions tests/common_tests/helpers/prep_graph_for_func_test.py
@@ -40,11 +40,11 @@ def prepare_graph_with_configs(in_model,
qc=DEFAULTCONFIG,
mixed_precision_enabled=False):
# TPC
base_config, op_cfg_list, default_config = get_op_quantization_configs()
base_config, op_cfg_list, default_config, const_config = get_op_quantization_configs()

# To override the default TP in the test - pass a TPC generator function that includes a generation of the TP
# and doesn't use the TP that is passed from outside.
_tp = generate_tp_model(default_config, base_config, op_cfg_list, "function_test")
_tp = generate_tp_model(default_config, base_config, op_cfg_list, const_config, "function_test")
tpc = get_tpc_func("function_test", _tp)

# Read Model
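As the comment in this hunk notes, a test can override the default TP by supplying a TPC generator function, which the helper calls as get_tpc_func("function_test", _tp). A minimal sketch of such a generator, assuming generate_keras_tpc (or an equivalent TPC builder in the repo) is the function the test imports:

# Sketch only: a TPC generator matching the call shape get_tpc_func(name, tp_model).
# generate_keras_tpc is assumed to be the repo's Keras TPC builder for a given TP model.
def my_get_tpc_func(name, tp_model):
    return generate_keras_tpc(name=name, tp_model=tp_model)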
@@ -39,11 +39,12 @@ def __init__(self, unit_test, mixed_precision_candidates_list):
self.mixed_precision_candidates_list = mixed_precision_candidates_list

def get_tpc(self):
base_config, _, default_config = get_op_quantization_configs()
base_config, _, default_config, const_config = get_op_quantization_configs()

return get_tpc_with_activation_mp_keras(base_config=base_config,
default_config=default_config,
mp_bitwidth_candidates_list=self.mixed_precision_candidates_list,
const_config=const_config,
name="mp_bopts_test")

def get_mixed_precision_config(self):
@@ -51,9 +51,11 @@ def get_tpc(self):
# sets all combinations of 2, 4, 8 bits for weights and activations
mixed_precision_candidates_list = get_base_mp_nbits_candidates()

default_config = eight_bits.clone_and_edit(attr_weights_configs_mapping={})
return get_tpc_with_activation_mp_keras(base_config=eight_bits,
default_config=eight_bits.clone_and_edit(attr_weights_configs_mapping={}),
default_config=default_config,
mp_bitwidth_candidates_list=mixed_precision_candidates_list,
const_config=default_config,
name="mixed_precision_activation_test")

def get_quantization_config(self):
@@ -145,6 +147,7 @@ def get_tpc(self):
return get_tpc_with_activation_mp_keras(base_config=eight_bits,
default_config=default_config,
mp_bitwidth_candidates_list=mixed_precision_candidates_list,
const_config=default_config,
name="mixed_precision_4bit_test")

def compare(self, quantized_model, float_model, input_x=None, quantization_info=None):
@@ -238,6 +241,7 @@ def get_tpc(self):
return get_tpc_with_activation_mp_keras(base_config=eight_bits,
default_config=default_config,
mp_bitwidth_candidates_list=mixed_precision_candidates_list,
const_config=default_config,
name="mixed_precision_depthwise_4bit_test")

def create_networks(self):
@@ -309,6 +313,7 @@ def get_tpc(self):
return get_tpc_with_activation_mp_keras(base_config=eight_bits,
default_config=default_config,
mp_bitwidth_candidates_list=mixed_precision_candidates_list,
const_config=default_config,
name="mixed_precision_activation_weights_disabled_test")

def get_resource_utilization(self):
@@ -357,6 +362,7 @@ def get_tpc(self):
return get_tpc_with_activation_mp_keras(base_config=eight_bits,
default_config=default_config,
mp_bitwidth_candidates_list=mixed_precision_candidates_list,
const_config=default_config,
name="mixed_precision_activation_weights_disabled_test")

def get_resource_utilization(self):
@@ -44,6 +44,7 @@ def get_tpc(self):
return get_weights_only_mp_tpc_keras(base_config=base_config,
default_config=default_config,
mp_bitwidth_candidates_list=[(2, 16), (4, 16), (16, 16)],
const_config=default_config,
name="reused_layer_mp_test")

def get_quantization_config(self):
@@ -139,7 +139,7 @@ def get_tpc(self):
# Building a TPC that gives Conv layers mixed precision candidates and Dense layers a fixed candidate.
# Both layers have weights to be quantized, so we want to verify that finalizing the model is successful.
# Note that it is important that the quantization config options also include activation quantization.
cfg, mixed_precision_cfg_list, _ = get_op_quantization_configs()
cfg, mixed_precision_cfg_list, _, _ = get_op_quantization_configs()

two_bit_cfg = mixed_precision_cfg_list[2]

@@ -367,6 +367,7 @@ def get_tpc(self):
return get_weights_only_mp_tpc_keras(base_config=base_config,
default_config=default_config,
mp_bitwidth_candidates_list=[(8, 16), (2, 16), (4, 16), (16, 16)],
const_config=default_config,
name="mp_dw_test")

def get_quantization_config(self):
@@ -394,12 +395,13 @@ def get_mixed_precision_config(self):
return mct.core.MixedPrecisionQuantizationConfig(num_of_images=1)

def get_tpc(self):
base_config, _, default_config = get_op_quantization_configs()
base_config, _, default_config, const_config = get_op_quantization_configs()
activation_disabled_config = base_config.clone_and_edit(enable_activation_quantization=False)

return get_weights_only_mp_tpc_keras(base_config=activation_disabled_config,
default_config=default_config,
mp_bitwidth_candidates_list=[(8, 8), (4, 8), (2, 8)],
const_config=const_config,
name="mp_weights_only_test")

def get_resource_utilization(self):
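The comment in the hunk above describes building a TPC where Conv layers get mixed-precision candidates while Dense layers are pinned to a single 2-bit candidate. A short fragment of that idea, using the QuantizationConfigOptions pattern visible earlier in this commit; attaching the options to the Conv/Dense operator sets is omitted, and the base_config keyword is an assumption here:

# Illustrative fragment (not the test's full TPC construction).
cfg, mixed_precision_cfg_list, _, _ = get_op_quantization_configs()
two_bit_cfg = mixed_precision_cfg_list[2]

conv_options = tp.QuantizationConfigOptions(mixed_precision_cfg_list, base_config=cfg)  # MP candidates
dense_options = tp.QuantizationConfigOptions([two_bit_cfg], base_config=two_bit_cfg)    # fixed candidate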
@@ -95,7 +95,7 @@ def representative_dataset():
yield [np.random.randn(1, 8, 8, 3).astype(np.float32)]


def prepare_graph(in_model, keras_impl, mixed_precision_candidates_list, base_config, default_config):
def prepare_graph(in_model, keras_impl, mixed_precision_candidates_list, base_config, default_config, const_config):
fw_info = DEFAULT_KERAS_INFO
qc = mct.core.QuantizationConfig()

@@ -104,6 +104,7 @@ def prepare_graph(in_model, keras_impl, mixed_precision_candidates_list, base_co
tpc = get_tpc_with_activation_mp_keras(base_config=base_config,
default_config=default_config,
mp_bitwidth_candidates_list=mixed_precision_candidates_list,
const_config=const_config,
name="activation_weights_composition_test")

graph.set_fw_info(fw_info)
@@ -152,10 +153,10 @@ def test_two_conv_net_compose_without_split(self):
in_model = two_conv_model()
keras_impl = KerasImplementation()

base_config, _, default_config = get_op_quantization_configs()
base_config, _, default_config, const_config = get_op_quantization_configs()
graph = prepare_graph(in_model, keras_impl,
mixed_precision_candidates_list=_get_base_mp_nbits_candidates(), base_config=base_config,
default_config=default_config)
default_config=default_config, const_config=const_config)

# Nodes composition substitution
v_graph = substitute(copy.deepcopy(graph), [VirtualActivationWeightsComposition()])
@@ -178,10 +179,10 @@ def test_two_conv_net_compose_after_split(self):
in_model = two_conv_model()
keras_impl = KerasImplementation()

base_config, _, default_config = get_op_quantization_configs()
base_config, _, default_config, const_config = get_op_quantization_configs()
graph = prepare_graph(in_model, keras_impl,
mixed_precision_candidates_list=_get_base_mp_nbits_candidates(), base_config=base_config,
default_config=default_config)
default_config=default_config, const_config=const_config)

# Nodes split and composition substitution
split_graph = substitute(graph, [WeightsActivationSplit()])
@@ -193,11 +194,11 @@ def test_two_conv_net_compose_after_split_weights_only(self):
in_model = two_conv_model()
keras_impl = KerasImplementation()

base_config, _, default_config = get_op_quantization_configs()
base_config, _, default_config, const_config = get_op_quantization_configs()
base_config = base_config.clone_and_edit(enable_activation_quantization=False)
graph = prepare_graph(in_model, keras_impl,
mixed_precision_candidates_list=_get_base_mp_nbits_candidates(), base_config=base_config,
default_config=default_config)
default_config=default_config, const_config=const_config)

# Nodes split and composition substitution
split_graph = substitute(graph, [WeightsActivationSplit()])
@@ -214,7 +215,7 @@ def test_two_conv_net_compose_after_split_activation_only(self):

graph = prepare_graph(in_model, keras_impl,
mixed_precision_candidates_list=_get_base_mp_nbits_candidates(), base_config=base_config,
default_config=default_config)
default_config=default_config, const_config=default_config)

# Nodes split and composition substitution
split_graph = substitute(graph, [WeightsActivationSplit()])
@@ -226,11 +227,12 @@ def test_all_weights_layers_composition(self):
in_model = multiple_weights_nodes_model()
keras_impl = KerasImplementation()

base_config, _, default_config = get_op_quantization_configs()
base_config, _, default_config, const_config = get_op_quantization_configs()
graph = prepare_graph(in_model, keras_impl,
mixed_precision_candidates_list=_get_base_mp_nbits_candidates(),
base_config=base_config,
default_config=default_config)
default_config=default_config,
const_config=const_config)

# Nodes split and composition substitution
split_graph = substitute(graph, [WeightsActivationSplit()])
@@ -264,10 +266,10 @@ def test_multiple_output_activation(self):
in_model = multiple_outputs_activation_model()
keras_impl = KerasImplementation()

base_config, _, default_config = get_op_quantization_configs()
base_config, _, default_config, const_config = get_op_quantization_configs()
graph = prepare_graph(in_model, keras_impl,
mixed_precision_candidates_list=_get_base_mp_nbits_candidates(), base_config=base_config,
default_config=default_config)
default_config=default_config, const_config=const_config)

# Nodes composition substitution
v_graph = substitute(graph, [VirtualActivationWeightsComposition()])
17 changes: 11 additions & 6 deletions tests/keras_tests/function_tests/test_cfg_candidates_filter.py
@@ -38,11 +38,12 @@ def get_full_bitwidth_candidates():
(2, 8), (2, 4), (2, 2)]


def prepare_graph(in_model, base_config, default_config, bitwidth_candidates):
def prepare_graph(in_model, base_config, default_config, bitwidth_candidates, const_config):
tpc = get_tpc_with_activation_mp_keras(base_config=base_config,
mp_bitwidth_candidates_list=bitwidth_candidates,
name="candidates_filter_test",
default_config=default_config)
default_config=default_config,
const_config=const_config)

fw_info = DEFAULT_KERAS_INFO
keras_impl = KerasImplementation()
@@ -89,7 +90,8 @@ def test_cfg_filter_activation_only_nodes(self):
graph = prepare_graph(in_model,
base_config=base_config,
bitwidth_candidates=get_full_bitwidth_candidates(),
default_config=default_config)
default_config=default_config,
const_config=default_config)

# Filtering nodes' candidates
filtered_graph = filter_nodes_candidates(graph)
@@ -118,7 +120,8 @@ def test_cfg_filter_weights_disabled(self):
graph = prepare_graph(in_model,
base_config=base_config,
bitwidth_candidates=get_full_bitwidth_candidates(),
default_config=default_config)
default_config=default_config,
const_config=default_config)

# Filtering nodes' candidates
filtered_graph = filter_nodes_candidates(graph)
@@ -147,7 +150,8 @@ def test_cfg_filter_activation_disabled(self):
graph = prepare_graph(in_model,
base_config=base_config,
bitwidth_candidates=get_full_bitwidth_candidates(),
default_config=default_config)
default_config=default_config,
const_config=default_config)

# Filtering nodes' candidates
filtered_graph = filter_nodes_candidates(graph)
@@ -171,7 +175,8 @@ def test_cfg_filter_multiple_candidates_weights_disabled(self):
graph = prepare_graph(in_model,
base_config=base_config,
bitwidth_candidates=[(8, 8), (4, 8), (2, 8)],
default_config=default_config)
default_config=default_config,
const_config=default_config)

# Filtering nodes' candidates
filtered_graph = filter_nodes_candidates(graph)
