Skip to content

Commit

Permalink
INC Bench v1.14 upgrade (#1207)
Browse files Browse the repository at this point in the history
  • Loading branch information
bmyrcha committed Sep 2, 2022
1 parent 9d261ea commit 26e902d
Show file tree
Hide file tree
Showing 107 changed files with 10,755 additions and 6,801 deletions.
@@ -0,0 +1,57 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2022 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration generator class."""
from typing import Any

from neural_compressor.ux.components.config_generator.config_generator import ConfigGenerator
from neural_compressor.ux.utils.workload.config import Config
from neural_compressor.ux.utils.workload.evaluation import Accuracy, Evaluation, Metric
from neural_compressor.ux.utils.workload.mixed_precision import MixedPrecision


class MixedPrecisionConfigGenerator(ConfigGenerator):
    """Configuration generator class."""

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        """Initialize configuration generator."""
        super().__init__(*args, **kwargs)
        workload_data = kwargs.get("data", {})
        # Required key; a missing "optimization_precision" raises KeyError early.
        self.optimization_precision: str = workload_data["optimization_precision"]

    def generate(self) -> None:
        """Generate yaml config file."""
        workload_config = Config()
        workload_config.load(self.predefined_config_path)
        workload_config.model = self.generate_model_config()
        workload_config.evaluation = self.generate_evaluation_config()
        workload_config.mixed_precision = self.generate_mixed_precision_config()
        workload_config.dump(self.config_path)

    def generate_evaluation_config(self) -> Evaluation:
        """Build the evaluation section of the configuration."""
        evaluation = Evaluation()
        accuracy = Accuracy()
        if self.metric:
            accuracy.metric = Metric(self.metric)
        accuracy.dataloader = self.generate_dataloader_config(batch_size=1)
        evaluation.accuracy = accuracy
        return evaluation

    def generate_mixed_precision_config(self) -> MixedPrecision:
        """Build the mixed precision section of the configuration."""
        mixed_precision = MixedPrecision()
        mixed_precision.precisions = self.optimization_precision
        return mixed_precision
Expand Up @@ -75,6 +75,13 @@ def __init__(self) -> None:
"list<list<int>>": ["shape", "input_shape", "label_shape"],
"bool": ["train", "label", "do_lower_case", "dynamic_length"],
}

self.metric_types: Dict[str, List[str]] = {
"str": ["anno_path"],
"int": ["num_detections", "boxes", "scores", "classes", "k"],
"bool": ["compare_label"],
}

self.types_definitions: Dict[str, Union[Type, List[Any]]] = {
"str": str,
"int": int,
Expand All @@ -99,14 +106,36 @@ def parse(self, data: dict) -> dict:
{"dataloader": self.parse_dataloader(quantization_dataloader)},
)

evaluation_dataloader = data.get("evaluation", {}).get("dataloader", None)
evaluation_data = data.get("evaluation", None)
if evaluation_data and isinstance(evaluation_data, dict):
self.parse_evaluation_data(evaluation_data)

data["tuning"] = parse_bool_value(data["tuning"])

return data

def parse_evaluation_data(self, evaluation_data: dict) -> None:
    """Parse input evaluation data in place.

    Converts the dataloader and metric parameter sections to their typed
    forms and validates/normalizes the instance and core settings.

    :param evaluation_data: the "evaluation" sub-dict of the request payload;
        mutated in place.
    :raises ClientErrorException: when core/instance settings are invalid.
    """
    evaluation_dataloader = evaluation_data.get("dataloader", None)
    if evaluation_dataloader and isinstance(evaluation_dataloader, dict):
        evaluation_data.update(
            {"dataloader": self.parse_dataloader(evaluation_dataloader)},
        )

    metric_data = evaluation_data.get("metric_param", None)
    if metric_data and isinstance(metric_data, dict):
        parsed_metric_data = self.parse_metric(metric_data)
        evaluation_data.update(
            {"metric_param": parsed_metric_data},
        )

    num_cores = HWInfo().cores
    cores_per_instance = int(
        evaluation_data.get(
            "cores_per_instance",
            4,
        ),
    )

    if cores_per_instance < 1:
        # NOTE(review): original message was truncated in the diff; verify wording.
        raise ClientErrorException(
            "At least one core per instance must be used.",
        )

    max_number_of_instances = num_cores // cores_per_instance
    instances = int(
        evaluation_data.get(
            "instances",
            max_number_of_instances,
        ),
    )

    if instances < 1:
        raise ClientErrorException("At least one instance must be used.")

    if instances > max_number_of_instances:
        # NOTE(review): first line of this message was truncated in the diff; verify wording.
        raise ClientErrorException(
            f"Attempted to use {instances} instances, "
            f"while only {max_number_of_instances} allowed.",
        )

    evaluation_data.update(
        {
            "cores_per_instance": cores_per_instance,
            "num_of_instance": instances,
            "batch_size": int(evaluation_data.get("batch_size", 1)),
        },
    )

def parse_transforms(self, transforms_data: List[dict]) -> List[dict]:
"""Parse transforms list."""
Expand Down Expand Up @@ -189,6 +218,19 @@ def parse_dataloader(self, dataloader_data: dict) -> dict:
)
return parsed_dataloader_data

def parse_metric(self, metric_data: dict) -> dict:
    """Parse metric data."""
    parsed: dict = {}
    for name, value in metric_data.items():
        if isinstance(value, dict):
            # Nested parameter groups are parsed recursively.
            parsed[name] = self.parse_metric(value)
            continue
        if not isinstance(value, str):
            # NOTE(review): non-str, non-dict values are silently dropped,
            # matching the original behavior.
            continue
        declared_type = self.get_param_type("metric", name)
        if declared_type is None:
            # Unknown parameters are skipped rather than passed through.
            continue
        parsed[name] = self.parse_value(value, declared_type)
    return parsed

def get_param_type(
self,
param_group: str,
Expand All @@ -200,6 +242,8 @@ def get_param_type(
params_definitions = self.transform_types
elif param_group == "dataloader":
params_definitions = self.dataloader_types
elif param_group == "metric":
params_definitions = self.metric_types
for param_type, param_names in params_definitions.items():
if param_name in param_names:
found_type = self.types_definitions.get(param_type, None)
Expand Down
Expand Up @@ -15,16 +15,18 @@
"""Parameters feeder module."""
from typing import Any, Dict, List, Optional

from neural_compressor.experimental.metric.metric import registry_metrics
from neural_compressor.objective import OBJECTIVES
from neural_compressor.strategy import STRATEGIES
from neural_compressor.ux.components.model.repository import ModelRepository
from neural_compressor.ux.utils.exceptions import ClientErrorException
from neural_compressor.ux.utils.utils import (
_update_metric_parameters,
check_module,
filter_transforms,
get_metrics_dict,
load_dataloader_config,
load_help_nc_params,
load_metrics_config,
load_model_config,
load_precisions_config,
load_transforms_config,
Expand Down Expand Up @@ -205,8 +207,30 @@ def get_metrics(self) -> List[Dict[str, Any]]:
else:
check_module(framework)

metrics = get_metrics_dict()
return metrics.get(framework, [])
inc_framework_name = "onnxrt" if framework == "onnxrt_qlinearops" else framework

fw_metrics = registry_metrics.get(inc_framework_name, None)

raw_metric_list = list(fw_metrics.keys()) if fw_metrics else []
raw_metric_list += ["custom"]

metrics = []

loaded_metrics = load_metrics_config()

for metric_name in raw_metric_list:
if metric_name in [metric_item.get("name", None) for metric_item in loaded_metrics]:
metric_config = None
for metric_item in loaded_metrics:
if metric_item.get("name") == metric_name:
metric_config = metric_item
if metric_config is not None:
metric = _update_metric_parameters(metric_config)
metrics.append(metric)
else:
metrics.append({"name": metric_name, "help": "", "value": None})

return metrics


def get_possible_values(data: dict) -> Dict[str, List[Any]]:
Expand Down
@@ -0,0 +1,74 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2022 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# flake8: noqa
# mypy: ignore-errors
"""v1.14
Revision ID: 6ece06672ed3
Revises: 6f0d0f71d92e
Create Date: 2022-08-31 07:16:24.229939
"""
from sqlalchemy.orm import sessionmaker

from neural_compressor.ux.components.db_manager.db_manager import DBManager
from neural_compressor.ux.components.db_manager.db_models.optimization_type import OptimizationType
from neural_compressor.ux.components.db_manager.db_models.precision import (
Precision,
precision_optimization_type_association,
)
from neural_compressor.ux.utils.consts import OptimizationTypes, Precisions

db_manager = DBManager()
Session = sessionmaker(bind=db_manager.engine)


# revision identifiers, used by Alembic.
revision = "6ece06672ed3"
down_revision = "6f0d0f71d92e"
branch_labels = None
depends_on = None


def upgrade():
    """Apply the v1.14 schema changes.

    Registers the "mixed precision" optimization type and associates the
    BF16 precision with it.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    with Session.begin() as db_session:
        mixed_precision_id = OptimizationType.add(
            db_session=db_session,
            name=OptimizationTypes.MIXED_PRECISION.value,
        )
        bf16_precision_id = Precision.get_precision_by_name(
            db_session=db_session,
            precision_name=Precisions.BF16.value,
        )[0]

        # Link the BF16 precision to the new mixed precision optimization type.
        # (Leftover debug print statements removed.)
        query = precision_optimization_type_association.insert().values(
            precision_id=bf16_precision_id,
            optimization_type_id=mixed_precision_id,
        )
        db_session.execute(query)

    # ### end Alembic commands ###


def downgrade():
    """Revert the v1.14 schema changes (intentionally a no-op: rows added by upgrade are left in place)."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###

0 comments on commit 26e902d

Please sign in to comment.