diff --git a/.ci/azure/linux.yml b/.ci/azure/linux.yml
index 7b1ee18d792d74..f61bb85dfb1850 100644
--- a/.ci/azure/linux.yml
+++ b/.ci/azure/linux.yml
@@ -88,6 +88,11 @@ jobs:
python3 -m pip install -r $(REPO_DIR)/inference-engine/ie_bridges/python/wheel/requirements-dev.txt
# For running Python API tests
python3 -m pip install -r $(REPO_DIR)/inference-engine/ie_bridges/python/src/requirements-dev.txt
+ # For running nGraph unit tests dependent on Python frameworks
+ python3 -m pip install -r $(REPO_DIR)/ngraph/test/requirements_test.txt
+ # For MO unit tests
+ python3 -m pip install -r $(REPO_DIR)/model-optimizer/requirements.txt
+ python3 -m pip install -r $(REPO_DIR)/model-optimizer/requirements_dev.txt
# Speed up build
wget https://github.com/ninja-build/ninja/releases/download/v1.10.0/ninja-linux.zip
unzip ninja-linux.zip
@@ -109,6 +114,7 @@ jobs:
-DENABLE_WHEEL=ON
-DENABLE_TESTS=ON
-DNGRAPH_ONNX_IMPORT_ENABLE=ON
+ -DNGRAPH_ONNX_FRONTEND_ENABLE=ON
-DENABLE_FASTER_BUILD=ON
-DENABLE_STRICT_DEPENDENCIES=OFF
-DIE_EXTRA_MODULES=$(OPENVINO_CONTRIB_REPO_DIR)/modules
@@ -149,7 +155,15 @@ jobs:
workingDirectory: $(BUILD_SAMPLES_DIR)
displayName: 'Build c samples'
- - script: . $(SETUPVARS) && $(INSTALL_TEST_DIR)/unit-test --gtest_print_time=1 --gtest_filter=-backend_api.config_unsupported:*IE_GPU* --gtest_output=xml:TEST-NGraphUT.xml
+ - script: |
+ export MO_ROOT=$(INSTALL_DIR)/deployment_tools/model_optimizer
+ . $(SETUPVARS) -pyver 3.6 && python3 -m pytest -s $(INSTALL_DIR)/deployment_tools/model_optimizer/unit_tests --junitxml=TEST-ModelOptimizer.xml
+ displayName: 'Model Optimizer UT'
+ continueOnError: false
+
+ - script: |
+ export FE_TEST_MODELS=$(INSTALL_DIR)/tests
+ . $(SETUPVARS) && $(INSTALL_TEST_DIR)/unit-test --gtest_print_time=1 --gtest_filter=-backend_api.config_unsupported:*IE_GPU* --gtest_output=xml:TEST-NGraphUT.xml
displayName: 'nGraph UT'
continueOnError: false
diff --git a/.ci/azure/linux_onnxruntime.yml b/.ci/azure/linux_onnxruntime.yml
index fce8fdddcc4f91..a2bfee8c70ac3e 100644
--- a/.ci/azure/linux_onnxruntime.yml
+++ b/.ci/azure/linux_onnxruntime.yml
@@ -95,6 +95,7 @@ jobs:
-DENABLE_SAMPLES=OFF
-DENABLE_SPEECH_DEMO=OFF
-DNGRAPH_ONNX_IMPORT_ENABLE=ON
+ -DNGRAPH_ONNX_FRONTEND_ENABLE=ON
-DNGRAPH_DEBUG_ENABLE=OFF
$(REPO_DIR)
workingDirectory: $(BUILD_DIR)
diff --git a/.ci/azure/mac.yml b/.ci/azure/mac.yml
index 04d4c16ea23344..90fc812bbaa36c 100644
--- a/.ci/azure/mac.yml
+++ b/.ci/azure/mac.yml
@@ -87,9 +87,6 @@ jobs:
export PATH="/usr/local/opt/cython/bin:$PATH"
export CC=gcc
export CXX=g++
- # Disable errors with Ninja
- export CXXFLAGS="-Wno-error=unused-command-line-argument"
- export CFLAGS="-Wno-error=unused-command-line-argument"
cmake -GNinja -DVERBOSE_BUILD=ON -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) -DENABLE_PYTHON=ON -DENABLE_TESTS=ON -DENABLE_STRICT_DEPENDENCIES=OFF -DIE_EXTRA_MODULES=$(OPENVINO_CONTRIB_REPO_DIR)/modules $(REPO_DIR)
workingDirectory: $(BUILD_DIR)
displayName: 'CMake'
diff --git a/.ci/openvino-onnx/Dockerfile b/.ci/openvino-onnx/Dockerfile
index 9b0f48cf66cc3e..315598225627e0 100644
--- a/.ci/openvino-onnx/Dockerfile
+++ b/.ci/openvino-onnx/Dockerfile
@@ -69,6 +69,7 @@ RUN cmake .. \
-DENABLE_PYTHON=ON \
-DPYTHON_EXECUTABLE=/usr/bin/python3 \
-DNGRAPH_ONNX_IMPORT_ENABLE=ON \
+ -DNGRAPH_ONNX_FRONTEND_ENABLE=ON \
-DNGRAPH_DEBUG_ENABLE=OFF \
-DCMAKE_INSTALL_PREFIX=/openvino/dist \
-DNGRAPH_USE_PROTOBUF_LITE=${PROTOBUF_LITE}
diff --git a/.github/workflows/code_style.yml b/.github/workflows/code_style.yml
index 607fe2cb64ae1a..7969cf13aa15e9 100644
--- a/.github/workflows/code_style.yml
+++ b/.github/workflows/code_style.yml
@@ -25,7 +25,7 @@ jobs:
run: |
mkdir build
cd build
- cmake -DENABLE_PYTHON=ON -DENABLE_PROFILING_ITT=ON -DSELECTIVE_BUILD=COLLECT ..
+ cmake -DENABLE_PYTHON=ON -DENABLE_TESTS=ON -DENABLE_PROFILING_ITT=ON -DSELECTIVE_BUILD=COLLECT ..
- name: Check code style
run: cmake --build build --target clang_format_check_all
diff --git a/cmake/coverage.cmake b/cmake/coverage.cmake
index 60c137337b3173..4d8976e0a80beb 100644
--- a/cmake/coverage.cmake
+++ b/cmake/coverage.cmake
@@ -92,9 +92,15 @@ ie_coverage_genhtml(INFO_FILE "ngraph"
if(NGRAPH_ONNX_IMPORT_ENABLE)
ie_coverage_extract(INPUT "openvino" OUTPUT "onnx_importer"
- PATTERNS "${OV_COVERAGE_BASE_DIRECTORY}/ngraph/frontend/onnx_common*"
- "${OV_COVERAGE_BASE_DIRECTORY}/ngraph/frontend/onnx_editor*"
- "${OV_COVERAGE_BASE_DIRECTORY}/ngraph/frontend/onnx_import*")
+ PATTERNS "${OV_COVERAGE_BASE_DIRECTORY}/ngraph/frontend/onnx/onnx_common*"
+ "${OV_COVERAGE_BASE_DIRECTORY}/ngraph/frontend/onnx/onnx_import*")
ie_coverage_genhtml(INFO_FILE "onnx_importer"
PREFIX "${OV_COVERAGE_BASE_DIRECTORY}")
endif()
+
+if(NGRAPH_ONNX_FRONTEND_ENABLE)
+ ie_coverage_extract(INPUT "openvino" OUTPUT "onnx_ngraph_frontend"
+ PATTERNS "${OV_COVERAGE_BASE_DIRECTORY}/ngraph/frontend/onnx/frontend*")
+ ie_coverage_genhtml(INFO_FILE "onnx_ngraph_frontend"
+ PREFIX "${OV_COVERAGE_BASE_DIRECTORY}")
+endif()
diff --git a/cmake/developer_package/compile_flags/os_flags.cmake b/cmake/developer_package/compile_flags/os_flags.cmake
index 7b5cc66d3e3434..0fdbf79e9ec7f2 100644
--- a/cmake/developer_package/compile_flags/os_flags.cmake
+++ b/cmake/developer_package/compile_flags/os_flags.cmake
@@ -68,13 +68,13 @@ function(ie_sse42_optimization_flags flags)
if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
# No such option for MSVC 2019
elseif(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
- set(${flags} /arch:SSE4.2 /QxSSE4.2 PARENT_SCOPE)
+ set(${flags} /QxSSE4.2 PARENT_SCOPE)
else()
message(WARNING "Unsupported CXX compiler ${CMAKE_CXX_COMPILER_ID}")
endif()
else()
if(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
- set(${flags} -msse4.2 -xSSE4.2 PARENT_SCOPE)
+ set(${flags} -xSSE4.2 PARENT_SCOPE)
else()
set(${flags} -msse4.2 PARENT_SCOPE)
endif()
@@ -95,7 +95,7 @@ function(ie_avx2_optimization_flags flags)
endif()
else()
if(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
- set(${flags} -march=core-avx2 -xCORE-AVX2 -mtune=core-avx2 PARENT_SCOPE)
+ set(${flags} -xCORE-AVX2 PARENT_SCOPE)
else()
set(${flags} -mavx2 -mfma PARENT_SCOPE)
endif()
@@ -152,6 +152,24 @@ function(ie_arm_neon_optimization_flags flags)
endif()
endfunction()
+#
+# Disables all warnings for 3rd party targets
+#
+function(ov_disable_all_warnings)
+ foreach(target IN LISTS ARGN)
+ if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+ target_compile_options(${target} PRIVATE /WX-)
+ elseif(CMAKE_COMPILER_IS_GNUCXX OR OV_COMPILER_IS_CLANG)
+ target_compile_options(${target} PRIVATE -w)
+ elseif(UNIX AND CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
+ # 193: zero used for undefined preprocessing identifier "XXX"
+ # 1011: missing return statement at end of non-void function "XXX"
+ # 2415: variable "xxx" of static storage duration was declared but never referenced
+ target_compile_options(${target} PRIVATE -diag-disable=warn,193,1011,2415)
+ endif()
+ endforeach()
+endfunction()
+
#
# Enables Link Time Optimization compilation
#
@@ -286,15 +304,12 @@ else()
ie_add_compiler_flags(-Wreturn-type)
ie_add_compiler_flags(-Wunused-variable)
- # Disable noisy warnings
-
if (CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang")
ie_add_compiler_flags(-Wswitch)
elseif(UNIX)
ie_add_compiler_flags(-Wuninitialized -Winit-self)
if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
- ie_add_compiler_flags(-Wno-error=switch
- -Winconsistent-missing-override)
+ ie_add_compiler_flags(-Winconsistent-missing-override)
else()
ie_add_compiler_flags(-Wmaybe-uninitialized)
check_cxx_compiler_flag("-Wsuggest-override" SUGGEST_OVERRIDE_SUPPORTED)
@@ -304,10 +319,11 @@ else()
endif()
endif()
+ # Disable noisy warnings
+
if(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
- ie_add_compiler_flags(-diag-disable=remark)
- # noisy warnings from Intel Compiler 19.1.1.217 20200306
- ie_add_compiler_flags(-diag-disable=2196)
+ # 177: function "XXX" was declared but never referenced
+ ie_add_compiler_flags(-diag-disable=remark,177,2196)
endif()
# Linker flags
@@ -319,7 +335,6 @@ else()
elseif(LINUX)
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,--gc-sections -Wl,--exclude-libs,ALL")
set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} -Wl,--gc-sections -Wl,--exclude-libs,ALL")
- set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--gc-sections -Wl,--exclude-libs,ALL")
endif()
endif()
diff --git a/cmake/developer_package/compile_flags/sanitizer.cmake b/cmake/developer_package/compile_flags/sanitizer.cmake
index ef71780c0f169b..35343b129f3a34 100644
--- a/cmake/developer_package/compile_flags/sanitizer.cmake
+++ b/cmake/developer_package/compile_flags/sanitizer.cmake
@@ -34,13 +34,13 @@ endif()
# common sanitizer options
if (DEFINED SANITIZER_COMPILER_FLAGS)
# ensure sumbols are present
- set(SANITIZER_COMPILER_FLAGS "-g -fno-omit-frame-pointer")
+ set(SANITIZER_COMPILER_FLAGS "${SANITIZER_COMPILER_FLAGS} -g -fno-omit-frame-pointer")
# prevent unloading libraries at runtime, so sanitizer can resolve their symbols
set(SANITIZER_LINKER_FLAGS "${SANITIZER_LINKER_FLAGS} -Wl,-z,nodelete")
if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
set(SANITIZER_LINKER_FLAGS "${SANITIZER_LINKER_FLAGS} -fuse-ld=gold")
- elseif(CMAKE_CXX_COMPILER_ID MATCHES "^(Apple)?Clang$" AND NOT WIN32)
+ elseif(OV_COMPILER_IS_CLANG AND NOT WIN32)
if(CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 8.0)
set(SANITIZER_LINKER_FLAGS "${SANITIZER_LINKER_FLAGS} -fuse-ld=lld")
endif()
diff --git a/cmake/developer_package/compile_flags/sdl.cmake b/cmake/developer_package/compile_flags/sdl.cmake
index 10a1e86ad6d48f..7690a9031d864a 100644
--- a/cmake/developer_package/compile_flags/sdl.cmake
+++ b/cmake/developer_package/compile_flags/sdl.cmake
@@ -23,7 +23,7 @@ if (CMAKE_BUILD_TYPE STREQUAL "Release")
if (NOT ENABLE_SANITIZER)
set(IE_C_CXX_FLAGS "${IE_C_CXX_FLAGS} -s")
endif()
- elseif(CMAKE_CXX_COMPILER_ID MATCHES "^(Apple)?Clang$")
+ elseif(OV_COMPILER_IS_CLANG)
set(IE_C_CXX_FLAGS "${IE_C_CXX_FLAGS} -fstack-protector-all")
elseif(CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
if (NOT ENABLE_SANITIZER)
diff --git a/cmake/developer_package/features.cmake b/cmake/developer_package/features.cmake
index 487dea8c7e382a..33e3530bac3359 100644
--- a/cmake/developer_package/features.cmake
+++ b/cmake/developer_package/features.cmake
@@ -58,7 +58,7 @@ ie_option (VERBOSE_BUILD "shows extra information about build" OFF)
ie_option (ENABLE_UNSAFE_LOCATIONS "skip check for MD5 for dependency" OFF)
-ie_dependent_option (ENABLE_FUZZING "instrument build for fuzzing" OFF "CMAKE_CXX_COMPILER_ID MATCHES ^(Apple)?Clang$; NOT WIN32" OFF)
+ie_dependent_option (ENABLE_FUZZING "instrument build for fuzzing" OFF "OV_COMPILER_IS_CLANG; NOT WIN32" OFF)
#
# Check features
diff --git a/cmake/developer_package/packaging.cmake b/cmake/developer_package/packaging.cmake
index 4cb21210d4a166..4095a16157c068 100644
--- a/cmake/developer_package/packaging.cmake
+++ b/cmake/developer_package/packaging.cmake
@@ -53,7 +53,9 @@ macro(ie_cpack)
set(CPACK_PACKAGE_VENDOR "Intel Corporation")
set(CPACK_VERBATIM_VARIABLES ON)
set(CPACK_COMPONENTS_ALL ${ARGN})
- set(CPACK_STRIP_FILES ON)
+ if (NOT DEFINED CPACK_STRIP_FILES)
+ set(CPACK_STRIP_FILES ON)
+ endif()
set(CPACK_THREADS 8)
string(REPLACE "/" "_" CPACK_PACKAGE_VERSION "${CI_BUILD_NUMBER}")
diff --git a/cmake/developer_package/target_flags.cmake b/cmake/developer_package/target_flags.cmake
index 181c4dd4187e1b..d4fd9837647005 100644
--- a/cmake/developer_package/target_flags.cmake
+++ b/cmake/developer_package/target_flags.cmake
@@ -55,3 +55,9 @@ endif()
if(UNIX AND NOT APPLE)
set(LINUX ON)
endif()
+
+if(CMAKE_CXX_COMPILER_ID MATCHES "^(Apple)?Clang$")
+ set(OV_COMPILER_IS_CLANG ON)
+else()
+ set(OV_COMPILER_IS_CLANG OFF)
+endif()
diff --git a/cmake/features.cmake b/cmake/features.cmake
index ea32a7a42fe822..b7e23ee9226747 100644
--- a/cmake/features.cmake
+++ b/cmake/features.cmake
@@ -38,8 +38,6 @@ ie_dependent_option (ENABLE_PYTHON "enables ie python bridge build" OFF "PYTHONL
find_package(PythonInterp 3 QUIET)
ie_dependent_option (ENABLE_DOCS "Build docs using Doxygen" OFF "PYTHONINTERP_FOUND" OFF)
-ie_option (ENABLE_SYSTEM_PUGIXML "use the system copy of pugixml" OFF)
-
#
# Inference Engine specific options
#
@@ -112,7 +110,11 @@ ie_dependent_option(ENABLE_TBB_RELEASE_ONLY "Only Release TBB libraries are link
ie_option (ENABLE_SYSTEM_PUGIXML "use the system copy of pugixml" OFF)
-ie_option (ENABLE_CPU_DEBUG_CAPS "enable CPU debug capabilities at runtime" OFF)
+ie_option (ENABLE_DEBUG_CAPS "enable OpenVINO debug capabilities at runtime" OFF)
+
+ie_dependent_option (ENABLE_GPU_DEBUG_CAPS "enable GPU debug capabilities at runtime" ON "ENABLE_DEBUG_CAPS" OFF)
+
+ie_dependent_option (ENABLE_CPU_DEBUG_CAPS "enable CPU debug capabilities at runtime" ON "ENABLE_DEBUG_CAPS" OFF)
if(ANDROID OR WINDOWS_STORE OR (MSVC AND (ARM OR AARCH64)))
set(protoc_available OFF)
@@ -121,10 +123,12 @@ else()
endif()
ie_dependent_option(NGRAPH_ONNX_IMPORT_ENABLE "Enable ONNX importer" ON "protoc_available" OFF)
-ie_dependent_option(NGRAPH_ONNX_EDITOR_ENABLE "Enable ONNX Editor" ON "NGRAPH_ONNX_IMPORT_ENABLE" OFF)
+ie_dependent_option(NGRAPH_ONNX_FRONTEND_ENABLE "Enable ONNX FrontEnd" OFF "NGRAPH_ONNX_IMPORT_ENABLE" OFF)
ie_dependent_option(NGRAPH_PDPD_FRONTEND_ENABLE "Enable PaddlePaddle FrontEnd" ON "protoc_available" OFF)
ie_dependent_option(NGRAPH_USE_PROTOBUF_LITE "Compiles and links with protobuf-lite" OFF
"NGRAPH_ONNX_IMPORT_ENABLE OR NGRAPH_PDPD_FRONTEND_ENABLE" OFF)
+ie_dependent_option(NGRAPH_USE_SYSTEM_PROTOBUF "Use system protobuf" OFF
+ "NGRAPH_ONNX_IMPORT_ENABLE OR NGRAPH_PDPD_FRONTEND_ENABLE" OFF)
ie_dependent_option(NGRAPH_UNIT_TEST_ENABLE "Enables ngraph unit tests" ON "ENABLE_TESTS;NOT ANDROID" OFF)
ie_dependent_option(NGRAPH_UNIT_TEST_BACKENDS_ENABLE "Control the building of unit tests using backends" ON
"NGRAPH_UNIT_TEST_ENABLE" OFF)
diff --git a/cmake/templates/InferenceEngineConfig.cmake.in b/cmake/templates/InferenceEngineConfig.cmake.in
index 261edbf3d730f3..43408483f9af6e 100644
--- a/cmake/templates/InferenceEngineConfig.cmake.in
+++ b/cmake/templates/InferenceEngineConfig.cmake.in
@@ -73,6 +73,10 @@ function(_ie_target_no_deprecation_error)
else()
set(flags "-Wno-error=deprecated-declarations")
endif()
+ if(CMAKE_CROSSCOMPILING)
+ set_target_properties(${ARGV} PROPERTIES
+ INTERFACE_LINK_OPTIONS "-Wl,--allow-shlib-undefined")
+ endif()
set_target_properties(${ARGV} PROPERTIES INTERFACE_COMPILE_OPTIONS ${flags})
endif()
diff --git a/docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_RetinaNet_From_Tensorflow.md b/docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_RetinaNet_From_Tensorflow.md
new file mode 100644
index 00000000000000..f02d50499fd857
--- /dev/null
+++ b/docs/MO_DG/prepare_model/convert_model/tf_specific/Convert_RetinaNet_From_Tensorflow.md
@@ -0,0 +1,15 @@
+# Converting RetinaNet Model from TensorFlow* to the Intermediate Representation {#openvino_docs_MO_DG_prepare_model_convert_model_tf_specific_Convert_RetinaNet_From_Tensorflow}
+
+This tutorial explains how to convert RetinaNet model to the Intermediate Representation (IR).
+
+[Public RetinaNet model](https://github.com/fizyr/keras-retinanet) does not contain pretrained TensorFlow\* weights.
+To convert this model to the TensorFlow\* format, you can use [Reproduce Keras* to TensorFlow* Conversion tutorial](https://docs.openvinotoolkit.org/latest/omz_models_model_retinanet_tf.html).
+
+After you convert the model to TensorFlow* format, run the Model Optimizer command below:
+```sh
+python mo.py --input "input_1[1 1333 1333 3]" --input_model retinanet_resnet50_coco_best_v2.1.0.pb --data_type FP32 --transformations_config ./extensions/front/tf/retinanet.json
+```
+
+Where `transformations_config` command-line parameter specifies the configuration json file containing model conversion hints for the Model Optimizer.
+The json file contains some parameters that need to be changed if you train the model yourself. It also contains information on how to match endpoints
+to replace the subgraph nodes. After the model is converted to IR, the output nodes will be replaced with DetectionOutput layer.
diff --git a/docs/doxygen/ie_docs.xml b/docs/doxygen/ie_docs.xml
index 19a87a1e11e97c..99e91e53ed572f 100644
--- a/docs/doxygen/ie_docs.xml
+++ b/docs/doxygen/ie_docs.xml
@@ -34,6 +34,7 @@ limitations under the License.
+
@@ -176,6 +177,7 @@ limitations under the License.
+
@@ -219,6 +221,7 @@ limitations under the License.
+
diff --git a/docs/ops/arithmetic/Acosh_3.md b/docs/ops/arithmetic/Acosh_3.md
index 79fde27fbd3c20..9f858924d4e01e 100644
--- a/docs/ops/arithmetic/Acosh_3.md
+++ b/docs/ops/arithmetic/Acosh_3.md
@@ -6,32 +6,28 @@
**Short description**: *Acosh* performs element-wise hyperbolic inverse cosine (arccosh) operation with given tensor.
-**Attributes**:
+**Detailed description**: Operation takes one input tensor and performs the element-wise hyperbolic inverse cosine operation on a given input tensor, based on the following mathematical formula:
- No attributes available.
+\f[
+a_{i} = acosh(a_{i})
+\f]
+
+**Attributes**: *Acosh* operation has no attributes.
**Inputs**
-* **1**: A tensor of type *T*. **Required.**
+* **1**: A tensor of type *T* and arbitrary shape. **Required.**
**Outputs**
-* **1**: The result of element-wise acosh operation. A tensor of type *T*.
+* **1**: The result of element-wise *Acosh* operation. A tensor of type *T* and the same shape as the input tensor.
**Types**
-* *T*: any floating-point type.
-
-*Acosh* does the following with the input tensor *a*:
-
-\f[
-a_{i} = acosh(a_{i})
-\f]
+* *T*: any numeric type.
**Examples**
-*Example 1*
-
```xml
diff --git a/docs/ops/arithmetic/Erf_1.md b/docs/ops/arithmetic/Erf_1.md
index 6b445dafad29bb..52d2d0301cb679 100644
--- a/docs/ops/arithmetic/Erf_1.md
+++ b/docs/ops/arithmetic/Erf_1.md
@@ -4,34 +4,32 @@
**Category**: Arithmetic unary operation
-**Short description**: *Erf* calculates the Gauss error function element-wise with given tensor.
+**Short description**: *Erf* performs element-wise Gauss error function (erf) on a given input tensor.
**Detailed Description**
-For each element from the input tensor calculates corresponding element in the output tensor with the following formula:
+*Erf* performs element-wise erf operation on a given input tensor, based on the following mathematical formula:
+
\f[
erf(x) = \pi^{-1} \int_{-x}^{x} e^{-t^2} dt
\f]
-**Attributes**:
-
- No attributes available.
+**Attributes**: *Erf* operation has no attributes.
**Inputs**
-* **1**: A tensor of type *T*. **Required.**
+* **1**: A tensor of type *T* and arbitrary shape. **Required.**
**Outputs**
-* **1**: The result of element-wise operation. A tensor of type *T*.
+* **1**: The result of element-wise *Erf* function applied to the input tensor. A tensor of type *T* and the same shape as the input tensor.
**Types**
-* *T*: any supported floating-point type.
+* *T*: any supported numeric type.
-**Examples**
-*Example 1*
+**Example**
```xml
diff --git a/docs/ops/arithmetic/Sign_1.md b/docs/ops/arithmetic/Sign_1.md
index e68cc51f97f7c7..1aa87097e62136 100644
--- a/docs/ops/arithmetic/Sign_1.md
+++ b/docs/ops/arithmetic/Sign_1.md
@@ -4,33 +4,30 @@
**Category**: Arithmetic unary operation
-**Short description**: *Sign* performs element-wise sign operation with given tensor.
+**Short description**: *Sign* performs element-wise sign operation on a given input tensor.
-**Attributes**:
+**Detailed description**: *Sign* performs element-wise sign operation on a given input tensor, based on the following mathematical formula:
- No attributes available.
+\f[
+a_{i} = sign(a_{i})
+\f]
+
+**Attributes**: *Sign* operation has no attributes.
**Inputs**
-* **1**: An tensor of type *T*. **Required.**
+* **1**: A tensor of type *T* and arbitrary shape. **Required.**
**Outputs**
-* **1**: The result of element-wise sign operation. A tensor of type *T* with mapped elements of the input tensor to -1 (if it is negative), 0 (if it is zero), or 1 (if it is positive).
+* **1**: The result of element-wise *Sign* operation. A tensor of type *T* with mapped elements of the input tensor to -1 (if it is negative), 0 (if it is zero), or 1 (if it is positive).
**Types**
* *T*: any numeric type.
-*Sign* does the following with the input tensor *a*:
-
-\f[
-a_{i} = sign(a_{i})
-\f]
-
-**Examples**
-*Example 1*
+**Example**
```xml
diff --git a/docs/ops/condition/If_8.md b/docs/ops/condition/If_8.md
new file mode 100644
index 00000000000000..7de2449b1eada1
--- /dev/null
+++ b/docs/ops/condition/If_8.md
@@ -0,0 +1,226 @@
+## If {#openvino_docs_ops_infrastructure_If_8}
+
+**Versioned name**: *If-8*
+
+**Category**: Infrastructure
+
+**Short description**: *If* operation contains two internal networks (subgraphs) such as `then_body` and `else_body`,
+and performs one of them depending on `cond` value. If `cond` is `True`, `then_body` is executed. If `cond` is `False`,
+the operation executes the `else_body` subgraph.
+
+**Detailed description**
+
+*If* must not contain empty subgraphs. Each of them must have at least one operation `Result`.
+Also the number of outputs from *If* always must be greater than zero and equal to the number of outputs from each subgraph.
+
+**If attributes**:
+
+* **Subgraphs**:
+
+ `then_body`/`else_body` are subgraphs that are executed depending on the `cond` value.
+ The subgraph is described operation by operation as a typical IR network.
+ The subgraph has inputs (`Parameter` operations) and outputs (`Result` operations).
+
+    * **Subgraph's inputs** - inputs to the subgraph which are associated with *If* inputs via *port_map*.
+ The subgraph can have any number of inputs (even zero).
+
+    * **Subgraph's outputs** - outputs from the subgraph which are associated with *If* outputs via *port_map*.
+ The subgraph must contain at least one output. Each *If* output is associated with one output from the subgraph.
+ Therefore the number of `then_body` outputs is equal to the number of outputs from *If* and
+ the number of `else_body` outputs.
+ The type of the subgraph output and the type of the associated output from *If* must be equal.
+
+
+* **Port maps**:
+
+ *port_map* is a set of rules to map input or output data tensors of *If* operation onto the subgraph data tensors.
+ The `port_map` entries can be `input` and `output`. Each entry describes a corresponding mapping rule.
+ *If* has two *port_maps*: `then_port_map` for `then_body` and `else_port_map` for `else_body`.
+
+ * **Port map attributes**:
+
+ * *external_port_id*
+ * **Description**: *external_port_id* is a port ID of *If* operation.
+ * **Range of values**: IDs of the *If* inputs and outputs
+ * **Type**: `unsigned int`
+ * **Default value**: None
+ * **Required**: *yes*
+
+ * *internal_layer_id*
+
+ * **Description**: *internal_layer_id* is a `Parameter` or `Result` operation ID inside
+ the subgraph to map to.
+ * **Range of values**: IDs of the `Parameter` or `Result` operations in the subgraph
+ * **Type**: `unsigned int`
+ * **Default value**: None
+ * **Required**: *yes*
+
+**If Inputs**
+
+
+* **cond**: A scalar or 1D tensor with 1 element of `boolean` type specifying which subgraph to execute.
+`True` value means to execute the `then_body`, `False` - `else_body`. *Required*.
+
+* **Multiple other inputs**: Tensors of different types and shapes. *Optional*.
+
+**If Outputs**
+
+* **Multiple outputs**: Results of execution of one of the subgraph. Tensors of any type and shape.
+
+
+**Body Inputs**
+
+* **Multiple inputs**: Tensors of different types and shapes. *Optional*.
+
+
+**Body Outputs**
+
+* **Multiple outputs**: Results of execution of the subgraph. Tensors of any type and shape.
+
+
+**Examples**
+
+*Example 1: a typical If structure*
+```xml
+
+
+
+
+ 2
+ 4
+
+
+ 2
+ 4
+
+
+ 2
+ 4
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ 2
+ 4
+
+
+ 2
+ 4
+
+
+
+
+
+
+
+ 2
+ 4
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ 2
+ 4
+
+
+ 2
+ 4
+
+
+
+
+
+
+
+ 2
+ 4
+
+
+
+
+
+
+
+
+
+
+
+```
diff --git a/docs/ops/condition/Select_1.md b/docs/ops/condition/Select_1.md
index 8f51624961078e..56e5fde8eab790 100644
--- a/docs/ops/condition/Select_1.md
+++ b/docs/ops/condition/Select_1.md
@@ -17,26 +17,31 @@
* **Description**: specifies rules used for auto-broadcasting of input tensors.
* **Range of values**:
- * *none* - no auto-broadcasting is allowed, all input shapes should match
- * *numpy* - numpy broadcasting rules, aligned with ONNX Broadcasting. Description is available in ONNX docs.
- * **Type**: string
+ * *none* - no auto-broadcasting is allowed, all input shapes must match
+ * *numpy* - numpy broadcasting rules, description is available in [Broadcast Rules For Elementwise Operations](../broadcast_rules.md)
+ * *pdpd* - PaddlePaddle-style implicit broadcasting, description is available in [Broadcast Rules For Elementwise Operations](../broadcast_rules.md)
+ * **Type**: `string`
* **Default value**: "numpy"
* **Required**: *no*
**Inputs**:
-* **1**: `cond` tensor with selection mask of type `boolean`. The tensor can be 0D.
+* **1**: `cond` - tensor of type *T_COND* and arbitrary shape with selection mask. **Required**.
-* **2**: `then` the tensor with elements to take where the corresponding element in `cond` is true. Arbitrary type that should match type of `else` input tensor.
+* **2**: `then` - tensor of type *T* and arbitrary shape with elements to take where the corresponding element in `cond` is `true`. **Required**.
-* **3**: `else` the tensor with elements to take where the corresponding element in `cond` is false. Arbitrary type that should match type of `then` input tensor.
+* **3**: `else` - tensor of type *T* and arbitrary shape with elements to take where the corresponding element in `cond` is `false`. **Required**.
**Outputs**:
* **1**: blended output tensor that is tailored from values of inputs tensors `then` and `else` based on `cond` and broadcasting rules. It has the same type of elements as `then` and `else`.
+**Types**
+
+* *T_COND*: `boolean` type.
+* *T*: any supported numeric type.
**Example**
diff --git a/docs/ops/generation/RandomUniform_8.md b/docs/ops/generation/RandomUniform_8.md
new file mode 100644
index 00000000000000..4269c82bc6a8aa
--- /dev/null
+++ b/docs/ops/generation/RandomUniform_8.md
@@ -0,0 +1,231 @@
+## RandomUniform {#openvino_docs_ops_generation_RandomUniform_8}
+
+**Versioned name**: *RandomUniform-8*
+
+**Category**: Generation
+
+**Short description**: *RandomUniform* operation generates a sequence of random values from a uniform distribution.
+
+**Detailed description**:
+
+*RandomUniform* operation generates random numbers from a uniform distribution in the range `[*minval*, *maxval*)`.
+The generation algorithm is based on an underlying random integer generator that uses the Philox algorithm. The Philox algorithm
+is a counter-based pseudo-random generator, which produces uint32 values. Single invocation of Philox algorithm returns
+four result random values, depending on the given *key* and *counter* values. *Key* and *counter* are initialized
+with *seed* and *seed2* attributes respectively.
+
+\f[
+key = seed\\
+counter = seed2
+\f]
+
+Link to the original paper [Parallel Random Numbers: As Easy as 1, 2, 3](https://www.thesalmons.org/john/random123/papers/random123sc11.pdf)
+
+The result of Philox is calculated by applying a fixed number of *key* and *counter* updating so-called "rounds".
+This implementation uses 4x32_10 version of Philox algorithm, where number of rounds = 10.
+
+Suppose we have *n* which determines *n*-th 4 elements of random sequence.
+In each round *key*, *counter* and *n* are split into pairs of uint32 values:
+
+\f[
+R = cast\_to\_uint32(value)\\
+L = cast\_to\_uint32(value >> 32),
+\f]
+where *cast\_to\_uint32* - static cast to uint32, *value* - uint64 input value, *L*, *R* - uint32
+result values, >> - bitwise right shift.
+
+Then *n* and *counter* are updated with the following formula:
+
+\f[
+L'= mullo(R, M)\\
+R' = mulhi(R, M) {\oplus} k {\oplus} L \\
+mulhi(a, b) = floor((a {\times} b) / 2^{32}) \\
+mullo(a, b) = (a {\times} b) \mod 2^{32}
+\f]
+where `{\oplus}` - bitwise xor, *k* = `R_{key}` for updating counter, *k* = `L_{key}` for updating *n*,
+*M* = `0xD2511F53` for updating *n*, *M* = `0xCD9E8D57` for updating *counter*.
+
+After each round *key* is raised by summing with another pair of const values:
+\f[
+L += 0x9E3779B9 \\
+R += 0xBB67AE85
+\f]
+Values *L'_{n}*, *R'_{n}*, *L'_{counter}*, *R'_{counter}* are resulting four random numbers.
+
+Float values between [0..1) are obtained from 32-bit integers by the following rules.
+
+Float16 is formatted as follows: *sign*(1 bit) *exponent*(5 bits) *mantissa*(10 bits). The value is interpreted
+using following formula:
+\f[
+(-1)^{sign} * 1, mantissa * 2 ^{exponent - 15}
+\f]
+
+so to obtain float16 values *sign*, *exponent* and *mantissa* are set as follows:
+```
+sign = 0
+exponent = 15 - representation of a zero exponent.
+mantissa = 10 right bits from generated uint32 random value.
+```
+
+So the resulting float16 value is:
+```
+x_uint16 = x // Truncate the upper 16 bits.
+val = ((exponent << 10) | x_uint16 & 0x3ffu) - 1.0,
+```
+where x is uint32 generated random value.
+
+Float32 is formatted as follows: *sign*(1 bit) *exponent*(8 bits) *mantissa*(23 bits). The value is interpreted
+using following formula:
+\f[
+(-1)^{sign} * 1, mantissa * 2 ^{exponent - 127}
+\f]
+
+so to obtain float values *sign*, *exponent* and *mantissa* are set as follows:
+```
+sign = 0
+exponent = 127 - representation of a zero exponent.
+mantissa = 23 right bits from generated uint32 random value.
+```
+
+So the resulting float value is:
+```
+val = ((exponent << 23) | x & 0x7fffffu) - 1.0,
+```
+where x is uint32 generated random value.
+
+Double is formatted as follows: *sign*(1 bit) *exponent*(11 bits) *mantissa*(52 bits). The value is interpreted
+using following formula:
+\f[
+(-1)^{sign} * 1, mantissa * 2 ^{exponent - 1023}
+\f]
+
+so to obtain double values *sign*, *exponent* and *mantissa* are set as follows:
+```
+sign = 0
+exponent = 1023 - representation of a zero exponent.
+mantissa = 52 right bits from two concatenated uint32 values from the random integer generator.
+```
+
+So the resulting double is obtained as follows:
+```
+mantissa_h = x0 & 0xfffffu; // upper 20 bits of mantissa
+mantissa_l = x1; // lower 32 bits of mantissa
+mantissa = (mantissa_h << 32) | mantissa_l;
+val = ((exponent << 52) | mantissa) - 1.0,
+```
+where x0, x1 are uint32 generated random values.
+
+To obtain a value in a specified range each value is processed with the following formulas:
+
+For float values:
+\f[
+result = x * (maxval - minval) + minval,
+\f]
+where *x* is random float or double value between [0..1).
+
+For integer values:
+\f[
+result = x \mod (maxval - minval) + minval,
+\f]
+where *x* is uint32 random value.
+
+
+Example 1. *RandomUniform* output with `seed` = 150, `seed2` = 10, `output_type` = f32:
+
+```
+input_shape = [ 3, 3 ]
+output = [[0.7011236 0.30539632 0.93931055]
+ [0.9456035 0.11694777 0.50770056]
+ [0.5197197 0.22727466 0.991374 ]]
+```
+
+Example 2. *RandomUniform* output with `seed` = 80, `seed2` = 100, `output_type` = double:
+
+```
+input_shape = [ 2, 2 ]
+
+minval = 2
+
+maxval = 10
+
+output = [[5.65927959 4.23122376]
+ [2.67008206 2.36423758]]
+```
+
+Example 3. *RandomUniform* output with `seed` = 80, `seed2` = 100, `output_type` = i32:
+
+```
+input_shape = [ 2, 3 ]
+
+minval = 50
+
+maxval = 100
+
+output = [[65 70 56]
+ [59 82 92]]
+```
+
+**Attributes**:
+
+* *output_type*
+
+ * **Description**: the type of the output. Determines generation algorithm and affects resulting values.
+ Output numbers generated for different values of *output_type* may not be equal.
+ * **Range of values**: "i32", "i64", "f16", "bf16", "f32", "f64".
+ * **Type**: string
+ * **Required**: *Yes*
+
+* *seed*
+
+ * **Description**: global seed value.
+ * **Range of values**: positive integers
+ * **Type**: `int`
+ * **Required**: *Yes*
+
+* *seed2*
+
+ * **Description**: operational seed value.
+ * **Range of values**: positive integers
+ * **Type**: `int`
+ * **Required**: *Yes*
+
+**Inputs**:
+
+* **1**: `shape` - 1D tensor of type *T_SHAPE* describing output shape. **Required.**
+
+* **2**: `minval` - scalar or 1D tensor with 1 element with type specified by the attribute *output_type*,
+ defines the lower bound on the range of random values to generate (inclusive). **Required.**
+
+* **3**: `maxval` - scalar or 1D tensor with 1 element with type specified by the attribute *output_type*,
+ defines the upper bound on the range of random values to generate (exclusive). **Required.**
+
+
+**Outputs**:
+
+* **1**: A tensor with type specified by the attribute *output_type* and shape defined by `shape` input tensor.
+
+**Types**
+
+* *T_SHAPE*: `int32` or `int64`.
+
+*Example 1: IR example.*
+
+```xml
+
+
+
+
+ 3
+
+
+
+
+
+
+```
diff --git a/docs/ops/normalization/MVN_1.md b/docs/ops/normalization/MVN_1.md
index ef8a37204dd82f..a82c9a9ca40531 100644
--- a/docs/ops/normalization/MVN_1.md
+++ b/docs/ops/normalization/MVN_1.md
@@ -4,57 +4,89 @@
**Category**: *Normalization*
-**Short description**: [Reference](http://caffe.berkeleyvision.org/tutorial/layers/mvn.html)
+**Short description**: Calculates mean-variance normalization of the input tensor. Supports two normalization techniques: [Instance/Contrast Normalization](https://arxiv.org/abs/1607.08022) and [Layer Normalization](https://arxiv.org/abs/1607.06450).
**Detailed description**
-*MVN* subtracts mean value from the input blob:
+Based on `across_channels` attribute mean value is calculated using one of formulas below:
+
+1. if `true` mean value is calculated using Layer Normalization:
+\f[
+\mu_{n} = \frac{\sum_{c}^{C}\sum_{h}^{H}\sum_{w}^{W} i_{nchw}}{C * H * W}
+\f]
+2. if `false` mean value is calculated using Instance/Contrast Normalization:
\f[
-o_{i} = i_{i} - \frac{\sum{i_{k}}}{C * H * W}
+\mu_{nc} = \frac{\sum_{h}^{H}\sum_{w}^{W} i_{nchw}}{H * W}
\f]
-If *normalize_variance* is set to 1, the output blob is divided by variance:
+
+where \f$i_{nchw}\f$ is an input tensor parametrized by \f$n\f$ batches, \f$c\f$ channels and \f$h,w\f$ spatial dimensions.
+
+If `reduction_axes` attribute is provided mean value is calculated based on formula:
\f[
-o_{i}=\frac{o_{i}}{\sum \sqrt {o_{k}^2}+\epsilon}
+\mu_{n} = ReduceMean(i_{k}, reduction_axes)
\f]
+Afterwards *MVN* subtracts mean value from the input blob.
+
+If *normalize_variance* is set to `true`, the output blob is divided by variance:
+\f[
+o_{i}=\frac{o_{i}}{\sqrt {\sum {\sigma_{k}^2}+\epsilon}}
+\f]
+
+where \f$\sigma_{k}^2\f$ is the variance calculated based on mean value, \f$\epsilon\f$ is a value added to the variance for numerical stability and corresponds to `epsilon` attribute.
+
**Attributes**
* *across_channels*
- * **Description**: *across_channels* is a flag that specifies whether mean values are shared across channels. For example, *across_channels* equal to `false` means that mean values are not shared across channels.
+ * **Description**: *across_channels* is a flag that specifies whether mean values are shared across channels. If `true` mean values and variance are calculated for each sample across all channels and spatial dimensions (Layer Normalization), otherwise calculation is done for each sample and for each channel across spatial dimensions (Instance/Contrast Normalization).
* **Range of values**:
* `false` - do not share mean values across channels
* `true` - share mean values across channels
* **Type**: `boolean`
- * **Default value**: `false`
- * **Required**: *no*
+ * **Required**: *yes*
+
+* *reduction_axes*
+
+ * **Description**: 1D tensor of unique elements and type *T_IND* which specifies indices of dimensions in `data` that define normalization slices. Negative value means counting dimensions from the back.
+ * **Range of values**: allowed range of axes is `[-r; r-1]` where `r = rank(data)`, the order of the axes can be not sorted
+ * **Type**: `int`
+ * **Required**: *yes*
* *normalize_variance*
* **Description**: *normalize_variance* is a flag that specifies whether to perform variance normalization.
* **Range of values**:
- * `false` -- do not normalize variance
- * `true` -- normalize variance
+ * `false` - do not normalize variance
+ * `true` - normalize variance
* **Type**: `boolean`
- * **Default value**: `false`
- * **Required**: *no*
+ * **Required**: *yes*
* *eps*
* **Description**: *eps* is the number to be added to the variance to avoid division by zero when normalizing the value. For example, *epsilon* equal to 0.001 means that 0.001 is added to the variance.
* **Range of values**: a positive floating-point number
- * **Type**: `float`
+ * **Type**: `double`
* **Required**: *yes*
+* **Note**: Exactly one of the `across_channels` or `reduction_axes` attributes must be specified; they cannot be defined together.
+
**Inputs**
-* **1**: 4D or 5D input tensor of any floating-point type. **Required.**
+* **1**: `data` - input tensor of type *T* and arbitrary shape. **Required.**
**Outputs**
-* **1**: normalized tensor of the same type and shape as input tensor.
+* **1**: normalized tensor of type *T* and the same shape as the input tensor.
-**Example**
+**Types**
+
+* *T*: any floating point type.
+* *T_IND*: `int64` or `int32`.
+
+**Examples**
+
+*Example: with `across_channels` attribute*
```xml
@@ -77,3 +109,27 @@ o_{i}=\frac{o_{i}}{\sum \sqrt {o_{k}^2}+\epsilon}
```
+
+*Example: with `reduction_axes` attribute*
+
+```xml
+
+
+
+
+ 6
+ 12
+ 10
+ 24
+
+
+
+
+```
diff --git a/docs/ops/normalization/MVN_6.md b/docs/ops/normalization/MVN_6.md
index 9de691458c462d..f89cf60e92df7e 100644
--- a/docs/ops/normalization/MVN_6.md
+++ b/docs/ops/normalization/MVN_6.md
@@ -30,8 +30,8 @@ o_{i}=\frac{o_{i}}{\sqrt {\sum {o_{k}^2}}+\epsilon}
* **Description**: *normalize_variance* is a flag that specifies whether to perform variance normalization.
* **Range of values**:
- * `false` -- Do not normalize variance
- * `true` -- Normalize variance
+ * `false` - do not normalize variance
+ * `true` - normalize variance
* **Type**: `boolean`
* **Required**: *yes*
@@ -46,14 +46,14 @@ o_{i}=\frac{o_{i}}{\sqrt {\sum {o_{k}^2}}+\epsilon}
* **Description**: Choose where to add epsilon.
* **Range of values**:
- * `inside_sqrt` -- Add epsilon inside sqrt
- * `outside_sqrt` -- Add epsilon outside of sqrt
+ * `inside_sqrt` - add epsilon inside sqrt
+ * `outside_sqrt` - add epsilon outside of sqrt
* **Type**: `string`
* **Required**: *yes*
**Inputs**
-* **1**: `data` - Input tensor to be normalized. Type *T*. **Required.**
+* **1**: `data` - Input tensor to be normalized of type *T* and arbitrary shape. **Required.**
* **2**: `axes` - 1D tensor which specifies indices of dimensions in `data` that define normalization slices. Allowed range of axes is `[-r; r-1]` where `r = rank(data)`, the order can be not sorted. Negative value means counting dimensions from the back. Type *T_IND*. **Required.**
@@ -63,8 +63,7 @@ o_{i}=\frac{o_{i}}{\sqrt {\sum {o_{k}^2}}+\epsilon}
**Types**
-* *T*: any floating-point type.
-
+* *T*: any floating point type.
* *T_IND*: `int64` or `int32`.
**Example**
diff --git a/docs/ops/opset8.md b/docs/ops/opset8.md
index 02e97eab4e42f6..4c71a0bb2fa7fc 100644
--- a/docs/ops/opset8.md
+++ b/docs/ops/opset8.md
@@ -79,6 +79,7 @@ declared in `namespace opset8`.
* [HSigmoid](activation/HSigmoid_5.md)
* [HSwish](activation/HSwish_4.md)
* [IDFT](signals/IDFT_7.md)
+* [If](condition/If_8.md)
* [Interpolate](image/Interpolate_4.md)
* [Less](comparison/Less_1.md)
* [LessEqual](comparison/LessEqual_1.md)
@@ -114,6 +115,7 @@ declared in `namespace opset8`.
* [PriorBox](detection/PriorBox_1.md)
* [Proposal](detection/Proposal_4.md)
* [PSROIPooling](detection/PSROIPooling_1.md)
+* [RandomUniform](generation/RandomUniform_8.md)
* [Range](generation/Range_4.md)
* [ReLU](activation/ReLU_1.md)
* [ReadValue](infrastructure/ReadValue_3.md)
diff --git a/docs/template_extension/cpu_kernel.cpp b/docs/template_extension/cpu_kernel.cpp
index aa2486589cbff2..b1d426b15825ce 100644
--- a/docs/template_extension/cpu_kernel.cpp
+++ b/docs/template_extension/cpu_kernel.cpp
@@ -102,6 +102,7 @@ InferenceEngine::StatusCode OpImplementation::init(InferenceEngine::LayerConfig&
IE_THROW() << "Operation supports only FP32 precisions!";
}
} catch (InferenceEngine::Exception& ex) {
+ error = ex.what();
if (resp) {
strncpy(resp->msg, error.c_str(), sizeof(resp->msg) - 1);
resp->msg[sizeof(resp->msg) - 1] = 0;
diff --git a/docs/template_extension/fft_kernel.cpp b/docs/template_extension/fft_kernel.cpp
index 12554a70c75406..3fcf71a8f641b1 100644
--- a/docs/template_extension/fft_kernel.cpp
+++ b/docs/template_extension/fft_kernel.cpp
@@ -66,6 +66,7 @@ InferenceEngine::StatusCode FFTImpl::init(InferenceEngine::LayerConfig& config,
IE_THROW() << "Operation supports only FP32 precisions!";
}
} catch (InferenceEngine::Exception& ex) {
+ error = ex.what();
if (resp) {
strncpy(resp->msg, error.c_str(), sizeof(resp->msg) - 1);
resp->msg[sizeof(resp->msg) - 1] = 0;
diff --git a/docs/template_plugin/tests/functional/op_reference/acosh.cpp b/docs/template_plugin/tests/functional/op_reference/acosh.cpp
new file mode 100644
index 00000000000000..e854c98b7e0f7a
--- /dev/null
+++ b/docs/template_plugin/tests/functional/op_reference/acosh.cpp
@@ -0,0 +1,81 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include
+
+#include
+#include
+#include
+#include
+#include
+
+#include "base_reference_test.hpp"
+
+using namespace ngraph;
+
+namespace reference_tests {
+namespace {
+
+struct AcoshParams {
+ Tensor input;
+ Tensor expected;
+};
+
+struct Builder : ParamsBuilder {
+ REFERENCE_TESTS_ADD_SET_PARAM(Builder, input);
+ REFERENCE_TESTS_ADD_SET_PARAM(Builder, expected);
+};
+
+class ReferenceAcoshLayerTest : public testing::TestWithParam, public CommonReferenceTest {
+public:
+ void SetUp() override {
+ auto params = GetParam();
+ function = CreateFunction(params.input.shape, params.input.type);
+ inputData = {params.input.data};
+ refOutData = {params.expected.data};
+ }
+ static std::string getTestCaseName(const testing::TestParamInfo& obj) {
+ auto param = obj.param;
+ std::ostringstream result;
+ result << "shape=" << param.input.shape << "_";
+ result << "type=" << param.input.type;
+ return result.str();
+ }
+
+private:
+ static std::shared_ptr CreateFunction(const Shape& shape, const element::Type& type) {
+ const auto in = std::make_shared(type, shape);
+ const auto acosh = std::make_shared(in);
+ return std::make_shared(NodeVector {acosh}, ParameterVector {in});
+ }
+};
+
+TEST_P(ReferenceAcoshLayerTest, AcoshWithHardcodedRefs) {
+ Exec();
+}
+
+} // namespace
+
+INSTANTIATE_TEST_SUITE_P(
+ smoke_Acosh_With_Hardcoded_Refs, ReferenceAcoshLayerTest,
+ ::testing::Values(Builder {}
+ .input({{8}, element::f16, std::vector {1.f, 2.f, 3.f, 4.f, 5.f, 10.f, 100.f, 1000.f}})
+ .expected({{8}, element::f16, std::vector {0., 1.317, 1.763, 2.063, 2.292, 2.993, 5.298, 7.6012}}),
+ Builder {}
+ .input({{8}, element::f32, std::vector {1.f, 2.f, 3.f, 4.f, 5.f, 10.f, 100.f, 1000.f}})
+ .expected({{8}, element::f32, std::vector {0., 1.317, 1.763, 2.063, 2.292, 2.993, 5.298, 7.6012}}),
+ Builder {}
+ .input({{8}, element::i32, std::vector {1, 2, 3, 4, 5, 10, 100, 1000}})
+ .expected({{8}, element::i32, std::vector {0, 1, 2, 2, 2, 3, 5, 8}}),
+ Builder {}
+ .input({{8}, element::i64, std::vector {1, 2, 3, 4, 5, 10, 100, 1000}})
+ .expected({{8}, element::i64, std::vector {0, 1, 2, 2, 2, 3, 5, 8}}),
+ Builder {}
+ .input({{8}, element::u32, std::vector {1, 2, 3, 4, 5, 10, 100, 1000}})
+ .expected({{8}, element::u32, std::vector {0, 1, 2, 2, 2, 3, 5, 8}}),
+ Builder {}
+ .input({{8}, element::u64, std::vector {1, 2, 3, 4, 5, 10, 100, 1000}})
+ .expected({{8}, element::u64, std::vector {0, 1, 2, 2, 2, 3, 5, 8}})),
+ ReferenceAcoshLayerTest::getTestCaseName);
+} // namespace reference_tests
diff --git a/docs/template_plugin/tests/functional/op_reference/base_reference_test.cpp b/docs/template_plugin/tests/functional/op_reference/base_reference_test.cpp
index 51af4d2ea1a221..f2d2cf68aa39a2 100644
--- a/docs/template_plugin/tests/functional/op_reference/base_reference_test.cpp
+++ b/docs/template_plugin/tests/functional/op_reference/base_reference_test.cpp
@@ -9,6 +9,8 @@
using namespace InferenceEngine;
+namespace reference_tests {
+
CommonReferenceTest::CommonReferenceTest(): targetDevice("TEMPLATE") {
core = PluginCache::get().ie(targetDevice);
}
@@ -171,3 +173,5 @@ void CommonReferenceTest::ValidateBlobs(const InferenceEngine::Blob::Ptr& refBlo
FAIL() << "Comparator for " << precision << " precision isn't supported";
}
}
+
+} // namespace reference_tests
diff --git a/docs/template_plugin/tests/functional/op_reference/base_reference_test.hpp b/docs/template_plugin/tests/functional/op_reference/base_reference_test.hpp
index 6e3fd942a9e722..de08533405e566 100644
--- a/docs/template_plugin/tests/functional/op_reference/base_reference_test.hpp
+++ b/docs/template_plugin/tests/functional/op_reference/base_reference_test.hpp
@@ -5,8 +5,12 @@
#include
#include
#include
+#include
+#include
#include
+namespace reference_tests {
+
class CommonReferenceTest {
public:
CommonReferenceTest();
@@ -51,3 +55,55 @@ InferenceEngine::Blob::Ptr CreateBlob(const ngraph::element::Type& element_type,
return blob;
}
+///
+/// Class which should help to build data for single input
+///
+struct Tensor {
+ Tensor() = default;
+
+ Tensor(const ngraph::Shape& shape, ngraph::element::Type type, const InferenceEngine::Blob::Ptr& data): shape {shape}, type {type}, data {data} {}
+
+ template
+ Tensor(const ngraph::Shape& shape, ngraph::element::Type type, const std::vector& data_elements)
+ : Tensor {shape, type, CreateBlob(type, data_elements)} {}
+
+ ngraph::Shape shape;
+ ngraph::element::Type type;
+ InferenceEngine::Blob::Ptr data;
+};
+
+///
+/// Class which helps to build test parameters.
+///
+/// e.g.:
+/// struct Params {
+/// Tensor i,o;
+/// int mul;
+/// };
+/// struct TestParamsBuilder : ParamsBuilder
+/// REFERENCE_TESTS_ADD_SET_PARAM(TestParamsBuilder, i);
+/// REFERENCE_TESTS_ADD_SET_PARAM(TestParamsBuilder, o);
+/// REFERENCE_TESTS_ADD_SET_PARAM(TestParamsBuilder, mul);
+/// };
+///
+/// const Params p = TestParamsBuilder{}
+/// .i(Tensor{{0}, i32, {1}})
+/// .o(Tensor{{0}, i32, {1}})
+/// .mul(10);
+template
+class ParamsBuilder {
+protected:
+ Params params;
+
+public:
+ operator Params() const {
+ return params;
+ }
+};
+#define REFERENCE_TESTS_ADD_SET_PARAM(builder_type, param_to_set) \
+ builder_type& param_to_set(decltype(params.param_to_set) t) { \
+ params.param_to_set = std::move(t); \
+ return *this; \
+ }
+
+} // namespace reference_tests
diff --git a/docs/template_plugin/tests/functional/op_reference/convert.cpp b/docs/template_plugin/tests/functional/op_reference/convert.cpp
index fb32fda4cbbfd8..b8e6f5846f7408 100644
--- a/docs/template_plugin/tests/functional/op_reference/convert.cpp
+++ b/docs/template_plugin/tests/functional/op_reference/convert.cpp
@@ -12,6 +12,7 @@
#include "base_reference_test.hpp"
+using namespace reference_tests;
using namespace ngraph;
using namespace InferenceEngine;
diff --git a/docs/template_plugin/tests/functional/op_reference/erf.cpp b/docs/template_plugin/tests/functional/op_reference/erf.cpp
new file mode 100644
index 00000000000000..bd888a8e03c90f
--- /dev/null
+++ b/docs/template_plugin/tests/functional/op_reference/erf.cpp
@@ -0,0 +1,94 @@
+// Copyright (C) 2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "base_reference_test.hpp"
+
+using namespace reference_tests;
+using namespace ngraph;
+using namespace InferenceEngine;
+
+struct ErfParams {
+ template
+ ErfParams(const ngraph::PartialShape& shape, const ngraph::element::Type& iType, const std::vector& iValues)
+ : pshape(shape), inType(iType), outType(iType), inputData(CreateBlob(iType, iValues)) {
+ std::vector oValues;
+ std::vector output;
+ for (auto element : iValues)
+ output.push_back(static_cast(element));
+
+ std::transform(output.begin(), output.end(), output.begin(), [](double input) -> double {
+ return std::erf(input);
+ });
+
+ if (std::is_integral()) {
+ std::transform(output.begin(), output.end(), output.begin(), [](double input) -> double {
+ return std::round(input);
+ });
+ }
+
+ for (auto element : output)
+ oValues.push_back(static_cast(element));
+ refData = CreateBlob(outType, oValues);
+ }
+ ngraph::PartialShape pshape;
+ ngraph::element::Type inType;
+ ngraph::element::Type outType;
+ InferenceEngine::Blob::Ptr inputData;
+ InferenceEngine::Blob::Ptr refData;
+};
+
+class ReferenceErfLayerTest : public testing::TestWithParam, public CommonReferenceTest {
+public:
+ void SetUp() override {
+ auto params = GetParam();
+ function = CreateFunction(params.pshape, params.inType, params.outType);
+ inputData = {params.inputData};
+ refOutData = {params.refData};
+ }
+ static std::string getTestCaseName(const testing::TestParamInfo& obj) {
+ auto param = obj.param;
+ std::ostringstream result;
+ result << "shape=" << param.pshape << "_";
+ result << "iType=" << param.inType << "_";
+ result << "oType=" << param.outType;
+ return result.str();
+ }
+
+private:
+ static std::shared_ptr CreateFunction(const PartialShape& input_shape, const element::Type& input_type,
+ const element::Type& expected_output_type) {
+ const auto in = std::make_shared(input_type, input_shape);
+ const auto erf = std::make_shared(in);
+ return std::make_shared(NodeVector {erf}, ParameterVector {in});
+ }
+};
+
+TEST_P(ReferenceErfLayerTest, CompareWithRefs) {
+ Exec();
+}
+
+INSTANTIATE_TEST_SUITE_P(
+ smoke_Erf_With_Hardcoded_Refs, ReferenceErfLayerTest,
+ ::testing::Values(ErfParams(ngraph::PartialShape {2, 5}, ngraph::element::f32,
+ std::vector {-INFINITY, -4.0f, -3.0f, -2.0f, -1.0f, 0.0f, 1.0f, 2.0f, 3.0f, INFINITY}),
+ ErfParams(ngraph::PartialShape {2, 5}, ngraph::element::f16,
+ std::vector {-INFINITY, -4.0f, -3.0f, -2.0f, -1.0f, 0.0f, 1.0f, 2.0f, 3.0f, INFINITY}),
+ ErfParams(ngraph::PartialShape {2, 3}, ngraph::element::i32,
+ std::vector {std::numeric_limits::min(), -2, -1, 1, 2, std::numeric_limits::max()}),
+ ErfParams(ngraph::PartialShape {2, 3}, ngraph::element::u32,
+ std::vector {std::numeric_limits::min(), 0, 1, 2, 3, std::numeric_limits::max()}),
+ ErfParams(ngraph::PartialShape {2, 3}, ngraph::element::i64,
+ std::vector {std::numeric_limits::min(), -2, -1, 1, 2, std::numeric_limits::max()}),
+ ErfParams(ngraph::PartialShape {2, 3}, ngraph::element::u64,
+ std::vector {std::numeric_limits::min(), 0, 1, 2, 3, std::numeric_limits::max()})),
+ ReferenceErfLayerTest::getTestCaseName);
diff --git a/docs/template_plugin/tests/functional/op_reference/grn.cpp b/docs/template_plugin/tests/functional/op_reference/grn.cpp
index 4d003b9b9a2fef..e7fc0c79f6b82b 100644
--- a/docs/template_plugin/tests/functional/op_reference/grn.cpp
+++ b/docs/template_plugin/tests/functional/op_reference/grn.cpp
@@ -12,21 +12,22 @@
#include "base_reference_test.hpp"
+using namespace reference_tests;
using namespace ngraph;
using namespace InferenceEngine;
namespace {
struct GrnParams {
template
- GrnParams(const float bias, const ngraph::PartialShape& shape, const ngraph::element::Type& iType, const std::vector& iValues,
+ GrnParams(const float bias, const PartialShape& shape, const element::Type& iType, const std::vector& iValues,
const std::vector& oValues)
: bias(bias), pshape(shape), inType(iType), outType(iType), inputData(CreateBlob(iType, iValues)), refData(CreateBlob(iType, oValues)) {}
float bias;
- ngraph::PartialShape pshape;
- ngraph::element::Type inType;
- ngraph::element::Type outType;
- InferenceEngine::Blob::Ptr inputData;
- InferenceEngine::Blob::Ptr refData;
+ PartialShape pshape;
+ element::Type inType;
+ element::Type outType;
+ Blob::Ptr inputData;
+ Blob::Ptr refData;
};
class ReferenceGrnLayerTest : public testing::TestWithParam, public CommonReferenceTest {
@@ -60,21 +61,21 @@ TEST_P(ReferenceGrnLayerTest, CompareWithHardcodedRefs) {
}
template
-std::vector generateGrnParams(const ngraph::element::Type& type) {
+std::vector generateGrnParams(const element::Type& type) {
using T = typename element_type_traits::value_type;
std::vector grnParams {
// bias 1e-6 // 2D // 3D // 4D
- GrnParams(1e-6, ngraph::PartialShape {3, 4}, type, std::vector {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12},
+ GrnParams(1e-6, PartialShape {3, 4}, type, std::vector {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12},
std::vector {0.182574, 0.365148, 0.547723, 0.730297, 0.379049, 0.454859, 0.530669, 0.606478, 0.426162, 0.473514, 0.520865, 0.568217}),
- GrnParams(1e-6, ngraph::PartialShape {2, 3, 4}, type,
+ GrnParams(1e-6, PartialShape {2, 3, 4}, type,
std::vector {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24},
std::vector {0.0966737, 0.169031, 0.224231, 0.267261, 0.483368, 0.507093, 0.523205, 0.534522, 0.870063, 0.845154, 0.822179, 0.801784,
0.433574, 0.441836, 0.449215, 0.455842, 0.566982, 0.568075, 0.569005, 0.569803, 0.700389, 0.694314, 0.688796, 0.683763}),
- GrnParams(1e-6, ngraph::PartialShape {1, 2, 3, 4}, type,
+ GrnParams(1e-6, PartialShape {1, 2, 3, 4}, type,
std::vector {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24},
std::vector {0.0766965, 0.141421, 0.196116, 0.242536, 0.282166, 0.316228, 0.345705, 0.371391, 0.393919, 0.413803, 0.431455, 0.447214,
0.997055, 0.989949, 0.980581, 0.970143, 0.959365, 0.948683, 0.938343, 0.928477, 0.919145, 0.910366, 0.902134, 0.894427}),
- GrnParams(1e-6, ngraph::PartialShape {2, 2, 3, 4}, type,
+ GrnParams(1e-6, PartialShape {2, 2, 3, 4}, type,
std::vector {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48},
std::vector {0.0766965, 0.141421, 0.196116, 0.242536, 0.282166, 0.316228, 0.345705, 0.371391, 0.393919, 0.413803, 0.431455, 0.447214,
@@ -82,17 +83,17 @@ std::vector generateGrnParams(const ngraph::element::Type& type) {
0.559857, 0.564684, 0.56921, 0.573462, 0.577465, 0.581238, 0.584802, 0.588172, 0.591364, 0.594391, 0.597266, 0.6,
0.828589, 0.825307, 0.822192, 0.819232, 0.816416, 0.813733, 0.811176, 0.808736, 0.806405, 0.804176, 0.802043, 0.8}),
// bias 100.25 // 2D // 3D // 4D
- GrnParams(100.25, ngraph::PartialShape {3, 4}, type, std::vector {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12},
+ GrnParams(100.25, PartialShape {3, 4}, type, std::vector {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12},
std::vector {0.0876216, 0.175243, 0.262865, 0.350486, 0.301923, 0.362308, 0.422693, 0.483077, 0.385076, 0.427863, 0.470649, 0.513435}),
- GrnParams(100.25, ngraph::PartialShape {2, 3, 4}, type,
+ GrnParams(100.25, PartialShape {2, 3, 4}, type,
std::vector {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24},
std::vector {0.0694629, 0.129032, 0.179525, 0.222137, 0.347314, 0.387097, 0.418891, 0.444273, 0.625166, 0.645161, 0.658258, 0.66641,
0.41125, 0.421303, 0.430287, 0.438356, 0.537789, 0.541675, 0.54503, 0.547945, 0.664327, 0.662047, 0.659774, 0.657534}),
- GrnParams(100.25, ngraph::PartialShape {1, 2, 3, 4}, type,
+ GrnParams(100.25, PartialShape {1, 2, 3, 4}, type,
std::vector {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24},
std::vector {0.0608299, 0.115422, 0.164091, 0.207321, 0.245662, 0.279675, 0.309889, 0.336786, 0.360795, 0.38229, 0.401596, 0.418994,
0.790789, 0.807954, 0.820457, 0.829283, 0.835252, 0.839026, 0.841128, 0.841965, 0.841854, 0.841037, 0.839701, 0.837989f}),
- GrnParams(100.25, ngraph::PartialShape {2, 2, 3, 4}, type,
+ GrnParams(100.25, PartialShape {2, 2, 3, 4}, type,
std::vector {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48},
std::vector {0.0608299, 0.115422, 0.164091, 0.207321, 0.245662, 0.279675, 0.309889, 0.336786, 0.360795, 0.38229, 0.401596, 0.418994,
@@ -103,9 +104,9 @@ std::vector generateGrnParams(const ngraph::element::Type& type) {
}
std::vector generateGrnCombinedParams() {
- const std::vector> grnTypeParams {generateGrnParams(ngraph::element::bf16),
- generateGrnParams(ngraph::element::f16),
- generateGrnParams(ngraph::element::f32)};
+ const std::vector> grnTypeParams {generateGrnParams(element::bf16),
+ generateGrnParams(element::f16),
+ generateGrnParams(element::f32)};
std::vector combinedParams;
std::for_each(grnTypeParams.begin(), grnTypeParams.end(), [&](std::vector params) {
combinedParams.insert(combinedParams.end(), params.begin(), params.end());
diff --git a/docs/template_plugin/tests/functional/op_reference/mvn.cpp b/docs/template_plugin/tests/functional/op_reference/mvn.cpp
new file mode 100644
index 00000000000000..5321164807b852
--- /dev/null
+++ b/docs/template_plugin/tests/functional/op_reference/mvn.cpp
@@ -0,0 +1,254 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include
+
+#include
+#include
+#include
+#include
+#include
+
+#include "base_reference_test.hpp"
+
+using namespace ngraph;
+using namespace InferenceEngine;
+using namespace reference_tests;
+
+// ------------------------------ V0 ------------------------------
+
+struct MVN1Params {
+ MVN1Params(const Tensor& paramInput, const ngraph::AxisSet& paramReductionAxes, const bool paramAcrossChannels, const bool paramNormalizeVariance,
+ const double paramEps, const Tensor& paramExpected)
+ : input(paramInput),
+ reductionAxes(paramReductionAxes),
+ acrossChannels(paramAcrossChannels),
+ normalizeVariance(paramNormalizeVariance),
+ eps(paramEps),
+ expected(paramExpected) {}
+ Tensor input;
+ ngraph::AxisSet reductionAxes;
+ bool acrossChannels;
+ bool normalizeVariance;
+ double eps;
+ Tensor expected;
+};
+
+class ReferenceMVN1LayerTest : public testing::TestWithParam, public CommonReferenceTest {
+public:
+ void SetUp() override {
+ auto params = GetParam();
+ function = CreateFunction(params.input, params.reductionAxes, params.acrossChannels, params.normalizeVariance, params.eps);
+ inputData = {params.input.data};
+ refOutData = {params.expected.data};
+ }
+ static std::string getTestCaseName(const testing::TestParamInfo& obj) {
+ auto param = obj.param;
+ std::ostringstream result;
+ result << "shape=" << param.input.shape;
+ result << "_iType=" << param.input.type;
+ if (!param.reductionAxes.empty()) {
+ result << "_reductionAccess=" << CommonTestUtils::vec2str(param.reductionAxes.to_vector());
+ } else {
+ result << "_acrossChannels=" << (param.acrossChannels ? "TRUE" : "FALSE");
+ }
+ result << "_normalizeVariance=" << (param.normalizeVariance ? "TRUE" : "FALSE");
+ result << "_eps=" << param.eps;
+ return result.str();
+ }
+
+private:
+ static std::shared_ptr CreateFunction(const Tensor& input, const ngraph::AxisSet& reductionAxes, const bool acrossChannels,
+ const bool normalizeVariance, const double eps) {
+ const auto in = std::make_shared(input.type, input.shape);
+ auto mvn = std::make_shared(in, acrossChannels, normalizeVariance, eps);
+ if (!reductionAxes.empty()) {
+ mvn = std::make_shared(in, reductionAxes, normalizeVariance, eps);
+ }
+ return std::make_shared(NodeVector {mvn}, ParameterVector {in});
+ }
+};
+
+TEST_P(ReferenceMVN1LayerTest, CompareWithHardcodedRefs) {
+ Exec();
+}
+
+const ngraph::AxisSet emptyReductionAxes {};
+
+INSTANTIATE_TEST_SUITE_P(
+ smoke_MVN1_With_Hardcoded_Refs, ReferenceMVN1LayerTest,
+ ::testing::Values(
+ // across_channels=false, variance=false
+ MVN1Params(Tensor {{1, 3, 3, 3}, ngraph::element::f32, std::vector {1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5,
+ 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9}},
+ emptyReductionAxes,
+ false,
+ false,
+ 1e-9,
+ Tensor {{1, 3, 3, 3}, ngraph::element::f32, std::vector {-4, -3, -2, -1, 0, 1, 2, 3, 4, -4, -3, -2, -1, 0,
+ 1, 2, 3, 4, -4, -3, -2, -1, 0, 1, 2, 3, 4}}),
+ // across_channels=true, variance=false
+ MVN1Params(
+ Tensor {{1, 3, 2, 2}, ngraph::element::f32, std::vector {1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3}},
+ emptyReductionAxes,
+ true,
+ false,
+ 1e-9,
+ Tensor {{1, 3, 2, 2}, ngraph::element::f32, std::vector {-3.25, -2.25, -1.25, -0.25, 0.75, 1.75, 2.75, 3.75, 4.75, -3.25, -2.25, -1.25}}),
+ // across_channels=false, variance=true
+ MVN1Params(Tensor {{1, 3, 3, 3}, ngraph::element::f32, std::vector {1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5,
+ 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9}},
+ emptyReductionAxes,
+ false,
+ true,
+ 1e-9,
+ Tensor {{1, 3, 3, 3},
+ ngraph::element::f32,
+ std::vector {-1.5491934, -1.161895, -0.7745967, -0.38729835, 0., 0.38729835, 0.7745967, 1.161895, 1.5491934,
+ -1.5491934, -1.161895, -0.7745967, -0.38729835, 0., 0.38729835, 0.7745967, 1.161895, 1.5491934,
+ -1.5491934, -1.161895, -0.7745967, -0.38729835, 0., 0.38729835, 0.7745967, 1.161895, 1.5491934}}),
+ // across_channels=true, variance=true
+ MVN1Params(Tensor {{1, 3, 3, 3}, ngraph::element::f32, std::vector {1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5,
+ 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9}},
+ emptyReductionAxes,
+ true,
+ true,
+ 1e-9,
+ Tensor {{1, 3, 3, 3},
+ ngraph::element::f32,
+ std::vector {-1.5491934, -1.161895, -0.7745967, -0.38729835, 0., 0.38729835, 0.7745967, 1.161895, 1.5491934,
+ -1.5491934, -1.161895, -0.7745967, -0.38729835, 0., 0.38729835, 0.7745967, 1.161895, 1.5491934,
+ -1.5491934, -1.161895, -0.7745967, -0.38729835, 0., 0.38729835, 0.7745967, 1.161895, 1.5491934}}),
+ // reductionAxes, variance=false
+ MVN1Params(
+ Tensor {{1, 3, 2, 2}, ngraph::element::f32, std::vector {1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3}},
+ {1, 2, 3},
+ false,
+ false,
+ 1e-9,
+ Tensor {{1, 3, 2, 2}, ngraph::element::f32, std::vector {-3.25, -2.25, -1.25, -0.25, 0.75, 1.75, 2.75, 3.75, 4.75, -3.25, -2.25, -1.25}}),
+ // reductionAxes, variance=true
+ MVN1Params(Tensor {{1, 3, 3, 3}, ngraph::element::f32, std::vector {1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5,
+ 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9}},
+ {2, 3},
+ false,
+ true,
+ 1e-9,
+ Tensor {{1, 3, 3, 3},
+ ngraph::element::f32,
+ std::vector {-1.5491934, -1.161895, -0.7745967, -0.38729835, 0., 0.38729835, 0.7745967, 1.161895, 1.5491934,
+ -1.5491934, -1.161895, -0.7745967, -0.38729835, 0., 0.38729835, 0.7745967, 1.161895, 1.5491934,
+ -1.5491934, -1.161895, -0.7745967, -0.38729835, 0., 0.38729835, 0.7745967, 1.161895, 1.5491934}})),
+ ReferenceMVN1LayerTest::getTestCaseName);
+
+// ------------------------------ V6 ------------------------------
+
+struct MVN6Params {
+ MVN6Params(const Tensor& paramInput, const Tensor& paramReductionAxes, const bool paramNormalizeVariance, const double paramEps,
+ const ngraph::op::MVNEpsMode mode, const Tensor& paramExpected)
+ : input(paramInput),
+ reductionAxes(paramReductionAxes),
+ normalizeVariance(paramNormalizeVariance),
+ eps(paramEps),
+ epsMode(mode),
+ expected(paramExpected) {}
+ Tensor input;
+ Tensor reductionAxes;
+ bool normalizeVariance;
+ double eps;
+ ngraph::op::MVNEpsMode epsMode;
+ Tensor expected;
+};
+
+class ReferenceMVN6LayerTest : public testing::TestWithParam, public CommonReferenceTest {
+public:
+ void SetUp() override {
+ auto params = GetParam();
+ function = CreateFunction(params.input, params.reductionAxes, params.normalizeVariance, params.eps, params.epsMode);
+ inputData = {params.input.data};
+ refOutData = {params.expected.data};
+ }
+ static std::string getTestCaseName(const testing::TestParamInfo& obj) {
+ auto param = obj.param;
+ std::ostringstream result;
+ result << "shape=" << param.input.shape;
+ result << "_iType=" << param.input.type;
+ result << "_reductionAccess=" << CommonTestUtils::vec2str(param.reductionAxes.shape);
+ result << "_normalizeVariance=" << (param.normalizeVariance ? "TRUE" : "FALSE");
+ result << "_eps=" << param.eps;
+ result << "_eps_mode=" << param.epsMode;
+ return result.str();
+ }
+
+private:
+ static std::shared_ptr CreateFunction(const Tensor& input, const Tensor& reductionAxes, const bool normalizeVariance, const double eps,
+ const ngraph::op::MVNEpsMode epsMode) {
+ std::vector dataVector(reductionAxes.shape[0]);
+ const auto in = std::make_shared(input.type, input.shape);
+ auto mRef = as(reductionAxes.data);
+ IE_ASSERT(mRef);
+ const auto refLockMemory = mRef->rmap();
+ const auto refBuffer = refLockMemory.as();
+ for (size_t i = 0; i < dataVector.size(); ++i) {
+ dataVector[i] = refBuffer[i];
+ }
+ const auto axes = std::make_shared(reductionAxes.type, reductionAxes.shape, dataVector);
+ auto mvn = std::make_shared(in, axes, normalizeVariance, eps, epsMode);
+ return std::make_shared(NodeVector {mvn}, ParameterVector {in});
+ }
+};
+
+TEST_P(ReferenceMVN6LayerTest, CompareWithHardcodedRefs) {
+ Exec();
+}
+
+INSTANTIATE_TEST_SUITE_P(
+ smoke_MVN6_With_Hardcoded_Refs, ReferenceMVN6LayerTest,
+ ::testing::Values(
+ // variance=false, OUTSIDE_SQRT
+ MVN6Params(Tensor {{1, 3, 3, 3}, ngraph::element::f32, std::vector {1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5,
+ 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9}},
+ Tensor {Shape {2}, ngraph::element::i64, std::vector {2, 3}},
+ false,
+ 1e-9,
+ ngraph::op::MVNEpsMode::OUTSIDE_SQRT,
+ Tensor {{1, 3, 3, 3}, ngraph::element::f32, std::vector {-4, -3, -2, -1, 0, 1, 2, 3, 4, -4, -3, -2, -1, 0,
+ 1, 2, 3, 4, -4, -3, -2, -1, 0, 1, 2, 3, 4}}),
+ // variance=true, OUTSIDE_SQRT
+ MVN6Params(Tensor {{1, 3, 3, 3}, ngraph::element::f32, std::vector {1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5,
+ 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9}},
+ Tensor {Shape {2}, ngraph::element::i64, std::vector {2, 3}},
+ true,
+ 1e-9,
+ ngraph::op::MVNEpsMode::OUTSIDE_SQRT,
+ Tensor {{1, 3, 3, 3},
+ ngraph::element::f32,
+ std::vector {-1.5491934, -1.161895, -0.7745967, -0.38729835, 0., 0.38729835, 0.7745967, 1.161895, 1.5491934,
+ -1.5491934, -1.161895, -0.7745967, -0.38729835, 0., 0.38729835, 0.7745967, 1.161895, 1.5491934,
+ -1.5491934, -1.161895, -0.7745967, -0.38729835, 0., 0.38729835, 0.7745967, 1.161895, 1.5491934}}),
+ // variance=true, INSIDE_SQRT
+ MVN6Params(Tensor {{1, 3, 3, 3}, ngraph::element::f32, std::vector {1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5,
+ 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9}},
+ Tensor {Shape {2}, ngraph::element::i64, std::vector {2, 3}},
+ true,
+ 1e-9,
+ ngraph::op::MVNEpsMode::INSIDE_SQRT,
+ Tensor {{1, 3, 3, 3},
+ ngraph::element::f32,
+ std::vector {-1.5491934, -1.161895, -0.7745967, -0.38729835, 0., 0.38729835, 0.7745967, 1.161895, 1.5491934,
+ -1.5491934, -1.161895, -0.7745967, -0.38729835, 0., 0.38729835, 0.7745967, 1.161895, 1.5491934,
+ -1.5491934, -1.161895, -0.7745967, -0.38729835, 0., 0.38729835, 0.7745967, 1.161895, 1.5491934}}),
+ // variance=true, another reductionAxes, OUTSIDE_SQRT
+ MVN6Params(Tensor {{1, 3, 3, 3}, ngraph::element::f32, std::vector({1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2, 3, 4, 5,
+ 6, 7, 8, 9, 1, 2, 3, 4, 5, 6, 7, 8, 9})},
+ Tensor {Shape {3}, ngraph::element::i64, std::vector({1, 2, 3})},
+ true,
+ 1e-9,
+ ngraph::op::MVNEpsMode::OUTSIDE_SQRT,
+ Tensor {{1, 3, 3, 3},
+ ngraph::element::f32,
+ std::vector {-1.5491934, -1.161895, -0.7745967, -0.38729835, 0., 0.38729835, 0.7745967, 1.161895, 1.5491934,
+ -1.5491934, -1.161895, -0.7745967, -0.38729835, 0., 0.38729835, 0.7745967, 1.161895, 1.5491934,
+ -1.5491934, -1.161895, -0.7745967, -0.38729835, 0., 0.38729835, 0.7745967, 1.161895, 1.5491934}})),
+ ReferenceMVN6LayerTest::getTestCaseName);
diff --git a/docs/template_plugin/tests/functional/op_reference/select.cpp b/docs/template_plugin/tests/functional/op_reference/select.cpp
new file mode 100644
index 00000000000000..0cbc242c61b202
--- /dev/null
+++ b/docs/template_plugin/tests/functional/op_reference/select.cpp
@@ -0,0 +1,140 @@
+// Copyright (C) 2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include
+
+#include
+#include
+#include
+#include
+#include
+
+#include "base_reference_test.hpp"
+
+using namespace reference_tests;
+using namespace ngraph;
+using namespace InferenceEngine;
+
+struct SelectParams {
+ template
+ SelectParams(const element::Type& data_type, const op::AutoBroadcastSpec& broadcast, const PartialShape& select_input_pshape,
+ const std::vector& select_input, const PartialShape& if_input_pshape, const std::vector& if_input,
+ const PartialShape& else_input_pshape, const std::vector& else_input, const std::vector& expected_output)
+ : data_type(data_type),
+ broadcast(broadcast),
+ select_input_pshape(select_input_pshape),
+ select_input(CreateBlob(element::boolean, select_input)),
+ if_input_pshape(if_input_pshape),
+ if_input(CreateBlob(data_type, if_input)),
+ else_input_pshape(else_input_pshape),
+ else_input(CreateBlob(data_type, else_input)),
+ expected_output(CreateBlob(data_type, expected_output)) {}
+
+ element::Type data_type;
+ op::AutoBroadcastSpec broadcast;
+ PartialShape select_input_pshape;
+ Blob::Ptr select_input;
+ PartialShape if_input_pshape;
+ Blob::Ptr if_input;
+ PartialShape else_input_pshape;
+ Blob::Ptr else_input;
+ Blob::Ptr expected_output;
+};
+
+class ReferenceSelectLayerTest : public testing::TestWithParam, public CommonReferenceTest {
+public:
+ void SetUp() override {
+ auto params = GetParam();
+ function = CreateFunction(params.data_type, params.broadcast, params.select_input_pshape, params.if_input_pshape, params.else_input_pshape);
+ inputData = {params.select_input, params.if_input, params.else_input};
+ refOutData = {params.expected_output};
+ }
+ static std::string getTestCaseName(const testing::TestParamInfo& obj) {
+ auto param = obj.param;
+ std::ostringstream result;
+ result << "data_type=" << param.data_type << "_";
+ result << "broadcast=" << param.broadcast.m_type << "_";
+ result << "select_shape=" << param.select_input_pshape << "_";
+ result << "if_shape=" << param.if_input_pshape << "_";
+ result << "else_shape=" << param.else_input_pshape;
+ return result.str();
+ }
+
+private:
+ static std::shared_ptr CreateFunction(const element::Type& data_type, const op::AutoBroadcastSpec& broadcast,
+ const PartialShape& select_pshape, const PartialShape& if_pshape, const PartialShape& else_pshape) {
+ auto A = std::make_shared(element::boolean, select_pshape);
+ auto B = std::make_shared(data_type, if_pshape);
+ auto C = std::make_shared(data_type, else_pshape);
+ return std::make_shared(std::make_shared(A, B, C, broadcast), ParameterVector {A, B, C});
+ }
+};
+
+TEST_P(ReferenceSelectLayerTest, CompareWithHardcodedRefs) {
+ Exec();
+}
+
+INSTANTIATE_TEST_SUITE_P(smoke_Select_With_Hardcoded_Refs, ReferenceSelectLayerTest,
+ ::testing::Values(
+ // fp32, no broadcasting
+ SelectParams(element::f32, // if/else/output data type
+ op::AutoBroadcastType::NONE, // broadcasting type
+ PartialShape {2, 2, 2}, // select shape
+ std::vector {0, 1, 1, 0, 0, 1, 0, 1}, // select data
+ PartialShape {2, 2, 2}, // if shape
+ std::vector {1, 2, 3, 4, 5, 6, 7, 8}, // if data
+ PartialShape {2, 2, 2}, // else shape
+ std::vector {11, 12, 13, 14, 15, 16, 17, 18}, // else data
+ std::vector {11, 2, 3, 14, 15, 6, 17, 8}), // expected output data
+ // i32, no broadcasting
+ SelectParams(element::i32, // if/else/output data type
+ op::AutoBroadcastType::NONE, // broadcasting type
+ PartialShape {2, 2, 2}, // select shape
+ std::vector {0, 1, 1, 0, 0, 1, 0, 1}, // select data
+ PartialShape {2, 2, 2}, // if shape
+ std::vector {1, 2, 3, 4, 5, 6, 7, 8}, // if data
+ PartialShape {2, 2, 2}, // else shape
+ std::vector {11, 12, 13, 14, 15, 16, 17, 18}, // else data
+ std::vector {11, 2, 3, 14, 15, 6, 17, 8}), // expected output data
+ // fp32, numpy broadcasting
+ SelectParams(element::f32, // if/else/output data type
+ op::AutoBroadcastType::NUMPY, // broadcasting type
+ PartialShape {4}, // select shape
+ std::vector {0, 1, 1, 0}, // select data
+ PartialShape {4}, // if shape
+ std::vector {1, 2, 3, 4}, // if data
+ PartialShape {2, 4}, // else shape
+ std::vector {11, 12, 13, 14, 15, 16, 17, 18}, // else data
+ std::vector {11, 2, 3, 14, 15, 2, 3, 18}), // expected output data
+ // i32, numpy broadcasting
+ SelectParams(element::i32, // if/else/output data type
+ op::AutoBroadcastType::NUMPY, // broadcasting type
+ PartialShape {4}, // select shape
+ std::vector {0, 1, 1, 0}, // select data
+ PartialShape {4}, // if shape
+ std::vector {1, 2, 3, 4}, // if data
+ PartialShape {2, 4}, // else shape
+ std::vector {11, 12, 13, 14, 15, 16, 17, 18}, // else data
+ std::vector {11, 2, 3, 14, 15, 2, 3, 18}), // expected output data
+ // fp32, pdpd broadcasting
+ SelectParams(element::f32, // if/else/output data type
+ {op::AutoBroadcastType::PDPD, -1}, // broadcasting type
+ PartialShape {2, 4}, // select shape
+ std::vector {0, 0, 0, 0, 0, 1, 1, 1}, // select data
+ PartialShape {2, 4}, // if shape
+ std::vector {1, 2, 3, 4, 5, 6, 7, 8}, // if data
+ PartialShape {4}, // else shape
+ std::vector {11, 12, 13, 14}, // else data
+ std::vector {11, 12, 13, 14, 11, 6, 7, 8}), // expected output data
+ // i32, pdpd broadcasting
+ SelectParams(element::i32, // if/else/output data type
+ {op::AutoBroadcastType::PDPD, -1}, // broadcasting type
+ PartialShape {2, 4}, // select shape
+ std::vector {0, 0, 0, 0, 0, 1, 1, 1}, // select data
+ PartialShape {2, 4}, // if shape
+ std::vector {1, 2, 3, 4, 5, 6, 7, 8}, // if data
+ PartialShape {4}, // else shape
+ std::vector {11, 12, 13, 14}, // else data
+ std::vector {11, 12, 13, 14, 11, 6, 7, 8})), // expected output data
+ ReferenceSelectLayerTest::getTestCaseName);
diff --git a/docs/template_plugin/tests/functional/op_reference/sign.cpp b/docs/template_plugin/tests/functional/op_reference/sign.cpp
new file mode 100644
index 00000000000000..ca1505cea1368e
--- /dev/null
+++ b/docs/template_plugin/tests/functional/op_reference/sign.cpp
@@ -0,0 +1,81 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include
+
+#include
+#include
+#include
+#include
+#include
+
+#include "base_reference_test.hpp"
+
+using namespace reference_tests;
+using namespace ngraph;
+using namespace InferenceEngine;
+
+struct SignParams {
+ template
+ SignParams(const PartialShape& shape, const element::Type& iType, const element::Type& oType, const std::vector& iValues,
+ const std::vector& oValues)
+ : pshape(shape), inType(iType), outType(oType), inputData(CreateBlob(iType, iValues)), refData(CreateBlob(oType, oValues)) {}
+ PartialShape pshape;
+ element::Type inType;
+ element::Type outType;
+ Blob::Ptr inputData;
+ Blob::Ptr refData;
+};
+
+class ReferenceSignLayerTest : public testing::TestWithParam, public CommonReferenceTest {
+public:
+ void SetUp() override {
+ auto params = GetParam();
+ function = CreateFunction(params.pshape, params.inType);
+ inputData = {params.inputData};
+ refOutData = {params.refData};
+ }
+ static std::string getTestCaseName(const testing::TestParamInfo& obj) {
+ auto param = obj.param;
+ std::ostringstream result;
+ result << "shape=" << param.pshape << "_";
+ result << "iType=" << param.inType << "_";
+ result << "oType=" << param.outType;
+ return result.str();
+ }
+
+private:
+ static std::shared_ptr CreateFunction(const PartialShape& input_shape, const element::Type& input_type) {
+ const auto in = std::make_shared(input_type, input_shape);
+ const auto sign = std::make_shared(in);
+ return std::make_shared(NodeVector {sign}, ParameterVector {in});
+ }
+};
+
+TEST_P(ReferenceSignLayerTest, CompareWithHardcodedRefs) {
+ Exec();
+}
+
+INSTANTIATE_TEST_SUITE_P(
+ smoke_Sign_With_Hardcoded_Refs, ReferenceSignLayerTest,
+ ::testing::Values(
+ SignParams(PartialShape {6}, element::f32, element::f32,
+ std::vector {1, -2, 0, -4.8f, 4.8f, -0.0f},
+ std::vector {1, -1, 0, -1, 1, 0}),
+ SignParams(PartialShape {6}, element::f16, element::f16,
+ std::vector {1, -2, 0, -4.8f, 4.8f, -0.0f},
+ std::vector {1, -1, 0, -1, 1, 0}),
+ SignParams(PartialShape {6}, element::u64, element::u64,
+ std::vector {1, 2, 0, 4, 4, 0},
+ std::vector {1, 1, 0, 1, 1, 0}),
+ SignParams(PartialShape {6}, element::u32, element::u32,
+ std::vector {1, 2, 0, 4, 4, 0},
+ std::vector {1, 1, 0, 1, 1, 0}),
+ SignParams(PartialShape {6}, element::i32, element::i32,
+ std::vector {1, -2, 0, -4, 4, -0},
+ std::vector {1, -1, 0, -1, 1, 0}),
+ SignParams(PartialShape {6}, element::i64, element::i64,
+ std::vector {1, -2, 0, -4, 4, -0},
+ std::vector {1, -1, 0, -1, 1, 0})),
+ ReferenceSignLayerTest::getTestCaseName);
diff --git a/inference-engine/cmake/ie_parallel.cmake b/inference-engine/cmake/ie_parallel.cmake
index d33a73a5fa760d..eb844d25b76e02 100644
--- a/inference-engine/cmake/ie_parallel.cmake
+++ b/inference-engine/cmake/ie_parallel.cmake
@@ -29,6 +29,7 @@ function(set_ie_threading_interface_for TARGET_NAME)
set(TBB_IMPORTED_TARGETS ${TBB_IMPORTED_TARGETS} PARENT_SCOPE)
set(TBB_VERSION ${TBB_VERSION} PARENT_SCOPE)
if (NOT TBB_FOUND)
+ set(THREADING "SEQ" PARENT_SCOPE)
ext_message(WARNING "TBB was not found by the configured TBB_DIR/TBBROOT path.\
SEQ method will be used.")
endif ()
@@ -95,6 +96,7 @@ function(set_ie_threading_interface_for TARGET_NAME)
set(IE_THREAD_DEFINE "IE_THREAD_TBB")
ie_target_link_libraries(${TARGET_NAME} ${LINK_TYPE} ${TBB_IMPORTED_TARGETS})
else ()
+ set(THREADING "SEQ" PARENT_SCOPE)
ext_message(WARNING "TBB was not found by the configured TBB_DIR path.\
SEQ method will be used for ${TARGET_NAME}")
endif ()
@@ -133,6 +135,7 @@ function(set_ie_threading_interface_for TARGET_NAME)
if (NOT OMP_LIBRARIES_RELEASE)
ext_message(WARNING "Intel OpenMP not found. Intel OpenMP support will be disabled. ${IE_THREAD_DEFINE} is defined")
+ set(THREADING "SEQ" PARENT_SCOPE)
else ()
set(IE_THREAD_DEFINE "IE_THREAD_OMP")
diff --git a/inference-engine/cmake/vpu_dependencies.cmake b/inference-engine/cmake/vpu_dependencies.cmake
index d134c29171802c..e6ec3799a3ccf8 100644
--- a/inference-engine/cmake/vpu_dependencies.cmake
+++ b/inference-engine/cmake/vpu_dependencies.cmake
@@ -6,14 +6,14 @@ include_guard(GLOBAL)
set(VPU_SUPPORTED_FIRMWARES usb-ma2x8x pcie-ma2x8x)
set(VPU_SUPPORTED_FIRMWARES_HASH
- "420b300d193f7fcfe7e3f9bbec6c247d65b784a500b5cd2effb7cb1ec6e1b209"
- "bfe3caf270b168b9de18ef88f04bde3907d7d12a679f1fa7cc580423c35db637")
+ "54a732b5fb17a0124652bc5113fa628c718a5af40621bca309471cb5ffd9271b"
+ "5750b2831c77ef54b8e243d3840c5ed1c9509681d55aee7e369d558cef628735")
#
# Default packages
#
-set(FIRMWARE_PACKAGE_VERSION 1688)
+set(FIRMWARE_PACKAGE_VERSION 1717)
set(VPU_CLC_MA2X8X_VERSION "movi-cltools-20.09.2")
#
diff --git a/inference-engine/ie_bridges/python/CMakeLists.txt b/inference-engine/ie_bridges/python/CMakeLists.txt
index 7b93a4291a2d3a..a88b1017a124f4 100644
--- a/inference-engine/ie_bridges/python/CMakeLists.txt
+++ b/inference-engine/ie_bridges/python/CMakeLists.txt
@@ -43,12 +43,14 @@ else()
endif()
if(ENABLE_CONDA_FOLDER)
+ set(PYTHON_COMPONENT conda_${PYTHON_VERSION})
if(WIN32)
set(PYTHON_BRIDGE_OUTPUT_DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/$/python_api/Conda/${PYTHON_VERSION}/openvino)
else()
set(PYTHON_BRIDGE_OUTPUT_DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/python_api/Conda/${PYTHON_VERSION}/openvino)
endif()
else()
+ set(PYTHON_COMPONENT ${PYTHON_VERSION})
if(WIN32)
set(PYTHON_BRIDGE_OUTPUT_DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/$/python_api/${PYTHON_VERSION}/openvino)
else()
@@ -56,6 +58,13 @@ else()
endif()
endif()
+function(ov_python_disable_intel_warnings target)
+ if(UNIX AND CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
+ # 1292: unknown attribute "fallthrough"
+ target_compile_options(${target} PRIVATE -diag-disable=1292)
+ endif()
+endfunction()
+
set (PYTHON_BRIDGE_SRC_ROOT ${CMAKE_CURRENT_SOURCE_DIR})
add_subdirectory (src/openvino/inference_engine)
add_subdirectory (src/openvino/offline_transformations)
@@ -74,19 +83,19 @@ endif()
# install
-ie_cpack_add_component(${PYTHON_VERSION})
+ie_cpack_add_component(${PYTHON_COMPONENT})
install(FILES requirements.txt
DESTINATION ${PYTHON_BRIDGE_CPACK_PATH}/${PYTHON_VERSION}
- COMPONENT ${PYTHON_VERSION})
+ COMPONENT ${PYTHON_COMPONENT})
install(FILES requirements.txt
DESTINATION ${PYTHON_BRIDGE_CPACK_PATH}
- COMPONENT ${PYTHON_VERSION})
+ COMPONENT ${PYTHON_COMPONENT})
install(PROGRAMS src/openvino/__init__.py
DESTINATION ${PYTHON_BRIDGE_CPACK_PATH}/${PYTHON_VERSION}/openvino
- COMPONENT ${PYTHON_VERSION})
+ COMPONENT ${PYTHON_COMPONENT})
# install Python samples
@@ -96,4 +105,4 @@ install(DIRECTORY sample/
DESTINATION ${IE_CPACK_IE_DIR}/samples/python
COMPONENT python_samples)
-ie_cpack(${PYTHON_VERSION} python_samples)
+ie_cpack(${PYTHON_COMPONENT} python_samples)
diff --git a/inference-engine/ie_bridges/python/src/openvino/inference_engine/CMakeLists.txt b/inference-engine/ie_bridges/python/src/openvino/inference_engine/CMakeLists.txt
index a236db836d60ae..cfab4f2d907f28 100644
--- a/inference-engine/ie_bridges/python/src/openvino/inference_engine/CMakeLists.txt
+++ b/inference-engine/ie_bridges/python/src/openvino/inference_engine/CMakeLists.txt
@@ -20,13 +20,15 @@ set_source_files_properties(${PYX_SOURCES} PROPERTIES CYTHON_IS_CXX ON)
# create target
cython_add_module(${TARGET_NAME} ${SOURCES})
-set(INSTALLED_TARGETS ${TARGET_NAME})
+ov_python_disable_intel_warnings(${TARGET_NAME})
+set(INSTALLED_TARGETS ${TARGET_NAME})
list(REMOVE_ITEM PYX_SOURCES "${CMAKE_CURRENT_SOURCE_DIR}/ie_api.pyx")
foreach(PYX_FILE IN LISTS PYX_SOURCES)
get_filename_component(PYX_NAME "${PYX_FILE}" NAME_WE)
cython_add_module(${PYX_NAME} ${PYX_FILE})
+ ov_python_disable_intel_warnings(${PYX_NAME})
add_dependencies(${TARGET_NAME} ${PYX_NAME})
target_include_directories(${PYX_NAME} PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}")
target_link_libraries(${PYX_NAME} PRIVATE ${InferenceEngine_LIBRARIES})
@@ -70,12 +72,12 @@ add_custom_command(TARGET ${TARGET_NAME}
# install
install(TARGETS ${INSTALLED_TARGETS}
- RUNTIME DESTINATION ${PYTHON_BRIDGE_CPACK_PATH}/${PYTHON_VERSION}/openvino/inference_engine COMPONENT ${PYTHON_VERSION}
- LIBRARY DESTINATION ${PYTHON_BRIDGE_CPACK_PATH}/${PYTHON_VERSION}/openvino/inference_engine COMPONENT ${PYTHON_VERSION})
+ RUNTIME DESTINATION ${PYTHON_BRIDGE_CPACK_PATH}/${PYTHON_VERSION}/openvino/inference_engine COMPONENT ${PYTHON_COMPONENT}
+ LIBRARY DESTINATION ${PYTHON_BRIDGE_CPACK_PATH}/${PYTHON_VERSION}/openvino/inference_engine COMPONENT ${PYTHON_COMPONENT})
install(PROGRAMS __init__.py
DESTINATION ${PYTHON_BRIDGE_CPACK_PATH}/${PYTHON_VERSION}/openvino/inference_engine
- COMPONENT ${PYTHON_VERSION})
+ COMPONENT ${PYTHON_COMPONENT})
add_clang_format_target(${TARGET_NAME}_clang FOR_TARGETS ${TARGET_NAME}
EXCLUDE_PATTERNS ".*\\.cxx;.*\\.pxd;.*\\.pyx")
diff --git a/inference-engine/ie_bridges/python/src/openvino/offline_transformations/CMakeLists.txt b/inference-engine/ie_bridges/python/src/openvino/offline_transformations/CMakeLists.txt
index 27c9e7bf898257..512b1662be525c 100644
--- a/inference-engine/ie_bridges/python/src/openvino/offline_transformations/CMakeLists.txt
+++ b/inference-engine/ie_bridges/python/src/openvino/offline_transformations/CMakeLists.txt
@@ -20,7 +20,9 @@ set_source_files_properties(${CMAKE_CURRENT_SOURCE_DIR}/offline_transformations_
# create target
cython_add_module(${TARGET_NAME} ${SOURCES})
+
add_dependencies(${TARGET_NAME} ie_api)
+ov_python_disable_intel_warnings(${TARGET_NAME})
if(COMMAND ie_add_vs_version_file)
ie_add_vs_version_file(NAME ${TARGET_NAME}
@@ -54,12 +56,12 @@ add_custom_command(TARGET ${TARGET_NAME}
# install
# TODO: use ${PYTHON_VERSION}_dev component below
-# ie_cpack_add_component(${PYTHON_VERSION}_dev DEPENDS ${PYTHON_VERSION})
+# ie_cpack_add_component(${PYTHON_VERSION}_dev DEPENDS ${PYTHON_COMPONENT})
install(TARGETS ${TARGET_NAME}
- RUNTIME DESTINATION python/${PYTHON_VERSION}/openvino/offline_transformations COMPONENT ${PYTHON_VERSION}
- LIBRARY DESTINATION python/${PYTHON_VERSION}/openvino/offline_transformations COMPONENT ${PYTHON_VERSION})
+ RUNTIME DESTINATION python/${PYTHON_VERSION}/openvino/offline_transformations COMPONENT ${PYTHON_COMPONENT}
+ LIBRARY DESTINATION python/${PYTHON_VERSION}/openvino/offline_transformations COMPONENT ${PYTHON_COMPONENT})
install(PROGRAMS __init__.py
DESTINATION python/${PYTHON_VERSION}/openvino/offline_transformations
- COMPONENT ${PYTHON_VERSION})
+ COMPONENT ${PYTHON_COMPONENT})
diff --git a/inference-engine/ie_bridges/python/src/openvino/test_utils/CMakeLists.txt b/inference-engine/ie_bridges/python/src/openvino/test_utils/CMakeLists.txt
index 8367f941d9f793..9d3e1e0ffc082d 100644
--- a/inference-engine/ie_bridges/python/src/openvino/test_utils/CMakeLists.txt
+++ b/inference-engine/ie_bridges/python/src/openvino/test_utils/CMakeLists.txt
@@ -20,7 +20,9 @@ set_source_files_properties(${CMAKE_CURRENT_SOURCE_DIR}/test_utils_api.pyx
# create target
cython_add_module(${TARGET_NAME} ${SOURCES})
+
add_dependencies(${TARGET_NAME} ie_api)
+ov_python_disable_intel_warnings(${TARGET_NAME})
if(COMMAND ie_add_vs_version_file)
ie_add_vs_version_file(NAME ${TARGET_NAME}
diff --git a/inference-engine/include/gpu/gpu_ocl_wrapper.hpp b/inference-engine/include/gpu/gpu_ocl_wrapper.hpp
index 85ca2521a76346..496f0974ad51e1 100644
--- a/inference-engine/include/gpu/gpu_ocl_wrapper.hpp
+++ b/inference-engine/include/gpu/gpu_ocl_wrapper.hpp
@@ -39,7 +39,7 @@
# pragma GCC system_header
#endif
-#include
+#include
#ifdef __GNUC__
# pragma GCC diagnostic pop
diff --git a/inference-engine/samples/CMakeLists.txt b/inference-engine/samples/CMakeLists.txt
index aef11e16f47bf8..c06336ec8f4e47 100644
--- a/inference-engine/samples/CMakeLists.txt
+++ b/inference-engine/samples/CMakeLists.txt
@@ -76,6 +76,10 @@ else()
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror") #treating warnings as errors
endif()
+ if (CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
+ set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -diag-disable:177")
+ endif()
+
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall")
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wall")
if (APPLE)
@@ -135,10 +139,6 @@ if(EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/thirdparty/cnpy")
add_subdirectory(thirdparty/cnpy EXCLUDE_FROM_ALL)
endif()
-if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall")
-endif()
-
if(EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/common/utils")
add_subdirectory(common/utils)
endif()
diff --git a/inference-engine/samples/benchmark_app/README.md b/inference-engine/samples/benchmark_app/README.md
index 7c61bc570d518e..2d5076a60c613c 100644
--- a/inference-engine/samples/benchmark_app/README.md
+++ b/inference-engine/samples/benchmark_app/README.md
@@ -95,6 +95,7 @@ Options:
-layout Optional. Prompts how network layouts should be treated by application. For example, "input1[NCHW],input2[NC]" or "[NCHW]" in case of one input size.
-cache_dir "" Optional. Enables caching of loaded models to specified directory.
-load_from_file Optional. Loads model from file directly without ReadNetwork.
+ -latency_percentile Optional. Defines the percentile to be reported in latency metric. The valid range is [1, 100]. The default value is 50 (median).
CPU-specific performance options:
-nstreams "" Optional. Number of streams to use for inference on the CPU, GPU or MYRIAD devices
diff --git a/inference-engine/samples/benchmark_app/benchmark_app.hpp b/inference-engine/samples/benchmark_app/benchmark_app.hpp
index af18c908e31b96..a369c2f1055ce1 100644
--- a/inference-engine/samples/benchmark_app/benchmark_app.hpp
+++ b/inference-engine/samples/benchmark_app/benchmark_app.hpp
@@ -56,6 +56,10 @@ static const char infer_num_streams_message[] = "Optional. Number of streams to
"Also, using nstreams>1 is inherently throughput-oriented option, "
"while for the best-latency estimations the number of streams should be set to 1.";
+/// @brief message for latency percentile settings
+static const char infer_latency_percentile_message[] =
+ "Optional. Defines the percentile to be reported in latency metric. The valid range is [1, 100]. The default value is 50 (median).";
+
/// @brief message for enforcing of BF16 execution where it is possible
static const char enforce_bf16_message[] = "Optional. By default floating point operations execution in bfloat16 precision are enforced "
"if supported by platform.\n"
@@ -189,6 +193,9 @@ DEFINE_uint32(nthreads, 0, infer_num_threads_message);
/// @brief Number of streams to use for inference on the CPU (also affects Hetero cases)
DEFINE_string(nstreams, "", infer_num_streams_message);
+/// @brief The percentile which will be reported in latency metric
+DEFINE_uint32(latency_percentile, 50, infer_latency_percentile_message);
+
/// @brief Enforces bf16 execution with bfloat16 precision on systems having this capability
DEFINE_bool(enforcebf16, false, enforce_bf16_message);
@@ -278,6 +285,7 @@ static void showUsage() {
std::cout << " -layout " << layout_message << std::endl;
std::cout << " -cache_dir \"\" " << cache_dir_message << std::endl;
std::cout << " -load_from_file " << load_from_file_message << std::endl;
+ std::cout << " -latency_percentile " << infer_latency_percentile_message << std::endl;
std::cout << std::endl << " device-specific performance options:" << std::endl;
std::cout << " -nstreams \"\" " << infer_num_streams_message << std::endl;
std::cout << " -nthreads \"\" " << infer_num_threads_message << std::endl;
diff --git a/inference-engine/samples/benchmark_app/main.cpp b/inference-engine/samples/benchmark_app/main.cpp
index 2a5252ba443a85..8df3bc2f8e400b 100644
--- a/inference-engine/samples/benchmark_app/main.cpp
+++ b/inference-engine/samples/benchmark_app/main.cpp
@@ -52,6 +52,10 @@ bool ParseAndCheckCommandLine(int argc, char* argv[]) {
throw std::logic_error("Model is required but not set. Please set -m option.");
}
+ if (FLAGS_latency_percentile > 100 || FLAGS_latency_percentile < 1) {
+ showUsage();
+ throw std::logic_error("The percentile value is incorrect. The applicable values range is [1, 100].");
+ }
if (FLAGS_api != "async" && FLAGS_api != "sync") {
throw std::logic_error("Incorrect API. Please set -api option to `sync` or `async` value.");
}
@@ -100,11 +104,10 @@ static void next_step(const std::string additional_info = "") {
}
template
-T getMedianValue(const std::vector& vec) {
+T getMedianValue(const std::vector& vec, std::size_t percentile) {
std::vector sortedVec(vec);
std::sort(sortedVec.begin(), sortedVec.end());
- return (sortedVec.size() % 2 != 0) ? sortedVec[sortedVec.size() / 2ULL]
- : (sortedVec[sortedVec.size() / 2ULL] + sortedVec[sortedVec.size() / 2ULL - 1ULL]) / static_cast(2.0);
+ return sortedVec[std::min(sortedVec.size() - 1, sortedVec.size() * percentile / 100)];
}
/**
@@ -624,7 +627,7 @@ int main(int argc, char* argv[]) {
// wait the latest inference executions
inferRequestsQueue.waitAll();
- double latency = getMedianValue(inferRequestsQueue.getLatencies());
+ double latency = getMedianValue(inferRequestsQueue.getLatencies(), FLAGS_latency_percentile);
double totalDuration = inferRequestsQueue.getDurationInMilliseconds();
double fps = (FLAGS_api == "sync") ? batchSize * 1000.0 / latency : batchSize * 1000.0 * iteration / totalDuration;
@@ -634,8 +637,14 @@ int main(int argc, char* argv[]) {
{"total number of iterations", std::to_string(iteration)},
});
if (device_name.find("MULTI") == std::string::npos) {
+ std::string latency_label;
+ if (FLAGS_latency_percentile == 50) {
+ latency_label = "latency (ms)";
+ } else {
+ latency_label = "latency (" + std::to_string(FLAGS_latency_percentile) + " percentile) (ms)";
+ }
statistics->addParameters(StatisticsReport::Category::EXECUTION_RESULTS, {
- {"latency (ms)", double_to_string(latency)},
+ {latency_label, double_to_string(latency)},
});
}
statistics->addParameters(StatisticsReport::Category::EXECUTION_RESULTS, {{"throughput", double_to_string(fps)}});
@@ -684,8 +693,15 @@ int main(int argc, char* argv[]) {
std::cout << "Count: " << iteration << " iterations" << std::endl;
std::cout << "Duration: " << double_to_string(totalDuration) << " ms" << std::endl;
- if (device_name.find("MULTI") == std::string::npos)
- std::cout << "Latency: " << double_to_string(latency) << " ms" << std::endl;
+ if (device_name.find("MULTI") == std::string::npos) {
+ std::cout << "Latency";
+ if (FLAGS_latency_percentile == 50) {
+ std::cout << ": ";
+ } else {
+ std::cout << " (" << FLAGS_latency_percentile << " percentile): ";
+ }
+ std::cout << double_to_string(latency) << " ms" << std::endl;
+ }
std::cout << "Throughput: " << double_to_string(fps) << " FPS" << std::endl;
} catch (const std::exception& ex) {
slog::err << ex.what() << slog::endl;
diff --git a/inference-engine/src/cldnn_engine/CMakeLists.txt b/inference-engine/src/cldnn_engine/CMakeLists.txt
index e292228c73f664..46dfd5e9fce858 100644
--- a/inference-engine/src/cldnn_engine/CMakeLists.txt
+++ b/inference-engine/src/cldnn_engine/CMakeLists.txt
@@ -12,7 +12,7 @@ if(CMAKE_COMPILER_IS_GNUCC)
endif()
endif()
-if(GPU_DEBUG_CONFIG)
+if(ENABLE_GPU_DEBUG_CAPS)
add_definitions(-DGPU_DEBUG_CONFIG=1)
endif()
diff --git a/inference-engine/src/cldnn_engine/cldnn_engine.cpp b/inference-engine/src/cldnn_engine/cldnn_engine.cpp
index 078a68c67843b6..206c50c93c857a 100644
--- a/inference-engine/src/cldnn_engine/cldnn_engine.cpp
+++ b/inference-engine/src/cldnn_engine/cldnn_engine.cpp
@@ -70,9 +70,12 @@
#include
#include
#include
-#include
+#include
#include
+#include
+#include
#include