Merge branch '3.4' into merge-3.4
alalek committed Dec 22, 2018
2 parents 578ea4b + c0e11bb commit 1dee705
Showing 29 changed files with 1,084 additions and 1,968 deletions.
3 changes: 3 additions & 0 deletions cmake/OpenCVDetectApacheAnt.cmake
@@ -1,3 +1,6 @@
+set(OPENCV_JAVA_SOURCE_VERSION "" CACHE STRING "Java source version (javac Ant target)")
+set(OPENCV_JAVA_TARGET_VERSION "" CACHE STRING "Java target version (javac Ant target)")
+
file(TO_CMAKE_PATH "$ENV{ANT_DIR}" ANT_DIR_ENV_PATH)
file(TO_CMAKE_PATH "$ENV{ProgramFiles}" ProgramFiles_ENV_PATH)

4 changes: 2 additions & 2 deletions cmake/OpenCVDetectInferenceEngine.cmake
@@ -78,9 +78,9 @@ endif()

if(INF_ENGINE_TARGET)
if(NOT INF_ENGINE_RELEASE)
-message(WARNING "InferenceEngine version have not been set, 2018R4 will be used by default. Set INF_ENGINE_RELEASE variable if you experience build errors.")
+message(WARNING "InferenceEngine version have not been set, 2018R5 will be used by default. Set INF_ENGINE_RELEASE variable if you experience build errors.")
endif()
-set(INF_ENGINE_RELEASE "2018040000" CACHE STRING "Force IE version, should be in form YYYYAABBCC (e.g. 2018R2.0.2 -> 2018020002)")
+set(INF_ENGINE_RELEASE "2018050000" CACHE STRING "Force IE version, should be in form YYYYAABBCC (e.g. 2018R2.0.2 -> 2018020002)")
set_target_properties(${INF_ENGINE_TARGET} PROPERTIES
INTERFACE_COMPILE_DEFINITIONS "HAVE_INF_ENGINE=1;INF_ENGINE_RELEASE=${INF_ENGINE_RELEASE}"
)
2 changes: 1 addition & 1 deletion doc/tutorials/dnn/dnn_android/dnn_android.markdown
@@ -12,7 +12,7 @@ Tutorial was written for the following versions of corresponding software:

- Download and install Android Studio from https://developer.android.com/studio.

-- Get the latest pre-built OpenCV for Android release from https://github.com/opencv/opencv/releases and unpack it (for example, `opencv-3.4.4-android-sdk.zip`).
+- Get the latest pre-built OpenCV for Android release from https://github.com/opencv/opencv/releases and unpack it (for example, `opencv-3.4.5-android-sdk.zip`).

- Download MobileNet object detection model from https://github.com/chuanqi305/MobileNet-SSD. We need a configuration file `MobileNetSSD_deploy.prototxt` and weights `MobileNetSSD_deploy.caffemodel`.

10 changes: 10 additions & 0 deletions modules/core/include/opencv2/core/hal/intrin_avx.hpp
@@ -1278,6 +1278,16 @@ OPENCV_HAL_IMPL_AVX_CHECK_FLT(v_float64x4, 15)
OPENCV_HAL_IMPL_AVX_MULADD(v_float32x8, ps)
OPENCV_HAL_IMPL_AVX_MULADD(v_float64x4, pd)

+inline v_int32x8 v_fma(const v_int32x8& a, const v_int32x8& b, const v_int32x8& c)
+{
+return a * b + c;
+}
+
+inline v_int32x8 v_muladd(const v_int32x8& a, const v_int32x8& b, const v_int32x8& c)
+{
+return v_fma(a, b, c);
+}
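A minimal usage sketch (not part of the diff) of the new integer v_muladd overload, assuming an AVX2 build where the 256-bit universal intrinsics (CV_SIMD256) are available:

    // Sketch: element-wise r[i] = a[i]*b[i] + c[i] using the new v_int32x8 overload.
    #include <opencv2/core/hal/intrin.hpp>
    #include <cstdio>

    int main()
    {
    #if CV_SIMD256
        int a[8] = {1, 2, 3, 4, 5, 6, 7, 8};
        int b[8] = {2, 2, 2, 2, 2, 2, 2, 2};
        int c[8] = {10, 10, 10, 10, 10, 10, 10, 10};
        int r[8];
        cv::v_int32x8 va = cv::v256_load(a), vb = cv::v256_load(b), vc = cv::v256_load(c);
        cv::v_store(r, cv::v_muladd(va, vb, vc));  // integer "FMA" is a plain multiply+add
        std::printf("%d %d\n", r[0], r[7]);        // prints "12 26"
    #endif
        return 0;
    }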

inline v_float32x8 v_invsqrt(const v_float32x8& x)
{
v_float32x8 half = x * v256_setall_f32(0.5);
3 changes: 2 additions & 1 deletion modules/dnn/include/opencv2/dnn/dnn.hpp
@@ -750,6 +750,7 @@ CV__DNN_INLINE_NS_BEGIN
* @brief Reads a network model stored in <a href="http://torch.ch">Torch7</a> framework's format.
* @param model path to the file, dumped from Torch by using torch.save() function.
* @param isBinary specifies whether the network was serialized in ascii mode or binary.
+* @param evaluate specifies testing phase of network. If true, it's similar to evaluate() method in Torch.
* @returns Net object.
*
* @note Ascii mode of Torch serializer is more preferable, because binary mode extensively use `long` type of C language,
@@ -771,7 +772,7 @@ CV__DNN_INLINE_NS_BEGIN
*
* Also some equivalents of these classes from cunn, cudnn, and fbcunn may be successfully imported.
*/
-CV_EXPORTS_W Net readNetFromTorch(const String &model, bool isBinary = true);
+CV_EXPORTS_W Net readNetFromTorch(const String &model, bool isBinary = true, bool evaluate = true);
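A hedged usage sketch of the extended signature (not from the diff; the model path is a placeholder):

    #include <opencv2/dnn.hpp>

    cv::dnn::Net loadTorchModel()
    {
        // evaluate=true (the default) imports the network in test phase, like
        // calling evaluate() on the module in Torch; evaluate=false keeps the
        // training-phase behaviour of modules that were saved in train mode.
        return cv::dnn::readNetFromTorch("model.t7", /*isBinary=*/true, /*evaluate=*/true);
    }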

/**
* @brief Read deep learning network represented in one of the supported formats.
2 changes: 1 addition & 1 deletion modules/dnn/include/opencv2/dnn/version.hpp
@@ -6,7 +6,7 @@
#define OPENCV_DNN_VERSION_HPP

/// Use with major OpenCV version only.
-#define OPENCV_DNN_API_VERSION 20181205
+#define OPENCV_DNN_API_VERSION 20181221

#if !defined CV_DOXYGEN && !defined CV_DNN_DONT_ADD_INLINE_NS
#define CV__DNN_INLINE_NS __CV_CAT(dnn4_v, OPENCV_DNN_API_VERSION)
6 changes: 6 additions & 0 deletions modules/dnn/src/layers/mvn_layer.cpp
@@ -116,9 +116,15 @@ class MVNLayerImpl CV_FINAL : public MVNLayer

virtual bool supportBackend(int backendId) CV_OVERRIDE
{
+#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
+return !zeroDev && eps <= 1e-7f;
+#else
return !zeroDev && (preferableTarget == DNN_TARGET_CPU || eps <= 1e-7f);
+#endif
else
+#endif // HAVE_INF_ENGINE
return backendId == DNN_BACKEND_OPENCV;
}

11 changes: 5 additions & 6 deletions modules/dnn/src/onnx/onnx_importer.cpp
@@ -420,31 +420,30 @@ void ONNXImporter::populateNet(Net dstNet)
}
else if (layer_type == "Sub")
{
-Mat blob = (-1.0f) * getBlob(node_proto, constBlobs, 1);
-blob = blob.reshape(1, 1);
+Mat blob = getBlob(node_proto, constBlobs, 1);
if (blob.total() == 1) {
layerParams.type = "Power";
layerParams.set("shift", blob.at<float>(0));
layerParams.set("shift", -blob.at<float>(0));
}
else {
layerParams.type = "Scale";
layerParams.set("has_bias", true);
-layerParams.blobs.push_back(blob);
+layerParams.blobs.push_back(-1.0f * blob.reshape(1, 1));
}
}
else if (layer_type == "Div")
{
Mat blob = getBlob(node_proto, constBlobs, 1);
CV_Assert_N(blob.type() == CV_32F, blob.total());
-divide(1.0, blob, blob);
if (blob.total() == 1)
{
layerParams.set("scale", blob.at<float>(0));
layerParams.set("scale", 1.0f / blob.at<float>(0));
layerParams.type = "Power";
}
else
{
layerParams.type = "Scale";
+divide(1.0, blob, blob);
layerParams.blobs.push_back(blob);
layerParams.set("bias_term", false);
}
5 changes: 3 additions & 2 deletions modules/dnn/src/op_inf_engine.hpp
@@ -26,10 +26,11 @@
#define INF_ENGINE_RELEASE_2018R2 2018020000
#define INF_ENGINE_RELEASE_2018R3 2018030000
#define INF_ENGINE_RELEASE_2018R4 2018040000
+#define INF_ENGINE_RELEASE_2018R5 2018050000

#ifndef INF_ENGINE_RELEASE
#warning("IE version have not been provided via command-line. Using 2018R4 by default")
#define INF_ENGINE_RELEASE INF_ENGINE_RELEASE_2018R4
#warning("IE version have not been provided via command-line. Using 2018R5 by default")
#define INF_ENGINE_RELEASE INF_ENGINE_RELEASE_2018R5
#endif

#define INF_ENGINE_VER_MAJOR_GT(ver) (((INF_ENGINE_RELEASE) / 10000) > ((ver) / 10000))
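A worked illustration (not part of the diff) of the comparison above: dividing the YYYYAABBCC value by 10000 keeps only the year-and-release digits, so patch releases of the same major release compare equal:

    #include <cstdio>

    int main()
    {
        const int r5_patch = 2018050001;  // hypothetical 2018R5.0.1
        const int r5_base  = 2018050000;  // INF_ENGINE_RELEASE_2018R5
        const int r4_base  = 2018040000;  // INF_ENGINE_RELEASE_2018R4
        std::printf("%d\n", (r5_patch / 10000) > (r4_base / 10000));  // 1: newer major release
        std::printf("%d\n", (r5_patch / 10000) > (r5_base / 10000));  // 0: same major release
        return 0;
    }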
13 changes: 8 additions & 5 deletions modules/dnn/src/torch/torch_importer.cpp
@@ -129,13 +129,15 @@ struct TorchImporter
Module *rootModule;
Module *curModule;
int moduleCounter;
+bool testPhase;

-TorchImporter(String filename, bool isBinary)
+TorchImporter(String filename, bool isBinary, bool evaluate)
{
CV_TRACE_FUNCTION();

rootModule = curModule = NULL;
moduleCounter = 0;
+testPhase = evaluate;

file = cv::Ptr<THFile>(THDiskFile_new(filename, "r", 0), THFile_free);
CV_Assert(file && THFile_isOpened(file));
@@ -680,7 +682,8 @@ struct TorchImporter
layerParams.blobs.push_back(tensorParams["bias"].second);
}

if (nnName == "InstanceNormalization")
bool trainPhase = scalarParams.get<bool>("train", false);
if (nnName == "InstanceNormalization" || (trainPhase && !testPhase))
{
cv::Ptr<Module> mvnModule(new Module(nnName));
mvnModule->apiType = "MVN";
@@ -1243,18 +1246,18 @@

Mat readTorchBlob(const String &filename, bool isBinary)
{
-TorchImporter importer(filename, isBinary);
+TorchImporter importer(filename, isBinary, true);
importer.readObject();
CV_Assert(importer.tensors.size() == 1);

return importer.tensors.begin()->second;
}

-Net readNetFromTorch(const String &model, bool isBinary)
+Net readNetFromTorch(const String &model, bool isBinary, bool evaluate)
{
CV_TRACE_FUNCTION();

-TorchImporter importer(model, isBinary);
+TorchImporter importer(model, isBinary, evaluate);
Net net;
importer.populateNet(net);
return net;
4 changes: 2 additions & 2 deletions modules/dnn/test/test_backends.cpp
@@ -226,9 +226,9 @@ TEST_P(DNNTestNetwork, OpenPose_pose_mpi_faster_4_stages)
TEST_P(DNNTestNetwork, OpenFace)
{
#if defined(INF_ENGINE_RELEASE)
-#if INF_ENGINE_RELEASE < 2018030000
+#if (INF_ENGINE_RELEASE < 2018030000 || INF_ENGINE_RELEASE == 2018050000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
throw SkipTestException("Test is enabled starts from OpenVINO 2018R3");
throw SkipTestException("");
#elif INF_ENGINE_RELEASE < 2018040000
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL_FP16)
throw SkipTestException("Test is enabled starts from OpenVINO 2018R4");
8 changes: 8 additions & 0 deletions modules/dnn/test/test_ie_models.cpp
@@ -190,6 +190,14 @@ TEST_P(DNNTestOpenVINO, models)
modelName == "landmarks-regression-retail-0009" ||
modelName == "semantic-segmentation-adas-0001")))
throw SkipTestException("");
+#elif INF_ENGINE_RELEASE == 2018050000
+if (modelName == "single-image-super-resolution-0063" ||
+modelName == "single-image-super-resolution-1011" ||
+modelName == "single-image-super-resolution-1021" ||
+(target == DNN_TARGET_OPENCL_FP16 && modelName == "face-reidentification-retail-0095") ||
+(target == DNN_TARGET_MYRIAD && (modelName == "license-plate-recognition-barrier-0001" ||
+modelName == "semantic-segmentation-adas-0001")))
+throw SkipTestException("");
#endif
#endif

4 changes: 4 additions & 0 deletions modules/dnn/test/test_layers.cpp
@@ -295,6 +295,10 @@ TEST_P(Test_Caffe_layers, Eltwise)
{
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
throw SkipTestException("");
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE == 2018050000
+if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL)
+throw SkipTestException("Test is disabled for OpenVINO 2018R5");
+#endif
testLayerUsingCaffeModels("layer_eltwise");
}

24 changes: 22 additions & 2 deletions modules/dnn/test/test_onnx_importer.cpp
@@ -164,6 +164,8 @@ TEST_P(Test_ONNX_layers, MultyInputs)

TEST_P(Test_ONNX_layers, DynamicReshape)
{
+if (backend == DNN_BACKEND_INFERENCE_ENGINE && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
+throw SkipTestException("");
testONNXModels("dynamic_reshape");
}

@@ -249,6 +251,10 @@ TEST_P(Test_ONNX_nets, VGG16)
else if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL) {
lInf = 1.2e-4;
}
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE >= 2018050000
+if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL_FP16)
+l1 = 0.131;
+#endif
testONNXModels("vgg16", pb, l1, lInf);
}

@@ -327,7 +333,7 @@ TEST_P(Test_ONNX_nets, CNN_MNIST)
TEST_P(Test_ONNX_nets, MobileNet_v2)
{
// output range: [-166; 317]
-const double l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.38 : 7e-5;
+const double l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.4 : 7e-5;
const double lInf = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 2.87 : 5e-4;
testONNXModels("mobilenetv2", pb, l1, lInf);
}
@@ -350,7 +356,17 @@ TEST_P(Test_ONNX_nets, LResNet100E_IR)

TEST_P(Test_ONNX_nets, Emotion_ferplus)
{
testONNXModels("emotion_ferplus", pb);
double l1 = default_l1;
double lInf = default_lInf;
// Output values are in range [-2.01109, 2.11111]
if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
l1 = 0.007;
else if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL_FP16)
{
l1 = 0.021;
lInf = 0.034;
}
testONNXModels("emotion_ferplus", pb, l1, lInf);
}

TEST_P(Test_ONNX_nets, Inception_v2)
@@ -371,6 +387,10 @@ TEST_P(Test_ONNX_nets, DenseNet121)

TEST_P(Test_ONNX_nets, Inception_v1)
{
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE == 2018050000
+if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
+throw SkipTestException("");
+#endif
testONNXModels("inception_v1", pb);
}

12 changes: 12 additions & 0 deletions modules/dnn/test/test_tf_importer.cpp
@@ -241,6 +241,10 @@ TEST_P(Test_TensorFlow_layers, unfused_flatten)

TEST_P(Test_TensorFlow_layers, leaky_relu)
{
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE == 2018050000
+if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL)
+throw SkipTestException("");
+#endif
runTensorFlowNet("leaky_relu_order1");
runTensorFlowNet("leaky_relu_order2");
runTensorFlowNet("leaky_relu_order3");
@@ -383,6 +387,10 @@ TEST_P(Test_TensorFlow_nets, Faster_RCNN)

TEST_P(Test_TensorFlow_nets, MobileNet_v1_SSD_PPN)
{
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE == 2018050000
+if (backend == DNN_BACKEND_INFERENCE_ENGINE && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
+throw SkipTestException("Unstable test case");
+#endif
checkBackend();
std::string proto = findDataFile("dnn/ssd_mobilenet_v1_ppn_coco.pbtxt", false);
std::string model = findDataFile("dnn/ssd_mobilenet_v1_ppn_coco.pb", false);
@@ -560,6 +568,10 @@ TEST_P(Test_TensorFlow_layers, slice)
if (backend == DNN_BACKEND_INFERENCE_ENGINE &&
(target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
throw SkipTestException("");
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE == 2018050000
+if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
+throw SkipTestException("");
+#endif
runTensorFlowNet("slice_4d");
}
