diff --git a/modules/core/include/opencv2/core/utils/logger.hpp b/modules/core/include/opencv2/core/utils/logger.hpp
index bf73d9ac51f9..accb860ada8e 100644
--- a/modules/core/include/opencv2/core/utils/logger.hpp
+++ b/modules/core/include/opencv2/core/utils/logger.hpp
@@ -104,14 +104,16 @@ struct LogTagAuto
 // non-null. Do not re-define.
 #define CV_LOGTAG_GLOBAL cv::utils::logging::internal::getGlobalLogTag()
 
-#define CV_LOG_WITH_TAG(tag, msgLevel, ...) \
+#define CV_LOG_WITH_TAG(tag, msgLevel, extra_check0, extra_check1, ...) \
     for(;;) { \
+        extra_check0; \
         const auto cv_temp_msglevel = (cv::utils::logging::LogLevel)(msgLevel); \
         if (cv_temp_msglevel >= (CV_LOG_STRIP_LEVEL)) break; \
         auto cv_temp_logtagptr = CV_LOGTAG_PTR_CAST(CV_LOGTAG_EXPAND_NAME(tag)); \
         if (!cv_temp_logtagptr) cv_temp_logtagptr = CV_LOGTAG_PTR_CAST(CV_LOGTAG_FALLBACK); \
         if (!cv_temp_logtagptr) cv_temp_logtagptr = CV_LOGTAG_PTR_CAST(CV_LOGTAG_GLOBAL); \
         if (cv_temp_logtagptr && (cv_temp_msglevel > cv_temp_logtagptr->level)) break; \
+        extra_check1; \
         std::stringstream cv_temp_logstream; \
         cv_temp_logstream << __VA_ARGS__; \
         cv::utils::logging::internal::writeLogMessageEx( \
@@ -124,28 +126,91 @@ struct LogTagAuto
         break; \
     }
 
-#define CV_LOG_FATAL(tag, ...) CV_LOG_WITH_TAG(tag, cv::utils::logging::LOG_LEVEL_FATAL, __VA_ARGS__)
-#define CV_LOG_ERROR(tag, ...) CV_LOG_WITH_TAG(tag, cv::utils::logging::LOG_LEVEL_ERROR, __VA_ARGS__)
-#define CV_LOG_WARNING(tag, ...) CV_LOG_WITH_TAG(tag, cv::utils::logging::LOG_LEVEL_WARNING, __VA_ARGS__)
-#define CV_LOG_INFO(tag, ...) CV_LOG_WITH_TAG(tag, cv::utils::logging::LOG_LEVEL_INFO, __VA_ARGS__)
-#define CV_LOG_DEBUG(tag, ...) CV_LOG_WITH_TAG(tag, cv::utils::logging::LOG_LEVEL_DEBUG, __VA_ARGS__)
-#define CV_LOG_VERBOSE(tag, v, ...) CV_LOG_WITH_TAG(tag, (cv::utils::logging::LOG_LEVEL_VERBOSE + (int)(v)), __VA_ARGS__)
+#define CV_LOG_FATAL(tag, ...) CV_LOG_WITH_TAG(tag, cv::utils::logging::LOG_LEVEL_FATAL, , , __VA_ARGS__)
+#define CV_LOG_ERROR(tag, ...) CV_LOG_WITH_TAG(tag, cv::utils::logging::LOG_LEVEL_ERROR, , , __VA_ARGS__)
+#define CV_LOG_WARNING(tag, ...) CV_LOG_WITH_TAG(tag, cv::utils::logging::LOG_LEVEL_WARNING, , , __VA_ARGS__)
+#define CV_LOG_INFO(tag, ...) CV_LOG_WITH_TAG(tag, cv::utils::logging::LOG_LEVEL_INFO, , , __VA_ARGS__)
+#define CV_LOG_DEBUG(tag, ...) CV_LOG_WITH_TAG(tag, cv::utils::logging::LOG_LEVEL_DEBUG, , , __VA_ARGS__)
+#define CV_LOG_VERBOSE(tag, v, ...) CV_LOG_WITH_TAG(tag, (cv::utils::logging::LOG_LEVEL_VERBOSE + (int)(v)), , , __VA_ARGS__)
 
 #if CV_LOG_STRIP_LEVEL <= CV_LOG_LEVEL_INFO
-# undef CV_LOG_INFO
-# define CV_LOG_INFO(tag, ...)
+#undef CV_LOG_INFO
+#define CV_LOG_INFO(tag, ...)
 #endif
 
 #if CV_LOG_STRIP_LEVEL <= CV_LOG_LEVEL_DEBUG
-# undef CV_LOG_DEBUG
-# define CV_LOG_DEBUG(tag, ...)
+#undef CV_LOG_DEBUG
+#define CV_LOG_DEBUG(tag, ...)
 #endif
 
 #if CV_LOG_STRIP_LEVEL <= CV_LOG_LEVEL_VERBOSE
-# undef CV_LOG_VERBOSE
-# define CV_LOG_VERBOSE(tag, v, ...)
+#undef CV_LOG_VERBOSE
+#define CV_LOG_VERBOSE(tag, v, ...)
 #endif
 
+//! @cond IGNORED
+#define CV__LOG_ONCE_CHECK_PRE \
+    static bool _cv_log_once_ ## __LINE__ = false; \
+    if (_cv_log_once_ ## __LINE__) break;
+
+#define CV__LOG_ONCE_CHECK_POST \
+    _cv_log_once_ ## __LINE__ = true;
+
+#define CV__LOG_IF_CHECK(logging_cond) \
+    if (!(logging_cond)) break;
+
+//! @endcond
+
+
+// CV_LOG_ONCE_XXX macros
+
+#define CV_LOG_ONCE_ERROR(tag, ...) CV_LOG_WITH_TAG(tag, cv::utils::logging::LOG_LEVEL_ERROR, CV__LOG_ONCE_CHECK_PRE, CV__LOG_ONCE_CHECK_POST, __VA_ARGS__)
+#define CV_LOG_ONCE_WARNING(tag, ...) CV_LOG_WITH_TAG(tag, cv::utils::logging::LOG_LEVEL_WARNING, CV__LOG_ONCE_CHECK_PRE, CV__LOG_ONCE_CHECK_POST, __VA_ARGS__)
+#define CV_LOG_ONCE_INFO(tag, ...) CV_LOG_WITH_TAG(tag, cv::utils::logging::LOG_LEVEL_INFO, CV__LOG_ONCE_CHECK_PRE, CV__LOG_ONCE_CHECK_POST, __VA_ARGS__)
+#define CV_LOG_ONCE_DEBUG(tag, ...) CV_LOG_WITH_TAG(tag, cv::utils::logging::LOG_LEVEL_DEBUG, CV__LOG_ONCE_CHECK_PRE, CV__LOG_ONCE_CHECK_POST, __VA_ARGS__)
+#define CV_LOG_ONCE_VERBOSE(tag, v, ...) CV_LOG_WITH_TAG(tag, (cv::utils::logging::LOG_LEVEL_VERBOSE + (int)(v)), CV__LOG_ONCE_CHECK_PRE, CV__LOG_ONCE_CHECK_POST, __VA_ARGS__)
+
+#if CV_LOG_STRIP_LEVEL <= CV_LOG_LEVEL_INFO
+#undef CV_LOG_ONCE_INFO
+#define CV_LOG_ONCE_INFO(tag, ...)
+#endif
+
+#if CV_LOG_STRIP_LEVEL <= CV_LOG_LEVEL_DEBUG
+#undef CV_LOG_ONCE_DEBUG
+#define CV_LOG_ONCE_DEBUG(tag, ...)
+#endif
+
+#if CV_LOG_STRIP_LEVEL <= CV_LOG_LEVEL_VERBOSE
+#undef CV_LOG_ONCE_VERBOSE
+#define CV_LOG_ONCE_VERBOSE(tag, v, ...)
+#endif
+
+
+// CV_LOG_IF_XXX macros
+
+#define CV_LOG_IF_FATAL(tag, logging_cond, ...) CV_LOG_WITH_TAG(tag, cv::utils::logging::LOG_LEVEL_FATAL, , CV__LOG_IF_CHECK(logging_cond), __VA_ARGS__)
+#define CV_LOG_IF_ERROR(tag, logging_cond, ...) CV_LOG_WITH_TAG(tag, cv::utils::logging::LOG_LEVEL_ERROR, , CV__LOG_IF_CHECK(logging_cond), __VA_ARGS__)
+#define CV_LOG_IF_WARNING(tag, logging_cond, ...) CV_LOG_WITH_TAG(tag, cv::utils::logging::LOG_LEVEL_WARNING, , CV__LOG_IF_CHECK(logging_cond), __VA_ARGS__)
+#define CV_LOG_IF_INFO(tag, logging_cond, ...) CV_LOG_WITH_TAG(tag, cv::utils::logging::LOG_LEVEL_INFO, , CV__LOG_IF_CHECK(logging_cond), __VA_ARGS__)
+#define CV_LOG_IF_DEBUG(tag, logging_cond, ...) CV_LOG_WITH_TAG(tag, cv::utils::logging::LOG_LEVEL_DEBUG, , CV__LOG_IF_CHECK(logging_cond), __VA_ARGS__)
+#define CV_LOG_IF_VERBOSE(tag, v, logging_cond, ...) CV_LOG_WITH_TAG(tag, (cv::utils::logging::LOG_LEVEL_VERBOSE + (int)(v)), , CV__LOG_IF_CHECK(logging_cond), __VA_ARGS__)
+
+#if CV_LOG_STRIP_LEVEL <= CV_LOG_LEVEL_INFO
+#undef CV_LOG_IF_INFO
+#define CV_LOG_IF_INFO(tag, logging_cond, ...)
+#endif
+
+#if CV_LOG_STRIP_LEVEL <= CV_LOG_LEVEL_DEBUG
+#undef CV_LOG_IF_DEBUG
+#define CV_LOG_IF_DEBUG(tag, logging_cond, ...)
+#endif
+
+#if CV_LOG_STRIP_LEVEL <= CV_LOG_LEVEL_VERBOSE
+#undef CV_LOG_IF_VERBOSE
+#define CV_LOG_IF_VERBOSE(tag, v, logging_cond, ...)
+#endif
+
+
 //!
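// --- Illustration (not part of the patch): intended usage of the macros added above. ---
// A minimal sketch, assuming a translation unit that includes the public logging header;
// processFrame() and its arguments are hypothetical names used only for this example.
// CV_LOG_ONCE_* emits its message at most once per process (the "once" flag is set only
// after the message passes the level/tag filter), while CV_LOG_IF_* emits only when the
// extra condition is true; both keep the CV_LOG_STRIP_LEVEL handling of CV_LOG_WITH_TAG.
#include <opencv2/core/utils/logger.hpp>

static void processFrame(int frameIdx, bool degraded)
{
    CV_LOG_ONCE_WARNING(NULL, "Legacy decode path is active");        // logged at most once
    CV_LOG_IF_DEBUG(NULL, degraded, "Degraded frame #" << frameIdx);  // logged only when 'degraded'
}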
@} }}} // namespace diff --git a/modules/core/src/logger.cpp b/modules/core/src/logger.cpp index 4a4a23602c14..b87789185a07 100644 --- a/modules/core/src/logger.cpp +++ b/modules/core/src/logger.cpp @@ -193,8 +193,8 @@ void writeLogMessage(LogLevel logLevel, const char* message) case LOG_LEVEL_INFO: ss << "[ INFO:" << threadID << "] " << message << std::endl; break; case LOG_LEVEL_DEBUG: ss << "[DEBUG:" << threadID << "] " << message << std::endl; break; case LOG_LEVEL_VERBOSE: ss << message << std::endl; break; - default: - return; + case LOG_LEVEL_SILENT: return; // avoid compiler warning about incomplete switch + case ENUM_LOG_LEVEL_FORCE_INT: return; // avoid compiler warning about incomplete switch } #ifdef __ANDROID__ int android_logLevel = ANDROID_LOG_INFO; diff --git a/modules/core/src/matrix_expressions.cpp b/modules/core/src/matrix_expressions.cpp index 5ac1fafbd63b..d7fb6e42287a 100644 --- a/modules/core/src/matrix_expressions.cpp +++ b/modules/core/src/matrix_expressions.cpp @@ -117,8 +117,17 @@ class MatOp_GEMM CV_FINAL : public MatOp void transpose(const MatExpr& expr, MatExpr& res) const CV_OVERRIDE; + Size size(const MatExpr& expr) const CV_OVERRIDE + { + return Size( + (expr.flags & GEMM_2_T) ? expr.b.rows : expr.b.cols, + (expr.flags & GEMM_1_T) ? expr.a.cols : expr.a.rows + ); + } + static void makeExpr(MatExpr& res, int flags, const Mat& a, const Mat& b, double alpha=1, const Mat& c=Mat(), double beta=1); + }; static MatOp_GEMM g_MatOp_GEMM; @@ -199,7 +208,7 @@ static inline bool isReciprocal(const MatExpr& e) { return isBin(e,'/') && (!e.b static inline bool isT(const MatExpr& e) { return e.op == &g_MatOp_T; } static inline bool isInv(const MatExpr& e) { return e.op == &g_MatOp_Invert; } static inline bool isSolve(const MatExpr& e) { return e.op == &g_MatOp_Solve; } -static inline bool isGEMM(const MatExpr& e) { return e.op == &g_MatOp_GEMM; } +//static inline bool isGEMM(const MatExpr& e) { return e.op == &g_MatOp_GEMM; } static inline bool isMatProd(const MatExpr& e) { return e.op == &g_MatOp_GEMM && (!e.c.data || e.beta == 0); } static inline bool isInitializer(const MatExpr& e) { return e.op == getGlobalMatOpInitializer(); } @@ -1240,8 +1249,6 @@ Size MatExpr::size() const { if( isT(*this) || isInv(*this) ) return Size(a.rows, a.cols); - if( isGEMM(*this) ) - return Size(b.cols, a.rows); if( isSolve(*this) ) return Size(b.cols, a.cols); if( isInitializer(*this) ) diff --git a/modules/core/test/test_mat.cpp b/modules/core/test/test_mat.cpp index 76d8c5f038e0..54ba911f5810 100644 --- a/modules/core/test/test_mat.cpp +++ b/modules/core/test/test_mat.cpp @@ -2019,6 +2019,29 @@ TEST(Core_MatExpr, issue_16655) << "Mat: CV_8UC3 != " << typeToString(ab_mat.type()); } +TEST(Core_MatExpr, issue_16689) +{ + Mat a(Size(10, 5), CV_32FC1, 5); + Mat b(Size(10, 5), CV_32FC1, 2); + Mat bt(Size(5, 10), CV_32FC1, 3); + { + MatExpr r = a * bt; // gemm + EXPECT_EQ(Mat(r).size(), r.size()) << "[10x5] x [5x10] => [5x5]"; + } + { + MatExpr r = a * b.t(); // gemm + EXPECT_EQ(Mat(r).size(), r.size()) << "[10x5] x [10x5].t() => [5x5]"; + } + { + MatExpr r = a.t() * b; // gemm + EXPECT_EQ(Mat(r).size(), r.size()) << "[10x5].t() x [10x5] => [10x10]"; + } + { + MatExpr r = a.t() * bt.t(); // gemm + EXPECT_EQ(Mat(r).size(), r.size()) << "[10x5].t() x [5x10].t() => [10x10]"; + } +} + #ifdef HAVE_EIGEN TEST(Core_Eigen, eigen2cv_check_Mat_type) { diff --git a/modules/core/test/test_utils.cpp b/modules/core/test/test_utils.cpp index 1a23e01fb9de..d8789ddfc2b0 100644 --- 
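// --- Illustration (not part of the patch): what the MatOp_GEMM::size() override fixes. ---
// A minimal sketch for issue 16689: the lazy expression now reports the size of the eventual
// GEMM result (taking the GEMM_1_T / GEMM_2_T transpose flags into account), so
// MatExpr::size() matches the evaluated Mat instead of being derived from the raw operands.
// checkGemmExprSize() is a hypothetical helper written only for this example.
#include <opencv2/core.hpp>

void checkGemmExprSize()
{
    cv::Mat a(5, 10, CV_32FC1, cv::Scalar(1));   // 5 rows x 10 cols
    cv::Mat b(5, 10, CV_32FC1, cv::Scalar(2));
    cv::MatExpr e = a.t() * b;                   // GEMM with the first operand transposed
    CV_Assert(e.size() == cv::Size(10, 10));     // reported lazily, without evaluating the product
    CV_Assert(cv::Mat(e).size() == e.size());    // agrees with the materialized result
}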
a/modules/core/test/test_utils.cpp +++ b/modules/core/test/test_utils.cpp @@ -2,6 +2,9 @@ // It is subject to the license terms in the LICENSE file found in the top-level directory // of this distribution and at http://opencv.org/license.html. #include "test_precomp.hpp" +#include "opencv2/core/utils/logger.defines.hpp" +#undef CV_LOG_STRIP_LEVEL +#define CV_LOG_STRIP_LEVEL CV_LOG_LEVEL_VERBOSE + 1 #include "opencv2/core/utils/logger.hpp" #include "opencv2/core/utils/buffer_area.private.hpp" @@ -287,6 +290,53 @@ TEST(CommandLineParser, testScalar) EXPECT_EQ(parser.get("s5"), Scalar(5, -4, 3, 2)); } + +TEST(Logger, DISABLED_message) +{ + int id = 42; + CV_LOG_VERBOSE(NULL, 0, "Verbose message: " << id); + CV_LOG_VERBOSE(NULL, 1, "Verbose message: " << id); + CV_LOG_DEBUG(NULL, "Debug message: " << id); + CV_LOG_INFO(NULL, "Info message: " << id); + CV_LOG_WARNING(NULL, "Warning message: " << id); + CV_LOG_ERROR(NULL, "Error message: " << id); + CV_LOG_FATAL(NULL, "Fatal message: " << id); +} + +static int testLoggerMessageOnce(int id) +{ + CV_LOG_ONCE_VERBOSE(NULL, 0, "Verbose message: " << id++); + CV_LOG_ONCE_VERBOSE(NULL, 1, "Verbose message: " << id++); + CV_LOG_ONCE_DEBUG(NULL, "Debug message: " << id++); + CV_LOG_ONCE_INFO(NULL, "Info message: " << id++); + CV_LOG_ONCE_WARNING(NULL, "Warning message: " << id++); + CV_LOG_ONCE_ERROR(NULL, "Error message: " << id++); + // doesn't make sense: CV_LOG_ONCE_FATAL + return id; +} +TEST(Logger, DISABLED_message_once) +{ + int check_id_first = testLoggerMessageOnce(42); + EXPECT_GT(check_id_first, 42); + int check_id_second = testLoggerMessageOnce(0); + EXPECT_EQ(0, check_id_second); +} + +TEST(Logger, DISABLED_message_if) +{ + for (int i = 0; i < 100; i++) + { + CV_LOG_IF_VERBOSE(NULL, 0, i == 0 || i == 42, "Verbose message: " << i); + CV_LOG_IF_VERBOSE(NULL, 1, i == 0 || i == 42, "Verbose message: " << i); + CV_LOG_IF_DEBUG(NULL, i == 0 || i == 42, "Debug message: " << i); + CV_LOG_IF_INFO(NULL, i == 0 || i == 42, "Info message: " << i); + CV_LOG_IF_WARNING(NULL, i == 0 || i == 42, "Warning message: " << i); + CV_LOG_IF_ERROR(NULL, i == 0 || i == 42, "Error message: " << i); + CV_LOG_IF_FATAL(NULL, i == 0 || i == 42, "Fatal message: " << i); + } +} + + TEST(Samples, findFile) { cv::utils::logging::LogLevel prev = cv::utils::logging::setLogLevel(cv::utils::logging::LOG_LEVEL_VERBOSE); diff --git a/modules/dnn/src/graph_simplifier.cpp b/modules/dnn/src/graph_simplifier.cpp index 62651053fbfd..166564c21556 100644 --- a/modules/dnn/src/graph_simplifier.cpp +++ b/modules/dnn/src/graph_simplifier.cpp @@ -69,8 +69,12 @@ int Subgraph::getInputNodeId(const Ptr& net, const int numNodes = net->getNumNodes(); for (int i = 0; i < numNodes; ++i) { - if (net->getNodeName(i) == name) - return i; + const int numOutputs = net->getNumOutputs(i); + for (int j = 0; j < numOutputs; j++) + { + if (net->getOutputName(i, j) == name) + return i; + } } CV_Error(Error::StsParseError, "Input node with name " + name + " not found"); } @@ -111,12 +115,12 @@ bool Subgraph::match(const Ptr& net, int nodeId, continue; nodeId = getInputNodeId(net, node, j); const Ptr inpNode = net->getNode(nodeId); - if (inpNode->getType() != "Const") + if (inpNode->getType() != "Const" && inpNode->getType() != "Constant") { nodesToMatch.push(nodeId); targetNodes.push(inputNodes[j]); } - else if (nodes[inputNodes[j]] != "Const") + else if (nodes[inputNodes[j]] != "Const" && nodes[inputNodes[j]] != "Constant") return false; } matchedNodesIds.push_back(nodeToMatch); @@ -190,15 +194,14 @@ void 
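// --- Illustration (not part of the patch): how the reworked node lookup behaves. ---
// A sketch under the assumption that the private dnn header declaring ImportGraphWrapper
// (graph_simplifier.hpp, extended above with getNumOutputs/getOutputName) is visible;
// findProducerNode() is a hypothetical name. An input tensor name is now matched against
// every output of every node, so multi-output nodes can be resolved, not only nodes whose
// name / first output happens to equal the searched name.
int findProducerNode(const cv::Ptr<cv::dnn::ImportGraphWrapper>& net, const std::string& name)
{
    const int numNodes = net->getNumNodes();
    for (int i = 0; i < numNodes; ++i)
    {
        const int numOutputs = net->getNumOutputs(i);
        for (int j = 0; j < numOutputs; ++j)
            if (net->getOutputName(i, j) == name)
                return i;
    }
    return -1;  // the real getInputNodeId() raises Error::StsParseError here instead
}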
simplifySubgraphs(const Ptr& net, { int numNodes = net->getNumNodes(); std::vector matchedNodesIds, targetNodesIds; - for (int i = 0; i < numNodes; ++i) + for (int j = 0; j < patterns.size(); ++j) { - for (int j = 0; j < patterns.size(); ++j) + for (int i = 0; i < numNodes; ++i) { if (patterns[j]->match(net, i, matchedNodesIds, targetNodesIds)) { patterns[j]->replace(net, matchedNodesIds, targetNodesIds); numNodes -= matchedNodesIds.size() - 1; // #matchedNodes removed and one added. - break; } } } diff --git a/modules/dnn/src/graph_simplifier.hpp b/modules/dnn/src/graph_simplifier.hpp index 8f3958ba52b7..39d6262c1b50 100644 --- a/modules/dnn/src/graph_simplifier.hpp +++ b/modules/dnn/src/graph_simplifier.hpp @@ -39,7 +39,9 @@ class ImportGraphWrapper virtual int getNumNodes() const = 0; - virtual std::string getNodeName(int idx) const = 0; + virtual int getNumOutputs(int nodeId) const = 0; + + virtual std::string getOutputName(int nodeId, int outId) const = 0; virtual void removeNode(int idx) = 0; }; diff --git a/modules/dnn/src/onnx/onnx_graph_simplifier.cpp b/modules/dnn/src/onnx/onnx_graph_simplifier.cpp index e27be30cd3cc..74fd44b7b279 100644 --- a/modules/dnn/src/onnx/onnx_graph_simplifier.cpp +++ b/modules/dnn/src/onnx/onnx_graph_simplifier.cpp @@ -76,12 +76,21 @@ class ONNXGraphWrapper : public ImportGraphWrapper return numInputs + net.node_size(); } - virtual std::string getNodeName(int idx) const CV_OVERRIDE + virtual int getNumOutputs(int nodeId) const CV_OVERRIDE { - if (idx < numInputs) - return net.input(idx).name(); + if (nodeId < numInputs) + return 1; else - return net.node(idx - numInputs).output(0); + return net.node(nodeId - numInputs).output_size(); + } + + virtual std::string getOutputName(int nodeId, int outId) const CV_OVERRIDE + { + CV_Assert(outId < getNumOutputs(nodeId)); + if (nodeId < numInputs) + return net.input(nodeId).name(); + else + return net.node(nodeId - numInputs).output(outId); } virtual void removeNode(int idx) CV_OVERRIDE @@ -145,13 +154,221 @@ class SoftMaxSubgraph : public Subgraph int axis; }; +class GatherCastSubgraph : public Subgraph +{ +public: + GatherCastSubgraph() + { + int input = addNodeToMatch(""); + int index = addNodeToMatch("Constant"); + int gather = addNodeToMatch("Gather", input, index); + addNodeToMatch("Cast", gather); + setFusedNode("Gather", input, index); + } +}; + +class MulCastSubgraph : public Subgraph +{ +public: + MulCastSubgraph() + { + int input = addNodeToMatch(""); + int scaleNode = addNodeToMatch("Constant"); + int mul = addNodeToMatch("Mul", input, scaleNode); + addNodeToMatch("Cast", mul); + setFusedNode("Mul", input, scaleNode); + } +}; + +class ExtractScalesSubgraph : public Subgraph +{ +public: + ExtractScalesSubgraph() + { + input = addNodeToMatch(""); + + int indexH = addNodeToMatch("Constant"); + int shape1 = addNodeToMatch("Shape", input); + int gather1 = addNodeToMatch("Gather", shape1, indexH); + scaleHNode = addNodeToMatch("Constant"); + int mul1 = addNodeToMatch("Mul", gather1, scaleHNode); + int floor1 = addNodeToMatch("Floor", mul1); + + int indexW = addNodeToMatch("Constant"); + int shape2 = addNodeToMatch("Shape", input); + int gather2 = addNodeToMatch("Gather", shape2, indexW); + scaleWNode = addNodeToMatch("Constant"); + int mul2 = addNodeToMatch("Mul", gather2, scaleWNode); + int floor2 = addNodeToMatch("Floor", mul2); + + int unsqueeze1 = addNodeToMatch("Unsqueeze", floor1); + int unsqueeze2 = addNodeToMatch("Unsqueeze", floor2); + concatId = addNodeToMatch("Concat", unsqueeze1, unsqueeze2); + 
} + + void finalize(const Ptr& net, + const Ptr& fusedNode, + std::vector >& inputs) CV_OVERRIDE + { + opencv_onnx::NodeProto* constant_node = inputs[1].dynamicCast()->node; + opencv_onnx::TensorProto tensor_proto = constant_node->attribute(0).t(); + Mat scaleW = getMatFromTensor(tensor_proto); + CV_Assert(scaleW.total() == 1); + scaleW.convertTo(scaleW, CV_32F); + + constant_node = inputs[2].dynamicCast()->node; + tensor_proto = constant_node->attribute(0).t(); + Mat scaleH = getMatFromTensor(tensor_proto); + CV_Assert(scaleH.total() == 1); + scaleH.convertTo(scaleH, CV_32F); + + opencv_onnx::NodeProto* node = fusedNode.dynamicCast()->node; + opencv_onnx::AttributeProto* attrH = node->add_attribute(); + attrH->set_name("height_scale"); + attrH->set_i(scaleH.at(0)); + opencv_onnx::AttributeProto* attrW = node->add_attribute(); + attrW->set_name("width_scale"); + attrW->set_i(scaleW.at(0)); + + node->mutable_input()->DeleteSubrange(1, 2); // Remove two last inputs + } + +protected: + int input, concatId; + int scaleHNode, scaleWNode; +}; + +class UpsampleSubgraph : public ExtractScalesSubgraph +{ +public: + UpsampleSubgraph() : ExtractScalesSubgraph() + { + int shape = addNodeToMatch("Shape", input); + int slice = addNodeToMatch("Slice", shape); + + int castConcat = addNodeToMatch("Cast", concatId); + int castSlice = addNodeToMatch("Cast", slice); + int divide = addNodeToMatch("Div", castConcat, castSlice); + + int constant = addNodeToMatch("Constant"); + int concat = addNodeToMatch("Concat", constant, divide); + + addNodeToMatch("Upsample", input, concat); + setFusedNode("Upsample", input, scaleWNode, scaleHNode); + } +}; + +class ResizeSubgraph1 : public ExtractScalesSubgraph +{ +public: + ResizeSubgraph1() : ExtractScalesSubgraph() + { + int shape = addNodeToMatch("Shape", input); + int slice = addNodeToMatch("Slice", shape, addNodeToMatch("Constant"), addNodeToMatch("Constant"), addNodeToMatch("Constant")); + + int castConcat = addNodeToMatch("Cast", concatId); + int concat = addNodeToMatch("Concat", slice, castConcat); + int constant = addNodeToMatch("Constant"); + + addNodeToMatch("Resize", input, constant, constant, concat); + setFusedNode("Upsample", input, scaleWNode, scaleHNode); + } +}; + +class ResizeSubgraph2 : public ExtractScalesSubgraph +{ +public: + ResizeSubgraph2() : ExtractScalesSubgraph() + { + int constantConcat = addNodeToMatch("Constant"); + int castConcat = addNodeToMatch("Cast", concatId); + int concat = addNodeToMatch("Concat", constantConcat, castConcat); + int constant = addNodeToMatch("Constant"); + + addNodeToMatch("Resize", input, constant, constant, concat); + setFusedNode("Upsample", input, scaleWNode, scaleHNode); + } +}; + void simplifySubgraphs(opencv_onnx::GraphProto& net) { std::vector > subgraphs; + subgraphs.push_back(makePtr()); + subgraphs.push_back(makePtr()); + subgraphs.push_back(makePtr()); + subgraphs.push_back(makePtr()); + subgraphs.push_back(makePtr()); subgraphs.push_back(makePtr()); simplifySubgraphs(Ptr(new ONNXGraphWrapper(net)), subgraphs); } +Mat getMatFromTensor(opencv_onnx::TensorProto& tensor_proto) +{ + if (tensor_proto.raw_data().empty() && tensor_proto.float_data().empty() && + tensor_proto.double_data().empty() && tensor_proto.int64_data().empty()) + return Mat(); + + opencv_onnx::TensorProto_DataType datatype = tensor_proto.data_type(); + Mat blob; + std::vector sizes; + for (int i = 0; i < tensor_proto.dims_size(); i++) { + sizes.push_back(tensor_proto.dims(i)); + } + if (sizes.empty()) + sizes.assign(1, 1); + if (datatype == 
opencv_onnx::TensorProto_DataType_FLOAT) { + + if (!tensor_proto.float_data().empty()) { + const ::google::protobuf::RepeatedField field = tensor_proto.float_data(); + Mat(sizes, CV_32FC1, (void*)field.data()).copyTo(blob); + } + else { + char* val = const_cast(tensor_proto.raw_data().c_str()); + Mat(sizes, CV_32FC1, val).copyTo(blob); + } + } + else if (datatype == opencv_onnx::TensorProto_DataType_DOUBLE) + { + const ::google::protobuf::RepeatedField field = tensor_proto.double_data(); + CV_Assert(!field.empty()); + Mat(sizes, CV_64FC1, (void*)field.data()).convertTo(blob, CV_32FC1); + } + else if (datatype == opencv_onnx::TensorProto_DataType_INT64) + { + blob.create(sizes, CV_32SC1); + int32_t* dst = reinterpret_cast(blob.data); + + if (!tensor_proto.int64_data().empty()) { + ::google::protobuf::RepeatedField< ::google::protobuf::int64> src = tensor_proto.int64_data(); + convertInt64ToInt32(src, dst, blob.total()); + } + else + { + const char* val = tensor_proto.raw_data().c_str(); +#if CV_STRONG_ALIGNMENT + // Aligned pointer is required: https://github.com/opencv/opencv/issues/16373 + // this doesn't work: typedef int64_t CV_DECL_ALIGNED(1) unaligned_int64_t; + AutoBuffer aligned_val; + if (!isAligned(val)) + { + size_t sz = tensor_proto.raw_data().size(); + aligned_val.allocate(divUp(sz, sizeof(int64_t))); + memcpy(aligned_val.data(), val, sz); + val = (const char*)aligned_val.data(); + } +#endif + const int64_t* src = reinterpret_cast(val); + convertInt64ToInt32(src, dst, blob.total()); + } + } + else + CV_Error(Error::StsUnsupportedFormat, "Unsupported data type: " + + opencv_onnx::TensorProto_DataType_Name(datatype)); + if (tensor_proto.dims_size() == 0) + blob.dims = 1; // To force 1-dimensional cv::Mat for scalars. + return blob; +} + CV__DNN_INLINE_NS_END }} // namespace cv::dnn diff --git a/modules/dnn/src/onnx/onnx_graph_simplifier.hpp b/modules/dnn/src/onnx/onnx_graph_simplifier.hpp index df560ea9fe77..714df587eed2 100644 --- a/modules/dnn/src/onnx/onnx_graph_simplifier.hpp +++ b/modules/dnn/src/onnx/onnx_graph_simplifier.hpp @@ -24,6 +24,19 @@ CV__DNN_INLINE_NS_BEGIN void simplifySubgraphs(opencv_onnx::GraphProto& net); +template +void convertInt64ToInt32(const T1& src, T2& dst, int size) +{ + for (int i = 0; i < size; i++) { + if (src[i] < std::numeric_limits::min() || src[i] > std::numeric_limits::max()) { + CV_Error(Error::StsOutOfRange, "Input is out of OpenCV 32S range"); + } + dst[i] = saturate_cast(src[i]); + } +} + +Mat getMatFromTensor(opencv_onnx::TensorProto& tensor_proto); + CV__DNN_INLINE_NS_END }} // namespace dnn, namespace cv diff --git a/modules/dnn/src/onnx/onnx_importer.cpp b/modules/dnn/src/onnx/onnx_importer.cpp index c68846299f69..0c8fdf3833a0 100644 --- a/modules/dnn/src/onnx/onnx_importer.cpp +++ b/modules/dnn/src/onnx/onnx_importer.cpp @@ -95,83 +95,6 @@ void releaseONNXTensor(opencv_onnx::TensorProto& tensor_proto) } } -template -void convertInt64ToInt32(const T1& src, T2& dst, int size) -{ - for (int i = 0; i < size; i++) { - if (src[i] < std::numeric_limits::min() || src[i] > std::numeric_limits::max()) { - CV_Error(Error::StsOutOfRange, "Input is out of OpenCV 32S range"); - } - dst[i] = saturate_cast(src[i]); - } -} - -Mat getMatFromTensor(opencv_onnx::TensorProto& tensor_proto) -{ - CV_Assert(!tensor_proto.raw_data().empty() || !tensor_proto.float_data().empty() - || !tensor_proto.double_data().empty() || !tensor_proto.int64_data().empty()); - - opencv_onnx::TensorProto_DataType datatype = tensor_proto.data_type(); - Mat blob; - std::vector 
sizes; - for (int i = 0; i < tensor_proto.dims_size(); i++) { - sizes.push_back(tensor_proto.dims(i)); - } - if (sizes.empty()) - sizes.assign(1, 1); - if (datatype == opencv_onnx::TensorProto_DataType_FLOAT) { - - if (!tensor_proto.float_data().empty()) { - const ::google::protobuf::RepeatedField field = tensor_proto.float_data(); - Mat(sizes, CV_32FC1, (void*)field.data()).copyTo(blob); - } - else { - char* val = const_cast(tensor_proto.raw_data().c_str()); - Mat(sizes, CV_32FC1, val).copyTo(blob); - } - } - else if (datatype == opencv_onnx::TensorProto_DataType_DOUBLE) - { - const ::google::protobuf::RepeatedField field = tensor_proto.double_data(); - CV_Assert(!field.empty()); - Mat(sizes, CV_64FC1, (void*)field.data()).convertTo(blob, CV_32FC1); - } - else if (datatype == opencv_onnx::TensorProto_DataType_INT64) - { - blob.create(sizes, CV_32SC1); - int32_t* dst = reinterpret_cast(blob.data); - - if (!tensor_proto.int64_data().empty()) { - ::google::protobuf::RepeatedField< ::google::protobuf::int64> src = tensor_proto.int64_data(); - convertInt64ToInt32(src, dst, blob.total()); - } - else - { - const char* val = tensor_proto.raw_data().c_str(); -#if CV_STRONG_ALIGNMENT - // Aligned pointer is required: https://github.com/opencv/opencv/issues/16373 - // this doesn't work: typedef int64_t CV_DECL_ALIGNED(1) unaligned_int64_t; - AutoBuffer aligned_val; - if (!isAligned(val)) - { - size_t sz = tensor_proto.raw_data().size(); - aligned_val.allocate(divUp(sz, sizeof(int64_t))); - memcpy(aligned_val.data(), val, sz); - val = (const char*)aligned_val.data(); - } -#endif - const int64_t* src = reinterpret_cast(val); - convertInt64ToInt32(src, dst, blob.total()); - } - } - else - CV_Error(Error::StsUnsupportedFormat, "Unsupported data type: " + - opencv_onnx::TensorProto_DataType_Name(datatype)); - if (tensor_proto.dims_size() == 0) - blob.dims = 1; // To force 1-dimensional cv::Mat for scalars. 
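// --- Illustration (not part of the patch): behaviour of the shared convertInt64ToInt32(). ---
// A minimal sketch, assuming the helper declared above in onnx_graph_simplifier.hpp is
// visible to the caller; convertExampleInt64Blob() is a hypothetical name. The helper copies
// int64 tensor data into a 32-bit destination and raises Error::StsOutOfRange instead of
// silently truncating values that do not fit into INT32.
void convertExampleInt64Blob()
{
    const int64_t src[3] = { 1, -2, 42 };       // all values representable as int32
    cv::Mat blob(1, 3, CV_32SC1);
    int32_t* dst = reinterpret_cast<int32_t*>(blob.data);
    cv::dnn::convertInt64ToInt32(src, dst, 3);  // would throw if any value overflowed
}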
- return blob; -} - void runLayer(LayerParams& params, const std::vector& inputs, std::vector& outputs) { @@ -542,31 +465,6 @@ void ONNXImporter::populateNet(Net dstNet) layerParams.blobs.push_back(-1.0f * blob.reshape(1, 1)); } } - else if (layer_type == "Div") - { - if (constBlobs.find(node_proto.input(1)) == constBlobs.end()) - { - layerParams.type = "Eltwise"; - layerParams.set("operation", "div"); - } - else - { - Mat blob = getBlob(node_proto, constBlobs, 1); - CV_Assert_N(blob.type() == CV_32F, blob.total()); - if (blob.total() == 1) - { - layerParams.set("scale", 1.0f / blob.at(0)); - layerParams.type = "Power"; - } - else - { - layerParams.type = "Scale"; - divide(1.0, blob, blob); - layerParams.blobs.push_back(blob); - layerParams.set("bias_term", false); - } - } - } else if (layer_type == "Neg") { layerParams.type = "Power"; @@ -715,24 +613,58 @@ void ONNXImporter::populateNet(Net dstNet) layerParams.set("bias_term", false); layerParams.set("num_output", layerParams.blobs[0].size[0]); } - else if (layer_type == "Mul") + else if (layer_type == "Mul" || layer_type == "Div") { CV_Assert(node_proto.input_size() == 2); - if (layer_id.find(node_proto.input(1)) == layer_id.end()) { - Mat blob = getBlob(node_proto, constBlobs, 1); + + bool isDiv = layer_type == "Div"; + int constId = -1; + bool haveVariables = false; + for (int i = 0; i < 2; ++i) + { + if (constBlobs.find(node_proto.input(i)) != constBlobs.end()) + constId = i; + else + haveVariables = true; + } + if (constId != -1 && haveVariables) + { + Mat blob = getBlob(node_proto, constBlobs, constId); blob = blob.reshape(1, 1); if (blob.total() == 1) { - layerParams.set("scale", blob.at(0)); + float coeff = isDiv ? 1.0 / blob.at(0) : blob.at(0); + layerParams.set("scale", coeff); layerParams.type = "Power"; } else { + if (isDiv) + divide(1.0, blob, blob); layerParams.blobs.push_back(blob); layerParams.type = "Scale"; } } else { layerParams.type = "Eltwise"; - layerParams.set("operation", "prod"); + layerParams.set("operation", isDiv ? 
"div" : "prod"); + } + + if (!haveVariables) + { + Mat inp0 = getBlob(node_proto, constBlobs, 0); + Mat inp1 = getBlob(node_proto, constBlobs, 1); + if (inp0.size != inp1.size) + CV_Error(Error::StsNotImplemented, "Constant multiply with different shapes"); + + Mat out; + if (isDiv) + divide(inp0, inp1, out); + else + multiply(inp0, inp1, out); + + out = out.reshape(1, inp0.dims, inp0.size); + out.dims = inp0.dims; // to workaround dims == 1 + constBlobs.insert(std::make_pair(layerParams.name, out)); + continue; } } else if (layer_type == "Conv") diff --git a/modules/dnn/src/tensorflow/tf_graph_simplifier.cpp b/modules/dnn/src/tensorflow/tf_graph_simplifier.cpp index b28f22800d01..ba816778008a 100644 --- a/modules/dnn/src/tensorflow/tf_graph_simplifier.cpp +++ b/modules/dnn/src/tensorflow/tf_graph_simplifier.cpp @@ -69,9 +69,15 @@ class TFGraphWrapper : public ImportGraphWrapper return net.node_size(); } - virtual std::string getNodeName(int idx) const CV_OVERRIDE + virtual int getNumOutputs(int nodeId) const CV_OVERRIDE { - return net.node(idx).name(); + return 1; + } + + virtual std::string getOutputName(int nodeId, int outId) const CV_OVERRIDE + { + CV_Assert(outId == 0); + return net.node(nodeId).name(); } virtual void removeNode(int idx) CV_OVERRIDE diff --git a/modules/dnn/test/test_onnx_importer.cpp b/modules/dnn/test/test_onnx_importer.cpp index 3262a6799dbf..3e8c67f1867e 100644 --- a/modules/dnn/test/test_onnx_importer.cpp +++ b/modules/dnn/test/test_onnx_importer.cpp @@ -340,6 +340,16 @@ TEST_P(Test_ONNX_layers, Resize) testONNXModels("resize_bilinear"); } +TEST_P(Test_ONNX_layers, ResizeUnfused) +{ + if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019) + applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER); + testONNXModels("upsample_unfused_torch1.2"); + testONNXModels("upsample_unfused_opset9_torch1.4"); + testONNXModels("resize_nearest_unfused_opset11_torch1.4"); + testONNXModels("resize_nearest_unfused_opset11_torch1.3"); +} + TEST_P(Test_ONNX_layers, MultyInputs) { const String model = _tf("models/multy_inputs.onnx"); @@ -397,6 +407,8 @@ TEST_P(Test_ONNX_layers, DynamicReshape) if (target == DNN_TARGET_OPENCL) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER); } testONNXModels("dynamic_reshape"); + testONNXModels("dynamic_reshape_opset_11"); + testONNXModels("flatten_by_prod"); } TEST_P(Test_ONNX_layers, Reshape) diff --git a/modules/imgproc/perf/perf_integral.cpp b/modules/imgproc/perf/perf_integral.cpp index 59a506093724..2b1ab381e7ed 100644 --- a/modules/imgproc/perf/perf_integral.cpp +++ b/modules/imgproc/perf/perf_integral.cpp @@ -39,10 +39,23 @@ PERF_TEST_P(Size_MatType_OutMatDepth, integral, Mat sum(sz, sdepth); declare.in(src, WARMUP_RNG).out(sum); + if (sdepth == CV_32F) + src *= (1 << 23) / (double)(sz.area() * 256); // FP32 calculations are not accurate (mantissa is 23-bit) TEST_CYCLE() integral(src, sum, sdepth); - SANITY_CHECK(sum, 1e-6); + Mat src_roi; src(Rect(src.cols - 4, src.rows - 4, 4, 4)).convertTo(src_roi, sdepth); + Mat restored_src_roi = + sum(Rect(sum.cols - 4, sum.rows - 4, 4, 4)) + sum(Rect(sum.cols - 5, sum.rows - 5, 4, 4)) - + sum(Rect(sum.cols - 4, sum.rows - 5, 4, 4)) - sum(Rect(sum.cols - 5, sum.rows - 4, 4, 4)); + EXPECT_EQ(0, cvtest::norm(restored_src_roi, src_roi, NORM_INF)) + << src_roi << endl << restored_src_roi << endl + << sum(Rect(sum.cols - 4, sum.rows - 4, 4, 4)); + + if (sdepth == CV_32F) + SANITY_CHECK_NOTHING(); + else + SANITY_CHECK(sum, 1e-6); } PERF_TEST_P(Size_MatType_OutMatDepth, 
integral_sqsum, diff --git a/modules/imgproc/src/sumpixels.simd.hpp b/modules/imgproc/src/sumpixels.simd.hpp index 2ac02a0c3cdd..f5f3a92d852b 100644 --- a/modules/imgproc/src/sumpixels.simd.hpp +++ b/modules/imgproc/src/sumpixels.simd.hpp @@ -237,7 +237,11 @@ struct Integral_SIMD v_int32 prev_1 = vx_setzero_s32(), prev_2 = vx_setzero_s32(), prev_3 = vx_setzero_s32(); int j = 0; - for ( ; j + v_uint16::nlanes * cn <= width; j += v_uint16::nlanes * cn) + const int j_max = + ((_srcstep * i + (width - v_uint16::nlanes * cn + v_uint8::nlanes * cn)) >= _srcstep * height) + ? width - v_uint8::nlanes * cn // uint8 in v_load_deinterleave() + : width - v_uint16::nlanes * cn; // v_expand_low + for ( ; j <= j_max; j += v_uint16::nlanes * cn) { v_uint8 v_src_row_1, v_src_row_2, v_src_row_3; v_load_deinterleave(src_row + j, v_src_row_1, v_src_row_2, v_src_row_3); @@ -546,7 +550,11 @@ struct Integral_SIMD v_float32 prev_1 = vx_setzero_f32(), prev_2 = vx_setzero_f32(), prev_3 = vx_setzero_f32(); int j = 0; - for (; j + v_uint16::nlanes * cn <= width; j += v_uint16::nlanes * cn) + const int j_max = + ((_srcstep * i + (width - v_uint16::nlanes * cn + v_uint8::nlanes * cn)) >= _srcstep * height) + ? width - v_uint8::nlanes * cn // uint8 in v_load_deinterleave() + : width - v_uint16::nlanes * cn; // v_expand_low + for ( ; j <= j_max; j += v_uint16::nlanes * cn) { v_uint8 v_src_row_1, v_src_row_2, v_src_row_3; v_load_deinterleave(src_row + j, v_src_row_1, v_src_row_2, v_src_row_3); @@ -896,7 +904,11 @@ struct Integral_SIMD v_float64 prev_1 = vx_setzero_f64(), prev_2 = vx_setzero_f64(), prev_3 = vx_setzero_f64(); int j = 0; - for (; j + v_uint16::nlanes * cn <= width; j += v_uint16::nlanes * cn) + const int j_max = + ((_srcstep * i + (width - v_uint16::nlanes * cn + v_uint8::nlanes * cn)) >= _srcstep * height) + ? width - v_uint8::nlanes * cn // uint8 in v_load_deinterleave() + : width - v_uint16::nlanes * cn; // v_expand_low + for ( ; j <= j_max; j += v_uint16::nlanes * cn) { v_uint8 v_src_row_1, v_src_row_2, v_src_row_3; v_load_deinterleave(src_row + j, v_src_row_1, v_src_row_2, v_src_row_3); diff --git a/modules/python/src2/hdr_parser.py b/modules/python/src2/hdr_parser.py index 8d7f8707c277..130c19058e23 100755 --- a/modules/python/src2/hdr_parser.py +++ b/modules/python/src2/hdr_parser.py @@ -571,8 +571,8 @@ def parse_func_decl(self, decl_str, mat="Mat", docstring=""): arg_type, arg_name, modlist, argno = self.parse_arg(a, argno) if self.wrap_mode: # TODO: Vectors should contain UMat, but this is not very easy to support and not very needed - vector_mat = "vector_{}".format("Mat") - vector_mat_template = "vector<{}>".format("Mat") + vector_mat = "vector_{}".format(mat) + vector_mat_template = "vector<{}>".format(mat) if arg_type == "InputArray": arg_type = mat diff --git a/modules/python/test/test_umat.py b/modules/python/test/test_umat.py index 102bb12bfb67..0081bec5464f 100644 --- a/modules/python/test/test_umat.py +++ b/modules/python/test/test_umat.py @@ -4,8 +4,22 @@ import numpy as np import cv2 as cv +import os + from tests_common import NewOpenCVTests + +def load_exposure_seq(path): + images = [] + times = [] + with open(os.path.join(path, 'list.txt'), 'r') as list_file: + for line in list_file.readlines(): + name, time = line.split() + images.append(cv.imread(os.path.join(path, name))) + times.append(1. 
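// --- Illustration (not part of the patch): the identity behind the integral() sanity check above. ---
// A standalone sketch: each source pixel can be recovered from the (rows+1)x(cols+1) integral
// image via  src(y, x) = S(y+1, x+1) + S(y, x) - S(y, x+1) - S(y+1, x),  which is what the new
// 4x4 ROI comparison in perf_integral.cpp evaluates in matrix form. checkIntegralIdentity()
// is a hypothetical helper written only for this example.
#include <opencv2/imgproc.hpp>

void checkIntegralIdentity()
{
    cv::Mat src(8, 8, CV_8UC1), sum;
    cv::randu(src, 0, 256);
    cv::integral(src, sum, CV_64F);             // sum has one extra row and column
    const double restored = sum.at<double>(5, 5) + sum.at<double>(4, 4)
                          - sum.at<double>(4, 5) - sum.at<double>(5, 4);
    CV_Assert(restored == (double)src.at<uchar>(4, 4));
}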
/ float(time)) + return images, times + + class UMat(NewOpenCVTests): def test_umat_construct(self): @@ -85,5 +99,22 @@ def test_umat_optical_flow(self): for data_umat0, data_umat in zip(_p1_mask_err_umat0[:2], _p1_mask_err_umat[:2]): self.assertTrue(np.allclose(data_umat0, data_umat)) + def test_umat_merge_mertens(self): + if self.extraTestDataPath is None: + self.fail('Test data is not available') + + test_data_path = os.path.join(self.extraTestDataPath, 'cv', 'hdr') + + images, _ = load_exposure_seq(os.path.join(test_data_path, 'exposures')) + + merge = cv.createMergeMertens() + mat_result = merge.process(images) + + umat_images = [cv.UMat(img) for img in images] + umat_result = merge.process(umat_images) + + self.assertTrue(np.allclose(umat_result.get(), mat_result)) + + if __name__ == '__main__': NewOpenCVTests.bootstrap() diff --git a/modules/videoio/src/cap_msmf.cpp b/modules/videoio/src/cap_msmf.cpp index ec3f90970e1f..508f4a200f51 100644 --- a/modules/videoio/src/cap_msmf.cpp +++ b/modules/videoio/src/cap_msmf.cpp @@ -504,7 +504,7 @@ class FormatStorage best = *i; break; } - if (i->second.isBetterThan(best.second, newType)) + if (best.second.isEmpty() || i->second.isBetterThan(best.second, newType)) { best = *i; } diff --git a/samples/cpp/3calibration.cpp b/samples/cpp/3calibration.cpp index 4f96d403c6c4..2495dbd04187 100644 --- a/samples/cpp/3calibration.cpp +++ b/samples/cpp/3calibration.cpp @@ -17,10 +17,10 @@ using namespace std; enum { DETECTION = 0, CAPTURING = 1, CALIBRATED = 2 }; -static void help() +static void help(char** argv) { printf( "\nThis is a camera calibration sample that calibrates 3 horizontally placed cameras together.\n" - "Usage: 3calibration\n" + "Usage: %s\n" " -w= # the number of inner corners per one of board dimension\n" " -h= # the number of inner corners per another board dimension\n" " [-s=] # square size in some user-defined units (1 by default)\n" @@ -29,7 +29,7 @@ static void help() " [-a=] # fix aspect ratio (fx/fy)\n" " [-p] # fix the principal point at the center\n" " [input_data] # input data - text file with a list of the images of the board\n" - "\n" ); + "\n", argv[0] ); } @@ -190,7 +190,7 @@ int main( int argc, char** argv ) "{zt||}{a|1|}{p||}{@input||}"); if (parser.has("help")) { - help(); + help(argv); return 0; } boardSize.width = parser.get("w"); @@ -207,7 +207,7 @@ int main( int argc, char** argv ) inputFilename = parser.get("@input"); if (!parser.check()) { - help(); + help(argv); parser.printErrors(); return -1; } diff --git a/samples/cpp/calibration.cpp b/samples/cpp/calibration.cpp index 3f38c8bc38da..3e978736e7d0 100644 --- a/samples/cpp/calibration.cpp +++ b/samples/cpp/calibration.cpp @@ -47,10 +47,10 @@ const char* liveCaptureHelp = " 'g' - start capturing images\n" " 'u' - switch undistortion on/off\n"; -static void help() +static void help(char** argv) { printf( "This is a camera calibration sample.\n" - "Usage: calibration\n" + "Usage: %s\n" " -w= # the number of inner corners per one of board dimension\n" " -h= # the number of inner corners per another board dimension\n" " [-pt=] # the type of pattern: chessboard or circles' grid\n" @@ -81,7 +81,7 @@ static void help() " # the text file can be generated with imagelist_creator\n" " # - name of video file with a video of the board\n" " # if input_data not specified, a live view from the camera is used\n" - "\n" ); + "\n", argv[0] ); printf("\n%s",usage); printf( "\n%s", liveCaptureHelp ); } @@ -378,7 +378,7 @@ int main( int argc, char** argv ) "{@input_data|0|}"); if 
(parser.has("help")) { - help(); + help(argv); return 0; } boardSize.width = parser.get( "w" ); @@ -427,7 +427,7 @@ int main( int argc, char** argv ) } if (!parser.check()) { - help(); + help(argv); parser.printErrors(); return -1; } diff --git a/samples/cpp/camshiftdemo.cpp b/samples/cpp/camshiftdemo.cpp index 22ce7f8488f0..e0840f75046f 100644 --- a/samples/cpp/camshiftdemo.cpp +++ b/samples/cpp/camshiftdemo.cpp @@ -1,4 +1,4 @@ -#include +#include "opencv2/core/utility.hpp" #include "opencv2/video/tracking.hpp" #include "opencv2/imgproc.hpp" #include "opencv2/videoio.hpp" @@ -57,13 +57,13 @@ string hot_keys = "\tp - pause video\n" "To initialize tracking, select the object with mouse\n"; -static void help() +static void help(const char** argv) { cout << "\nThis is a demo that shows mean-shift based tracking\n" "You select a color objects such as your face and it tracks it.\n" "This reads from video camera (0 by default, or the camera number the user enters\n" - "Usage: \n" - " ./camshiftdemo [camera number]\n"; + "Usage: \n\t"; + cout << argv[0] << " [camera number]\n"; cout << hot_keys; } @@ -82,7 +82,7 @@ int main( int argc, const char** argv ) CommandLineParser parser(argc, argv, keys); if (parser.has("help")) { - help(); + help(argv); return 0; } int camNum = parser.get(0); @@ -90,7 +90,7 @@ int main( int argc, const char** argv ) if( !cap.isOpened() ) { - help(); + help(argv); cout << "***Could not initialize capturing...***\n"; cout << "Current parameter's value: \n"; parser.printMessage(); diff --git a/samples/cpp/contours2.cpp b/samples/cpp/contours2.cpp index 437f76cb246a..c3653fb5d667 100644 --- a/samples/cpp/contours2.cpp +++ b/samples/cpp/contours2.cpp @@ -6,15 +6,16 @@ using namespace cv; using namespace std; -static void help() +static void help(char** argv) { cout - << "\nThis program illustrates the use of findContours and drawContours\n" - << "The original image is put up along with the image of drawn contours\n" - << "Usage:\n" - << "./contours2\n" - << "\nA trackbar is put up which controls the contour level from -3 to 3\n" - << endl; + << "\nThis program illustrates the use of findContours and drawContours\n" + << "The original image is put up along with the image of drawn contours\n" + << "Usage:\n"; + cout + << argv[0] + << "\nA trackbar is put up which controls the contour level from -3 to 3\n" + << endl; } const int w = 500; @@ -38,7 +39,7 @@ int main( int argc, char** argv) cv::CommandLineParser parser(argc, argv, "{help h||}"); if (parser.has("help")) { - help(); + help(argv); return 0; } Mat img = Mat::zeros(w, w, CV_8UC1); diff --git a/samples/cpp/convexhull.cpp b/samples/cpp/convexhull.cpp index f89e5b27bbc1..bb34be246932 100644 --- a/samples/cpp/convexhull.cpp +++ b/samples/cpp/convexhull.cpp @@ -5,11 +5,11 @@ using namespace cv; using namespace std; -static void help() +static void help(char** argv) { cout << "\nThis sample program demonstrates the use of the convexHull() function\n" << "Call:\n" - << "./convexhull\n" << endl; + << argv[0] << endl; } int main( int argc, char** argv ) @@ -17,7 +17,7 @@ int main( int argc, char** argv ) CommandLineParser parser(argc, argv, "{help h||}"); if (parser.has("help")) { - help(); + help(argv); return 0; } Mat img(500, 500, CV_8UC3); diff --git a/samples/cpp/cout_mat.cpp b/samples/cpp/cout_mat.cpp index 8315d7b78073..0d28241556da 100644 --- a/samples/cpp/cout_mat.cpp +++ b/samples/cpp/cout_mat.cpp @@ -11,7 +11,7 @@ using namespace std; using namespace cv; -static void help() +static void help(char** argv) { cout << 
"\n------------------------------------------------------------------\n" @@ -19,8 +19,8 @@ static void help() << "That is, cv::Mat M(...); cout << M; Now works.\n" << "Output can be formatted to OpenCV, matlab, python, numpy, csv and \n" << "C styles Usage:\n" - << "./cvout_sample\n" - << "------------------------------------------------------------------\n\n" + << argv[0] + << "\n------------------------------------------------------------------\n\n" << endl; } @@ -30,7 +30,7 @@ int main(int argc, char** argv) cv::CommandLineParser parser(argc, argv, "{help h||}"); if (parser.has("help")) { - help(); + help(argv); return 0; } Mat I = Mat::eye(4, 4, CV_64F); diff --git a/samples/cpp/delaunay2.cpp b/samples/cpp/delaunay2.cpp index 26f10bd66814..428804e31c67 100644 --- a/samples/cpp/delaunay2.cpp +++ b/samples/cpp/delaunay2.cpp @@ -5,15 +5,15 @@ using namespace cv; using namespace std; -static void help() +static void help(char** argv) { cout << "\nThis program demonstrates iterative construction of\n" - "delaunay triangulation and voronoi tessellation.\n" - "It draws a random set of points in an image and then delaunay triangulates them.\n" - "Usage: \n" - "./delaunay \n" - "\nThis program builds the triangulation interactively, you may stop this process by\n" - "hitting any key.\n"; + "delaunay triangulation and voronoi tessellation.\n" + "It draws a random set of points in an image and then delaunay triangulates them.\n" + "Usage: \n"; + cout << argv[0]; + cout << "\n\nThis program builds the triangulation interactively, you may stop this process by\n" + "hitting any key.\n"; } static void draw_subdiv_point( Mat& img, Point2f fp, Scalar color ) @@ -108,7 +108,7 @@ int main( int argc, char** argv ) cv::CommandLineParser parser(argc, argv, "{help h||}"); if (parser.has("help")) { - help(); + help(argv); return 0; } diff --git a/samples/cpp/detect_blob.cpp b/samples/cpp/detect_blob.cpp index e969cc06848a..97170a9df18b 100644 --- a/samples/cpp/detect_blob.cpp +++ b/samples/cpp/detect_blob.cpp @@ -10,12 +10,13 @@ using namespace std; using namespace cv; -static void help() +static void help(char** argv) { cout << "\n This program demonstrates how to use BLOB to detect and filter region \n" - "Usage: \n" - " ./detect_blob \n" - "Press a key when image window is active to change descriptor"; + << "Usage: \n" + << argv[0] + << " \n" + << "Press a key when image window is active to change descriptor"; } @@ -74,7 +75,7 @@ int main(int argc, char *argv[]) cv::CommandLineParser parser(argc, argv, "{@input |detect_blob.png| }{h help | | }"); if (parser.has("h")) { - help(); + help(argv); return 0; } fileName = parser.get("@input"); @@ -120,7 +121,7 @@ int main(int argc, char *argv[]) uchar c3 = (uchar)rand(); palette.push_back(Vec3b(c1, c2, c3)); } - help(); + help(argv); // These descriptors are going to be detecting and computing BLOBS with 6 different params diff --git a/samples/cpp/detect_mser.cpp b/samples/cpp/detect_mser.cpp index e2de544a9135..d289e229235b 100644 --- a/samples/cpp/detect_mser.cpp +++ b/samples/cpp/detect_mser.cpp @@ -34,13 +34,13 @@ using namespace std; using namespace cv; -static void help() +static void help(char** argv) { cout << "\nThis program demonstrates how to use MSER to detect extremal regions\n" - "Usage:\n" - " ./detect_mser \n" - "Press esc key when image window is active to change descriptor parameter\n" - "Press 2, 8, 4, 6, +, -, or 5 keys in openGL windows to change view or use mouse\n"; + "Usage:\n" + << argv[0] << " \n" + "Press esc key when image window is 
active to change descriptor parameter\n" + "Press 2, 8, 4, 6, +, -, or 5 keys in openGL windows to change view or use mouse\n"; } struct MSERParams @@ -405,7 +405,7 @@ int main(int argc, char *argv[]) cv::CommandLineParser parser(argc, argv, "{ help h | | }{ @input | | }"); if (parser.has("help")) { - help(); + help(argv); return 0; } @@ -431,7 +431,7 @@ int main(int argc, char *argv[]) for (int i = 0; i<=numeric_limits::max(); i++) palette.push_back(Vec3b((uchar)rand(), (uchar)rand(), (uchar)rand())); - help(); + help(argv); MSERParams params; diff --git a/samples/cpp/digits.cpp b/samples/cpp/digits.cpp index e3ef7fc59f6e..e401ab2b895b 100644 --- a/samples/cpp/digits.cpp +++ b/samples/cpp/digits.cpp @@ -15,7 +15,7 @@ const int SZ = 20; // size of each digit is SZ x SZ const int CLASS_N = 10; const char* DIGITS_FN = "digits.png"; -static void help() +static void help(char** argv) { cout << "\n" @@ -38,7 +38,7 @@ static void help() " http://www.robots.ox.ac.uk/~vgg/publications/2012/Arandjelovic12/arandjelovic12.pdf\n" "\n" "Usage:\n" - " ./digits\n" << endl; + << argv[0] << endl; } static void split2d(const Mat& image, const Size cell_size, vector& cells) @@ -299,9 +299,9 @@ static void shuffle(vector& digits, vector& labels) labels = shuffled_labels; } -int main() +int main(int /* argc */, char* argv[]) { - help(); + help(argv); vector digits; vector labels; diff --git a/samples/cpp/distrans.cpp b/samples/cpp/distrans.cpp index 67547f25e3dd..9304ec6d9d95 100644 --- a/samples/cpp/distrans.cpp +++ b/samples/cpp/distrans.cpp @@ -87,11 +87,11 @@ static void onTrackbar( int, void* ) imshow("Distance Map", dist8u ); } -static void help() +static void help(const char** argv) { printf("\nProgram to demonstrate the use of the distance transform function between edge images.\n" "Usage:\n" - "./distrans [image_name -- default image is stuff.jpg]\n" + "%s [image_name -- default image is stuff.jpg]\n" "\nHot keys: \n" "\tESC - quit the program\n" "\tC - use C/Inf metric\n" @@ -102,7 +102,7 @@ static void help() "\t0 - use precise distance transform\n" "\tv - switch to Voronoi diagram mode\n" "\tp - switch to pixel-based Voronoi diagram mode\n" - "\tSPACE - loop through all the modes\n\n"); + "\tSPACE - loop through all the modes\n\n", argv[0]); } const char* keys = @@ -113,7 +113,7 @@ const char* keys = int main( int argc, const char** argv ) { CommandLineParser parser(argc, argv, keys); - help(); + help(argv); if (parser.has("help")) return 0; string filename = parser.get(0); @@ -121,7 +121,7 @@ int main( int argc, const char** argv ) if(gray.empty()) { printf("Cannot read image file: %s\n", filename.c_str()); - help(); + help(argv); return -1; } diff --git a/samples/cpp/drawing.cpp b/samples/cpp/drawing.cpp index 93f09fe2e460..eba297ee5efa 100644 --- a/samples/cpp/drawing.cpp +++ b/samples/cpp/drawing.cpp @@ -5,11 +5,11 @@ using namespace cv; -static void help() +static void help(char** argv) { printf("\nThis program demonstrates OpenCV drawing and text output functions.\n" "Usage:\n" - " ./drawing\n"); + " %s\n", argv[0]); } static Scalar randomColor(RNG& rng) { @@ -17,9 +17,9 @@ static Scalar randomColor(RNG& rng) return Scalar(icolor&255, (icolor>>8)&255, (icolor>>16)&255); } -int main() +int main(int /* argc */, char** argv) { - help(); + help(argv); char wndname[] = "Drawing Demo"; const int NUMBER = 100; const int DELAY = 5; diff --git a/samples/cpp/edge.cpp b/samples/cpp/edge.cpp index ba21cb512ab3..339baf2aeb79 100644 --- a/samples/cpp/edge.cpp +++ b/samples/cpp/edge.cpp @@ -39,11 +39,11 @@ 
static void onTrackbar(int, void*) imshow(window_name2, cedge); } -static void help() +static void help(const char** argv) { printf("\nThis sample demonstrates Canny edge detection\n" "Call:\n" - " /.edge [image_name -- Default is fruits.jpg]\n\n"); + " %s [image_name -- Default is fruits.jpg]\n\n", argv[0]); } const char* keys = @@ -53,7 +53,7 @@ const char* keys = int main( int argc, const char** argv ) { - help(); + help(argv); CommandLineParser parser(argc, argv, keys); string filename = parser.get(0); @@ -61,7 +61,7 @@ int main( int argc, const char** argv ) if(image.empty()) { printf("Cannot read image file: %s\n", filename.c_str()); - help(); + help(argv); return -1; } cedge.create(image.size(), image.type()); diff --git a/samples/cpp/facedetect.cpp b/samples/cpp/facedetect.cpp index cf3a2a3e2ffa..9c846faf4898 100644 --- a/samples/cpp/facedetect.cpp +++ b/samples/cpp/facedetect.cpp @@ -7,19 +7,21 @@ using namespace std; using namespace cv; -static void help() +static void help(const char** argv) { cout << "\nThis program demonstrates the use of cv::CascadeClassifier class to detect objects (Face + eyes). You can use Haar or LBP features.\n" "This classifier can recognize many kinds of rigid objects, once the appropriate classifier is trained.\n" "It's most known use is for faces.\n" "Usage:\n" - "./facedetect [--cascade= this is the primary trained classifier such as frontal face]\n" - " [--nested-cascade[=nested_cascade_path this an optional secondary classifier such as eyes]]\n" - " [--scale=]\n" - " [--try-flip]\n" - " [filename|camera_index]\n\n" - "see facedetect.cmd for one call:\n" - "./facedetect --cascade=\"data/haarcascades/haarcascade_frontalface_alt.xml\" --nested-cascade=\"data/haarcascades/haarcascade_eye_tree_eyeglasses.xml\" --scale=1.3\n\n" + << argv[0] + << " [--cascade= this is the primary trained classifier such as frontal face]\n" + " [--nested-cascade[=nested_cascade_path this an optional secondary classifier such as eyes]]\n" + " [--scale=]\n" + " [--try-flip]\n" + " [filename|camera_index]\n\n" + "example:\n" + << argv[0] + << " --cascade=\"data/haarcascades/haarcascade_frontalface_alt.xml\" --nested-cascade=\"data/haarcascades/haarcascade_eye_tree_eyeglasses.xml\" --scale=1.3\n\n" "During execution:\n\tHit any key to quit.\n" "\tUsing OpenCV version " << CV_VERSION << "\n" << endl; } @@ -48,7 +50,7 @@ int main( int argc, const char** argv ) ); if (parser.has("help")) { - help(); + help(argv); return 0; } cascadeName = parser.get("cascade"); @@ -68,7 +70,7 @@ int main( int argc, const char** argv ) if (!cascade.load(samples::findFile(cascadeName))) { cerr << "ERROR: Could not load classifier cascade" << endl; - help(); + help(argv); return -1; } if( inputName.empty() || (isdigit(inputName[0]) && inputName.size() == 1) ) diff --git a/samples/cpp/facial_features.cpp b/samples/cpp/facial_features.cpp index 5495bbbf66d0..ebe756c8207b 100644 --- a/samples/cpp/facial_features.cpp +++ b/samples/cpp/facial_features.cpp @@ -19,7 +19,7 @@ using namespace std; using namespace cv; // Functions for facial feature detection -static void help(); +static void help(char** argv); static void detectFaces(Mat&, vector >&, string); static void detectEyes(Mat&, vector >&, string); static void detectNose(Mat&, vector >&, string); @@ -35,7 +35,7 @@ int main(int argc, char** argv) "{eyes||}{nose||}{mouth||}{help h||}{@image||}{@facexml||}"); if (parser.has("help")) { - help(); + help(argv); return 0; } input_image_path = parser.get("@image"); @@ -63,14 +63,14 @@ int main(int argc, 
char** argv) return 0; } -static void help() +static void help(char** argv) { cout << "\nThis file demonstrates facial feature points detection using Haarcascade classifiers.\n" "The program detects a face and eyes, nose and mouth inside the face." "The code has been tested on the Japanese Female Facial Expression (JAFFE) database and found" "to give reasonably accurate results. \n"; - cout << "\nUSAGE: ./cpp-example-facial_features [IMAGE] [FACE_CASCADE] [OPTIONS]\n" + cout << "\nUSAGE: " << argv[0] << " [IMAGE] [FACE_CASCADE] [OPTIONS]\n" "IMAGE\n\tPath to the image of a face taken as input.\n" "FACE_CASCSDE\n\t Path to a haarcascade classifier for face detection.\n" "OPTIONS: \nThere are 3 options available which are described in detail. There must be a " @@ -81,11 +81,11 @@ static void help() cout << "EXAMPLE:\n" - "(1) ./cpp-example-facial_features image.jpg face.xml -eyes=eyes.xml -mouth=mouth.xml\n" + "(1) " << argv[0] << " image.jpg face.xml -eyes=eyes.xml -mouth=mouth.xml\n" "\tThis will detect the face, eyes and mouth in image.jpg.\n" - "(2) ./cpp-example-facial_features image.jpg face.xml -nose=nose.xml\n" + "(2) " << argv[0] << " image.jpg face.xml -nose=nose.xml\n" "\tThis will detect the face and nose in image.jpg.\n" - "(3) ./cpp-example-facial_features image.jpg face.xml\n" + "(3) " << argv[0] << " image.jpg face.xml\n" "\tThis will detect only the face in image.jpg.\n"; cout << " \n\nThe classifiers for face and eyes can be downloaded from : " diff --git a/samples/cpp/fback.cpp b/samples/cpp/fback.cpp index a0448440234c..74e55a5e928b 100644 --- a/samples/cpp/fback.cpp +++ b/samples/cpp/fback.cpp @@ -8,14 +8,14 @@ using namespace cv; using namespace std; -static void help() +static void help(char** argv) { cout << "\nThis program demonstrates dense optical flow algorithm by Gunnar Farneback\n" "Mainly the function: calcOpticalFlowFarneback()\n" "Call:\n" - "./fback\n" - "This reads from video camera 0\n" << endl; + << argv[0] + << "This reads from video camera 0\n" << endl; } static void drawOptFlowMap(const Mat& flow, Mat& cflowmap, int step, double, const Scalar& color) @@ -35,11 +35,11 @@ int main(int argc, char** argv) cv::CommandLineParser parser(argc, argv, "{help h||}"); if (parser.has("help")) { - help(); + help(argv); return 0; } VideoCapture cap(0); - help(); + help(argv); if( !cap.isOpened() ) return -1; diff --git a/samples/cpp/ffilldemo.cpp b/samples/cpp/ffilldemo.cpp index 074a9ae3408b..1f0f73714ecc 100644 --- a/samples/cpp/ffilldemo.cpp +++ b/samples/cpp/ffilldemo.cpp @@ -8,11 +8,12 @@ using namespace cv; using namespace std; -static void help() +static void help(char** argv) { cout << "\nThis program demonstrated the floodFill() function\n" "Call:\n" - "./ffilldemo [image_name -- Default: fruits.jpg]\n" << endl; + << argv[0] + << " [image_name -- Default: fruits.jpg]\n" << endl; cout << "Hot keys: \n" "\tESC - quit the program\n" @@ -90,7 +91,7 @@ int main( int argc, char** argv ) parser.printMessage(); return 0; } - help(); + help(argv); image0.copyTo(image); cvtColor(image0, gray, COLOR_BGR2GRAY); mask.create(image0.rows+2, image0.cols+2, CV_8UC1); diff --git a/samples/cpp/fitellipse.cpp b/samples/cpp/fitellipse.cpp index 6d3346be1abe..f136b9c408a5 100644 --- a/samples/cpp/fitellipse.cpp +++ b/samples/cpp/fitellipse.cpp @@ -164,14 +164,13 @@ class canvas{ }; -static void help() +static void help(char** argv) { - cout << - "\nThis program is demonstration for ellipse fitting. The program finds\n" - "contours and approximate it by ellipses. 
Three methods are used to find the \n" - "elliptical fits: fitEllipse, fitEllipseAMS and fitEllipseDirect.\n" - "Call:\n" - "./fitellipse [image_name -- Default ellipses.jpg]\n" << endl; + cout << "\nThis program is demonstration for ellipse fitting. The program finds\n" + "contours and approximate it by ellipses. Three methods are used to find the \n" + "elliptical fits: fitEllipse, fitEllipseAMS and fitEllipseDirect.\n" + "Call:\n" + << argv[0] << " [image_name -- Default ellipses.jpg]\n" << endl; } int sliderPos = 70; @@ -195,7 +194,7 @@ int main( int argc, char** argv ) cv::CommandLineParser parser(argc, argv,"{help h||}{@image|ellipses.jpg|}"); if (parser.has("help")) { - help(); + help(argv); return 0; } string filename = parser.get("@image"); diff --git a/samples/cpp/grabcut.cpp b/samples/cpp/grabcut.cpp index 03ca7be23bdf..d3e3db49f9d1 100644 --- a/samples/cpp/grabcut.cpp +++ b/samples/cpp/grabcut.cpp @@ -7,25 +7,25 @@ using namespace std; using namespace cv; -static void help() +static void help(char** argv) { cout << "\nThis program demonstrates GrabCut segmentation -- select an object in a region\n" "and then grabcut will attempt to segment it out.\n" "Call:\n" - "./grabcut \n" - "\nSelect a rectangular area around the object you want to segment\n" << - "\nHot keys: \n" - "\tESC - quit the program\n" - "\tr - restore the original image\n" - "\tn - next iteration\n" - "\n" - "\tleft mouse button - set rectangle\n" - "\n" - "\tCTRL+left mouse button - set GC_BGD pixels\n" - "\tSHIFT+left mouse button - set GC_FGD pixels\n" - "\n" - "\tCTRL+right mouse button - set GC_PR_BGD pixels\n" - "\tSHIFT+right mouse button - set GC_PR_FGD pixels\n" << endl; + << argv[0] << " \n" + "\nSelect a rectangular area around the object you want to segment\n" << + "\nHot keys: \n" + "\tESC - quit the program\n" + "\tr - restore the original image\n" + "\tn - next iteration\n" + "\n" + "\tleft mouse button - set rectangle\n" + "\n" + "\tCTRL+left mouse button - set GC_BGD pixels\n" + "\tSHIFT+left mouse button - set GC_FGD pixels\n" + "\n" + "\tCTRL+right mouse button - set GC_PR_BGD pixels\n" + "\tSHIFT+right mouse button - set GC_PR_FGD pixels\n" << endl; } const Scalar RED = Scalar(0,0,255); @@ -277,7 +277,7 @@ static void on_mouse( int event, int x, int y, int flags, void* param ) int main( int argc, char** argv ) { cv::CommandLineParser parser(argc, argv, "{@input| messi5.jpg |}"); - help(); + help(argv); string filename = parser.get("@input"); if( filename.empty() ) diff --git a/samples/cpp/image_alignment.cpp b/samples/cpp/image_alignment.cpp index ab0c1292190e..fc7ab18a7145 100644 --- a/samples/cpp/image_alignment.cpp +++ b/samples/cpp/image_alignment.cpp @@ -28,7 +28,7 @@ using namespace cv; using namespace std; -static void help(void); +static void help(const char** argv); static int readWarp(string iFilename, Mat& warp, int motionType); static int saveWarp(string fileName, const Mat& warp, int motionType); static void draw_warped_roi(Mat& image, const int width, const int height, Mat& W); @@ -57,7 +57,7 @@ const std::string keys = ; -static void help(void) +static void help(const char** argv) { cout << "\nThis file demonstrates the use of the ECC image alignment algorithm. When one image" @@ -65,10 +65,14 @@ static void help(void) " are given, the initialization of the warp by command line parsing is possible. " "If inputWarp is missing, the identity transformation initializes the algorithm. 
-    cout << "\nUsage example (one image): \n./image_alignment fruits.jpg -o=outWarp.ecc "
-        "-m=euclidean -e=1e-6 -N=70 -v=1 \n" << endl;
+    cout << "\nUsage example (one image): \n"
+         << argv[0]
+         << " fruits.jpg -o=outWarp.ecc "
+            "-m=euclidean -e=1e-6 -N=70 -v=1 \n" << endl;
 
-    cout << "\nUsage example (two images with initialization): \n./image_alignment yourInput.png yourTemplate.png "
+    cout << "\nUsage example (two images with initialization): \n"
+         << argv[0]
+         << " yourInput.png yourTemplate.png "
         "yourInitialWarp.ecc -o=outWarp.ecc -m=homography -e=1e-6 -N=70 -v=1 -w=yourFinalImage.png \n" << endl;
 }
 
@@ -178,7 +182,7 @@ int main (const int argc, const char * argv[])
     parser.about("ECC demo");
     parser.printMessage();
-    help();
+    help(argv);
 
     string imgFile = parser.get(0);
     string tempImgFile = parser.get(1);
diff --git a/samples/cpp/laplace.cpp b/samples/cpp/laplace.cpp
index 1bf3e518d13e..e33a284a033e 100644
--- a/samples/cpp/laplace.cpp
+++ b/samples/cpp/laplace.cpp
@@ -9,13 +9,13 @@
 using namespace cv;
 using namespace std;
 
-static void help()
+static void help(char** argv)
 {
     cout << "\nThis program demonstrates Laplace point/edge detection using OpenCV function Laplacian()\n"
             "It captures from the camera of your choice: 0, 1, ... default 0\n"
             "Call:\n"
-            "./laplace -c= -p=\n" << endl;
+         << argv[0] << " -c= -p=\n" << endl;
 }
 
 enum {GAUSSIAN, BLUR, MEDIAN};
@@ -26,7 +26,7 @@ int smoothType = GAUSSIAN;
 int main( int argc, char** argv )
 {
     cv::CommandLineParser parser(argc, argv, "{ c | 0 | }{ p | | }");
-    help();
+    help(argv);
 
     VideoCapture cap;
     string camera = parser.get("c");
diff --git a/samples/cpp/letter_recog.cpp b/samples/cpp/letter_recog.cpp
index 66756b61cbed..bcad2f468739 100644
--- a/samples/cpp/letter_recog.cpp
+++ b/samples/cpp/letter_recog.cpp
@@ -9,7 +9,7 @@
 using namespace std;
 using namespace cv;
 using namespace cv::ml;
 
-static void help()
+static void help(char** argv)
 {
     printf("\nThe sample demonstrates how to train Random Trees classifier\n"
            "(or Boosting classifier, or MLP, or Knearest, or Nbayes, or Support Vector Machines - see main()) using the provided dataset.\n"
@@ -28,10 +28,10 @@ static void help()
            "and the remaining 4000 (10000 for boosting) - to test the classifier.\n"
            "======================================================\n");
     printf("\nThis is letter recognition sample.\n"
-           "The usage: letter_recog [-data=] \\\n"
+           "The usage: %s [-data=] \\\n"
           " [-save=] \\\n"
           " [-load=] \\\n"
-           " [-boost|-mlp|-knearest|-nbayes|-svm] # to use boost/mlp/knearest/SVM classifier instead of default Random Trees\n" );
+           " [-boost|-mlp|-knearest|-nbayes|-svm] # to use boost/mlp/knearest/SVM classifier instead of default Random Trees\n", argv[0] );
 }
 
 // This function reads data and responses from the file
@@ -538,7 +538,7 @@ int main( int argc, char *argv[] )
     else if (parser.has("svm"))
         method = 5;
 
-    help();
+    help(argv);
 
     if( (method == 0 ?
        build_rtrees_classifier( data_filename, filename_to_save, filename_to_load ) :
diff --git a/samples/cpp/matchmethod_orb_akaze_brisk.cpp b/samples/cpp/matchmethod_orb_akaze_brisk.cpp
index 8cf8a997a85d..6e0680a2d4a9 100644
--- a/samples/cpp/matchmethod_orb_akaze_brisk.cpp
+++ b/samples/cpp/matchmethod_orb_akaze_brisk.cpp
@@ -8,11 +8,11 @@
 using namespace std;
 using namespace cv;
 
-static void help()
+static void help(char* argv[])
 {
     cout << "\n This program demonstrates how to detect compute and match ORB BRISK and AKAZE descriptors \n"
-        "Usage: \n"
-        " ./matchmethod_orb_akaze_brisk --image1= --image2=\n"
+        "Usage: \n "
+        << argv[0] << " --image1= --image2=\n"
         "Press a key when image window is active to change algorithm or descriptor";
 }
 
@@ -39,7 +39,7 @@ int main(int argc, char *argv[])
         "{help h ||}");
     if (parser.has("help"))
     {
-        help();
+        help(argv);
         return 0;
     }
     fileName.push_back(samples::findFile(parser.get(0)));
diff --git a/samples/cpp/morphology2.cpp b/samples/cpp/morphology2.cpp
index 2464e303282a..f1d8d15b177f 100644
--- a/samples/cpp/morphology2.cpp
+++ b/samples/cpp/morphology2.cpp
@@ -7,12 +7,12 @@
 
 using namespace cv;
 
-static void help()
+static void help(char** argv)
 {
     printf("\nShow off image morphology: erosion, dialation, open and close\n"
-           "Call:\n   morphology2 [image]\n"
-           "This program also shows use of rect, ellipse and cross kernels\n\n");
+           "Call:\n   %s [image]\n"
+           "This program also shows use of rect, ellipse and cross kernels\n\n", argv[0]);
     printf( "Hot keys: \n"
            "\tESC - quit the program\n"
            "\tr - use rectangle structuring element\n"
@@ -62,13 +62,13 @@ int main( int argc, char** argv )
     cv::CommandLineParser parser(argc, argv, "{help h||}{ @image | baboon.jpg | }");
     if (parser.has("help"))
     {
-        help();
+        help(argv);
         return 0;
     }
     std::string filename = samples::findFile(parser.get("@image"));
     if( (src = imread(filename,IMREAD_COLOR)).empty() )
     {
-        help();
+        help(argv);
         return -1;
     }
diff --git a/samples/cpp/segment_objects.cpp b/samples/cpp/segment_objects.cpp
index 3053bb8efe5e..2bcddddf85a1 100644
--- a/samples/cpp/segment_objects.cpp
+++ b/samples/cpp/segment_objects.cpp
@@ -8,14 +8,14 @@
 using namespace std;
 using namespace cv;
 
-static void help()
+static void help(char** argv)
 {
     printf("\n"
            "This program demonstrated a simple method of connected components clean up of background subtraction\n"
            "When the program starts, it begins learning the background.\n"
            "You can toggle background learning on and off by hitting the space bar.\n"
            "Call\n"
-           "./segment_objects [video file, else it reads camera 0]\n\n");
+           "%s [video file, else it reads camera 0]\n\n", argv[0]);
 }
 
 static void refineSegments(const Mat& img, Mat& mask, Mat& dst)
@@ -66,7 +66,7 @@ int main(int argc, char** argv)
     CommandLineParser parser(argc, argv, "{help h||}{@input||}");
     if (parser.has("help"))
     {
-        help();
+        help(argv);
         return 0;
     }
     string input = parser.get("@input");
diff --git a/samples/cpp/select3dobj.cpp b/samples/cpp/select3dobj.cpp
index 4f32f195a4fd..252bc266cc1d 100644
--- a/samples/cpp/select3dobj.cpp
+++ b/samples/cpp/select3dobj.cpp
@@ -19,48 +19,51 @@
 #include 
 #include 
 #include 
+#include 
 
 using namespace std;
 using namespace cv;
 
-const char* helphelp =
-"\nThis program's purpose is to collect data sets of an object and its segmentation mask.\n"
-"\n"
-"It shows how to use a calibrated camera together with a calibration pattern to\n"
-"compute the homography of the plane the calibration pattern is on. It also shows grabCut\n"
-"segmentation etc.\n"
-"\n"
-"select3dobj -w= -h= [-s=]\n"
-" -i= -o=\n"
-"\n"
-" -w= Number of chessboard corners wide\n"
-" -h= Number of chessboard corners width\n"
-" [-s=] Optional measure of chessboard squares in meters\n"
-" -i= Camera matrix .yml file from calibration.cpp\n"
-" -o= Prefix the output segmentation images with this\n"
-" [video_filename/cameraId] If present, read from that video file or that ID\n"
-"\n"
-"Using a camera's intrinsics (from calibrating a camera -- see calibration.cpp) and an\n"
-"image of the object sitting on a planar surface with a calibration pattern of\n"
-"(board_width x board_height) on the surface, we draw a 3D box around the object. From\n"
-"then on, we can move a camera and as long as it sees the chessboard calibration pattern,\n"
-"it will store a mask of where the object is. We get successive images using \n"
-"of the segmentation mask containing the object. This makes creating training sets easy.\n"
-"It is best if the chessboard is odd x even in dimensions to avoid ambiguous poses.\n"
-"\n"
-"The actions one can use while the program is running are:\n"
-"\n"
-" Select object as 3D box with the mouse.\n"
-"  First draw one line on the plane to outline the projection of that object on the plane\n"
-"  Then extend that line into a box to encompass the projection of that object onto the plane\n"
-"  The use the mouse again to extend the box upwards from the plane to encase the object.\n"
-"  Then use the following commands\n"
-"   ESC - Reset the selection\n"
-"   SPACE - Skip the frame; move to the next frame (not in video mode)\n"
-"   ENTER - Confirm the selection. Grab next object in video mode.\n"
-"   q - Exit the program\n"
-"\n\n";
-
+static string helphelp(char** argv)
+{
+    return string("\nThis program's purpose is to collect data sets of an object and its segmentation mask.\n")
+    + "\n"
+    "It shows how to use a calibrated camera together with a calibration pattern to\n"
+    "compute the homography of the plane the calibration pattern is on. It also shows grabCut\n"
+    "segmentation etc.\n"
+    "\n"
+    + argv[0]
+    + " -w= -h= [-s=]\n"
+    " -i= -o=\n"
+    "\n"
+    " -w= Number of chessboard corners wide\n"
+    " -h= Number of chessboard corners high\n"
+    " [-s=] Optional measure of chessboard squares in meters\n"
+    " -i= Camera matrix .yml file from calibration.cpp\n"
+    " -o= Prefix the output segmentation images with this\n"
+    " [video_filename/cameraId] If present, read from that video file or that ID\n"
+    "\n"
+    "Using a camera's intrinsics (from calibrating a camera -- see calibration.cpp) and an\n"
+    "image of the object sitting on a planar surface with a calibration pattern of\n"
+    "(board_width x board_height) on the surface, we draw a 3D box around the object. From\n"
+    "then on, we can move a camera and as long as it sees the chessboard calibration pattern,\n"
+    "it will store a mask of where the object is. We get successive images using \n"
+    "of the segmentation mask containing the object. This makes creating training sets easy.\n"
+    "It is best if the chessboard is odd x even in dimensions to avoid ambiguous poses.\n"
+    "\n"
+    "The actions one can use while the program is running are:\n"
+    "\n"
+    " Select object as 3D box with the mouse.\n"
+    "  First draw one line on the plane to outline the projection of that object on the plane\n"
+    "  Then extend that line into a box to encompass the projection of that object onto the plane\n"
+    "  Then use the mouse again to extend the box upwards from the plane to encase the object.\n"
+    "  Then use the following commands\n"
+    "   ESC - Reset the selection\n"
+    "   SPACE - Skip the frame; move to the next frame (not in video mode)\n"
+    "   ENTER - Confirm the selection. Grab next object in video mode.\n"
+    "   q - Exit the program\n"
+    "\n\n";
+}
 // static void help()
 // {
 //    puts(helphelp);
@@ -384,7 +387,7 @@ static bool readStringList( const string& filename, vector& l )
 
 int main(int argc, char** argv)
 {
-    const char* help = "Usage: select3dobj -w= -h= [-s=]\n"
+    string help = string("Usage: ") + argv[0] + " -w= -h= [-s=]\n"
         "\t-i= -o= [video_filename/cameraId]\n";
     const char* screen_help =
     "Actions: \n"
@@ -397,8 +400,8 @@ int main(int argc, char** argv)
     cv::CommandLineParser parser(argc, argv, "{help h||}{w||}{h||}{s|1|}{i||}{o||}{@input|0|}");
     if (parser.has("help"))
     {
-        puts(helphelp);
-        puts(help);
+        puts(helphelp(argv).c_str());
+        puts(help.c_str());
         return 0;
     }
     string intrinsicsFilename;
@@ -419,26 +422,26 @@ int main(int argc, char** argv)
         inputName = samples::findFileOrKeep(parser.get("@input"));
     if (!parser.check())
     {
-        puts(help);
+        puts(help.c_str());
         parser.printErrors();
         return 0;
     }
     if ( boardSize.width <= 0 )
     {
         printf("Incorrect -w parameter (must be a positive integer)\n");
-        puts(help);
+        puts(help.c_str());
         return 0;
     }
     if ( boardSize.height <= 0 )
     {
         printf("Incorrect -h parameter (must be a positive integer)\n");
-        puts(help);
+        puts(help.c_str());
         return 0;
     }
     if ( squareSize <= 0 )
     {
         printf("Incorrect -s parameter (must be a positive real number)\n");
-        puts(help);
+        puts(help.c_str());
         return 0;
     }
     Mat cameraMatrix, distCoeffs;
diff --git a/samples/cpp/stereo_calib.cpp b/samples/cpp/stereo_calib.cpp
index 894261dab838..5c1471e4c2cf 100644
--- a/samples/cpp/stereo_calib.cpp
+++ b/samples/cpp/stereo_calib.cpp
@@ -38,7 +38,7 @@
 using namespace cv;
 using namespace std;
 
-static int print_help()
+static int print_help(char** argv)
 {
     cout <<
             " Given a list of chessboard images, the number of corners (nx, ny)\n"
@@ -49,7 +49,7 @@ static int print_help()
             " matrix separately) stereo. \n"
             " Calibrate the cameras and display the\n"
             " rectified results along with the computed disparity images. \n" << endl;
-    cout << "Usage:\n ./stereo_calib -w= -h= -s= \n" << endl;
+    cout << "Usage:\n " << argv[0] << " -w= -h= -s= \n" << endl;
     return 0;
 }
 
@@ -348,7 +348,7 @@ int main(int argc, char** argv)
     bool showRectified;
     cv::CommandLineParser parser(argc, argv, "{w|9|}{h|6|}{s|1.0|}{nr||}{help||}{@input|stereo_calib.xml|}");
     if (parser.has("help"))
-        return print_help();
+        return print_help(argv);
     showRectified = !parser.has("nr");
     imagelistfn = samples::findFile(parser.get("@input"));
     boardSize.width = parser.get("w");
@@ -364,7 +364,7 @@ int main(int argc, char** argv)
     if(!ok || imagelist.empty())
     {
         cout << "can not open " << imagelistfn << " or the string list is empty" << endl;
-        return print_help();
+        return print_help(argv);
     }
 
     StereoCalib(imagelist, boardSize, squareSize, false, true, showRectified);
diff --git a/samples/cpp/stereo_match.cpp b/samples/cpp/stereo_match.cpp
index 9194aa49430f..0cfaba6f702c 100644
--- a/samples/cpp/stereo_match.cpp
+++ b/samples/cpp/stereo_match.cpp
@@ -17,12 +17,12 @@
 
 using namespace cv;
 
-static void print_help()
+static void print_help(char** argv)
 {
     printf("\nDemo stereo matching converting L and R images into disparity and point clouds\n");
-    printf("\nUsage: stereo_match [--algorithm=bm|sgbm|hh|sgbm3way] [--blocksize=]\n"
+    printf("\nUsage: %s [--algorithm=bm|sgbm|hh|sgbm3way] [--blocksize=]\n"
            "[--max-disparity=] [--scale=scale_factor>] [-i=] [-e=]\n"
-           "[--no-display] [-o=] [-p=]\n");
+           "[--no-display] [-o=] [-p=]\n", argv[0]);
 }
 
 static void saveXYZ(const char* filename, const Mat& mat)
@@ -62,7 +62,7 @@ int main(int argc, char** argv)
         "{@arg1||}{@arg2||}{help h||}{algorithm||}{max-disparity|0|}{blocksize|0|}{no-display||}{scale|1|}{i||}{e||}{o||}{p||}");
     if(parser.has("help"))
     {
-        print_help();
+        print_help(argv);
         return 0;
     }
     img1_filename = samples::findFile(parser.get(0));
@@ -96,13 +96,13 @@ int main(int argc, char** argv)
     if( alg < 0 )
     {
         printf("Command-line parameter error: Unknown stereo algorithm\n\n");
-        print_help();
+        print_help(argv);
         return -1;
     }
     if ( numberOfDisparities < 1 || numberOfDisparities % 16 != 0 )
     {
         printf("Command-line parameter error: The max disparity (--maxdisparity=<...>) must be a positive integer divisible by 16\n");
-        print_help();
+        print_help(argv);
         return -1;
     }
     if (scale < 0)
diff --git a/samples/cpp/stitching_detailed.cpp b/samples/cpp/stitching_detailed.cpp
index a556217f8b15..15534e7b3aba 100644
--- a/samples/cpp/stitching_detailed.cpp
+++ b/samples/cpp/stitching_detailed.cpp
@@ -29,11 +29,11 @@
 using namespace std;
 using namespace cv;
 using namespace cv::detail;
 
-static void printUsage()
+static void printUsage(char** argv)
 {
     cout <<
         "Rotation model images stitcher.\n\n"
-        "stitching_detailed img1 img2 [...imgN] [flags]\n\n"
+        << argv[0] << " img1 img2 [...imgN] [flags]\n\n"
         "Flags:\n"
         "  --preview\n"
         "      Run stitching in the preview mode. Works faster than usual mode,\n"
@@ -147,14 +147,14 @@ static int parseCmdArgs(int argc, char** argv)
 {
     if (argc == 1)
     {
-        printUsage();
+        printUsage(argv);
         return -1;
     }
 
     for (int i = 1; i < argc; ++i)
     {
         if (string(argv[i]) == "--help" || string(argv[i]) == "/?")
         {
-            printUsage();
+            printUsage(argv);
             return -1;
         }
         else if (string(argv[i]) == "--preview")
diff --git a/samples/cpp/tree_engine.cpp b/samples/cpp/tree_engine.cpp
index 96b2adf83727..956deb8f78f4 100644
--- a/samples/cpp/tree_engine.cpp
+++ b/samples/cpp/tree_engine.cpp
@@ -8,14 +8,14 @@
 using namespace cv;
 using namespace cv::ml;
 
-static void help()
+static void help(char** argv)
 {
     printf( "\nThis sample demonstrates how to use different decision trees and forests including boosting and random trees.\n"
-    "Usage:\n\t./tree_engine [-r=] [-ts=type_spec] \n"
+    "Usage:\n\t%s [-r=] [-ts=type_spec] \n"
    "where -r= specified the 0-based index of the response (0 by default)\n"
    "-ts= specifies the var type spec in the form ord[n1,n2-n3,n4-n5,...]cat[m1-m2,m3,m4-m5,...]\n"
-    " is the name of training data file in comma-separated value format\n\n");
+    " is the name of training data file in comma-separated value format\n\n", argv[0]);
 }
 
 static void train_and_print_errs(Ptr model, const Ptr& data)
@@ -37,7 +37,7 @@ int main(int argc, char** argv)
     cv::CommandLineParser parser(argc, argv, "{ help h | | }{r | 0 | }{ts | | }{@input | | }");
     if (parser.has("help"))
     {
-        help();
+        help(argv);
         return 0;
     }
     std::string filename = parser.get("@input");
@@ -48,7 +48,7 @@ int main(int argc, char** argv)
     if( filename.empty() || !parser.check() )
     {
         parser.printErrors();
-        help();
+        help(argv);
         return 0;
     }
     printf("\nReading in %s...\n\n",filename.c_str());
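
For reference, every hunk above applies the same mechanical change, and a minimal sketch may help reviewers see the intended pattern in one place. The snippet below is illustrative only and is not part of the patch; the keys string, option names, and messages are made up, not taken from any of the samples.

// Sketch of the pattern applied throughout these samples: help() receives argv
// so the usage text prints the real binary name, argv[0], instead of a
// hard-coded "./sample" path; call sites simply pass argv through.
#include <iostream>
#include <opencv2/core/utility.hpp>  // cv::CommandLineParser

static void help(char** argv)
{
    std::cout << "\nUsage:\n  " << argv[0] << " [@image] [--help]\n" << std::endl;
}

int main(int argc, char** argv)
{
    // Placeholder keys string; each sample defines its own options.
    cv::CommandLineParser parser(argc, argv, "{help h||}{@image||}");
    if (parser.has("help"))
    {
        help(argv);  // pass argv down instead of calling a zero-argument help()
        return 0;
    }
    // ... sample-specific processing would go here ...
    return 0;
}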