diff --git a/CMakeLists.txt b/CMakeLists.txt index 2a7f1c73..495f0f2a 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -31,23 +31,23 @@ include_directories(${CURL_INCLUDE_DIRS}) # -- Yaml-cpp # find the yaml-cpp include directory find_path(YAMLCPP_INCLUDE_DIR yaml-cpp/yaml.h - PATH_SUFFIXES include - PATHS - ~/Library/Frameworks/yaml-cpp/include/ - /Library/Frameworks/yaml-cpp/include/ - /usr/local/include/ - /usr/include/ - /sw/yaml-cpp/ # Fink - /opt/local/yaml-cpp/ # DarwinPorts - /opt/csw/yaml-cpp/ # Blastwave - /opt/yaml-cpp/ - ${YAMLCPP_DIR}/include/) + PATH_SUFFIXES include + PATHS + ~/Library/Frameworks/yaml-cpp/include/ + /Library/Frameworks/yaml-cpp/include/ + /usr/local/include/ + /usr/include/ + /sw/yaml-cpp/ # Fink + /opt/local/yaml-cpp/ # DarwinPorts + /opt/csw/yaml-cpp/ # Blastwave + /opt/yaml-cpp/ + ${YAMLCPP_DIR}/include/) # find the yaml-cpp library find_library(YAMLCPP_LIBRARY - NAMES ${YAMLCPP_STATIC} yaml-cpp - PATH_SUFFIXES lib64 lib - PATHS ~/Library/Frameworks + NAMES ${YAMLCPP_STATIC} yaml-cpp + PATH_SUFFIXES lib64 lib + PATHS ~/Library/Frameworks /Library/Frameworks /usr/local /usr @@ -97,10 +97,10 @@ endif() # Use C++11 if(UNIX) - #-Wno-deprecated-declarations hides warning in yaml-cpp (using std::auto_ptr) + #-Wno-deprecated-declarations hides warning in yaml-cpp (using std::auto_ptr) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -Wall -Wno-deprecated-declarations") elseif(MSVC) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /MP") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /MP /D NOMINMAX") endif() @@ -184,6 +184,8 @@ set(SOURCE_FILES ${HELPERS_DIR}/logger.cpp ${HELPERS_DIR}/string_utils.h ${HELPERS_DIR}/string_utils.cpp + ${HELPERS_DIR}/type_utils.h + ${HELPERS_DIR}/format.h ${CONFIG_DIR}/worker_config.cpp ${CONFIG_DIR}/worker_config.h @@ -213,6 +215,8 @@ set(SOURCE_FILES ${COMMAND_DIR}/jobs_client_commands.h ) +include_directories(AFTER, ${SRC_DIR}) + add_executable(${EXEC_NAME} ${SOURCE_FILES}) @@ -238,7 +242,7 @@ if(UNIX) target_link_libraries(${EXEC_NAME} -lzmq) target_link_libraries(${EXEC_NAME} pthread) elseif(MSVC) - target_link_libraries(${EXEC_NAME} ${ZEROMQ_LIB}) + target_link_libraries(${EXEC_NAME} ${ZEROMQ_LIB}) endif() @@ -326,3 +330,4 @@ add_custom_target(lines COMMENT "Counting lines" VERBATIM ) + diff --git a/judges/recodex_token_judge/bpplib/algo/lcs.hpp b/judges/recodex_token_judge/bpplib/algo/lcs.hpp index 472710ad..72318d42 100644 --- a/judges/recodex_token_judge/bpplib/algo/lcs.hpp +++ b/judges/recodex_token_judge/bpplib/algo/lcs.hpp @@ -18,7 +18,7 @@ namespace bpp * \tparam RES The result type (must be an integral type). * \tparam CONTAINER Class holding the sequence. The class must have size() method * and the comparator must be able to get values from the container based on their indices. - * \tparma COMPARATOR Comparator class holds a static method compare(seq1, i1, seq2, i2) -> bool. + * \tparam COMPARATOR Comparator class holds a static method compare(seq1, i1, seq2, i2) -> bool. * I.e., the comparator is also responsible for fetching values from the seq. containers. */ template @@ -31,7 +31,7 @@ namespace bpp const CONTAINER &seq2 = sequence1.size() < sequence2.size() ? sequence1 : sequence2; std::vector row((std::size_t) seq2.size()); - std::size_t rows = (std::size_t) seq1.size(); + auto rows = (std::size_t) seq1.size(); // Dynamic programming - matrix traversal that keeps only the last row. for (std::size_t r = 0; r < rows; ++r) { @@ -65,7 +65,7 @@ namespace bpp * \tparam RES The result type (must be an integral type). 
* \tparam CONTAINER Class holding the sequence. The class must have size() method * and the comparator must be able to get values from the container based on their indices. - * \tparma COMPARATOR Comparator class holds a static method compare(seq1, i1, seq2, i2) -> bool. + * \tparam COMPARATOR Comparator class holds a static method compare(seq1, i1, seq2, i2) -> bool. * I.e., the comparator is also responsible for fetching values from the seq. containers. */ template diff --git a/judges/recodex_token_judge/bpplib/cli/args.hpp b/judges/recodex_token_judge/bpplib/cli/args.hpp index e2e958b4..72c2909d 100644 --- a/judges/recodex_token_judge/bpplib/cli/args.hpp +++ b/judges/recodex_token_judge/bpplib/cli/args.hpp @@ -44,9 +44,7 @@ namespace bpp ArgumentException(const std::string &msg) : RuntimeError(msg) { } - virtual ~ArgumentException() throw() - { - } + ~ArgumentException() noexcept override = default; /* * Overloading << operator that uses stringstream to append data to mMessage. @@ -106,26 +104,26 @@ namespace bpp // Constraint checks are done only if the argument is present. if (isPresent()) { // Check for collisions. - for (auto it = mConflictsWith.begin(); it != mConflictsWith.end(); ++it) { - if (arguments.find(*it) == arguments.end()) + for (const auto &it : mConflictsWith) { + if (arguments.find(it) == arguments.end()) throw(ArgumentException() - << "Internal Error: Argument '" << mName << "' has unspecified argument '" << *it + << "Internal Error: Argument '" << mName << "' has unspecified argument '" << it << "' on its collision list."); - if (arguments.find(*it)->second->isPresent()) + if (arguments.find(it)->second->isPresent()) throw(ArgumentException() - << "The argument '" << mName << "' conflicts with argument '" << *it << "'."); + << "The argument '" << mName << "' conflicts with argument '" << it << "'."); } // Check for requirements. - for (auto it = mRequiresAlso.begin(); it != mRequiresAlso.end(); ++it) { - if (arguments.find(*it) == arguments.end()) + for (const auto & it : mRequiresAlso) { + if (arguments.find(it) == arguments.end()) throw(ArgumentException() - << "Internal Error: Argument '" << mName << "' has unspecified argument '" << *it + << "Internal Error: Argument '" << mName << "' has unspecified argument '" << it << "' on its requirements list."); - if (!arguments.find(*it)->second->isPresent()) - throw(ArgumentException() << "The argument '" << *it << "' is also required when '" << mName + if (!arguments.find(it)->second->isPresent()) + throw(ArgumentException() << "The argument '" << it << "' is also required when '" << mName << "' was specified."); } } @@ -161,9 +159,7 @@ namespace bpp } // Enforce virtual destructor for descendants. - virtual ~ArgBase() - { - } + virtual ~ArgBase() = default; /** @@ -242,10 +238,10 @@ namespace bpp class ArgBool : public ArgBase { public: - typedef bool value_t; + using value_t = bool; protected: - virtual void process(int &, const char **&) + void process(int &, const char **&) override { this->mPresent = true; } @@ -269,7 +265,7 @@ namespace bpp class ArgIntBase : public ArgBase { public: - typedef std::int64_t value_t; + using value_t = std::int64_t; protected: value_t mMin; ///< Range constraint for the value. @@ -350,7 +346,7 @@ namespace bpp value_t mValue; ///< Parsed value of the argument. 
protected: - virtual void process(int &argc, const char **(&argv)) + void process(int &argc, const char **(&argv)) override { mValue = this->processInt(argc, argv); this->mPresent = true; @@ -379,7 +375,7 @@ namespace bpp if (getAsUint() > (std::uint64_t) std::numeric_limits::max()) throw(bpp::ArgumentException() - << "Unable to convert int argument '" << this->getName() << "' to size_t."); + << "Unable to convert int argument '" << this->getName() << "' to std::size_t."); return (std::size_t) mValue; } @@ -420,7 +416,7 @@ namespace bpp std::vector mValues; ///< Parsed values of the argument. protected: - virtual void process(int &argc, const char **(&argv)) + void process(int &argc, const char **(&argv)) override { if (!this->mPresent) mValues.clear(); @@ -479,7 +475,7 @@ namespace bpp class ArgFloatBase : public ArgBase { public: - typedef double value_t; + using value_t = double; protected: value_t mMin; ///< Range constraint for the value. @@ -548,7 +544,7 @@ namespace bpp value_t mValue; ///< Parsed value of the argument. protected: - virtual void process(int &argc, const char **(&argv)) + void process(int &argc, const char **(&argv)) override { mValue = this->processFloat(argc, argv); this->mPresent = true; @@ -583,7 +579,7 @@ namespace bpp std::vector mValues; ///< Parsed values of the argument. protected: - virtual void process(int &argc, const char **(&argv)) + void process(int &argc, const char **(&argv)) override { if (!this->mPresent) mValues.clear(); @@ -643,12 +639,12 @@ namespace bpp class ArgString : public ArgBase { public: - typedef std::string value_t; + using value_t = std::string; protected: std::string mValue; ///< The value of the argument stored after parsing. - virtual void process(int &argc, const char **(&argv)) + void process(int &argc, const char **(&argv)) override { if (argc == 0) throw(ArgumentException() << "Value of argument '" << this->getName() << "' is missing!"); @@ -682,7 +678,7 @@ namespace bpp class ArgEnum : public ArgString { public: - typedef std::string value_t; + using value_t = std::string; private: std::string mNormalizedValue; @@ -702,7 +698,7 @@ namespace bpp mOptions.insert(mCaseSensitive ? str : toLower(str)); } - virtual void process(int &argc, const char **(&argv)) + void process(int &argc, const char **(&argv)) override { ArgString::process(argc, argv); @@ -769,7 +765,7 @@ namespace bpp std::vector mValues; ///< The list of values of the argument stored after parsing. 
protected: - virtual void process(int &argc, const char **(&argv)) + void process(int &argc, const char **(&argv)) override { if (argc == 0) throw(ArgumentException() << "Value of argument '" << this->getName() << "' is missing!"); @@ -1110,12 +1106,12 @@ namespace bpp stream << "Usage: " << Path::getFileName(getProgramName()) << std::endl; stream << "Named arguments:" << std::endl; - for (auto it = mArguments.begin(); it != mArguments.end(); ++it) { - stream << " " << it->first << " - " << it->second->getComment() << std::endl; + for (const auto &mArgument : mArguments) { + stream << " " << mArgument.first << " - " << mArgument.second->getComment() << std::endl; } stream << "Nameless arguments (" << mNamelessMin << ", " << mNamelessMax << "):"; - for (std::size_t i = 0; i < mNamelessCaptions.size(); ++i) { stream << " " << mNamelessCaptions[i]; } + for (const auto &mNamelessCaption : mNamelessCaptions) { stream << " " << mNamelessCaption; } stream << std::endl; } }; diff --git a/judges/recodex_token_judge/bpplib/cli/logger.hpp b/judges/recodex_token_judge/bpplib/cli/logger.hpp index 3a5ce141..9073c5ec 100644 --- a/judges/recodex_token_judge/bpplib/cli/logger.hpp +++ b/judges/recodex_token_judge/bpplib/cli/logger.hpp @@ -89,7 +89,7 @@ namespace bpp std::size_t applySizeLimit(LogSeverity &severity) { std::size_t total = 0; - for (std::size_t i = (std::size_t) LogSeverity::UNDEFINED; i < (std::size_t) severity; ++i) { + for (auto i = (std::size_t) LogSeverity::UNDEFINED; i < (std::size_t) severity; ++i) { total += mLengths[(LogSeverity) i]; if (total >= mMaxLength) { // max log length would be exceeded including current severity into output severity = (LogSeverity) i; @@ -215,8 +215,8 @@ namespace bpp std::size_t size(LogSeverity severity = LogSeverity::ANY) const { std::size_t size = 0; - for (std::size_t i = (std::size_t) LogSeverity::UNDEFINED; i <= (std::size_t) severity; ++i) { - LogSeverity s = (LogSeverity) i; + for (auto i = (std::size_t) LogSeverity::UNDEFINED; i <= (std::size_t) severity; ++i) { + auto s = (LogSeverity) i; auto it = mLengths.find(s); if (it != mLengths.end()) { size += it->second; } diff --git a/judges/recodex_token_judge/bpplib/misc/exception.hpp b/judges/recodex_token_judge/bpplib/misc/exception.hpp index 95f5c428..4473c4fc 100644 --- a/judges/recodex_token_judge/bpplib/misc/exception.hpp +++ b/judges/recodex_token_judge/bpplib/misc/exception.hpp @@ -33,11 +33,9 @@ namespace bpp StreamException(const std::string &msg) : std::exception(), mMessage(msg) { } - virtual ~StreamException() throw() - { - } + ~StreamException() noexcept override = default; - virtual const char *what() const throw() + const char *what() const noexcept override { return mMessage.c_str(); } @@ -68,9 +66,7 @@ namespace bpp RuntimeError(const std::string &msg) : StreamException(msg) { } - virtual ~RuntimeError() throw() - { - } + ~RuntimeError() noexcept override = default; // Overloading << operator that uses stringstream to append data to mMessage. @@ -102,10 +98,7 @@ namespace bpp LogicError(const std::string &msg) : RuntimeError(msg) { } - virtual ~LogicError() throw() - { - } - + ~LogicError() noexcept override = default; // Overloading << operator that uses stringstream to append data to mMessage. 
template LogicError &operator<<(const T &data) @@ -133,10 +126,7 @@ namespace bpp NotImplementedError(const std::string &msg) : RuntimeError(msg) { } - virtual ~NotImplementedError() throw() - { - } - + ~NotImplementedError() noexcept override = default; // Overloading << operator that uses stringstream to append data to mMessage. template NotImplementedError &operator<<(const T &data) diff --git a/judges/recodex_token_judge/bpplib/system/filesystem.hpp b/judges/recodex_token_judge/bpplib/system/filesystem.hpp index d98a6cd3..5a4ccd78 100644 --- a/judges/recodex_token_judge/bpplib/system/filesystem.hpp +++ b/judges/recodex_token_judge/bpplib/system/filesystem.hpp @@ -45,9 +45,7 @@ namespace bpp FileError(const std::string &msg) : RuntimeError(msg) { } - virtual ~FileError() throw() - { - } + ~FileError() noexcept override = default; // Overloading << operator that uses stringstream to append data to mMessage. @@ -75,7 +73,7 @@ namespace bpp */ static std::string getFileName(const std::string &path) { - size_t pos = path.find_last_of("/\\"); + std::size_t pos = path.find_last_of("/\\"); return (pos != std::string::npos) ? path.substr(pos + 1) : path; } diff --git a/judges/recodex_token_judge/bpplib/system/mmap_file.hpp b/judges/recodex_token_judge/bpplib/system/mmap_file.hpp index 15877f18..3795c64f 100644 --- a/judges/recodex_token_judge/bpplib/system/mmap_file.hpp +++ b/judges/recodex_token_judge/bpplib/system/mmap_file.hpp @@ -43,7 +43,7 @@ namespace bpp #ifdef _WIN32 typedef LONGLONG length_t; #else - typedef size_t length_t; + using length_t = std::size_t; #endif void *mData; ///< Pointer to memory area where the file is mapped. @@ -123,7 +123,7 @@ namespace bpp throw RuntimeError("Cannot mmap the file."); } #endif - }; + } /** @@ -189,7 +189,8 @@ namespace bpp if (!opened()) throw RuntimeError("The file must be opened before prepopulation."); // Traverse the mapped file accessing first dword on each page. - unsigned x, *data = (unsigned *) getData(); + unsigned x = 0; + auto *data = (unsigned *) getData(); for (length_t i = 0; i < mLength / 4096; ++i) { x ^= *data; // read from page data += 4096 / sizeof(unsigned); // move to another page diff --git a/judges/recodex_token_judge/comparator.hpp b/judges/recodex_token_judge/comparator.hpp index 8e316df7..8e951524 100644 --- a/judges/recodex_token_judge/comparator.hpp +++ b/judges/recodex_token_judge/comparator.hpp @@ -48,15 +48,15 @@ template bool try_get_double(const STRING &str, double &res) template class TokenComparator { public: - typedef CHAR char_t; - typedef OFFSET offset_t; + using char_t = CHAR; + using offset_t = OFFSET; private: /** * Internal structure where tokens are loaded if more commplex comparison is required. */ struct TokenPair { - typedef std::basic_string, std::allocator> string_t; + using string_t = std::basic_string, std::allocator>; public: string_t token[2]; @@ -201,11 +201,11 @@ template class TokenComp template class LineComparator { public: - typedef CHAR char_t; - typedef OFFSET offset_t; - typedef RESULT result_t; - typedef typename Reader::Line line_t; - typedef typename Reader::TokenRef token_t; + using char_t = CHAR; + using offset_t = OFFSET; + using result_t = RESULT; + using line_t = typename Reader::Line; + using token_t = typename Reader::TokenRef; private: TokenComparator &mTokenComparator; ///< Token comparator used for comparing tokens on the lines. 
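The `bpplib/algo/lcs.hpp` hunks earlier in this patch only touch a Doxygen typo and an `auto` conversion, but the algorithm they live in — computing an LCS length while keeping just one row of the DP matrix — is hard to follow in patch form. Below is a minimal, self-contained sketch of that row-only technique using plain `std::string`; the names are illustrative and this is not the templated bpplib interface, where element access goes through the `COMPARATOR` parameter so the same routine can score both token and line sequences.

```cpp
#include <algorithm>
#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

// LCS length with O(min(|a|, |b|)) memory: only the last row of the DP matrix
// is kept, mirroring the "keeps only the last row" comment in lcs.hpp.
std::size_t lcs_length(const std::string &seq1, const std::string &seq2)
{
	const std::string &a = seq1.size() >= seq2.size() ? seq1 : seq2; // longer sequence drives the rows
	const std::string &b = seq1.size() >= seq2.size() ? seq2 : seq1; // shorter sequence sizes the row buffer

	std::vector<std::size_t> row(b.size(), 0);
	for (std::size_t r = 0; r < a.size(); ++r) {
		std::size_t upperLeft = 0; // value of row[c - 1] before this iteration of r
		for (std::size_t c = 0; c < b.size(); ++c) {
			std::size_t upper = row[c]; // value from the previous row, same column
			row[c] = (a[r] == b[c]) ? upperLeft + 1 : std::max(row[c], c > 0 ? row[c - 1] : std::size_t(0));
			upperLeft = upper;
		}
	}
	return b.empty() ? 0 : row.back();
}

int main()
{
	std::cout << lcs_length("recodex", "decode") << std::endl; // prints 5 ("ecode")
}
```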
diff --git a/judges/recodex_token_judge/judge.hpp b/judges/recodex_token_judge/judge.hpp index fad096a5..f762a530 100644 --- a/judges/recodex_token_judge/judge.hpp +++ b/judges/recodex_token_judge/judge.hpp @@ -3,6 +3,7 @@ #include +#include #include #include @@ -17,10 +18,10 @@ template class Judge { public: - typedef READER reader_t; - typedef typename READER::Line line_t; - typedef LINE_COMPARATOR line_comparator_t; - typedef typename LINE_COMPARATOR::result_t score_t; + using reader_t = READER; + using line_t = typename READER::Line; + using line_comparator_t = LINE_COMPARATOR; + using score_t = typename LINE_COMPARATOR::result_t; private: /** @@ -464,4 +465,4 @@ template class Judge }; -#endif \ No newline at end of file +#endif diff --git a/judges/recodex_token_judge/reader.hpp b/judges/recodex_token_judge/reader.hpp index af61bc8f..df22e045 100644 --- a/judges/recodex_token_judge/reader.hpp +++ b/judges/recodex_token_judge/reader.hpp @@ -23,8 +23,8 @@ template class Reader { public: - typedef CHAR char_t; - typedef OFFSET offset_t; + using char_t = CHAR; + using offset_t = OFFSET; /** diff --git a/src/archives/archivator.cpp b/src/archives/archivator.cpp index ba5d48fa..c9e6d543 100644 --- a/src/archives/archivator.cpp +++ b/src/archives/archivator.cpp @@ -7,10 +7,6 @@ void archivator::compress(const std::string &dir, const std::string &destination) { - archive *a; - archive_entry *entry; - int r; - std::map files; fs::path dir_path; try { @@ -42,26 +38,26 @@ void archivator::compress(const std::string &dir, const std::string &destination throw archive_exception(e.what()); } - a = archive_write_new(); - if (a == NULL) { throw archive_exception("Cannot create destination archive."); } - if (archive_write_set_format_zip(a) != ARCHIVE_OK) { + std::unique_ptr a = {archive_write_new(), archive_write_free}; + if (a == nullptr) { throw archive_exception("Cannot create destination archive."); } + if (archive_write_set_format_zip(a.get()) != ARCHIVE_OK) { throw archive_exception("Cannot set ZIP format on destination archive."); } - if (archive_write_open_filename(a, destination.c_str()) != ARCHIVE_OK) { + if (archive_write_open_filename(a.get(), destination.c_str()) != ARCHIVE_OK) { throw archive_exception("Cannot open destination archive."); } for (auto &file : files) { - entry = archive_entry_new(); + std::unique_ptr entry = {archive_entry_new(), archive_entry_free}; - archive_entry_set_pathname(entry, (fs::path(destination).stem() / file.second).string().c_str()); - archive_entry_set_size(entry, fs::file_size(file.first)); - archive_entry_set_mtime(entry, fs::last_write_time(file.first), 0); // 0 nanoseconds - archive_entry_set_filetype(entry, AE_IFREG); - archive_entry_set_perm(entry, 0644); + archive_entry_set_pathname(entry.get(), (fs::path(destination).stem() / file.second).string().c_str()); + archive_entry_set_size(entry.get(), fs::file_size(file.first)); + archive_entry_set_mtime(entry.get(), fs::last_write_time(file.first), 0); // 0 nanoseconds + archive_entry_set_filetype(entry.get(), AE_IFREG); + archive_entry_set_perm(entry.get(), 0644); - r = archive_write_header(a, entry); - if (r < ARCHIVE_OK) { throw archive_exception(archive_error_string(a)); } + int r = archive_write_header(a.get(), entry.get()); + if (r < ARCHIVE_OK) { throw archive_exception(archive_error_string(a.get())); } std::ifstream ifs((file.first).string(), std::ios::in | std::ios::binary); if (ifs.is_open()) { @@ -78,27 +74,20 @@ void archivator::compress(const std::string &dir, const std::string &destination 
throw archive_exception("Error reading input file."); } - r = archive_write_data(a, buff, static_cast(ifs.gcount())); - if (r < ARCHIVE_OK) { throw archive_exception(archive_error_string(a)); } + r = archive_write_data(a.get(), buff, static_cast(ifs.gcount())); + if (r < ARCHIVE_OK) { throw archive_exception(archive_error_string(a.get())); } } } else { throw archive_exception("Cannot open file " + (file.first).string() + " for reading."); } - archive_entry_free(entry); } - archive_write_close(a); - archive_write_free(a); + + archive_write_close(a.get()); } void archivator::decompress(const std::string &filename, const std::string &destination) { - archive *a; - archive *ext; - archive_entry *entry; - int flags; - int r; - if (!fs::is_directory(destination)) { throw archive_exception("Destination '" + destination + "' is not a directory. Cannot decompress archive."); } @@ -107,35 +96,38 @@ void archivator::decompress(const std::string &filename, const std::string &dest } // Select which attributes we want to restore. + int flags; flags = ARCHIVE_EXTRACT_TIME; flags |= ARCHIVE_EXTRACT_FFLAGS; // Don't allow ".." in any path within archive flags |= ARCHIVE_EXTRACT_SECURE_NODOTDOT; - a = archive_read_new(); - if (a == NULL) { throw archive_exception("Cannot create source archive."); } - if (archive_read_support_format_all(a) != ARCHIVE_OK) { + std::unique_ptr a = {archive_write_new(), archive_write_free}; + if (a == nullptr) { throw archive_exception("Cannot create source archive."); } + if (archive_read_support_format_all(a.get()) != ARCHIVE_OK) { throw archive_exception("Cannot set formats for source archive."); } - if (archive_read_support_compression_all(a) != ARCHIVE_OK) { + if (archive_read_support_filter_all(a.get()) != ARCHIVE_OK) { throw archive_exception("Cannot set compression methods for source archive."); } - ext = archive_write_disk_new(); - if (ext == NULL) { throw archive_exception("Cannot allocate archive entry."); } - if (archive_write_disk_set_options(ext, flags) != ARCHIVE_OK) { + + std::unique_ptr ext = {archive_write_disk_new(), archive_write_free}; + if (ext == nullptr) { throw archive_exception("Cannot allocate archive entry."); } + if (archive_write_disk_set_options(ext.get(), flags) != ARCHIVE_OK) { throw archive_exception("Cannot set options for writing to disk."); } - if (archive_write_disk_set_standard_lookup(ext) != ARCHIVE_OK) { + if (archive_write_disk_set_standard_lookup(ext.get()) != ARCHIVE_OK) { throw archive_exception("Cannot set lookup for writing to disk."); } - r = archive_read_open_filename(a, filename.c_str(), 10240); + int r = archive_read_open_filename(a.get(), filename.c_str(), 10240); if (r < ARCHIVE_OK) { throw archive_exception("Cannot open source archive."); } while (true) { - r = archive_read_next_header(a, &entry); + archive_entry *entry; + r = archive_read_next_header(a.get(), &entry); if (r == ARCHIVE_EOF) { break; } - if (r < ARCHIVE_OK) { throw archive_exception(archive_error_string(a)); } + if (r < ARCHIVE_OK) { throw archive_exception(archive_error_string(a.get())); } const char *current_file = archive_entry_pathname(entry); const std::string full_path = (fs::path(destination) / current_file).string(); @@ -149,28 +141,26 @@ void archivator::decompress(const std::string &filename, const std::string &dest throw archive_exception("Unsupported archive entry filetype."); } - r = archive_write_header(ext, entry); - if (r < ARCHIVE_OK) { throw archive_exception(archive_error_string(ext)); } + r = archive_write_header(ext.get(), entry); + if (r 
< ARCHIVE_OK) { throw archive_exception(archive_error_string(ext.get())); } - if (archive_entry_size(entry) > 0) { copy_data(a, ext); } + if (archive_entry_size(entry) > 0) { copy_data(a.get(), ext.get()); } - r = archive_write_finish_entry(ext); - if (r < ARCHIVE_OK) { throw archive_exception(archive_error_string(ext)); } + r = archive_write_finish_entry(ext.get()); + if (r < ARCHIVE_OK) { throw archive_exception(archive_error_string(ext.get())); } } - archive_read_close(a); - archive_read_free(a); - archive_write_close(ext); - archive_write_free(ext); + archive_read_close(a.get()); + archive_write_close(ext.get()); } void archivator::copy_data(archive *ar, archive *aw) { - int r; + std::int64_t r; const void *buff; - size_t size; - int64_t offset; + std::size_t size; + std::int64_t offset; while (true) { r = archive_read_data_block(ar, &buff, &size, &offset); diff --git a/src/archives/archivator.h b/src/archives/archivator.h index b524b2f1..9336326e 100644 --- a/src/archives/archivator.h +++ b/src/archives/archivator.h @@ -76,22 +76,20 @@ class archive_exception : public std::exception * Constructor with custom string. * @param what String with description of failure. */ - archive_exception(std::string what) : what_(what) + archive_exception(const std::string &what) : what_(what) { } /** * Destructor. */ - virtual ~archive_exception() - { - } + ~archive_exception() override = default; /** * Get failure description. * @return Stored string. */ - virtual const char *what() const noexcept + const char *what() const noexcept override { return what_.c_str(); } diff --git a/src/broker_connection.h b/src/broker_connection.h index facda583..09a519fa 100644 --- a/src/broker_connection.h +++ b/src/broker_connection.h @@ -24,7 +24,7 @@ struct message_origin { /** * A set of origins from which there are incoming messages */ - typedef std::bitset<3> set; + using set = std::bitset<3>; }; /** @@ -126,7 +126,7 @@ template class broker_connection { const std::chrono::milliseconds ping_interval = config_->get_broker_ping_interval(); std::chrono::milliseconds poll_limit = ping_interval; - size_t broker_liveness = config_->get_max_broker_liveness(); + std::size_t broker_liveness = config_->get_max_broker_liveness(); while (true) { std::vector msg; diff --git a/src/commands/command_holder.h b/src/commands/command_holder.h index 49e897a1..2758220b 100644 --- a/src/commands/command_holder.h +++ b/src/commands/command_holder.h @@ -6,9 +6,9 @@ #include #include #include -#include "../helpers/logger.h" -#include "../job/job_evaluator_interface.h" -#include "../config/worker_config.h" +#include "helpers/logger.h" +#include "job/job_evaluator_interface.h" +#include "config/worker_config.h" /** @@ -79,7 +79,7 @@ template class command_holder { public: /** Type of callback function for easier use. */ - typedef std::function &, const command_context &)> callback_fn; + using callback_fn = std::function &, const command_context &)>; /** * Constructor with initialization of dependent (templated) part of context and logger. 
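Stepping back to the `src/archives/archivator.cpp` hunks above: they replace manually freed `archive *` handles with smart pointers. Because the template arguments did not survive in this copy of the patch, here is a self-contained sketch of the underlying idiom — `std::unique_ptr` with a libarchive function-pointer deleter. The `write_empty_zip` helper is purely illustrative and not a function from the worker.

```cpp
#include <archive.h>
#include <memory>
#include <stdexcept>
#include <string>

// The deleter is the function pointer type of archive_write_free(), so the
// handle is released even when an exception unwinds the stack.
using write_archive_ptr = std::unique_ptr<archive, decltype(&archive_write_free)>;

void write_empty_zip(const std::string &destination)
{
	write_archive_ptr a{archive_write_new(), archive_write_free};
	if (a == nullptr) { throw std::runtime_error("Cannot create destination archive."); }
	if (archive_write_set_format_zip(a.get()) != ARCHIVE_OK) {
		throw std::runtime_error("Cannot set ZIP format on destination archive.");
	}
	if (archive_write_open_filename(a.get(), destination.c_str()) != ARCHIVE_OK) {
		throw std::runtime_error("Cannot open destination archive.");
	}
	archive_write_close(a.get()); // close explicitly so errors surface here; the deleter then frees the handle
}
```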
diff --git a/src/commands/jobs_client_commands.h b/src/commands/jobs_client_commands.h index 704ea7b4..2265b1ba 100644 --- a/src/commands/jobs_client_commands.h +++ b/src/commands/jobs_client_commands.h @@ -2,9 +2,9 @@ #define RECODEX_WORKER_JOBS_CLIENT_COMMANDS_H #include "command_holder.h" -#include "../helpers/zmq_socket.h" -#include "../eval_request.h" -#include "../eval_response.h" +#include "helpers/zmq_socket.h" +#include "eval_request.h" +#include "eval_response.h" /** * Commands from worker "main" thread. diff --git a/src/config/fileman_config.h b/src/config/fileman_config.h index 3552d3fc..38aff97c 100644 --- a/src/config/fileman_config.h +++ b/src/config/fileman_config.h @@ -1,6 +1,7 @@ #ifndef RECODEX_WORKER_FILEMAN_CONFIG_H #define RECODEX_WORKER_FILEMAN_CONFIG_H +#include /** * Struct which stores informations which are usefull in file managers. diff --git a/src/config/log_config.h b/src/config/log_config.h index fe4d077e..36b752a1 100644 --- a/src/config/log_config.h +++ b/src/config/log_config.h @@ -2,7 +2,7 @@ #define RECODEX_WORKER_LOG_CONFIG_H #include "spdlog/spdlog.h" - +#include /** * Structure which stores all information needed to initialize logger. @@ -18,9 +18,9 @@ struct log_config { /** Level of logging. Log levels are taken from spdlog. */ std::string log_level = "debug"; /** File size of one log file. */ - int log_file_size = 1024 * 1024; + std::size_t log_file_size = 1024 * 1024; /** Number of rotations which will be used. */ - int log_files_count = 3; + std::size_t log_files_count = 3; /** * Classical equality operator on log_config structures. diff --git a/src/config/sandbox_config.h b/src/config/sandbox_config.h index 926d85bb..ace2083f 100644 --- a/src/config/sandbox_config.h +++ b/src/config/sandbox_config.h @@ -2,6 +2,7 @@ #define RECODEX_WORKER_SANDBOX_CONFIG_H #include +#include #include "sandbox_limits.h" @@ -68,9 +69,7 @@ class sandbox_config /** * Constructor with defaults. */ - sandbox_config() - { - } + sandbox_config() = default; }; #endif // RECODEX_WORKER_SANDBOX_CONFIG_H diff --git a/src/config/sandbox_limits.h b/src/config/sandbox_limits.h index 53aa663c..85de4976 100644 --- a/src/config/sandbox_limits.h +++ b/src/config/sandbox_limits.h @@ -7,6 +7,7 @@ #include #include #include +#include "helpers/type_utils.h" /** @@ -30,12 +31,12 @@ struct sandbox_limits { * Limit memory usage. For Isolate, this limits whole control group (--cg-mem switch). * Memory size is set in kilobytes. */ - size_t memory_usage = 0; + std::size_t memory_usage = 0; /** * Extra memory which will be added to memory limit before killing program. * Memory size is set in kilobytes. */ - size_t extra_memory = 0; + std::size_t extra_memory = 0; /** * Limit total run time by CPU time. For Isolate, this is for whole control group. * Time is set in seconds and can be fractional. @@ -51,38 +52,38 @@ struct sandbox_limits { * time is also in (fractional) seconds. */ float extra_time = 0; + /** + * Allow to share host computers network. Otherwise, dedicated + * local interface will be created. + */ + bool share_net = false; /** * Limit stack size. This is additional memory limit, 0 is no special limit for stack, * global memory rules will aply. Otherwise, max stack size is @a stack_size kilobytes. */ - size_t stack_size = 0; + std::size_t stack_size = 0; /** * Limit size of created files. This could be useful, if your filesystem doesn't support * quotas. 0 means not set. * @warning This option is deprecated! Use @ref disk_size and @ref disk_files instead. 
*/ - size_t files_size = 0; + std::size_t files_size = 0; /** * Set disk quota to given number of kilobytes. * @warning Underlying filesystem must support quotas. */ - size_t disk_size = 0; + std::size_t disk_size = 0; /** * Set disk quota to given number of files. Actual implementation may vary, for example * on Linux with ext4 filesystem this should be maximum number of used inodes. * @warning Underlying filesystem must support quotas. */ - size_t disk_files = 0; + std::size_t disk_files = 0; /** * Limit number of processes/threads that could be created. * 0 means no limit. */ - size_t processes = 0; - /** - * Allow to share host computers network. Otherwise, dedicated - * local interface will be created. - */ - bool share_net = false; + std::size_t processes = 0; /** * Set environment variables before run command inside the sandbox. */ @@ -96,9 +97,7 @@ struct sandbox_limits { /** * Constructor with some defaults. */ - sandbox_limits() - { - } + sandbox_limits() = default; /** * Insert environment variables which are not present yet. @@ -134,10 +133,11 @@ struct sandbox_limits { bool operator==(const sandbox_limits &second) const { return (memory_usage == second.memory_usage && extra_memory == second.extra_memory && - cpu_time == second.cpu_time && wall_time == second.wall_time && extra_time == second.extra_time && - stack_size == second.stack_size && files_size == second.files_size && disk_size == second.disk_size && - disk_files == second.disk_files && processes == second.processes && share_net == second.share_net && - environ_vars == second.environ_vars && bound_dirs == second.bound_dirs); + helpers::almost_equal(cpu_time, second.cpu_time) && helpers::almost_equal(wall_time, second.wall_time) && + helpers::almost_equal(extra_time, second.extra_time) && stack_size == second.stack_size && + files_size == second.files_size && disk_size == second.disk_size && disk_files == second.disk_files && + processes == second.processes && share_net == second.share_net && environ_vars == second.environ_vars && + bound_dirs == second.bound_dirs); } /** diff --git a/src/config/task_metadata.h b/src/config/task_metadata.h index dede61af..2e1d0588 100644 --- a/src/config/task_metadata.h +++ b/src/config/task_metadata.h @@ -27,16 +27,16 @@ class task_metadata * @param args arguments supplied for command, default = none * @param sandbox configuration of sandbox, shared pointer, its data can be changed! default = nullptr */ - task_metadata(std::string task_id = "", - size_t priority = 0, + task_metadata(const std::string &task_id = "", + std::size_t priority = 0, bool fatal = false, std::vector deps = {}, task_type type = task_type::INNER, - std::string cmd = "", + const std::string &cmd = "", std::vector args = {}, std::shared_ptr sandbox = nullptr, std::string test_id = "") - : task_id(task_id), priority(priority), fatal_failure(fatal), dependencies(deps), test_id(test_id), type(type), + : task_id(task_id), priority(priority), dependencies(deps), test_id(test_id), type(type), fatal_failure(fatal), binary(cmd), cmd_args(args), sandbox(sandbox) { } @@ -44,9 +44,7 @@ class task_metadata /** Unique identifier of task in job. */ std::string task_id; /** Priority of task among all others. Bigger priority number == greater priority. */ - size_t priority; - /** If true than failure of task will end execution of whole job. */ - bool fatal_failure; + std::size_t priority; /** Dependent tasks which have to be executed before this one. 
*/ std::vector dependencies; /** Test id for external tasks */ @@ -54,6 +52,8 @@ class task_metadata /** Type of this task. */ task_type type; + /** If true than failure of task will end execution of whole job. */ + bool fatal_failure; /** Command which will be executed within this task. */ std::string binary; diff --git a/src/config/task_results.h b/src/config/task_results.h index 7bc27a9d..cec299ad 100644 --- a/src/config/task_results.h +++ b/src/config/task_results.h @@ -24,66 +24,66 @@ struct sandbox_results { * Return code of sandbox. * Default: 0 */ - int exitcode; + int exitcode = 0; /** * Total run time of program inside the sandbox. * Default: 0 (s) */ - float time; + float time = 0; /** * Total run time (wall clock) of program inside the sandbox. * Default: 0 (s) */ - float wall_time; + float wall_time = 0; + /** + * Flag if program exited normaly or was killed. + * Default: false + */ + bool killed = false; /** * Amount of memory used by program inside the sandbox. * Default: 0 (kB) */ - size_t memory; + std::size_t memory = 0; /** * Maximum resident set size of the process. * Default: 0 (kB) */ - size_t max_rss; + std::size_t max_rss = 0; /** * Error code returned by sandbox. * Default: OK */ - isolate_status status; + isolate_status status = isolate_status::OK; /** * Signal, which killed the process. * Default: 0 */ - int exitsig; - /** - * Flag if program exited normaly or was killed. - * Default: false - */ - bool killed; + int exitsig = 0; /** * Error message of the sandbox. * Default: "" */ - std::string message; + std::string message = ""; /** * Number of voluntary context switches. * Default: 0 */ - size_t csw_voluntary; + std::size_t csw_voluntary = 0; /** * Number of forced context switches. * Default: 0 */ - size_t csw_forced; + std::size_t csw_forced = 0; /** * Constructor with default values initialization. */ - sandbox_results() - : exitcode(0), time(0), wall_time(0), memory(0), max_rss(0), status(isolate_status::OK), exitsig(0), - killed(false), message(), csw_voluntary(0), csw_forced(0) - { - } + sandbox_results() = default; + /** + * Destructor + */ + ~sandbox_results() = default; /** * Defaulted copy constructor. @@ -112,7 +112,7 @@ struct task_results { * Status of task after execution. * Default: OK */ - task_status status; + task_status status = task_status::OK; /** * Error message if the task failed. * Default: "" @@ -130,14 +130,16 @@ struct task_results { * Pointer to @ref sandbox_results for external task results. * Default: nullptr (other types of tasks) */ - std::unique_ptr sandbox_status; + std::unique_ptr sandbox_status = nullptr; /** * Constructor with default values initiazation. */ - task_results() : status(task_status::OK), error_message(), output_stdout(), output_stderr(), sandbox_status(nullptr) - { - } + task_results() = default; + /** + * Destructor + */ + ~task_results() = default; /** * Defaulted copy constructor. 
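The `sandbox_limits::operator==` hunk above stops comparing its floating-point members with plain `==` and routes them through `helpers::almost_equal`, which this patch adds in `src/helpers/type_utils.h`. A small usage sketch of why that matters; the `main` below and its example values are illustrative only, not part of the patch.

```cpp
#include <iostream>
#include "helpers/type_utils.h"

int main()
{
	double cpu_time = 0.1 + 0.2; // 0.30000000000000004 after binary rounding
	std::cout << std::boolalpha
	          << (cpu_time == 0.3) << '\n'                     // false: exact comparison is too strict
	          << helpers::almost_equal(cpu_time, 0.3) << '\n'; // true: equal within a few ULPs
	return 0;
}
```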
diff --git a/src/config/worker_config.cpp b/src/config/worker_config.cpp index dfa2c7f0..2184ff5e 100644 --- a/src/config/worker_config.cpp +++ b/src/config/worker_config.cpp @@ -1,9 +1,7 @@ #include "worker_config.h" -#include "../helpers/config.h" +#include "helpers/config.h" -worker_config::worker_config() -{ -} +worker_config::worker_config() = default; worker_config::worker_config(const YAML::Node &config) { @@ -15,11 +13,11 @@ worker_config::worker_config(const YAML::Node &config) } if (config["broker-ping-interval"] && config["broker-ping-interval"].IsScalar()) { - broker_ping_interval_ = std::chrono::milliseconds(config["broker-ping-interval"].as()); + broker_ping_interval_ = std::chrono::milliseconds(config["broker-ping-interval"].as()); } if (config["max-broker-liveness"] && config["max-broker-liveness"].IsScalar()) { - max_broker_liveness_ = config["max-broker-liveness"].as(); + max_broker_liveness_ = config["max-broker-liveness"].as(); } if (!config["headers"].IsMap()) { throw config_error("Headers are not a map"); } @@ -58,7 +56,7 @@ worker_config::worker_config(const YAML::Node &config) // load worker-id if (config["worker-id"] && config["worker-id"].IsScalar()) { - worker_id_ = config["worker-id"].as(); + worker_id_ = config["worker-id"].as(); } else { throw config_error("Item worker-id not defined properly"); } @@ -106,10 +104,10 @@ worker_config::worker_config(const YAML::Node &config) log_config_.log_level = config["logger"]["level"].as(); } // no throw... can be omitted if (config["logger"]["max-size"] && config["logger"]["max-size"].IsScalar()) { - log_config_.log_file_size = config["logger"]["max-size"].as(); + log_config_.log_file_size = config["logger"]["max-size"].as(); } // no throw... can be omitted if (config["logger"]["rotations"] && config["logger"]["rotations"].IsScalar()) { - log_config_.log_files_count = config["logger"]["rotations"].as(); + log_config_.log_files_count = config["logger"]["rotations"].as(); } // no throw... can be omitted } // no throw... can be omitted @@ -126,22 +124,22 @@ worker_config::worker_config(const YAML::Node &config) limits_.extra_time = limits["extra-time"].as(); } // no throw... can be omitted if (limits["stack-size"] && limits["stack-size"].IsScalar()) { - limits_.stack_size = limits["stack-size"].as(); + limits_.stack_size = limits["stack-size"].as(); } // no throw... can be omitted if (limits["memory"] && limits["memory"].IsScalar()) { - limits_.memory_usage = limits["memory"].as(); + limits_.memory_usage = limits["memory"].as(); } // no throw... can be omitted if (limits["extra-memory"] && limits["extra-memory"].IsScalar()) { - limits_.extra_memory = limits["extra-memory"].as(); + limits_.extra_memory = limits["extra-memory"].as(); } // no throw... can be omitted if (limits["parallel"] && limits["parallel"].IsScalar()) { - limits_.processes = limits["parallel"].as(); + limits_.processes = limits["parallel"].as(); } // no throw... can be omitted if (limits["disk-size"] && limits["disk-size"].IsScalar()) { - limits_.disk_size = limits["disk-size"].as(); + limits_.disk_size = limits["disk-size"].as(); } // no throw... can be omitted if (limits["disk-files"] && limits["disk-files"].IsScalar()) { - limits_.disk_files = limits["disk-files"].as(); + limits_.disk_files = limits["disk-files"].as(); } // no throw... 
can be omitted try { @@ -153,8 +151,7 @@ worker_config::worker_config(const YAML::Node &config) if (limits["environ-variable"] && limits["environ-variable"].IsMap()) { for (const auto &var : limits["environ-variable"]) { - limits_.environ_vars.push_back( - std::make_pair(var.first.as(), var.second.as())); + limits_.environ_vars.emplace_back(var.first.as(), var.second.as()); } } // no throw... can be omitted @@ -164,14 +161,14 @@ worker_config::worker_config(const YAML::Node &config) // load max-output-length if (config["max-output-length"] && config["max-output-length"].IsScalar()) { - max_output_length_ = config["max-output-length"].as(); + max_output_length_ = config["max-output-length"].as(); } else { throw config_error("Item max-output-length not defined properly"); } // load max-carboncopy-length if (config["max-carboncopy-length"] && config["max-carboncopy-length"].IsScalar()) { - max_carboncopy_length_ = config["max-carboncopy-length"].as(); + max_carboncopy_length_ = config["max-carboncopy-length"].as(); } else { throw config_error("Item max-carboncopy-length not defined properly"); } @@ -188,9 +185,7 @@ worker_config::worker_config(const YAML::Node &config) } } -worker_config::~worker_config() -{ -} +worker_config::~worker_config() = default; size_t worker_config::get_worker_id() const { diff --git a/src/config/worker_config.h b/src/config/worker_config.h index 82c7170f..50beac1b 100644 --- a/src/config/worker_config.h +++ b/src/config/worker_config.h @@ -14,7 +14,7 @@ namespace fs = boost::filesystem; #include "log_config.h" #include "fileman_config.h" -#include "../sandbox/sandbox_base.h" +#include "sandbox/sandbox_base.h" /** @@ -24,7 +24,7 @@ class worker_config { public: /** Type of the header map */ - typedef std::multimap header_map_t; + using header_map_t = std::multimap; /** * The default constructor @@ -46,7 +46,7 @@ class worker_config * Get worker ID which has to be unique at least in context of one machine. * @return integer which can be used also as identifier/index of sandbox */ - virtual size_t get_worker_id() const; + virtual std::size_t get_worker_id() const; /** * Get worker human readable description (name), which will be shown in broker logs. * @return string with the description @@ -78,7 +78,7 @@ class worker_config * Get the maximum number of pings in a row without response before the broker is considered disconnected. * @return broker liveness integer */ - virtual size_t get_max_broker_liveness() const; + virtual std::size_t get_max_broker_liveness() const; /** * Get the interval between pings sent to the broker. @@ -112,13 +112,13 @@ class worker_config * Get maximal length of output which can be written to the results. * @return length of output in bytes */ - virtual size_t get_max_output_length() const; + virtual std::size_t get_max_output_length() const; /** * Get maximal length of output which can be copied into results folder. * @return length of output in bytes */ - virtual size_t get_max_carboncopy_length() const; + virtual std::size_t get_max_carboncopy_length() const; /** * Get flag which determines if cleanup is made after sumbission is evaluated. 
@@ -128,35 +128,35 @@ class worker_config private: /** Unique worker number in context of one machine (0-100 preferably) */ - size_t worker_id_; + std::size_t worker_id_ = 0; /** Human readable description of the worker for logging purposes */ - std::string worker_description_; + std::string worker_description_ = ""; /** Working directory of whole worker used as base directory for all temporary files */ - std::string working_directory_; + std::string working_directory_ = ""; /** Broker URI, address where broker is listening */ - std::string broker_uri_; + std::string broker_uri_ = ""; /** Header which are sent to broker and should specify worker abilities */ - header_map_t headers_; + header_map_t headers_ = {}; /** Hwgroup which is sent to broker and is used in job configuration to select right limits */ - std::string hwgroup_; + std::string hwgroup_ = {}; /** Maximum number of pings in a row without response before the broker is considered disconnected */ - size_t max_broker_liveness_ = 4; + std::size_t max_broker_liveness_ = 4; /** How often should the worker ping the broker */ std::chrono::milliseconds broker_ping_interval_ = std::chrono::milliseconds(1000); /** The caching directory path */ - std::string cache_dir_; + std::string cache_dir_ = ""; /** Configuration of logger */ - log_config log_config_; + log_config log_config_ = {}; /** Default configuration of file managers */ - std::vector filemans_configs_; + std::vector filemans_configs_ = {}; /** Default sandbox limits */ - sandbox_limits limits_; + sandbox_limits limits_ = {}; /** Maximal length of output from sandbox which can be written to the results file, in bytes. */ - size_t max_output_length_; + std::size_t max_output_length_ = 0; /** Maximal lenght of output from sandbox which can be copied into results folder, in bytes */ - size_t max_carboncopy_length_; + std::size_t max_carboncopy_length_ = 0; /** If true then all files created during evaluation of job will be deleted at the end. */ - bool cleanup_submission_; + bool cleanup_submission_ = true; }; diff --git a/src/connection_proxy.h b/src/connection_proxy.h index e1a8b6ab..9c4457b5 100644 --- a/src/connection_proxy.h +++ b/src/connection_proxy.h @@ -19,7 +19,7 @@ static const std::string PROGRESS_SOCKET_ID = "progress"; class connection_proxy { private: - static const size_t socket_count_ = 3; + static const std::size_t socket_count_ = 3; zmq::socket_t broker_; zmq::socket_t jobs_; zmq::socket_t progress_; diff --git a/src/fileman/cache_manager.cpp b/src/fileman/cache_manager.cpp index b36da0f7..0a181eda 100644 --- a/src/fileman/cache_manager.cpp +++ b/src/fileman/cache_manager.cpp @@ -1,5 +1,5 @@ #include "cache_manager.h" -#include "../helpers/string_utils.h" +#include "helpers/string_utils.h" cache_manager::cache_manager(std::shared_ptr logger) diff --git a/src/fileman/cache_manager.h b/src/fileman/cache_manager.h index edd36d85..696353f1 100644 --- a/src/fileman/cache_manager.h +++ b/src/fileman/cache_manager.h @@ -4,7 +4,7 @@ #include #include #include "file_manager_interface.h" -#include "../helpers/logger.h" +#include "helpers/logger.h" #define BOOST_FILESYSTEM_NO_DEPRECATED #define BOOST_NO_CXX11_SCOPED_ENUMS @@ -38,22 +38,20 @@ class cache_manager : public file_manager_interface /** * Destructor. */ - virtual ~cache_manager() - { - } + ~cache_manager() override = default; /** * Copy a file from cache to destination. * @param src_name Name of the file without path. 
* @param dst_name Name of the destination path with requested filename - the file * can be renamed during fetching. */ - virtual void get_file(const std::string &src_name, const std::string &dst_name); + void get_file(const std::string &src_name, const std::string &dst_name) override; /** * Copy file to cache. * @param src_name Path and name of the file to be copied. * @param dst_name Name of the file in cache. */ - virtual void put_file(const std::string &src_name, const std::string &dst_name); + void put_file(const std::string &src_name, const std::string &dst_name) override; /** * Get path to the directory where files are stored. diff --git a/src/fileman/fallback_file_manager.h b/src/fileman/fallback_file_manager.h index 591f3a74..58689f29 100644 --- a/src/fileman/fallback_file_manager.h +++ b/src/fileman/fallback_file_manager.h @@ -21,7 +21,7 @@ class fallback_file_manager : public file_manager_interface { public: /** Pointer to every file manager type. */ - typedef std::shared_ptr file_manager_ptr; + using file_manager_ptr = std::shared_ptr; public: /** @@ -35,9 +35,7 @@ class fallback_file_manager : public file_manager_interface /** * Destructor. */ - virtual ~fallback_file_manager() - { - } + ~fallback_file_manager() override = default; /** * Get file. If requested file is in cache, copy will be saved as @a dst_name immediately, @@ -46,7 +44,7 @@ class fallback_file_manager : public file_manager_interface * @param dst_name Path (with filename) where to save the file (actual path you want, * caching is transparent from this point of view). */ - virtual void get_file(const std::string &src_name, const std::string &dst_name); + void get_file(const std::string &src_name, const std::string &dst_name) override; /** * Save file using only secondary manager (i.e. upload file to remote server). @@ -54,7 +52,7 @@ class fallback_file_manager : public file_manager_interface * @param src_name Name of the file (with path) to upload. * @param dst_url Destinaton (url where to upload the file). */ - virtual void put_file(const std::string &src_name, const std::string &dst_url); + void put_file(const std::string &src_name, const std::string &dst_url) override; private: /** Primary file manager (cache). */ diff --git a/src/fileman/file_manager_interface.h b/src/fileman/file_manager_interface.h index 73fa61cf..5407f8cd 100644 --- a/src/fileman/file_manager_interface.h +++ b/src/fileman/file_manager_interface.h @@ -17,9 +17,7 @@ class file_manager_interface /** * Destructor. */ - virtual ~file_manager_interface() - { - } + virtual ~file_manager_interface() = default; /** * Get the file. @@ -53,22 +51,20 @@ class fm_exception : public std::exception * Constructor with custom string. * @param what String with description of failure. */ - fm_exception(std::string what) : what_(what) + fm_exception(const std::string &what) : what_(what) { } /** * Destructor. */ - virtual ~fm_exception() - { - } + ~fm_exception() override = default; /** * Get failure description. * @return Stored string. */ - virtual const char *what() const noexcept + const char *what() const noexcept override { return what_.c_str(); } diff --git a/src/fileman/http_manager.cpp b/src/fileman/http_manager.cpp index 53661865..4ba5d4d7 100644 --- a/src/fileman/http_manager.cpp +++ b/src/fileman/http_manager.cpp @@ -14,27 +14,31 @@ namespace fs = boost::filesystem; #pragma warning(disable : 4996) #endif - -/* If you want run this program on Windows with libcurl as a - DLL, you MUST also provide a read callback with CURLOPT_READFUNCTION. 
- Failing to do so will give you a crash since a DLL may not use the - variable's memory when passed in to it from an app like this. */ -static size_t fread_wrapper(void *ptr, size_t size, size_t nmemb, FILE *stream) +namespace { - return fread(ptr, size, nmemb, stream); -} -// And the same for writing ... -static size_t fwrite_wrapper(void *ptr, size_t size, size_t nmemb, FILE *stream) -{ - return fwrite(ptr, size, nmemb, stream); -} + /* If you want run this program on Windows with libcurl as a + DLL, you MUST also provide a read callback with CURLOPT_READFUNCTION. + Failing to do so will give you a crash since a DLL may not use the + variable's memory when passed in to it from an app like this. */ + static std::size_t fread_wrapper(void *ptr, std::size_t size, std::size_t nmemb, FILE *stream) + { + return fread(ptr, size, nmemb, stream); + } -// Nothing write callback -size_t write_callback(char *, size_t size, size_t nmemb, void *) -{ - return size * nmemb; -} + // And the same for writing ... + static std::size_t fwrite_wrapper(void *ptr, std::size_t size, std::size_t nmemb, FILE *stream) + { + return fwrite(ptr, size, nmemb, stream); + } + + // Nothing write callback + std::size_t write_callback(char *, std::size_t size, std::size_t nmemb, void *) + { + return size * nmemb; + } + +} // namespace // Tweak for older libcurls #ifndef CURL_HTTP_VERSION_2_0 diff --git a/src/fileman/http_manager.h b/src/fileman/http_manager.h index 146e0788..cc2084e9 100644 --- a/src/fileman/http_manager.h +++ b/src/fileman/http_manager.h @@ -4,8 +4,8 @@ #include #include #include "file_manager_interface.h" -#include "../helpers/logger.h" -#include "../config/fileman_config.h" +#include "helpers/logger.h" +#include "config/fileman_config.h" /** @@ -34,23 +34,22 @@ class http_manager : public file_manager_interface /** * Destructor. */ - virtual ~http_manager() - { - } + ~http_manager() override = default; + /** * Get and save file locally. * @param src_name Name of requested file (without path) * @param dst_name Path to the directory with name of the created file - the file can * be renamed during fetching. */ - virtual void get_file(const std::string &src_name, const std::string &dst_name); + void get_file(const std::string &src_name, const std::string &dst_name) override; /** * Upload file to remote server with HTTP PUT method. * @param src_name Name with path to a file to upload. * @param dst_url Url where the file will be uploaded. If this is with or without file name * depends on your HTTP server configuration. */ - virtual void put_file(const std::string &src_name, const std::string &dst_url); + void put_file(const std::string &src_name, const std::string &dst_url) override; protected: /** diff --git a/src/fileman/prefixed_file_manager.h b/src/fileman/prefixed_file_manager.h index 1fa0c931..5668d97d 100644 --- a/src/fileman/prefixed_file_manager.h +++ b/src/fileman/prefixed_file_manager.h @@ -28,6 +28,10 @@ class prefixed_file_manager : public file_manager_interface * method and destination name in put_file() method. */ prefixed_file_manager(std::shared_ptr fm, const std::string &prefix); + /** + * Destructor + */ + ~prefixed_file_manager() override = default; /** * Get file. 
This method has same semantics and arguments as underlying @@ -37,7 +41,7 @@ class prefixed_file_manager : public file_manager_interface * @param src_name Source file - same as underlying file manager * @param dst_name Destination file - same as underlying file manager */ - virtual void get_file(const std::string &src_name, const std::string &dst_name); + void get_file(const std::string &src_name, const std::string &dst_name) override; /** * Put file. This method has same semantics and arguments as underlying @@ -47,7 +51,7 @@ class prefixed_file_manager : public file_manager_interface * @param src_name Source file - same as underlying file manager * @param dst_name Destination file - same as underlying file manager */ - virtual void put_file(const std::string &src_name, const std::string &dst_name); + void put_file(const std::string &src_name, const std::string &dst_name) override; }; diff --git a/src/helpers/config.cpp b/src/helpers/config.cpp index 032948f9..a4ecdbe7 100644 --- a/src/helpers/config.cpp +++ b/src/helpers/config.cpp @@ -55,7 +55,7 @@ std::shared_ptr helpers::build_job_metadata(const YAML::Node &conf throw config_exception("Configuration task has missing task-id"); } if (ctask["priority"] && ctask["priority"].IsScalar()) { - task_meta->priority = ctask["priority"].as(); + task_meta->priority = ctask["priority"].as(); } else { task_meta->priority = 1; // default value } @@ -170,34 +170,34 @@ std::shared_ptr helpers::build_job_metadata(const YAML::Node &conf sl->extra_time = FLT_MAX; // set undefined value (max float) } if (lim["stack-size"] && lim["stack-size"].IsScalar()) { - sl->stack_size = lim["stack-size"].as(); + sl->stack_size = lim["stack-size"].as(); } else { - sl->stack_size = SIZE_MAX; // set undefined value (max size_t) + sl->stack_size = SIZE_MAX; // set undefined value (max std::size_t) } if (lim["memory"] && lim["memory"].IsScalar()) { - sl->memory_usage = lim["memory"].as(); + sl->memory_usage = lim["memory"].as(); } else { - sl->memory_usage = SIZE_MAX; // set undefined value (max size_t) + sl->memory_usage = SIZE_MAX; // set undefined value (max std::size_t) } if (lim["extra-memory"] && lim["extra-memory"].IsScalar()) { - sl->extra_memory = lim["extra-memory"].as(); + sl->extra_memory = lim["extra-memory"].as(); } else { - sl->extra_memory = SIZE_MAX; // set undefined value (max size_t) + sl->extra_memory = SIZE_MAX; // set undefined value (max std::size_t) } if (lim["parallel"] && lim["parallel"].IsScalar()) { // TODO not defined properly - sl->processes = lim["parallel"].as(); + sl->processes = lim["parallel"].as(); } else { - sl->processes = SIZE_MAX; // set undefined value (max size_t) + sl->processes = SIZE_MAX; // set undefined value (max std::size_t) } if (lim["disk-size"] && lim["disk-size"].IsScalar()) { - sl->disk_size = lim["disk-size"].as(); + sl->disk_size = lim["disk-size"].as(); } else { - sl->disk_size = SIZE_MAX; // set undefined value (max size_t) + sl->disk_size = SIZE_MAX; // set undefined value (max std::size_t) } if (lim["disk-files"] && lim["disk-files"].IsScalar()) { - sl->disk_files = lim["disk-files"].as(); + sl->disk_files = lim["disk-files"].as(); } else { - sl->disk_files = SIZE_MAX; // set undefined value (max size_t) + sl->disk_files = SIZE_MAX; // set undefined value (max std::size_t) } // find bound dirs from config and attach them to limits @@ -206,8 +206,8 @@ std::shared_ptr helpers::build_job_metadata(const YAML::Node &conf if (lim["environ-variable"] && lim["environ-variable"].IsMap()) { for (auto &var : 
lim["environ-variable"]) { - sl->environ_vars.push_back( - std::make_pair(var.first.as(), var.second.as())); + sl->environ_vars.emplace_back( + var.first.as(), var.second.as()); } } @@ -292,7 +292,7 @@ std::vector> help mode = static_cast(mode | sandbox_limits::dir_perm::DEV); } } // no throw... can be omitted - bound_dirs.push_back(std::tuple{src, dst, mode}); + bound_dirs.emplace_back(src, dst, mode); } } } // can be omitted... no throw diff --git a/src/helpers/config.h b/src/helpers/config.h index d0a707d4..490ef1ff 100644 --- a/src/helpers/config.h +++ b/src/helpers/config.h @@ -3,8 +3,8 @@ #include #include -#include "../config/job_metadata.h" -#include "../config/task_metadata.h" +#include "config/job_metadata.h" +#include "config/task_metadata.h" namespace helpers @@ -54,15 +54,13 @@ namespace helpers /** * Stated for completion. */ - virtual ~config_exception() - { - } + ~config_exception() override = default; /** * Returns description of exception. * @return c-style string */ - virtual const char *what() const noexcept + const char *what() const noexcept override { return what_.c_str(); } diff --git a/src/helpers/filesystem.h b/src/helpers/filesystem.h index e597828d..33895012 100644 --- a/src/helpers/filesystem.h +++ b/src/helpers/filesystem.h @@ -1,7 +1,7 @@ #ifndef RECODEX_WORKER_HELPERS_FILESYSTEM_HPP #define RECODEX_WORKER_HELPERS_FILESYSTEM_HPP -#include "../config/sandbox_limits.h" +#include "config/sandbox_limits.h" #define BOOST_FILESYSTEM_NO_DEPRECATED #define BOOST_NO_CXX11_SCOPED_ENUMS #include @@ -69,15 +69,13 @@ namespace helpers /** * Stated for completion. */ - virtual ~filesystem_exception() - { - } + ~filesystem_exception() override = default; /** * Returns description of exception. * @return c-style string */ - virtual const char *what() const noexcept + const char *what() const noexcept override { return what_.c_str(); } diff --git a/src/helpers/format.h b/src/helpers/format.h index b025731b..fc1b5641 100644 --- a/src/helpers/format.h +++ b/src/helpers/format.h @@ -5,15 +5,23 @@ namespace helpers { + + /** + * Last step of recursive template + */ inline void format(std::ostringstream &) { } + /** + * Concatenate various types into one string + */ template void format(std::ostringstream &oss, const ArgT &a, T... args) { oss << a; format(oss, args...); } + } // namespace helpers #endif // RECODEX_WORKER_HELPERS_FORMAT_H diff --git a/src/helpers/string_utils.cpp b/src/helpers/string_utils.cpp index 50fe882a..86f62301 100644 --- a/src/helpers/string_utils.cpp +++ b/src/helpers/string_utils.cpp @@ -1,13 +1,13 @@ #include "string_utils.h" #include -std::string helpers::random_alphanum_string(size_t length) +std::string helpers::random_alphanum_string(std::size_t length) { auto randchar = []() -> char { const char charset[] = "0123456789" "ABCDEFGHIJKLMNOPQRSTUVWXYZ" "abcdefghijklmnopqrstuvwxyz"; - const size_t max_index = (sizeof(charset) - 1); + const std::size_t max_index = (sizeof(charset) - 1); return charset[rand() % max_index]; }; std::string str(length, 0); diff --git a/src/helpers/string_utils.h b/src/helpers/string_utils.h index 6b9c6f1f..f2fd682c 100644 --- a/src/helpers/string_utils.h +++ b/src/helpers/string_utils.h @@ -11,7 +11,7 @@ namespace helpers * @param length * @return generated string */ - std::string random_alphanum_string(size_t length); + std::string random_alphanum_string(std::size_t length); /** * Filter non-printable characters from given string and write it back. 
diff --git a/src/helpers/topological_sort.cpp b/src/helpers/topological_sort.cpp index d7fd690f..18fe7c19 100644 --- a/src/helpers/topological_sort.cpp +++ b/src/helpers/topological_sort.cpp @@ -1,7 +1,7 @@ #include "topological_sort.h" -typedef std::priority_queue, std::vector>, task_compare> - priority_queue_type; +using priority_queue_type = + std::priority_queue, std::vector>, task_compare>; void helpers::topological_sort(std::shared_ptr root, std::vector> &result) { diff --git a/src/helpers/topological_sort.h b/src/helpers/topological_sort.h index 5af3bba9..f86df587 100644 --- a/src/helpers/topological_sort.h +++ b/src/helpers/topological_sort.h @@ -5,7 +5,7 @@ #include #include #include -#include "../tasks/task_base.h" +#include "tasks/task_base.h" namespace helpers @@ -46,15 +46,13 @@ namespace helpers /** * Stated for completion. */ - virtual ~top_sort_exception() - { - } + ~top_sort_exception() override = default; /** * Returns description of exception. * @return c-style string */ - virtual const char *what() const noexcept + const char *what() const noexcept override { return what_.c_str(); } diff --git a/src/helpers/type_utils.h b/src/helpers/type_utils.h new file mode 100644 index 00000000..89a5813a --- /dev/null +++ b/src/helpers/type_utils.h @@ -0,0 +1,31 @@ +#ifndef RECODEX_WORKER_HELPERS_TYPE_UTILS_HPP +#define RECODEX_WORKER_HELPERS_TYPE_UTILS_HPP + +#include +#include +#include +#include + +namespace helpers +{ + + /** + * Floating point types comparison with some epsion. + * @param x First number + * @param y Second number + * @param ulp Desired precision in ULPs (units in the last place) + * @returns @a true if the numbers are almost the same + */ + template + typename std::enable_if::is_integer, bool>::type almost_equal(T x, T y, int ulp = 2) + { + // the machine epsilon has to be scaled to the magnitude of the values used + // and multiplied by the desired precision in ULPs (units in the last place) + return std::abs(x - y) <= std::numeric_limits::epsilon() * std::abs(x + y) * ulp + // unless the result is subnormal + || std::abs(x - y) < std::numeric_limits::min(); + } + +} // namespace helpers + +#endif // RECODEX_WORKER_HELPERS_TYPE_UTILS_HPP diff --git a/src/helpers/zmq_socket.cpp b/src/helpers/zmq_socket.cpp index 7b148a91..7159cb52 100644 --- a/src/helpers/zmq_socket.cpp +++ b/src/helpers/zmq_socket.cpp @@ -3,9 +3,11 @@ bool helpers::send_through_socket(zmq::socket_t &socket, const std::vector &msg) { for (auto it = std::begin(msg); it != std::end(msg); ++it) { - bool retval = socket.send(it->c_str(), it->size(), std::next(it) != std::end(msg) ? ZMQ_SNDMORE : 0) >= 0; - - if (!retval) { return false; } + try { + socket.send(it->c_str(), it->size(), std::next(it) != std::end(msg) ? 
ZMQ_SNDMORE : 0); + } catch (const zmq::error_t &) { + return false; + } } return true; @@ -21,7 +23,7 @@ bool helpers::recv_from_socket(zmq::socket_t &socket, std::vector & try { retval = socket.recv(&msg); - } catch (zmq::error_t &) { + } catch (const zmq::error_t &) { if (terminate != nullptr) { *terminate = true; } retval = false; } diff --git a/src/job/job.cpp b/src/job/job.cpp index 81f03dff..a868b95b 100644 --- a/src/job/job.cpp +++ b/src/job/job.cpp @@ -1,5 +1,6 @@ #include "job.h" #include "job_exception.h" +#include "helpers/type_utils.h" job::job(std::shared_ptr job_meta, std::shared_ptr worker_conf, @@ -86,7 +87,7 @@ void job::build_job() } // create root task, which is logical root of evaluation - size_t id = 0; + std::size_t id = 0; root_task_ = factory_->create_internal_task(id++); // construct all tasks with their ids and check if they have all datas, but do not connect them @@ -102,9 +103,7 @@ void job::build_job() // go through variables parsing task_meta->binary = parse_job_var(task_meta->binary); - for (size_t i = 0; i < task_meta->cmd_args.size(); ++i) { - task_meta->cmd_args.at(i) = parse_job_var(task_meta->cmd_args.at(i)); - } + for (auto &cmd_arg : task_meta->cmd_args) { cmd_arg = parse_job_var(cmd_arg); } std::shared_ptr task; @@ -146,8 +145,8 @@ void job::build_job() sandbox->carboncopy_stderr = parse_job_var(sandbox->carboncopy_stderr); std::vector> new_bnd_dirs; for (auto &bnd_dir : limits->bound_dirs) { - new_bnd_dirs.push_back(std::tuple{ - parse_job_var(std::get<0>(bnd_dir)), parse_job_var(std::get<1>(bnd_dir)), std::get<2>(bnd_dir)}); + new_bnd_dirs.emplace_back( + parse_job_var(std::get<0>(bnd_dir)), parse_job_var(std::get<1>(bnd_dir)), std::get<2>(bnd_dir)); } limits->bound_dirs = new_bnd_dirs; @@ -200,7 +199,7 @@ void job::build_job() print_job_queue(); } -void job::process_task_limits(std::shared_ptr limits) +void job::process_task_limits(const std::shared_ptr &limits) { if (limits == nullptr) { throw job_exception("Internal error. 
Nullptr dereference in process_task_limits."); } @@ -208,17 +207,17 @@ void job::process_task_limits(std::shared_ptr limits) std::string msg = " item is bigger than default worker value"; // we have to load defaults from worker_config if necessary and check for bigger limits than in worker_config - if (limits->cpu_time == FLT_MAX) { + if (helpers::almost_equal(limits->cpu_time, FLT_MAX)) { limits->cpu_time = worker_limits.cpu_time; } else { if (limits->cpu_time > worker_limits.cpu_time) { throw job_exception("time" + msg); } } - if (limits->wall_time == FLT_MAX) { + if (helpers::almost_equal(limits->wall_time, FLT_MAX)) { limits->wall_time = worker_limits.wall_time; } else { if (limits->wall_time > worker_limits.wall_time) { throw job_exception("wall-time" + msg); } } - if (limits->extra_time == FLT_MAX) { + if (helpers::almost_equal(limits->extra_time, FLT_MAX)) { limits->extra_time = worker_limits.extra_time; } else { if (limits->extra_time > worker_limits.extra_time) { throw job_exception("extra-time" + msg); } @@ -260,7 +259,7 @@ void job::process_task_limits(std::shared_ptr limits) } void job::connect_tasks( - std::shared_ptr root, std::map> &unconn_tasks) + const std::shared_ptr &root, std::map> &unconn_tasks) { for (auto &elem : unconn_tasks) { const std::vector &depend = elem.second->get_dependencies(); @@ -271,13 +270,13 @@ void job::connect_tasks( elem.second->add_parent(root); } - for (size_t i = 0; i < depend.size(); ++i) { + for (const auto &i : depend) { try { - auto ptr = unconn_tasks.at(depend.at(i)); + auto ptr = unconn_tasks.at(i); ptr->add_children(elem.second); elem.second->add_parent(ptr); } catch (std::out_of_range &) { - throw job_exception("Non existing task-id (" + depend.at(i) + ") in dependency list"); + throw job_exception("Non existing task-id (" + i + ") in dependency list"); } } } @@ -303,7 +302,7 @@ std::vector>> job::run() } // add result from task into whole results set - results.push_back({task_id, res}); + results.emplace_back(task_id, res); // if task has some results then process them if (res != nullptr) { @@ -343,7 +342,7 @@ std::vector>> job::run() // even skipped task has its own result entry std::shared_ptr result(new task_results()); result->status = task_status::SKIPPED; - results.push_back({task_id, result}); + results.emplace_back(task_id, result); // we have to pass information about non-execution to children task->set_children_execution(false); @@ -424,10 +423,10 @@ std::string job::parse_job_var(const std::string &src) { std::string res = src; - size_t start = 0; + std::size_t start = 0; while ((start = res.find("${", start)) != std::string::npos) { - size_t end = res.find("}", start + 1); - size_t len = end - start - 2; + std::size_t end = res.find('}', start + 1); + std::size_t len = end - start - 2; if (end == std::string::npos) { throw job_exception("Not closed variable name: " + res.substr(start)); } if (job_variables_.find(res.substr(start + 2, len)) != job_variables_.end()) { diff --git a/src/job/job.h b/src/job/job.h index ebd9cb1e..fe4027b5 100644 --- a/src/job/job.h +++ b/src/job/job.h @@ -13,14 +13,14 @@ namespace fs = boost::filesystem; #include "spdlog/spdlog.h" -#include "../helpers/logger.h" -#include "../helpers/topological_sort.h" -#include "../helpers/filesystem.h" -#include "../config/worker_config.h" -#include "../config/job_metadata.h" -#include "../config/task_metadata.h" -#include "../tasks/task_factory_interface.h" -#include "../sandbox/sandbox_base.h" +#include "helpers/logger.h" +#include 
"helpers/topological_sort.h" +#include "helpers/filesystem.h" +#include "config/worker_config.h" +#include "config/job_metadata.h" +#include "config/task_metadata.h" +#include "tasks/task_factory_interface.h" +#include "sandbox/sandbox_base.h" #include "progress_callback_interface.h" @@ -108,7 +108,7 @@ class job * Check limits and in case of undefined values set worker defaults. * @param limits limits which will be checked */ - void process_task_limits(std::shared_ptr limits); + void process_task_limits(const std::shared_ptr &limits); /** * Given unconnected tasks will be connected according to their dependencies. * If they do not have dependency, they will be assigned to given root task. @@ -116,7 +116,7 @@ class job * @param unconn_tasks given unconnected tasks which will be connected */ void connect_tasks( - std::shared_ptr root, std::map> &unconn_tasks); + const std::shared_ptr &root, std::map> &unconn_tasks); /** * Prepare variables which can be used in job configuration. diff --git a/src/job/job_evaluator.cpp b/src/job/job_evaluator.cpp index 50c603da..fa8b7921 100644 --- a/src/job/job_evaluator.cpp +++ b/src/job/job_evaluator.cpp @@ -1,9 +1,9 @@ #include "job_evaluator.h" #include "job_exception.h" -#include "../config/job_metadata.h" -#include "../fileman/fallback_file_manager.h" -#include "../fileman/prefixed_file_manager.h" -#include "../helpers/config.h" +#include "config/job_metadata.h" +#include "fileman/fallback_file_manager.h" +#include "fileman/prefixed_file_manager.h" +#include "helpers/config.h" job_evaluator::job_evaluator(std::shared_ptr logger, std::shared_ptr config, diff --git a/src/job/job_evaluator.h b/src/job/job_evaluator.h index cdebe20f..901dd586 100644 --- a/src/job/job_evaluator.h +++ b/src/job/job_evaluator.h @@ -5,7 +5,7 @@ #include #include #include -#include "../helpers/logger.h" +#include "helpers/logger.h" #define BOOST_FILESYSTEM_NO_DEPRECATED #define BOOST_NO_CXX11_SCOPED_ENUMS @@ -13,11 +13,11 @@ namespace fs = boost::filesystem; #include "job.h" -#include "../config/worker_config.h" -#include "../fileman/file_manager_interface.h" -#include "../tasks/task_factory.h" -#include "../archives/archivator.h" -#include "../helpers/filesystem.h" +#include "config/worker_config.h" +#include "fileman/file_manager_interface.h" +#include "tasks/task_factory.h" +#include "archives/archivator.h" +#include "helpers/filesystem.h" #include "job_evaluator_interface.h" @@ -52,7 +52,7 @@ class job_evaluator : public job_evaluator_interface /** * Process an "eval" request */ - virtual eval_response evaluate(eval_request request); + eval_response evaluate(eval_request request) override; private: /** diff --git a/src/job/job_evaluator_interface.h b/src/job/job_evaluator_interface.h index c9c71532..e5ad800d 100644 --- a/src/job/job_evaluator_interface.h +++ b/src/job/job_evaluator_interface.h @@ -1,8 +1,8 @@ #ifndef RECODEX_WORKER_JOB_EVALUATOR_BASE_H #define RECODEX_WORKER_JOB_EVALUATOR_BASE_H -#include "../eval_request.h" -#include "../eval_response.h" +#include "eval_request.h" +#include "eval_response.h" /** @@ -14,9 +14,7 @@ class job_evaluator_interface /** * Virtual destructor for proper destruction of inherited classes. 
*/ - virtual ~job_evaluator_interface() - { - } + virtual ~job_evaluator_interface() = default; /** * Process an "eval" request diff --git a/src/job/job_exception.h b/src/job/job_exception.h index d3f47335..a9eb35a9 100644 --- a/src/job/job_exception.h +++ b/src/job/job_exception.h @@ -1,6 +1,8 @@ #ifndef RECODEX_WORKER_JOB_EXCEPTION_H #define RECODEX_WORKER_JOB_EXCEPTION_H +#include +#include /** * Job exception class. @@ -14,6 +16,7 @@ class job_exception : public std::exception job_exception() : what_("Generic job exception") { } + /** * Exception with some brief description. * @param what textual description of a problem @@ -21,17 +24,17 @@ class job_exception : public std::exception job_exception(const std::string &what) : what_(what) { } + /** * Virtual destructor. */ - virtual ~job_exception() - { - } + ~job_exception() override = default; + /** * Return description of this exception. * @return C string */ - virtual const char *what() const noexcept + const char *what() const noexcept override { return what_.c_str(); } @@ -55,6 +58,9 @@ class job_unrecoverable_exception : public job_exception job_unrecoverable_exception(const std::string &what) : job_exception(what) { } + + /** Destructor */ + ~job_unrecoverable_exception() override = default; }; #endif // RECODEX_WORKER_JOB_EXCEPTION_H diff --git a/src/job/job_receiver.cpp b/src/job/job_receiver.cpp index 21644afa..9bbdf998 100644 --- a/src/job/job_receiver.cpp +++ b/src/job/job_receiver.cpp @@ -1,12 +1,12 @@ #include "job_receiver.h" -#include "../connection_proxy.h" -#include "../eval_request.h" -#include "../eval_response.h" -#include "../helpers/zmq_socket.h" -#include "../commands/jobs_client_commands.h" +#include "connection_proxy.h" +#include "eval_request.h" +#include "eval_response.h" +#include "helpers/zmq_socket.h" +#include "commands/jobs_client_commands.h" -job_receiver::job_receiver(std::shared_ptr context, +job_receiver::job_receiver(const std::shared_ptr &context, std::shared_ptr evaluator, std::shared_ptr logger) : socket_(*context, ZMQ_PAIR), evaluator_(evaluator), logger_(logger) diff --git a/src/job/job_receiver.h b/src/job/job_receiver.h index 7756f63e..1791264b 100644 --- a/src/job/job_receiver.h +++ b/src/job/job_receiver.h @@ -6,8 +6,8 @@ #include #include #include "job_evaluator_interface.h" -#include "../commands/command_holder.h" -#include "../helpers/logger.h" +#include "commands/command_holder.h" +#include "helpers/logger.h" /** * Job receiver handles incoming requests from broker_connection and @@ -28,7 +28,7 @@ class job_receiver * @param evaluator evaluator which will evaluate received tasks * @param logger pointer to logging class */ - job_receiver(std::shared_ptr context, + job_receiver(const std::shared_ptr &context, std::shared_ptr evaluator, std::shared_ptr logger); diff --git a/src/job/progress_callback.cpp b/src/job/progress_callback.cpp index cd6023cc..57beab05 100644 --- a/src/job/progress_callback.cpp +++ b/src/job/progress_callback.cpp @@ -1,9 +1,10 @@ #include "progress_callback.h" -#include "../helpers/zmq_socket.h" -#include "../helpers/logger.h" -#include "../connection_proxy.h" +#include "helpers/zmq_socket.h" +#include "helpers/logger.h" +#include "connection_proxy.h" -progress_callback::progress_callback(std::shared_ptr context, std::shared_ptr logger) +progress_callback::progress_callback( + const std::shared_ptr &context, std::shared_ptr logger) : socket_(*context, ZMQ_PAIR), command_("progress"), connected_(false), logger_(logger) { if (logger_ == nullptr) { logger_ = 
helpers::create_null_logger(); } diff --git a/src/job/progress_callback.h b/src/job/progress_callback.h index 9ba8a18b..467622ff 100644 --- a/src/job/progress_callback.h +++ b/src/job/progress_callback.h @@ -58,18 +58,18 @@ class progress_callback : public progress_callback_interface * @param context zmq context structure * @param logger pointer to logging class */ - progress_callback(std::shared_ptr context, std::shared_ptr logger); + progress_callback(const std::shared_ptr &context, std::shared_ptr logger); - virtual void job_archive_downloaded(const std::string &job_id); - virtual void job_build_failed(const std::string &job_id); - virtual void job_finished(const std::string &job_id); - virtual void job_results_uploaded(const std::string &job_id); - virtual void job_started(const std::string &job_id); - virtual void job_ended(const std::string &job_id); - virtual void job_aborted(const std::string &job_id); - virtual void task_completed(const std::string &job_id, const std::string &task_id); - virtual void task_failed(const std::string &job_id, const std::string &task_id); - virtual void task_skipped(const std::string &job_id, const std::string &task_id); + void job_archive_downloaded(const std::string &job_id) override; + void job_build_failed(const std::string &job_id) override; + void job_finished(const std::string &job_id) override; + void job_results_uploaded(const std::string &job_id) override; + void job_started(const std::string &job_id) override; + void job_ended(const std::string &job_id) override; + void job_aborted(const std::string &job_id) override; + void task_completed(const std::string &job_id, const std::string &task_id) override; + void task_failed(const std::string &job_id, const std::string &task_id) override; + void task_skipped(const std::string &job_id, const std::string &task_id) override; }; #endif // RECODEX_WORKER_PROGRESS_CALLBACK_H diff --git a/src/job/progress_callback_interface.h b/src/job/progress_callback_interface.h index 633e08bc..1a584fb5 100644 --- a/src/job/progress_callback_interface.h +++ b/src/job/progress_callback_interface.h @@ -1,6 +1,8 @@ #ifndef RECODEX_WORKER_PROGRESS_CALLBACK_BASE_H #define RECODEX_WORKER_PROGRESS_CALLBACK_BASE_H +#include + /** * Callback which is used in @ref job_evaluator and @ref job itself to indicate its state. * Can be used to inform user about evaluating particular submissions/jobs. @@ -12,9 +14,7 @@ class progress_callback_interface /** * Stated for completion and for derived classes. */ - virtual ~progress_callback_interface() - { - } + virtual ~progress_callback_interface() = default; /** * Indicates that job archive was successfully downloaded from fileserver. 
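In job::process_task_limits above, the float limits are no longer compared to FLT_MAX with ==; the patch routes those checks through the new helpers::almost_equal from src/helpers/type_utils.h. A self-contained sketch of that scaled-epsilon (ULP) comparison idiom follows; the free-standing function and main() are illustrative, while the worker's version lives in the helpers namespace.

// Sketch of the scaled-epsilon (ULP) comparison idiom behind helpers::almost_equal.
#include <cfloat>
#include <cmath>
#include <iostream>
#include <limits>
#include <type_traits>

template <typename T>
typename std::enable_if<!std::numeric_limits<T>::is_integer, bool>::type almost_equal(T x, T y, int ulp = 2)
{
    // Scale the machine epsilon to the magnitude of the operands and to the
    // requested precision in ULPs; differences smaller than the smallest
    // normal value are also treated as equal.
    return std::abs(x - y) <= std::numeric_limits<T>::epsilon() * std::abs(x + y) * ulp ||
        std::abs(x - y) < std::numeric_limits<T>::min();
}

int main()
{
    float cpu_time = FLT_MAX; // sentinel meaning "limit not set", as in the worker limits
    std::cout << std::boolalpha;
    std::cout << almost_equal(cpu_time, FLT_MAX) << '\n'; // true -> fall back to the worker default
    std::cout << (0.1 + 0.2 == 0.3) << '\n';              // false, plain == is fragile for floats
    std::cout << almost_equal(0.1 + 0.2, 0.3) << '\n';    // true within 2 ULPs
    return 0;
}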
@@ -92,43 +92,43 @@ class progress_callback_interface class empty_progress_callback : public progress_callback_interface { public: - virtual void job_archive_downloaded(const std::string &job_id) + void job_archive_downloaded(const std::string &job_id) override { } - virtual void job_build_failed(const std::string &job_id) + void job_build_failed(const std::string &job_id) override { } - virtual void job_finished(const std::string &job_id) + void job_finished(const std::string &job_id) override { } - virtual void job_results_uploaded(const std::string &job_id) + void job_results_uploaded(const std::string &job_id) override { } - virtual void job_started(const std::string &job_id) + void job_started(const std::string &job_id) override { } - virtual void job_ended(const std::string &job_id) + void job_ended(const std::string &job_id) override { } - virtual void job_aborted(const std::string &job_id) + void job_aborted(const std::string &job_id) override { } - virtual void task_completed(const std::string &job_id, const std::string &task_id) + void task_completed(const std::string &job_id, const std::string &task_id) override { } - virtual void task_failed(const std::string &job_id, const std::string &task_id) + void task_failed(const std::string &job_id, const std::string &task_id) override { } - virtual void task_skipped(const std::string &job_id, const std::string &task_id) + void task_skipped(const std::string &job_id, const std::string &task_id) override { } }; diff --git a/src/sandbox/isolate_sandbox.cpp b/src/sandbox/isolate_sandbox.cpp index bc348ddf..4e20d0d0 100644 --- a/src/sandbox/isolate_sandbox.cpp +++ b/src/sandbox/isolate_sandbox.cpp @@ -16,7 +16,7 @@ #define BOOST_FILESYSTEM_NO_DEPRECATED #define BOOST_NO_CXX11_SCOPED_ENUMS #include -#include "../helpers/filesystem.h" +#include "helpers/filesystem.h" namespace fs = boost::filesystem; @@ -39,7 +39,7 @@ namespace isolate_sandbox::isolate_sandbox(std::shared_ptr sandbox_config, sandbox_limits limits, - size_t id, + std::size_t id, const std::string &temp_dir, const std::string &data_dir, std::shared_ptr logger) @@ -366,8 +366,8 @@ sandbox_results isolate_sandbox::process_meta_file() if (meta_stream.is_open()) { std::string line; while (std::getline(meta_stream, line)) { - size_t pos = line.find(':'); - size_t value_size = line.size() - (pos + 1); + std::size_t pos = line.find(':'); + std::size_t value_size = line.size() - (pos + 1); auto first = line.substr(0, pos); auto second = line.substr(pos + 1, value_size); if (first == "time") { diff --git a/src/sandbox/isolate_sandbox.h b/src/sandbox/isolate_sandbox.h index bbcd515b..7b620882 100644 --- a/src/sandbox/isolate_sandbox.h +++ b/src/sandbox/isolate_sandbox.h @@ -5,9 +5,9 @@ #include #include -#include "../helpers/logger.h" +#include "helpers/logger.h" #include "sandbox_base.h" -#include "../config/sandbox_config.h" +#include "config/sandbox_config.h" /** * Class implementing operations with Isolate sandbox. @@ -41,15 +41,15 @@ class isolate_sandbox : public sandbox_base */ isolate_sandbox(std::shared_ptr sandbox_config, sandbox_limits limits, - size_t id, + std::size_t id, const std::string &temp_dir, const std::string &data_dir, std::shared_ptr logger = nullptr); /** * Destructor. 
*/ - virtual ~isolate_sandbox(); - virtual sandbox_results run(const std::string &binary, const std::vector &arguments); + ~isolate_sandbox() override; + sandbox_results run(const std::string &binary, const std::vector &arguments) override; private: /** General sandbox configuration */ @@ -59,7 +59,7 @@ class isolate_sandbox : public sandbox_base /** Logger */ std::shared_ptr logger_; /** Identifier of this isolate's instance. Must be unique on each server. */ - size_t id_; + std::size_t id_; /** Name of isolate binary - defaults "isolate" */ std::string isolate_binary_; /** Path to temporary directory used by sandboxes. Subdir with "id_" value will be created. */ diff --git a/src/sandbox/sandbox_base.h b/src/sandbox/sandbox_base.h index dacaca53..8d9d35ef 100644 --- a/src/sandbox/sandbox_base.h +++ b/src/sandbox/sandbox_base.h @@ -8,9 +8,9 @@ #include #include #include "spdlog/spdlog.h" -#include "../config/sandbox_limits.h" -#include "../config/task_results.h" -#include "../helpers/format.h" +#include "config/sandbox_limits.h" +#include "config/task_results.h" +#include "helpers/format.h" /** @@ -22,9 +22,8 @@ class sandbox_base /** * Destructor. */ - virtual ~sandbox_base() - { - } + virtual ~sandbox_base() = default; + /** * Get sandboxed directory (to copy files inside, ...) */ @@ -32,6 +31,7 @@ class sandbox_base { return sandboxed_dir_; } + /** * Run sandbox. * @param binary Name of binary to run. Must be accessible from inside the sandbox. @@ -61,6 +61,7 @@ class sandbox_exception : public std::exception sandbox_exception() : what_("Generic sandbox exception") { } + /** * Constructor with custom error message. * @param what Custom message. @@ -68,16 +69,16 @@ class sandbox_exception : public std::exception sandbox_exception(const std::string &what) : what_(what) { } + /** * Destructor. */ - virtual ~sandbox_exception() - { - } + ~sandbox_exception() override = default; + /** * Get message describing the issue. 
*/ - virtual const char *what() const noexcept + const char *what() const noexcept override { return what_.c_str(); } diff --git a/src/tasks/create_params.h b/src/tasks/create_params.h index 8271ec69..43637a00 100644 --- a/src/tasks/create_params.h +++ b/src/tasks/create_params.h @@ -3,17 +3,17 @@ #include #include -#include "../config/worker_config.h" -#include "../config/sandbox_config.h" -#include "../config/sandbox_limits.h" -#include "../config/task_metadata.h" +#include "config/worker_config.h" +#include "config/sandbox_config.h" +#include "config/sandbox_limits.h" +#include "config/task_metadata.h" /** data for proper construction of @ref external_task class */ struct create_params { /** unique worker identification on this machine */ std::shared_ptr worker_conf; /** unique integer which means order in config file */ - size_t id; + std::size_t id; /** structure containing information loaded about task */ std::shared_ptr task_meta; /** limits for sandbox */ diff --git a/src/tasks/external_task.cpp b/src/tasks/external_task.cpp index 51d44784..c7cf4068 100644 --- a/src/tasks/external_task.cpp +++ b/src/tasks/external_task.cpp @@ -1,7 +1,7 @@ #include "external_task.h" -#include "../sandbox/isolate_sandbox.h" -#include "../helpers/string_utils.h" -#include "../helpers/filesystem.h" +#include "sandbox/isolate_sandbox.h" +#include "helpers/string_utils.h" +#include "helpers/filesystem.h" #include #include #define BOOST_FILESYSTEM_NO_DEPRECATED @@ -32,10 +32,6 @@ external_task::external_task(const create_params &data) sandbox_check(); } -external_task::~external_task() -{ -} - void external_task::sandbox_check() { bool found = false; @@ -128,7 +124,7 @@ void external_task::results_output_init() } } -fs::path external_task::find_path_outside_sandbox(std::string file) +fs::path external_task::find_path_outside_sandbox(const std::string &file) { return helpers::find_path_outside_sandbox( file, sandbox_config_->chdir, limits_->bound_dirs, evaluation_dir_.string()); @@ -152,10 +148,10 @@ void external_task::get_results_output(std::shared_ptr result) } void external_task::process_results_output( - std::shared_ptr result, fs::path stdout_path, fs::path stderr_path) + const std::shared_ptr &result, const fs::path &stdout_path, const fs::path &stderr_path) { if (sandbox_config_->output) { - size_t max_length = worker_config_->get_max_output_length(); + std::size_t max_length = worker_config_->get_max_output_length(); std::string result_stdout(max_length, 0); std::string result_stderr(max_length, 0); @@ -189,9 +185,9 @@ void external_task::process_results_output( } } -void external_task::process_carboncopy_output(fs::path stdout_path, fs::path stderr_path) +void external_task::process_carboncopy_output(const fs::path &stdout_path, const fs::path &stderr_path) { - size_t max_length = worker_config_->get_max_carboncopy_length(); + std::size_t max_length = worker_config_->get_max_carboncopy_length(); if (!sandbox_config_->carboncopy_stdout.empty()) { std::ifstream infile(stdout_path.string(), std::ios::binary); std::ofstream copy_file(sandbox_config_->carboncopy_stdout, std::ios::binary); @@ -221,7 +217,7 @@ void external_task::process_carboncopy_output(fs::path stdout_path, fs::path std } } -void external_task::make_binary_executable(std::string binary) +void external_task::make_binary_executable(const std::string &binary) { fs::path binary_path; try { diff --git a/src/tasks/external_task.h b/src/tasks/external_task.h index 191b4aab..9c029f26 100644 --- a/src/tasks/external_task.h +++ 
b/src/tasks/external_task.h @@ -5,8 +5,8 @@ #include #include "task_base.h" #include "create_params.h" -#include "../sandbox/sandbox_base.h" -#include "../config/sandbox_limits.h" +#include "sandbox/sandbox_base.h" +#include "config/sandbox_limits.h" /** @@ -31,14 +31,14 @@ class external_task : public task_base /** * Destructor, empty right now. */ - virtual ~external_task(); + ~external_task() override = default; /** * Runs given program and parameters in constructed sandbox. * @return @ref task_results with @a sandbox_status item properly set * @throws sandbox_exception if fatal error occured in sandbox */ - virtual std::shared_ptr run(); + std::shared_ptr run() override; /** * Get sandbox_limits structure, given during construction. @@ -66,13 +66,13 @@ class external_task : public task_base * @param file file pointing inside sandbox * @return path of the directory and the file outside sandbox */ - fs::path find_path_outside_sandbox(std::string file); + fs::path find_path_outside_sandbox(const std::string &file); /** * If binary file provided as argument does not have executable flag, try to set it. * @param binary */ - void make_binary_executable(std::string binary); + void make_binary_executable(const std::string &binary); /** * Initialize output if requested. @@ -83,8 +83,9 @@ class external_task : public task_base * @param result to which stdout and err will be assigned */ void get_results_output(std::shared_ptr result); - void process_results_output(std::shared_ptr result, fs::path stdout_path, fs::path stderr_path); - void process_carboncopy_output(fs::path stdout_path, fs::path stderr_path); + void process_results_output( + const std::shared_ptr &result, const fs::path &stdout_path, const fs::path &stderr_path); + void process_carboncopy_output(const fs::path &stdout_path, const fs::path &stderr_path); /** Worker default configuration */ std::shared_ptr worker_config_; diff --git a/src/tasks/internal/archivate_task.cpp b/src/tasks/internal/archivate_task.cpp index e92449a2..46a9de5e 100644 --- a/src/tasks/internal/archivate_task.cpp +++ b/src/tasks/internal/archivate_task.cpp @@ -1,8 +1,8 @@ #include "archivate_task.h" -#include "../../archives/archivator.h" +#include "archives/archivator.h" -archivate_task::archivate_task(size_t id, std::shared_ptr task_meta) : task_base(id, task_meta) +archivate_task::archivate_task(std::size_t id, std::shared_ptr task_meta) : task_base(id, task_meta) { if (task_meta_->cmd_args.size() != 2) { throw task_exception( @@ -11,11 +11,6 @@ archivate_task::archivate_task(size_t id, std::shared_ptr task_me } -archivate_task::~archivate_task() -{ -} - - std::shared_ptr archivate_task::run() { std::shared_ptr result(new task_results()); diff --git a/src/tasks/internal/archivate_task.h b/src/tasks/internal/archivate_task.h index eede4d5f..7c422979 100644 --- a/src/tasks/internal/archivate_task.h +++ b/src/tasks/internal/archivate_task.h @@ -1,7 +1,7 @@ #ifndef RECODEX_WORKER_INTERNAL_ARCHIVATE_TASK_H #define RECODEX_WORKER_INTERNAL_ARCHIVATE_TASK_H -#include "../task_base.h" +#include "tasks/task_base.h" /** @@ -18,16 +18,16 @@ class archivate_task : public task_base * For more info about archivation see @ref archivator class. * @throws task_exception on invalid number of arguments. */ - archivate_task(size_t id, std::shared_ptr task_meta); + archivate_task(std::size_t id, std::shared_ptr task_meta); /** * Destructor. */ - virtual ~archivate_task(); + ~archivate_task() override = default; /** * Run the action. 
* @return Evaluation results to be pushed back to frontend. */ - virtual std::shared_ptr run(); + std::shared_ptr run() override; }; #endif // RECODEX_WORKER_INTERNAL_ARCHIVATE_TASK_H diff --git a/src/tasks/internal/cp_task.cpp b/src/tasks/internal/cp_task.cpp index 67e72afc..fa89cece 100644 --- a/src/tasks/internal/cp_task.cpp +++ b/src/tasks/internal/cp_task.cpp @@ -7,7 +7,7 @@ namespace fs = boost::filesystem; -cp_task::cp_task(size_t id, std::shared_ptr task_meta) : task_base(id, task_meta) +cp_task::cp_task(std::size_t id, std::shared_ptr task_meta) : task_base(id, task_meta) { if (task_meta_->cmd_args.size() != 2) { throw task_exception( @@ -16,11 +16,6 @@ cp_task::cp_task(size_t id, std::shared_ptr task_meta) : task_bas } -cp_task::~cp_task() -{ -} - - std::shared_ptr cp_task::run() { std::shared_ptr result(new task_results()); diff --git a/src/tasks/internal/cp_task.h b/src/tasks/internal/cp_task.h index f94ccadb..f94f73f3 100644 --- a/src/tasks/internal/cp_task.h +++ b/src/tasks/internal/cp_task.h @@ -1,7 +1,7 @@ #ifndef RECODEX_WORKER_INTERNAL_CP_TASK_H #define RECODEX_WORKER_INTERNAL_CP_TASK_H -#include "../task_base.h" +#include "tasks/task_base.h" /** @@ -18,16 +18,16 @@ class cp_task : public task_base * http://www.boost.org/doc/libs/1_59_0_b1/libs/filesystem/doc/reference.html#copy. * @throws task_exception on invalid number of arguments. */ - cp_task(size_t id, std::shared_ptr task_meta); + cp_task(std::size_t id, std::shared_ptr task_meta); /** * Destructor. */ - virtual ~cp_task(); + ~cp_task() override = default; /** * Run the action. * @return Evaluation results to be pushed back to frontend. */ - virtual std::shared_ptr run(); + std::shared_ptr run() override; }; #endif // RECODEX_WORKER_INTERNAL_CP_TASK_H diff --git a/src/tasks/internal/dump_dir_task.cpp b/src/tasks/internal/dump_dir_task.cpp index 97c13d8a..1e39b511 100644 --- a/src/tasks/internal/dump_dir_task.cpp +++ b/src/tasks/internal/dump_dir_task.cpp @@ -1,7 +1,7 @@ #include #include "dump_dir_task.h" -dump_dir_task::dump_dir_task(size_t id, std::shared_ptr task_meta) : task_base(id, task_meta) +dump_dir_task::dump_dir_task(std::size_t id, std::shared_ptr task_meta) : task_base(id, task_meta) { if (task_meta->cmd_args.size() < 2) { throw task_exception("Wrong number of arguments. 
Required: 2 (1 optional), Actual: " + @@ -9,17 +9,13 @@ dump_dir_task::dump_dir_task(size_t id, std::shared_ptr task_meta } } -dump_dir_task::~dump_dir_task() -{ -} - std::shared_ptr dump_dir_task::run() { auto results = std::make_shared(); fs::path src_root(task_meta_->cmd_args[0]); fs::path dest_root(task_meta_->cmd_args[1]); - auto limit = read_task_arg(task_meta_->cmd_args, 2, 128); + auto limit = read_task_arg(task_meta_->cmd_args, 2, 128); limit *= 1024; // The argument is in kilobytes fs::recursive_directory_iterator directory_iterator(src_root), directory_iterator_end; @@ -48,7 +44,7 @@ std::shared_ptr dump_dir_task::run() } } - size_t size = fs::file_size(path); + std::size_t size = fs::file_size(path); if (size <= limit) { auto return_code = copy_file(path, dest_path); diff --git a/src/tasks/internal/dump_dir_task.h b/src/tasks/internal/dump_dir_task.h index 019dc417..c724a295 100644 --- a/src/tasks/internal/dump_dir_task.h +++ b/src/tasks/internal/dump_dir_task.h @@ -2,7 +2,7 @@ #define RECODEX_WORKER_INTERNAL_CP_DIR_TASK_H #include -#include "../task_base.h" +#include "tasks/task_base.h" namespace fs = boost::filesystem; @@ -21,18 +21,18 @@ class dump_dir_task : public task_base * @a cmd_args entry has 2 or 3 arguments - the source, destination, and optionally a limit * @throws task_exception on invalid number of arguments. */ - dump_dir_task(size_t id, std::shared_ptr task_meta); + dump_dir_task(std::size_t id, std::shared_ptr task_meta); /** * Destructor. */ - virtual ~dump_dir_task(); + ~dump_dir_task() override = default; /** * Run the action. * @return Evaluation results to be pushed back to frontend. */ - virtual std::shared_ptr run(); + std::shared_ptr run() override; private: boost::system::error_code copy_file(const fs::path &src, const fs::path &dest); diff --git a/src/tasks/internal/exists_task.cpp b/src/tasks/internal/exists_task.cpp index 994eac3a..ae987c70 100644 --- a/src/tasks/internal/exists_task.cpp +++ b/src/tasks/internal/exists_task.cpp @@ -7,23 +7,18 @@ namespace fs = boost::filesystem; -exists_task::exists_task(size_t id, std::shared_ptr task_meta) : task_base(id, task_meta) +exists_task::exists_task(std::size_t id, std::shared_ptr task_meta) : task_base(id, task_meta) { if (task_meta_->cmd_args.size() < 2) { throw task_exception("At least two arguments required."); } } -exists_task::~exists_task() -{ -} - - std::shared_ptr exists_task::run() { std::shared_ptr result(new task_results()); try { - for (size_t i = 1; i < task_meta_->cmd_args.size(); ++i) { + for (std::size_t i = 1; i < task_meta_->cmd_args.size(); ++i) { std::string file = task_meta_->cmd_args[i]; if (!fs::exists(file)) { result->status = task_status::FAILED; diff --git a/src/tasks/internal/exists_task.h b/src/tasks/internal/exists_task.h index c76b5dc0..a1391b2d 100644 --- a/src/tasks/internal/exists_task.h +++ b/src/tasks/internal/exists_task.h @@ -1,7 +1,7 @@ #ifndef RECODEX_WORKER_INTERNAL_EXISTS_TASK_H #define RECODEX_WORKER_INTERNAL_EXISTS_TASK_H -#include "../task_base.h" +#include "tasks/task_base.h" /** @@ -17,16 +17,16 @@ class exists_task : public task_base * @a cmd_args entry has at least one argument - names of files/folders which should be checked. * @throws task_exception when wrong arguments provided. */ - exists_task(size_t id, std::shared_ptr task_meta); + exists_task(std::size_t id, std::shared_ptr task_meta); /** * Destructor. */ - virtual ~exists_task(); + ~exists_task() override = default; /** * Run the action. * @return Evaluation results to be pushed back to frontend. 
*/ - virtual std::shared_ptr run(); + std::shared_ptr run() override; }; #endif // RECODEX_WORKER_INTERNAL_EXISTS_TASK_H diff --git a/src/tasks/internal/extract_task.cpp b/src/tasks/internal/extract_task.cpp index 591202ef..f89b14c6 100644 --- a/src/tasks/internal/extract_task.cpp +++ b/src/tasks/internal/extract_task.cpp @@ -1,8 +1,8 @@ #include "extract_task.h" -#include "../../archives/archivator.h" +#include "archives/archivator.h" -extract_task::extract_task(size_t id, std::shared_ptr task_meta) : task_base(id, task_meta) +extract_task::extract_task(std::size_t id, std::shared_ptr task_meta) : task_base(id, task_meta) { if (task_meta_->cmd_args.size() != 2) { throw task_exception( @@ -11,11 +11,6 @@ extract_task::extract_task(size_t id, std::shared_ptr task_meta) } -extract_task::~extract_task() -{ -} - - std::shared_ptr extract_task::run() { std::shared_ptr result(new task_results()); diff --git a/src/tasks/internal/extract_task.h b/src/tasks/internal/extract_task.h index 819dff63..7030ed67 100644 --- a/src/tasks/internal/extract_task.h +++ b/src/tasks/internal/extract_task.h @@ -1,7 +1,7 @@ #ifndef RECODEX_WORKER_INTERNAL_EXTRACT_TASK_H #define RECODEX_WORKER_INTERNAL_EXTRACT_TASK_H -#include "../task_base.h" +#include "tasks/task_base.h" /** @@ -18,16 +18,16 @@ class extract_task : public task_base * For more info about archivation see @ref archivator class. * @throws task_exception on invalid number of arguments. */ - extract_task(size_t id, std::shared_ptr task_meta); + extract_task(std::size_t id, std::shared_ptr task_meta); /** * Destructor. */ - virtual ~extract_task(); + ~extract_task() override = default; /** * Run the action. * @return Evaluation results to be pushed back to frontend. */ - virtual std::shared_ptr run(); + std::shared_ptr run() override; }; #endif // RECODEX_WORKER_INTERNAL_EXTRACT_TASK_H diff --git a/src/tasks/internal/fetch_task.cpp b/src/tasks/internal/fetch_task.cpp index bc1eeecc..61fa5d05 100644 --- a/src/tasks/internal/fetch_task.cpp +++ b/src/tasks/internal/fetch_task.cpp @@ -2,7 +2,7 @@ fetch_task::fetch_task( - size_t id, std::shared_ptr task_meta, std::shared_ptr filemanager) + std::size_t id, std::shared_ptr task_meta, std::shared_ptr filemanager) : task_base(id, task_meta), filemanager_(filemanager) { if (task_meta_->cmd_args.size() != 2) { @@ -12,11 +12,6 @@ fetch_task::fetch_task( } -fetch_task::~fetch_task() -{ -} - - std::shared_ptr fetch_task::run() { std::shared_ptr result(new task_results()); diff --git a/src/tasks/internal/fetch_task.h b/src/tasks/internal/fetch_task.h index 5ae6531b..8522346d 100644 --- a/src/tasks/internal/fetch_task.h +++ b/src/tasks/internal/fetch_task.h @@ -1,8 +1,8 @@ #ifndef RECODEX_WORKER_INTERNAL_FETCH_TASK_H #define RECODEX_WORKER_INTERNAL_FETCH_TASK_H -#include "../task_base.h" -#include "../../fileman/file_manager_interface.h" +#include "tasks/task_base.h" +#include "fileman/file_manager_interface.h" #include @@ -21,16 +21,16 @@ class fetch_task : public task_base * @throws task_exception on invalid number of arguments. */ fetch_task( - size_t id, std::shared_ptr task_meta, std::shared_ptr filemanager); + std::size_t id, std::shared_ptr task_meta, std::shared_ptr filemanager); /** * Destructor. */ - virtual ~fetch_task(); + ~fetch_task() override = default; /** * Run the action. * @return Evaluation results to be pushed back to frontend. */ - virtual std::shared_ptr run(); + std::shared_ptr run() override; private: /** Pointer to filemanager instance. 
*/ diff --git a/src/tasks/internal/mkdir_task.cpp b/src/tasks/internal/mkdir_task.cpp index 5df41dc6..34dac68a 100644 --- a/src/tasks/internal/mkdir_task.cpp +++ b/src/tasks/internal/mkdir_task.cpp @@ -7,17 +7,12 @@ namespace fs = boost::filesystem; -mkdir_task::mkdir_task(size_t id, std::shared_ptr task_meta) : task_base(id, task_meta) +mkdir_task::mkdir_task(std::size_t id, std::shared_ptr task_meta) : task_base(id, task_meta) { if (task_meta_->cmd_args.empty()) { throw task_exception("At least one argument required."); } } -mkdir_task::~mkdir_task() -{ -} - - std::shared_ptr mkdir_task::run() { std::shared_ptr result(new task_results()); diff --git a/src/tasks/internal/mkdir_task.h b/src/tasks/internal/mkdir_task.h index 318a2dc5..d7bec22b 100644 --- a/src/tasks/internal/mkdir_task.h +++ b/src/tasks/internal/mkdir_task.h @@ -1,7 +1,7 @@ #ifndef RECODEX_WORKER_INTERNAL_MKDIR_TASK_H #define RECODEX_WORKER_INTERNAL_MKDIR_TASK_H -#include "../task_base.h" +#include "tasks/task_base.h" /** @@ -17,11 +17,11 @@ class mkdir_task : public task_base * @a cmd_args entry has at least one argument - names of directories to be created. * @throws task_exception when no argument provided. */ - mkdir_task(size_t id, std::shared_ptr task_meta); + mkdir_task(std::size_t id, std::shared_ptr task_meta); /** * Destructor. */ - virtual ~mkdir_task(); + ~mkdir_task() override = default; /** * Run the action. For every created directory the group write and others write permissions * are added to default ones. For more info about directory creation see @@ -29,7 +29,7 @@ class mkdir_task : public task_base * @note If any of directories cannot be created, all already created directories are removed. * @return Evaluation results to be pushed back to frontend. */ - virtual std::shared_ptr run(); + std::shared_ptr run() override; }; #endif // RECODEX_WORKER_INTERNAL_MKDIR_TASK_H diff --git a/src/tasks/internal/rename_task.cpp b/src/tasks/internal/rename_task.cpp index 6f4171af..1a6a4e96 100644 --- a/src/tasks/internal/rename_task.cpp +++ b/src/tasks/internal/rename_task.cpp @@ -7,7 +7,7 @@ namespace fs = boost::filesystem; -rename_task::rename_task(size_t id, std::shared_ptr task_meta) : task_base(id, task_meta) +rename_task::rename_task(std::size_t id, std::shared_ptr task_meta) : task_base(id, task_meta) { if (task_meta_->cmd_args.size() != 2) { throw task_exception( @@ -16,11 +16,6 @@ rename_task::rename_task(size_t id, std::shared_ptr task_meta) : } -rename_task::~rename_task() -{ -} - - std::shared_ptr rename_task::run() { std::shared_ptr result(new task_results()); diff --git a/src/tasks/internal/rename_task.h b/src/tasks/internal/rename_task.h index 1c35c4eb..f54ec8ef 100644 --- a/src/tasks/internal/rename_task.h +++ b/src/tasks/internal/rename_task.h @@ -1,7 +1,7 @@ #ifndef RECODEX_WORKER_INTERNAL_RENAME_TASK_H #define RECODEX_WORKER_INTERNAL_RENAME_TASK_H -#include "../task_base.h" +#include "tasks/task_base.h" /** @@ -21,16 +21,16 @@ class rename_task : public task_base * for more info. * @throws task_exception when no argument provided. */ - rename_task(size_t id, std::shared_ptr task_meta); + rename_task(std::size_t id, std::shared_ptr task_meta); /** * Destructor. */ - virtual ~rename_task(); + ~rename_task() override = default; /** * Run the action. * @return Evaluation results to be pushed back to frontend. 
*/ - virtual std::shared_ptr run(); + std::shared_ptr run() override; }; #endif // RECODEX_WORKER_INTERNAL_RENAME_TASK_H diff --git a/src/tasks/internal/rm_task.cpp b/src/tasks/internal/rm_task.cpp index d701842d..7d127564 100644 --- a/src/tasks/internal/rm_task.cpp +++ b/src/tasks/internal/rm_task.cpp @@ -7,17 +7,12 @@ namespace fs = boost::filesystem; -rm_task::rm_task(size_t id, std::shared_ptr task_meta) : task_base(id, task_meta) +rm_task::rm_task(std::size_t id, std::shared_ptr task_meta) : task_base(id, task_meta) { if (task_meta_->cmd_args.empty()) { throw task_exception("At least one argument required."); } } -rm_task::~rm_task() -{ -} - - std::shared_ptr rm_task::run() { std::shared_ptr result(new task_results()); diff --git a/src/tasks/internal/rm_task.h b/src/tasks/internal/rm_task.h index daa3e58d..b5a73018 100644 --- a/src/tasks/internal/rm_task.h +++ b/src/tasks/internal/rm_task.h @@ -1,7 +1,7 @@ #ifndef RECODEX_WORKER_INTERNAL_RM_TASK_H #define RECODEX_WORKER_INTERNAL_RM_TASK_H -#include "../task_base.h" +#include "tasks/task_base.h" /** @@ -17,18 +17,18 @@ class rm_task : public task_base * @a cmd_args entry has at least one argument - names of files and directories to be removed. * @throws task_exception when no argument provided. */ - rm_task(size_t id, std::shared_ptr task_meta); + rm_task(std::size_t id, std::shared_ptr task_meta); /** * Destructor. */ - virtual ~rm_task(); + ~rm_task() override = default; /** * Run the action. It tries to delete all entries first. When any of items cannot be deleted, * exception is throuwn, otherwise normal result is returned. For more info about removing function see * http://www.boost.org/doc/libs/1_59_0_b1/libs/filesystem/doc/reference.html#remove_all. * @return Evaluation results to be pushed back to frontend. */ - virtual std::shared_ptr run(); + std::shared_ptr run() override; }; #endif // RECODEX_WORKER_INTERNAL_RM_TASK_H diff --git a/src/tasks/internal/truncate_task.cpp b/src/tasks/internal/truncate_task.cpp index 43d53007..22934294 100644 --- a/src/tasks/internal/truncate_task.cpp +++ b/src/tasks/internal/truncate_task.cpp @@ -3,7 +3,7 @@ namespace fs = boost::filesystem; -truncate_task::truncate_task(size_t id, std::shared_ptr task_meta) : task_base(id, task_meta) +truncate_task::truncate_task(std::size_t id, std::shared_ptr task_meta) : task_base(id, task_meta) { if (task_meta->cmd_args.size() < 2) { throw task_exception( @@ -11,16 +11,13 @@ truncate_task::truncate_task(size_t id, std::shared_ptr task_meta } } -truncate_task::~truncate_task() -{ -} std::shared_ptr truncate_task::run() { auto results = std::make_shared(); fs::path file(task_meta_->cmd_args[0]); - auto limit = read_task_arg(task_meta_->cmd_args, 1, 128); + auto limit = read_task_arg(task_meta_->cmd_args, 1, 128); limit *= 1024; if (fs::file_size(file) > limit) { diff --git a/src/tasks/internal/truncate_task.h b/src/tasks/internal/truncate_task.h index 55d6b1b5..c362f1bf 100644 --- a/src/tasks/internal/truncate_task.h +++ b/src/tasks/internal/truncate_task.h @@ -1,7 +1,7 @@ #ifndef RECODEX_WORKER_TRUNCATE_TASK_H #define RECODEX_WORKER_TRUNCATE_TASK_H -#include "../task_base.h" +#include "tasks/task_base.h" class truncate_task : public task_base @@ -14,11 +14,16 @@ class truncate_task : public task_base * @a cmd_args entry has just 2 arguments - the name of the file to be truncated and the desired size * @throws task_exception on invalid number of arguments. 
*/ - truncate_task(size_t id, std::shared_ptr task_meta); - - virtual ~truncate_task(); - - virtual std::shared_ptr run(); + truncate_task(std::size_t id, std::shared_ptr task_meta); + /** + * Destructor. + */ + ~truncate_task() override = default; + /** + * Run the action. + * @return Evaluation results to be pushed back to frontend. + */ + std::shared_ptr run() override; }; diff --git a/src/tasks/root_task.cpp b/src/tasks/root_task.cpp index 89fa5622..4073db21 100644 --- a/src/tasks/root_task.cpp +++ b/src/tasks/root_task.cpp @@ -1,10 +1,6 @@ #include "root_task.h" -root_task::root_task(size_t id, std::shared_ptr task_meta) : task_base(id, task_meta) -{ -} - -root_task::~root_task() +root_task::root_task(std::size_t id, std::shared_ptr task_meta) : task_base(id, task_meta) { } diff --git a/src/tasks/root_task.h b/src/tasks/root_task.h index 78dc6c8a..7879b6c0 100644 --- a/src/tasks/root_task.h +++ b/src/tasks/root_task.h @@ -22,17 +22,17 @@ class root_task : public task_base * @param id Unique identificator of load order of tasks. * @param task_meta Variable containing further info about task. */ - root_task(size_t id, std::shared_ptr task_meta = std::make_shared()); + root_task(std::size_t id, std::shared_ptr task_meta = std::make_shared()); /** * Empty destructor. */ - virtual ~root_task(); + ~root_task() override = default; /** * Empty function. Has to be stated for completeness. * @return Always @a nullptr. */ - virtual std::shared_ptr run(); + std::shared_ptr run() override; }; #endif // RECODEX_WORKER_FAKE_TASK_HPP diff --git a/src/tasks/task_base.cpp b/src/tasks/task_base.cpp index df93945c..9947c333 100644 --- a/src/tasks/task_base.cpp +++ b/src/tasks/task_base.cpp @@ -1,14 +1,10 @@ #include "task_base.h" -task_base::task_base(size_t id, std::shared_ptr task_meta) +task_base::task_base(std::size_t id, std::shared_ptr task_meta) : id_(id), task_meta_(task_meta), execute_(true) { } -task_base::~task_base() -{ -} - void task_base::add_children(std::shared_ptr add) { if (add == nullptr) { return; } diff --git a/src/tasks/task_base.h b/src/tasks/task_base.h index 3ef78cdc..f428972a 100644 --- a/src/tasks/task_base.h +++ b/src/tasks/task_base.h @@ -6,8 +6,8 @@ #include #include #include -#include "../config/task_results.h" -#include "../config/task_metadata.h" +#include "config/task_results.h" +#include "config/task_metadata.h" /** @@ -32,11 +32,11 @@ class task_base * @param id Unique identificator of load order of tasks. * @param task_meta Variable containing further info about task. */ - task_base(size_t id, std::shared_ptr task_meta); + task_base(std::size_t id, std::shared_ptr task_meta); /** * Virtual destructor. */ - virtual ~task_base(); + virtual ~task_base() = default; /** * This method runs operation which this task is supposed to do. @@ -70,7 +70,7 @@ class task_base * This number expresses order in job configuration. * @return Task's identification number. */ - size_t get_id(); + std::size_t get_id(); /** * Unique task ID which was stated in job configuration. * @return Unique textual description of current task. @@ -81,7 +81,7 @@ class task_base * Lower number = higher priority. * @return Priority. */ - size_t get_priority(); + std::size_t get_priority(); /** * Get failing policy. If @a true than failure of this task will cause * mmediate exit of job evaluation. @@ -129,7 +129,7 @@ class task_base protected: /** Unique integer ID of task. */ - size_t id_; + std::size_t id_; /** Information about this task loaded from configuration file. 
*/ std::shared_ptr task_meta_; /** If true task can be executed safely, otherwise its not wise. */ @@ -154,6 +154,7 @@ class task_exception : public std::exception task_exception() : what_("Generic task exception") { } + /** * Exception with some brief description. * @param what textual description of a problem @@ -161,17 +162,17 @@ class task_exception : public std::exception task_exception(const std::string &what) : what_(what) { } + /** * Virtual destructor. */ - virtual ~task_exception() - { - } + ~task_exception() override = default; + /** * Return description of this exception. * @return Cause description as C string. */ - virtual const char *what() const noexcept + const char *what() const noexcept override { return what_.c_str(); } @@ -182,7 +183,7 @@ class task_exception : public std::exception }; template -T read_task_arg(const std::vector &args, const size_t index, const T &default_value = T()) +T read_task_arg(const std::vector &args, const std::size_t index, const T &default_value = T()) { if (index >= args.size()) { return default_value; } @@ -207,7 +208,7 @@ class task_compare * @param b Second task to compare. * @return @a true if parameter a is lesser than b */ - bool operator()(std::shared_ptr a, std::shared_ptr b) + bool operator()(const std::shared_ptr &a, const std::shared_ptr &b) { if (a->get_priority() > b->get_priority()) { return true; diff --git a/src/tasks/task_factory.cpp b/src/tasks/task_factory.cpp index b23112f5..89427445 100644 --- a/src/tasks/task_factory.cpp +++ b/src/tasks/task_factory.cpp @@ -5,11 +5,7 @@ task_factory::task_factory(std::shared_ptr fileman) : fi { } -task_factory::~task_factory() -{ -} - -std::shared_ptr task_factory::create_internal_task(size_t id, std::shared_ptr task_meta) +std::shared_ptr task_factory::create_internal_task(std::size_t id, std::shared_ptr task_meta) { std::shared_ptr task; diff --git a/src/tasks/task_factory.h b/src/tasks/task_factory.h index bf9b83db..25dc8959 100644 --- a/src/tasks/task_factory.h +++ b/src/tasks/task_factory.h @@ -15,7 +15,7 @@ #include "internal/rename_task.h" #include "internal/rm_task.h" #include "internal/exists_task.h" -#include "../fileman/file_manager_interface.h" +#include "fileman/file_manager_interface.h" /** @@ -33,7 +33,7 @@ class task_factory : public task_factory_interface /** * Virtual destructor */ - virtual ~task_factory(); + ~task_factory() override = default; /** * Create internal task. This could be one of predefined operations like move or copy file, @@ -46,15 +46,15 @@ class task_factory : public task_factory_interface * @return Pointer to task's base type holding proper task type. If requested task type is unknown, @a nullptr * is returned. */ - virtual std::shared_ptr create_internal_task( - size_t id, std::shared_ptr task_meta = nullptr); + std::shared_ptr create_internal_task( + std::size_t id, std::shared_ptr task_meta = nullptr) override; /** * Created task which will run in sandboxed environment. * @param data Structure holding creating parameters for external (sandboxed) tasks. * @return Pointer to task's base type holding proper task type. */ - virtual std::shared_ptr create_sandboxed_task(const create_params &data); + std::shared_ptr create_sandboxed_task(const create_params &data) override; private: /** Pointer to given file manager instance. 
*/ diff --git a/src/tasks/task_factory_interface.h b/src/tasks/task_factory_interface.h index a0feb7d4..f7baa49d 100644 --- a/src/tasks/task_factory_interface.h +++ b/src/tasks/task_factory_interface.h @@ -4,7 +4,7 @@ #include #include "task_base.h" #include "create_params.h" -#include "../fileman/file_manager_interface.h" +#include "fileman/file_manager_interface.h" /** @@ -16,9 +16,7 @@ class task_factory_interface /** * Virtual destructor for proper destruction of inherited classes. */ - virtual ~task_factory_interface() - { - } + virtual ~task_factory_interface() = default; /** * Create internal task. This could be one of predefined operations like move or copy file, @@ -29,7 +27,7 @@ class task_factory_interface * @return Pointer to task's base type holding proper task type. */ virtual std::shared_ptr create_internal_task( - size_t id, std::shared_ptr task_meta = nullptr) = 0; + std::size_t id, std::shared_ptr task_meta = nullptr) = 0; /** * Created task which will run in sandboxed environment. diff --git a/src/worker_core.cpp b/src/worker_core.cpp index 78204c88..f3a14bd2 100644 --- a/src/worker_core.cpp +++ b/src/worker_core.cpp @@ -105,7 +105,7 @@ void worker_core::load_config() return; } -void worker_core::force_exit(std::string msg) +void worker_core::force_exit(const std::string &msg) { // write to log if (msg != "") { diff --git a/src/worker_core.h b/src/worker_core.h index 10b0491c..7dc77c08 100644 --- a/src/worker_core.h +++ b/src/worker_core.h @@ -85,7 +85,7 @@ class worker_core * Exit whole application with return code 1. * @param msg string which is copied to stderr and logger if initialized. */ - void force_exit(std::string msg = ""); + void force_exit(const std::string &msg = ""); /** * Parse cmd line params given in constructor. 
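The internal tasks above (dump_dir_task, truncate_task) read their size limit in kilobytes through the read_task_arg helper declared in src/tasks/task_base.h, which returns a caller-supplied default when the positional argument is missing. A rough sketch of that pattern follows; the helper name read_arg_or, the istringstream conversion, and main() are assumptions for illustration, not the worker's exact implementation.

// Sketch of an optional positional-argument reader with a default value,
// in the spirit of read_task_arg as used by dump_dir_task and truncate_task.
#include <cstddef>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

template <typename T>
T read_arg_or(const std::vector<std::string> &args, const std::size_t index, const T &default_value = T())
{
    if (index >= args.size()) { return default_value; }
    // Conversion strategy is an assumption; the real helper may parse differently.
    std::istringstream iss(args[index]);
    T value;
    if (!(iss >> value)) { return default_value; }
    return value;
}

int main()
{
    std::vector<std::string> cmd_args = {"/box/output", "results/dump"}; // hypothetical task arguments
    // The optional third argument (index 2) would be a limit in kilobytes; default to 128.
    auto limit = read_arg_or<std::size_t>(cmd_args, 2, 128);
    limit *= 1024; // kilobytes -> bytes, as dump_dir_task does
    std::cout << "limit: " << limit << " bytes" << std::endl; // prints: limit: 131072 bytes
    return 0;
}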
diff --git a/tests/archivator.cpp b/tests/archivator.cpp index 7ec2aec6..0ff77b56 100644 --- a/tests/archivator.cpp +++ b/tests/archivator.cpp @@ -2,7 +2,7 @@ #include #include -#include "../src/archives/archivator.h" +#include "archives/archivator.h" #define BOOST_FILESYSTEM_NO_DEPRECATED #define BOOST_NO_CXX11_SCOPED_ENUMS @@ -97,7 +97,7 @@ TEST(Archivator, CompressAbsolutePath) ASSERT_NO_THROW(archivator::decompress(result_path.string(), fs::temp_directory_path().string())); ASSERT_TRUE(fs::is_regular_file(extracted_path / "test_file.txt")); - ASSERT_EQ((size_t) 7, fs::file_size(extracted_path / "test_file.txt")); + ASSERT_EQ((std::size_t) 7, fs::file_size(extracted_path / "test_file.txt")); fs::remove_all(archive_path); fs::remove_all(extracted_path); diff --git a/tests/broker_connection.cpp b/tests/broker_connection.cpp index b9b6a8b4..d8a2c55a 100644 --- a/tests/broker_connection.cpp +++ b/tests/broker_connection.cpp @@ -5,8 +5,8 @@ #include #include -#include "../src/config/worker_config.h" -#include "../src/broker_connection.h" +#include "config/worker_config.h" +#include "broker_connection.h" #include "mocks.h" using namespace testing; diff --git a/tests/build_job_metadata.cpp b/tests/build_job_metadata.cpp index c5516da5..4bc84064 100644 --- a/tests/build_job_metadata.cpp +++ b/tests/build_job_metadata.cpp @@ -1,7 +1,7 @@ #include #include -#include "../src/helpers/config.h" +#include "helpers/config.h" using namespace testing; using namespace std; diff --git a/tests/cache_manager.cpp b/tests/cache_manager.cpp index 67db4bdb..f4326283 100644 --- a/tests/cache_manager.cpp +++ b/tests/cache_manager.cpp @@ -5,7 +5,7 @@ #include #include -#include "../src/fileman/cache_manager.h" +#include "fileman/cache_manager.h" using namespace testing; using namespace std; diff --git a/tests/dump_dir_task.cpp b/tests/dump_dir_task.cpp index f8c1d4f0..a9e98e20 100644 --- a/tests/dump_dir_task.cpp +++ b/tests/dump_dir_task.cpp @@ -3,7 +3,7 @@ #include #include #include -#include "../src/tasks/internal/dump_dir_task.h" +#include "tasks/internal/dump_dir_task.h" namespace fs = boost::filesystem; @@ -41,10 +41,10 @@ class exists_task_test : public ::testing::Test fs::remove(target); } - void create_file(const fs::path &path, size_t size) + void create_file(const fs::path &path, std::size_t size) { std::ofstream f(path.string()); - for (size_t i = 0; i < size; i++) { f << "a"; } + for (std::size_t i = 0; i < size; i++) { f << "a"; } f.close(); } }; diff --git a/tests/exists_task.cpp b/tests/exists_task.cpp index 36af3bd7..1ac30fc5 100644 --- a/tests/exists_task.cpp +++ b/tests/exists_task.cpp @@ -3,7 +3,7 @@ #include #include #include -#include "../src/tasks/internal/exists_task.h" +#include "tasks/internal/exists_task.h" namespace fs = boost::filesystem; diff --git a/tests/fallback_file_manager.cpp b/tests/fallback_file_manager.cpp index 867041a6..e77f48fa 100644 --- a/tests/fallback_file_manager.cpp +++ b/tests/fallback_file_manager.cpp @@ -6,7 +6,7 @@ #include #include "mocks.h" -#include "../src/fileman/fallback_file_manager.h" +#include "fileman/fallback_file_manager.h" using namespace testing; using namespace std; diff --git a/tests/filesystem.cpp b/tests/filesystem.cpp index 4b3177ed..59d1dc96 100644 --- a/tests/filesystem.cpp +++ b/tests/filesystem.cpp @@ -1,7 +1,7 @@ #include #include -#include "../src/helpers/filesystem.h" +#include "helpers/filesystem.h" typedef std::tuple bound_dirs_tuple; typedef std::vector bound_dirs_type; diff --git a/tests/http_manager.cpp b/tests/http_manager.cpp 
diff --git a/tests/http_manager.cpp b/tests/http_manager.cpp
index 12936a76..33553a74 100644
--- a/tests/http_manager.cpp
+++ b/tests/http_manager.cpp
@@ -8,7 +8,7 @@
 #define BOOST_NO_CXX11_SCOPED_ENUMS
 #include
 
-#include "../src/fileman/http_manager.h"
+#include "fileman/http_manager.h"
 
 using namespace testing;
 using namespace std;
diff --git a/tests/isolate_sandbox.cpp b/tests/isolate_sandbox.cpp
index 36692be1..6a90a1a7 100644
--- a/tests/isolate_sandbox.cpp
+++ b/tests/isolate_sandbox.cpp
@@ -10,7 +10,7 @@ namespace fs = boost::filesystem;
 #include
 #include
 
-#include "../src/sandbox/isolate_sandbox.h"
+#include "sandbox/isolate_sandbox.h"
 
 TEST(IsolateSandbox, BasicCreation)
 {
diff --git a/tests/job.cpp b/tests/job.cpp
index ebf88efc..b271b4e1 100644
--- a/tests/job.cpp
+++ b/tests/job.cpp
@@ -10,13 +10,13 @@ using namespace boost::filesystem;
 
 #include "mocks.h"
-#include "../src/job/job.h"
-#include "../src/job/job_exception.h"
-#include "../src/helpers/config.h"
+#include "job/job.h"
+#include "job/job_exception.h"
+#include "helpers/config.h"
 
-#include "../src/tasks/task_factory_interface.h"
-#include "../src/tasks/external_task.h"
-#include "../src/config/worker_config.h"
+#include "tasks/task_factory_interface.h"
+#include "tasks/external_task.h"
+#include "config/worker_config.h"
 
 using namespace testing;
 
@@ -115,7 +115,7 @@ std::shared_ptr get_worker_default_meta()
 }
 
 std::shared_ptr get_simple_task(
-	const std::string &name, size_t priority, const std::vector &deps)
+	const std::string &name, std::size_t priority, const std::vector &deps)
 {
	std::shared_ptr task = std::make_shared();
	task->task_id = name;
@@ -564,7 +564,7 @@ TEST(job_test, correctly_executed_job)
	job_meta->tasks.push_back(get_simple_task("F", 3, {"D"}));
	job_meta->tasks.push_back(get_simple_task("G", 7, {"F"}));
 
-	size_t tasks_count = job_meta->tasks.size() + 1;
+	std::size_t tasks_count = job_meta->tasks.size() + 1;
 
	auto worker_conf = std::make_shared();
	auto default_limits = get_default_limits();
@@ -581,7 +581,7 @@ TEST(job_test, correctly_executed_job)
	auto failed_results = std::make_shared();
	failed_results->status = task_status::FAILED;
 
-	for (size_t i = 1; i < tasks_count; i++) {
+	for (std::size_t i = 1; i < tasks_count; i++) {
		mock_tasks.push_back(std::make_shared(i, job_meta->tasks[i - 1]));
	}
	{
@@ -589,7 +589,7 @@ TEST(job_test, correctly_executed_job)
		// expect root task to be created
		EXPECT_CALL((*factory), create_internal_task(0, _)).WillOnce(Return(empty_task));
 
-		for (size_t i = 1; i < tasks_count; i++) {
+		for (std::size_t i = 1; i < tasks_count; i++) {
			// expect tasks A to G to be created
			EXPECT_CALL((*factory), create_internal_task(i, job_meta->tasks[i - 1]))
				.WillOnce(Return(mock_tasks[i - 1]));
@@ -604,7 +604,7 @@ TEST(job_test, correctly_executed_job)
		EXPECT_CALL(*progress_callback, job_ended(_)).Times(1);
	}
	{ // ! out of sequence calling
-		for (size_t i = 1; i < tasks_count - 2; i++) {
+		for (std::size_t i = 1; i < tasks_count - 2; i++) {
			// expect tasks A to G will be executed each at once
			EXPECT_CALL(*mock_tasks[i - 1], run()).WillOnce(Return(empty_results));
		}
@@ -677,7 +677,7 @@ TEST(job_test, internal_error_job)
	failed_results->status = task_status::FAILED;
	failed_results->error_message = "failed internal exec";
 
-	for (size_t i = 0; i < job_meta->tasks.size(); i++) {
+	for (std::size_t i = 0; i < job_meta->tasks.size(); i++) {
		mock_tasks.push_back(std::make_shared(i + 1, job_meta->tasks[i]));
	}
 
@@ -686,7 +686,7 @@ TEST(job_test, internal_error_job)
		// expect root task to be created
		EXPECT_CALL((*factory), create_internal_task(0, _)).WillOnce(Return(empty_task));
 
-		for (size_t i = 0; i < mock_tasks.size(); i++) {
+		for (std::size_t i = 0; i < mock_tasks.size(); i++) {
			// expect tasks to be created
			EXPECT_CALL((*factory), create_internal_task(i + 1, job_meta->tasks[i])).WillOnce(Return(mock_tasks[i]));
		}
diff --git a/tests/job_config.cpp b/tests/job_config.cpp
index 77aa21f0..148cd4ad 100644
--- a/tests/job_config.cpp
+++ b/tests/job_config.cpp
@@ -8,7 +8,7 @@
 #include
 using namespace boost::filesystem;
 
-#include "../src/helpers/config.h"
+#include "helpers/config.h"
 
 using namespace helpers;
diff --git a/tests/job_receiver.cpp b/tests/job_receiver.cpp
index faa43978..da5db65d 100644
--- a/tests/job_receiver.cpp
+++ b/tests/job_receiver.cpp
@@ -5,9 +5,9 @@
 #include
 
 #include "mocks.h"
-#include "../src/job/job_receiver.h"
-#include "../src/eval_request.h"
-#include "../src/connection_proxy.h"
+#include "job/job_receiver.h"
+#include "eval_request.h"
+#include "connection_proxy.h"
 
 using namespace testing;
diff --git a/tests/mocks.h b/tests/mocks.h
index 580d4c83..659e5b53 100644
--- a/tests/mocks.h
+++ b/tests/mocks.h
@@ -12,12 +12,12 @@
 #include
 #include
 
-#include "../src/config/worker_config.h"
-#include "../src/broker_connection.h"
-#include "../src/fileman/file_manager_interface.h"
-#include "../src/tasks/task_factory_interface.h"
-#include "../src/job/progress_callback_interface.h"
-#include "../src/job/job_evaluator_interface.h"
+#include "config/worker_config.h"
+#include "broker_connection.h"
+#include "fileman/file_manager_interface.h"
+#include "tasks/task_factory_interface.h"
+#include "job/progress_callback_interface.h"
+#include "job/job_evaluator_interface.h"
 
 using namespace testing;
 
@@ -37,10 +37,10 @@ class mock_worker_config : public worker_config
	MOCK_CONST_METHOD0(get_headers, const worker_config::header_map_t &());
	MOCK_CONST_METHOD0(get_broker_ping_interval, std::chrono::milliseconds());
	MOCK_CONST_METHOD0(get_hwgroup, const std::string &());
-	MOCK_CONST_METHOD0(get_worker_id, size_t());
+	MOCK_CONST_METHOD0(get_worker_id, std::size_t());
	MOCK_CONST_METHOD0(get_worker_description, const std::string &());
	MOCK_CONST_METHOD0(get_limits, const sandbox_limits &());
-	MOCK_CONST_METHOD0(get_max_output_length, size_t());
+	MOCK_CONST_METHOD0(get_max_output_length, std::size_t());
 };
 
 /**
@@ -85,7 +85,7 @@ class mock_task_factory : public task_factory_interface
	virtual ~mock_task_factory()
	{
	}
 
-	MOCK_METHOD2(create_internal_task, std::shared_ptr(size_t, std::shared_ptr));
+	MOCK_METHOD2(create_internal_task, std::shared_ptr(std::size_t, std::shared_ptr));
	MOCK_METHOD1(create_sandboxed_task, std::shared_ptr(const create_params &));
 };
 
@@ -95,11 +95,11 @@ class mock_task : public task_base
 {
 public:
-	mock_task(size_t id, std::string str_id = "") : task_base(id, std::make_shared())
+	mock_task(std::size_t id, std::string str_id = "") : task_base(id, std::make_shared())
	{
		this->task_meta_->task_id = str_id;
	}
-	mock_task(size_t id, std::shared_ptr meta) : task_base(id, meta)
+	mock_task(std::size_t id, std::shared_ptr meta) : task_base(id, meta)
	{
	}
	mock_task() : mock_task(0)
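For context on the mocks.h changes above: the old-style googlemock macros keep working unchanged when only the return type spelling moves from size_t to std::size_t. A self-contained sketch, assuming googletest/googlemock only; counter_interface and mock_counter are illustrative names, not project classes:

#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include <cstddef>

// Hypothetical interface with a size-typed getter.
class counter_interface
{
public:
	virtual ~counter_interface() = default;
	virtual std::size_t get_count() const = 0;
};

// Old-style gmock macro: MOCK_CONST_METHOD0(name, return_type()).
class mock_counter : public counter_interface
{
public:
	MOCK_CONST_METHOD0(get_count, std::size_t());
};

TEST(MockSketch, ReturnsSizeT)
{
	mock_counter counter;
	// The expectation and the assertion both use std::size_t consistently.
	EXPECT_CALL(counter, get_count()).WillOnce(testing::Return((std::size_t) 42));
	EXPECT_EQ((std::size_t) 42, counter.get_count());
}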
diff --git a/tests/progress_callback.cpp b/tests/progress_callback.cpp
index e97014f2..21f887d5 100644
--- a/tests/progress_callback.cpp
+++ b/tests/progress_callback.cpp
@@ -3,8 +3,8 @@
 #include
 #include
 
-#include "../src/connection_proxy.h"
-#include "../src/job/progress_callback.h"
+#include "connection_proxy.h"
+#include "job/progress_callback.h"
 
 using namespace testing;
diff --git a/tests/string_utils.cpp b/tests/string_utils.cpp
index cd02e385..6b4acaa5 100644
--- a/tests/string_utils.cpp
+++ b/tests/string_utils.cpp
@@ -1,7 +1,7 @@
 #include
 #include
 
-#include "../src/helpers/string_utils.h"
+#include "helpers/string_utils.h"
 
 
 void test_filter_scenario(std::string expected, std::string text)
diff --git a/tests/tasks.cpp b/tests/tasks.cpp
index b7793ef9..962177f6 100644
--- a/tests/tasks.cpp
+++ b/tests/tasks.cpp
@@ -2,19 +2,19 @@
 #include
 #include
 
-#include "../src/tasks/internal/archivate_task.h"
-#include "../src/tasks/internal/cp_task.h"
-#include "../src/tasks/internal/extract_task.h"
-#include "../src/tasks/internal/mkdir_task.h"
-#include "../src/tasks/internal/rename_task.h"
-#include "../src/tasks/internal/rm_task.h"
-#include "../src/tasks/internal/fetch_task.h"
-#include "../src/tasks/internal/exists_task.h"
-#include "../src/tasks/external_task.h"
-#include "../src/tasks/root_task.h"
-#include "../src/tasks/task_factory.h"
-#include "../src/tasks/create_params.h"
-#include "../src/config/sandbox_config.h"
+#include "tasks/internal/archivate_task.h"
+#include "tasks/internal/cp_task.h"
+#include "tasks/internal/extract_task.h"
+#include "tasks/internal/mkdir_task.h"
+#include "tasks/internal/rename_task.h"
+#include "tasks/internal/rm_task.h"
+#include "tasks/internal/fetch_task.h"
+#include "tasks/internal/exists_task.h"
+#include "tasks/external_task.h"
+#include "tasks/root_task.h"
+#include "tasks/task_factory.h"
+#include "tasks/create_params.h"
+#include "config/sandbox_config.h"
 
 #include "mocks.h"
 
@@ -138,7 +138,7 @@ TEST(Tasks, InternalExistsTask)
 class test_task_base : public task_base
 {
 public:
-	test_task_base(size_t id, std::shared_ptr task_meta) : task_base(id, task_meta)
+	test_task_base(std::size_t id, std::shared_ptr task_meta) : task_base(id, task_meta)
	{
	}
	virtual ~test_task_base()
@@ -157,8 +157,8 @@ TEST(Tasks, TaskBase)
	std::vector dep{"dep1", "dep2", "dep3"};
	EXPECT_EQ(base.get_dependencies(), dep);
	EXPECT_EQ(base.get_fatal_failure(), false);
-	EXPECT_EQ(base.get_id(), static_cast(1));
-	EXPECT_EQ(base.get_priority(), static_cast(3));
+	EXPECT_EQ(base.get_id(), static_cast(1));
+	EXPECT_EQ(base.get_priority(), static_cast(3));
	EXPECT_EQ(base.get_task_id(), "id2");
	EXPECT_TRUE(base.get_children().empty());
	auto children = std::shared_ptr(new test_task_base(2, get_task_meta()));
diff --git a/tests/topological_sort.cpp b/tests/topological_sort.cpp
index da5e0cf2..855b913d 100644
--- a/tests/topological_sort.cpp
+++ b/tests/topological_sort.cpp
@@ -1,13 +1,13 @@
 #include
 #include
 
-#include "../src/helpers/topological_sort.h"
+#include "helpers/topological_sort.h"
 
 class test_task : public task_base
 {
 public:
	test_task() = delete;
-	test_task(size_t id, std::shared_ptr task_meta = std::make_shared())
+	test_task(std::size_t id, std::shared_ptr task_meta = std::make_shared())
		: task_base(id, task_meta)
	{
	}
@@ -26,7 +26,7 @@ using namespace std;
 TEST(topological_sort_test, top_sort_1)
 {
	// initialization
-	size_t id = 0;
+	std::size_t id = 0;
	vector> result;
	vector> expected;
 
@@ -84,8 +84,8 @@ TEST(topological_sort_test, top_sort_1)
 TEST(topological_sort_test, top_sort_2)
 {
	// initialization
-	size_t id = 0;
-	map eff_ind;
+	std::size_t id = 0;
+	map eff_ind;
	vector> result;
	vector> expected;
 
@@ -153,7 +153,7 @@ TEST(topological_sort_test, top_sort_2)
 TEST(topological_sort_test, top_sort_3)
 {
	// initialization
-	size_t id = 0;
+	std::size_t id = 0;
	vector> result;
	vector> expected;
 
@@ -224,7 +224,7 @@ TEST(topological_sort_test, top_sort_3)
 TEST(topological_sort_test, top_sort_4)
 {
	// initialization
-	size_t id = 0;
+	std::size_t id = 0;
	vector> result;
	vector> expected;
 
@@ -276,7 +276,7 @@ TEST(topological_sort_test, top_sort_4)
 TEST(topological_sort_test, top_sort_5)
 {
	// initialization
-	size_t id = 0;
+	std::size_t id = 0;
	vector> result;
	vector> expected_result;
 
@@ -340,7 +340,7 @@ TEST(topological_sort_test, top_sort_5)
 TEST(topological_sort_test, top_sort_6)
 {
	// initialization
-	size_t id = 0;
+	std::size_t id = 0;
	vector> result;
	vector> expected_result;
 
@@ -405,7 +405,7 @@ TEST(topological_sort_test, top_sort_6)
 TEST(topological_sort_test, top_sort_cycle_1)
 {
	// initialization
-	size_t id = 0;
+	std::size_t id = 0;
	vector> result;
	vector> expected_result;
 
@@ -459,7 +459,7 @@ TEST(topological_sort_test, top_sort_cycle_1)
 TEST(topological_sort_test, top_sort_cycle_2)
 {
	// initialization
-	size_t id = 0;
+	std::size_t id = 0;
	vector> result;
	vector> expected_result;
diff --git a/tests/truncate_task.cpp b/tests/truncate_task.cpp
index a587183e..abf91e1a 100644
--- a/tests/truncate_task.cpp
+++ b/tests/truncate_task.cpp
@@ -3,7 +3,7 @@
 #include
 #include
 #include
-#include "../src/tasks/internal/truncate_task.h"
+#include "tasks/internal/truncate_task.h"
 
 namespace fs = boost::filesystem;
 
@@ -32,10 +32,10 @@ class truncate_task_test : public ::testing::Test
		fs::remove(root);
	}
 
-	void create_file(const fs::path &path, size_t size)
+	void create_file(const fs::path &path, std::size_t size)
	{
		std::ofstream f(path.string());
-		for (size_t i = 0; i < size; i++) { f << "a"; }
+		for (std::size_t i = 0; i < size; i++) { f << "a"; }
		f.close();
	}
 };
diff --git a/tests/worker_config.cpp b/tests/worker_config.cpp
index 8d3a31ee..4d585c65 100644
--- a/tests/worker_config.cpp
+++ b/tests/worker_config.cpp
@@ -1,7 +1,7 @@
 #include
 #include
 
-#include "../src/config/worker_config.h"
+#include "config/worker_config.h"
 
 TEST(worker_config, load_yaml_basic)
 {
@@ -98,7 +98,7 @@ TEST(worker_config, load_yaml_basic)
	expected_filemans.push_back(expected_fileman);
 
	ASSERT_STREQ("tcp://localhost:1234", config.get_broker_uri().c_str());
-	ASSERT_EQ((size_t) 8, config.get_worker_id());
+	ASSERT_EQ((std::size_t) 8, config.get_worker_id());
	ASSERT_EQ("/tmp/working_dir", config.get_working_directory());
	ASSERT_STREQ("/tmp/isoeval/cache", config.get_cache_dir().c_str());
	ASSERT_EQ(expected_headers, config.get_headers());
@@ -107,9 +107,9 @@ TEST(worker_config, load_yaml_basic)
	ASSERT_EQ(expected_log, config.get_log_config());
	ASSERT_EQ(expected_filemans, config.get_filemans_configs());
	ASSERT_EQ(std::chrono::milliseconds(5487), config.get_broker_ping_interval());
-	ASSERT_EQ((size_t) 1245, config.get_max_broker_liveness());
-	ASSERT_EQ((size_t) 1024, config.get_max_output_length());
-	ASSERT_EQ((size_t) 1048576, config.get_max_carboncopy_length());
+	ASSERT_EQ((std::size_t) 1245, config.get_max_broker_liveness());
+	ASSERT_EQ((std::size_t) 1024, config.get_max_output_length());
+	ASSERT_EQ((std::size_t) 1048576, config.get_max_carboncopy_length());
	ASSERT_EQ(true, config.get_cleanup_submission());
 }
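A closing note on the size_t to std::size_t rename that runs through the whole patch: <cstddef> is only required to provide std::size_t inside namespace std, while the unqualified global size_t is left to the implementation, so the qualified spelling is the portable one. A minimal illustration (not project code):

#include <cstddef> // guarantees std::size_t; the global-namespace size_t is not guaranteed by this header

int main()
{
	// Qualifying the type works with any conforming standard library.
	std::size_t count = 0;
	return (int) count;
}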