diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 925bf0201..06c0c6ab3 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -16,7 +16,7 @@ linux-builder: - unzip artifacts.zip - export LIBOPENSHOT_AUDIO_DIR=$CI_PROJECT_DIR/build/install-x64 - mkdir -p build; cd build; - - cmake -DCMAKE_VERBOSE_MAKEFILE:BOOL=ON -D"CMAKE_INSTALL_PREFIX:PATH=$CI_PROJECT_DIR/build/install-x64" -D"PYTHON_MODULE_PATH=python" -D"RUBY_MODULE_PATH=ruby" -DCMAKE_BUILD_TYPE:STRING=Release -DUSE_SYSTEM_JSONCPP=0 ../ + - cmake -DCMAKE_VERBOSE_MAKEFILE:BOOL=ON -D"CMAKE_INSTALL_PREFIX:PATH=$CI_PROJECT_DIR/build/install-x64" -D"PYTHON_MODULE_PATH=python" -D"RUBY_MODULE_PATH=ruby" -DCMAKE_BUILD_TYPE:STRING=Release -DAPPIMAGE_BUILD=1 -DUSE_SYSTEM_JSONCPP=0 ../ - make - make install - make doc @@ -27,7 +27,7 @@ linux-builder: except: - tags tags: - - linux + - linux-bionic mac-builder: stage: build-libopenshot @@ -43,7 +43,7 @@ mac-builder: - unzip artifacts.zip - export LIBOPENSHOT_AUDIO_DIR=$CI_PROJECT_DIR/build/install-x64 - mkdir -p build; cd build; - - cmake -DCMAKE_VERBOSE_MAKEFILE:BOOL=ON -D"CMAKE_INSTALL_PREFIX:PATH=$CI_PROJECT_DIR/build/install-x64" -DCMAKE_CXX_COMPILER=/usr/local/opt/gcc48/bin/g++-4.8 -DCMAKE_C_COMPILER=/usr/local/opt/gcc48/bin/gcc-4.8 -DCMAKE_PREFIX_PATH=/usr/local/qt5/5.5/clang_64 -DPYTHON_INCLUDE_DIR=/Library/Frameworks/Python.framework/Versions/3.6/include/python3.6m -DPYTHON_LIBRARY=/Library/Frameworks/Python.framework/Versions/3.6/lib/libpython3.6.dylib -DPYTHON_MODULE_PATH=python -DPython_FRAMEWORKS=/Library/Frameworks/Python.framework/ -D"CMAKE_BUILD_TYPE:STRING=Release" -D"CMAKE_OSX_SYSROOT=/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.9.sdk" -D"CMAKE_OSX_DEPLOYMENT_TARGET=10.9" -D"CMAKE_INSTALL_RPATH_USE_LINK_PATH=1" -D"ENABLE_RUBY=0" ../ + - cmake -DCMAKE_EXE_LINKER_FLAGS="-stdlib=libc++" -DCMAKE_SHARED_LINKER_FLAGS="-stdlib=libc++" -DCMAKE_VERBOSE_MAKEFILE:BOOL=ON -D"CMAKE_INSTALL_PREFIX:PATH=$CI_PROJECT_DIR/build/install-x64" -DCMAKE_CXX_COMPILER=clang++ -DCMAKE_C_COMPILER=clang -DCMAKE_PREFIX_PATH=/usr/local/qt5.15.X/qt5.15/5.15.0/clang_64/ -DPYTHON_INCLUDE_DIR=/Library/Frameworks/Python.framework/Versions/3.6/include/python3.6m -DPYTHON_LIBRARY=/Library/Frameworks/Python.framework/Versions/3.6/lib/libpython3.6.dylib -DPYTHON_MODULE_PATH=python -DPython_FRAMEWORKS=/Library/Frameworks/Python.framework/ -D"CMAKE_BUILD_TYPE:STRING=Release" -D"CMAKE_OSX_SYSROOT=/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.11.sdk" -D"CMAKE_OSX_DEPLOYMENT_TARGET=10.9" -D"CMAKE_INSTALL_RPATH_USE_LINK_PATH=1" -D"ENABLE_RUBY=0" ../ - make - make install - echo -e "CI_PROJECT_NAME:$CI_PROJECT_NAME\nCI_COMMIT_REF_NAME:$CI_COMMIT_REF_NAME\nCI_COMMIT_SHA:$CI_COMMIT_SHA\nCI_JOB_ID:$CI_JOB_ID" > "install-x64/share/$CI_PROJECT_NAME" diff --git a/.travis.yml b/.travis.yml index 241d5be3d..ab338bdde 100644 --- a/.travis.yml +++ b/.travis.yml @@ -32,11 +32,6 @@ addons: - curl jobs: - - # The FFmpeg 3.2 backport PPA has gone missing - allow_failures: - - name: "FFmpeg 3.2 GCC (Ubuntu 16.04 Xenial)" - include: - name: "Coverage + FFmpeg 3.4 GCC (Ubuntu 18.04 Bionic)" env: @@ -99,31 +94,6 @@ jobs: - libavresample-dev - libomp-dev - - name: "FFmpeg 3.2 GCC (Ubuntu 16.04 Xenial)" - env: - - BUILD_VERSION=ffmpeg32 - - CMAKE_EXTRA_ARGS="" - - TEST_TARGET="os_test" - os: linux - dist: xenial - addons: - apt: - sources: - - sourceline: 'ppa:openshot.developers/libopenshot-daily' - - sourceline: 'ppa:jon-hedgerows/ffmpeg-backports' - 
packages: - - *p_common - - libavresample-dev - - libavcodec57 - - libavdevice57 - - libavfilter6 - - libavformat57 - - libavresample3 - - libavutil55 - - libpostproc54 - - libswresample2 - - libswscale4 - - name: "FFmpeg 2 GCC (Ubuntu 16.04 Xenial)" env: - BUILD_VERSION=ffmpeg2 diff --git a/CMakeLists.txt b/CMakeLists.txt index bb3fe8cc8..94b09879b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -76,6 +76,7 @@ option(DISABLE_BUNDLED_JSONCPP "Don't fall back to bundled JsonCpp" OFF) option(ENABLE_IWYU "Enable 'Include What You Use' scanner (CMake 3.3+)" OFF) option(ENABLE_TESTS "Build unit tests (requires UnitTest++)" ON) option(ENABLE_DOCS "Build API documentation (requires Doxygen)" ON) +option(APPIMAGE_BUILD "Build to install in an AppImage (Linux only)" OFF) # Legacy commandline override if (DISABLE_TESTS) @@ -91,12 +92,6 @@ if(DEFINED ENABLE_TESTS) set(ENABLE_TESTS ${ENABLE_TESTS} CACHE BOOL "Build unit tests (requires UnitTest++)" FORCE) endif() -########## Configure Version.h header ############## -configure_file(include/OpenShotVersion.h.in include/OpenShotVersion.h @ONLY) -# We'll want that installed later -install(FILES ${CMAKE_CURRENT_BINARY_DIR}/include/OpenShotVersion.h - DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/libopenshot) - #### Work around a GCC < 9 bug with handling of _Pragma() in macros #### See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=55578 if ((${CMAKE_CXX_COMPILER_ID} STREQUAL "GNU") AND @@ -142,6 +137,12 @@ endif() ############## PROCESS src/ DIRECTORIES ############## add_subdirectory(src) +########## Configure Version.h header ############## +configure_file(include/OpenShotVersion.h.in include/OpenShotVersion.h @ONLY) +# We'll want that installed later +install(FILES ${CMAKE_CURRENT_BINARY_DIR}/include/OpenShotVersion.h + DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/libopenshot) + ################### DOCUMENTATION ################### # Find Doxygen (used for documentation) set(DOCS_ENABLED FALSE) # Only set true if Doxygen is found and configured diff --git a/README.md b/README.md index cf69c1cfe..6105915d3 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ -OpenShot Video Library (libopenshot) is a free, open-source C++ library dedicated to -delivering high quality video editing, animation, and playback solutions to the -world. +OpenShot Video Library (libopenshot) is a free, open-source C++ library +dedicated to delivering high quality video editing, animation, and playback +solutions to the world. ## Build Status @@ -15,7 +15,8 @@ world. * Time Mapping (Curve-based Slow Down, Speed Up, Reverse) * Audio Mixing & Resampling (Curve-based) * Audio Plug-ins (VST & AU) -* Audio Drivers (ASIO, WASAPI, DirectSound, CoreAudio, iPhone Audio, ALSA, JACK, and Android) +* Audio Drivers (ASIO, WASAPI, DirectSound, CoreAudio, iPhone Audio, + ALSA, JACK, and Android) * Telecine and Inverse Telecine (Film to TV, TV to Film) * Frame Rate Conversions * Multi-Processor Support (Performance) @@ -27,8 +28,8 @@ world. ## Install -Detailed instructions for building libopenshot and libopenshot-audio for each OS. These instructions -are also available in the /docs/ source folder. +Detailed instructions for building libopenshot and libopenshot-audio for +each OS. These instructions are also available in the `/docs/` source folder. * [Linux](https://github.com/OpenShot/libopenshot/wiki/Linux-Build-Instructions) * [Mac](https://github.com/OpenShot/libopenshot/wiki/Mac-Build-Instructions) @@ -36,10 +37,12 @@ are also available in the /docs/ source folder. 
## Hardware Acceleration -OpenShot now supports experimental hardware acceleration, both for encoding and -decoding videos. When enabled, this can either speed up those operations or slow -them down, depending on the power and features supported by your graphics card. -Please see [doc/HW-ACCELL.md](doc/HW-ACCEL.md) for more information. +OpenShot now supports experimental hardware acceleration, both for encoding +and decoding videos. When enabled, this can either speed up those operations +or slow them down, depending on the power and features supported by your +graphics card. + +Please see [`doc/HW-ACCEL.md`](doc/HW-ACCEL.md) for more information. ## Documentation @@ -51,10 +54,11 @@ make doc ## Developers -Are you interested in becoming more involved in the development of -OpenShot? Build exciting new features, fix bugs, make friends, and become a hero! -Please read the [step-by-step](https://github.com/OpenShot/openshot-qt/wiki/Become-a-Developer) -instructions for getting source code, configuring dependencies, and building OpenShot. +Are you interested in becoming more involved in the development of OpenShot? +Build exciting new features, fix bugs, make friends, and become a hero! +Please read the [step-by-step](https://github.com/OpenShot/openshot-qt/wiki/Become-a-Developer) +instructions for getting source code, configuring dependencies, and building +OpenShot. ## Report a bug @@ -72,7 +76,7 @@ https://github.com/OpenShot/libopenshot/issues ### License -Copyright (c) 2008-2019 OpenShot Studios, LLC. +Copyright (c) 2008-2020 OpenShot Studios, LLC. OpenShot Library (libopenshot) is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License diff --git a/include/CacheDisk.h b/include/CacheDisk.h index 09ebd4abc..c60f5f6e3 100644 --- a/include/CacheDisk.h +++ b/include/CacheDisk.h @@ -38,8 +38,6 @@ #include "Frame.h" #include "Exceptions.h" #include -#include -#include namespace openshot { diff --git a/include/ChunkReader.h b/include/ChunkReader.h index cd7cd67f1..50b260838 100644 --- a/include/ChunkReader.h +++ b/include/ChunkReader.h @@ -37,10 +37,10 @@ #include #include #include -#include -#include +#include #include #include +#include #include "Json.h" #include "CacheMemory.h" #include "Exceptions.h" diff --git a/include/ChunkWriter.h b/include/ChunkWriter.h index 5fee5a375..a63e4ca12 100644 --- a/include/ChunkWriter.h +++ b/include/ChunkWriter.h @@ -34,19 +34,19 @@ #include "ReaderBase.h" #include "WriterBase.h" #include "FFmpegWriter.h" +#include "CacheMemory.h" +#include "Exceptions.h" +#include "Json.h" #include #include #include #include -#include -#include -#include +#include #include #include -#include "CacheMemory.h" -#include "Exceptions.h" -#include "Json.h" +#include +#include namespace openshot diff --git a/include/Clip.h b/include/Clip.h index 0fbed1599..0c98526a0 100644 --- a/include/Clip.h +++ b/include/Clip.h @@ -169,6 +169,9 @@ namespace openshot { /// Return the list of effects on the timeline std::list Effects() { return effects; }; + /// Look up an effect by ID + openshot::EffectBase* GetEffect(const std::string& id); + /// @brief Get an openshot::Frame object for a specific frame number of this timeline. 
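The new Clip::GetEffect(id) accessor performs a linear scan over the clip's effects list and returns nullptr when nothing matches (its implementation appears in the src/Clip.cpp hunk further down). A minimal usage sketch, assuming a clip with effects already attached; the ID string is a hypothetical placeholder:

#include "Clip.h"
#include "EffectBase.h"

// Sketch: look up one of a clip's effects by ID. Per the new
// implementation, a miss returns nullptr rather than throwing.
openshot::EffectBase* find_effect(openshot::Clip& clip) {
    openshot::EffectBase* effect = clip.GetEffect("my-effect-id");
    if (effect == nullptr) {
        // No attached effect carries that ID
    }
    return effect;
}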
/// /// @returns The requested frame (containing the image) @@ -253,8 +256,6 @@ namespace openshot { openshot::Keyframe has_audio; ///< An optional override to determine if this clip has audio (-1=undefined, 0=no, 1=yes) openshot::Keyframe has_video; ///< An optional override to determine if this clip has video (-1=undefined, 0=no, 1=yes) }; +} // namespace - -} - -#endif +#endif // OPENSHOT_CLIP_H diff --git a/include/DummyReader.h b/include/DummyReader.h index e9c90968a..9a75751d0 100644 --- a/include/DummyReader.h +++ b/include/DummyReader.h @@ -53,6 +53,12 @@ namespace openshot * A dummy reader can be created with any framerate or samplerate. This is useful in unit * tests that need to test different framerates or samplerates. * + * @note Timeline does buffering by requesting more frames than it + * strictly needs. Thus if you use this DummyReader with a custom + * cache in a Timeline, make sure it has enough + * frames. Specifically you need some frames after the last frame + * you plan to access through the Timeline. + * * @code * // Create cache object to store fake Frame objects * CacheMemory cache; diff --git a/include/FFmpegUtilities.h b/include/FFmpegUtilities.h index b4ec951f8..c637f9445 100644 --- a/include/FFmpegUtilities.h +++ b/include/FFmpegUtilities.h @@ -40,7 +40,7 @@ #ifndef IS_FFMPEG_3_2 #define IS_FFMPEG_3_2 (LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(57, 64, 101)) #endif - + #ifndef HAVE_HW_ACCEL #define HAVE_HW_ACCEL (LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(57, 107, 100)) #endif @@ -122,6 +122,9 @@ #ifndef PIX_FMT_YUV420P #define PIX_FMT_YUV420P AV_PIX_FMT_YUV420P #endif + #ifndef PIX_FMT_YUV444P + #define PIX_FMT_YUV444P AV_PIX_FMT_YUV444P + #endif // FFmpeg's libavutil/common.h defines an RSHIFT incompatible with Ruby's // definition in ruby/config.h, so we move it to FF_RSHIFT @@ -177,13 +180,13 @@ #define AV_OUTPUT_CONTEXT(output_context, path) avformat_alloc_output_context2( output_context, NULL, NULL, path) #define AV_OPTION_FIND(priv_data, name) av_opt_find(priv_data, name, NULL, 0, 0) #define AV_OPTION_SET( av_stream, priv_data, name, value, avcodec) av_opt_set(priv_data, name, value, 0); avcodec_parameters_from_context(av_stream->codecpar, avcodec); - #define AV_FORMAT_NEW_STREAM(oc, st_codec, av_codec, av_st) av_st = avformat_new_stream(oc, NULL);\ + #define AV_FORMAT_NEW_STREAM(oc, st_codec_ctx, av_codec, av_st) av_st = avformat_new_stream(oc, NULL);\ if (!av_st) \ throw OutOfMemory("Could not allocate memory for the video stream.", path); \ c = avcodec_alloc_context3(av_codec); \ - st_codec = c; \ + st_codec_ctx = c; \ av_st->codecpar->codec_id = av_codec->id; - #define AV_COPY_PARAMS_FROM_CONTEXT(av_stream, av_codec) avcodec_parameters_from_context(av_stream->codecpar, av_codec); + #define AV_COPY_PARAMS_FROM_CONTEXT(av_stream, av_codec_ctx) avcodec_parameters_from_context(av_stream->codecpar, av_codec_ctx); #elif IS_FFMPEG_3_2 #define AV_REGISTER_ALL av_register_all(); #define AVCODEC_REGISTER_ALL avcodec_register_all(); diff --git a/include/FFmpegWriter.h b/include/FFmpegWriter.h index 37fa22dce..98fbbb590 100644 --- a/include/FFmpegWriter.h +++ b/include/FFmpegWriter.h @@ -164,8 +164,8 @@ namespace openshot { AVOutputFormat *fmt; AVFormatContext *oc; AVStream *audio_st, *video_st; - AVCodecContext *video_codec; - AVCodecContext *audio_codec; + AVCodecContext *video_codec_ctx; + AVCodecContext *audio_codec_ctx; SwsContext *img_convert_ctx; int16_t *samples; uint8_t *audio_outbuf; diff --git a/include/Fraction.h b/include/Fraction.h index 
9ffcda1fd..88c16fbc0 100644 --- a/include/Fraction.h +++ b/include/Fraction.h @@ -31,7 +31,7 @@ #ifndef OPENSHOT_FRACTION_H #define OPENSHOT_FRACTION_H -#include +#include namespace openshot { diff --git a/include/FrameMapper.h b/include/FrameMapper.h index c98d7b719..e78401a9b 100644 --- a/include/FrameMapper.h +++ b/include/FrameMapper.h @@ -33,7 +33,7 @@ #include #include -#include +#include #include #include #include "CacheMemory.h" @@ -138,7 +138,6 @@ namespace openshot */ class FrameMapper : public ReaderBase { private: - bool is_open; bool field_toggle; // Internal odd / even toggle (used when building the mapping) Fraction original; // The original frame rate Fraction target; // The target frame rate diff --git a/include/KeyFrame.h b/include/KeyFrame.h index ee58da8f0..6424473a9 100644 --- a/include/KeyFrame.h +++ b/include/KeyFrame.h @@ -33,7 +33,7 @@ #include #include -#include +#include #include #include #include "Exceptions.h" diff --git a/include/MagickUtilities.h b/include/MagickUtilities.h index 74365904a..953b85ee0 100644 --- a/include/MagickUtilities.h +++ b/include/MagickUtilities.h @@ -32,7 +32,11 @@ #ifdef USE_IMAGEMAGICK +// Exclude a warning message with IM6 headers +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wignored-qualifiers" #include "Magick++.h" +#pragma GCC diagnostic pop // Determine ImageMagick version, as IM7 isn't fully // backwards compatible diff --git a/include/OpenMPUtilities.h b/include/OpenMPUtilities.h index 9810d636e..30bdd199a 100644 --- a/include/OpenMPUtilities.h +++ b/include/OpenMPUtilities.h @@ -32,8 +32,8 @@ #define OPENSHOT_OPENMP_UTILITIES_H #include -#include -#include +#include +#include #include "Settings.h" diff --git a/include/OpenShotVersion.h.in b/include/OpenShotVersion.h.in index e15662b2e..5e86e8ce7 100644 --- a/include/OpenShotVersion.h.in +++ b/include/OpenShotVersion.h.in @@ -42,6 +42,15 @@ #define OPENSHOT_VERSION_SO @PROJECT_SO_VERSION@ /// Shared object version number. 
This increments any time the API and ABI changes (so old apps will no longer link) +// Useful dependency versioning / feature availability +#cmakedefine QT_VERSION_STR "@QT_VERSION_STR@" +#cmakedefine AVCODEC_VERSION_STR "@AVCODEC_VERSION_STR@" +#cmakedefine AVFORMAT_VERSION_STR "@AVFORMAT_VERSION_STR@" +#cmakedefine AVUTIL_VERSION_STR "@AVUTIL_VERSION_STR@" +#cmakedefine01 HAVE_IMAGEMAGICK +#cmakedefine01 HAVE_RESVG +#cmakedefine01 APPIMAGE_BUILD + #include namespace openshot @@ -67,4 +76,4 @@ openshot::OpenShotVersion GetVersion(); } -#endif // OPENSHOT_VERSION_H \ No newline at end of file +#endif // OPENSHOT_VERSION_H diff --git a/include/Profiles.h b/include/Profiles.h index 3b5ebd0d1..1de7c0722 100644 --- a/include/Profiles.h +++ b/include/Profiles.h @@ -35,12 +35,12 @@ #include #include #include -#include -#include -#include +#include +#include +#include #include -#include -#include +#include +#include #include "Exceptions.h" #include "Fraction.h" #include "Json.h" diff --git a/include/Qt/PlayerDemo.h b/include/Qt/PlayerDemo.h index f444334b5..8dca83fc1 100644 --- a/include/Qt/PlayerDemo.h +++ b/include/Qt/PlayerDemo.h @@ -31,12 +31,12 @@ #ifndef OPENSHOT_PLAYER_DEMO_H #define OPENSHOT_PLAYER_DEMO_H -#include -#include -#include -#include -#include -#include +#include +#include +#include +#include +#include +#include #include "VideoRenderWidget.h" diff --git a/include/QtUtilities.h b/include/QtUtilities.h new file mode 100644 index 000000000..3f9637d93 --- /dev/null +++ b/include/QtUtilities.h @@ -0,0 +1,44 @@ +/** + * @file + * @brief Header file for QtUtilities (compatibility overlay) + * @author FeRD (Frank Dana) + */ + +/* LICENSE + * + * Copyright (c) 2008-2020 OpenShot Studios, LLC + * . This file is part of + * OpenShot Library (libopenshot), an open-source project dedicated to + * delivering high quality video editing and animation solutions to the + * world. For more information visit . + * + * OpenShot Library (libopenshot) is free software: you can redistribute it + * and/or modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * OpenShot Library (libopenshot) is distributed in the hope that it will be + * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with OpenShot Library. If not, see .
+ */ + +#ifndef OPENSHOT_QT_UTILITIES_H +#define OPENSHOT_QT_UTILITIES_H + +#include <QtGlobal> +#include <QTextStream> + +// Fix Qt::endl for older Qt versions +// From: https://bugreports.qt.io/browse/QTBUG-82680 +#if QT_VERSION < QT_VERSION_CHECK(5, 14, 0) +namespace Qt { + using TextStreamFunction = QTextStream& (*)(QTextStream&); + constexpr TextStreamFunction endl = ::endl; +} +#endif + +#endif // OPENSHOT_QT_UTILITIES_H diff --git a/include/ReaderBase.h b/include/ReaderBase.h index dfb9873d4..c997b76dc 100644 --- a/include/ReaderBase.h +++ b/include/ReaderBase.h @@ -43,7 +43,7 @@ #include "Frame.h" #include "Json.h" #include "ZmqLogger.h" -#include +#include #include #include #include @@ -111,10 +111,16 @@ namespace openshot openshot::ReaderInfo info; /// Parent clip object of this reader (which can be unparented and NULL) - openshot::ClipBase* GetClip(); + inline openshot::ClipBase* GetParentClip() { return parent; }; + + /// Deprecated alias for GetParentClip() + inline openshot::ClipBase* GetClip() { return parent; }; /// Set parent clip object of this reader - void SetClip(openshot::ClipBase* clip); + inline void SetParentClip(openshot::ClipBase* clip) { parent = clip; }; + + /// Deprecated alias for SetParentClip() + inline void SetClip(openshot::ClipBase* clip) { parent = clip; }; /// Close the reader (and any resources it was consuming) virtual void Close() = 0; diff --git a/include/RendererBase.h b/include/RendererBase.h index 1c526937c..5e65ea87a 100644 --- a/include/RendererBase.h +++ b/include/RendererBase.h @@ -32,7 +32,7 @@ #define OPENSHOT_RENDERER_BASE_H #include "Frame.h" -#include <stdlib.h> // for realloc +#include <cstdlib> // for realloc #include namespace openshot diff --git a/include/Settings.h b/include/Settings.h index 554e7c286..da85b89e0 100644 --- a/include/Settings.h +++ b/include/Settings.h @@ -35,11 +35,11 @@ #include #include #include -#include +#include #include #include -#include -#include +#include +#include #include #include #include "JuceHeader.h" diff --git a/include/Timeline.h b/include/Timeline.h index 932b04acf..5c275abbc 100644 --- a/include/Timeline.h +++ b/include/Timeline.h @@ -61,7 +61,7 @@ namespace openshot { /// from lowest layer to top layer (since that is the sequence they need to be combined), and then /// by position (left to right). struct CompareClips{ - bool operator()( Clip* lhs, Clip* rhs){ + bool operator()( openshot::Clip* lhs, openshot::Clip* rhs){ if( lhs->Layer() < rhs->Layer() ) return true; if( lhs->Layer() == rhs->Layer() && lhs->Position() <= rhs->Position() ) return true; return false; }}; @@ -71,13 +71,28 @@ /// from lowest layer to top layer (since that is sequence clips are combined), and then by /// position, and then by effect order.
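CompareClips above is a plain function object, so it can be passed directly to std::list::sort; that is presumably how Timeline's internal clip sorting applies it. A minimal sketch of the idea, outside Timeline's private API:

#include <list>
#include "Timeline.h" // defines openshot::CompareClips

// Sketch: order clips bottom layer first, then left to right by
// position -- the render order described in the comments above.
void sort_render_order(std::list<openshot::Clip*>& clips) {
    clips.sort(openshot::CompareClips());
}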
struct CompareEffects{ - bool operator()( EffectBase* lhs, EffectBase* rhs){ + bool operator()( openshot::EffectBase* lhs, openshot::EffectBase* rhs){ if( lhs->Layer() < rhs->Layer() ) return true; if( lhs->Layer() == rhs->Layer() && lhs->Position() < rhs->Position() ) return true; if( lhs->Layer() == rhs->Layer() && lhs->Position() == rhs->Position() && lhs->Order() > rhs->Order() ) return true; return false; }}; + /// Comparison method for finding the far end of the timeline, by locating + /// the Clip with the highest end-frame number using std::max_element + struct CompareClipEndFrames { + bool operator()(const openshot::Clip* lhs, const openshot::Clip* rhs) { + return (lhs->Position() + lhs->Duration()) + <= (rhs->Position() + rhs->Duration()); + }}; + + /// Like CompareClipEndFrames, but for effects + struct CompareEffectEndFrames { + bool operator()(const openshot::EffectBase* lhs, const openshot::EffectBase* rhs) { + return (lhs->Position() + lhs->Duration()) + <= (rhs->Position() + rhs->Duration()); + }}; + /** * @brief This class represents a timeline * @@ -146,33 +161,33 @@ namespace openshot { * t.Close(); * @endcode */ - class Timeline : public ReaderBase { + class Timeline : public openshot::ReaderBase { private: bool is_open; /// clips; /// closing_clips; /// open_clips; /// effects; /// allocated_frame_mappers; ///< all the frame mappers we allocated and must free + std::list clips; /// closing_clips; /// open_clips; /// effects; /// allocated_frame_mappers; ///< all the frame mappers we allocated and must free bool managed_cache; ///< Does this timeline instance manage the cache object std::string path; ///< Optional path of loaded UTF-8 OpenShot JSON project file /// Process a new layer of video or audio - void add_layer(std::shared_ptr new_frame, Clip* source_clip, int64_t clip_frame_number, int64_t timeline_frame_number, bool is_top_clip, float max_volume); + void add_layer(std::shared_ptr new_frame, openshot::Clip* source_clip, int64_t clip_frame_number, int64_t timeline_frame_number, bool is_top_clip, float max_volume); /// Apply a FrameMapper to a clip which matches the settings of this timeline - void apply_mapper_to_clip(Clip* clip); + void apply_mapper_to_clip(openshot::Clip* clip); /// Apply JSON Diffs to various objects contained in this timeline void apply_json_to_clips(Json::Value change); /// find_intersecting_clips(int64_t requested_frame, int number_of_frames, bool include); + std::vector find_intersecting_clips(int64_t requested_frame, int number_of_frames, bool include); /// Get or generate a blank frame - std::shared_ptr GetOrCreateFrame(Clip* clip, int64_t number); + std::shared_ptr GetOrCreateFrame(openshot::Clip* clip, int64_t number); /// Apply effects to the source frame (if any) - std::shared_ptr apply_effects(std::shared_ptr frame, int64_t timeline_frame_number, int layer); + std::shared_ptr apply_effects(std::shared_ptr frame, int64_t timeline_frame_number, int layer); /// Compare 2 floating point numbers for equality bool isEqual(double a, double b); @@ -198,7 +213,7 @@ namespace openshot { void sort_effects(); /// Update the list of 'opened' clips - void update_open_clips(Clip *clip, bool does_clip_intersect); + void update_open_clips(openshot::Clip *clip, bool does_clip_intersect); public: @@ -209,7 +224,7 @@ namespace openshot { /// @param sample_rate The sample rate of the timeline's audio /// @param channels The number of audio channels of the timeline /// @param channel_layout The channel layout (i.e. 
mono, stereo, 3 point surround, etc...) - Timeline(int width, int height, Fraction fps, int sample_rate, int channels, ChannelLayout channel_layout); + Timeline(int width, int height, openshot::Fraction fps, int sample_rate, int channels, openshot::ChannelLayout channel_layout); /// @brief Constructor for the timeline (which loads a JSON structure from a file path, and initializes a timeline) /// @param projectPath The path of the UTF-8 *.osp project file (JSON contents). Contents will be loaded automatically. @@ -220,11 +235,11 @@ namespace openshot { /// @brief Add an openshot::Clip to the timeline /// @param clip Add an openshot::Clip to the timeline. A clip can contain any type of Reader. - void AddClip(Clip* clip); + void AddClip(openshot::Clip* clip); /// @brief Add an effect to the timeline /// @param effect Add an effect to the timeline. An effect can modify the audio or video of an openshot::Frame. - void AddEffect(EffectBase* effect); + void AddEffect(openshot::EffectBase* effect); /// Apply the timeline's framerate and samplerate to all clips void ApplyMapperToClips(); @@ -239,34 +254,48 @@ namespace openshot { void ClearAllCache(); /// Return a list of clips on the timeline - std::list Clips() { return clips; }; + std::list Clips() { return clips; }; + + /// Look up a single clip by ID + openshot::ClipBase* GetClip(const std::string& id); + + /// Look up a clip effect by ID + openshot::EffectBase* GetClipEffect(const std::string& id); + + /// Look up a timeline effect by ID + openshot::EffectBase* GetEffect(const std::string& id); + + /// Look up the end time of the latest timeline element + double GetMaxTime(); + /// Look up the end frame number of the latest element on the timeline + int64_t GetMaxFrame(); /// Close the timeline reader (and any resources it was consuming) void Close() override; /// Return the list of effects on the timeline - std::list Effects() { return effects; }; + std::list Effects() { return effects; }; /// Get the cache object used by this reader - CacheBase* GetCache() override { return final_cache; }; + openshot::CacheBase* GetCache() override { return final_cache; }; /// Set the cache object used by this reader. You must now manage the lifecycle /// of this cache object though (Timeline will not delete it for you). - void SetCache(CacheBase* new_cache); + void SetCache(openshot::CacheBase* new_cache); /// Get an openshot::Frame object for a specific frame number of this timeline. /// /// @returns The requested frame (containing the image) /// @param requested_frame The frame number that is requested. - std::shared_ptr GetFrame(int64_t requested_frame) override; + std::shared_ptr GetFrame(int64_t requested_frame) override; // Curves for the viewport - Keyframe viewport_scale; /// #include #include -#include +#include #include #include -#include -#include +#include +#include #include #include #include "JuceHeader.h" diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 17b738637..dec7c8535 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -29,52 +29,65 @@ include(FeatureSummary) include(GNUInstallDirs) +# Enable IN_LIST in older CMake +if (POLICY CMP0057) + cmake_policy(SET CMP0057 NEW) +endif() + +# Juce requires either DEBUG or NDEBUG to be defined on MacOS. +# -DNDEBUG is set by cmake for all release configs, so add +# -DDEBUG for debug builds. We'll do this for all OSes, even +# though only MacOS requires it. 
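Back in the Timeline.h hunk above, the new GetMaxTime()/GetMaxFrame() accessors pair with the CompareClipEndFrames comparator: per its comment, the far end of the timeline is located with std::max_element. A hedged sketch of that calculation (the real implementation lives in src/Timeline.cpp, which this diff does not show):

#include <algorithm> // std::max_element
#include <list>
#include "Timeline.h"

// Sketch: find the clip whose (position + duration) is largest.
// GetMaxTime() would return this end time in seconds; GetMaxFrame()
// would convert it to a frame number using the timeline frame rate.
double max_end_time(std::list<openshot::Clip*>& clips) {
    if (clips.empty())
        return 0.0;
    const openshot::Clip* last = *std::max_element(
        clips.begin(), clips.end(), openshot::CompareClipEndFrames());
    return last->Position() + last->Duration();
}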
+set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -DDEBUG") +# Make sure we've picked some build type, default to debug +if(NOT DEFINED CMAKE_BUILD_TYPE OR CMAKE_BUILD_TYPE STREQUAL "") + set(CMAKE_BUILD_TYPE "Debug") +endif() + ################ WINDOWS ################## # Set some compiler options for Windows # required for libopenshot-audio headers if (WIN32) - add_definitions( -DIGNORE_JUCE_HYPOT=1 ) - set(CMAKE_CXX_FLAGS " ${CMAKE_CXX_FLAGS} -include cmath") + add_definitions( -DIGNORE_JUCE_HYPOT=1 ) + set(CMAKE_CXX_FLAGS " ${CMAKE_CXX_FLAGS} -include cmath") endif() if (APPLE) # If you still get errors compiling with GCC 4.8, mac headers need to be patched: http://hamelot.co.uk/programming/osx-gcc-dispatch_block_t-has-not-been-declared-invalid-typedef/ set_property(GLOBAL PROPERTY JUCE_MAC "JUCE_MAC") - add_definitions(-DNDEBUG) set(EXTENSION "mm") - set(JUCE_PLATFORM_SPECIFIC_DIR build/macosx/platform_specific_code) set(JUCE_PLATFORM_SPECIFIC_LIBRARIES "-framework Carbon -framework Cocoa -framework CoreFoundation -framework CoreAudio -framework CoreMidi -framework IOKit -framework AGL -framework AudioToolbox -framework QuartzCore -lobjc -framework Accelerate") + + # Prevent compiling with __cxx11 + add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0) endif() ################ IMAGE MAGICK ################## # Set the Quantum Depth that ImageMagick was built with (default to 16 bits) IF (MAGICKCORE_QUANTUM_DEPTH) - add_definitions( -DMAGICKCORE_QUANTUM_DEPTH=${MAGICKCORE_QUANTUM_DEPTH} ) + add_definitions( -DMAGICKCORE_QUANTUM_DEPTH=${MAGICKCORE_QUANTUM_DEPTH} ) ELSE (MAGICKCORE_QUANTUM_DEPTH) - add_definitions( -DMAGICKCORE_QUANTUM_DEPTH=16 ) + add_definitions( -DMAGICKCORE_QUANTUM_DEPTH=16 ) ENDIF (MAGICKCORE_QUANTUM_DEPTH) IF (MAGICKCORE_HDRI_ENABLE) - add_definitions( -DMAGICKCORE_HDRI_ENABLE=${MAGICKCORE_HDRI_ENABLE} ) + add_definitions( -DMAGICKCORE_HDRI_ENABLE=${MAGICKCORE_HDRI_ENABLE} ) ELSE (MAGICKCORE_HDRI_ENABLE) - add_definitions( -DMAGICKCORE_HDRI_ENABLE=0 ) + add_definitions( -DMAGICKCORE_HDRI_ENABLE=0 ) ENDIF (MAGICKCORE_HDRI_ENABLE) -IF (OPENSHOT_IMAGEMAGICK_COMPATIBILITY) - add_definitions( -DOPENSHOT_IMAGEMAGICK_COMPATIBILITY=${OPENSHOT_IMAGEMAGICK_COMPATIBILITY} ) -ELSE (OPENSHOT_IMAGEMAGICK_COMPATIBILITY) - add_definitions( -DOPENSHOT_IMAGEMAGICK_COMPATIBILITY=0 ) -ENDIF (OPENSHOT_IMAGEMAGICK_COMPATIBILITY) # Find the ImageMagick++ library find_package(ImageMagick COMPONENTS Magick++ MagickWand MagickCore) if (ImageMagick_FOUND) - # Include ImageMagick++ headers (needed for compile) - include_directories(${ImageMagick_INCLUDE_DIRS}) + # Include ImageMagick++ headers (needed for compile) + include_directories(${ImageMagick_INCLUDE_DIRS}) - # define a global var (used in the C++) - add_definitions( -DUSE_IMAGEMAGICK=1 ) - set(CMAKE_SWIG_FLAGS "-DUSE_IMAGEMAGICK=1") + # define a global var (used in the C++) + add_definitions( -DUSE_IMAGEMAGICK=1 ) + list(APPEND CMAKE_SWIG_FLAGS "-DUSE_IMAGEMAGICK=1") + set(HAVE_IMAGEMAGICK TRUE CACHE BOOL "Building with ImageMagick support" FORCE) + mark_as_advanced(HAVE_IMAGEMAGICK) endif() ################# LIBOPENSHOT-AUDIO ################### @@ -88,15 +101,15 @@ include_directories(${LIBOPENSHOT_AUDIO_INCLUDE_DIRS}) # Find BlackMagic DeckLinkAPI libraries if (ENABLE_BLACKMAGIC) - find_package(BlackMagic) + find_package(BlackMagic) - if (BLACKMAGIC_FOUND) - # Include Blackmagic headers (needed for compile) - include_directories(${BLACKMAGIC_INCLUDE_DIR}) + if (BLACKMAGIC_FOUND) + # Include Blackmagic headers (needed for compile) + 
include_directories(${BLACKMAGIC_INCLUDE_DIR}) - # define a global var (used in the C++) - add_definitions( -DUSE_BLACKMAGIC=1 ) - set(CMAKE_SWIG_FLAGS "-DUSE_BLACKMAGIC=1") + # define a global var (used in the C++) + add_definitions( -DUSE_BLACKMAGIC=1 ) + list(APPEND CMAKE_SWIG_FLAGS "-DUSE_BLACKMAGIC=1") endif() endif() @@ -151,7 +164,7 @@ set(OPENSHOT_SOURCES Fraction.cpp Frame.cpp FrameMapper.cpp - Json.cpp + Json.cpp KeyFrame.cpp OpenShotVersion.cpp ZmqLogger.cpp @@ -219,7 +232,7 @@ set_target_properties(openshot PROPERTIES # Add optional ImageMagic-dependent sources if(ImageMagick_FOUND) - target_sources(openshot PRIVATE + target_sources(openshot PRIVATE ImageReader.cpp ImageWriter.cpp TextReader.cpp) @@ -248,7 +261,7 @@ target_include_directories(openshot ################### JSONCPP ##################### # Include jsoncpp headers (needed for JSON parsing) if (USE_SYSTEM_JSONCPP) - message(STATUS "Looking for system jsoncpp") + message(STATUS "Looking for system jsoncpp") # Either an installed config or our find module will # create the IMPORTED target jsoncpp_lib if successful find_package(jsoncpp) @@ -280,7 +293,7 @@ endif () ################# QT5 ################### # Find QT5 libraries -set(_qt_components Widgets Core Gui Multimedia MultimediaWidgets) +set(_qt_components Core Gui Widgets) find_package(Qt5 COMPONENTS ${_qt_components} REQUIRED) foreach(_qt_comp IN LISTS _qt_components) @@ -289,22 +302,29 @@ foreach(_qt_comp IN LISTS _qt_components) endif() endforeach() +# Keep track of Qt version, to embed in our version header +set(QT_VERSION_STR ${Qt5Core_VERSION_STRING} CACHE STRING "Qt version linked with" FORCE) +mark_as_advanced(QT_VERSION_STR) + ################### FFMPEG ##################### # Find FFmpeg libraries (used for video encoding / decoding) find_package(FFmpeg REQUIRED COMPONENTS avcodec avdevice avformat avutil swscale) -foreach(ff_comp avcodec avdevice avformat avfilter avutil postproc swscale swresample avresample) +set(all_comps avcodec avdevice avformat avfilter avutil postproc swscale swresample avresample) +set(version_comps avcodec avformat avutil) + +foreach(ff_comp IN LISTS all_comps) if(TARGET FFmpeg::${ff_comp}) - target_link_libraries(openshot PUBLIC FFmpeg::${ff_comp}) + target_link_libraries(openshot PUBLIC FFmpeg::${ff_comp}) + # Keep track of some FFmpeg lib versions, to embed in our version header + if(${ff_comp} IN_LIST version_comps) + string(TOUPPER ${ff_comp} v_name) + set(${v_name}_VERSION_STR ${${ff_comp}_VERSION} CACHE STRING "${ff_comp} version used" FORCE) + mark_as_advanced(${v_name}_VERSION_STR) + endif() endif() -endforeach() -################### Threads #################### -# Threading library -- uses IMPORTED target Threads::Threads (since CMake 3.1) -set(CMAKE_THREAD_PREFER_PTHREAD TRUE) -set(THREADS_PREFER_PTHREAD_FLAG TRUE) -find_package(Threads REQUIRED) -target_link_libraries(openshot PUBLIC Threads::Threads) +endforeach() ################### OPENMP ##################### # Check for OpenMP (used for multi-core processing) @@ -317,7 +337,6 @@ if(NOT TARGET OpenMP::OpenMP_CXX) add_library(OpenMP_TARGET INTERFACE) add_library(OpenMP::OpenMP_CXX ALIAS OpenMP_TARGET) target_compile_options(OpenMP_TARGET INTERFACE ${OpenMP_CXX_FLAGS}) - target_link_libraries(OpenMP_TARGET INTERFACE Threads::Threads) target_link_libraries(OpenMP_TARGET INTERFACE ${OpenMP_CXX_FLAGS}) endif() @@ -333,7 +352,7 @@ find_package(cppzmq QUIET) # Creates cppzmq target # Link ZeroMQ library if (TARGET libzmq) - target_link_libraries(openshot PUBLIC 
libzmq) + target_link_libraries(openshot PUBLIC libzmq) endif() # Include cppzmq headers, if not bundled into libzmq if (TARGET cppzmq) @@ -350,7 +369,7 @@ if(DEFINED ENV{RESVGDIR} AND NOT DEFINED RESVG_ROOT) endif() # Find resvg library (used for rendering svg files) -FIND_PACKAGE(RESVG) +find_package(RESVG) # Include resvg headers (optional SVG library) if (TARGET RESVG::resvg) @@ -358,13 +377,17 @@ if (TARGET RESVG::resvg) target_link_libraries(openshot PUBLIC RESVG::resvg) target_compile_definitions(openshot PUBLIC "-DUSE_RESVG=1") - set(CMAKE_SWIG_FLAGS "-DUSE_RESVG=1") + list(APPEND CMAKE_SWIG_FLAGS "-DUSE_RESVG=1") + + set(HAVE_RESVG TRUE CACHE BOOL "Building with Resvg support" FORCE) + mark_as_advanced(HAVE_RESVG) + endif() ############### LINK LIBRARY ################# # Link remaining dependency libraries target_link_libraries(openshot PUBLIC - ${LIBOPENSHOT_AUDIO_LIBRARIES} + ${LIBOPENSHOT_AUDIO_LIBRARIES} ${PROFILER}) if(ImageMagick_FOUND) @@ -376,8 +399,8 @@ if(BLACKMAGIC_FOUND) endif() if(WIN32) - # Required for exception handling on Windows - target_link_libraries(openshot PUBLIC "imagehlp" "dbghelp" ) + # Required for exception handling on Windows + target_link_libraries(openshot PUBLIC "imagehlp" "dbghelp" ) endif() @@ -386,9 +409,9 @@ endif() add_executable(openshot-example examples/Example.cpp) # Define path to test input files -SET(TEST_MEDIA_PATH "${PROJECT_SOURCE_DIR}/src/examples/") +set(TEST_MEDIA_PATH "${PROJECT_SOURCE_DIR}/src/examples/") if (WIN32) - STRING(REPLACE "/" "\\\\" TEST_MEDIA_PATH TEST_MEDIA_PATH) + string(REPLACE "/" "\\\\" TEST_MEDIA_PATH TEST_MEDIA_PATH) endif() target_compile_definitions(openshot-example PRIVATE -DTEST_MEDIA_PATH="${TEST_MEDIA_PATH}" ) @@ -408,12 +431,12 @@ target_link_libraries(openshot-player openshot) ############### TEST BLACKMAGIC CAPTURE APP ################ if (BLACKMAGIC_FOUND) - # Create test executable - add_executable(openshot-blackmagic - examples/ExampleBlackmagic.cpp) + # Create test executable + add_executable(openshot-blackmagic + examples/ExampleBlackmagic.cpp) - # Link test executable to the new library - target_link_libraries(openshot-blackmagic openshot) + # Link test executable to the new library + target_link_libraries(openshot-blackmagic openshot) endif() ############### INCLUDE SWIG BINDINGS ################ @@ -433,13 +456,13 @@ install(DIRECTORY ${CMAKE_SOURCE_DIR}/include/ ############### CPACK PACKAGING ############## if(MINGW) - set(CPACK_GENERATOR "NSIS") + set(CPACK_GENERATOR "NSIS") endif() if(UNIX AND NOT APPLE) - set(CPACK_GENERATOR "DEB") + set(CPACK_GENERATOR "DEB") endif() #if(UNIX AND APPLE) -# set(CPACK_GENERATOR "DragNDrop") +# set(CPACK_GENERATOR "DragNDrop") #endif() set(CPACK_DEBIAN_PACKAGE_MAINTAINER "Jonathan Thomas") #required diff --git a/src/CacheDisk.cpp b/src/CacheDisk.cpp index bb2e12c42..91153c8f4 100644 --- a/src/CacheDisk.cpp +++ b/src/CacheDisk.cpp @@ -29,6 +29,10 @@ */ #include "../include/CacheDisk.h" +#include "../include/QtUtilities.h" +#include +#include +#include using namespace std; using namespace openshot; @@ -191,10 +195,10 @@ void CacheDisk::Add(std::shared_ptr frame) if (audio_file.open(QIODevice::WriteOnly)) { QTextStream audio_stream(&audio_file); - audio_stream << frame->SampleRate() << endl; - audio_stream << frame->GetAudioChannelsCount() << endl; - audio_stream << frame->GetAudioSamplesCount() << endl; - audio_stream << frame->ChannelsLayout() << endl; + audio_stream << frame->SampleRate() << Qt::endl; + audio_stream << frame->GetAudioChannelsCount() << 
Qt::endl; + audio_stream << frame->GetAudioSamplesCount() << Qt::endl; + audio_stream << frame->ChannelsLayout() << Qt::endl; // Loop through all samples for (int channel = 0; channel < frame->GetAudioChannelsCount(); channel++) @@ -202,7 +206,7 @@ void CacheDisk::Add(std::shared_ptr frame) // Get audio for this channel float *samples = frame->GetAudioSamples(channel); for (int sample = 0; sample < frame->GetAudioSamplesCount(); sample++) - audio_stream << samples[sample] << endl; + audio_stream << samples[sample] << Qt::endl; } } diff --git a/src/Clip.cpp b/src/Clip.cpp index d9f694408..bb704acc5 100644 --- a/src/Clip.cpp +++ b/src/Clip.cpp @@ -153,7 +153,7 @@ Clip::Clip(ReaderBase* new_reader) : resampler(NULL), reader(new_reader), alloca // Update duration and set parent if (reader) { End(reader->info.duration); - reader->SetClip(this); + reader->SetParentClip(this); } } @@ -210,7 +210,7 @@ Clip::Clip(std::string path) : resampler(NULL), reader(NULL), allocated_reader(N // Update duration and set parent if (reader) { End(reader->info.duration); - reader->SetClip(this); + reader->SetParentClip(this); allocated_reader = reader; init_reader_rotation(); } @@ -239,7 +239,7 @@ void Clip::Reader(ReaderBase* new_reader) reader = new_reader; // set parent - reader->SetClip(this); + reader->SetParentClip(this); // Init rotation (if any) init_reader_rotation(); @@ -368,6 +368,18 @@ std::shared_ptr Clip::GetFrame(int64_t requested_frame) throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method."); } +// Look up an effect by ID +openshot::EffectBase* Clip::GetEffect(const std::string& id) +{ + // Find the matching effect (if any) + for (const auto& effect : effects) { + if (effect->Id() == id) { + return effect; + } + } + return nullptr; +} + // Get file extension std::string Clip::get_file_extension(std::string path) { @@ -993,7 +1005,7 @@ void Clip::SetJsonValue(const Json::Value root) { // mark as managed reader and set parent if (reader) { - reader->SetClip(this); + reader->SetParentClip(this); allocated_reader = reader; } diff --git a/src/FFmpegReader.cpp b/src/FFmpegReader.cpp index c8ce141fe..694e75f10 100644 --- a/src/FFmpegReader.cpp +++ b/src/FFmpegReader.cpp @@ -33,6 +33,9 @@ #include "../include/FFmpegReader.h" +#include <thread> // for std::this_thread::sleep_for +#include <chrono> // for std::chrono::milliseconds + #define ENABLE_VAAPI 0 #if HAVE_HW_ACCEL @@ -925,7 +928,7 @@ std::shared_ptr FFmpegReader::ReadStream(int64_t requested_frame) { // Wait if too many frames are being processed while (processing_video_frames_size + processing_audio_frames_size >= minimum_packets) { - usleep(2500); + std::this_thread::sleep_for(std::chrono::milliseconds(3)); const GenericScopedLock lock(processingCriticalSection); processing_video_frames_size = processing_video_frames.size(); processing_audio_frames_size = processing_audio_frames.size(); @@ -1293,7 +1296,7 @@ void FFmpegReader::ProcessVideoPacket(int64_t requested_frame) { if (max_height <= 0) max_height = info.height; - Clip *parent = (Clip *) GetClip(); + Clip *parent = (Clip *) GetParentClip(); if (parent) { if (parent->scale == SCALE_FIT || parent->scale == SCALE_STRETCH) { // Best fit or Stretch scaling (based on max timeline size * scaling keyframes) @@ -1716,7 +1719,7 @@ void FFmpegReader::Seek(int64_t requested_frame) { // Wait for any processing frames to complete while (processing_video_frames_size + processing_audio_frames_size > 0) { - usleep(2500); +
std::this_thread::sleep_for(std::chrono::milliseconds(3)); const GenericScopedLock lock(processingCriticalSection); processing_video_frames_size = processing_video_frames.size(); processing_audio_frames_size = processing_audio_frames.size(); @@ -1865,8 +1868,20 @@ void FFmpegReader::UpdatePTSOffset(bool is_video) { // VIDEO PACKET if (video_pts_offset == 99999) // Has the offset been set yet? { - // Find the difference between PTS and frame number (no more than 10 timebase units allowed) - video_pts_offset = 0 - std::max(GetVideoPTS(), (int64_t) info.video_timebase.ToInt() * 10); + // Find the difference between PTS and frame number + video_pts_offset = 0 - GetVideoPTS(); + + // Find the difference between PTS and frame number + // Also, determine if PTS is invalid (too far away from zero) + // We compare the PTS to the timebase value equal to 1 second (which means the PTS + // must be within the -1 second to +1 second of zero, otherwise we ignore it) + // TODO: Please see https://github.com/OpenShot/libopenshot/pull/565#issuecomment-690985272 + // for ideas to improve this logic. + int64_t max_offset = info.video_timebase.Reciprocal().ToFloat(); + if (video_pts_offset < -max_offset || video_pts_offset > max_offset) { + // Ignore PTS, it seems invalid + video_pts_offset = 0; + } // debug output ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::UpdatePTSOffset (Video)", "video_pts_offset", video_pts_offset, "is_video", is_video); @@ -1875,8 +1890,18 @@ void FFmpegReader::UpdatePTSOffset(bool is_video) { // AUDIO PACKET if (audio_pts_offset == 99999) // Has the offset been set yet? { - // Find the difference between PTS and frame number (no more than 10 timebase units allowed) - audio_pts_offset = 0 - std::max(packet->pts, (int64_t) info.audio_timebase.ToInt() * 10); + // Find the difference between PTS and frame number + // Also, determine if PTS is invalid (too far away from zero) + // We compare the PTS to the timebase value equal to 1 second (which means the PTS + // must be within the -1 second to +1 second of zero, otherwise we ignore it) + // TODO: Please see https://github.com/OpenShot/libopenshot/pull/565#issuecomment-690985272 + // for ideas to improve this logic. 
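The replacement bound is simply the reciprocal of the stream timebase, i.e. one second expressed in PTS ticks: for a typical 1/90000 MPEG-TS timebase, Fraction(1, 90000).Reciprocal().ToFloat() is 90000, so any starting offset beyond ±90000 ticks is treated as bogus and zeroed. A compact sketch of the same check, using the Fraction API the hunk relies on:

#include <cstdint>
#include "Fraction.h"

// Sketch: accept a PTS offset only if it lies within +/- one second
// (in timebase ticks) of zero, matching the new logic above.
bool pts_offset_is_sane(int64_t pts_offset, openshot::Fraction timebase) {
    const int64_t max_offset = timebase.Reciprocal().ToFloat();
    return pts_offset >= -max_offset && pts_offset <= max_offset;
}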
+ audio_pts_offset = 0 - packet->pts; + int64_t max_offset = info.audio_timebase.Reciprocal().ToFloat(); + if (audio_pts_offset < -max_offset || audio_pts_offset > max_offset) { + // Ignore PTS, it seems invalid + audio_pts_offset = 0; + } // debug output ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::UpdatePTSOffset (Audio)", "audio_pts_offset", audio_pts_offset, "is_video", is_video); diff --git a/src/FFmpegWriter.cpp b/src/FFmpegWriter.cpp index d960d6771..b2aec5f1e 100644 --- a/src/FFmpegWriter.cpp +++ b/src/FFmpegWriter.cpp @@ -87,7 +87,7 @@ FFmpegWriter::FFmpegWriter(std::string path) : path(path), fmt(NULL), oc(NULL), audio_st(NULL), video_st(NULL), samples(NULL), audio_outbuf(NULL), audio_outbuf_size(0), audio_input_frame_size(0), audio_input_position(0), initial_audio_input_frame_size(0), img_convert_ctx(NULL), cache_size(8), num_of_rescalers(32), - rescaler_position(0), video_codec(NULL), audio_codec(NULL), is_writing(false), write_video_count(0), write_audio_count(0), + rescaler_position(0), video_codec_ctx(NULL), audio_codec_ctx(NULL), is_writing(false), write_video_count(0), write_audio_count(0), original_sample_rate(0), original_channels(0), avr(NULL), avr_planar(NULL), is_open(false), prepare_streams(false), write_header(false), write_trailer(false), audio_encoder_buffer_size(0), audio_encoder_buffer(NULL) { @@ -339,11 +339,11 @@ void FFmpegWriter::SetOption(StreamType stream, std::string name, std::string va if (info.has_video && stream == VIDEO_STREAM && video_st) { st = video_st; // Get codec context - c = AV_GET_CODEC_PAR_CONTEXT(st, video_codec); + c = AV_GET_CODEC_PAR_CONTEXT(st, video_codec_ctx); } else if (info.has_audio && stream == AUDIO_STREAM && audio_st) { st = audio_st; // Get codec context - c = AV_GET_CODEC_PAR_CONTEXT(st, audio_codec); + c = AV_GET_CODEC_PAR_CONTEXT(st, audio_codec_ctx); } else throw NoStreamsFound("The stream was not found. 
Be sure to call PrepareStreams() first.", path); @@ -438,6 +438,7 @@ void FFmpegWriter::SetOption(StreamType stream, std::string name, std::string va av_opt_set_int(c->priv_data, "qp", std::min(std::stoi(value), 51), 0); // 0-51 if (std::stoi(value) == 0) { av_opt_set(c->priv_data, "preset", "veryslow", 0); + c->pix_fmt = PIX_FMT_YUV444P; // no chroma subsampling } break; case AV_CODEC_ID_HEVC : @@ -498,6 +499,7 @@ void FFmpegWriter::SetOption(StreamType stream, std::string name, std::string va av_opt_set_int(c->priv_data, "crf", std::min(std::stoi(value), 51), 0); // 0-51 if (std::stoi(value) == 0) { av_opt_set(c->priv_data, "preset", "veryslow", 0); + c->pix_fmt = PIX_FMT_YUV444P; // no chroma subsampling } break; case AV_CODEC_ID_HEVC : @@ -537,7 +539,7 @@ void FFmpegWriter::SetOption(StreamType stream, std::string name, std::string va switch (c->codec_id) { case AV_CODEC_ID_AV1 : c->bit_rate = 0; - if (strstr(info.vcodec.c_str(), "svt_av1") != NULL) { + if (strstr(info.vcodec.c_str(), "svtav1") != NULL) { av_opt_set_int(c->priv_data, "qp", std::min(std::stoi(value),63), 0); } else if (strstr(info.vcodec.c_str(), "rav1e") != NULL) { @@ -839,13 +841,13 @@ void FFmpegWriter::WriteTrailer() { // Flush encoders void FFmpegWriter::flush_encoders() { - if (info.has_audio && audio_codec && AV_GET_CODEC_TYPE(audio_st) == AVMEDIA_TYPE_AUDIO && AV_GET_CODEC_ATTRIBUTES(audio_st, audio_codec)->frame_size <= 1) + if (info.has_audio && audio_codec_ctx && AV_GET_CODEC_TYPE(audio_st) == AVMEDIA_TYPE_AUDIO && AV_GET_CODEC_ATTRIBUTES(audio_st, audio_codec_ctx)->frame_size <= 1) return; #if (LIBAVFORMAT_VERSION_MAJOR < 58) - if (info.has_video && video_codec && AV_GET_CODEC_TYPE(video_st) == AVMEDIA_TYPE_VIDEO && (oc->oformat->flags & AVFMT_RAWPICTURE) && AV_FIND_DECODER_CODEC_ID(video_st) == AV_CODEC_ID_RAWVIDEO) + if (info.has_video && video_codec_ctx && AV_GET_CODEC_TYPE(video_st) == AVMEDIA_TYPE_VIDEO && (oc->oformat->flags & AVFMT_RAWPICTURE) && AV_FIND_DECODER_CODEC_ID(video_st) == AV_CODEC_ID_RAWVIDEO) return; #else - if (info.has_video && video_codec && AV_GET_CODEC_TYPE(video_st) == AVMEDIA_TYPE_VIDEO && AV_FIND_DECODER_CODEC_ID(video_st) == AV_CODEC_ID_RAWVIDEO) + if (info.has_video && video_codec_ctx && AV_GET_CODEC_TYPE(video_st) == AVMEDIA_TYPE_VIDEO && AV_FIND_DECODER_CODEC_ID(video_st) == AV_CODEC_ID_RAWVIDEO) return; #endif @@ -854,7 +856,7 @@ void FFmpegWriter::flush_encoders() { for (;;) { // Increment PTS (in frames and scaled to the codec's timebase) - write_video_count += av_rescale_q(1, (AVRational) {info.fps.den, info.fps.num}, video_codec->time_base); + write_video_count += av_rescale_q(1, av_make_q(info.fps.den, info.fps.num), video_codec_ctx->time_base); AVPacket pkt; av_init_packet(&pkt); @@ -872,22 +874,22 @@ void FFmpegWriter::flush_encoders() { #pragma omp critical (write_video_packet) { // Encode video packet (latest version of FFmpeg) - error_code = avcodec_send_frame(video_codec, NULL); + error_code = avcodec_send_frame(video_codec_ctx, NULL); got_packet = 0; while (error_code >= 0) { - error_code = avcodec_receive_packet(video_codec, &pkt); + error_code = avcodec_receive_packet(video_codec_ctx, &pkt); if (error_code == AVERROR(EAGAIN)|| error_code == AVERROR_EOF) { got_packet = 0; // Write packet - avcodec_flush_buffers(video_codec); + avcodec_flush_buffers(video_codec_ctx); break; } if (pkt.pts != AV_NOPTS_VALUE) - pkt.pts = av_rescale_q(pkt.pts, video_codec->time_base, video_st->time_base); + pkt.pts = av_rescale_q(pkt.pts, video_codec_ctx->time_base, 
video_st->time_base); if (pkt.dts != AV_NOPTS_VALUE) - pkt.dts = av_rescale_q(pkt.dts, video_codec->time_base, video_st->time_base); + pkt.dts = av_rescale_q(pkt.dts, video_codec_ctx->time_base, video_st->time_base); if (pkt.duration > 0) - pkt.duration = av_rescale_q(pkt.duration, video_codec->time_base, video_st->time_base); + pkt.duration = av_rescale_q(pkt.duration, video_codec_ctx->time_base, video_st->time_base); pkt.stream_index = video_st->index; error_code = av_interleaved_write_frame(oc, &pkt); } @@ -896,18 +898,18 @@ void FFmpegWriter::flush_encoders() { #if LIBAVFORMAT_VERSION_MAJOR >= 54 // Encode video packet (older than FFmpeg 3.2) - error_code = avcodec_encode_video2(video_codec, &pkt, NULL, &got_packet); + error_code = avcodec_encode_video2(video_codec_ctx, &pkt, NULL, &got_packet); #else // Encode video packet (even older version of FFmpeg) int video_outbuf_size = 0; /* encode the image */ - int out_size = avcodec_encode_video(video_codec, NULL, video_outbuf_size, NULL); + int out_size = avcodec_encode_video(video_codec_ctx, NULL, video_outbuf_size, NULL); /* if zero size, it means the image was buffered */ if (out_size > 0) { - if(video_codec->coded_frame->key_frame) + if(video_codec_ctx->coded_frame->key_frame) pkt.flags |= AV_PKT_FLAG_KEY; pkt.data= video_outbuf; pkt.size= out_size; @@ -927,11 +929,11 @@ void FFmpegWriter::flush_encoders() { // set the timestamp if (pkt.pts != AV_NOPTS_VALUE) - pkt.pts = av_rescale_q(pkt.pts, video_codec->time_base, video_st->time_base); + pkt.pts = av_rescale_q(pkt.pts, video_codec_ctx->time_base, video_st->time_base); if (pkt.dts != AV_NOPTS_VALUE) - pkt.dts = av_rescale_q(pkt.dts, video_codec->time_base, video_st->time_base); + pkt.dts = av_rescale_q(pkt.dts, video_codec_ctx->time_base, video_st->time_base); if (pkt.duration > 0) - pkt.duration = av_rescale_q(pkt.duration, video_codec->time_base, video_st->time_base); + pkt.duration = av_rescale_q(pkt.duration, video_codec_ctx->time_base, video_st->time_base); pkt.stream_index = video_st->index; // Write packet @@ -948,9 +950,9 @@ void FFmpegWriter::flush_encoders() { // Increment PTS (in samples and scaled to the codec's timebase) #if LIBAVFORMAT_VERSION_MAJOR >= 54 // for some reason, it requires me to multiply channels X 2 - write_audio_count += av_rescale_q(audio_input_position / (audio_codec->channels * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16)), (AVRational){1, info.sample_rate}, audio_codec->time_base); + write_audio_count += av_rescale_q(audio_input_position / (audio_codec_ctx->channels * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16)), av_make_q(1, info.sample_rate), audio_codec_ctx->time_base); #else - write_audio_count += av_rescale_q(audio_input_position / audio_codec->channels, (AVRational){1, info.sample_rate}, audio_codec->time_base); + write_audio_count += av_rescale_q(audio_input_position / audio_codec_ctx->channels, av_make_q(1, info.sample_rate), audio_codec_ctx->time_base); #endif AVPacket pkt; @@ -963,9 +965,9 @@ void FFmpegWriter::flush_encoders() { int error_code = 0; int got_packet = 0; #if IS_FFMPEG_3_2 - error_code = avcodec_send_frame(audio_codec, NULL); + error_code = avcodec_send_frame(audio_codec_ctx, NULL); #else - error_code = avcodec_encode_audio2(audio_codec, &pkt, NULL, &got_packet); + error_code = avcodec_encode_audio2(audio_codec_ctx, &pkt, NULL, &got_packet); #endif if (error_code < 0) { ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::flush_encoders ERROR [" + (std::string)av_err2str(error_code) + "]", "error_code", error_code); @@ -980,11 
+982,11 @@ void FFmpegWriter::flush_encoders() { // Scale the PTS to the audio stream timebase (which is sometimes different than the codec's timebase) if (pkt.pts != AV_NOPTS_VALUE) - pkt.pts = av_rescale_q(pkt.pts, audio_codec->time_base, audio_st->time_base); + pkt.pts = av_rescale_q(pkt.pts, audio_codec_ctx->time_base, audio_st->time_base); if (pkt.dts != AV_NOPTS_VALUE) - pkt.dts = av_rescale_q(pkt.dts, audio_codec->time_base, audio_st->time_base); + pkt.dts = av_rescale_q(pkt.dts, audio_codec_ctx->time_base, audio_st->time_base); if (pkt.duration > 0) - pkt.duration = av_rescale_q(pkt.duration, audio_codec->time_base, audio_st->time_base); + pkt.duration = av_rescale_q(pkt.duration, audio_codec_ctx->time_base, audio_st->time_base); // set stream pkt.stream_index = audio_st->index; @@ -1101,8 +1103,13 @@ AVStream *FFmpegWriter::add_audio_stream() { if (codec == NULL) throw InvalidCodec("A valid audio codec could not be found for this file.", path); + // Free any previous memory allocations + if (audio_codec_ctx != NULL) { + AV_FREE_CONTEXT(audio_codec_ctx); + } + // Create a new audio stream - AV_FORMAT_NEW_STREAM(oc, audio_codec, codec, st) + AV_FORMAT_NEW_STREAM(oc, audio_codec_ctx, codec, st) c->codec_id = codec->id; #if LIBAVFORMAT_VERSION_MAJOR >= 53 @@ -1185,7 +1192,7 @@ AVStream *FFmpegWriter::add_video_stream() { throw InvalidCodec("A valid video codec could not be found for this file.", path); // Create a new video stream - AV_FORMAT_NEW_STREAM(oc, video_codec, codec, st) + AV_FORMAT_NEW_STREAM(oc, video_codec_ctx, codec, st) c->codec_id = codec->id; #if LIBAVFORMAT_VERSION_MAJOR >= 53 @@ -1202,8 +1209,10 @@ AVStream *FFmpegWriter::add_video_stream() { ) { c->bit_rate = info.video_bit_rate; if (info.video_bit_rate >= 1500000) { - c->qmin = 2; - c->qmax = 30; + if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) { + c->qmin = 2; + c->qmax = 30; + } } // Here should be the setting for low fixed bitrate // Defaults are used because mpeg2 otherwise had problems @@ -1230,7 +1239,7 @@ AVStream *FFmpegWriter::add_video_stream() { info.video_bit_rate = calculated_quality; } // medium } - if (strstr(info.vcodec.c_str(), "svt_av1") != NULL) { + if (strstr(info.vcodec.c_str(), "svtav1") != NULL) { av_opt_set_int(c->priv_data, "preset", 6, 0); av_opt_set_int(c->priv_data, "forced-idr",1,0); } @@ -1361,15 +1370,15 @@ AVStream *FFmpegWriter::add_video_stream() { // open audio codec void FFmpegWriter::open_audio(AVFormatContext *oc, AVStream *st) { AVCodec *codec; - AV_GET_CODEC_FROM_STREAM(st, audio_codec) + AV_GET_CODEC_FROM_STREAM(st, audio_codec_ctx) // Set number of threads equal to number of processors (not to exceed 16) - audio_codec->thread_count = std::min(FF_NUM_PROCESSORS, 16); + audio_codec_ctx->thread_count = std::min(FF_NUM_PROCESSORS, 16); // Find the audio encoder codec = avcodec_find_encoder_by_name(info.acodec.c_str()); if (!codec) - codec = avcodec_find_encoder(audio_codec->codec_id); + codec = avcodec_find_encoder(audio_codec_ctx->codec_id); if (!codec) throw InvalidCodec("Could not find codec", path); @@ -1378,16 +1387,16 @@ void FFmpegWriter::open_audio(AVFormatContext *oc, AVStream *st) { av_dict_set(&opts, "strict", "experimental", 0); // Open the codec - if (avcodec_open2(audio_codec, codec, &opts) < 0) + if (avcodec_open2(audio_codec_ctx, codec, &opts) < 0) throw InvalidCodec("Could not open audio codec", path); - AV_COPY_PARAMS_FROM_CONTEXT(st, audio_codec); + AV_COPY_PARAMS_FROM_CONTEXT(st, audio_codec_ctx); // Free options av_dict_free(&opts); // Calculate the size of 
 	// Calculate the size of the input frame (i..e how many samples per packet), and the output buffer
 	// TODO: Ugly hack for PCM codecs (will be removed ASAP with new PCM support to compute the input frame size in samples
-	if (audio_codec->frame_size <= 1) {
+	if (audio_codec_ctx->frame_size <= 1) {
 		// No frame size found... so calculate
 		audio_input_frame_size = 50000 / info.channels;
@@ -1404,7 +1413,7 @@
 		}
 	} else {
 		// Set frame size based on the codec
-		audio_input_frame_size = audio_codec->frame_size;
+		audio_input_frame_size = audio_codec_ctx->frame_size;
 	}
 	// Set the initial frame size (since it might change during resampling)
@@ -1426,16 +1435,16 @@
 		av_dict_set(&st->metadata, iter->first.c_str(), iter->second.c_str(), 0);
 	}
-	ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::open_audio", "audio_codec->thread_count", audio_codec->thread_count, "audio_input_frame_size", audio_input_frame_size, "buffer_size", AVCODEC_MAX_AUDIO_FRAME_SIZE + MY_INPUT_BUFFER_PADDING_SIZE);
+	ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::open_audio", "audio_codec_ctx->thread_count", audio_codec_ctx->thread_count, "audio_input_frame_size", audio_input_frame_size, "buffer_size", AVCODEC_MAX_AUDIO_FRAME_SIZE + MY_INPUT_BUFFER_PADDING_SIZE);
 }
 // open video codec
 void FFmpegWriter::open_video(AVFormatContext *oc, AVStream *st) {
 	AVCodec *codec;
-	AV_GET_CODEC_FROM_STREAM(st, video_codec)
+	AV_GET_CODEC_FROM_STREAM(st, video_codec_ctx)
 	// Set number of threads equal to number of processors (not to exceed 16)
-	video_codec->thread_count = std::min(FF_NUM_PROCESSORS, 16);
+	video_codec_ctx->thread_count = std::min(FF_NUM_PROCESSORS, 16);
 #if HAVE_HW_ACCEL
 	if (hw_en_on && hw_en_supported) {
@@ -1490,8 +1499,8 @@
 		throw InvalidCodec("Could not find codec", path);
 	/* Force max_b_frames to 0 in some cases (i.e. for mjpeg image sequences */
-	if (video_codec->max_b_frames && video_codec->codec_id != AV_CODEC_ID_MPEG4 && video_codec->codec_id != AV_CODEC_ID_MPEG1VIDEO && video_codec->codec_id != AV_CODEC_ID_MPEG2VIDEO)
-		video_codec->max_b_frames = 0;
+	if (video_codec_ctx->max_b_frames && video_codec_ctx->codec_id != AV_CODEC_ID_MPEG4 && video_codec_ctx->codec_id != AV_CODEC_ID_MPEG1VIDEO && video_codec_ctx->codec_id != AV_CODEC_ID_MPEG2VIDEO)
+		video_codec_ctx->max_b_frames = 0;
 	// Init options
 	AVDictionary *opts = NULL;
@@ -1499,7 +1508,7 @@
 #if HAVE_HW_ACCEL
 	if (hw_en_on && hw_en_supported) {
-		video_codec->pix_fmt = hw_en_av_pix_fmt;
+		video_codec_ctx->pix_fmt = hw_en_av_pix_fmt;
 		// for the list of possible options, see the list of codec-specific options:
 		// e.g. ffmpeg -h encoder=h264_vaapi or ffmpeg -h encoder=hevc_vaapi
@@ -1509,23 +1518,23 @@
 		// which is ffmpeg version-specific.
 		if (hw_en_av_pix_fmt == AV_PIX_FMT_VAAPI) {
 			int64_t qp;
-			if (av_opt_get_int(video_codec->priv_data, "qp", 0, &qp) != 0 || qp == 0) {
+			if (av_opt_get_int(video_codec_ctx->priv_data, "qp", 0, &qp) != 0 || qp == 0) {
 				// unless "qp" was set for CQP, switch to VBR RC mode
-				av_opt_set(video_codec->priv_data, "rc_mode", "VBR", 0);
+				av_opt_set(video_codec_ctx->priv_data, "rc_mode", "VBR", 0);
 				// In the current state (ffmpeg-4.2-4 libva-mesa-driver-19.1.5-1) to use VBR,
 				// one has to specify both bit_rate and maxrate, otherwise a small low quality file is generated on Intel iGPU).
-				video_codec->rc_max_rate = video_codec->bit_rate;
+				video_codec_ctx->rc_max_rate = video_codec_ctx->bit_rate;
 			}
 		}
-		switch (video_codec->codec_id) {
+		switch (video_codec_ctx->codec_id) {
 			case AV_CODEC_ID_H264:
-				video_codec->max_b_frames = 0; // At least this GPU doesn't support b-frames
-				video_codec->profile = FF_PROFILE_H264_BASELINE | FF_PROFILE_H264_CONSTRAINED;
-				av_opt_set(video_codec->priv_data, "preset", "slow", 0);
-				av_opt_set(video_codec->priv_data, "tune", "zerolatency", 0);
-				av_opt_set(video_codec->priv_data, "vprofile", "baseline", AV_OPT_SEARCH_CHILDREN);
+				video_codec_ctx->max_b_frames = 0; // At least this GPU doesn't support b-frames
+				video_codec_ctx->profile = FF_PROFILE_H264_BASELINE | FF_PROFILE_H264_CONSTRAINED;
+				av_opt_set(video_codec_ctx->priv_data, "preset", "slow", 0);
+				av_opt_set(video_codec_ctx->priv_data, "tune", "zerolatency", 0);
+				av_opt_set(video_codec_ctx->priv_data, "vprofile", "baseline", AV_OPT_SEARCH_CHILDREN);
 				break;
 			case AV_CODEC_ID_HEVC:
 				// tested to work with defaults
@@ -1535,13 +1544,13 @@ void FFmpegWriter::open_video(AVFormatContext *oc, AVStream *st) {
 				break;
 			default:
 				ZmqLogger::Instance()->AppendDebugMethod("No codec-specific options defined for this codec. HW encoding may fail",
-					"codec_id", video_codec->codec_id);
+					"codec_id", video_codec_ctx->codec_id);
 				break;
 		}
 		// set hw_frames_ctx for encoder's AVCodecContext
 		int err;
-		if ((err = set_hwframe_ctx(video_codec, hw_device_ctx, info.width, info.height)) < 0) {
+		if ((err = set_hwframe_ctx(video_codec_ctx, hw_device_ctx, info.width, info.height)) < 0) {
 			ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::open_video (set_hwframe_ctx) ERROR faled to set hwframe context", "width", info.width, "height", info.height, av_err2str(err), -1);
 		}
@@ -1549,9 +1558,9 @@ void FFmpegWriter::open_video(AVFormatContext *oc, AVStream *st) {
 #endif // HAVE_HW_ACCEL
 	/* open the codec */
-	if (avcodec_open2(video_codec, codec, &opts) < 0)
+	if (avcodec_open2(video_codec_ctx, codec, &opts) < 0)
 		throw InvalidCodec("Could not open video codec", path);
-	AV_COPY_PARAMS_FROM_CONTEXT(st, video_codec);
+	AV_COPY_PARAMS_FROM_CONTEXT(st, video_codec_ctx);
 	// Free options
 	av_dict_free(&opts);
@@ -1561,7 +1570,7 @@
 		av_dict_set(&st->metadata, iter->first.c_str(), iter->second.c_str(), 0);
 	}
-	ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::open_video", "video_codec->thread_count", video_codec->thread_count);
+	ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::open_video", "video_codec_ctx->thread_count", video_codec_ctx->thread_count);
 }
@@ -1639,7 +1648,7 @@ void FFmpegWriter::write_audio_packets(bool is_final) {
 	ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_audio_packets", "is_final", is_final, "total_frame_samples", total_frame_samples, "channel_layout_in_frame", channel_layout_in_frame, "channels_in_frame", channels_in_frame, "samples_in_frame", samples_in_frame, "LAYOUT_MONO", LAYOUT_MONO);
 	// Keep track of the original sample format
-	AVSampleFormat output_sample_fmt = audio_codec->sample_fmt;
+	AVSampleFormat output_sample_fmt = audio_codec_ctx->sample_fmt;
 	AVFrame *audio_frame = NULL;
 	if (!is_final) {
@@ -1655,7 +1664,7 @@ void FFmpegWriter::write_audio_packets(bool is_final) {
 	}
 	// Do not convert audio to planar format (yet). We need to keep everything interleaved at this point.
-	switch (audio_codec->sample_fmt) {
+	switch (audio_codec_ctx->sample_fmt) {
 		case AV_SAMPLE_FMT_FLTP: {
 			output_sample_fmt = AV_SAMPLE_FMT_FLT;
 			break;
@@ -1766,8 +1775,8 @@ void FFmpegWriter::write_audio_packets(bool is_final) {
 		// Convert to planar (if needed by audio codec)
 		AVFrame *frame_final = AV_ALLOCATE_FRAME();
 		AV_RESET_FRAME(frame_final);
-		if (av_sample_fmt_is_planar(audio_codec->sample_fmt)) {
-			ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_audio_packets (2nd resampling for Planar formats)", "in_sample_fmt", output_sample_fmt, "out_sample_fmt", audio_codec->sample_fmt, "in_sample_rate", info.sample_rate, "out_sample_rate", info.sample_rate, "in_channels", info.channels, "out_channels", info.channels);
+		if (av_sample_fmt_is_planar(audio_codec_ctx->sample_fmt)) {
+			ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_audio_packets (2nd resampling for Planar formats)", "in_sample_fmt", output_sample_fmt, "out_sample_fmt", audio_codec_ctx->sample_fmt, "in_sample_rate", info.sample_rate, "out_sample_rate", info.sample_rate, "in_channels", info.channels, "out_channels", info.channels);
 			// setup resample context
 			if (!avr_planar) {
@@ -1775,7 +1784,7 @@ void FFmpegWriter::write_audio_packets(bool is_final) {
 				av_opt_set_int(avr_planar, "in_channel_layout", info.channel_layout, 0);
 				av_opt_set_int(avr_planar, "out_channel_layout", info.channel_layout, 0);
 				av_opt_set_int(avr_planar, "in_sample_fmt", output_sample_fmt, 0);
-				av_opt_set_int(avr_planar, "out_sample_fmt", audio_codec->sample_fmt, 0); // planar not allowed here
+				av_opt_set_int(avr_planar, "out_sample_fmt", audio_codec_ctx->sample_fmt, 0); // planar not allowed here
 				av_opt_set_int(avr_planar, "in_sample_rate", info.sample_rate, 0);
 				av_opt_set_int(avr_planar, "out_sample_rate", info.sample_rate, 0);
 				av_opt_set_int(avr_planar, "in_channels", info.channels, 0);
@@ -1801,7 +1810,7 @@
 			// Create output frame (and allocate arrays)
 			frame_final->nb_samples = audio_input_frame_size;
-			av_samples_alloc(frame_final->data, frame_final->linesize, info.channels, frame_final->nb_samples, audio_codec->sample_fmt, 0);
+			av_samples_alloc(frame_final->data, frame_final->linesize, info.channels, frame_final->nb_samples, audio_codec_ctx->sample_fmt, 0);
 			// Convert audio samples
 			int nb_samples = SWR_CONVERT(avr_planar,	// audio resample context
@@ -1814,7 +1823,7 @@
 			// Copy audio samples over original samples
 			if (nb_samples > 0)
-				memcpy(samples, frame_final->data[0], nb_samples * av_get_bytes_per_sample(audio_codec->sample_fmt) * info.channels);
+				memcpy(samples, frame_final->data[0], nb_samples * av_get_bytes_per_sample(audio_codec_ctx->sample_fmt) * info.channels);
 			// deallocate AVFrame
 			av_freep(&(audio_frame->data[0]));
@@ -1826,16 +1835,16 @@
 		} else {
 			// Create a new array
 			final_samples = (int16_t *) av_malloc(
-				sizeof(int16_t) * audio_input_position * (av_get_bytes_per_sample(audio_codec->sample_fmt) / av_get_bytes_per_sample(AV_SAMPLE_FMT_S16)));
+				sizeof(int16_t) * audio_input_position * (av_get_bytes_per_sample(audio_codec_ctx->sample_fmt) / av_get_bytes_per_sample(AV_SAMPLE_FMT_S16)));
 			// Copy audio into buffer for frame
-			memcpy(final_samples, samples, audio_input_position * av_get_bytes_per_sample(audio_codec->sample_fmt));
+			memcpy(final_samples, samples, audio_input_position * av_get_bytes_per_sample(audio_codec_ctx->sample_fmt));
 			// Init the nb_samples property
 			frame_final->nb_samples = audio_input_frame_size;
 			// Fill the final_frame AVFrame with audio (non planar)
-			avcodec_fill_audio_frame(frame_final, audio_codec->channels, audio_codec->sample_fmt, (uint8_t *) final_samples,
+			avcodec_fill_audio_frame(frame_final, audio_codec_ctx->channels, audio_codec_ctx->sample_fmt, (uint8_t *) final_samples,
 				audio_encoder_buffer_size, 0);
 		}
@@ -1860,18 +1869,18 @@ void FFmpegWriter::write_audio_packets(bool is_final) {
 		int error_code;
 		int ret = 0;
 		int frame_finished = 0;
-		error_code = ret = avcodec_send_frame(audio_codec, frame_final);
+		error_code = ret = avcodec_send_frame(audio_codec_ctx, frame_final);
 		if (ret < 0 && ret != AVERROR(EINVAL) && ret != AVERROR_EOF) {
-			avcodec_send_frame(audio_codec, NULL);
+			avcodec_send_frame(audio_codec_ctx, NULL);
 		} else {
 			if (ret >= 0)
 				pkt.size = 0;
-			ret = avcodec_receive_packet(audio_codec, &pkt);
+			ret = avcodec_receive_packet(audio_codec_ctx, &pkt);
 			if (ret >= 0)
 				frame_finished = 1;
 			if(ret == AVERROR(EINVAL) || ret == AVERROR_EOF) {
-				avcodec_flush_buffers(audio_codec);
+				avcodec_flush_buffers(audio_codec_ctx);
 				ret = 0;
 			}
 			if (ret >= 0) {
@@ -1885,7 +1894,7 @@ void FFmpegWriter::write_audio_packets(bool is_final) {
 		got_packet_ptr = ret;
 #else
 		// Encode audio (older versions of FFmpeg)
-		int error_code = avcodec_encode_audio2(audio_codec, &pkt, frame_final, &got_packet_ptr);
+		int error_code = avcodec_encode_audio2(audio_codec_ctx, &pkt, frame_final, &got_packet_ptr);
 #endif
 		/* if zero size, it means the image was buffered */
 		if (error_code == 0 && got_packet_ptr) {
@@ -1896,11 +1905,11 @@
 			// Scale the PTS to the audio stream timebase (which is sometimes different than the codec's timebase)
 			if (pkt.pts != AV_NOPTS_VALUE)
-				pkt.pts = av_rescale_q(pkt.pts, audio_codec->time_base, audio_st->time_base);
+				pkt.pts = av_rescale_q(pkt.pts, audio_codec_ctx->time_base, audio_st->time_base);
 			if (pkt.dts != AV_NOPTS_VALUE)
-				pkt.dts = av_rescale_q(pkt.dts, audio_codec->time_base, audio_st->time_base);
+				pkt.dts = av_rescale_q(pkt.dts, audio_codec_ctx->time_base, audio_st->time_base);
 			if (pkt.duration > 0)
-				pkt.duration = av_rescale_q(pkt.duration, audio_codec->time_base, audio_st->time_base);
+				pkt.duration = av_rescale_q(pkt.duration, audio_codec_ctx->time_base, audio_st->time_base);
 			// set stream
 			pkt.stream_index = audio_st->index;
@@ -2014,7 +2023,7 @@ void FFmpegWriter::process_video_packet(std::shared_ptr<Frame> frame) {
 		frame_final = allocate_avframe((AVPixelFormat)(video_st->codecpar->format), info.width, info.height, &bytes_final, NULL);
 	}
 #else
-	AVFrame *frame_final = allocate_avframe(video_codec->pix_fmt, info.width, info.height, &bytes_final, NULL);
+	AVFrame *frame_final = allocate_avframe(video_codec_ctx->pix_fmt, info.width, info.height, &bytes_final, NULL);
 #endif // IS_FFMPEG_3_2
 	// Fill with data
@@ -2057,7 +2066,7 @@ bool FFmpegWriter::write_video_packet(std::shared_ptr<Frame> frame, AVFrame *fra
 		pkt.size = sizeof(AVPicture);
 		// Increment PTS (in frames and scaled to the codec's timebase)
-		write_video_count += av_rescale_q(1, (AVRational) {info.fps.den, info.fps.num}, video_codec->time_base);
+		write_video_count += av_rescale_q(1, av_make_q(info.fps.den, info.fps.num), video_codec_ctx->time_base);
 		pkt.pts = write_video_count;
 		/* write the compressed frame in the media file */
@@ -2083,7 +2092,7 @@ bool FFmpegWriter::write_video_packet(std::shared_ptr<Frame> frame, AVFrame *fra
 		uint8_t *video_outbuf = NULL;
 		// Increment PTS (in frames and scaled to the codec's timebase)
-		write_video_count += av_rescale_q(1, (AVRational) {info.fps.den, info.fps.num}, video_codec->time_base);
+		write_video_count += av_rescale_q(1, av_make_q(info.fps.den, info.fps.num), video_codec_ctx->time_base);
 		// Assign the initial AVFrame PTS from the frame counter
 		frame_final->pts = write_video_count;
@@ -2092,7 +2101,7 @@ bool FFmpegWriter::write_video_packet(std::shared_ptr<Frame> frame, AVFrame *fra
 			if (!(hw_frame = av_frame_alloc())) {
 				fprintf(stderr, "Error code: av_hwframe_alloc\n");
 			}
-			if (av_hwframe_get_buffer(video_codec->hw_frames_ctx, hw_frame, 0) < 0) {
+			if (av_hwframe_get_buffer(video_codec_ctx->hw_frames_ctx, hw_frame, 0) < 0) {
 				fprintf(stderr, "Error code: av_hwframe_get_buffer\n");
 			}
 			if (!hw_frame->hw_frames_ctx) {
@@ -2114,11 +2123,11 @@ bool FFmpegWriter::write_video_packet(std::shared_ptr<Frame> frame, AVFrame *fra
 #if HAVE_HW_ACCEL
 		if (hw_en_on && hw_en_supported) {
-			ret = avcodec_send_frame(video_codec, hw_frame); //hw_frame!!!
+			ret = avcodec_send_frame(video_codec_ctx, hw_frame); //hw_frame!!!
 		} else
 #endif // HAVE_HW_ACCEL
 		{
-			ret = avcodec_send_frame(video_codec, frame_final);
+			ret = avcodec_send_frame(video_codec_ctx, frame_final);
 		}
 		error_code = ret;
 		if (ret < 0 ) {
@@ -2129,14 +2138,14 @@ bool FFmpegWriter::write_video_packet(std::shared_ptr<Frame> frame, AVFrame *fra
 			if (ret == AVERROR_EOF ) {
 				std::cerr << "Frame AVERROR_EOF" << "\n";
 			}
-			avcodec_send_frame(video_codec, NULL);
+			avcodec_send_frame(video_codec_ctx, NULL);
 		} else {
 			while (ret >= 0) {
-				ret = avcodec_receive_packet(video_codec, &pkt);
+				ret = avcodec_receive_packet(video_codec_ctx, &pkt);
 				if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
-					avcodec_flush_buffers(video_codec);
+					avcodec_flush_buffers(video_codec_ctx);
 					got_packet_ptr = 0;
 					break;
 				}
@@ -2149,7 +2158,7 @@ bool FFmpegWriter::write_video_packet(std::shared_ptr<Frame> frame, AVFrame *fra
 #else
 #if LIBAVFORMAT_VERSION_MAJOR >= 54
 		// Write video packet (older than FFmpeg 3.2)
-		error_code = avcodec_encode_video2(video_codec, &pkt, frame_final, &got_packet_ptr);
+		error_code = avcodec_encode_video2(video_codec_ctx, &pkt, frame_final, &got_packet_ptr);
 		if (error_code != 0) {
 			std::cerr << "Frame AVERROR_EOF" << "\n";
 		}
@@ -2162,11 +2171,11 @@ bool FFmpegWriter::write_video_packet(std::shared_ptr<Frame> frame, AVFrame *fra
 		video_outbuf = (uint8_t*) av_malloc(200000);
 		/* encode the image */
-		int out_size = avcodec_encode_video(video_codec, video_outbuf, video_outbuf_size, frame_final);
+		int out_size = avcodec_encode_video(video_codec_ctx, video_outbuf, video_outbuf_size, frame_final);
 		/* if zero size, it means the image was buffered */
 		if (out_size > 0) {
-			if(video_codec->coded_frame->key_frame)
+			if(video_codec_ctx->coded_frame->key_frame)
 				pkt.flags |= AV_PKT_FLAG_KEY;
 			pkt.data= video_outbuf;
 			pkt.size= out_size;
@@ -2186,11 +2195,11 @@ bool FFmpegWriter::write_video_packet(std::shared_ptr<Frame> frame, AVFrame *fra
 		// set the timestamp
 		if (pkt.pts != AV_NOPTS_VALUE)
-			pkt.pts = av_rescale_q(pkt.pts, video_codec->time_base, video_st->time_base);
+			pkt.pts = av_rescale_q(pkt.pts, video_codec_ctx->time_base, video_st->time_base);
 		if (pkt.dts != AV_NOPTS_VALUE)
-			pkt.dts = av_rescale_q(pkt.dts, video_codec->time_base, video_st->time_base);
+			pkt.dts = av_rescale_q(pkt.dts, video_codec_ctx->time_base, video_st->time_base);
 		if (pkt.duration > 0)
-			pkt.duration = av_rescale_q(pkt.duration, video_codec->time_base, video_st->time_base);
+			pkt.duration = av_rescale_q(pkt.duration, video_codec_ctx->time_base, video_st->time_base);
 		pkt.stream_index = video_st->index;
 		/* write the compressed frame in the media file */
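For context (not part of the patch above): FFmpeg stores pts/dts/duration as integer ticks of a time_base, so every packet must be rescaled when it moves from the encoder context to the output stream, which is the pattern these hunks keep repeating. The patch also swaps the C99 compound literal (AVRational){1, rate} for av_make_q(), which compiles cleanly as both C and C++. A minimal standalone sketch of the conversion; the 30 fps and 90 kHz time bases are illustrative values, not taken from the patch:

#include <cstdint>
#include <cstdio>
extern "C" {
#include <libavutil/mathematics.h>  // av_rescale_q()
#include <libavutil/rational.h>     // AVRational, av_make_q()
}

int main() {
    // Codec time base: 1/30 s per tick (30 fps); stream time base: 1/90000 s
    AVRational codec_tb  = av_make_q(1, 30);
    AVRational stream_tb = av_make_q(1, 90000);

    int64_t pts_in_codec_tb = 45;  // 45 ticks = 1.5 seconds of video
    // Computes a * bq / cq without intermediate overflow
    int64_t pts_in_stream_tb = av_rescale_q(pts_in_codec_tb, codec_tb, stream_tb);

    // Prints "45 -> 135000" (45 * 90000 / 30)
    std::printf("%lld -> %lld\n", (long long)pts_in_codec_tb,
                (long long)pts_in_stream_tb);
    return 0;
}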
diff --git a/src/Frame.cpp b/src/Frame.cpp
index 764b9651a..483df7688 100644
--- a/src/Frame.cpp
+++ b/src/Frame.cpp
@@ -30,6 +30,9 @@
 #include "../include/Frame.h"
+#include <thread>    // for std::this_thread::sleep_for
+#include <chrono>    // for std::chrono::milliseconds
+
 using namespace std;
 using namespace openshot;
@@ -992,7 +995,7 @@ void Frame::Play()
 	// Output error (if any)
 	if (error.isNotEmpty()) {
-		cout << "Error on initialise(): " << error.toStdString() << endl;
+		cout << "Error on initialise(): " << error << endl;
 	}
 	juce::AudioSourcePlayer audioSourcePlayer;
@@ -1028,7 +1031,7 @@ void Frame::Play()
 	while (transport1.isPlaying())
 	{
 		cout << "playing" << endl;
-		usleep(1000000);
+		std::this_thread::sleep_for(std::chrono::seconds(1));
 	}
 	cout << "DONE!!!" << endl;
diff --git a/src/FrameMapper.cpp b/src/FrameMapper.cpp
index 4b213f81c..4c561f8fe 100644
--- a/src/FrameMapper.cpp
+++ b/src/FrameMapper.cpp
@@ -61,9 +61,9 @@ FrameMapper::FrameMapper(ReaderBase *reader, Fraction target, PulldownType targe
 // Destructor
 FrameMapper::~FrameMapper() {
-	if (is_open)
-		// Auto Close if not already
-		Close();
+
+	// Auto Close if not already
+	Close();
 	reader = NULL;
 }
@@ -487,7 +487,7 @@ std::shared_ptr<Frame> FrameMapper::GetFrame(int64_t requested_frame)
 	// includes some additional input samples on first iteration,
 	// and continues the offset to ensure that the sample rate
 	// converter isn't input limited.
-	const int EXTRA_INPUT_SAMPLES = 20;
+	const int EXTRA_INPUT_SAMPLES = 100;
 	// Extend end sample count by an additional EXTRA_INPUT_SAMPLES samples
 	copy_samples.sample_end += EXTRA_INPUT_SAMPLES;
diff --git a/src/Qt/AudioPlaybackThread.cpp b/src/Qt/AudioPlaybackThread.cpp
index 44c837e74..2ee1009c5 100644
--- a/src/Qt/AudioPlaybackThread.cpp
+++ b/src/Qt/AudioPlaybackThread.cpp
@@ -31,6 +31,9 @@
 #include "../../include/Qt/AudioPlaybackThread.h"
+#include <thread>    // for std::this_thread::sleep_for
+#include <chrono>    // for std::chrono::milliseconds
+
 namespace openshot
 {
@@ -194,7 +197,7 @@ namespace openshot
 		transport.start();
 		while (!threadShouldExit() && transport.isPlaying() && is_playing)
-			usleep(2500);
+			std::this_thread::sleep_for(std::chrono::milliseconds(2));
 		// Stop audio and shutdown transport
 		Stop();
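For context (not part of the patch): the usleep() replacements in Frame.cpp and AudioPlaybackThread.cpp above, and in the Qt player files below, all follow one recipe. usleep() is POSIX-specific and takes a unitless microsecond count, while std::this_thread::sleep_for() is portable C++11 and carries the unit in the type. A minimal sketch of the equivalence:

#include <chrono>
#include <thread>

int main() {
    // usleep(2500) slept for a bare 2500 (microseconds);
    // the C++11 spelling makes the unit explicit:
    std::this_thread::sleep_for(std::chrono::microseconds(2500));

    // Any chrono duration works, so coarser units read naturally too
    std::this_thread::sleep_for(std::chrono::milliseconds(2));
    return 0;
}

Note that the AudioPlaybackThread hunk is not an exact equivalence: the old code slept 2500 microseconds per loop iteration, the new code 2 milliseconds.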
diff --git a/src/Qt/PlayerDemo.cpp b/src/Qt/PlayerDemo.cpp
index ba20e5cfe..e5f0e11d0 100644
--- a/src/Qt/PlayerDemo.cpp
+++ b/src/Qt/PlayerDemo.cpp
@@ -28,11 +28,18 @@
 * along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
 */
-#include "stdio.h"
+#include <cstdio>
 #include "../../include/QtPlayer.h"
 #include "../../include/Qt/PlayerDemo.h"
 #include
 #include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
 
 PlayerDemo::PlayerDemo(QWidget *parent) : QWidget(parent)
diff --git a/src/Qt/PlayerPrivate.cpp b/src/Qt/PlayerPrivate.cpp
index e7c881f8c..db6f2ff10 100644
--- a/src/Qt/PlayerPrivate.cpp
+++ b/src/Qt/PlayerPrivate.cpp
@@ -31,6 +31,9 @@
 #include "../../include/Qt/PlayerPrivate.h"
+#include <thread>    // for std::this_thread::sleep_for
+#include <chrono>    // for std::chrono milliseconds, high_resolution_clock
+
 namespace openshot
 {
 	// Constructor
@@ -54,109 +57,120 @@ namespace openshot
 	// Start thread
 	void PlayerPrivate::run()
 	{
-		// bail if no reader set
-		if (!reader)
-			return;
-
-		// Start the threads
-		if (reader->info.has_audio)
-			audioPlayback->startThread(8);
-		if (reader->info.has_video) {
-			videoCache->startThread(2);
-			videoPlayback->startThread(4);
-		}
-
-		while (!threadShouldExit()) {
-
-			// Calculate the milliseconds a single frame should stay on the screen
-			double frame_time = (1000.0 / reader->info.fps.ToDouble());
-
-			// Get the start time (to track how long a frame takes to render)
-			const Time t1 = Time::getCurrentTime();
-
-			// Get the current video frame (if it's different)
-			frame = getFrame();
-
-			// Experimental Pausing Code (if frame has not changed)
-			if ((speed == 0 && video_position == last_video_position) || (video_position > reader->info.video_length)) {
-				speed = 0;
-				sleep(frame_time);
-				continue;
-			}
-
-			// Set the video frame on the video thread and render frame
-			videoPlayback->frame = frame;
-			videoPlayback->render.signal();
-
-			// Keep track of the last displayed frame
-			last_video_position = video_position;
-
-			// How many frames ahead or behind is the video thread?
-			int64_t video_frame_diff = 0;
-			if (reader->info.has_audio && reader->info.has_video) {
-				if (speed != 1)
-					// Set audio frame again (since we are not in normal speed, and not paused)
-					audioPlayback->Seek(video_position);
-
-				// Only calculate this if a reader contains both an audio and video thread
-				audio_position = audioPlayback->getCurrentFramePosition();
-				video_frame_diff = video_position - audio_position;
-			}
-
-			// Get the end time (to track how long a frame takes to render)
-			const Time t2 = Time::getCurrentTime();
-
-			// Determine how many milliseconds it took to render the frame
-			int64_t render_time = t2.toMilliseconds() - t1.toMilliseconds();
-
-			// Calculate the amount of time to sleep (by subtracting the render time)
-			int sleep_time = int(frame_time - render_time);
-
-			// Debug
-			ZmqLogger::Instance()->AppendDebugMethod("PlayerPrivate::run (determine sleep)", "video_frame_diff", video_frame_diff, "video_position", video_position, "audio_position", audio_position, "speed", speed, "render_time", render_time, "sleep_time", sleep_time);
-
-			// Adjust drift (if more than a few frames off between audio and video)
-			if (video_frame_diff > 0 && reader->info.has_audio && reader->info.has_video)
-				// Since the audio and video threads are running independently, they will quickly get out of sync.
-				// To fix this, we calculate how far ahead or behind the video frame is, and adjust the amount of time
-				// the frame is displayed on the screen (i.e. the sleep time). If a frame is ahead of the audio,
-				// we sleep for longer. If a frame is behind the audio, we sleep less (or not at all), in order for
-				// the video to catch up.
-				sleep_time += (video_frame_diff * (1000.0 / reader->info.fps.ToDouble()));
-
-
-			else if (video_frame_diff < -10 && reader->info.has_audio && reader->info.has_video) {
-				// Skip frame(s) to catch up to the audio (if more than 10 frames behind)
-				video_position += abs(video_frame_diff) / 2; // Seek forward 1/2 the difference
-				sleep_time = 0; // Don't sleep now... immediately go to next position
-			}
-
-			// Sleep (leaving the video frame on the screen for the correct amount of time)
-			if (sleep_time > 0) usleep(sleep_time * 1000);
-
-		}
+		// bail if no reader set
+		if (!reader)
+			return;
+
+		// Start the threads
+		if (reader->info.has_audio)
+			audioPlayback->startThread(8);
+		if (reader->info.has_video) {
+			videoCache->startThread(2);
+			videoPlayback->startThread(4);
+		}
+
+		using std::chrono::duration_cast;
+
+		// Types for storing time durations in whole and fractional milliseconds
+		using ms = std::chrono::milliseconds;
+		using double_ms = std::chrono::duration<double, ms::period>;
+
+		// Calculate on-screen time for a single frame in milliseconds
+		const auto frame_duration = double_ms(1000.0 / reader->info.fps.ToDouble());
+
+		while (!threadShouldExit()) {
+			// Get the start time (to track how long a frame takes to render)
+			const auto time1 = std::chrono::high_resolution_clock::now();
+
+			// Get the current video frame (if it's different)
+			frame = getFrame();
+
+			// Experimental Pausing Code (if frame has not changed)
+			if ((speed == 0 && video_position == last_video_position)
+				|| (video_position > reader->info.video_length)
+			) {
+				speed = 0;
+				std::this_thread::sleep_for(frame_duration);
+				continue;
+			}
+
+			// Set the video frame on the video thread and render frame
+			videoPlayback->frame = frame;
+			videoPlayback->render.signal();
+
+			// Keep track of the last displayed frame
+			last_video_position = video_position;
+
+			// How many frames ahead or behind is the video thread?
+			int64_t video_frame_diff = 0;
+			if (reader->info.has_audio && reader->info.has_video) {
+				if (speed != 1)
+					// Set audio frame again (since we are not in normal speed, and not paused)
+					audioPlayback->Seek(video_position);
+
+				// Only calculate this if a reader contains both an audio and video thread
+				audio_position = audioPlayback->getCurrentFramePosition();
+				video_frame_diff = video_position - audio_position;
+			}
+
+			// Get the end time (to track how long a frame takes to render)
+			const auto time2 = std::chrono::high_resolution_clock::now();
+
+			// Determine how many milliseconds it took to render the frame
+			const auto render_time = double_ms(time2 - time1);
+
+			// Calculate the amount of time to sleep (by subtracting the render time)
+			auto sleep_time = duration_cast<ms>(frame_duration - render_time);
+
+			// Debug
+			ZmqLogger::Instance()->AppendDebugMethod("PlayerPrivate::run (determine sleep)", "video_frame_diff", video_frame_diff, "video_position", video_position, "audio_position", audio_position, "speed", speed, "render_time(ms)", render_time.count(), "sleep_time(ms)", sleep_time.count());
+
+			// Adjust drift (if more than a few frames off between audio and video)
+			if (video_frame_diff > 0 && reader->info.has_audio && reader->info.has_video) {
+				// Since the audio and video threads are running independently,
+				// they will quickly get out of sync. To fix this, we calculate
+				// how far ahead or behind the video frame is, and adjust the amount
+				// of time the frame is displayed on the screen (i.e. the sleep time).
+				// If a frame is ahead of the audio, we sleep for longer.
+				// If a frame is behind the audio, we sleep less (or not at all),
+				// in order for the video to catch up.
+				sleep_time += duration_cast<ms>(video_frame_diff * frame_duration);
+			}
+
+			else if (video_frame_diff < -10 && reader->info.has_audio && reader->info.has_video) {
+				// Skip frame(s) to catch up to the audio (if more than 10 frames behind)
+				video_position += std::fabs(video_frame_diff) / 2; // Seek forward 1/2 the difference
+				sleep_time = sleep_time.zero(); // Don't sleep now... immediately go to next position
+			}
+
+			// Sleep (leaving the video frame on the screen for the correct amount of time)
+			if (sleep_time > sleep_time.zero()) {
+				std::this_thread::sleep_for(sleep_time);
+			}
+
+		}
 	}
 
 	// Get the next displayed frame (based on speed and direction)
 	std::shared_ptr<Frame> PlayerPrivate::getFrame()
 	{
-	try {
-		// Get the next frame (based on speed)
-		if (video_position + speed >= 1 && video_position + speed <= reader->info.video_length)
-			video_position = video_position + speed;
-
-		if (frame && frame->number == video_position && video_position == last_video_position) {
-			// return cached frame
-			return frame;
-		}
-		else
-		{
-			// Update cache on which frame was retrieved
-			videoCache->setCurrentFramePosition(video_position);
-
-			// return frame from reader
-			return reader->GetFrame(video_position);
-		}
+		try {
+			// Get the next frame (based on speed)
+			if (video_position + speed >= 1 && video_position + speed <= reader->info.video_length)
+				video_position = video_position + speed;
+
+			if (frame && frame->number == video_position && video_position == last_video_position) {
+				// return cached frame
+				return frame;
+			}
+			else
+			{
+				// Update cache on which frame was retrieved
+				videoCache->setCurrentFramePosition(video_position);
+
+				// return frame from reader
+				return reader->GetFrame(video_position);
+			}
 		} catch (const ReaderClosed & e) {
 		// ...
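For context (not part of the patch): the rewritten PlayerPrivate::run() above keys all of its frame pacing to std::chrono durations, and the double_ms alias is the important detail. 1000.0 / fps is fractional for common rates (33.33 ms at 30 fps), so an integer millisecond type would silently truncate on every frame. A self-contained sketch of the same pacing idea; the frame rate and the rendering step are simulated, nothing here is OpenShot API:

#include <chrono>
#include <thread>

int main() {
    using ms = std::chrono::milliseconds;
    // Fractional milliseconds, so 1000/30 keeps its .333...
    using double_ms = std::chrono::duration<double, ms::period>;

    const double fps = 30.0;                                 // illustrative
    const auto frame_duration = double_ms(1000.0 / fps);

    for (int frame = 0; frame < 3; ++frame) {
        const auto t1 = std::chrono::high_resolution_clock::now();
        // ... render the frame here ...
        const auto t2 = std::chrono::high_resolution_clock::now();

        // Sleep for whatever part of the frame's screen time is left
        const auto render_time = double_ms(t2 - t1);
        const auto sleep_time =
            std::chrono::duration_cast<ms>(frame_duration - render_time);
        if (sleep_time > sleep_time.zero())
            std::this_thread::sleep_for(sleep_time);
    }
    return 0;
}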
diff --git a/src/Qt/VideoCacheThread.cpp b/src/Qt/VideoCacheThread.cpp
index 6cff46ba8..4279e2381 100644
--- a/src/Qt/VideoCacheThread.cpp
+++ b/src/Qt/VideoCacheThread.cpp
@@ -31,6 +31,9 @@
 #include "../../include/Qt/VideoCacheThread.h"
 #include
+#include <thread>    // for std::this_thread::sleep_for
+#include <chrono>    // for std::chrono::milliseconds
+
 namespace openshot
 {
 	// Constructor
@@ -81,10 +84,14 @@ namespace openshot
 	// Start the thread
 	void VideoCacheThread::run()
 	{
-		while (!threadShouldExit() && is_playing) {
+		// Types for storing time durations in whole and fractional milliseconds
+		using ms = std::chrono::milliseconds;
+		using double_ms = std::chrono::duration<double, ms::period>;
+
+		// Calculate on-screen time for a single frame in milliseconds
+		const auto frame_duration = double_ms(1000.0 / reader->info.fps.ToDouble());
-
-			// Calculate sleep time for frame rate
-			double frame_time = (1000.0 / reader->info.fps.ToDouble());
+		while (!threadShouldExit() && is_playing) {
 			// Cache frames before the other threads need them
 			// Cache frames up to the max frames
@@ -117,7 +124,7 @@ namespace openshot
 			}
 			// Sleep for 1 frame length
-			usleep(frame_time * 1000);
+			std::this_thread::sleep_for(frame_duration);
 		}
 		return;
diff --git a/src/QtImageReader.cpp b/src/QtImageReader.cpp
index cf64ef930..59060ccd7 100644
--- a/src/QtImageReader.cpp
+++ b/src/QtImageReader.cpp
@@ -187,7 +187,7 @@ std::shared_ptr<Frame> QtImageReader::GetFrame(int64_t requested_frame)
 	if (max_height <= 0)
 		max_height = info.height;
-	Clip* parent = (Clip*) GetClip();
+	Clip* parent = (Clip*) GetParentClip();
 	if (parent) {
 		if (parent->scale == SCALE_FIT || parent->scale == SCALE_STRETCH) {
 			// Best fit or Stretch scaling (based on max timeline size * scaling keyframes)
diff --git a/src/ReaderBase.cpp b/src/ReaderBase.cpp
index 474dc624e..6799d95ec 100644
--- a/src/ReaderBase.cpp
+++ b/src/ReaderBase.cpp
@@ -249,13 +249,3 @@ void ReaderBase::SetJsonValue(const Json::Value root) {
 		}
 	}
 }
-
-/// Parent clip object of this reader (which can be unparented and NULL)
-openshot::ClipBase* ReaderBase::GetClip() {
-	return parent;
-}
-
-/// Set parent clip object of this reader
-void ReaderBase::SetClip(openshot::ClipBase* clip) {
-	parent = clip;
-}
diff --git a/src/Timeline.cpp b/src/Timeline.cpp
index 124058ac2..b2f46519c 100644
--- a/src/Timeline.cpp
+++ b/src/Timeline.cpp
@@ -263,6 +263,67 @@ void Timeline::RemoveClip(Clip* clip)
 	clips.remove(clip);
 }
+// Look up a clip
+openshot::ClipBase* Timeline::GetClip(const std::string& id)
+{
+	// Find the matching clip (if any)
+	for (const auto& clip : clips) {
+		if (clip->Id() == id) {
+			return clip;
+		}
+	}
+	return nullptr;
+}
+
+// Look up a timeline effect
+openshot::EffectBase* Timeline::GetEffect(const std::string& id)
+{
+	// Find the matching effect (if any)
+	for (const auto& effect : effects) {
+		if (effect->Id() == id) {
+			return effect;
+		}
+	}
+	return nullptr;
+}
+
+openshot::EffectBase* Timeline::GetClipEffect(const std::string& id)
+{
+	// Search all clips for matching effect ID
+	for (const auto& clip : clips) {
+		const auto e = clip->GetEffect(id);
+		if (e != nullptr) {
+			return e;
+		}
+	}
+	return nullptr;
+}
+
+// Compute the end time of the latest timeline element
+double Timeline::GetMaxTime() {
+	double last_clip = 0.0;
+	double last_effect = 0.0;
+
+	if (!clips.empty()) {
+		const auto max_clip = std::max_element(
+			clips.begin(), clips.end(), CompareClipEndFrames());
+		last_clip = (*max_clip)->Position() + (*max_clip)->Duration();
+	}
+	if (!effects.empty()) {
+		const auto max_effect = std::max_element(
+			effects.begin(), effects.end(), CompareEffectEndFrames());
+		last_effect = (*max_effect)->Position() + (*max_effect)->Duration();
+	}
+	return std::max(last_clip, last_effect);
+}
+
+// Compute the highest frame# based on the latest time and FPS
+int64_t Timeline::GetMaxFrame() {
+	double fps = info.fps.ToDouble();
+	auto max_time = GetMaxTime();
+	return std::round(max_time * fps) + 1;
+}
+
 // Apply a FrameMapper to a clip which matches the settings of this timeline
 void Timeline::apply_mapper_to_clip(Clip* clip)
 {
diff --git a/src/ZmqLogger.cpp b/src/ZmqLogger.cpp
index a4cee7149..d7377a2f4 100644
--- a/src/ZmqLogger.cpp
+++ b/src/ZmqLogger.cpp
@@ -37,6 +37,12 @@
 #endif
 using namespace openshot;
+#include
+#include
+#include
+#include
+#include <thread>    // for std::this_thread::sleep_for
+#include <chrono>    // for std::duration::microseconds
 // Global reference to logger
diff --git a/src/effects/Pixelate.cpp b/src/effects/Pixelate.cpp
index c993915c5..ed219e4da 100644
--- a/src/effects/Pixelate.cpp
+++ b/src/effects/Pixelate.cpp
@@ -33,7 +33,7 @@
 using namespace openshot;
 /// Blank constructor, useful when using Json to load the effect properties
-Pixelate::Pixelate() : pixelization(0.7), left(0.0), top(0.0), right(0.0), bottom(0.0) {
+Pixelate::Pixelate() : pixelization(0.5), left(0.0), top(0.0), right(0.0), bottom(0.0) {
 	// Init effect properties
 	init_effect_details();
 }
@@ -68,7 +68,7 @@ std::shared_ptr<Frame> Pixelate::GetFrame(std::shared_ptr<Frame> frame, int64_t
 	std::shared_ptr<QImage> frame_image = frame->GetImage();
 	// Get current keyframe values
-	double pixelization_value = 1.0 - std::min(fabs(pixelization.GetValue(frame_number)), 1.0);
+	double pixelization_value = std::min(pow(0.001, fabs(pixelization.GetValue(frame_number))), 1.0);
 	double left_value = left.GetValue(frame_number);
 	double top_value = top.GetValue(frame_number);
 	double right_value = right.GetValue(frame_number);
@@ -82,8 +82,12 @@
 	QRect area(QPoint(0,0), frame_image->size());
 	area = area.marginsRemoved({int(left_value * w), int(top_value * h), int(right_value * w), int(bottom_value * h)});
+	int scale_to = (int) (area.width() * pixelization_value);
+	if (scale_to < 1) {
+		scale_to = 1; // Not less than one pixel
+	}
 	// Copy and scale pixels in area to be pixelated
-	auto frame_scaled = frame_image->copy(area).scaledToWidth(area.width() * pixelization_value, Qt::SmoothTransformation);
+	auto frame_scaled = frame_image->copy(area).scaledToWidth(scale_to, Qt::SmoothTransformation);
 	// Draw pixelated image back over original
 	QPainter painter(frame_image.get());
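For context (not part of the patch): the Pixelate change above replaces a linear strength mapping with an exponential one. The old code scaled the selected area to width * (1 - k), which collapses to zero width at k = 1.0; the new code scales by 0.001^k, which decays smoothly and is clamped to at least one pixel. A small sketch of the resulting curve, using the same formula with an illustrative 640-pixel area:

#include <algorithm>
#include <cmath>
#include <cstdio>

int main() {
    const int width = 640;  // example area width, not from the patch
    for (double k : {0.0, 0.25, 0.5, 0.75, 1.0}) {
        // Same mapping as the new Pixelate::GetFrame()
        double value = std::min(std::pow(0.001, std::fabs(k)), 1.0);
        // Same "not less than one pixel" clamp
        int scale_to = std::max(1, (int)(width * value));
        std::printf("k=%.2f -> scale area to %d px wide\n", k, scale_to);
    }
    return 0;
}
// Output: k=0.00 -> 640, k=0.25 -> 113, k=0.50 -> 20, k=0.75 -> 3, k=1.00 -> 1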
diff --git a/src/examples/ExampleHtml.cpp b/src/examples/ExampleHtml.cpp
index f315e2527..576961bc5 100644
--- a/src/examples/ExampleHtml.cpp
+++ b/src/examples/ExampleHtml.cpp
@@ -32,11 +32,12 @@
 #include
 #include
 #include
-#include "../../include/OpenShot.h"
-//#include "../../include/CrashHandler.h"
 #include
 #include
+#include "../../include/OpenShot.h"
+#include "../../include/CrashHandler.h"
+
 using namespace openshot;
 int main(int argc, char* argv[]) {
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index 7ccddba8d..3d0dd2d48 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -36,6 +36,11 @@ if(WIN32)
 	set(CMAKE_CXX_FLAGS " ${CMAKE_CXX_FLAGS} -include cmath")
 endif()
+if (APPLE)
+	# Prevent compiling with __cxx11
+	add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0)
+endif()
+
 ################### UNITTEST++ #####################
 # Find UnitTest++ libraries (used for unit testing)
 find_package(UnitTest++)
@@ -80,7 +85,6 @@ if(ImageMagick_FOUND)
 	# define a global var (used in the C++)
 	add_definitions( -DUSE_IMAGEMAGICK=1 )
-	set(CMAKE_SWIG_FLAGS "-DUSE_IMAGEMAGICK=1")
 endif()
 ################# LIBOPENSHOT-AUDIO ###################
diff --git a/tests/Settings_Tests.cpp b/tests/Settings_Tests.cpp
index b63b56a8c..65bd66a66 100644
--- a/tests/Settings_Tests.cpp
+++ b/tests/Settings_Tests.cpp
@@ -41,7 +41,7 @@ TEST(Settings_Default_Constructor)
 	// Create an empty color
 	Settings *s = Settings::Instance();
-	CHECK_EQUAL(0, s->HARDWARE_DECODER);
+	CHECK_EQUAL(12, s->OMP_THREADS);
 	CHECK_EQUAL(false, s->HIGH_QUALITY_SCALING);
 	CHECK_EQUAL(false, s->WAIT_FOR_VIDEO_PROCESSING_TASK);
 }
@@ -50,15 +50,15 @@
 TEST(Settings_Change_Settings)
 {
 	// Create an empty color
 	Settings *s = Settings::Instance();
-	s->HARDWARE_DECODER = 1;
+	s->OMP_THREADS = 8;
 	s->HIGH_QUALITY_SCALING = true;
 	s->WAIT_FOR_VIDEO_PROCESSING_TASK = true;
-	CHECK_EQUAL(1, s->HARDWARE_DECODER);
+	CHECK_EQUAL(8, s->OMP_THREADS);
 	CHECK_EQUAL(true, s->HIGH_QUALITY_SCALING);
 	CHECK_EQUAL(true, s->WAIT_FOR_VIDEO_PROCESSING_TASK);
-	CHECK_EQUAL(1, s->HARDWARE_DECODER);
+	CHECK_EQUAL(8, Settings::Instance()->OMP_THREADS);
 	CHECK_EQUAL(true, Settings::Instance()->HIGH_QUALITY_SCALING);
 	CHECK_EQUAL(true, Settings::Instance()->WAIT_FOR_VIDEO_PROCESSING_TASK);
 }
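For context (not part of the patch): the Timeline_Tests.cpp diff below drops the Timeline_ prefix from each test name and wraps everything in a UnitTest++ SUITE block, so the suite supplies the namespacing the prefix used to provide. A skeleton of that pattern; the header path is an assumption here, since it varies between UnitTest++ packagings:

#include <UnitTest++.h>  // header name/path depends on the UnitTest++ install

SUITE(Timeline)
{

TEST(Constructor)
{
    // Failures are reported together with the suite name,
    // so the bare test name stays unambiguous
    CHECK_EQUAL(4, 2 + 2);
}

}  // SUITE(Timeline)

int main()
{
    // Runs every TEST in every SUITE that was linked in
    return UnitTest::RunAllTests();
}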
diff --git a/tests/Timeline_Tests.cpp b/tests/Timeline_Tests.cpp
index 946693a1d..47683b792 100644
--- a/tests/Timeline_Tests.cpp
+++ b/tests/Timeline_Tests.cpp
@@ -36,7 +36,10 @@
 using namespace std;
 using namespace openshot;
-TEST(Timeline_Constructor)
+SUITE(Timeline)
+{
+
+TEST(Constructor)
 {
 	// Create a default fraction (should be 1/1)
 	Fraction fps(30000,1000);
@@ -54,7 +57,7 @@ TEST(Timeline_Constructor)
 	CHECK_EQUAL(240, t2.info.height);
 }
-TEST(Timeline_Width_and_Height_Functions)
+TEST(Width_and_Height_Functions)
 {
 	// Create a default fraction (should be 1/1)
 	Fraction fps(30000,1000);
@@ -79,7 +82,7 @@ TEST(Timeline_Width_and_Height_Functions)
 	CHECK_EQUAL(400, t1.info.height);
 }
-TEST(Timeline_Framerate)
+TEST(Framerate)
 {
 	// Create a default fraction (should be 1/1)
 	Fraction fps(24,1);
@@ -89,7 +92,7 @@ TEST(Timeline_Framerate)
 	CHECK_CLOSE(24.0f, t1.info.fps.ToFloat(), 0.00001);
 }
-TEST(Timeline_Check_Two_Track_Video)
+TEST(Check_Two_Track_Video)
 {
 	// Create a reader
 	stringstream path;
@@ -186,7 +189,7 @@ TEST(Timeline_Check_Two_Track_Video)
 	t.Close();
 }
-TEST(Timeline_Clip_Order)
+TEST(Clip_Order)
 {
 	// Create a timeline
 	Timeline t(640, 480, Fraction(30, 1), 44100, 2, LAYOUT_STEREO);
@@ -281,7 +284,7 @@ TEST(Timeline_Clip_Order)
 }
-TEST(Timeline_Effect_Order)
+TEST(Effect_Order)
 {
 	// Create a timeline
 	Timeline t(640, 480, Fraction(30, 1), 44100, 2, LAYOUT_STEREO);
@@ -413,7 +416,127 @@ TEST(Timeline_Effect_Order)
 	t.Close();
 }
-TEST(Timeline_Effect_Blur)
+TEST(GetClip_by_id)
+{
+	Timeline t(640, 480, Fraction(30, 1), 44100, 2, LAYOUT_STEREO);
+
+	stringstream path1;
+	path1 << TEST_MEDIA_PATH << "interlaced.png";
+	auto media_path1 = path1.str();
+
+	stringstream path2;
+	path2 << TEST_MEDIA_PATH << "front.png";
+	auto media_path2 = path2.str();
+
+	Clip clip1(media_path1);
+	std::string clip1_id("CLIP00001");
+	clip1.Id(clip1_id);
+	clip1.Layer(1);
+
+	Clip clip2(media_path2);
+	std::string clip2_id("CLIP00002");
+	clip2.Id(clip2_id);
+	clip2.Layer(2);
+
+	t.AddClip(&clip1);
+	t.AddClip(&clip2);
+
+	auto matched = t.GetClip(clip1_id);
+	CHECK_EQUAL(clip1_id, matched->Id());
+	CHECK_EQUAL(1, matched->Layer());
+
+	auto matched2 = t.GetClip(clip2_id);
+	CHECK_EQUAL(clip2_id, matched2->Id());
+	CHECK_EQUAL(false, matched2->Layer() < 2);
+
+	auto matched3 = t.GetClip("BAD_ID");
+	CHECK_EQUAL(true, matched3 == nullptr);
+}
+
+TEST(GetClipEffect_by_id)
+{
+	Timeline t(640, 480, Fraction(30, 1), 44100, 2, LAYOUT_STEREO);
+
+	stringstream path1;
+	path1 << TEST_MEDIA_PATH << "interlaced.png";
+	auto media_path1 = path1.str();
+
+	// Create a clip, nothing special
+	Clip clip1(media_path1);
+	std::string clip1_id("CLIP00001");
+	clip1.Id(clip1_id);
+	clip1.Layer(1);
+
+	// Add a blur effect
+	Keyframe horizontal_radius(5.0);
+	Keyframe vertical_radius(5.0);
+	Keyframe sigma(3.0);
+	Keyframe iterations(3.0);
+	Blur blur1(horizontal_radius, vertical_radius, sigma, iterations);
+	std::string blur1_id("EFFECT00011");
+	blur1.Id(blur1_id);
+	clip1.AddEffect(&blur1);
+
+	// A second clip, different layer
+	Clip clip2(media_path1);
+	std::string clip2_id("CLIP00002");
+	clip2.Id(clip2_id);
+	clip2.Layer(2);
+
+	// Some effects for clip2
+	Negate neg2;
+	std::string neg2_id("EFFECT00021");
+	neg2.Id(neg2_id);
+	neg2.Layer(2);
+	clip2.AddEffect(&neg2);
+	Blur blur2(horizontal_radius, vertical_radius, sigma, iterations);
+	std::string blur2_id("EFFECT00022");
+	blur2.Id(blur2_id);
+	blur2.Layer(2);
+	clip2.AddEffect(&blur2);
+
+	t.AddClip(&clip1);
+
+	// Check that we can look up clip1's effect
+	auto match1 = t.GetClipEffect("EFFECT00011");
+	CHECK_EQUAL(blur1_id, match1->Id());
+
+	// clip2 hasn't been added yet, shouldn't be found
+	match1 = t.GetClipEffect(blur2_id);
+	CHECK_EQUAL(true, match1 == nullptr);
+
+	t.AddClip(&clip2);
+
+	// Check that blur2 can now be found via clip2
+	match1 = t.GetClipEffect(blur2_id);
+	CHECK_EQUAL(blur2_id, match1->Id());
+	CHECK_EQUAL(2, match1->Layer());
+}
+
+TEST(GetEffect_by_id)
+{
+	Timeline t(640, 480, Fraction(30, 1), 44100, 2, LAYOUT_STEREO);
+
+	// Create a timeline effect
+	Keyframe horizontal_radius(5.0);
+	Keyframe vertical_radius(5.0);
+	Keyframe sigma(3.0);
+	Keyframe iterations(3.0);
+	Blur blur1(horizontal_radius, vertical_radius, sigma, iterations);
+	std::string blur1_id("EFFECT00011");
+	blur1.Id(blur1_id);
+	blur1.Layer(1);
+	t.AddEffect(&blur1);
+
+	auto match1 = t.GetEffect(blur1_id);
+	CHECK_EQUAL(blur1_id, match1->Id());
+	CHECK_EQUAL(1, match1->Layer());
+
+	match1 = t.GetEffect("NOSUCHNAME");
+	CHECK_EQUAL(true, match1 == nullptr);
+}
+
+TEST(Effect_Blur)
 {
 	// Create a timeline
 	Timeline t(640, 480, Fraction(30, 1), 44100, 2, LAYOUT_STEREO);
@@ -443,3 +566,39 @@ TEST(Timeline_Effect_Blur)
 	// Close reader
 	t.Close();
 }
+
+TEST(GetMaxFrame_GetMaxTime)
+{
+	// Create a timeline
+	Timeline t(640, 480, Fraction(30, 1), 44100, 2, LAYOUT_STEREO);
+
+	stringstream path1;
+	path1 << TEST_MEDIA_PATH << "interlaced.png";
+	Clip clip1(path1.str());
+	clip1.Layer(1);
+	clip1.Position(50);
+	clip1.End(45);
+	t.AddClip(&clip1);
+
+	CHECK_CLOSE(95.0, t.GetMaxTime(), 0.001);
+	CHECK_EQUAL(95 * 30 + 1, t.GetMaxFrame());
+
+	Clip clip2(path1.str());
+	clip2.Layer(2);
+	clip2.Position(0);
+	clip2.End(55);
+	t.AddClip(&clip2);
+
+	CHECK_EQUAL(95 * 30 + 1, t.GetMaxFrame());
+	CHECK_CLOSE(95.0, t.GetMaxTime(), 0.001);
+
+	clip2.Position(100);
+	clip1.Position(80);
+	CHECK_EQUAL(155 * 30 + 1, t.GetMaxFrame());
+	CHECK_CLOSE(155.0, t.GetMaxTime(), 0.001);
+	t.RemoveClip(&clip2);
+	CHECK_EQUAL(125 * 30 + 1, t.GetMaxFrame());
+	CHECK_CLOSE(125.0, t.GetMaxTime(), 0.001);
+}
+
+} // SUITE
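For context (not part of the patch): a quick sanity check of the expected values in GetMaxFrame_GetMaxTime above. clip1 sits at position 50 s and runs 45 s, so the timeline's latest point is 95 s, and at 30 fps GetMaxFrame() yields round(95 * 30) + 1 = 2851, exactly what the 95 * 30 + 1 assertion encodes. The same arithmetic, standalone:

#include <cmath>
#include <cstdint>
#include <cstdio>

int main() {
    const double fps = 30.0;
    const double position = 50.0, duration = 45.0;  // clip1 in the test
    const double max_time = position + duration;    // 95.0 seconds
    // GetMaxFrame() = round(max_time * fps) + 1
    const int64_t max_frame = (int64_t)std::round(max_time * fps) + 1;
    std::printf("%lld\n", (long long)max_frame);    // 2851 == 95 * 30 + 1
    return 0;
}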