diff --git a/.github/labeler.yml b/.github/labeler.yml new file mode 100644 index 000000000..756bf1e46 --- /dev/null +++ b/.github/labeler.yml @@ -0,0 +1,4 @@ +# Add 'build' label to CMake changes +build: + - /**/CMakeLists.txt + - /cmake/**/*.cmake

diff --git a/.github/workflows/label.yml b/.github/workflows/label.yml new file mode 100644 index 000000000..e90b599b9 --- /dev/null +++ b/.github/workflows/label.yml @@ -0,0 +1,19 @@ +# This workflow will triage pull requests and apply a label based on the +# paths that are modified in the pull request. +# +# To use this workflow, you will need to set up a .github/labeler.yml +# file with configuration. For more information, see: +# https://github.com/actions/labeler/blob/master/README.md + +name: Labeler +on: [pull_request] + +jobs: + label: + + runs-on: ubuntu-latest + + steps: + - uses: actions/labeler@v2 + with: + repo-token: "${{ secrets.GITHUB_TOKEN }}"

diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 4e8fc2786..c15bcb984 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -43,7 +43,7 @@ mac-builder: - unzip artifacts.zip - export LIBOPENSHOT_AUDIO_DIR=$CI_PROJECT_DIR/build/install-x64 - mkdir -p build; cd build; - - cmake -DCMAKE_EXE_LINKER_FLAGS="-stdlib=libc++" -DCMAKE_SHARED_LINKER_FLAGS="-stdlib=libc++" -DCMAKE_VERBOSE_MAKEFILE:BOOL=ON -D"CMAKE_INSTALL_PREFIX:PATH=$CI_PROJECT_DIR/build/install-x64" -DCMAKE_CXX_COMPILER=clang++ -DCMAKE_C_COMPILER=clang -DCMAKE_PREFIX_PATH=/usr/local/qt5.15.X/qt5.15/5.15.0/clang_64/ -DPYTHON_INCLUDE_DIR=/Library/Frameworks/Python.framework/Versions/3.6/include/python3.6m -DPYTHON_LIBRARY=/Library/Frameworks/Python.framework/Versions/3.6/lib/libpython3.6.dylib -DPYTHON_MODULE_PATH=python -DPython_FRAMEWORKS=/Library/Frameworks/Python.framework/ -D"CMAKE_BUILD_TYPE:STRING=Release" -D"CMAKE_OSX_SYSROOT=/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.9.sdk" -D"CMAKE_OSX_DEPLOYMENT_TARGET=10.9" -D"CMAKE_INSTALL_RPATH_USE_LINK_PATH=1" -D"ENABLE_RUBY=0" ../ + - cmake -DCMAKE_EXE_LINKER_FLAGS="-stdlib=libc++" -DCMAKE_SHARED_LINKER_FLAGS="-stdlib=libc++" -DCMAKE_VERBOSE_MAKEFILE:BOOL=ON -D"CMAKE_INSTALL_PREFIX:PATH=$CI_PROJECT_DIR/build/install-x64" -DCMAKE_CXX_COMPILER=clang++ -DCMAKE_C_COMPILER=clang -DCMAKE_PREFIX_PATH=/usr/local/qt5.15.X/qt5.15/5.15.0/clang_64/ -DPYTHON_INCLUDE_DIR=/Library/Frameworks/Python.framework/Versions/3.6/include/python3.6m -DPYTHON_LIBRARY=/Library/Frameworks/Python.framework/Versions/3.6/lib/libpython3.6.dylib -DPYTHON_MODULE_PATH=python -DPython_FRAMEWORKS=/Library/Frameworks/Python.framework/ -D"CMAKE_BUILD_TYPE:STRING=Release" -D"CMAKE_OSX_SYSROOT=/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.11.sdk" -D"CMAKE_OSX_DEPLOYMENT_TARGET=10.9" -D"CMAKE_INSTALL_RPATH_USE_LINK_PATH=1" -D"ENABLE_RUBY=0" ../ - make - make install - echo -e "CI_PROJECT_NAME:$CI_PROJECT_NAME\nCI_COMMIT_REF_NAME:$CI_COMMIT_REF_NAME\nCI_COMMIT_SHA:$CI_COMMIT_SHA\nCI_JOB_ID:$CI_JOB_ID" > "install-x64/share/$CI_PROJECT_NAME"
diff --git a/.travis.yml b/.travis.yml index 241d5be3d..ab338bdde 100644 --- a/.travis.yml +++ b/.travis.yml @@ -32,11 +32,6 @@ addons: - curl jobs: - - # The FFmpeg 3.2 backport PPA has gone missing - allow_failures: - - name: "FFmpeg 3.2 GCC (Ubuntu 16.04 Xenial)" - include: - name: "Coverage + FFmpeg 3.4 GCC (Ubuntu 18.04 Bionic)" env: @@ -99,31 +94,6 @@ jobs: - libavresample-dev - libomp-dev - - name: "FFmpeg 3.2 GCC (Ubuntu 16.04 Xenial)" - env: - - BUILD_VERSION=ffmpeg32 - - CMAKE_EXTRA_ARGS="" - - TEST_TARGET="os_test" - os: linux - dist: xenial - addons: - apt: - sources: - - sourceline: 'ppa:openshot.developers/libopenshot-daily' - - sourceline: 'ppa:jon-hedgerows/ffmpeg-backports' - packages: - - *p_common - - libavresample-dev - - libavcodec57 - - libavdevice57 - - libavfilter6 - - libavformat57 - - libavresample3 - - libavutil55 - - libpostproc54 - - libswresample2 - - libswscale4 - - name: "FFmpeg 2 GCC (Ubuntu 16.04 Xenial)" env: - BUILD_VERSION=ffmpeg2

diff --git a/README.md b/README.md index cf69c1cfe..6105915d3 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ -OpenShot Video Library (libopenshot) is a free, open-source C++ library dedicated to -delivering high quality video editing, animation, and playback solutions to the -world. +OpenShot Video Library (libopenshot) is a free, open-source C++ library +dedicated to delivering high quality video editing, animation, and playback +solutions to the world. ## Build Status @@ -15,7 +15,8 @@ world. * Time Mapping (Curve-based Slow Down, Speed Up, Reverse) * Audio Mixing & Resampling (Curve-based) * Audio Plug-ins (VST & AU) -* Audio Drivers (ASIO, WASAPI, DirectSound, CoreAudio, iPhone Audio, ALSA, JACK, and Android) +* Audio Drivers (ASIO, WASAPI, DirectSound, CoreAudio, iPhone Audio, + ALSA, JACK, and Android) * Telecine and Inverse Telecine (Film to TV, TV to Film) * Frame Rate Conversions * Multi-Processor Support (Performance) @@ -27,8 +28,8 @@ world. ## Install -Detailed instructions for building libopenshot and libopenshot-audio for each OS. These instructions -are also available in the /docs/ source folder. +Detailed instructions for building libopenshot and libopenshot-audio for +each OS. These instructions are also available in the `/docs/` source folder. * [Linux](https://github.com/OpenShot/libopenshot/wiki/Linux-Build-Instructions) * [Mac](https://github.com/OpenShot/libopenshot/wiki/Mac-Build-Instructions) @@ -36,10 +37,12 @@ are also available in the /docs/ source folder. ## Hardware Acceleration -OpenShot now supports experimental hardware acceleration, both for encoding and -decoding videos. When enabled, this can either speed up those operations or slow -them down, depending on the power and features supported by your graphics card. -Please see [doc/HW-ACCELL.md](doc/HW-ACCEL.md) for more information. +OpenShot now supports experimental hardware acceleration, both for encoding +and decoding videos. When enabled, this can either speed up those operations +or slow them down, depending on the power and features supported by your +graphics card. + +Please see [`doc/HW-ACCEL.md`](doc/HW-ACCEL.md) for more information. ## Documentation @@ -51,10 +54,11 @@ make doc ## Developers -Are you interested in becoming more involved in the development of -OpenShot? Build exciting new features, fix bugs, make friends, and become a hero! -Please read the [step-by-step](https://github.com/OpenShot/openshot-qt/wiki/Become-a-Developer) -instructions for getting source code, configuring dependencies, and building OpenShot. +Are you interested in becoming more involved in the development of OpenShot? +Build exciting new features, fix bugs, make friends, and become a hero! +Please read the [step-by-step](https://github.com/OpenShot/openshot-qt/wiki/Become-a-Developer) +instructions for getting source code, configuring dependencies, and building +OpenShot. ## Report a bug @@ -72,7 +76,7 @@ https://github.com/OpenShot/libopenshot/issues ### License -Copyright (c) 2008-2019 OpenShot Studios, LLC.
+Copyright (c) 2008-2020 OpenShot Studios, LLC. OpenShot Library (libopenshot) is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License

diff --git a/cmake/Modules/FindZMQ.cmake b/cmake/Modules/FindZMQ.cmake deleted file mode 100644 index 50d27bc54..000000000 --- a/cmake/Modules/FindZMQ.cmake +++ /dev/null @@ -1,24 +0,0 @@ -# - Try to find ZMQ -# Once done this will define -# ZMQ_FOUND - System has ZMQ -# ZMQ_INCLUDE_DIRS - The ZMQ include directories -# ZMQ_LIBRARIES - The libraries needed to use ZMQ -# ZMQ_DEFINITIONS - Compiler switches required for using ZMQ - -find_path ( ZMQ_INCLUDE_DIR zmq.h - PATHS /usr/include/ - /usr/local/include/ - $ENV{ZMQDIR}/include/ ) - -find_library ( ZMQ_LIBRARY NAMES zmq - PATHS /usr/lib/ - /usr/local/lib/ - $ENV{ZMQDIR}/lib/ ) - -set ( ZMQ_LIBRARIES ${ZMQ_LIBRARY} ) - set ( ZMQ_INCLUDE_DIRS ${ZMQ_INCLUDE_DIR} ) - -include ( FindPackageHandleStandardArgs ) -# handle the QUIETLY and REQUIRED arguments and set ZMQ_FOUND to TRUE -# if all listed variables are TRUE -find_package_handle_standard_args ( ZMQ DEFAULT_MSG ZMQ_LIBRARY ZMQ_INCLUDE_DIR ) \ No newline at end of file

diff --git a/include/CacheDisk.h b/include/CacheDisk.h index 09ebd4abc..c60f5f6e3 100644 --- a/include/CacheDisk.h +++ b/include/CacheDisk.h @@ -38,8 +38,6 @@ #include "Frame.h" #include "Exceptions.h" #include -#include -#include namespace openshot {

diff --git a/include/Clip.h b/include/Clip.h index 5499db42f..3799ff966 100644 --- a/include/Clip.h +++ b/include/Clip.h @@ -190,6 +190,9 @@ namespace openshot { /// Return the list of effects on the timeline std::list<openshot::EffectBase*> Effects() { return effects; }; + /// Look up an effect by ID + openshot::EffectBase* GetEffect(const std::string& id); + /// @brief Get an openshot::Frame object for a specific frame number of this timeline. /// /// @returns The requested frame (containing the image) @@ -277,8 +280,6 @@ namespace openshot { openshot::Keyframe has_audio; ///< An optional override to determine if this clip has audio (-1=undefined, 0=no, 1=yes) openshot::Keyframe has_video; ///< An optional override to determine if this clip has video (-1=undefined, 0=no, 1=yes) }; +} // namespace - -} - -#endif +#endif // OPENSHOT_CLIP_H

diff --git a/include/FFmpegUtilities.h b/include/FFmpegUtilities.h index 7daf0bc56..c637f9445 100644 --- a/include/FFmpegUtilities.h +++ b/include/FFmpegUtilities.h @@ -122,6 +122,9 @@ #ifndef PIX_FMT_YUV420P #define PIX_FMT_YUV420P AV_PIX_FMT_YUV420P #endif + #ifndef PIX_FMT_YUV444P + #define PIX_FMT_YUV444P AV_PIX_FMT_YUV444P + #endif // FFmpeg's libavutil/common.h defines an RSHIFT incompatible with Ruby's // definition in ruby/config.h, so we move it to FF_RSHIFT
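Note: the new PIX_FMT_YUV444P guard follows the same pattern as the existing PIX_FMT_YUV420P mapping, so code written against FFmpeg's legacy names keeps compiling on newer releases. A minimal sketch of how such a compatibility shim is consumed, assuming FFmpeg's libavutil headers are available (the helper function is hypothetical):

```cpp
// Illustration of the compatibility-macro pattern used by FFmpegUtilities.h:
// newer FFmpeg releases dropped the old PIX_FMT_* spellings, so the header
// maps them onto the AV_PIX_FMT_* enum values when they are missing.
extern "C" {
#include <libavutil/pixfmt.h>
}

#ifndef PIX_FMT_YUV444P
#define PIX_FMT_YUV444P AV_PIX_FMT_YUV444P
#endif

// Hypothetical caller: the legacy name now works on any FFmpeg version.
static AVPixelFormat lossless_format() { return PIX_FMT_YUV444P; }
```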
+ * + * OpenShot Library (libopenshot) is free software: you can redistribute it + * and/or modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * OpenShot Library (libopenshot) is distributed in the hope that it will be + * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with OpenShot Library. If not, see . + */ + +#ifndef OPENSHOT_QT_UTILITIES_H +#define OPENSHOT_QT_UTILITIES_H + +#include +#include + +// Fix Qt::endl for older Qt versions +// From: https://bugreports.qt.io/browse/QTBUG-82680 +#if QT_VERSION < QT_VERSION_CHECK(5, 14, 0) +namespace Qt { + using TextStreamFunction = QTextStream& (*)(QTextStream&); + constexpr TextStreamFunction endl = ::endl; +} +#endif + +#endif // OPENSHOT_QT_UTILITIES_H diff --git a/include/ReaderBase.h b/include/ReaderBase.h index ab7657537..c997b76dc 100644 --- a/include/ReaderBase.h +++ b/include/ReaderBase.h @@ -111,10 +111,16 @@ namespace openshot openshot::ReaderInfo info; /// Parent clip object of this reader (which can be unparented and NULL) - openshot::ClipBase* GetClip(); + inline openshot::ClipBase* GetParentClip() { return parent; }; + + /// Deprecated alias for GetParentClip() + inline openshot::ClipBase* GetClip() { return parent; }; /// Set parent clip object of this reader - void SetClip(openshot::ClipBase* clip); + inline void SetParentClip(openshot::ClipBase* clip) { parent = clip; }; + + /// Deprecated alias for SetParentClip() + inline void SetClip(openshot::ClipBase* clip) { parent = clip; }; /// Close the reader (and any resources it was consuming) virtual void Close() = 0; diff --git a/include/Timeline.h b/include/Timeline.h index 932b04acf..5c275abbc 100644 --- a/include/Timeline.h +++ b/include/Timeline.h @@ -61,7 +61,7 @@ namespace openshot { /// from lowest layer to top layer (since that is the sequence they need to be combined), and then /// by position (left to right). struct CompareClips{ - bool operator()( Clip* lhs, Clip* rhs){ + bool operator()( openshot::Clip* lhs, openshot::Clip* rhs){ if( lhs->Layer() < rhs->Layer() ) return true; if( lhs->Layer() == rhs->Layer() && lhs->Position() <= rhs->Position() ) return true; return false; @@ -71,13 +71,28 @@ namespace openshot { /// from lowest layer to top layer (since that is sequence clips are combined), and then by /// position, and then by effect order. 
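Note: a quick usage sketch of the shim above, assuming Qt is available. With this header included, the same streaming code compiles on Qt < 5.14 (where Qt::endl does not exist yet) and on Qt >= 5.14 (where the bare ::endl is deprecated); the function name and file path are illustrative only:

```cpp
#include <QFile>
#include <QTextStream>
#include "QtUtilities.h"  // supplies Qt::endl on older Qt releases

void write_greeting(const QString& path) {
    QFile file(path);
    if (!file.open(QIODevice::WriteOnly | QIODevice::Text))
        return;
    QTextStream out(&file);
    // Below Qt 5.14, Qt::endl resolves to the constexpr alias in the shim;
    // on newer Qt it resolves to the library's own Qt::endl.
    out << "hello" << Qt::endl;
}
```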
diff --git a/include/ReaderBase.h b/include/ReaderBase.h index ab7657537..c997b76dc 100644 --- a/include/ReaderBase.h +++ b/include/ReaderBase.h @@ -111,10 +111,16 @@ namespace openshot openshot::ReaderInfo info; /// Parent clip object of this reader (which can be unparented and NULL) - openshot::ClipBase* GetClip(); + inline openshot::ClipBase* GetParentClip() { return parent; }; + + /// Deprecated alias for GetParentClip() + inline openshot::ClipBase* GetClip() { return parent; }; /// Set parent clip object of this reader - void SetClip(openshot::ClipBase* clip); + inline void SetParentClip(openshot::ClipBase* clip) { parent = clip; }; + + /// Deprecated alias for SetParentClip() + inline void SetClip(openshot::ClipBase* clip) { parent = clip; }; /// Close the reader (and any resources it was consuming) virtual void Close() = 0;
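Note: because the old GetClip()/SetClip() names remain as inline deprecated aliases, existing callers keep compiling unchanged while new code can move to the clearer names. A minimal migration sketch under that assumption (the function is hypothetical):

```cpp
#include <cassert>
#include "Clip.h"
#include "ReaderBase.h"

// Migration sketch: the deprecated aliases forward to the new accessors,
// so both spellings observe the same parent pointer.
void reparent(openshot::ClipBase* clip, openshot::ReaderBase* reader) {
    reader->SetParentClip(clip);                           // preferred new name
    assert(reader->GetClip() == reader->GetParentClip());  // old alias agrees
}
```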
diff --git a/include/Timeline.h b/include/Timeline.h index 932b04acf..5c275abbc 100644 --- a/include/Timeline.h +++ b/include/Timeline.h @@ -61,7 +61,7 @@ namespace openshot { /// from lowest layer to top layer (since that is the sequence they need to be combined), and then /// by position (left to right). struct CompareClips{ - bool operator()( Clip* lhs, Clip* rhs){ + bool operator()( openshot::Clip* lhs, openshot::Clip* rhs){ if( lhs->Layer() < rhs->Layer() ) return true; if( lhs->Layer() == rhs->Layer() && lhs->Position() <= rhs->Position() ) return true; return false; }}; @@ -71,13 +71,28 @@ namespace openshot { /// from lowest layer to top layer (since that is sequence clips are combined), and then by /// position, and then by effect order. struct CompareEffects{ - bool operator()( EffectBase* lhs, EffectBase* rhs){ + bool operator()( openshot::EffectBase* lhs, openshot::EffectBase* rhs){ if( lhs->Layer() < rhs->Layer() ) return true; if( lhs->Layer() == rhs->Layer() && lhs->Position() < rhs->Position() ) return true; if( lhs->Layer() == rhs->Layer() && lhs->Position() == rhs->Position() && lhs->Order() > rhs->Order() ) return true; return false; }}; + /// Comparison method for finding the far end of the timeline, by locating + /// the Clip with the highest end-frame number using std::max_element + struct CompareClipEndFrames { + bool operator()(const openshot::Clip* lhs, const openshot::Clip* rhs) { + return (lhs->Position() + lhs->Duration()) + <= (rhs->Position() + rhs->Duration()); + }}; + + /// Like CompareClipEndFrames, but for effects + struct CompareEffectEndFrames { + bool operator()(const openshot::EffectBase* lhs, const openshot::EffectBase* rhs) { + return (lhs->Position() + lhs->Duration()) + <= (rhs->Position() + rhs->Duration()); + }}; /** * @brief This class represents a timeline * ... * t.Close(); * @endcode */ - class Timeline : public ReaderBase { + class Timeline : public openshot::ReaderBase { private: bool is_open; ///<Is Timeline Open? - std::list<Clip*> clips; ///<List of clips on this timeline - std::list<Clip*> closing_clips; ///<List of clips that need to be closed - std::map<Clip*, Clip*> open_clips; ///<List of 'opened' clips on this timeline - std::list<EffectBase*> effects; ///<List of effects on this timeline - std::list<FrameMapper*> allocated_frame_mappers; ///< all the frame mappers we allocated and must free + std::list<openshot::Clip*> clips; ///<List of clips on this timeline + std::list<openshot::Clip*> closing_clips; ///<List of clips that need to be closed + std::map<openshot::Clip*, openshot::Clip*> open_clips; ///<List of 'opened' clips on this timeline + std::list<openshot::EffectBase*> effects; ///<List of effects on this timeline + std::list<openshot::FrameMapper*> allocated_frame_mappers; ///< all the frame mappers we allocated and must free bool managed_cache; ///< Does this timeline instance manage the cache object std::string path; ///< Optional path of loaded UTF-8 OpenShot JSON project file /// Process a new layer of video or audio - void add_layer(std::shared_ptr<Frame> new_frame, Clip* source_clip, int64_t clip_frame_number, int64_t timeline_frame_number, bool is_top_clip, float max_volume); + void add_layer(std::shared_ptr<openshot::Frame> new_frame, openshot::Clip* source_clip, int64_t clip_frame_number, int64_t timeline_frame_number, bool is_top_clip, float max_volume); /// Apply a FrameMapper to a clip which matches the settings of this timeline - void apply_mapper_to_clip(Clip* clip); + void apply_mapper_to_clip(openshot::Clip* clip); /// Apply JSON Diffs to various objects contained in this timeline void apply_json_to_clips(Json::Value change); /// Find intersecting (or non-intersecting) clips - std::vector<Clip*> find_intersecting_clips(int64_t requested_frame, int number_of_frames, bool include); + std::vector<openshot::Clip*> find_intersecting_clips(int64_t requested_frame, int number_of_frames, bool include); /// Get or generate a blank frame - std::shared_ptr<Frame> GetOrCreateFrame(Clip* clip, int64_t number); + std::shared_ptr<openshot::Frame> GetOrCreateFrame(openshot::Clip* clip, int64_t number); /// Apply effects to the source frame (if any) - std::shared_ptr<Frame> apply_effects(std::shared_ptr<Frame> frame, int64_t timeline_frame_number, int layer); + std::shared_ptr<openshot::Frame> apply_effects(std::shared_ptr<openshot::Frame> frame, int64_t timeline_frame_number, int layer); /// Compare 2 floating point numbers for equality bool isEqual(double a, double b); @@ -198,7 +213,7 @@ void sort_effects(); /// Update the list of 'opened' clips - void update_open_clips(Clip *clip, bool does_clip_intersect); + void update_open_clips(openshot::Clip *clip, bool does_clip_intersect); public: @@ -209,7 +224,7 @@ /// @param sample_rate The sample rate of the timeline's audio /// @param channels The number of audio channels of the timeline /// @param channel_layout The channel layout (i.e. mono, stereo, 3 point surround, etc...) - Timeline(int width, int height, Fraction fps, int sample_rate, int channels, ChannelLayout channel_layout); + Timeline(int width, int height, openshot::Fraction fps, int sample_rate, int channels, openshot::ChannelLayout channel_layout); /// @brief Constructor for the timeline (which loads a JSON structure from a file path, and initializes a timeline) /// @param projectPath The path of the UTF-8 *.osp project file (JSON contents). Contents will be loaded automatically. @@ -220,11 +235,11 @@ /// @brief Add an openshot::Clip to the timeline /// @param clip Add an openshot::Clip to the timeline. A clip can contain any type of Reader. - void AddClip(Clip* clip); + void AddClip(openshot::Clip* clip); /// @brief Add an effect to the timeline /// @param effect Add an effect to the timeline. An effect can modify the audio or video of an openshot::Frame. - void AddEffect(EffectBase* effect); + void AddEffect(openshot::EffectBase* effect); /// Apply the timeline's framerate and samplerate to all clips void ApplyMapperToClips(); @@ -239,34 +254,48 @@ void ClearAllCache(); /// Return a list of clips on the timeline - std::list<Clip*> Clips() { return clips; }; + std::list<openshot::Clip*> Clips() { return clips; }; + + /// Look up a single clip by ID + openshot::ClipBase* GetClip(const std::string& id); + + /// Look up a clip effect by ID + openshot::EffectBase* GetClipEffect(const std::string& id); + + /// Look up a timeline effect by ID + openshot::EffectBase* GetEffect(const std::string& id); + + /// Look up the end time of the latest timeline element + double GetMaxTime(); + /// Look up the end frame number of the latest element on the timeline + int64_t GetMaxFrame(); /// Close the timeline reader (and any resources it was consuming) void Close() override; /// Return the list of effects on the timeline - std::list<EffectBase*> Effects() { return effects; }; + std::list<openshot::EffectBase*> Effects() { return effects; }; /// Get the cache object used by this reader - CacheBase* GetCache() override { return final_cache; }; + openshot::CacheBase* GetCache() override { return final_cache; }; /// Set the cache object used by this reader. You must now manage the lifecycle /// of this cache object though (Timeline will not delete it for you). - void SetCache(CacheBase* new_cache); + void SetCache(openshot::CacheBase* new_cache); /// Get an openshot::Frame object for a specific frame number of this timeline. /// /// @returns The requested frame (containing the image) /// @param requested_frame The frame number that is requested. - std::shared_ptr<Frame> GetFrame(int64_t requested_frame) override; + std::shared_ptr<openshot::Frame> GetFrame(int64_t requested_frame) override; // Curves for the viewport - Keyframe viewport_scale; ///
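Note: the end-frame comparators exist so std::max_element can locate the element that finishes last, which is what the new Timeline::GetMaxTime() relies on. A self-contained sketch of that lookup, using a simplified stand-in type rather than openshot::Clip (and a strict `<` comparison, which is what std::max_element formally requires):

```cpp
#include <algorithm>
#include <list>

// Simplified stand-in for openshot::Clip: only the fields the comparator reads.
struct FakeClip { double position; double duration; };

struct CompareEndTimes {
    bool operator()(const FakeClip* lhs, const FakeClip* rhs) const {
        return (lhs->position + lhs->duration) < (rhs->position + rhs->duration);
    }
};

// Returns the time at which the last clip ends (0.0 for an empty timeline).
double max_time(const std::list<FakeClip*>& clips) {
    if (clips.empty()) return 0.0;
    auto it = std::max_element(clips.begin(), clips.end(), CompareEndTimes());
    return (*it)->position + (*it)->duration;
}
```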
diff --git a/src/CacheDisk.cpp b/src/CacheDisk.cpp --- a/src/CacheDisk.cpp +++ b/src/CacheDisk.cpp +#include +#include using namespace std; using namespace openshot; @@ -191,10 +195,10 @@ void CacheDisk::Add(std::shared_ptr<Frame> frame) if (audio_file.open(QIODevice::WriteOnly)) { QTextStream audio_stream(&audio_file); - audio_stream << frame->SampleRate() << endl; - audio_stream << frame->GetAudioChannelsCount() << endl; - audio_stream << frame->GetAudioSamplesCount() << endl; - audio_stream << frame->ChannelsLayout() << endl; + audio_stream << frame->SampleRate() << Qt::endl; + audio_stream << frame->GetAudioChannelsCount() << Qt::endl; + audio_stream << frame->GetAudioSamplesCount() << Qt::endl; + audio_stream << frame->ChannelsLayout() << Qt::endl; // Loop through all samples for (int channel = 0; channel < frame->GetAudioChannelsCount(); channel++) @@ -202,7 +206,7 @@ // Get audio for this channel float *samples = frame->GetAudioSamples(channel); for (int sample = 0; sample < frame->GetAudioSamplesCount(); sample++) - audio_stream << samples[sample] << endl; + audio_stream << samples[sample] << Qt::endl; } }
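Note: CacheDisk::Add writes a simple line-oriented text format: four header lines (sample rate, channel count, sample count, channel layout) followed by one sample per line for each channel in turn. A hedged sketch of a matching reader under that assumption; the function name and container choice are illustrative, not part of libopenshot's API:

```cpp
#include <QFile>
#include <QString>
#include <QTextStream>
#include <QVector>

// Illustrative reader for the cache-audio text format written above.
bool read_cached_audio(const QString& path, QVector<QVector<float>>& channels) {
    QFile audio_file(path);
    if (!audio_file.open(QIODevice::ReadOnly))
        return false;
    QTextStream in(&audio_file);
    int sample_rate = 0, channel_count = 0, sample_count = 0, layout = 0;
    in >> sample_rate >> channel_count >> sample_count >> layout;  // header lines
    channels.resize(channel_count);
    for (int c = 0; c < channel_count; ++c) {       // samples, channel by channel
        channels[c].resize(sample_count);
        for (int s = 0; s < sample_count; ++s)
            in >> channels[c][s];
    }
    return in.status() == QTextStream::Ok;
}
```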
diff --git a/src/Clip.cpp b/src/Clip.cpp index 29e4b8c57..570424925 100644 --- a/src/Clip.cpp +++ b/src/Clip.cpp @@ -153,7 +153,7 @@ Clip::Clip(ReaderBase* new_reader) : resampler(NULL), reader(new_reader), alloca // Update duration and set parent if (reader) { End(reader->info.duration); - reader->SetClip(this); + reader->SetParentClip(this); } } @@ -210,7 +210,7 @@ Clip::Clip(std::string path) : resampler(NULL), reader(NULL), allocated_reader(N // Update duration and set parent if (reader) { End(reader->info.duration); - reader->SetClip(this); + reader->SetParentClip(this); allocated_reader = reader; init_reader_rotation(); } @@ -239,7 +239,7 @@ void Clip::Reader(ReaderBase* new_reader) reader = new_reader; // set parent - reader->SetClip(this); + reader->SetParentClip(this); // Init rotation (if any) init_reader_rotation(); @@ -368,6 +368,18 @@ std::shared_ptr<Frame> Clip::GetFrame(int64_t requested_frame) throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method."); } +// Look up an effect by ID +openshot::EffectBase* Clip::GetEffect(const std::string& id) +{ + // Find the matching effect (if any) + for (const auto& effect : effects) { + if (effect->Id() == id) { + return effect; + } + } + return nullptr; +} + // Get file extension std::string Clip::get_file_extension(std::string path) { @@ -993,7 +1005,7 @@ void Clip::SetJsonValue(const Json::Value root) { // mark as managed reader and set parent if (reader) { - reader->SetParentClip(this); + allocated_reader = reader; }
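Note: Clip::GetEffect returns a non-owning pointer, or nullptr when no effect on the clip carries the requested ID, so callers must null-check. A short usage sketch; the function and the example ID are hypothetical:

```cpp
#include <iostream>
#include <string>
#include "Clip.h"

// Usage sketch: look up an effect by ID and report what was found.
void report_effect(openshot::Clip& clip, const std::string& id) {
    if (openshot::EffectBase* effect = clip.GetEffect(id)) {
        // Found: the pointer is owned by the clip, so do not delete it.
        std::cout << "found effect " << effect->Id()
                  << " on layer " << effect->Layer() << std::endl;
    } else {
        std::cout << "no effect with ID " << id << std::endl;
    }
}
```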
diff --git a/src/FFmpegReader.cpp b/src/FFmpegReader.cpp index a85618639..78a39c212 100644 --- a/src/FFmpegReader.cpp +++ b/src/FFmpegReader.cpp @@ -33,6 +33,9 @@ #include "../include/FFmpegReader.h" +#include <thread> // for std::this_thread::sleep_for +#include <chrono> // for std::chrono::milliseconds + #define ENABLE_VAAPI 0 #if HAVE_HW_ACCEL @@ -928,7 +931,7 @@ std::shared_ptr<Frame> FFmpegReader::ReadStream(int64_t requested_frame) { // Wait if too many frames are being processed while (processing_video_frames_size + processing_audio_frames_size >= minimum_packets) { - usleep(2500); + std::this_thread::sleep_for(std::chrono::milliseconds(3)); const GenericScopedLock<CriticalSection> lock(processingCriticalSection); processing_video_frames_size = processing_video_frames.size(); processing_audio_frames_size = processing_audio_frames.size(); @@ -1296,7 +1299,7 @@ void FFmpegReader::ProcessVideoPacket(int64_t requested_frame) { if (max_height <= 0) max_height = info.height; - Clip *parent = (Clip *) GetClip(); + Clip *parent = (Clip *) GetParentClip(); if (parent) { if (parent->scale == SCALE_FIT || parent->scale == SCALE_STRETCH) { // Best fit or Stretch scaling (based on max timeline size * scaling keyframes) @@ -1719,7 +1722,7 @@ void FFmpegReader::Seek(int64_t requested_frame) { // Wait for any processing frames to complete while (processing_video_frames_size + processing_audio_frames_size > 0) { - usleep(2500); + std::this_thread::sleep_for(std::chrono::milliseconds(3)); const GenericScopedLock<CriticalSection> lock(processingCriticalSection); processing_video_frames_size = processing_video_frames.size(); processing_audio_frames_size = processing_audio_frames.size(); @@ -1868,8 +1871,20 @@ void FFmpegReader::UpdatePTSOffset(bool is_video) { // VIDEO PACKET if (video_pts_offset == 99999) // Has the offset been set yet? { - // Find the difference between PTS and frame number (no more than 10 timebase units allowed) - video_pts_offset = 0 - std::max(GetVideoPTS(), (int64_t) info.video_timebase.ToInt() * 10); + // Find the difference between PTS and frame number + video_pts_offset = 0 - GetVideoPTS(); + + // Find the difference between PTS and frame number + // Also, determine if PTS is invalid (too far away from zero) + // We compare the PTS to the timebase value equal to 1 second (which means the PTS + // must be within the -1 second to +1 second of zero, otherwise we ignore it) + // TODO: Please see https://github.com/OpenShot/libopenshot/pull/565#issuecomment-690985272 + // for ideas to improve this logic. + int64_t max_offset = info.video_timebase.Reciprocal().ToFloat(); + if (video_pts_offset < -max_offset || video_pts_offset > max_offset) { + // Ignore PTS, it seems invalid + video_pts_offset = 0; + } // debug output ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::UpdatePTSOffset (Video)", "video_pts_offset", video_pts_offset, "is_video", is_video); @@ -1878,8 +1893,18 @@ // AUDIO PACKET if (audio_pts_offset == 99999) // Has the offset been set yet? { - // Find the difference between PTS and frame number (no more than 10 timebase units allowed) - audio_pts_offset = 0 - std::max(packet->pts, (int64_t) info.audio_timebase.ToInt() * 10); + // Find the difference between PTS and frame number + // Also, determine if PTS is invalid (too far away from zero) + // We compare the PTS to the timebase value equal to 1 second (which means the PTS + // must be within the -1 second to +1 second of zero, otherwise we ignore it) + // TODO: Please see https://github.com/OpenShot/libopenshot/pull/565#issuecomment-690985272 + // for ideas to improve this logic. + audio_pts_offset = 0 - packet->pts; + int64_t max_offset = info.audio_timebase.Reciprocal().ToFloat(); + if (audio_pts_offset < -max_offset || audio_pts_offset > max_offset) { + // Ignore PTS, it seems invalid + audio_pts_offset = 0; + } // debug output ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::UpdatePTSOffset (Audio)", "audio_pts_offset", audio_pts_offset, "is_video", is_video);
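Note: the new sanity check bounds the candidate offset by one second's worth of timebase ticks: the reciprocal of a 1/30000 timebase is 30000/1, so max_offset becomes 30000 ticks, i.e. exactly one second. A self-contained sketch of the same arithmetic, with a simplified stand-in for openshot::Fraction (the function name is hypothetical):

```cpp
#include <cstdint>

// Simplified stand-in for openshot::Fraction, enough for the PTS check.
struct Fraction {
    int num, den;
    Fraction Reciprocal() const { return {den, num}; }
    float ToFloat() const { return float(num) / float(den); }
};

// Mirrors the UpdatePTSOffset() logic: a candidate offset is accepted only
// if it stays within +/- one second of zero, measured in timebase ticks.
int64_t sanitize_pts_offset(int64_t first_pts, Fraction timebase) {
    int64_t offset = 0 - first_pts;
    // e.g. timebase 1/30000 -> one second == 30000 ticks
    int64_t max_offset = timebase.Reciprocal().ToFloat();
    if (offset < -max_offset || offset > max_offset)
        return 0;  // PTS looks bogus; ignore it
    return offset;
}
```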
diff --git a/src/FFmpegWriter.cpp b/src/FFmpegWriter.cpp index 5797d09e1..b2aec5f1e 100644 --- a/src/FFmpegWriter.cpp +++ b/src/FFmpegWriter.cpp @@ -438,6 +438,7 @@ void FFmpegWriter::SetOption(StreamType stream, std::string name, std::string va av_opt_set_int(c->priv_data, "qp", std::min(std::stoi(value), 51), 0); // 0-51 if (std::stoi(value) == 0) { av_opt_set(c->priv_data, "preset", "veryslow", 0); + c->pix_fmt = PIX_FMT_YUV444P; // no chroma subsampling } break; case AV_CODEC_ID_HEVC : @@ -498,6 +499,7 @@ void FFmpegWriter::SetOption(StreamType stream, std::string name, std::string va av_opt_set_int(c->priv_data, "crf", std::min(std::stoi(value), 51), 0); // 0-51 if (std::stoi(value) == 0) { av_opt_set(c->priv_data, "preset", "veryslow", 0); + c->pix_fmt = PIX_FMT_YUV444P; // no chroma subsampling } break; case AV_CODEC_ID_HEVC : @@ -537,7 +539,7 @@ void FFmpegWriter::SetOption(StreamType stream, std::string name, std::string va switch (c->codec_id) { case AV_CODEC_ID_AV1 : c->bit_rate = 0; - if (strstr(info.vcodec.c_str(), "svt_av1") != NULL) { + if (strstr(info.vcodec.c_str(), "svtav1") != NULL) { av_opt_set_int(c->priv_data, "qp", std::min(std::stoi(value),63), 0); } else if (strstr(info.vcodec.c_str(), "rav1e") != NULL) { @@ -1237,7 +1239,7 @@ AVStream *FFmpegWriter::add_video_stream() { info.video_bit_rate = calculated_quality; } // medium } - if (strstr(info.vcodec.c_str(), "svt_av1") != NULL) { + if (strstr(info.vcodec.c_str(), "svtav1") != NULL) { av_opt_set_int(c->priv_data, "preset", 6, 0); av_opt_set_int(c->priv_data, "forced-idr",1,0); }
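Note: forcing yuv444p when crf/qp is 0 matters because x264's lossless mode only preserves every pixel if chroma subsampling is disabled; with the default yuv420p, color information would still be discarded. A hedged sketch of that plumbing, using only the libavcodec/libavutil calls that appear in the diff (the helper function is illustrative):

```cpp
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavutil/opt.h>
}

// Sketch of the lossless H.264 path: quality 0 selects the slowest preset
// and full-resolution chroma, so no information is discarded.
void configure_lossless_h264(AVCodecContext* c) {
    av_opt_set_int(c->priv_data, "crf", 0, 0);        // lossless rate control
    av_opt_set(c->priv_data, "preset", "veryslow", 0);
    c->pix_fmt = AV_PIX_FMT_YUV444P;                  // no chroma subsampling
}
```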
diff --git a/src/Frame.cpp b/src/Frame.cpp index 53158a723..4bd8f03f7 100644 --- a/src/Frame.cpp +++ b/src/Frame.cpp @@ -30,6 +30,9 @@ #include "../include/Frame.h" +#include <thread> // for std::this_thread::sleep_for +#include <chrono> // for std::chrono::milliseconds + using namespace std; using namespace openshot; @@ -1079,7 +1082,7 @@ void Frame::Play() while (transport1.isPlaying()) { cout << "playing" << endl; - usleep(1000000); + std::this_thread::sleep_for(std::chrono::seconds(1)); } cout << "DONE!!!" << endl;

diff --git a/src/FrameMapper.cpp b/src/FrameMapper.cpp index e1e5700c2..4c561f8fe 100644 --- a/src/FrameMapper.cpp +++ b/src/FrameMapper.cpp @@ -487,7 +487,7 @@ std::shared_ptr<Frame> FrameMapper::GetFrame(int64_t requested_frame) // includes some additional input samples on first iteration, // and continues the offset to ensure that the sample rate // converter isn't input limited. - const int EXTRA_INPUT_SAMPLES = 20; + const int EXTRA_INPUT_SAMPLES = 100; // Extend end sample count by an additional EXTRA_INPUT_SAMPLES samples copy_samples.sample_end += EXTRA_INPUT_SAMPLES;

diff --git a/src/Qt/AudioPlaybackThread.cpp b/src/Qt/AudioPlaybackThread.cpp index 44c837e74..2ee1009c5 100644 --- a/src/Qt/AudioPlaybackThread.cpp +++ b/src/Qt/AudioPlaybackThread.cpp @@ -31,6 +31,9 @@ #include "../../include/Qt/AudioPlaybackThread.h" +#include <thread> // for std::this_thread::sleep_for +#include <chrono> // for std::chrono::milliseconds + namespace openshot { @@ -194,7 +197,7 @@ transport.start(); while (!threadShouldExit() && transport.isPlaying() && is_playing) - usleep(2500); + std::this_thread::sleep_for(std::chrono::milliseconds(2)); // Stop audio and shutdown transport Stop();
diff --git a/src/Qt/PlayerPrivate.cpp b/src/Qt/PlayerPrivate.cpp index e7c881f8c..db6f2ff10 100644 --- a/src/Qt/PlayerPrivate.cpp +++ b/src/Qt/PlayerPrivate.cpp @@ -31,6 +31,9 @@ #include "../../include/Qt/PlayerPrivate.h" +#include <thread> // for std::this_thread::sleep_for +#include <chrono> // for std::chrono milliseconds, high_resolution_clock + namespace openshot { // Constructor @@ -54,109 +57,120 @@ // Start thread void PlayerPrivate::run() { - // bail if no reader set - if (!reader) - return; - - // Start the threads - if (reader->info.has_audio) - audioPlayback->startThread(8); - if (reader->info.has_video) { - videoCache->startThread(2); - videoPlayback->startThread(4); - } - - while (!threadShouldExit()) { - - // Calculate the milliseconds a single frame should stay on the screen - double frame_time = (1000.0 / reader->info.fps.ToDouble()); - - // Get the start time (to track how long a frame takes to render) - const Time t1 = Time::getCurrentTime(); - - // Get the current video frame (if it's different) - frame = getFrame(); - - // Experimental Pausing Code (if frame has not changed) - if ((speed == 0 && video_position == last_video_position) || (video_position > reader->info.video_length)) { - speed = 0; - sleep(frame_time); - continue; - } - - // Set the video frame on the video thread and render frame - videoPlayback->frame = frame; - videoPlayback->render.signal(); - - // Keep track of the last displayed frame - last_video_position = video_position; - - // How many frames ahead or behind is the video thread? - int64_t video_frame_diff = 0; - if (reader->info.has_audio && reader->info.has_video) { - if (speed != 1) - // Set audio frame again (since we are not in normal speed, and not paused) - audioPlayback->Seek(video_position); - - // Only calculate this if a reader contains both an audio and video thread - audio_position = audioPlayback->getCurrentFramePosition(); - video_frame_diff = video_position - audio_position; - } - - // Get the end time (to track how long a frame takes to render) - const Time t2 = Time::getCurrentTime(); - - // Determine how many milliseconds it took to render the frame - int64_t render_time = t2.toMilliseconds() - t1.toMilliseconds(); - - // Calculate the amount of time to sleep (by subtracting the render time) - int sleep_time = int(frame_time - render_time); - - // Debug - ZmqLogger::Instance()->AppendDebugMethod("PlayerPrivate::run (determine sleep)", "video_frame_diff", video_frame_diff, "video_position", video_position, "audio_position", audio_position, "speed", speed, "render_time", render_time, "sleep_time", sleep_time); - - // Adjust drift (if more than a few frames off between audio and video) - if (video_frame_diff > 0 && reader->info.has_audio && reader->info.has_video) - // Since the audio and video threads are running independently, they will quickly get out of sync. - // To fix this, we calculate how far ahead or behind the video frame is, and adjust the amount of time - // the frame is displayed on the screen (i.e. the sleep time). If a frame is ahead of the audio, - // we sleep for longer. If a frame is behind the audio, we sleep less (or not at all), in order for - // the video to catch up. - sleep_time += (video_frame_diff * (1000.0 / reader->info.fps.ToDouble())); - - - else if (video_frame_diff < -10 && reader->info.has_audio && reader->info.has_video) { - // Skip frame(s) to catch up to the audio (if more than 10 frames behind) - video_position += abs(video_frame_diff) / 2; // Seek forward 1/2 the difference - sleep_time = 0; // Don't sleep now... immediately go to next position - } - - // Sleep (leaving the video frame on the screen for the correct amount of time) - if (sleep_time > 0) usleep(sleep_time * 1000); - - } + // bail if no reader set + if (!reader) + return; + + // Start the threads + if (reader->info.has_audio) + audioPlayback->startThread(8); + if (reader->info.has_video) { + videoCache->startThread(2); + videoPlayback->startThread(4); + } + + using std::chrono::duration_cast; + + // Types for storing time durations in whole and fractional milliseconds + using ms = std::chrono::milliseconds; + using double_ms = std::chrono::duration<double, std::milli>; + + // Calculate on-screen time for a single frame in milliseconds + const auto frame_duration = double_ms(1000.0 / reader->info.fps.ToDouble()); + + while (!threadShouldExit()) { + // Get the start time (to track how long a frame takes to render) + const auto time1 = std::chrono::high_resolution_clock::now(); + + // Get the current video frame (if it's different) + frame = getFrame(); + + // Experimental Pausing Code (if frame has not changed) + if ((speed == 0 && video_position == last_video_position) + || (video_position > reader->info.video_length) + ) { + speed = 0; + std::this_thread::sleep_for(frame_duration); + continue; + } + + // Set the video frame on the video thread and render frame + videoPlayback->frame = frame; + videoPlayback->render.signal(); + + // Keep track of the last displayed frame + last_video_position = video_position; + + // How many frames ahead or behind is the video thread? + int64_t video_frame_diff = 0; + if (reader->info.has_audio && reader->info.has_video) { + if (speed != 1) + // Set audio frame again (since we are not in normal speed, and not paused) + audioPlayback->Seek(video_position); + + // Only calculate this if a reader contains both an audio and video thread + audio_position = audioPlayback->getCurrentFramePosition(); + video_frame_diff = video_position - audio_position; + } + + // Get the end time (to track how long a frame takes to render) + const auto time2 = std::chrono::high_resolution_clock::now(); + + // Determine how many milliseconds it took to render the frame + const auto render_time = double_ms(time2 - time1); + + // Calculate the amount of time to sleep (by subtracting the render time) + auto sleep_time = duration_cast<ms>(frame_duration - render_time); + + // Debug + ZmqLogger::Instance()->AppendDebugMethod("PlayerPrivate::run (determine sleep)", "video_frame_diff", video_frame_diff, "video_position", video_position, "audio_position", audio_position, "speed", speed, "render_time(ms)", render_time.count(), "sleep_time(ms)", sleep_time.count()); + + // Adjust drift (if more than a few frames off between audio and video) + if (video_frame_diff > 0 && reader->info.has_audio && reader->info.has_video) { + // Since the audio and video threads are running independently, + // they will quickly get out of sync. To fix this, we calculate + // how far ahead or behind the video frame is, and adjust the amount + // of time the frame is displayed on the screen (i.e. the sleep time). + // If a frame is ahead of the audio, we sleep for longer. + // If a frame is behind the audio, we sleep less (or not at all), + // in order for the video to catch up.
+ sleep_time += duration_cast<ms>(video_frame_diff * frame_duration); + } + + else if (video_frame_diff < -10 && reader->info.has_audio && reader->info.has_video) { + // Skip frame(s) to catch up to the audio (if more than 10 frames behind) + video_position += std::fabs(video_frame_diff) / 2; // Seek forward 1/2 the difference + sleep_time = sleep_time.zero(); // Don't sleep now... immediately go to next position + } + + // Sleep (leaving the video frame on the screen for the correct amount of time) + if (sleep_time > sleep_time.zero()) { + std::this_thread::sleep_for(sleep_time); + } + + } } // Get the next displayed frame (based on speed and direction) std::shared_ptr<openshot::Frame> PlayerPrivate::getFrame() { - try { - // Get the next frame (based on speed) - if (video_position + speed >= 1 && video_position + speed <= reader->info.video_length) - video_position = video_position + speed; - - if (frame && frame->number == video_position && video_position == last_video_position) { - // return cached frame - return frame; - } - else - { - // Update cache on which frame was retrieved - videoCache->setCurrentFramePosition(video_position); - - // return frame from reader - return reader->GetFrame(video_position); - } + try { + // Get the next frame (based on speed) + if (video_position + speed >= 1 && video_position + speed <= reader->info.video_length) + video_position = video_position + speed; + + if (frame && frame->number == video_position && video_position == last_video_position) { + // return cached frame + return frame; + } + else + { + // Update cache on which frame was retrieved + videoCache->setCurrentFramePosition(video_position); + + // return frame from reader + return reader->GetFrame(video_position); + } } catch (const ReaderClosed & e) { // ...
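Note: the rewrite keeps all frame timing in std::chrono types end to end, which removes the unit-mixing bugs the old int/usleep code invited. A distilled, self-contained sketch of the pattern (fractional-millisecond frame budget, render-time measurement, remainder sleep); the function is illustrative:

```cpp
#include <chrono>
#include <thread>

// Distilled frame-pacing pattern from PlayerPrivate::run(): measure how long
// rendering took, then sleep only for the remainder of the frame budget.
void pace_one_frame(double fps) {
    using ms = std::chrono::milliseconds;
    using double_ms = std::chrono::duration<double, std::milli>;

    const auto frame_duration = double_ms(1000.0 / fps);  // e.g. 33.37 ms at 29.97 fps
    const auto t1 = std::chrono::high_resolution_clock::now();

    // ... render the frame here ...

    const auto t2 = std::chrono::high_resolution_clock::now();
    const auto render_time = double_ms(t2 - t1);
    const auto sleep_time =
        std::chrono::duration_cast<ms>(frame_duration - render_time);
    if (sleep_time > ms::zero())
        std::this_thread::sleep_for(sleep_time);
}
```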
diff --git a/src/Qt/VideoCacheThread.cpp b/src/Qt/VideoCacheThread.cpp index 6cff46ba8..4279e2381 100644 --- a/src/Qt/VideoCacheThread.cpp +++ b/src/Qt/VideoCacheThread.cpp @@ -31,6 +31,9 @@ #include "../../include/Qt/VideoCacheThread.h" #include +#include <thread> // for std::this_thread::sleep_for +#include <chrono> // for std::chrono::milliseconds + namespace openshot { // Constructor @@ -81,10 +84,14 @@ // Start the thread void VideoCacheThread::run() { - while (!threadShouldExit() && is_playing) { + // Types for storing time durations in whole and fractional milliseconds + using ms = std::chrono::milliseconds; + using double_ms = std::chrono::duration<double, std::milli>; + + // Calculate on-screen time for a single frame in milliseconds + const auto frame_duration = double_ms(1000.0 / reader->info.fps.ToDouble()); - // Calculate sleep time for frame rate - double frame_time = (1000.0 / reader->info.fps.ToDouble()); + while (!threadShouldExit() && is_playing) { // Cache frames before the other threads need them // Cache frames up to the max frames @@ -117,7 +124,7 @@ } // Sleep for 1 frame length - usleep(frame_time * 1000); + std::this_thread::sleep_for(frame_duration); } return;

diff --git a/src/QtImageReader.cpp b/src/QtImageReader.cpp index cf64ef930..59060ccd7 100644 --- a/src/QtImageReader.cpp +++ b/src/QtImageReader.cpp @@ -187,7 +187,7 @@ std::shared_ptr<Frame> QtImageReader::GetFrame(int64_t requested_frame) if (max_height <= 0) max_height = info.height; - Clip* parent = (Clip*) GetClip(); + Clip* parent = (Clip*) GetParentClip(); if (parent) { if (parent->scale == SCALE_FIT || parent->scale == SCALE_STRETCH) { // Best fit or Stretch scaling (based on max timeline size * scaling keyframes)

diff --git a/src/ReaderBase.cpp b/src/ReaderBase.cpp index 474dc624e..6799d95ec 100644 --- a/src/ReaderBase.cpp +++ b/src/ReaderBase.cpp @@ -249,13 +249,3 @@ void ReaderBase::SetJsonValue(const Json::Value root) { } } } - -/// Parent clip object of this reader (which can be unparented and NULL) -openshot::ClipBase* ReaderBase::GetClip() { - return parent; -} - -/// Set parent clip object of this reader -void ReaderBase::SetClip(openshot::ClipBase* clip) { - parent = clip; -}
diff --git a/src/Timeline.cpp b/src/Timeline.cpp index 124058ac2..b2f46519c 100644 --- a/src/Timeline.cpp +++ b/src/Timeline.cpp @@ -263,6 +263,67 @@ void Timeline::RemoveClip(Clip* clip) clips.remove(clip); } +// Look up a clip +openshot::ClipBase* Timeline::GetClip(const std::string& id) +{ + // Find the matching clip (if any) + for (const auto& clip : clips) { + if (clip->Id() == id) { + return clip; + } + } + return nullptr; +} + +// Look up a timeline effect +openshot::EffectBase* Timeline::GetEffect(const std::string& id) +{ + // Find the matching effect (if any) + for (const auto& effect : effects) { + if (effect->Id() == id) { + return effect; + } + } + return nullptr; +} + +openshot::EffectBase* Timeline::GetClipEffect(const std::string& id) +{ + // Search all clips for matching effect ID + for (const auto& clip : clips) { + const auto e = clip->GetEffect(id); + if (e != nullptr) { + return e; + } + } + return nullptr; +} + +// Compute the end time of the latest timeline element +double Timeline::GetMaxTime() { + double last_clip = 0.0; + double last_effect = 0.0; + + if (!clips.empty()) { + const auto max_clip = std::max_element( + clips.begin(), clips.end(), CompareClipEndFrames()); + last_clip = (*max_clip)->Position() + (*max_clip)->Duration(); + } + if (!effects.empty()) { + const auto max_effect = std::max_element( + effects.begin(), effects.end(), CompareEffectEndFrames()); + last_effect = (*max_effect)->Position() + (*max_effect)->Duration(); + } + return std::max(last_clip, last_effect); +} + +// Compute the highest frame# based on the latest time and FPS +int64_t Timeline::GetMaxFrame() { + double fps = info.fps.ToDouble(); + auto max_time = GetMaxTime(); + return std::round(max_time * fps) + 1; +} + // Apply a FrameMapper to a clip which matches the settings of this timeline void Timeline::apply_mapper_to_clip(Clip* clip) {
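Note: GetMaxFrame() converts the latest end time into a 1-based frame number. Worked example: a clip at Position 50 with a 45-second duration ends at 95.0 s, and at 30 fps that is round(95 x 30) + 1 = 2851, which is exactly what the new tests assert. A minimal sketch of the same arithmetic (the free function is illustrative):

```cpp
#include <cmath>
#include <cstdint>

// Mirrors Timeline::GetMaxFrame(): latest end time -> 1-based frame number.
int64_t max_frame(double max_time_seconds, double fps) {
    return static_cast<int64_t>(std::round(max_time_seconds * fps)) + 1;
}
// e.g. max_frame(95.0, 30.0) == 95 * 30 + 1 == 2851
```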
diff --git a/src/ZmqLogger.cpp b/src/ZmqLogger.cpp index 103de73d7..b72992308 100644 --- a/src/ZmqLogger.cpp +++ b/src/ZmqLogger.cpp @@ -36,6 +36,12 @@ using namespace std; using namespace openshot; +#include +#include +#include +#include +#include <thread> // for std::this_thread::sleep_for +#include <chrono> // for std::chrono::milliseconds // Global reference to logger @@ -108,7 +114,7 @@ void ZmqLogger::Connection(std::string new_connection) } // Sleeping to allow connection to wake up (0.25 seconds) - usleep(250000); + std::this_thread::sleep_for(std::chrono::milliseconds(250)); } void ZmqLogger::Log(std::string message)

diff --git a/src/effects/Pixelate.cpp b/src/effects/Pixelate.cpp index c993915c5..ed219e4da 100644 --- a/src/effects/Pixelate.cpp +++ b/src/effects/Pixelate.cpp @@ -33,7 +33,7 @@ using namespace openshot; /// Blank constructor, useful when using Json to load the effect properties -Pixelate::Pixelate() : pixelization(0.7), left(0.0), top(0.0), right(0.0), bottom(0.0) { +Pixelate::Pixelate() : pixelization(0.5), left(0.0), top(0.0), right(0.0), bottom(0.0) { // Init effect properties init_effect_details(); } @@ -68,7 +68,7 @@ std::shared_ptr<Frame> Pixelate::GetFrame(std::shared_ptr<Frame> frame, int64_t frame_number) { std::shared_ptr<QImage> frame_image = frame->GetImage(); // Get current keyframe values - double pixelization_value = 1.0 - std::min(fabs(pixelization.GetValue(frame_number)), 1.0); + double pixelization_value = std::min(pow(0.001, fabs(pixelization.GetValue(frame_number))), 1.0); double left_value = left.GetValue(frame_number); double top_value = top.GetValue(frame_number); double right_value = right.GetValue(frame_number); @@ -82,8 +82,12 @@ QRect area(QPoint(0,0), frame_image->size()); area = area.marginsRemoved({int(left_value * w), int(top_value * h), int(right_value * w), int(bottom_value * h)}); + int scale_to = (int) (area.width() * pixelization_value); + if (scale_to < 1) { + scale_to = 1; // Not less than one pixel + } // Copy and scale pixels in area to be pixelated - auto frame_scaled = frame_image->copy(area).scaledToWidth(area.width() * pixelization_value, Qt::SmoothTransformation); + auto frame_scaled = frame_image->copy(area).scaledToWidth(scale_to, Qt::SmoothTransformation); // Draw pixelated image back over original QPainter painter(frame_image.get());
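Note: the new mapping pow(0.001, |k|) makes the pixelization keyframe act exponentially: k = 0 keeps full width (factor 1.0), k = 0.5 yields 0.001^0.5, roughly 0.0316 of the width, and k = 1 shrinks to 0.001 of it, with the result clamped to at least one pixel. A self-contained sketch of that arithmetic (the helper name is illustrative):

```cpp
#include <algorithm>
#include <cmath>

// Mirrors the new Pixelate scaling: the keyframe value maps exponentially
// onto a shrink factor, and the result is clamped to at least one pixel.
int pixelate_target_width(int area_width, double keyframe_value) {
    double factor = std::min(std::pow(0.001, std::fabs(keyframe_value)), 1.0);
    // e.g. keyframe 0.5 -> factor ~0.0316; a 640 px area scales to ~20 px
    return std::max(1, static_cast<int>(area_width * factor));
}
```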
diff --git a/tests/Settings_Tests.cpp b/tests/Settings_Tests.cpp index b63b56a8c..65bd66a66 100644 --- a/tests/Settings_Tests.cpp +++ b/tests/Settings_Tests.cpp @@ -41,7 +41,7 @@ TEST(Settings_Default_Constructor) // Create an empty color Settings *s = Settings::Instance(); - CHECK_EQUAL(0, s->HARDWARE_DECODER); + CHECK_EQUAL(12, s->OMP_THREADS); CHECK_EQUAL(false, s->HIGH_QUALITY_SCALING); CHECK_EQUAL(false, s->WAIT_FOR_VIDEO_PROCESSING_TASK); } @@ -50,15 +50,15 @@ TEST(Settings_Change_Settings) { // Create an empty color Settings *s = Settings::Instance(); - s->HARDWARE_DECODER = 1; + s->OMP_THREADS = 8; s->HIGH_QUALITY_SCALING = true; s->WAIT_FOR_VIDEO_PROCESSING_TASK = true; - CHECK_EQUAL(1, s->HARDWARE_DECODER); + CHECK_EQUAL(8, s->OMP_THREADS); CHECK_EQUAL(true, s->HIGH_QUALITY_SCALING); CHECK_EQUAL(true, s->WAIT_FOR_VIDEO_PROCESSING_TASK); - CHECK_EQUAL(1, s->HARDWARE_DECODER); + CHECK_EQUAL(8, Settings::Instance()->OMP_THREADS); CHECK_EQUAL(true, Settings::Instance()->HIGH_QUALITY_SCALING); CHECK_EQUAL(true, Settings::Instance()->WAIT_FOR_VIDEO_PROCESSING_TASK); }

diff --git a/tests/Timeline_Tests.cpp b/tests/Timeline_Tests.cpp index 946693a1d..47683b792 100644 --- a/tests/Timeline_Tests.cpp +++ b/tests/Timeline_Tests.cpp @@ -36,7 +36,10 @@ using namespace std; using namespace openshot; -TEST(Timeline_Constructor) +SUITE(Timeline) +{ + +TEST(Constructor) { // Create a default fraction (should be 1/1) Fraction fps(30000,1000); @@ -54,7 +57,7 @@ CHECK_EQUAL(240, t2.info.height); } -TEST(Timeline_Width_and_Height_Functions) +TEST(Width_and_Height_Functions) { // Create a default fraction (should be 1/1) Fraction fps(30000,1000); @@ -79,7 +82,7 @@ CHECK_EQUAL(400, t1.info.height); } -TEST(Timeline_Framerate) +TEST(Framerate) { // Create a default fraction (should be 1/1) Fraction fps(24,1); @@ -89,7 +92,7 @@ CHECK_CLOSE(24.0f, t1.info.fps.ToFloat(), 0.00001); } -TEST(Timeline_Check_Two_Track_Video) +TEST(Check_Two_Track_Video) { // Create a reader stringstream path; @@ -186,7 +189,7 @@ t.Close(); } -TEST(Timeline_Clip_Order) +TEST(Clip_Order) { // Create a timeline Timeline t(640, 480, Fraction(30, 1), 44100, 2, LAYOUT_STEREO); @@ -281,7 +284,7 @@ } -TEST(Timeline_Effect_Order) +TEST(Effect_Order) { // Create a timeline Timeline t(640, 480, Fraction(30, 1), 44100, 2, LAYOUT_STEREO); @@ -413,7 +416,127 @@ TEST(Timeline_Effect_Order) t.Close(); } -TEST(Timeline_Effect_Blur) +TEST(GetClip_by_id) +{ + Timeline t(640, 480, Fraction(30, 1), 44100, 2, LAYOUT_STEREO); + + stringstream path1; + path1 << TEST_MEDIA_PATH << "interlaced.png"; + auto media_path1 = path1.str(); + + stringstream path2; + path2 << TEST_MEDIA_PATH << "front.png"; + auto media_path2 = path2.str(); + + Clip clip1(media_path1); + std::string clip1_id("CLIP00001"); + clip1.Id(clip1_id); + clip1.Layer(1); + + Clip clip2(media_path2); + std::string clip2_id("CLIP00002"); + clip2.Id(clip2_id); + clip2.Layer(2); + + t.AddClip(&clip1); + t.AddClip(&clip2); + + auto matched = t.GetClip(clip1_id); + CHECK_EQUAL(clip1_id, matched->Id()); + CHECK_EQUAL(1, matched->Layer()); + + auto matched2 = t.GetClip(clip2_id); + CHECK_EQUAL(clip2_id, matched2->Id()); + CHECK_EQUAL(false, matched2->Layer() < 2); + + auto matched3 = t.GetClip("BAD_ID"); + CHECK_EQUAL(true, matched3 == nullptr); +} + +TEST(GetClipEffect_by_id) +{ + Timeline t(640, 480, Fraction(30, 1), 44100, 2, LAYOUT_STEREO); + + stringstream path1; + path1 << TEST_MEDIA_PATH << "interlaced.png"; + auto media_path1 = path1.str(); + + // Create a clip, nothing special + Clip clip1(media_path1); + std::string clip1_id("CLIP00001"); + clip1.Id(clip1_id); + clip1.Layer(1); + + // Add a blur effect + Keyframe horizontal_radius(5.0); + Keyframe vertical_radius(5.0); + Keyframe sigma(3.0); + Keyframe iterations(3.0); + Blur blur1(horizontal_radius, vertical_radius, sigma, iterations); + std::string blur1_id("EFFECT00011"); + blur1.Id(blur1_id); + clip1.AddEffect(&blur1); + + // A second clip, different layer + Clip clip2(media_path1); + std::string clip2_id("CLIP00002"); + clip2.Id(clip2_id); + clip2.Layer(2); + + // Some effects for clip2 + Negate neg2; + std::string neg2_id("EFFECT00021"); + neg2.Id(neg2_id); + neg2.Layer(2); + clip2.AddEffect(&neg2); + Blur blur2(horizontal_radius, vertical_radius, sigma, iterations); + std::string blur2_id("EFFECT00022"); + blur2.Id(blur2_id); + blur2.Layer(2); + clip2.AddEffect(&blur2); + + t.AddClip(&clip1); + + // Check that we can look up clip1's effect + auto match1 = t.GetClipEffect("EFFECT00011"); + CHECK_EQUAL(blur1_id, match1->Id()); + + // clip2 hasn't been added yet, shouldn't be found + match1 = t.GetClipEffect(blur2_id); + CHECK_EQUAL(true, match1 == nullptr); + + t.AddClip(&clip2); + + // Check that blur2 can now be found via clip2 + match1 = t.GetClipEffect(blur2_id); + CHECK_EQUAL(blur2_id, match1->Id()); + CHECK_EQUAL(2, match1->Layer()); +} + +TEST(GetEffect_by_id) +{ + Timeline t(640, 480, Fraction(30, 1), 44100, 2, LAYOUT_STEREO); + + // Create a timeline effect + Keyframe horizontal_radius(5.0); + Keyframe vertical_radius(5.0); + Keyframe sigma(3.0); + Keyframe iterations(3.0); + Blur blur1(horizontal_radius, vertical_radius, sigma, iterations); + std::string blur1_id("EFFECT00011"); + blur1.Id(blur1_id); + blur1.Layer(1); + t.AddEffect(&blur1); + + auto match1 = t.GetEffect(blur1_id); + CHECK_EQUAL(blur1_id, match1->Id()); + CHECK_EQUAL(1, match1->Layer()); + + match1 = t.GetEffect("NOSUCHNAME"); + CHECK_EQUAL(true, match1 == nullptr); +} + +TEST(Effect_Blur) { // Create a timeline Timeline t(640, 480, Fraction(30, 1), 44100, 2, LAYOUT_STEREO); @@ -443,3 +566,39 @@ TEST(Timeline_Effect_Blur) // Close reader t.Close(); } + +TEST(GetMaxFrame_GetMaxTime) +{ + // Create a timeline + Timeline t(640, 480, Fraction(30, 1), 44100, 2, LAYOUT_STEREO); + + stringstream path1; + path1 << TEST_MEDIA_PATH << "interlaced.png"; + Clip clip1(path1.str()); + clip1.Layer(1); + clip1.Position(50); + clip1.End(45); + t.AddClip(&clip1); + + CHECK_CLOSE(95.0, t.GetMaxTime(), 0.001); + CHECK_EQUAL(95 * 30 + 1, t.GetMaxFrame()); + + Clip clip2(path1.str()); + clip2.Layer(2); + clip2.Position(0); + clip2.End(55); + t.AddClip(&clip2); + + CHECK_EQUAL(95 * 30 + 1, t.GetMaxFrame()); + CHECK_CLOSE(95.0, t.GetMaxTime(), 0.001); + + clip2.Position(100); + clip1.Position(80); + CHECK_EQUAL(155 * 30 + 1, t.GetMaxFrame()); + CHECK_CLOSE(155.0, t.GetMaxTime(), 0.001); + t.RemoveClip(&clip2); + CHECK_EQUAL(125 * 30 + 1, t.GetMaxFrame()); + CHECK_CLOSE(125.0, t.GetMaxTime(), 0.001); +} + +} // SUITE