diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 72696ebe1..f3ac30332 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -9,6 +9,8 @@ jobs:
compiler: [gcc, clang]
env:
CC: ${{ matrix.compiler }}
+ CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
+
steps:
- uses: actions/checkout@v2
diff --git a/.gitignore b/.gitignore
index 5d00d580c..3f706e28c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -6,6 +6,6 @@
.project
.cproject
/.metadata/
+cmake-build-debug/*
tags
*~
-
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 3ce3829ba..61e9defc1 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -20,12 +20,14 @@ linux-builder:
- export LIBOPENSHOT_AUDIO_DIR=$CI_PROJECT_DIR/build/install-x64
- mkdir -p build; cd build;
- cmake -DCMAKE_VERBOSE_MAKEFILE:BOOL=ON -D"CMAKE_INSTALL_PREFIX:PATH=$CI_PROJECT_DIR/build/install-x64" -D"PYTHON_MODULE_PATH=python" -D"RUBY_MODULE_PATH=ruby" -DCMAKE_BUILD_TYPE:STRING=Release -DAPPIMAGE_BUILD=1 -DUSE_SYSTEM_JSONCPP=0 ../
- - make
+ - make -j 4
- make install
- make doc
- ~/auto-update-docs "$CI_PROJECT_DIR/build" "$CI_COMMIT_REF_NAME"
- - echo -e "CI_PROJECT_NAME:$CI_PROJECT_NAME\nCI_COMMIT_REF_NAME:$CI_COMMIT_REF_NAME\nCI_COMMIT_SHA:$CI_COMMIT_SHA\nCI_JOB_ID:$CI_JOB_ID" > "install-x64/share/$CI_PROJECT_NAME"
- - git log $(git describe --tags --abbrev=0 '@^')..@ --oneline --no-abbrev --date=short --no-merges --pretty="tformat:$GIT_LOG_FORMAT" > "install-x64/share/$CI_PROJECT_NAME.log"
+ - PROJECT_VERSION=$(grep -E '^set\(PROJECT_VERSION_FULL "(.*)' ../CMakeLists.txt | awk '{print $2}' | tr -d '")')
+ - PROJECT_SO=$(grep -E '^set\(PROJECT_SO_VERSION (.*)' ../CMakeLists.txt | awk '{print $2}' | tr -d ')')
+ - echo -e "CI_PROJECT_NAME:$CI_PROJECT_NAME\nCI_COMMIT_REF_NAME:$CI_COMMIT_REF_NAME\nCI_COMMIT_SHA:$CI_COMMIT_SHA\nCI_JOB_ID:$CI_JOB_ID\nCI_PIPELINE_ID:$CI_PIPELINE_ID\nVERSION:$PROJECT_VERSION\nSO:$PROJECT_SO" > "install-x64/share/$CI_PROJECT_NAME"
+ - git log $(git describe --tags --abbrev=0 @^)..@ --oneline --pretty=format:"- %C(auto,yellow)%h%C(auto,magenta)% %C(auto,blue)%>(12,trunc)%ad %C(auto,green)%<(25,trunc)%aN%C(auto,reset)%s%C(auto,red)% gD% D" --date=short > "install-x64/share/$CI_PROJECT_NAME.log"
when: always
except:
- tags
@@ -47,10 +49,12 @@ mac-builder:
- export LIBOPENSHOT_AUDIO_DIR=$CI_PROJECT_DIR/build/install-x64
- mkdir -p build; cd build;
- cmake -DCMAKE_EXE_LINKER_FLAGS="-stdlib=libc++" -DCMAKE_SHARED_LINKER_FLAGS="-stdlib=libc++" -DCMAKE_VERBOSE_MAKEFILE:BOOL=ON -D"CMAKE_INSTALL_PREFIX:PATH=$CI_PROJECT_DIR/build/install-x64" -DCMAKE_CXX_COMPILER=clang++ -DCMAKE_C_COMPILER=clang -D"CMAKE_BUILD_TYPE:STRING=Release" -D"CMAKE_OSX_SYSROOT=/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.11.sdk" -D"CMAKE_OSX_DEPLOYMENT_TARGET=10.9" -DCMAKE_PREFIX_PATH=/usr/local/qt5.15.X/qt5.15/5.15.0/clang_64/ -D"CMAKE_INSTALL_RPATH_USE_LINK_PATH=1" -D"ENABLE_RUBY=0" ../
- - make
+ - make -j 9
- make install
- - echo -e "CI_PROJECT_NAME:$CI_PROJECT_NAME\nCI_COMMIT_REF_NAME:$CI_COMMIT_REF_NAME\nCI_COMMIT_SHA:$CI_COMMIT_SHA\nCI_JOB_ID:$CI_JOB_ID" > "install-x64/share/$CI_PROJECT_NAME"
- - git log $(git describe --tags --abbrev=0 '@^')..@ --oneline --no-abbrev --date=short --no-merges --pretty="tformat:$GIT_LOG_FORMAT" > "install-x64/share/$CI_PROJECT_NAME.log"
+ - PROJECT_VERSION=$(grep -E '^set\(PROJECT_VERSION_FULL "(.*)' ../CMakeLists.txt | awk '{print $2}' | tr -d '")')
+ - PROJECT_SO=$(grep -E '^set\(PROJECT_SO_VERSION (.*)' ../CMakeLists.txt | awk '{print $2}' | tr -d ')')
+ - echo -e "CI_PROJECT_NAME:$CI_PROJECT_NAME\nCI_COMMIT_REF_NAME:$CI_COMMIT_REF_NAME\nCI_COMMIT_SHA:$CI_COMMIT_SHA\nCI_JOB_ID:$CI_JOB_ID\nCI_PIPELINE_ID:$CI_PIPELINE_ID\nVERSION:$PROJECT_VERSION\nSO:$PROJECT_SO" > "install-x64/share/$CI_PROJECT_NAME"
+ - git log $(git describe --tags --abbrev=0 @^)..@ --oneline --pretty=format:"- %C(auto,yellow)%h%C(auto,magenta)% %C(auto,blue)%>(12,trunc)%ad %C(auto,green)%<(25,trunc)%aN%C(auto,reset)%s%C(auto,red)% gD% D" --date=short > "install-x64/share/$CI_PROJECT_NAME.log"
when: always
except:
- tags
@@ -75,9 +79,11 @@ windows-builder-x64:
- cd build
- cmake -DCMAKE_VERBOSE_MAKEFILE:BOOL=ON -D"CMAKE_INSTALL_PREFIX:PATH=$CI_PROJECT_DIR\build\install-x64" -D"PYTHON_MODULE_PATH=python" -D"RUBY_MODULE_PATH=ruby" -G "MSYS Makefiles" -DCMAKE_MAKE_PROGRAM=mingw32-make -D"CMAKE_BUILD_TYPE:STRING=Release" ../
- mingw32-make install
- - New-Item -path "install-x64/share/" -Name "$CI_PROJECT_NAME" -Value "CI_PROJECT_NAME:$CI_PROJECT_NAME`nCI_COMMIT_REF_NAME:$CI_COMMIT_REF_NAME`nCI_COMMIT_SHA:$CI_COMMIT_SHA`nCI_JOB_ID:$CI_JOB_ID" -ItemType file -force
+ - $PROJECT_VERSION = (Select-String -Path "../CMakeLists.txt" -Pattern '^set\(PROJECT_VERSION_FULL "(.*)\"' | %{$_.Matches.Groups[1].value})
+ - $PROJECT_SO = (Select-String -Path "../CMakeLists.txt" -Pattern '^set\(PROJECT_SO_VERSION (.*)\)' | %{$_.Matches.Groups[1].value})
+ - New-Item -path "install-x64/share/" -Name "$CI_PROJECT_NAME" -Value "CI_PROJECT_NAME:$CI_PROJECT_NAME`nCI_COMMIT_REF_NAME:$CI_COMMIT_REF_NAME`nCI_COMMIT_SHA:$CI_COMMIT_SHA`nCI_JOB_ID:$CI_JOB_ID`nCI_PIPELINE_ID:$CI_PIPELINE_ID`nVERSION:$PROJECT_VERSION`nSO:$PROJECT_SO" -ItemType file -force
- $PREV_GIT_LABEL=(git describe --tags --abbrev=0 '@^')
- - git log "$PREV_GIT_LABEL..@" --oneline --no-abbrev --date=short --no-merges --pretty="tformat:$GIT_LOG_FORMAT" > "install-x64/share/$CI_PROJECT_NAME.log"
+ - git log "$PREV_GIT_LABEL..@" --oneline --pretty=format:"- %C(auto,yellow)%h%C(auto,magenta)% %C(auto,blue)%>(12,trunc)%ad %C(auto,green)%<(25,trunc)%aN%C(auto,reset)%s%C(auto,red)% gD% D" --date=short > "install-x64/share/$CI_PROJECT_NAME.log"
when: always
except:
- tags
@@ -102,9 +108,11 @@ windows-builder-x86:
- cd build
- cmake -DCMAKE_VERBOSE_MAKEFILE:BOOL=ON -D"CMAKE_INSTALL_PREFIX:PATH=$CI_PROJECT_DIR\build\install-x86" -D"PYTHON_MODULE_PATH=python" -D"RUBY_MODULE_PATH=ruby" -G "MSYS Makefiles" -DCMAKE_MAKE_PROGRAM=mingw32-make -D"CMAKE_BUILD_TYPE:STRING=Release" -D"CMAKE_CXX_FLAGS=-m32" -D"CMAKE_EXE_LINKER_FLAGS=-Wl,--large-address-aware" -D"CMAKE_C_FLAGS=-m32" ../
- mingw32-make install
- - New-Item -path "install-x86/share/" -Name "$CI_PROJECT_NAME" -Value "CI_PROJECT_NAME:$CI_PROJECT_NAME`nCI_COMMIT_REF_NAME:$CI_COMMIT_REF_NAME`nCI_COMMIT_SHA:$CI_COMMIT_SHA`nCI_JOB_ID:$CI_JOB_ID" -ItemType file -force
+ - $PROJECT_VERSION = (Select-String -Path "../CMakeLists.txt" -Pattern '^set\(PROJECT_VERSION_FULL "(.*)\"' | %{$_.Matches.Groups[1].value})
+ - $PROJECT_SO = (Select-String -Path "../CMakeLists.txt" -Pattern '^set\(PROJECT_SO_VERSION (.*)\)' | %{$_.Matches.Groups[1].value})
+ - New-Item -path "install-x86/share/" -Name "$CI_PROJECT_NAME" -Value "CI_PROJECT_NAME:$CI_PROJECT_NAME`nCI_COMMIT_REF_NAME:$CI_COMMIT_REF_NAME`nCI_COMMIT_SHA:$CI_COMMIT_SHA`nCI_JOB_ID:$CI_JOB_ID`nCI_PIPELINE_ID:$CI_PIPELINE_ID`nVERSION:$PROJECT_VERSION`nSO:$PROJECT_SO" -ItemType file -force
- $PREV_GIT_LABEL=(git describe --tags --abbrev=0 '@^')
- - git log "$PREV_GIT_LABEL..@" --oneline --no-abbrev --date=short --no-merges --pretty="tformat:$GIT_LOG_FORMAT" > "install-x86/share/$CI_PROJECT_NAME.log"
+ - git log "$PREV_GIT_LABEL..@" --oneline --pretty=format:"- %C(auto,yellow)%h%C(auto,magenta)% %C(auto,blue)%>(12,trunc)%ad %C(auto,green)%<(25,trunc)%aN%C(auto,reset)%s%C(auto,red)% gD% D" --date=short > "install-x86/share/$CI_PROJECT_NAME.log"
when: always
except:
- tags
diff --git a/CMakeLists.txt b/CMakeLists.txt
index ff71dde75..85aa26409 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -40,7 +40,7 @@ For more information, please visit .
set(CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake/Modules")
################ PROJECT VERSION ####################
-set(PROJECT_VERSION_FULL "0.2.5-dev2")
+set(PROJECT_VERSION_FULL "0.2.5-dev3")
set(PROJECT_SO_VERSION 19)
# Remove the dash and anything following, to get the #.#.# version for project()
diff --git a/examples/Example.cpp b/examples/Example.cpp
index 2cd770712..a11bb5aba 100644
--- a/examples/Example.cpp
+++ b/examples/Example.cpp
@@ -39,51 +39,48 @@ using namespace openshot;
int main(int argc, char* argv[]) {
- Settings *s = Settings::Instance();
- s->HARDWARE_DECODER = 2; // 1 VA-API, 2 NVDEC, 6 VDPAU
- s->HW_DE_DEVICE_SET = 0;
-
- std::string input_filepath = TEST_MEDIA_PATH;
- input_filepath += "sintel_trailer-720p.mp4";
-
- FFmpegReader r9(input_filepath);
+ // Types for storing time durations in whole and fractional milliseconds
+ using ms = std::chrono::milliseconds;
+ using s = std::chrono::seconds;
+ using double_ms = std::chrono::duration<double, ms::period>;
+
+ // FFmpeg Reader performance test
+ const auto total_1 = std::chrono::high_resolution_clock::now();
+ FFmpegReader r9(std::string(TEST_MEDIA_PATH) + "sintel_trailer-720p.mp4");
r9.Open();
- r9.DisplayInfo();
-
- /* WRITER ---------------- */
- FFmpegWriter w9("metadata.mp4");
-
- // Set options
- w9.SetAudioOptions(true, "libmp3lame", r9.info.sample_rate, r9.info.channels, r9.info.channel_layout, 128000);
- w9.SetVideoOptions(true, "libx264", r9.info.fps, 1024, 576, Fraction(1,1), false, false, 3000000);
+ for (long int frame = 1; frame <= 1000; frame++)
+ {
+ const auto time1 = std::chrono::high_resolution_clock::now();
+ std::shared_ptr<Frame> f = r9.GetFrame(frame);
+ const auto time2 = std::chrono::high_resolution_clock::now();
+ std::cout << "FFmpegReader: " << frame << " (" << double_ms(time2 - time1).count() << " ms)" << std::endl;
+ }
+ const auto total_2 = std::chrono::high_resolution_clock::now();
+ auto total_sec = std::chrono::duration_cast<ms>(total_2 - total_1);
+ std::cout << "FFmpegReader TOTAL: " << total_sec.count() << " ms" << std::endl;
+ r9.Close();
- w9.info.metadata["title"] = "testtest";
- w9.info.metadata["artist"] = "aaa";
- w9.info.metadata["album"] = "bbb";
- w9.info.metadata["year"] = "2015";
- w9.info.metadata["description"] = "ddd";
- w9.info.metadata["comment"] = "eee";
- w9.info.metadata["comment"] = "comment";
- w9.info.metadata["copyright"] = "copyright OpenShot!";
- // Open writer
- w9.Open();
+ // Timeline Reader performance test
+ Timeline tm(r9.info.width, r9.info.height, r9.info.fps, r9.info.sample_rate, r9.info.channels, r9.info.channel_layout);
+ Clip *c = new Clip(&r9);
+ tm.AddClip(c);
+ tm.Open();
- for (long int frame = 1; frame <= 100; frame++)
+ const auto total_3 = std::chrono::high_resolution_clock::now();
+ for (long int frame = 1; frame <= 1000; frame++)
{
- //int frame_number = (rand() % 750) + 1;
- int frame_number = frame;
- std::shared_ptr f = r9.GetFrame(frame_number);
- w9.WriteFrame(f);
+ const auto time1 = std::chrono::high_resolution_clock::now();
+ std::shared_ptr<Frame> f = tm.GetFrame(frame);
+ const auto time2 = std::chrono::high_resolution_clock::now();
+ std::cout << "Timeline: " << frame << " (" << double_ms(time2 - time1).count() << " ms)" << std::endl;
}
+ const auto total_4 = std::chrono::high_resolution_clock::now();
+ total_sec = std::chrono::duration_cast<ms>(total_4 - total_3);
+ std::cout << "Timeline TOTAL: " << total_sec.count() << " ms" << std::endl;
+ tm.Close();
- // Close writer & reader
- w9.Close();
-
- // Close timeline
- r9.Close();
-
- std::cout << "Completed successfully!" << std::endl;
+ std::cout << "Completed successfully!" << std::endl;
return 0;
}
diff --git a/examples/qt-demo/main.cpp b/examples/qt-demo/main.cpp
index 8c02795ab..5d4120aca 100644
--- a/examples/qt-demo/main.cpp
+++ b/examples/qt-demo/main.cpp
@@ -29,11 +29,16 @@
*/
#include "Qt/PlayerDemo.h"
-
+#include "ZmqLogger.h"
#include <QApplication>
int main(int argc, char *argv[])
{
+ // Enable logging for openshot-player since this is primarily used for
+ // profiling and debugging video playback issues.
+ openshot::ZmqLogger::Instance()->Enable(true);
+ openshot::ZmqLogger::Instance()->Path("./player.log");
+
QApplication app(argc, argv);
PlayerDemo demo;
demo.show();
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 56e953707..387ab59e8 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -35,8 +35,8 @@ if (POLICY CMP0057)
endif()
############### PROFILING #################
-#set(PROFILER "/usr/lib//usr/lib/libprofiler.so.0.4.5")
-#set(PROFILER "/usr/lib/libtcmalloc.so.4")
+#set(PROFILER "/usr/lib/x86_64-linux-gnu/libprofiler.so.0")
+#set(PROFILER "/usr/lib/x86_64-linux-gnu/libtcmalloc.so.4")
if(CMAKE_VERSION VERSION_LESS 3.3)
# IWYU wasn't supported internally in 3.2
diff --git a/src/Clip.cpp b/src/Clip.cpp
index eb22d225b..0928f90af 100644
--- a/src/Clip.cpp
+++ b/src/Clip.cpp
@@ -105,7 +105,7 @@ void Clip::init_settings()
parentTrackedObject = nullptr;
parentClipObject = NULL;
- // Init reader info struct and cache size
+ // Init reader info struct
init_reader_settings();
}
@@ -117,9 +117,6 @@ void Clip::init_reader_settings() {
// Initialize info struct
info = reader->info;
-
- // Initialize Clip cache
- cache.SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * 2, info.width, info.height, info.sample_rate, info.channels);
}
}
@@ -168,7 +165,7 @@ Clip::Clip(ReaderBase* new_reader) : resampler(NULL), reader(new_reader), alloca
if (reader) {
End(reader->info.duration);
reader->ParentClip(this);
- // Init reader info struct and cache size
+ // Init reader info struct
init_reader_settings();
}
}
@@ -228,7 +225,7 @@ Clip::Clip(std::string path) : resampler(NULL), reader(NULL), allocated_reader(N
End(reader->info.duration);
reader->ParentClip(this);
allocated_reader = reader;
- // Init reader info struct and cache size
+ // Init reader info struct
init_reader_settings();
}
}
@@ -292,7 +289,7 @@ void Clip::Reader(ReaderBase* new_reader)
// set parent
reader->ParentClip(this);
- // Init reader info struct and cache size
+ // Init reader info struct
init_reader_settings();
}
@@ -386,7 +383,7 @@ std::shared_ptr Clip::GetFrame(int64_t frame_number)
}
// Use an existing openshot::Frame object and draw this Clip's frame onto it
-std::shared_ptr Clip::GetFrame(std::shared_ptr frame, int64_t frame_number)
+std::shared_ptr<Frame> Clip::GetFrame(std::shared_ptr<Frame> background_frame, int64_t frame_number)
{
// Check for open reader (or throw exception)
if (!is_open)
@@ -397,28 +394,6 @@ std::shared_ptr Clip::GetFrame(std::shared_ptr frame, in
// Adjust out of bounds frame number
frame_number = adjust_frame_number_minimum(frame_number);
- // Check the cache for this frame
- std::shared_ptr cached_frame = cache.GetFrame(frame_number);
- if (cached_frame) {
- // Debug output
- ZmqLogger::Instance()->AppendDebugMethod("Clip::GetFrame", "returned cached frame", frame_number);
-
- // Return the cached frame
- return cached_frame;
- }
-
- // Adjust has_video and has_audio overrides
- int enabled_audio = has_audio.GetInt(frame_number);
- if (enabled_audio == -1 && reader && reader->info.has_audio)
- enabled_audio = 1;
- else if (enabled_audio == -1 && reader && !reader->info.has_audio)
- enabled_audio = 0;
- int enabled_video = has_video.GetInt(frame_number);
- if (enabled_video == -1 && reader && reader->info.has_video)
- enabled_video = 1;
- else if (enabled_video == -1 && reader && !reader->info.has_audio)
- enabled_video = 0;
-
// Is a time map detected
int64_t new_frame_number = frame_number;
int64_t time_mapped_number = adjust_frame_number_minimum(time.GetLong(frame_number));
@@ -426,49 +401,20 @@ std::shared_ptr Clip::GetFrame(std::shared_ptr frame, in
new_frame_number = time_mapped_number;
// Now that we have re-mapped what frame number is needed, go and get the frame pointer
- std::shared_ptr original_frame;
- original_frame = GetOrCreateFrame(new_frame_number);
-
- // Copy the image from the odd field
- if (enabled_video)
- frame->AddImage(std::make_shared(*original_frame->GetImage()));
-
- // Loop through each channel, add audio
- if (enabled_audio && reader->info.has_audio)
- for (int channel = 0; channel < original_frame->GetAudioChannelsCount(); channel++)
- frame->AddAudio(true, channel, 0, original_frame->GetAudioSamples(channel), original_frame->GetAudioSamplesCount(), 1.0);
+ std::shared_ptr<Frame> original_frame = GetOrCreateFrame(new_frame_number);
// Get time mapped frame number (used to increase speed, change direction, etc...)
// TODO: Handle variable # of samples, since this resamples audio for different speeds (only when time curve is set)
- get_time_mapped_frame(frame, new_frame_number);
-
- // Adjust # of samples to match requested (the interaction with time curves will make this tricky)
- // TODO: Implement move samples to/from next frame
+ get_time_mapped_frame(original_frame, new_frame_number);
// Apply effects to the frame (if any)
- apply_effects(frame);
-
- // Determine size of image (from Timeline or Reader)
- int width = 0;
- int height = 0;
- if (timeline) {
- // Use timeline size (if available)
- width = timeline->preview_width;
- height = timeline->preview_height;
- } else {
- // Fallback to clip size
- width = reader->info.width;
- height = reader->info.height;
- }
+ apply_effects(original_frame);
// Apply keyframe / transforms
- apply_keyframes(frame, width, height);
-
- // Cache frame
- cache.Add(frame);
+ apply_keyframes(original_frame, background_frame->GetImage());
// Return processed 'frame'
- return frame;
+ return original_frame;
}
else
// Throw error if reader not initialized
@@ -749,7 +695,6 @@ std::shared_ptr Clip::GetOrCreateFrame(int64_t number)
// Create a new copy of reader frame
// This allows a clip to modify the pixels and audio of this frame without
// changing the underlying reader's frame data
- //std::shared_ptr reader_copy(new Frame(number, 1, 1, "#000000", reader_frame->GetAudioSamplesCount(), reader_frame->GetAudioChannelsCount()));
auto reader_copy = std::make_shared(*reader_frame.get());
reader_copy->SampleRate(reader_frame->SampleRate());
reader_copy->ChannelsLayout(reader_frame->ChannelsLayout());
@@ -1014,9 +959,6 @@ void Clip::SetJsonValue(const Json::Value root) {
// Set parent data
ClipBase::SetJsonValue(root);
- // Clear cache
- cache.Clear();
-
// Set data from Json (if key is found)
if (!root["parentObjectId"].isNull()){
parentObjectId = root["parentObjectId"].asString();
@@ -1248,9 +1190,6 @@ void Clip::AddEffect(EffectBase* effect)
void Clip::RemoveEffect(EffectBase* effect)
{
effects.remove(effect);
-
- // Clear cache
- cache.Clear();
}
// Apply effects to the source frame (if any)
@@ -1271,29 +1210,94 @@ bool Clip::isEqual(double a, double b)
return fabs(a - b) < 0.000001;
}
+// Apply keyframes to the source frame (if any)
+void Clip::apply_keyframes(std::shared_ptr<Frame> frame, std::shared_ptr<QImage> background_canvas) {
+ // Skip out if video was disabled or only an audio frame (no visualisation in use)
+ if (has_video.GetInt(frame->number) == 0 ||
+ (!Waveform() && !Reader()->info.has_video))
+ // Skip the rest of the image processing for performance reasons
+ return;
+
+ // Get image from clip
+ std::shared_ptr<QImage> source_image = frame->GetImage();
+
+ /* REPLACE IMAGE WITH WAVEFORM IMAGE (IF NEEDED) */
+ if (Waveform())
+ {
+ // Debug output
+ ZmqLogger::Instance()->AppendDebugMethod("Clip::get_transform (Generate Waveform Image)", "frame->number", frame->number, "Waveform()", Waveform());
+
+ // Get the color of the waveform
+ int red = wave_color.red.GetInt(frame->number);
+ int green = wave_color.green.GetInt(frame->number);
+ int blue = wave_color.blue.GetInt(frame->number);
+ int alpha = wave_color.alpha.GetInt(frame->number);
+
+ // Generate Waveform Dynamically (the size of the timeline)
+ source_image = frame->GetWaveform(background_canvas->width(), background_canvas->height(), red, green, blue, alpha);
+ }
+
+ // Size of final image
+ int width = background_canvas->width();
+ int height = background_canvas->height();
+
+ // Get transform from clip's keyframes
+ QTransform transform = get_transform(frame, width, height);
+
+ // Debug output
+ ZmqLogger::Instance()->AppendDebugMethod("Clip::ApplyKeyframes (Transform: Composite Image Layer: Prepare)", "frame->number", frame->number);
+
+ // Load timeline's new frame image into a QPainter
+ QPainter painter(background_canvas.get());
+ painter.setRenderHints(QPainter::Antialiasing | QPainter::SmoothPixmapTransform | QPainter::TextAntialiasing, true);
+
+ // Apply transform (translate, rotate, scale)
+ painter.setTransform(transform);
+
+ // Composite a new layer onto the image
+ painter.setCompositionMode(QPainter::CompositionMode_SourceOver);
+ painter.drawImage(0, 0, *source_image);
+
+ if (timeline) {
+ Timeline *t = (Timeline *) timeline;
+
+ // Draw frame #'s on top of image (if needed)
+ if (display != FRAME_DISPLAY_NONE) {
+ std::stringstream frame_number_str;
+ switch (display) {
+ case (FRAME_DISPLAY_NONE):
+ // This is only here to prevent unused-enum warnings
+ break;
+
+ case (FRAME_DISPLAY_CLIP):
+ frame_number_str << frame->number;
+ break;
+
+ case (FRAME_DISPLAY_TIMELINE):
+ frame_number_str << (position * t->info.fps.ToFloat()) + frame->number;
+ break;
+
+ case (FRAME_DISPLAY_BOTH):
+ frame_number_str << (position * t->info.fps.ToFloat()) + frame->number << " (" << frame->number << ")";
+ break;
+ }
+
+ // Draw frame number on top of image
+ painter.setPen(QColor("#ffffff"));
+ painter.drawText(20, 20, QString(frame_number_str.str().c_str()));
+ }
+ }
+ painter.end();
+
+ // Add new QImage to frame
+ frame->AddImage(background_canvas);
+}
// Apply keyframes to the source frame (if any)
-void Clip::apply_keyframes(std::shared_ptr frame, int width, int height)
+QTransform Clip::get_transform(std::shared_ptr<Frame> frame, int width, int height)
{
- // Get actual frame image data
- std::shared_ptr source_image = frame->GetImage();
-
- /* REPLACE IMAGE WITH WAVEFORM IMAGE (IF NEEDED) */
- if (Waveform())
- {
- // Debug output
- ZmqLogger::Instance()->AppendDebugMethod("Clip::apply_keyframes (Generate Waveform Image)", "frame->number", frame->number, "Waveform()", Waveform());
-
- // Get the color of the waveform
- int red = wave_color.red.GetInt(frame->number);
- int green = wave_color.green.GetInt(frame->number);
- int blue = wave_color.blue.GetInt(frame->number);
- int alpha = wave_color.alpha.GetInt(frame->number);
-
- // Generate Waveform Dynamically (the size of the timeline)
- source_image = frame->GetWaveform(width, height, red, green, blue, alpha);
- frame->AddImage(std::shared_ptr(source_image));
- }
+ // Get image from clip
+ std::shared_ptr<QImage> source_image = frame->GetImage();
/* ALPHA & OPACITY */
if (alpha.GetValue(frame->number) != 1.0)
@@ -1315,7 +1319,7 @@ void Clip::apply_keyframes(std::shared_ptr frame, int width, int height)
}
// Debug output
- ZmqLogger::Instance()->AppendDebugMethod("Clip::apply_keyframes (Set Alpha & Opacity)", "alpha_value", alpha_value, "frame->number", frame->number);
+ ZmqLogger::Instance()->AppendDebugMethod("Clip::get_transform (Set Alpha & Opacity)", "alpha_value", alpha_value, "frame->number", frame->number);
}
/* RESIZE SOURCE IMAGE - based on scale type */
@@ -1332,21 +1336,21 @@ void Clip::apply_keyframes(std::shared_ptr frame, int width, int height)
source_size.scale(width, height, Qt::KeepAspectRatio);
// Debug output
- ZmqLogger::Instance()->AppendDebugMethod("Clip::apply_keyframes (Scale: SCALE_FIT)", "frame->number", frame->number, "source_width", source_size.width(), "source_height", source_size.height());
+ ZmqLogger::Instance()->AppendDebugMethod("Clip::get_transform (Scale: SCALE_FIT)", "frame->number", frame->number, "source_width", source_size.width(), "source_height", source_size.height());
break;
}
case (SCALE_STRETCH): {
source_size.scale(width, height, Qt::IgnoreAspectRatio);
// Debug output
- ZmqLogger::Instance()->AppendDebugMethod("Clip::apply_keyframes (Scale: SCALE_STRETCH)", "frame->number", frame->number, "source_width", source_size.width(), "source_height", source_size.height());
+ ZmqLogger::Instance()->AppendDebugMethod("Clip::get_transform (Scale: SCALE_STRETCH)", "frame->number", frame->number, "source_width", source_size.width(), "source_height", source_size.height());
break;
}
case (SCALE_CROP): {
source_size.scale(width, height, Qt::KeepAspectRatioByExpanding);
// Debug output
- ZmqLogger::Instance()->AppendDebugMethod("Clip::apply_keyframes (Scale: SCALE_CROP)", "frame->number", frame->number, "source_width", source_size.width(), "source_height", source_size.height());
+ ZmqLogger::Instance()->AppendDebugMethod("Clip::get_transform (Scale: SCALE_CROP)", "frame->number", frame->number, "source_width", source_size.width(), "source_height", source_size.height());
break;
}
case (SCALE_NONE): {
@@ -1358,7 +1362,7 @@ void Clip::apply_keyframes(std::shared_ptr frame, int width, int height)
source_size.scale(width * source_width_ratio, height * source_height_ratio, Qt::KeepAspectRatio);
// Debug output
- ZmqLogger::Instance()->AppendDebugMethod("Clip::apply_keyframes (Scale: SCALE_NONE)", "frame->number", frame->number, "source_width", source_size.width(), "source_height", source_size.height());
+ ZmqLogger::Instance()->AppendDebugMethod("Clip::get_transform (Scale: SCALE_NONE)", "frame->number", frame->number, "source_width", source_size.width(), "source_height", source_size.height());
break;
}
}
@@ -1484,7 +1488,7 @@ void Clip::apply_keyframes(std::shared_ptr frame, int width, int height)
}
// Debug output
- ZmqLogger::Instance()->AppendDebugMethod("Clip::apply_keyframes (Gravity)", "frame->number", frame->number, "source_clip->gravity", gravity, "scaled_source_width", scaled_source_width, "scaled_source_height", scaled_source_height);
+ ZmqLogger::Instance()->AppendDebugMethod("Clip::get_transform (Gravity)", "frame->number", frame->number, "source_clip->gravity", gravity, "scaled_source_width", scaled_source_width, "scaled_source_height", scaled_source_height);
QTransform transform;
@@ -1498,7 +1502,7 @@ void Clip::apply_keyframes(std::shared_ptr frame, int width, int height)
float origin_y_value = origin_y.GetValue(frame->number);
// Transform source image (if needed)
- ZmqLogger::Instance()->AppendDebugMethod("Clip::apply_keyframes (Build QTransform - if needed)", "frame->number", frame->number, "x", x, "y", y, "r", r, "sx", sx, "sy", sy);
+ ZmqLogger::Instance()->AppendDebugMethod("Clip::get_transform (Build QTransform - if needed)", "frame->number", frame->number, "x", x, "y", y, "r", r, "sx", sx, "sy", sy);
if (!isEqual(x, 0) || !isEqual(y, 0)) {
// TRANSLATE/MOVE CLIP
@@ -1520,56 +1524,5 @@ void Clip::apply_keyframes(std::shared_ptr frame, int width, int height)
transform.scale(source_width_scale, source_height_scale);
}
- // Debug output
- ZmqLogger::Instance()->AppendDebugMethod("Clip::apply_keyframes (Transform: Composite Image Layer: Prepare)", "frame->number", frame->number);
-
- /* COMPOSITE SOURCE IMAGE (LAYER) ONTO FINAL IMAGE */
- auto new_image = std::make_shared(QSize(width, height), source_image->format());
- new_image->fill(QColor(QString::fromStdString("#00000000")));
-
- // Load timeline's new frame image into a QPainter
- QPainter painter(new_image.get());
- painter.setRenderHints(QPainter::Antialiasing | QPainter::SmoothPixmapTransform | QPainter::TextAntialiasing, true);
-
- // Apply transform (translate, rotate, scale)
- painter.setTransform(transform);
-
- // Composite a new layer onto the image
- painter.setCompositionMode(QPainter::CompositionMode_SourceOver);
- painter.drawImage(0, 0, *source_image);
-
- if (timeline) {
- Timeline *t = (Timeline *) timeline;
-
- // Draw frame #'s on top of image (if needed)
- if (display != FRAME_DISPLAY_NONE) {
- std::stringstream frame_number_str;
- switch (display) {
- case (FRAME_DISPLAY_NONE):
- // This is only here to prevent unused-enum warnings
- break;
-
- case (FRAME_DISPLAY_CLIP):
- frame_number_str << frame->number;
- break;
-
- case (FRAME_DISPLAY_TIMELINE):
- frame_number_str << (position * t->info.fps.ToFloat()) + frame->number;
- break;
-
- case (FRAME_DISPLAY_BOTH):
- frame_number_str << (position * t->info.fps.ToFloat()) + frame->number << " (" << frame->number << ")";
- break;
- }
-
- // Draw frame number on top of image
- painter.setPen(QColor("#ffffff"));
- painter.drawText(20, 20, QString(frame_number_str.str().c_str()));
- }
- }
-
- painter.end();
-
- // Add new QImage to frame
- frame->AddImage(new_image);
+ return transform;
}
diff --git a/src/Clip.h b/src/Clip.h
index 7fe1c9144..7fff971bb 100644
--- a/src/Clip.h
+++ b/src/Clip.h
@@ -143,8 +143,11 @@ namespace openshot {
/// Apply effects to the source frame (if any)
void apply_effects(std::shared_ptr frame);
- /// Apply keyframes to the source frame (if any)
- void apply_keyframes(std::shared_ptr frame, int width, int height);
+ /// Apply keyframes to an openshot::Frame and use an existing QImage as a background image (if any)
+ void apply_keyframes(std::shared_ptr<Frame> frame, std::shared_ptr<QImage> background_canvas);
+
+ /// Get QTransform from keyframes
+ QTransform get_transform(std::shared_ptr<Frame> frame, int width, int height);
/// Get file extension
std::string get_file_extension(std::string path);
@@ -191,8 +194,8 @@ namespace openshot {
/// Destructor
virtual ~Clip();
- /// Get the cache object used by this clip
- CacheMemory* GetCache() override { return &cache; };
+ /// Get the cache object (always return NULL for this reader)
+ openshot::CacheMemory* GetCache() override { return NULL; };
/// Determine if reader is open or closed
bool IsOpen() override { return is_open; };
@@ -245,9 +248,9 @@ namespace openshot {
/// rendered.
///
/// @returns The modified openshot::Frame object
- /// @param frame This is ignored on Clip, due to caching optimizations. This frame instance is clobbered with the source frame.
+ /// @param background_frame The frame object to use as a background canvas (i.e. an existing Timeline openshot::Frame instance)
/// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline.
- std::shared_ptr GetFrame(std::shared_ptr frame, int64_t frame_number) override;
+ std::shared_ptr<Frame> GetFrame(std::shared_ptr<Frame> background_frame, int64_t frame_number);
/// Open the internal reader
void Open() override;
diff --git a/src/ClipBase.h b/src/ClipBase.h
index 9e30ea1d1..484b18e53 100644
--- a/src/ClipBase.h
+++ b/src/ClipBase.h
@@ -95,11 +95,11 @@ namespace openshot {
/// @brief This method is required for all derived classes of ClipBase, and returns a
/// modified openshot::Frame object
///
- /// The frame object is passed into this method and used as a starting point (pixels and audio).
+ /// The frame object is passed into this method and used as a starting point / background (pixels).
/// All Clip keyframes and effects are resolved into pixels.
///
/// @returns The modified openshot::Frame object
- /// @param frame The frame object that needs the clip or effect applied to it
+ /// @param frame This is ignored on Clip, due to caching optimizations. This frame instance is clobbered with the source frame.
/// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline.
virtual std::shared_ptr GetFrame(std::shared_ptr frame, int64_t frame_number) = 0;
diff --git a/src/FFmpegReader.cpp b/src/FFmpegReader.cpp
index 59276e815..a64e52303 100644
--- a/src/FFmpegReader.cpp
+++ b/src/FFmpegReader.cpp
@@ -93,22 +93,16 @@ FFmpegReader::FFmpegReader(const std::string& path, bool inspect_reader)
check_fps(false), enable_seek(true), is_open(false), seek_audio_frame_found(0), seek_video_frame_found(0),
prev_samples(0), prev_pts(0), pts_total(0), pts_counter(0), is_duration_known(false), largest_frame_processed(0),
current_video_frame(0), has_missing_frames(false), num_packets_since_video_frame(0), num_checks_since_final(0),
- packet(NULL) {
-
- // Configure OpenMP parallelism
- // Default number of threads per section
- omp_set_num_threads(OPEN_MP_NUM_PROCESSORS);
- // Allow nested parallel sections as deeply as supported
- omp_set_max_active_levels(OPEN_MP_MAX_ACTIVE);
+ packet(NULL), max_concurrent_frames(OPEN_MP_NUM_PROCESSORS) {
// Initialize FFMpeg, and register all formats and codecs
AV_REGISTER_ALL
AVCODEC_REGISTER_ALL
// Init cache
- working_cache.SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * info.fps.ToDouble() * 2, info.width, info.height, info.sample_rate, info.channels);
- missing_frames.SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * 2, info.width, info.height, info.sample_rate, info.channels);
- final_cache.SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * 2, info.width, info.height, info.sample_rate, info.channels);
+ working_cache.SetMaxBytesFromInfo(max_concurrent_frames * info.fps.ToDouble() * 2, info.width, info.height, info.sample_rate, info.channels);
+ missing_frames.SetMaxBytesFromInfo(max_concurrent_frames * 2, info.width, info.height, info.sample_rate, info.channels);
+ final_cache.SetMaxBytesFromInfo(max_concurrent_frames * 2, info.width, info.height, info.sample_rate, info.channels);
// Open and Close the reader, to populate its attributes (such as height, width, etc...)
if (inspect_reader) {
@@ -561,9 +555,9 @@ void FFmpegReader::Open() {
previous_packet_location.sample_start = 0;
// Adjust cache size based on size of frame and audio
- working_cache.SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * info.fps.ToDouble() * 2, info.width, info.height, info.sample_rate, info.channels);
- missing_frames.SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * 2, info.width, info.height, info.sample_rate, info.channels);
- final_cache.SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * 2, info.width, info.height, info.sample_rate, info.channels);
+ working_cache.SetMaxBytesFromInfo(max_concurrent_frames * info.fps.ToDouble() * 2, info.width, info.height, info.sample_rate, info.channels);
+ missing_frames.SetMaxBytesFromInfo(max_concurrent_frames * 2, info.width, info.height, info.sample_rate, info.channels);
+ final_cache.SetMaxBytesFromInfo(max_concurrent_frames * 2, info.width, info.height, info.sample_rate, info.channels);
// Mark as "open"
is_open = true;
@@ -847,47 +841,44 @@ std::shared_ptr FFmpegReader::GetFrame(int64_t requested_frame) {
// Return the cached frame
return frame;
} else {
-#pragma omp critical (ReadStream)
- {
- // Check the cache a 2nd time (due to a potential previous lock)
- frame = final_cache.GetFrame(requested_frame);
- if (frame) {
- // Debug output
- ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::GetFrame", "returned cached frame on 2nd look", requested_frame);
-
- // Return the cached frame
- } else {
- // Frame is not in cache
- // Reset seek count
- seek_count = 0;
-
- // Check for first frame (always need to get frame 1 before other frames, to correctly calculate offsets)
- if (last_frame == 0 && requested_frame != 1)
- // Get first frame
- ReadStream(1);
-
- // Are we within X frames of the requested frame?
- int64_t diff = requested_frame - last_frame;
- if (diff >= 1 && diff <= 20) {
- // Continue walking the stream
- frame = ReadStream(requested_frame);
- } else {
- // Greater than 30 frames away, or backwards, we need to seek to the nearest key frame
- if (enable_seek)
- // Only seek if enabled
- Seek(requested_frame);
-
- else if (!enable_seek && diff < 0) {
- // Start over, since we can't seek, and the requested frame is smaller than our position
- Close();
- Open();
- }
-
- // Then continue walking the stream
- frame = ReadStream(requested_frame);
- }
- }
- } //omp critical
+ // Check the cache a 2nd time (due to a potential previous lock)
+ frame = final_cache.GetFrame(requested_frame);
+ if (frame) {
+ // Debug output
+ ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::GetFrame", "returned cached frame on 2nd look", requested_frame);
+
+ // Return the cached frame
+ } else {
+ // Frame is not in cache
+ // Reset seek count
+ seek_count = 0;
+
+ // Check for first frame (always need to get frame 1 before other frames, to correctly calculate offsets)
+ if (last_frame == 0 && requested_frame != 1)
+ // Get first frame
+ ReadStream(1);
+
+ // Are we within X frames of the requested frame?
+ int64_t diff = requested_frame - last_frame;
+ if (diff >= 1 && diff <= 20) {
+ // Continue walking the stream
+ frame = ReadStream(requested_frame);
+ } else {
+ // Greater than 30 frames away, or backwards, we need to seek to the nearest key frame
+ if (enable_seek)
+ // Only seek if enabled
+ Seek(requested_frame);
+
+ else if (!enable_seek && diff < 0) {
+ // Start over, since we can't seek, and the requested frame is smaller than our position
+ Close();
+ Open();
+ }
+
+ // Then continue walking the stream
+ frame = ReadStream(requested_frame);
+ }
+ }
return frame;
}
}
@@ -902,141 +893,129 @@ std::shared_ptr FFmpegReader::ReadStream(int64_t requested_frame) {
// Minimum number of packets to process (for performance reasons)
int packets_processed = 0;
- int minimum_packets = OPEN_MP_NUM_PROCESSORS;
+ int minimum_packets = 1;
int max_packets = 4096;
// Debug output
- ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ReadStream", "requested_frame", requested_frame, "OPEN_MP_NUM_PROCESSORS", OPEN_MP_NUM_PROCESSORS);
-
-#pragma omp parallel
- {
-#pragma omp single
- {
- // Loop through the stream until the correct frame is found
- while (true) {
- // Get the next packet into a local variable called packet
- packet_error = GetNextPacket();
-
- int processing_video_frames_size = 0;
- int processing_audio_frames_size = 0;
- {
- const GenericScopedLock lock(processingCriticalSection);
- processing_video_frames_size = processing_video_frames.size();
- processing_audio_frames_size = processing_audio_frames.size();
- }
+ ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ReadStream", "requested_frame", requested_frame, "max_concurrent_frames", max_concurrent_frames);
- // Wait if too many frames are being processed
- while (processing_video_frames_size + processing_audio_frames_size >= minimum_packets) {
- std::this_thread::sleep_for(std::chrono::milliseconds(3));
- const GenericScopedLock lock(processingCriticalSection);
- processing_video_frames_size = processing_video_frames.size();
- processing_audio_frames_size = processing_audio_frames.size();
- }
+ // Loop through the stream until the correct frame is found
+ while (true) {
+ // Get the next packet into a local variable called packet
+ packet_error = GetNextPacket();
- // Get the next packet (if any)
- if (packet_error < 0) {
- // Break loop when no more packets found
- end_of_stream = true;
- break;
- }
+ int processing_video_frames_size = 0;
+ int processing_audio_frames_size = 0;
+ {
+ const GenericScopedLock lock(processingCriticalSection);
+ processing_video_frames_size = processing_video_frames.size();
+ processing_audio_frames_size = processing_audio_frames.size();
+ }
- // Debug output
- ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ReadStream (GetNextPacket)", "requested_frame", requested_frame, "processing_video_frames_size", processing_video_frames_size, "processing_audio_frames_size", processing_audio_frames_size, "minimum_packets", minimum_packets, "packets_processed", packets_processed, "is_seeking", is_seeking);
+ // Wait if too many frames are being processed
+ while (processing_video_frames_size + processing_audio_frames_size >= minimum_packets) {
+ std::this_thread::sleep_for(std::chrono::milliseconds(3));
+ const GenericScopedLock lock(processingCriticalSection);
+ processing_video_frames_size = processing_video_frames.size();
+ processing_audio_frames_size = processing_audio_frames.size();
+ }
- // Video packet
- if (info.has_video && packet->stream_index == videoStream) {
- // Reset this counter, since we have a video packet
- num_packets_since_video_frame = 0;
+ // Get the next packet (if any)
+ if (packet_error < 0) {
+ // Break loop when no more packets found
+ end_of_stream = true;
+ break;
+ }
- // Check the status of a seek (if any)
- if (is_seeking)
-#pragma omp critical (openshot_seek)
- check_seek = CheckSeek(true);
- else
- check_seek = false;
+ // Debug output
+ ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ReadStream (GetNextPacket)", "requested_frame", requested_frame, "processing_video_frames_size", processing_video_frames_size, "processing_audio_frames_size", processing_audio_frames_size, "minimum_packets", minimum_packets, "packets_processed", packets_processed, "is_seeking", is_seeking);
- if (check_seek) {
- // Jump to the next iteration of this loop
- continue;
- }
+ // Video packet
+ if (info.has_video && packet->stream_index == videoStream) {
+ // Reset this counter, since we have a video packet
+ num_packets_since_video_frame = 0;
- // Packet may become NULL on Close inside Seek if CheckSeek returns false
- if (!packet)
- // Jump to the next iteration of this loop
- continue;
+ // Check the status of a seek (if any)
+ if (is_seeking) {
+ check_seek = CheckSeek(true);
+ } else {
+ check_seek = false;
+ }
- // Get the AVFrame from the current packet
- frame_finished = GetAVFrame();
+ if (check_seek) {
+ // Jump to the next iteration of this loop
+ continue;
+ }
- // Check if the AVFrame is finished and set it
- if (frame_finished) {
- // Update PTS / Frame Offset (if any)
- UpdatePTSOffset(true);
+ // Packet may become NULL on Close inside Seek if CheckSeek returns false
+ if (!packet) {
+ // Jump to the next iteration of this loop
+ continue;
+ }
- // Process Video Packet
- ProcessVideoPacket(requested_frame);
+ // Get the AVFrame from the current packet
+ frame_finished = GetAVFrame();
- if (openshot::Settings::Instance()->WAIT_FOR_VIDEO_PROCESSING_TASK) {
- // Wait on each OMP task to complete before moving on to the next one. This slows
- // down processing considerably, but might be more stable on some systems.
-#pragma omp taskwait
- }
- }
+ // Check if the AVFrame is finished and set it
+ if (frame_finished) {
+ // Update PTS / Frame Offset (if any)
+ UpdatePTSOffset(true);
- }
- // Audio packet
- else if (info.has_audio && packet->stream_index == audioStream) {
- // Increment this (to track # of packets since the last video packet)
- num_packets_since_video_frame++;
-
- // Check the status of a seek (if any)
- if (is_seeking)
-#pragma omp critical (openshot_seek)
- check_seek = CheckSeek(false);
- else
- check_seek = false;
-
- if (check_seek) {
- // Jump to the next iteration of this loop
- continue;
- }
+ // Process Video Packet
+ ProcessVideoPacket(requested_frame);
+ }
- // Packet may become NULL on Close inside Seek if CheckSeek returns false
- if (!packet)
- // Jump to the next iteration of this loop
- continue;
+ }
+ // Audio packet
+ else if (info.has_audio && packet->stream_index == audioStream) {
+ // Increment this (to track # of packets since the last video packet)
+ num_packets_since_video_frame++;
+
+ // Check the status of a seek (if any)
+ if (is_seeking) {
+ check_seek = CheckSeek(false);
+ } else {
+ check_seek = false;
+ }
- // Update PTS / Frame Offset (if any)
- UpdatePTSOffset(false);
+ if (check_seek) {
+ // Jump to the next iteration of this loop
+ continue;
+ }
- // Determine related video frame and starting sample # from audio PTS
- AudioLocation location = GetAudioPTSLocation(packet->pts);
+ // Packet may become NULL on Close inside Seek if CheckSeek returns false
+ if (!packet) {
+ // Jump to the next iteration of this loop
+ continue;
+ }
- // Process Audio Packet
- ProcessAudioPacket(requested_frame, location.frame, location.sample_start);
- }
+ // Update PTS / Frame Offset (if any)
+ UpdatePTSOffset(false);
- // Check if working frames are 'finished'
- if (!is_seeking) {
- // Check for final frames
- CheckWorkingFrames(false, requested_frame);
- }
+ // Determine related video frame and starting sample # from audio PTS
+ AudioLocation location = GetAudioPTSLocation(packet->pts);
- // Check if requested 'final' frame is available
- bool is_cache_found = (final_cache.GetFrame(requested_frame) != NULL);
+ // Process Audio Packet
+ ProcessAudioPacket(requested_frame, location.frame, location.sample_start);
+ }
- // Increment frames processed
- packets_processed++;
+ // Check if working frames are 'finished'
+ if (!is_seeking) {
+ // Check for final frames
+ CheckWorkingFrames(false, requested_frame);
+ }
- // Break once the frame is found
- if ((is_cache_found && packets_processed >= minimum_packets) || packets_processed > max_packets)
- break;
+ // Check if requested 'final' frame is available
+ bool is_cache_found = (final_cache.GetFrame(requested_frame) != NULL);
- } // end while
+ // Increment frames processed
+ packets_processed++;
- } // end omp single
+ // Break once the frame is found
+ if ((is_cache_found && packets_processed >= minimum_packets) || packets_processed > max_packets)
+ break;
- } // end omp parallel
+ } // end while
// Debug output
ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ReadStream (Completed)", "packets_processed", packets_processed, "end_of_stream", end_of_stream, "largest_frame_processed", largest_frame_processed, "Working Cache Count", working_cache.Count());
@@ -1072,24 +1051,19 @@ std::shared_ptr FFmpegReader::ReadStream(int64_t requested_frame) {
int FFmpegReader::GetNextPacket() {
int found_packet = 0;
AVPacket *next_packet;
-#pragma omp critical(getnextpacket)
- {
- next_packet = new AVPacket();
- found_packet = av_read_frame(pFormatCtx, next_packet);
+ next_packet = new AVPacket();
+ found_packet = av_read_frame(pFormatCtx, next_packet);
-
- if (packet) {
- // Remove previous packet before getting next one
- RemoveAVPacket(packet);
- packet = NULL;
- }
-
- if (found_packet >= 0) {
- // Update current packet pointer
- packet = next_packet;
- }
- else
- delete next_packet;
+ if (packet) {
+ // Remove previous packet before getting next one
+ RemoveAVPacket(packet);
+ packet = NULL;
+ }
+ if (found_packet >= 0) {
+ // Update current packet pointer
+ packet = next_packet;
+ } else {
+ delete next_packet;
}
// Return if packet was found (or error number)
return found_packet;
@@ -1102,12 +1076,10 @@ bool FFmpegReader::GetAVFrame() {
// Decode video frame
AVFrame *next_frame = AV_ALLOCATE_FRAME();
-#pragma omp critical (packet_cache)
- {
-#if IS_FFMPEG_3_2
- frameFinished = 0;
- ret = avcodec_send_packet(pCodecCtx, packet);
+#if IS_FFMPEG_3_2
+ frameFinished = 0;
+ ret = avcodec_send_packet(pCodecCtx, packet);
#if HAVE_HW_ACCEL
// Get the format from the variables set in get_hw_dec_format
@@ -1186,7 +1158,6 @@ bool FFmpegReader::GetAVFrame() {
info.height);
}
#endif // IS_FFMPEG_3_2
- }
// deallocate the frame
AV_FREE_FRAME(&next_frame);
@@ -1271,142 +1242,133 @@ void FFmpegReader::ProcessVideoPacket(int64_t requested_frame) {
const GenericScopedLock lock(processingCriticalSection);
processing_video_frames[current_frame] = current_frame;
-#pragma omp task firstprivate(current_frame, my_frame, height, width, video_length, pix_fmt)
- {
- // Create variables for a RGB Frame (since most videos are not in RGB, we must convert it)
- AVFrame *pFrameRGB = NULL;
- int numBytes;
- uint8_t *buffer = NULL;
-
- // Allocate an AVFrame structure
- pFrameRGB = AV_ALLOCATE_FRAME();
- if (pFrameRGB == NULL)
- throw OutOfBoundsFrame("Convert Image Broke!", current_frame, video_length);
-
- // Determine the max size of this source image (based on the timeline's size, the scaling mode,
- // and the scaling keyframes). This is a performance improvement, to keep the images as small as possible,
- // without losing quality. NOTE: We cannot go smaller than the timeline itself, or the add_layer timeline
- // method will scale it back to timeline size before scaling it smaller again. This needs to be fixed in
- // the future.
- int max_width = info.width;
- int max_height = info.height;
-
- Clip *parent = (Clip *) ParentClip();
- if (parent) {
- if (parent->ParentTimeline()) {
- // Set max width/height based on parent clip's timeline (if attached to a timeline)
- max_width = parent->ParentTimeline()->preview_width;
- max_height = parent->ParentTimeline()->preview_height;
- }
- if (parent->scale == SCALE_FIT || parent->scale == SCALE_STRETCH) {
- // Best fit or Stretch scaling (based on max timeline size * scaling keyframes)
- float max_scale_x = parent->scale_x.GetMaxPoint().co.Y;
- float max_scale_y = parent->scale_y.GetMaxPoint().co.Y;
- max_width = std::max(float(max_width), max_width * max_scale_x);
- max_height = std::max(float(max_height), max_height * max_scale_y);
-
- } else if (parent->scale == SCALE_CROP) {
- // Cropping scale mode (based on max timeline size * cropped size * scaling keyframes)
- float max_scale_x = parent->scale_x.GetMaxPoint().co.Y;
- float max_scale_y = parent->scale_y.GetMaxPoint().co.Y;
- QSize width_size(max_width * max_scale_x,
- round(max_width / (float(info.width) / float(info.height))));
- QSize height_size(round(max_height / (float(info.height) / float(info.width))),
- max_height * max_scale_y);
- // respect aspect ratio
- if (width_size.width() >= max_width && width_size.height() >= max_height) {
- max_width = std::max(max_width, width_size.width());
- max_height = std::max(max_height, width_size.height());
- } else {
- max_width = std::max(max_width, height_size.width());
- max_height = std::max(max_height, height_size.height());
- }
-
- } else {
- // No scaling, use original image size (slower)
- max_width = info.width;
- max_height = info.height;
- }
+ // Create variables for a RGB Frame (since most videos are not in RGB, we must convert it)
+ AVFrame *pFrameRGB = NULL;
+ uint8_t *buffer = NULL;
+
+ // Allocate an AVFrame structure
+ pFrameRGB = AV_ALLOCATE_FRAME();
+ if (pFrameRGB == NULL)
+ throw OutOfBoundsFrame("Convert Image Broke!", current_frame, video_length);
+
+ // Determine the max size of this source image (based on the timeline's size, the scaling mode,
+ // and the scaling keyframes). This is a performance improvement, to keep the images as small as possible,
+ // without losing quality. NOTE: We cannot go smaller than the timeline itself, or the add_layer timeline
+ // method will scale it back to timeline size before scaling it smaller again. This needs to be fixed in
+ // the future.
+ int max_width = info.width;
+ int max_height = info.height;
+
+ Clip *parent = (Clip *) ParentClip();
+ if (parent) {
+ if (parent->ParentTimeline()) {
+ // Set max width/height based on parent clip's timeline (if attached to a timeline)
+ max_width = parent->ParentTimeline()->preview_width;
+ max_height = parent->ParentTimeline()->preview_height;
}
-
- // Determine if image needs to be scaled (for performance reasons)
- int original_height = height;
- if (max_width != 0 && max_height != 0 && max_width < width && max_height < height) {
- // Override width and height (but maintain aspect ratio)
- float ratio = float(width) / float(height);
- int possible_width = round(max_height * ratio);
- int possible_height = round(max_width / ratio);
-
- if (possible_width <= max_width) {
- // use calculated width, and max_height
- width = possible_width;
- height = max_height;
+ if (parent->scale == SCALE_FIT || parent->scale == SCALE_STRETCH) {
+ // Best fit or Stretch scaling (based on max timeline size * scaling keyframes)
+ float max_scale_x = parent->scale_x.GetMaxPoint().co.Y;
+ float max_scale_y = parent->scale_y.GetMaxPoint().co.Y;
+ max_width = std::max(float(max_width), max_width * max_scale_x);
+ max_height = std::max(float(max_height), max_height * max_scale_y);
+
+ } else if (parent->scale == SCALE_CROP) {
+ // Cropping scale mode (based on max timeline size * cropped size * scaling keyframes)
+ float max_scale_x = parent->scale_x.GetMaxPoint().co.Y;
+ float max_scale_y = parent->scale_y.GetMaxPoint().co.Y;
+ QSize width_size(max_width * max_scale_x,
+ round(max_width / (float(info.width) / float(info.height))));
+ QSize height_size(round(max_height / (float(info.height) / float(info.width))),
+ max_height * max_scale_y);
+ // respect aspect ratio
+ if (width_size.width() >= max_width && width_size.height() >= max_height) {
+ max_width = std::max(max_width, width_size.width());
+ max_height = std::max(max_height, width_size.height());
} else {
- // use max_width, and calculated height
- width = max_width;
- height = possible_height;
+ max_width = std::max(max_width, height_size.width());
+ max_height = std::max(max_height, height_size.height());
}
- }
- // Determine required buffer size and allocate buffer
- numBytes = AV_GET_IMAGE_SIZE(PIX_FMT_RGBA, width, height);
-
-#pragma omp critical (video_buffer)
- buffer = (uint8_t *) av_malloc(numBytes * sizeof(uint8_t));
-
- // Copy picture data from one AVFrame (or AVPicture) to another one.
- AV_COPY_PICTURE_DATA(pFrameRGB, buffer, PIX_FMT_RGBA, width, height);
+ } else {
+ // No scaling, use original image size (slower)
+ max_width = info.width;
+ max_height = info.height;
+ }
+ }
- int scale_mode = SWS_FAST_BILINEAR;
- if (openshot::Settings::Instance()->HIGH_QUALITY_SCALING) {
- scale_mode = SWS_BICUBIC;
+ // Determine if image needs to be scaled (for performance reasons)
+ int original_height = height;
+ if (max_width != 0 && max_height != 0 && max_width < width && max_height < height) {
+ // Override width and height (but maintain aspect ratio)
+ float ratio = float(width) / float(height);
+ int possible_width = round(max_height * ratio);
+ int possible_height = round(max_width / ratio);
+
+ if (possible_width <= max_width) {
+ // use calculated width, and max_height
+ width = possible_width;
+ height = max_height;
+ } else {
+ // use max_width, and calculated height
+ width = max_width;
+ height = possible_height;
}
- SwsContext *img_convert_ctx = sws_getContext(info.width, info.height, AV_GET_CODEC_PIXEL_FORMAT(pStream, pCodecCtx), width,
- height, PIX_FMT_RGBA, scale_mode, NULL, NULL, NULL);
+ }
- // Resize / Convert to RGB
- sws_scale(img_convert_ctx, my_frame->data, my_frame->linesize, 0,
- original_height, pFrameRGB->data, pFrameRGB->linesize);
+ // Determine required buffer size and allocate buffer
+ const int bytes_per_pixel = 4;
+ int buffer_size = width * height * bytes_per_pixel;
+ buffer = new unsigned char[buffer_size]();
- // Create or get the existing frame object
- std::shared_ptr f = CreateFrame(current_frame);
+ // Copy picture data from one AVFrame (or AVPicture) to another one.
+ AV_COPY_PICTURE_DATA(pFrameRGB, buffer, PIX_FMT_RGBA, width, height);
- // Add Image data to frame
- if (!ffmpeg_has_alpha(AV_GET_CODEC_PIXEL_FORMAT(pStream, pCodecCtx))) {
- // Add image with no alpha channel, Speed optimization
- f->AddImage(width, height, 4, QImage::Format_RGBA8888_Premultiplied, buffer);
- } else {
- // Add image with alpha channel (this will be converted to premultipled when needed, but is slower)
- f->AddImage(width, height, 4, QImage::Format_RGBA8888, buffer);
- }
+ int scale_mode = SWS_FAST_BILINEAR;
+ if (openshot::Settings::Instance()->HIGH_QUALITY_SCALING) {
+ scale_mode = SWS_BICUBIC;
+ }
+ SwsContext *img_convert_ctx = sws_getContext(info.width, info.height, AV_GET_CODEC_PIXEL_FORMAT(pStream, pCodecCtx), width,
+ height, PIX_FMT_RGBA, scale_mode, NULL, NULL, NULL);
- // Update working cache
- working_cache.Add(f);
+ // Resize / Convert to RGB
+ sws_scale(img_convert_ctx, my_frame->data, my_frame->linesize, 0,
+ original_height, pFrameRGB->data, pFrameRGB->linesize);
- // Keep track of last last_video_frame
-#pragma omp critical (video_buffer)
- last_video_frame = f;
+ // Create or get the existing frame object
+ std::shared_ptr f = CreateFrame(current_frame);
- // Free the RGB image
- av_free(buffer);
- AV_FREE_FRAME(&pFrameRGB);
+ // Add Image data to frame
+ if (!ffmpeg_has_alpha(AV_GET_CODEC_PIXEL_FORMAT(pStream, pCodecCtx))) {
+ // Add image with no alpha channel, Speed optimization
+ f->AddImage(width, height, bytes_per_pixel, QImage::Format_RGBA8888_Premultiplied, buffer);
+ } else {
+ // Add image with alpha channel (this will be converted to premultipled when needed, but is slower)
+ f->AddImage(width, height, bytes_per_pixel, QImage::Format_RGBA8888, buffer);
+ }
- // Remove frame and packet
- RemoveAVFrame(my_frame);
- sws_freeContext(img_convert_ctx);
+ // Update working cache
+ working_cache.Add(f);
- // Remove video frame from list of processing video frames
- {
- const GenericScopedLock lock(processingCriticalSection);
- processing_video_frames.erase(current_frame);
- processed_video_frames[current_frame] = current_frame;
- }
+ // Keep track of last last_video_frame
+ last_video_frame = f;
- // Debug output
- ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ProcessVideoPacket (After)", "requested_frame", requested_frame, "current_frame", current_frame, "f->number", f->number);
+ // Free the RGB image
+ AV_FREE_FRAME(&pFrameRGB);
- } // end omp task
+ // Remove frame and packet
+ RemoveAVFrame(my_frame);
+ sws_freeContext(img_convert_ctx);
+ // Remove video frame from list of processing video frames
+ {
+ const GenericScopedLock lock(processingCriticalSection);
+ processing_video_frames.erase(current_frame);
+ processed_video_frames[current_frame] = current_frame;
+ }
+
+ // Debug output
+ ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ProcessVideoPacket (After)", "requested_frame", requested_frame, "current_frame", current_frame, "f->number", f->number);
}
// Process an audio packet
@@ -1435,8 +1397,6 @@ void FFmpegReader::ProcessAudioPacket(int64_t requested_frame, int64_t target_fr
int packet_samples = 0;
int data_size = 0;
-#pragma omp critical (ProcessAudioPacket)
- {
#if IS_FFMPEG_3_2
int ret = 0;
frame_finished = 1;
@@ -1467,7 +1427,6 @@ void FFmpegReader::ProcessAudioPacket(int64_t requested_frame, int64_t target_fr
#else
int used = avcodec_decode_audio4(aCodecCtx, audio_frame, &frame_finished, packet);
#endif
- }
if (frame_finished) {
@@ -1763,7 +1722,7 @@ void FFmpegReader::Seek(int64_t requested_frame) {
seek_count++;
// If seeking near frame 1, we need to close and re-open the file (this is more reliable than seeking)
- int buffer_amount = std::max(OPEN_MP_NUM_PROCESSORS, 8);
+ int buffer_amount = std::max(max_concurrent_frames, 8);
if (requested_frame - buffer_amount < 20) {
// Close and re-open file (basically seeking to frame 1)
Close();
@@ -2208,7 +2167,7 @@ void FFmpegReader::CheckWorkingFrames(bool end_of_stream, int64_t requested_fram
break;
// Remove frames which are too old
- if (f && f->number < (requested_frame - (OPEN_MP_NUM_PROCESSORS * 2))) {
+ if (f->number < (requested_frame - (max_concurrent_frames * 2))) {
working_cache.Remove(f->number);
}
@@ -2416,13 +2375,10 @@ void FFmpegReader::RemoveAVFrame(AVFrame *remove_frame) {
// Remove pFrame (if exists)
if (remove_frame) {
// Free memory
-#pragma omp critical (packet_cache)
- {
- av_freep(&remove_frame->data[0]);
+ av_freep(&remove_frame->data[0]);
#ifndef WIN32
- AV_FREE_FRAME(&remove_frame);
+ AV_FREE_FRAME(&remove_frame);
#endif
- }
}
}
diff --git a/src/FFmpegReader.h b/src/FFmpegReader.h
index 78572cac0..b1758dd36 100644
--- a/src/FFmpegReader.h
+++ b/src/FFmpegReader.h
@@ -108,6 +108,7 @@ namespace openshot {
bool check_interlace;
bool check_fps;
bool has_missing_frames;
+ int max_concurrent_frames;
CacheMemory working_cache;
CacheMemory missing_frames;
diff --git a/src/FFmpegWriter.cpp b/src/FFmpegWriter.cpp
index 210a6e435..0cc4daa2d 100644
--- a/src/FFmpegWriter.cpp
+++ b/src/FFmpegWriter.cpp
@@ -98,12 +98,6 @@ FFmpegWriter::FFmpegWriter(const std::string& path) :
info.has_audio = false;
info.has_video = false;
- // Configure OpenMP parallelism
- // Default number of threads per block
- omp_set_num_threads(OPEN_MP_NUM_PROCESSORS);
- // Allow nested parallel sections as deeply as supported
- omp_set_max_active_levels(OPEN_MP_MAX_ACTIVE);
-
// Initialize FFMpeg, and register all formats and codecs
AV_REGISTER_ALL
@@ -728,85 +722,75 @@ void FFmpegWriter::write_queued_frames() {
// Create blank exception
bool has_error_encoding_video = false;
-#pragma omp parallel
- {
-#pragma omp single
- {
- // Process all audio frames (in a separate thread)
- if (info.has_audio && audio_st && !queued_audio_frames.empty())
- write_audio_packets(false);
+ // Process all audio frames (in a separate thread)
+ if (info.has_audio && audio_st && !queued_audio_frames.empty())
+ write_audio_packets(false);
- // Loop through each queued image frame
- while (!queued_video_frames.empty()) {
- // Get front frame (from the queue)
- std::shared_ptr frame = queued_video_frames.front();
+ // Loop through each queued image frame
+ while (!queued_video_frames.empty()) {
+ // Get front frame (from the queue)
+ std::shared_ptr frame = queued_video_frames.front();
- // Add to processed queue
- processed_frames.push_back(frame);
+ // Add to processed queue
+ processed_frames.push_back(frame);
- // Encode and add the frame to the output file
- if (info.has_video && video_st)
- process_video_packet(frame);
+ // Encode and add the frame to the output file
+ if (info.has_video && video_st)
+ process_video_packet(frame);
- // Remove front item
- queued_video_frames.pop_front();
+ // Remove front item
+ queued_video_frames.pop_front();
- } // end while
- } // end omp single
+ } // end while
-#pragma omp single
- {
- // Loop back through the frames (in order), and write them to the video file
- while (!processed_frames.empty()) {
- // Get front frame (from the queue)
- std::shared_ptr frame = processed_frames.front();
-
- if (info.has_video && video_st) {
- // Add to deallocate queue (so we can remove the AVFrames when we are done)
- deallocate_frames.push_back(frame);
-
- // Does this frame's AVFrame still exist
- if (av_frames.count(frame)) {
- // Get AVFrame
- AVFrame *frame_final = av_frames[frame];
-
- // Write frame to video file
- bool success = write_video_packet(frame, frame_final);
- if (!success)
- has_error_encoding_video = true;
- }
- }
- // Remove front item
- processed_frames.pop_front();
- }
+ // Loop back through the frames (in order), and write them to the video file
+ while (!processed_frames.empty()) {
+ // Get front frame (from the queue)
+ std::shared_ptr frame = processed_frames.front();
- // Loop through, and deallocate AVFrames
- while (!deallocate_frames.empty()) {
- // Get front frame (from the queue)
- std::shared_ptr frame = deallocate_frames.front();
+ if (info.has_video && video_st) {
+ // Add to deallocate queue (so we can remove the AVFrames when we are done)
+ deallocate_frames.push_back(frame);
- // Does this frame's AVFrame still exist
- if (av_frames.count(frame)) {
- // Get AVFrame
- AVFrame *av_frame = av_frames[frame];
+ // Does this frame's AVFrame still exist
+ if (av_frames.count(frame)) {
+ // Get AVFrame
+ AVFrame *frame_final = av_frames[frame];
- // Deallocate AVPicture and AVFrame
- av_freep(&(av_frame->data[0]));
- AV_FREE_FRAME(&av_frame);
- av_frames.erase(frame);
- }
+ // Write frame to video file
+ bool success = write_video_packet(frame, frame_final);
+ if (!success)
+ has_error_encoding_video = true;
+ }
+ }
- // Remove front item
- deallocate_frames.pop_front();
- }
+ // Remove front item
+ processed_frames.pop_front();
+ }
+
+ // Loop through, and deallocate AVFrames
+ while (!deallocate_frames.empty()) {
+ // Get front frame (from the queue)
+ std::shared_ptr frame = deallocate_frames.front();
+
+ // Does this frame's AVFrame still exist
+ if (av_frames.count(frame)) {
+ // Get AVFrame
+ AVFrame *av_frame = av_frames[frame];
- // Done writing
- is_writing = false;
+ // Deallocate AVPicture and AVFrame
+ av_freep(&(av_frame->data[0]));
+ AV_FREE_FRAME(&av_frame);
+ av_frames.erase(frame);
+ }
- } // end omp single
+ // Remove front item
+ deallocate_frames.pop_front();
+ }
- } // end omp parallel
+ // Done writing
+ is_writing = false;
// Raise exception from main thread
if (has_error_encoding_video)
@@ -881,29 +865,26 @@ void FFmpegWriter::flush_encoders() {
int error_code = 0;
#if IS_FFMPEG_3_2
- #pragma omp critical (write_video_packet)
- {
- // Encode video packet (latest version of FFmpeg)
- error_code = avcodec_send_frame(video_codec_ctx, NULL);
- got_packet = 0;
- while (error_code >= 0) {
- error_code = avcodec_receive_packet(video_codec_ctx, &pkt);
- if (error_code == AVERROR(EAGAIN)|| error_code == AVERROR_EOF) {
- got_packet = 0;
- // Write packet
- avcodec_flush_buffers(video_codec_ctx);
- break;
- }
- if (pkt.pts != AV_NOPTS_VALUE)
- pkt.pts = av_rescale_q(pkt.pts, video_codec_ctx->time_base, video_st->time_base);
- if (pkt.dts != AV_NOPTS_VALUE)
- pkt.dts = av_rescale_q(pkt.dts, video_codec_ctx->time_base, video_st->time_base);
- if (pkt.duration > 0)
- pkt.duration = av_rescale_q(pkt.duration, video_codec_ctx->time_base, video_st->time_base);
- pkt.stream_index = video_st->index;
- error_code = av_interleaved_write_frame(oc, &pkt);
- }
- }
+ // Encode video packet (latest version of FFmpeg)
+ error_code = avcodec_send_frame(video_codec_ctx, NULL);
+ got_packet = 0;
+ while (error_code >= 0) {
+ error_code = avcodec_receive_packet(video_codec_ctx, &pkt);
+ if (error_code == AVERROR(EAGAIN)|| error_code == AVERROR_EOF) {
+ got_packet = 0;
+ // Write packet
+ avcodec_flush_buffers(video_codec_ctx);
+ break;
+ }
+ if (pkt.pts != AV_NOPTS_VALUE)
+ pkt.pts = av_rescale_q(pkt.pts, video_codec_ctx->time_base, video_st->time_base);
+ if (pkt.dts != AV_NOPTS_VALUE)
+ pkt.dts = av_rescale_q(pkt.dts, video_codec_ctx->time_base, video_st->time_base);
+ if (pkt.duration > 0)
+ pkt.duration = av_rescale_q(pkt.duration, video_codec_ctx->time_base, video_st->time_base);
+ pkt.stream_index = video_st->index;
+ error_code = av_interleaved_write_frame(oc, &pkt);
+ }
#else // IS_FFMPEG_3_2
// Encode video packet (older than FFmpeg 3.2)
@@ -1554,408 +1535,404 @@ void FFmpegWriter::open_video(AVFormatContext *oc, AVStream *st) {
// write all queued frames' audio to the video file
void FFmpegWriter::write_audio_packets(bool is_final) {
-#pragma omp task firstprivate(is_final)
- {
- // Init audio buffers / variables
- int total_frame_samples = 0;
- int frame_position = 0;
- int channels_in_frame = 0;
- int sample_rate_in_frame = 0;
- int samples_in_frame = 0;
- ChannelLayout channel_layout_in_frame = LAYOUT_MONO; // default channel layout
-
- // Create a new array (to hold all S16 audio samples, for the current queued frames
- unsigned int all_queued_samples_size = sizeof(int16_t) * (queued_audio_frames.size() * AVCODEC_MAX_AUDIO_FRAME_SIZE);
- int16_t *all_queued_samples = (int16_t *) av_malloc(all_queued_samples_size);
- int16_t *all_resampled_samples = NULL;
- int16_t *final_samples_planar = NULL;
- int16_t *final_samples = NULL;
-
- // Loop through each queued audio frame
- while (!queued_audio_frames.empty()) {
- // Get front frame (from the queue)
- std::shared_ptr<Frame> frame = queued_audio_frames.front();
-
- // Get the audio details from this frame
- sample_rate_in_frame = frame->SampleRate();
- samples_in_frame = frame->GetAudioSamplesCount();
- channels_in_frame = frame->GetAudioChannelsCount();
- channel_layout_in_frame = frame->ChannelsLayout();
-
- // Get audio sample array
- float *frame_samples_float = NULL;
- // Get samples interleaved together (c1 c2 c1 c2 c1 c2)
- frame_samples_float = frame->GetInterleavedAudioSamples(sample_rate_in_frame, NULL, &samples_in_frame);
-
- // Calculate total samples
- total_frame_samples = samples_in_frame * channels_in_frame;
-
- // Translate audio sample values back to 16 bit integers with saturation
- const int16_t max16 = 32767;
- const int16_t min16 = -32768;
- for (int s = 0; s < total_frame_samples; s++, frame_position++) {
- float valF = frame_samples_float[s] * (1 << 15);
- int16_t conv;
- if (valF > max16) {
- conv = max16;
- } else if (valF < min16) {
- conv = min16;
- } else {
- conv = int(valF + 32768.5) - 32768; // +0.5 is for rounding
- }
-
- // Copy into buffer
- all_queued_samples[frame_position] = conv;
- }
-
- // Deallocate float array
- delete[] frame_samples_float;
-
- // Remove front item
- queued_audio_frames.pop_front();
-
- } // end while
+ // Init audio buffers / variables
+ int total_frame_samples = 0;
+ int frame_position = 0;
+ int channels_in_frame = 0;
+ int sample_rate_in_frame = 0;
+ int samples_in_frame = 0;
+ ChannelLayout channel_layout_in_frame = LAYOUT_MONO; // default channel layout
+
+ // Create a new array (to hold all S16 audio samples, for the current queued frames)
+ unsigned int all_queued_samples_size = sizeof(int16_t) * (queued_audio_frames.size() * AVCODEC_MAX_AUDIO_FRAME_SIZE);
+ int16_t *all_queued_samples = (int16_t *) av_malloc(all_queued_samples_size);
+ int16_t *all_resampled_samples = NULL;
+ int16_t *final_samples_planar = NULL;
+ int16_t *final_samples = NULL;
+
+ // Loop through each queued audio frame
+ while (!queued_audio_frames.empty()) {
+ // Get front frame (from the queue)
+ std::shared_ptr<Frame> frame = queued_audio_frames.front();
+
+ // Get the audio details from this frame
+ sample_rate_in_frame = frame->SampleRate();
+ samples_in_frame = frame->GetAudioSamplesCount();
+ channels_in_frame = frame->GetAudioChannelsCount();
+ channel_layout_in_frame = frame->ChannelsLayout();
+
+ // Get audio sample array
+ float *frame_samples_float = NULL;
+ // Get samples interleaved together (c1 c2 c1 c2 c1 c2)
+ frame_samples_float = frame->GetInterleavedAudioSamples(sample_rate_in_frame, NULL, &samples_in_frame);
+
+ // Calculate total samples
+ total_frame_samples = samples_in_frame * channels_in_frame;
+
+ // Translate audio sample values back to 16 bit integers with saturation
+ const int16_t max16 = 32767;
+ const int16_t min16 = -32768;
+ for (int s = 0; s < total_frame_samples; s++, frame_position++) {
+ float valF = frame_samples_float[s] * (1 << 15);
+ int16_t conv;
+ if (valF > max16) {
+ conv = max16;
+ } else if (valF < min16) {
+ conv = min16;
+ } else {
+ conv = int(valF + 32768.5) - 32768; // +0.5 is for rounding
+ }
+
+ // Copy into buffer
+ all_queued_samples[frame_position] = conv;
+ }
+ // Deallocate float array
+ delete[] frame_samples_float;
- // Update total samples (since we've combined all queued frames)
- total_frame_samples = frame_position;
- int remaining_frame_samples = total_frame_samples;
- int samples_position = 0;
+ // Remove front item
+ queued_audio_frames.pop_front();
+ } // end while
- ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_audio_packets", "is_final", is_final, "total_frame_samples", total_frame_samples, "channel_layout_in_frame", channel_layout_in_frame, "channels_in_frame", channels_in_frame, "samples_in_frame", samples_in_frame, "LAYOUT_MONO", LAYOUT_MONO);
- // Keep track of the original sample format
- AVSampleFormat output_sample_fmt = audio_codec_ctx->sample_fmt;
+ // Update total samples (since we've combined all queued frames)
+ total_frame_samples = frame_position;
+ int remaining_frame_samples = total_frame_samples;
+ int samples_position = 0;
- AVFrame *audio_frame = NULL;
- if (!is_final) {
- // Create input frame (and allocate arrays)
- audio_frame = AV_ALLOCATE_FRAME();
- AV_RESET_FRAME(audio_frame);
- audio_frame->nb_samples = total_frame_samples / channels_in_frame;
- // Fill input frame with sample data
- int error_code = avcodec_fill_audio_frame(audio_frame, channels_in_frame, AV_SAMPLE_FMT_S16, (uint8_t *) all_queued_samples, all_queued_samples_size, 0);
- if (error_code < 0) {
- ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_audio_packets ERROR [" + (std::string) av_err2str(error_code) + "]", "error_code", error_code);
- }
+ ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_audio_packets", "is_final", is_final, "total_frame_samples", total_frame_samples, "channel_layout_in_frame", channel_layout_in_frame, "channels_in_frame", channels_in_frame, "samples_in_frame", samples_in_frame, "LAYOUT_MONO", LAYOUT_MONO);
- // Do not convert audio to planar format (yet). We need to keep everything interleaved at this point.
- switch (audio_codec_ctx->sample_fmt) {
- case AV_SAMPLE_FMT_FLTP: {
- output_sample_fmt = AV_SAMPLE_FMT_FLT;
- break;
- }
- case AV_SAMPLE_FMT_S32P: {
- output_sample_fmt = AV_SAMPLE_FMT_S32;
- break;
- }
- case AV_SAMPLE_FMT_S16P: {
- output_sample_fmt = AV_SAMPLE_FMT_S16;
- break;
- }
- case AV_SAMPLE_FMT_U8P: {
- output_sample_fmt = AV_SAMPLE_FMT_U8;
- break;
- }
- default: {
- // This is only here to silence unused-enum warnings
- break;
- }
- }
-
- // Update total samples & input frame size (due to bigger or smaller data types)
- total_frame_samples *= (float(info.sample_rate) / sample_rate_in_frame); // adjust for different byte sizes
- total_frame_samples *= (float(info.channels) / channels_in_frame); // adjust for different # of channels
-
- // Create output frame (and allocate arrays)
- AVFrame *audio_converted = AV_ALLOCATE_FRAME();
- AV_RESET_FRAME(audio_converted);
- audio_converted->nb_samples = total_frame_samples / channels_in_frame;
- av_samples_alloc(audio_converted->data, audio_converted->linesize, info.channels, audio_converted->nb_samples, output_sample_fmt, 0);
-
- ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_audio_packets (1st resampling)", "in_sample_fmt", AV_SAMPLE_FMT_S16, "out_sample_fmt", output_sample_fmt, "in_sample_rate", sample_rate_in_frame, "out_sample_rate", info.sample_rate, "in_channels", channels_in_frame, "out_channels", info.channels);
-
- // setup resample context
- if (!avr) {
- avr = SWR_ALLOC();
- av_opt_set_int(avr, "in_channel_layout", channel_layout_in_frame, 0);
- av_opt_set_int(avr, "out_channel_layout", info.channel_layout, 0);
- av_opt_set_int(avr, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
- av_opt_set_int(avr, "out_sample_fmt", output_sample_fmt, 0); // planar not allowed here
- av_opt_set_int(avr, "in_sample_rate", sample_rate_in_frame, 0);
- av_opt_set_int(avr, "out_sample_rate", info.sample_rate, 0);
- av_opt_set_int(avr, "in_channels", channels_in_frame, 0);
- av_opt_set_int(avr, "out_channels", info.channels, 0);
- SWR_INIT(avr);
- }
- int nb_samples = 0;
-
- // Convert audio samples
- nb_samples = SWR_CONVERT(
- avr, // audio resample context
- audio_converted->data, // output data pointers
- audio_converted->linesize[0], // output plane size, in bytes. (0 if unknown)
- audio_converted->nb_samples, // maximum number of samples that the output buffer can hold
- audio_frame->data, // input data pointers
- audio_frame->linesize[0], // input plane size, in bytes (0 if unknown)
- audio_frame->nb_samples // number of input samples to convert
- );
-
- // Set remaining samples
- remaining_frame_samples = nb_samples * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16);
-
- // Create a new array (to hold all resampled S16 audio samples)
- all_resampled_samples = (int16_t *) av_malloc(
- sizeof(int16_t) * nb_samples * info.channels
- * (av_get_bytes_per_sample(output_sample_fmt) /
- av_get_bytes_per_sample(AV_SAMPLE_FMT_S16) )
- );
-
- // Copy audio samples over original samples
- memcpy(all_resampled_samples, audio_converted->data[0], nb_samples * info.channels * av_get_bytes_per_sample(output_sample_fmt));
-
- // Remove converted audio
- av_freep(&(audio_frame->data[0]));
- AV_FREE_FRAME(&audio_frame);
- av_freep(&audio_converted->data[0]);
- AV_FREE_FRAME(&audio_converted);
- all_queued_samples = NULL; // this array cleared with above call
-
- ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_audio_packets (Successfully completed 1st resampling)", "nb_samples", nb_samples, "remaining_frame_samples", remaining_frame_samples);
- }
-
- // Loop until no more samples
- while (remaining_frame_samples > 0 || is_final) {
- // Get remaining samples needed for this packet
- int remaining_packet_samples = (audio_input_frame_size * info.channels) - audio_input_position;
-
- // Determine how many samples we need
- int diff = 0;
- if (remaining_frame_samples >= remaining_packet_samples) {
- diff = remaining_packet_samples;
- } else {
- diff = remaining_frame_samples;
- }
-
- // Copy frame samples into the packet samples array
- if (!is_final)
- //TODO: Make this more sane
- memcpy(
- samples + (audio_input_position
- * (av_get_bytes_per_sample(output_sample_fmt) /
- av_get_bytes_per_sample(AV_SAMPLE_FMT_S16) )
- ),
- all_resampled_samples + samples_position,
- diff * av_get_bytes_per_sample(output_sample_fmt)
- );
-
- // Increment counters
- audio_input_position += diff;
- samples_position += diff * (av_get_bytes_per_sample(output_sample_fmt) / av_get_bytes_per_sample(AV_SAMPLE_FMT_S16));
- remaining_frame_samples -= diff;
-
- // Do we have enough samples to proceed?
- if (audio_input_position < (audio_input_frame_size * info.channels) && !is_final)
- // Not enough samples to encode... so wait until the next frame
- break;
+ // Keep track of the original sample format
+ AVSampleFormat output_sample_fmt = audio_codec_ctx->sample_fmt;
- // Convert to planar (if needed by audio codec)
- AVFrame *frame_final = AV_ALLOCATE_FRAME();
- AV_RESET_FRAME(frame_final);
- if (av_sample_fmt_is_planar(audio_codec_ctx->sample_fmt)) {
- ZmqLogger::Instance()->AppendDebugMethod(
- "FFmpegWriter::write_audio_packets (2nd resampling for Planar formats)",
- "in_sample_fmt", output_sample_fmt,
- "out_sample_fmt", audio_codec_ctx->sample_fmt,
- "in_sample_rate", info.sample_rate,
- "out_sample_rate", info.sample_rate,
- "in_channels", info.channels,
- "out_channels", info.channels
- );
-
- // setup resample context
- if (!avr_planar) {
- avr_planar = SWR_ALLOC();
- av_opt_set_int(avr_planar, "in_channel_layout", info.channel_layout, 0);
- av_opt_set_int(avr_planar, "out_channel_layout", info.channel_layout, 0);
- av_opt_set_int(avr_planar, "in_sample_fmt", output_sample_fmt, 0);
- av_opt_set_int(avr_planar, "out_sample_fmt", audio_codec_ctx->sample_fmt, 0); // planar not allowed here
- av_opt_set_int(avr_planar, "in_sample_rate", info.sample_rate, 0);
- av_opt_set_int(avr_planar, "out_sample_rate", info.sample_rate, 0);
- av_opt_set_int(avr_planar, "in_channels", info.channels, 0);
- av_opt_set_int(avr_planar, "out_channels", info.channels, 0);
- SWR_INIT(avr_planar);
- }
+ AVFrame *audio_frame = NULL;
+ if (!is_final) {
+ // Create input frame (and allocate arrays)
+ audio_frame = AV_ALLOCATE_FRAME();
+ AV_RESET_FRAME(audio_frame);
+ audio_frame->nb_samples = total_frame_samples / channels_in_frame;
- // Create input frame (and allocate arrays)
- audio_frame = AV_ALLOCATE_FRAME();
- AV_RESET_FRAME(audio_frame);
- audio_frame->nb_samples = audio_input_position / info.channels;
-
- // Create a new array
- final_samples_planar = (int16_t *) av_malloc(
- sizeof(int16_t) * audio_frame->nb_samples * info.channels
- * (av_get_bytes_per_sample(output_sample_fmt) /
- av_get_bytes_per_sample(AV_SAMPLE_FMT_S16) )
- );
-
- // Copy audio into buffer for frame
- memcpy(final_samples_planar, samples, audio_frame->nb_samples * info.channels * av_get_bytes_per_sample(output_sample_fmt));
-
- // Fill input frame with sample data
- avcodec_fill_audio_frame(audio_frame, info.channels, output_sample_fmt,
- (uint8_t *) final_samples_planar, audio_encoder_buffer_size, 0);
-
- // Create output frame (and allocate arrays)
- frame_final->nb_samples = audio_input_frame_size;
- av_samples_alloc(frame_final->data, frame_final->linesize, info.channels,
- frame_final->nb_samples, audio_codec_ctx->sample_fmt, 0);
-
- // Convert audio samples
- int nb_samples = SWR_CONVERT(
- avr_planar, // audio resample context
- frame_final->data, // output data pointers
- frame_final->linesize[0], // output plane size, in bytes. (0 if unknown)
- frame_final->nb_samples, // maximum number of samples that the output buffer can hold
- audio_frame->data, // input data pointers
- audio_frame->linesize[0], // input plane size, in bytes (0 if unknown)
- audio_frame->nb_samples // number of input samples to convert
- );
-
- // Copy audio samples over original samples
- if (nb_samples > 0) {
- memcpy(samples, frame_final->data[0],
- nb_samples * av_get_bytes_per_sample(audio_codec_ctx->sample_fmt) * info.channels);
+ // Fill input frame with sample data
+ int error_code = avcodec_fill_audio_frame(audio_frame, channels_in_frame, AV_SAMPLE_FMT_S16, (uint8_t *) all_queued_samples, all_queued_samples_size, 0);
+ if (error_code < 0) {
+ ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_audio_packets ERROR [" + (std::string) av_err2str(error_code) + "]", "error_code", error_code);
}
- // deallocate AVFrame
- av_freep(&(audio_frame->data[0]));
- AV_FREE_FRAME(&audio_frame);
- all_queued_samples = NULL; // this array cleared with above call
-
- ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_audio_packets (Successfully completed 2nd resampling for Planar formats)", "nb_samples", nb_samples);
-
- } else {
- // Create a new array
- final_samples = (int16_t *) av_malloc(
- sizeof(int16_t) * audio_input_position
- * (av_get_bytes_per_sample(audio_codec_ctx->sample_fmt) /
- av_get_bytes_per_sample(AV_SAMPLE_FMT_S16) )
- );
-
- // Copy audio into buffer for frame
- memcpy(final_samples, samples,
- audio_input_position * av_get_bytes_per_sample(audio_codec_ctx->sample_fmt));
+ // Do not convert audio to planar format (yet). We need to keep everything interleaved at this point.
+ switch (audio_codec_ctx->sample_fmt) {
+ case AV_SAMPLE_FMT_FLTP: {
+ output_sample_fmt = AV_SAMPLE_FMT_FLT;
+ break;
+ }
+ case AV_SAMPLE_FMT_S32P: {
+ output_sample_fmt = AV_SAMPLE_FMT_S32;
+ break;
+ }
+ case AV_SAMPLE_FMT_S16P: {
+ output_sample_fmt = AV_SAMPLE_FMT_S16;
+ break;
+ }
+ case AV_SAMPLE_FMT_U8P: {
+ output_sample_fmt = AV_SAMPLE_FMT_U8;
+ break;
+ }
+ default: {
+ // This is only here to silence unused-enum warnings
+ break;
+ }
+ }
- // Init the nb_samples property
- frame_final->nb_samples = audio_input_frame_size;
+ // Update total samples & input frame size (due to bigger or smaller data types)
+ total_frame_samples *= (float(info.sample_rate) / sample_rate_in_frame); // adjust for different byte sizes
+ total_frame_samples *= (float(info.channels) / channels_in_frame); // adjust for different # of channels
+
+ // Create output frame (and allocate arrays)
+ AVFrame *audio_converted = AV_ALLOCATE_FRAME();
+ AV_RESET_FRAME(audio_converted);
+ audio_converted->nb_samples = total_frame_samples / channels_in_frame;
+ av_samples_alloc(audio_converted->data, audio_converted->linesize, info.channels, audio_converted->nb_samples, output_sample_fmt, 0);
+
+ ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_audio_packets (1st resampling)", "in_sample_fmt", AV_SAMPLE_FMT_S16, "out_sample_fmt", output_sample_fmt, "in_sample_rate", sample_rate_in_frame, "out_sample_rate", info.sample_rate, "in_channels", channels_in_frame, "out_channels", info.channels);
+
+ // setup resample context
+ if (!avr) {
+ avr = SWR_ALLOC();
+ av_opt_set_int(avr, "in_channel_layout", channel_layout_in_frame, 0);
+ av_opt_set_int(avr, "out_channel_layout", info.channel_layout, 0);
+ av_opt_set_int(avr, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
+ av_opt_set_int(avr, "out_sample_fmt", output_sample_fmt, 0); // planar not allowed here
+ av_opt_set_int(avr, "in_sample_rate", sample_rate_in_frame, 0);
+ av_opt_set_int(avr, "out_sample_rate", info.sample_rate, 0);
+ av_opt_set_int(avr, "in_channels", channels_in_frame, 0);
+ av_opt_set_int(avr, "out_channels", info.channels, 0);
+ SWR_INIT(avr);
+ }
+ int nb_samples = 0;
+
+ // Convert audio samples
+ nb_samples = SWR_CONVERT(
+ avr, // audio resample context
+ audio_converted->data, // output data pointers
+ audio_converted->linesize[0], // output plane size, in bytes. (0 if unknown)
+ audio_converted->nb_samples, // maximum number of samples that the output buffer can hold
+ audio_frame->data, // input data pointers
+ audio_frame->linesize[0], // input plane size, in bytes (0 if unknown)
+ audio_frame->nb_samples // number of input samples to convert
+ );
+
+ // Set remaining samples
+ remaining_frame_samples = nb_samples * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16);
+
+ // Create a new array (to hold all resampled S16 audio samples)
+ all_resampled_samples = (int16_t *) av_malloc(
+ sizeof(int16_t) * nb_samples * info.channels
+ * (av_get_bytes_per_sample(output_sample_fmt) /
+ av_get_bytes_per_sample(AV_SAMPLE_FMT_S16) )
+ );
+
+ // Copy audio samples over original samples
+ memcpy(all_resampled_samples, audio_converted->data[0], nb_samples * info.channels * av_get_bytes_per_sample(output_sample_fmt));
+
+ // Remove converted audio
+ av_freep(&(audio_frame->data[0]));
+ AV_FREE_FRAME(&audio_frame);
+ av_freep(&audio_converted->data[0]);
+ AV_FREE_FRAME(&audio_converted);
+ all_queued_samples = NULL; // this array cleared with above call
+
+ ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_audio_packets (Successfully completed 1st resampling)", "nb_samples", nb_samples, "remaining_frame_samples", remaining_frame_samples);
+ }
+
+ // Loop until no more samples
+ while (remaining_frame_samples > 0 || is_final) {
+ // Get remaining samples needed for this packet
+ int remaining_packet_samples = (audio_input_frame_size * info.channels) - audio_input_position;
+
+ // Determine how many samples we need
+ int diff = 0;
+ if (remaining_frame_samples >= remaining_packet_samples) {
+ diff = remaining_packet_samples;
+ } else {
+ diff = remaining_frame_samples;
+ }
- // Fill the final_frame AVFrame with audio (non planar)
- avcodec_fill_audio_frame(frame_final, audio_codec_ctx->channels,
- audio_codec_ctx->sample_fmt, (uint8_t *) final_samples,
- audio_encoder_buffer_size, 0);
- }
+ // Copy frame samples into the packet samples array
+ if (!is_final)
+ //TODO: Make this more sane
+ memcpy(
+ samples + (audio_input_position
+ * (av_get_bytes_per_sample(output_sample_fmt) /
+ av_get_bytes_per_sample(AV_SAMPLE_FMT_S16) )
+ ),
+ all_resampled_samples + samples_position,
+ diff * av_get_bytes_per_sample(output_sample_fmt)
+ );
+
+ // Increment counters
+ audio_input_position += diff;
+ samples_position += diff * (av_get_bytes_per_sample(output_sample_fmt) / av_get_bytes_per_sample(AV_SAMPLE_FMT_S16));
+ remaining_frame_samples -= diff;
+
+ // Do we have enough samples to proceed?
+ if (audio_input_position < (audio_input_frame_size * info.channels) && !is_final)
+ // Not enough samples to encode... so wait until the next frame
+ break;
+
+ // Convert to planar (if needed by audio codec)
+ AVFrame *frame_final = AV_ALLOCATE_FRAME();
+ AV_RESET_FRAME(frame_final);
+ if (av_sample_fmt_is_planar(audio_codec_ctx->sample_fmt)) {
+ ZmqLogger::Instance()->AppendDebugMethod(
+ "FFmpegWriter::write_audio_packets (2nd resampling for Planar formats)",
+ "in_sample_fmt", output_sample_fmt,
+ "out_sample_fmt", audio_codec_ctx->sample_fmt,
+ "in_sample_rate", info.sample_rate,
+ "out_sample_rate", info.sample_rate,
+ "in_channels", info.channels,
+ "out_channels", info.channels
+ );
+
+ // setup resample context
+ if (!avr_planar) {
+ avr_planar = SWR_ALLOC();
+ av_opt_set_int(avr_planar, "in_channel_layout", info.channel_layout, 0);
+ av_opt_set_int(avr_planar, "out_channel_layout", info.channel_layout, 0);
+ av_opt_set_int(avr_planar, "in_sample_fmt", output_sample_fmt, 0);
+ av_opt_set_int(avr_planar, "out_sample_fmt", audio_codec_ctx->sample_fmt, 0); // planar format allowed here (codec requires planar)
+ av_opt_set_int(avr_planar, "in_sample_rate", info.sample_rate, 0);
+ av_opt_set_int(avr_planar, "out_sample_rate", info.sample_rate, 0);
+ av_opt_set_int(avr_planar, "in_channels", info.channels, 0);
+ av_opt_set_int(avr_planar, "out_channels", info.channels, 0);
+ SWR_INIT(avr_planar);
+ }
+
+ // Create input frame (and allocate arrays)
+ audio_frame = AV_ALLOCATE_FRAME();
+ AV_RESET_FRAME(audio_frame);
+ audio_frame->nb_samples = audio_input_position / info.channels;
+
+ // Create a new array
+ final_samples_planar = (int16_t *) av_malloc(
+ sizeof(int16_t) * audio_frame->nb_samples * info.channels
+ * (av_get_bytes_per_sample(output_sample_fmt) /
+ av_get_bytes_per_sample(AV_SAMPLE_FMT_S16) )
+ );
+
+ // Copy audio into buffer for frame
+ memcpy(final_samples_planar, samples, audio_frame->nb_samples * info.channels * av_get_bytes_per_sample(output_sample_fmt));
+
+ // Fill input frame with sample data
+ avcodec_fill_audio_frame(audio_frame, info.channels, output_sample_fmt,
+ (uint8_t *) final_samples_planar, audio_encoder_buffer_size, 0);
+
+ // Create output frame (and allocate arrays)
+ frame_final->nb_samples = audio_input_frame_size;
+ av_samples_alloc(frame_final->data, frame_final->linesize, info.channels,
+ frame_final->nb_samples, audio_codec_ctx->sample_fmt, 0);
+
+ // Convert audio samples
+ int nb_samples = SWR_CONVERT(
+ avr_planar, // audio resample context
+ frame_final->data, // output data pointers
+ frame_final->linesize[0], // output plane size, in bytes. (0 if unknown)
+ frame_final->nb_samples, // maximum number of samples that the output buffer can hold
+ audio_frame->data, // input data pointers
+ audio_frame->linesize[0], // input plane size, in bytes (0 if unknown)
+ audio_frame->nb_samples // number of input samples to convert
+ );
+
+ // Copy audio samples over original samples
+ if (nb_samples > 0) {
+ memcpy(samples, frame_final->data[0],
+ nb_samples * av_get_bytes_per_sample(audio_codec_ctx->sample_fmt) * info.channels);
+ }
+
+ // deallocate AVFrame
+ av_freep(&(audio_frame->data[0]));
+ AV_FREE_FRAME(&audio_frame);
+ all_queued_samples = NULL; // this array cleared with above call
+
+ ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_audio_packets (Successfully completed 2nd resampling for Planar formats)", "nb_samples", nb_samples);
+
+ } else {
+ // Create a new array
+ final_samples = (int16_t *) av_malloc(
+ sizeof(int16_t) * audio_input_position
+ * (av_get_bytes_per_sample(audio_codec_ctx->sample_fmt) /
+ av_get_bytes_per_sample(AV_SAMPLE_FMT_S16) )
+ );
+
+ // Copy audio into buffer for frame
+ memcpy(final_samples, samples,
+ audio_input_position * av_get_bytes_per_sample(audio_codec_ctx->sample_fmt));
+
+ // Init the nb_samples property
+ frame_final->nb_samples = audio_input_frame_size;
+
+ // Fill the final_frame AVFrame with audio (non planar)
+ avcodec_fill_audio_frame(frame_final, audio_codec_ctx->channels,
+ audio_codec_ctx->sample_fmt, (uint8_t *) final_samples,
+ audio_encoder_buffer_size, 0);
+ }
- // Increment PTS (in samples)
- write_audio_count += FFMIN(audio_input_frame_size, audio_input_position);
- frame_final->pts = write_audio_count; // Set the AVFrame's PTS
+ // Increment PTS (in samples)
+ write_audio_count += FFMIN(audio_input_frame_size, audio_input_position);
+ frame_final->pts = write_audio_count; // Set the AVFrame's PTS
- // Init the packet
- AVPacket pkt;
- av_init_packet(&pkt);
- pkt.data = audio_encoder_buffer;
- pkt.size = audio_encoder_buffer_size;
+ // Init the packet
+ AVPacket pkt;
+ av_init_packet(&pkt);
+ pkt.data = audio_encoder_buffer;
+ pkt.size = audio_encoder_buffer_size;
- // Set the packet's PTS prior to encoding
- pkt.pts = pkt.dts = write_audio_count;
+ // Set the packet's PTS prior to encoding
+ pkt.pts = pkt.dts = write_audio_count;
- /* encode the audio samples */
- int got_packet_ptr = 0;
+ /* encode the audio samples */
+ int got_packet_ptr = 0;
#if IS_FFMPEG_3_2
- // Encode audio (latest version of FFmpeg)
- int error_code;
- int ret = 0;
- int frame_finished = 0;
- error_code = ret = avcodec_send_frame(audio_codec_ctx, frame_final);
- if (ret < 0 && ret != AVERROR(EINVAL) && ret != AVERROR_EOF) {
- avcodec_send_frame(audio_codec_ctx, NULL);
- }
- else {
- if (ret >= 0)
- pkt.size = 0;
- ret = avcodec_receive_packet(audio_codec_ctx, &pkt);
- if (ret >= 0)
- frame_finished = 1;
- if(ret == AVERROR(EINVAL) || ret == AVERROR_EOF) {
- avcodec_flush_buffers(audio_codec_ctx);
- ret = 0;
- }
- if (ret >= 0) {
- ret = frame_finished;
- }
- }
- if (!pkt.data && !frame_finished)
- {
- ret = -1;
- }
- got_packet_ptr = ret;
+ // Encode audio (latest version of FFmpeg)
+ int error_code;
+ int ret = 0;
+ int frame_finished = 0;
+ error_code = ret = avcodec_send_frame(audio_codec_ctx, frame_final);
+ if (ret < 0 && ret != AVERROR(EINVAL) && ret != AVERROR_EOF) {
+ avcodec_send_frame(audio_codec_ctx, NULL);
+ }
+ else {
+ if (ret >= 0)
+ pkt.size = 0;
+ ret = avcodec_receive_packet(audio_codec_ctx, &pkt);
+ if (ret >= 0)
+ frame_finished = 1;
+ if(ret == AVERROR(EINVAL) || ret == AVERROR_EOF) {
+ avcodec_flush_buffers(audio_codec_ctx);
+ ret = 0;
+ }
+ if (ret >= 0) {
+ ret = frame_finished;
+ }
+ }
+ if (!pkt.data && !frame_finished)
+ {
+ ret = -1;
+ }
+ got_packet_ptr = ret;
#else
- // Encode audio (older versions of FFmpeg)
- int error_code = avcodec_encode_audio2(audio_codec_ctx, &pkt, frame_final, &got_packet_ptr);
+ // Encode audio (older versions of FFmpeg)
+ int error_code = avcodec_encode_audio2(audio_codec_ctx, &pkt, frame_final, &got_packet_ptr);
#endif
- /* if zero size, it means the image was buffered */
- if (error_code == 0 && got_packet_ptr) {
-
- // Since the PTS can change during encoding, set the value again. This seems like a huge hack,
- // but it fixes lots of PTS related issues when I do this.
- pkt.pts = pkt.dts = write_audio_count;
-
- // Scale the PTS to the audio stream timebase (which is sometimes different than the codec's timebase)
- if (pkt.pts != AV_NOPTS_VALUE)
- pkt.pts = av_rescale_q(pkt.pts, audio_codec_ctx->time_base, audio_st->time_base);
- if (pkt.dts != AV_NOPTS_VALUE)
- pkt.dts = av_rescale_q(pkt.dts, audio_codec_ctx->time_base, audio_st->time_base);
- if (pkt.duration > 0)
- pkt.duration = av_rescale_q(pkt.duration, audio_codec_ctx->time_base, audio_st->time_base);
-
- // set stream
- pkt.stream_index = audio_st->index;
- pkt.flags |= AV_PKT_FLAG_KEY;
-
- /* write the compressed frame in the media file */
- error_code = av_interleaved_write_frame(oc, &pkt);
- }
-
- if (error_code < 0) {
- ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_audio_packets ERROR [" + (std::string) av_err2str(error_code) + "]", "error_code", error_code);
- }
-
- // deallocate AVFrame
- av_freep(&(frame_final->data[0]));
- AV_FREE_FRAME(&frame_final);
-
- // deallocate memory for packet
- AV_FREE_PACKET(&pkt);
-
- // Reset position
- audio_input_position = 0;
- is_final = false;
- }
+ /* if zero size, it means the image was buffered */
+ if (error_code == 0 && got_packet_ptr) {
+
+ // Since the PTS can change during encoding, set the value again. This seems like a huge hack,
+ // but it fixes lots of PTS related issues when I do this.
+ pkt.pts = pkt.dts = write_audio_count;
+
+ // Scale the PTS to the audio stream timebase (which is sometimes different than the codec's timebase)
+ if (pkt.pts != AV_NOPTS_VALUE)
+ pkt.pts = av_rescale_q(pkt.pts, audio_codec_ctx->time_base, audio_st->time_base);
+ if (pkt.dts != AV_NOPTS_VALUE)
+ pkt.dts = av_rescale_q(pkt.dts, audio_codec_ctx->time_base, audio_st->time_base);
+ if (pkt.duration > 0)
+ pkt.duration = av_rescale_q(pkt.duration, audio_codec_ctx->time_base, audio_st->time_base);
+
+ // set stream
+ pkt.stream_index = audio_st->index;
+ pkt.flags |= AV_PKT_FLAG_KEY;
+
+ /* write the compressed frame in the media file */
+ error_code = av_interleaved_write_frame(oc, &pkt);
+ }
- // Delete arrays (if needed)
- if (all_resampled_samples) {
- av_freep(&all_resampled_samples);
- all_resampled_samples = NULL;
- }
- if (all_queued_samples) {
- av_freep(&all_queued_samples);
- all_queued_samples = NULL;
- }
+ if (error_code < 0) {
+ ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_audio_packets ERROR [" + (std::string) av_err2str(error_code) + "]", "error_code", error_code);
+ }
- } // end task
+ // deallocate AVFrame
+ av_freep(&(frame_final->data[0]));
+ AV_FREE_FRAME(&frame_final);
+
+ // deallocate memory for packet
+ AV_FREE_PACKET(&pkt);
+
+ // Reset position
+ audio_input_position = 0;
+ is_final = false;
+ }
+
+ // Delete arrays (if needed)
+ if (all_resampled_samples) {
+ av_freep(&all_resampled_samples);
+ all_resampled_samples = NULL;
+ }
+ if (all_queued_samples) {
+ av_freep(&all_queued_samples);
+ all_queued_samples = NULL;
+ }
}
// Allocate an AVFrame object
@@ -2006,53 +1983,47 @@ void FFmpegWriter::process_video_packet(std::shared_ptr<Frame> frame) {
if (rescaler_position == num_of_rescalers)
rescaler_position = 0;
-#pragma omp task firstprivate(frame, scaler, source_image_width, source_image_height)
- {
- // Allocate an RGB frame & final output frame
- int bytes_source = 0;
- int bytes_final = 0;
- AVFrame *frame_source = NULL;
- const uchar *pixels = NULL;
+ // Allocate an RGB frame & final output frame
+ int bytes_source = 0;
+ int bytes_final = 0;
+ AVFrame *frame_source = NULL;
+ const uchar *pixels = NULL;
- // Get a list of pixels from source image
- pixels = frame->GetPixels();
+ // Get a list of pixels from source image
+ pixels = frame->GetPixels();
- // Init AVFrame for source image & final (converted image)
- frame_source = allocate_avframe(PIX_FMT_RGBA, source_image_width, source_image_height, &bytes_source, (uint8_t *) pixels);
+ // Init AVFrame for source image & final (converted image)
+ frame_source = allocate_avframe(PIX_FMT_RGBA, source_image_width, source_image_height, &bytes_source, (uint8_t *) pixels);
#if IS_FFMPEG_3_2
- AVFrame *frame_final;
- #if HAVE_HW_ACCEL
- if (hw_en_on && hw_en_supported) {
- frame_final = allocate_avframe(AV_PIX_FMT_NV12, info.width, info.height, &bytes_final, NULL);
- } else
- #endif // HAVE_HW_ACCEL
- {
- frame_final = allocate_avframe(
- (AVPixelFormat)(video_st->codecpar->format),
- info.width, info.height, &bytes_final, NULL
- );
- }
+ AVFrame *frame_final;
+#if HAVE_HW_ACCEL
+ if (hw_en_on && hw_en_supported) {
+ frame_final = allocate_avframe(AV_PIX_FMT_NV12, info.width, info.height, &bytes_final, NULL);
+ } else
+#endif // HAVE_HW_ACCEL
+ {
+ frame_final = allocate_avframe(
+ (AVPixelFormat)(video_st->codecpar->format),
+ info.width, info.height, &bytes_final, NULL
+ );
+ }
#else
- AVFrame *frame_final = allocate_avframe(video_codec_ctx->pix_fmt, info.width, info.height, &bytes_final, NULL);
+ AVFrame *frame_final = allocate_avframe(video_codec_ctx->pix_fmt, info.width, info.height, &bytes_final, NULL);
#endif // IS_FFMPEG_3_2
- // Fill with data
- AV_COPY_PICTURE_DATA(frame_source, (uint8_t *) pixels, PIX_FMT_RGBA, source_image_width, source_image_height);
- ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::process_video_packet", "frame->number", frame->number, "bytes_source", bytes_source, "bytes_final", bytes_final);
-
- // Resize & convert pixel format
- sws_scale(scaler, frame_source->data, frame_source->linesize, 0,
- source_image_height, frame_final->data, frame_final->linesize);
-
- // Add resized AVFrame to av_frames map
-#pragma omp critical (av_frames_section)
- add_avframe(frame, frame_final);
+ // Fill with data
+ AV_COPY_PICTURE_DATA(frame_source, (uint8_t *) pixels, PIX_FMT_RGBA, source_image_width, source_image_height);
+ ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::process_video_packet", "frame->number", frame->number, "bytes_source", bytes_source, "bytes_final", bytes_final);
- // Deallocate memory
- AV_FREE_FRAME(&frame_source);
+ // Resize & convert pixel format
+ sws_scale(scaler, frame_source->data, frame_source->linesize, 0,
+ source_image_height, frame_final->data, frame_final->linesize);
- } // end task
+ // Add resized AVFrame to av_frames map
+ add_avframe(frame, frame_final);
+ // Deallocate memory
+ AV_FREE_FRAME(&frame_source);
}
// write video frame
diff --git a/src/Frame.cpp b/src/Frame.cpp
index d25def6dd..5644db58c 100644
--- a/src/Frame.cpp
+++ b/src/Frame.cpp
@@ -189,10 +189,10 @@ std::shared_ptr<QImage> Frame::GetWaveform(int width, int height, int Red, int G
int total_width = 0;
// Loop through each audio channel
- int Y = 100;
+ float Y = 100.0;
for (int channel = 0; channel < audio->getNumChannels(); channel++)
{
- int X = 0;
+ float X = 0.0;
// Get audio for this channel
const float *samples = audio->getReadPointer(channel);
@@ -200,23 +200,15 @@ std::shared_ptr<QImage> Frame::GetWaveform(int width, int height, int Red, int G
for (int sample = 0; sample < GetAudioSamplesCount(); sample++, X++)
{
// Sample value (scaled to -100 to 100)
- float value = samples[sample] * 100;
+ float value = samples[sample] * 100.0;
// Append a line segment for each sample
- if (value != 0.0) {
- // LINE
- lines.push_back(QPointF(X,Y));
- lines.push_back(QPointF(X,Y-value));
- }
- else {
- // DOT
- lines.push_back(QPointF(X,Y));
- lines.push_back(QPointF(X,Y));
- }
+ lines.push_back(QPointF(X,Y+1.0));
+ lines.push_back(QPointF(X,(Y-value)+1.0));
}
// Add Channel Label Coordinate
- labels.push_back(QPointF(5, Y - 5));
+ labels.push_back(QPointF(5.0, Y - 5.0));
// Increment Y
Y += (200 + height_padding);
@@ -232,24 +224,19 @@ std::shared_ptr<QImage> Frame::GetWaveform(int width, int height, int Red, int G
QPainter painter(wave_image.get());
// Set pen color
- painter.setPen(QColor(Red, Green, Blue, Alpha));
+ QPen pen;
+ pen.setColor(QColor(Red, Green, Blue, Alpha));
+ pen.setWidthF(1.0);
+ pen.setStyle(Qt::SolidLine);
+ painter.setPen(pen);
// Draw the waveform
painter.drawLines(lines);
painter.end();
- // Loop through the channels labels (and draw the text)
- // TODO: Configure Fonts in Qt5 correctly, so the drawText method does not crash
-// painter.setFont(QFont(QString("Arial"), 16, 1, false));
-// for (int channel = 0; channel < labels.size(); channel++) {
-// stringstream label;
-// label << "Channel " << channel;
-// painter.drawText(labels.at(channel), QString::fromStdString(label.str()));
-// }
-
// Resize Image (if requested)
if (width != total_width || height != total_height) {
- QImage scaled_wave_image = wave_image->scaled(width, height, Qt::IgnoreAspectRatio, Qt::FastTransformation);
+ QImage scaled_wave_image = wave_image->scaled(width, height, Qt::IgnoreAspectRatio, Qt::SmoothTransformation);
wave_image = std::make_shared<QImage>(scaled_wave_image);
}
}
@@ -754,13 +741,11 @@ void Frame::AddColor(int new_width, int new_height, std::string new_color)
// Create new image object, and fill with pixel data
const GenericScopedLock<juce::CriticalSection> lock(addingImageSection);
- #pragma omp critical (AddImage)
- {
- image = std::make_shared(new_width, new_height, QImage::Format_RGBA8888_Premultiplied);
+ image = std::make_shared<QImage>(new_width, new_height, QImage::Format_RGBA8888_Premultiplied);
+
+ // Fill with solid color
+ image->fill(QColor(QString::fromStdString(color)));
- // Fill with solid color
- image->fill(QColor(QString::fromStdString(color)));
- }
// Update height and width
width = image->width();
height = image->height();
@@ -775,12 +760,7 @@ void Frame::AddImage(
// Create new buffer
{
const GenericScopedLock<juce::CriticalSection> lock(addingImageSection);
- int buffer_size = new_width * new_height * bytes_per_pixel;
- qbuffer = new unsigned char[buffer_size]();
-
- // Copy buffer data
- memcpy((unsigned char*)qbuffer, pixels_, buffer_size);
-
+ qbuffer = pixels_;
} // Release addingImageSection lock
// Create new image object from pixel data
@@ -804,19 +784,16 @@ void Frame::AddImage(std::shared_ptr<QImage> new_image)
// assign image data
const GenericScopedLock<juce::CriticalSection> lock(addingImageSection);
- #pragma omp critical (AddImage)
- {
- image = new_image;
+ image = new_image;
- // Always convert to Format_RGBA8888_Premultiplied (if different)
- if (image->format() != QImage::Format_RGBA8888_Premultiplied)
- *image = image->convertToFormat(QImage::Format_RGBA8888_Premultiplied);
+ // Always convert to Format_RGBA8888_Premultiplied (if different)
+ if (image->format() != QImage::Format_RGBA8888_Premultiplied)
+ *image = image->convertToFormat(QImage::Format_RGBA8888_Premultiplied);
- // Update height and width
- width = image->width();
- height = image->height();
- has_image_data = true;
- }
+ // Update height and width
+ width = image->width();
+ height = image->height();
+ has_image_data = true;
}
// Add (or replace) pixel data to the frame (for only the odd or even lines)
@@ -834,15 +811,12 @@ void Frame::AddImage(std::shared_ptr<QImage> new_image, bool only_odd_lines)
} else {
// Ignore image of different sizes or formats
bool ret=false;
- #pragma omp critical (AddImage)
- {
- if (image == new_image || image->size() != new_image->size()) {
- ret = true;
- }
- else if (new_image->format() != QImage::Format_RGBA8888_Premultiplied) {
- new_image = std::make_shared(
+ if (image == new_image || image->size() != new_image->size()) {
+ ret = true;
+ }
+ else if (new_image->format() != QImage::Format_RGBA8888_Premultiplied) {
+ new_image = std::make_shared<QImage>(
new_image->convertToFormat(QImage::Format_RGBA8888_Premultiplied));
- }
}
if (ret) {
return;
@@ -850,26 +824,23 @@ void Frame::AddImage(std::shared_ptr new_image, bool only_odd_lines)
// Get the frame's image
const GenericScopedLock<juce::CriticalSection> lock(addingImageSection);
- #pragma omp critical (AddImage)
- {
- unsigned char *pixels = image->bits();
- const unsigned char *new_pixels = new_image->constBits();
+ unsigned char *pixels = image->bits();
+ const unsigned char *new_pixels = new_image->constBits();
- // Loop through the scanlines of the image (even or odd)
- int start = 0;
- if (only_odd_lines)
- start = 1;
+ // Loop through the scanlines of the image (even or odd)
+ int start = 0;
+ if (only_odd_lines)
+ start = 1;
- for (int row = start; row < image->height(); row += 2) {
- int offset = row * image->bytesPerLine();
- memcpy(pixels + offset, new_pixels + offset, image->bytesPerLine());
- }
-
- // Update height and width
- height = image->height();
- width = image->width();
- has_image_data = true;
+ for (int row = start; row < image->height(); row += 2) {
+ int offset = row * image->bytesPerLine();
+ memcpy(pixels + offset, new_pixels + offset, image->bytesPerLine());
}
+
+ // Update height and width
+ height = image->height();
+ width = image->width();
+ has_image_data = true;
}
}
@@ -891,31 +862,29 @@ void Frame::ResizeAudio(int channels, int length, int rate, ChannelLayout layout
// Add audio samples to a specific channel
void Frame::AddAudio(bool replaceSamples, int destChannel, int destStartSample, const float* source, int numSamples, float gainToApplyToSource = 1.0f) {
const GenericScopedLock<juce::CriticalSection> lock(addingAudioSection);
- #pragma omp critical (adding_audio)
- {
- // Clamp starting sample to 0
- int destStartSampleAdjusted = max(destStartSample, 0);
-
- // Extend audio container to hold more (or less) samples and channels.. if needed
- int new_length = destStartSampleAdjusted + numSamples;
- int new_channel_length = audio->getNumChannels();
- if (destChannel >= new_channel_length)
- new_channel_length = destChannel + 1;
- if (new_length > audio->getNumSamples() || new_channel_length > audio->getNumChannels())
- audio->setSize(new_channel_length, new_length, true, true, false);
-
- // Clear the range of samples first (if needed)
- if (replaceSamples)
- audio->clear(destChannel, destStartSampleAdjusted, numSamples);
-
- // Add samples to frame's audio buffer
- audio->addFrom(destChannel, destStartSampleAdjusted, source, numSamples, gainToApplyToSource);
- has_audio_data = true;
-
- // Calculate max audio sample added
- if (new_length > max_audio_sample)
- max_audio_sample = new_length;
- }
+
+ // Clamp starting sample to 0
+ int destStartSampleAdjusted = max(destStartSample, 0);
+
+ // Extend audio container to hold more (or less) samples and channels.. if needed
+ int new_length = destStartSampleAdjusted + numSamples;
+ int new_channel_length = audio->getNumChannels();
+ if (destChannel >= new_channel_length)
+ new_channel_length = destChannel + 1;
+ if (new_length > audio->getNumSamples() || new_channel_length > audio->getNumChannels())
+ audio->setSize(new_channel_length, new_length, true, true, false);
+
+ // Clear the range of samples first (if needed)
+ if (replaceSamples)
+ audio->clear(destChannel, destStartSampleAdjusted, numSamples);
+
+ // Add samples to frame's audio buffer
+ audio->addFrom(destChannel, destStartSampleAdjusted, source, numSamples, gainToApplyToSource);
+ has_audio_data = true;
+
+ // Calculate max audio sample added
+ if (new_length > max_audio_sample)
+ max_audio_sample = new_length;
}
// Apply gain ramp (i.e. fading volume)
@@ -958,7 +927,7 @@ cv::Mat Frame::GetImageCV()
if (!image)
// Fill with black
AddColor(width, height, color);
-
+
// if (imagecv.empty())
// Convert Qimage to Mat
imagecv = Qimage2mat(image);
diff --git a/src/KeyFrame.cpp b/src/KeyFrame.cpp
index a178f2dad..421a562ef 100644
--- a/src/KeyFrame.cpp
+++ b/src/KeyFrame.cpp
@@ -101,6 +101,7 @@ namespace openshot{
case CONSTANT: return left.co.Y;
case LINEAR: return InterpolateLinearCurve(left, right, target);
case BEZIER: return InterpolateBezierCurve(left, right, target, allowed_error);
+ default: return InterpolateLinearCurve(left, right, target);
}
}
}
diff --git a/src/Qt/VideoCacheThread.cpp b/src/Qt/VideoCacheThread.cpp
index 82b3c146b..371d3ac0b 100644
--- a/src/Qt/VideoCacheThread.cpp
+++ b/src/Qt/VideoCacheThread.cpp
@@ -40,7 +40,7 @@ namespace openshot
// Constructor
VideoCacheThread::VideoCacheThread()
: Thread("video-cache"), speed(1), is_playing(false), position(1)
- , reader(NULL), max_frames(std::min(OPEN_MP_NUM_PROCESSORS * 8, 64)), current_display_frame(1)
+ , reader(NULL), max_concurrent_frames(OPEN_MP_NUM_PROCESSORS * 4), current_display_frame(1)
{
}
@@ -98,13 +98,13 @@ namespace openshot
// Cache frames up to the max frames. Reset to current position
// if cache gets too far away from display frame. Cache frames
// even when player is paused (i.e. speed 0).
- while (((position - current_display_frame) < max_frames) && is_playing)
+ while (((position - current_display_frame) < max_concurrent_frames) && is_playing)
{
- // Only cache up till the max_frames amount... then sleep
+ // Only cache up till the max_concurrent_frames amount... then sleep
try
{
if (reader) {
- ZmqLogger::Instance()->AppendDebugMethod("VideoCacheThread::run (cache frame)", "position", position, "current_display_frame", current_display_frame, "max_frames", max_frames, "needed_frames", (position - current_display_frame));
+ ZmqLogger::Instance()->AppendDebugMethod("VideoCacheThread::run (cache frame)", "position", position, "current_display_frame", current_display_frame, "max_concurrent_frames", max_concurrent_frames, "needed_frames", (position - current_display_frame));
// Force the frame to be generated
if (reader->GetCache()->GetSmallestFrame()) {
diff --git a/src/Qt/VideoCacheThread.h b/src/Qt/VideoCacheThread.h
index f9551bdb8..1ab3b1936 100644
--- a/src/Qt/VideoCacheThread.h
+++ b/src/Qt/VideoCacheThread.h
@@ -54,7 +54,7 @@ namespace openshot
bool is_playing;
int64_t current_display_frame;
ReaderBase *reader;
- int max_frames;
+ int max_concurrent_frames;
/// Constructor
VideoCacheThread();
diff --git a/src/QtImageReader.cpp b/src/QtImageReader.cpp
index 01b023259..29243ed45 100644
--- a/src/QtImageReader.cpp
+++ b/src/QtImageReader.cpp
@@ -38,6 +38,7 @@
#include
#include
#include
+#include <QImageReader>
#if USE_RESVG == 1
// If defined and found in CMake, utilize the libresvg for parsing
@@ -75,8 +76,12 @@ void QtImageReader::Open()
if (!loaded) {
// Attempt to open file using Qt's build in image processing capabilities
+ // AutoTransform enables exif data to be parsed and auto transform the image
+ // to the correct orientation
image = std::make_shared<QImage>();
- loaded = image->load(path);
+ QImageReader imgReader( path );
+ imgReader.setAutoTransform( true );
+ loaded = imgReader.read(image.get());
}
if (!loaded) {
diff --git a/src/QtPlayer.cpp b/src/QtPlayer.cpp
index 345bc24f0..c43cdf800 100644
--- a/src/QtPlayer.cpp
+++ b/src/QtPlayer.cpp
@@ -90,7 +90,23 @@ namespace openshot
FFmpegReader *ffreader = new FFmpegReader(source);
ffreader->DisplayInfo();
- reader = new Timeline(ffreader->info.width, ffreader->info.height, ffreader->info.fps, ffreader->info.sample_rate, ffreader->info.channels, ffreader->info.channel_layout);
+ // Use default sample rate (or use the FFmpegReader's audio settings if any)
+ int sample_rate = 44100;
+ if (ffreader->info.sample_rate > 0)
+ sample_rate = ffreader->info.sample_rate;
+
+ // Use default channels (or use the FFmpegReader's audio settings if any)
+ int channels = 2;
+ if (ffreader->info.channels > 0)
+ channels = ffreader->info.channels;
+
+ // Use default channel layout (or use the FFmpegReader's audio settings if any)
+ openshot::ChannelLayout channel_layout = openshot::LAYOUT_STEREO;
+ if (channels != 2)
+ channel_layout = ffreader->info.channel_layout;
+
+ // Create timeline instance (720p, since we have no re-scaling in this player yet)
+ reader = new Timeline(1280, 720, ffreader->info.fps, sample_rate, channels, channel_layout);
Clip *c = new Clip(source);
Timeline* tm = (Timeline*)reader;
diff --git a/src/Settings.cpp b/src/Settings.cpp
index 9cf16fbbd..cfbe2e2ca 100644
--- a/src/Settings.cpp
+++ b/src/Settings.cpp
@@ -45,7 +45,6 @@ Settings *Settings::Instance()
m_pInstance = new Settings;
m_pInstance->HARDWARE_DECODER = 0;
m_pInstance->HIGH_QUALITY_SCALING = false;
- m_pInstance->WAIT_FOR_VIDEO_PROCESSING_TASK = false;
m_pInstance->OMP_THREADS = 12;
m_pInstance->FF_THREADS = 8;
m_pInstance->DE_LIMIT_HEIGHT_MAX = 1100;
diff --git a/src/Settings.h b/src/Settings.h
index da85b89e0..4d16f6b17 100644
--- a/src/Settings.h
+++ b/src/Settings.h
@@ -94,15 +94,6 @@ namespace openshot {
/// Scale mode used in FFmpeg decoding and encoding (used as an optimization for faster previews)
bool HIGH_QUALITY_SCALING = false;
- /// Maximum width for image data (useful for optimzing for a smaller preview or render)
- int MAX_WIDTH = 0;
-
- /// Maximum height for image data (useful for optimzing for a smaller preview or render)
- int MAX_HEIGHT = 0;
-
- /// Wait for OpenMP task to finish before continuing (used to limit threads on slower systems)
- bool WAIT_FOR_VIDEO_PROCESSING_TASK = false;
-
/// Number of threads of OpenMP
int OMP_THREADS = 12;
diff --git a/src/Timeline.cpp b/src/Timeline.cpp
index 614dca8f8..1bf915e3e 100644
--- a/src/Timeline.cpp
+++ b/src/Timeline.cpp
@@ -35,7 +35,8 @@ using namespace openshot;
// Default Constructor for the timeline (which sets the canvas width and height)
Timeline::Timeline(int width, int height, Fraction fps, int sample_rate, int channels, ChannelLayout channel_layout) :
- is_open(false), auto_map_clips(true), managed_cache(true), path("")
+ is_open(false), auto_map_clips(true), managed_cache(true), path(""),
+ max_concurrent_frames(OPEN_MP_NUM_PROCESSORS)
{
// Create CrashHandler and Attach (incase of errors)
CrashHandler::Instance();
@@ -70,23 +71,17 @@ Timeline::Timeline(int width, int height, Fraction fps, int sample_rate, int cha
info.acodec = "openshot::timeline";
info.vcodec = "openshot::timeline";
- // Configure OpenMP parallelism
- // Default number of threads per block
- omp_set_num_threads(OPEN_MP_NUM_PROCESSORS);
- // Allow nested parallel sections as deeply as supported
- omp_set_max_active_levels(OPEN_MP_MAX_ACTIVE);
+ // Init cache
+ final_cache = new CacheMemory();
// Init max image size
SetMaxSize(info.width, info.height);
-
- // Init cache
- final_cache = new CacheMemory();
- final_cache->SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * 2, info.width, info.height, info.sample_rate, info.channels);
}
// Constructor for the timeline (which loads a JSON structure from a file path, and initializes a timeline)
Timeline::Timeline(const std::string& projectPath, bool convert_absolute_paths) :
- is_open(false), auto_map_clips(true), managed_cache(true), path(projectPath) {
+ is_open(false), auto_map_clips(true), managed_cache(true), path(projectPath),
+ max_concurrent_frames(OPEN_MP_NUM_PROCESSORS) {
// Create CrashHandler and Attach (incase of errors)
CrashHandler::Instance();
@@ -203,18 +198,11 @@ Timeline::Timeline(const std::string& projectPath, bool convert_absolute_paths)
info.has_video = true;
info.has_audio = true;
- // Configure OpenMP parallelism
- // Default number of threads per section
- omp_set_num_threads(OPEN_MP_NUM_PROCESSORS);
- // Allow nested parallel sections as deeply as supported
- omp_set_max_active_levels(OPEN_MP_MAX_ACTIVE);
+ // Init cache
+ final_cache = new CacheMemory();
// Init max image size
SetMaxSize(info.width, info.height);
-
- // Init cache
- final_cache = new CacheMemory();
- final_cache->SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * 2, info.width, info.height, info.sample_rate, info.channels);
}
Timeline::~Timeline() {
@@ -568,7 +556,7 @@ std::shared_ptr<Frame> Timeline::apply_effects(std::shared_ptr<Frame> frame, int
}
// Get or generate a blank frame
-std::shared_ptr<Frame> Timeline::GetOrCreateFrame(Clip* clip, int64_t number)
+std::shared_ptr<Frame> Timeline::GetOrCreateFrame(std::shared_ptr<Frame> background_frame, Clip* clip, int64_t number)
{
std::shared_ptr<Frame> new_frame;
@@ -580,8 +568,7 @@ std::shared_ptr<Frame> Timeline::GetOrCreateFrame(Clip* clip, int64_t number)
ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetOrCreateFrame (from reader)", "number", number, "samples_in_frame", samples_in_frame);
// Attempt to get a frame (but this could fail if a reader has just been closed)
- #pragma omp critical (T_GetOtCreateFrame)
- new_frame = std::shared_ptr(clip->GetFrame(number));
+ new_frame = std::shared_ptr<Frame>(clip->GetFrame(background_frame, number));
// Return real frame
return new_frame;
@@ -596,22 +583,15 @@ std::shared_ptr<Frame> Timeline::GetOrCreateFrame(Clip* clip, int64_t number)
ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetOrCreateFrame (create blank)", "number", number, "samples_in_frame", samples_in_frame);
// Create blank frame
- new_frame = std::make_shared<Frame>(number, preview_width, preview_height, "#000000", samples_in_frame, info.channels);
- #pragma omp critical (T_GetOtCreateFrame)
- {
- new_frame->SampleRate(info.sample_rate);
- new_frame->ChannelsLayout(info.channel_layout);
- }
return new_frame;
}
// Process a new layer of video or audio
void Timeline::add_layer(std::shared_ptr<Frame> new_frame, Clip* source_clip, int64_t clip_frame_number, int64_t timeline_frame_number, bool is_top_clip, float max_volume)
{
- // Get the clip's frame & image
+ // Get the clip's frame, composited on top of the current timeline frame
std::shared_ptr<Frame> source_frame;
- #pragma omp critical (T_addLayer)
- source_frame = GetOrCreateFrame(source_clip, clip_frame_number);
+ source_frame = GetOrCreateFrame(new_frame, source_clip, clip_frame_number);
// No frame found... so bail
if (!source_frame)
@@ -623,13 +603,9 @@ void Timeline::add_layer(std::shared_ptr<Frame> new_frame, Clip* source_clip, in
/* Apply effects to the source frame (if any). If multiple clips are overlapping, only process the
* effects on the top clip. */
if (is_top_clip) {
- #pragma omp critical (T_addLayer)
source_frame = apply_effects(source_frame, timeline_frame_number, source_clip->Layer());
}
- // Declare an image to hold the source frame's image
- std::shared_ptr<QImage> source_image;
-
/* COPY AUDIO - with correct volume */
if (source_clip->Reader()->info.has_audio) {
// Debug output
@@ -678,51 +654,17 @@ void Timeline::add_layer(std::shared_ptr<Frame> new_frame, Clip* source_clip, in
// This is a crude solution at best. =)
if (new_frame->GetAudioSamplesCount() != source_frame->GetAudioSamplesCount()){
// Force timeline frame to match the source frame
- #pragma omp critical (T_addLayer)
-
new_frame->ResizeAudio(info.channels, source_frame->GetAudioSamplesCount(), info.sample_rate, info.channel_layout);
}
// Copy audio samples (and set initial volume). Mix samples with existing audio samples. The gains are added together, to
// be sure to set the gain's correctly, so the sum does not exceed 1.0 (of audio distortion will happen).
- #pragma omp critical (T_addLayer)
new_frame->AddAudio(false, channel_mapping, 0, source_frame->GetAudioSamples(channel), source_frame->GetAudioSamplesCount(), 1.0);
-
}
else
// Debug output
ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (No Audio Copied - Wrong # of Channels)", "source_clip->Reader()->info.has_audio", source_clip->Reader()->info.has_audio, "source_frame->GetAudioChannelsCount()", source_frame->GetAudioChannelsCount(), "info.channels", info.channels, "clip_frame_number", clip_frame_number, "timeline_frame_number", timeline_frame_number);
}
- // Skip out if video was disabled or only an audio frame (no visualisation in use)
- if (source_clip->has_video.GetInt(clip_frame_number) == 0 ||
- (!source_clip->Waveform() && !source_clip->Reader()->info.has_video))
- // Skip the rest of the image processing for performance reasons
- return;
-
- // Debug output
- ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Get Source Image)", "source_frame->number", source_frame->number, "source_clip->Waveform()", source_clip->Waveform(), "clip_frame_number", clip_frame_number);
-
- // Get actual frame image data
- source_image = source_frame->GetImage();
-
- // Debug output
- ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Transform: Composite Image Layer: Prepare)", "source_frame->number", source_frame->number, "new_frame->GetImage()->width()", new_frame->GetImage()->width(), "source_image->width()", source_image->width());
-
- /* COMPOSITE SOURCE IMAGE (LAYER) ONTO FINAL IMAGE */
- std::shared_ptr<QImage> new_image;
- new_image = new_frame->GetImage();
-
- // Load timeline's new frame image into a QPainter
- QPainter painter(new_image.get());
-
- // Composite a new layer onto the image
- painter.setCompositionMode(QPainter::CompositionMode_SourceOver);
- painter.drawImage(0, 0, *source_image, 0, 0, source_image->width(), source_image->height());
- painter.end();
-
- // Add new QImage to frame
- new_frame->AddImage(new_image);
-
// Debug output
ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Transform: Composite Image Layer: Completed)", "source_frame->number", source_frame->number, "new_frame->GetImage()->width()", new_frame->GetImage()->width());
}
@@ -821,7 +763,6 @@ std::shared_ptr<Frame> Timeline::GetFrame(int64_t requested_frame)
// Check cache
std::shared_ptr<Frame> frame;
std::lock_guard<std::mutex> guard(get_frame_mutex);
- #pragma omp critical (T_GetFrame)
frame = final_cache->GetFrame(requested_frame);
if (frame) {
// Debug output
@@ -840,7 +781,6 @@ std::shared_ptr<Frame> Timeline::GetFrame(int64_t requested_frame)
throw ReaderClosed("The Timeline is closed. Call Open() before calling this method.");
// Check cache again (due to locking)
- #pragma omp critical (T_GetFrame)
frame = final_cache->GetFrame(requested_frame);
if (frame) {
// Debug output
@@ -865,139 +805,97 @@ std::shared_ptr<Frame> Timeline::GetFrame(int64_t requested_frame)
// Get a list of clips that intersect with the requested section of timeline
// This also opens the readers for intersecting clips, and marks non-intersecting clips as 'needs closing'
std::vector nearby_clips;
- #pragma omp critical (T_GetFrame)
- nearby_clips = find_intersecting_clips(requested_frame, minimum_frames, true);
-
- // Debug output
- ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame", "requested_frame", requested_frame, "minimum_frames", minimum_frames, "OPEN_MP_NUM_PROCESSORS", OPEN_MP_NUM_PROCESSORS);
-
- // GENERATE CACHE FOR CLIPS (IN FRAME # SEQUENCE)
- // Determine all clip frames, and request them in order (to keep resampled audio in sequence)
- for (int64_t frame_number = requested_frame; frame_number < requested_frame + minimum_frames; frame_number++)
- {
- // Loop through clips
- for (auto clip : nearby_clips)
- {
- long clip_start_position = round(clip->Position() * info.fps.ToDouble()) + 1;
- long clip_end_position = round((clip->Position() + clip->Duration()) * info.fps.ToDouble()) + 1;
-
- bool does_clip_intersect = (clip_start_position <= frame_number && clip_end_position >= frame_number);
- if (does_clip_intersect)
- {
- // Get clip frame #
- long clip_start_frame = (clip->Start() * info.fps.ToDouble()) + 1;
- long clip_frame_number = frame_number - clip_start_position + clip_start_frame;
-
- // Cache clip object
- clip->GetFrame(clip_frame_number);
- }
- }
- }
-
- #pragma omp parallel
- {
- // Loop through all requested frames
- #pragma omp for ordered firstprivate(nearby_clips, requested_frame, minimum_frames) schedule(static,1)
- for (int64_t frame_number = requested_frame; frame_number < requested_frame + minimum_frames; frame_number++)
- {
- // Debug output
- ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (processing frame)", "frame_number", frame_number, "omp_get_thread_num()", omp_get_thread_num());
-
- // Init some basic properties about this frame
- int samples_in_frame = Frame::GetSamplesPerFrame(frame_number, info.fps, info.sample_rate, info.channels);
-
- // Create blank frame (which will become the requested frame)
- std::shared_ptr new_frame(std::make_shared(frame_number, preview_width, preview_height, "#000000", samples_in_frame, info.channels));
- #pragma omp critical (T_GetFrame)
- {
- new_frame->AddAudioSilence(samples_in_frame);
- new_frame->SampleRate(info.sample_rate);
- new_frame->ChannelsLayout(info.channel_layout);
- }
-
- // Debug output
- ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Adding solid color)", "frame_number", frame_number, "info.width", info.width, "info.height", info.height);
-
- // Add Background Color to 1st layer (if animated or not black)
- if ((color.red.GetCount() > 1 || color.green.GetCount() > 1 || color.blue.GetCount() > 1) ||
- (color.red.GetValue(frame_number) != 0.0 || color.green.GetValue(frame_number) != 0.0 || color.blue.GetValue(frame_number) != 0.0))
- new_frame->AddColor(preview_width, preview_height, color.GetColorHex(frame_number));
-
- // Debug output
- ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Loop through clips)", "frame_number", frame_number, "clips.size()", clips.size(), "nearby_clips.size()", nearby_clips.size());
-
- // Find Clips near this time
- for (auto clip : nearby_clips)
- {
- long clip_start_position = round(clip->Position() * info.fps.ToDouble()) + 1;
- long clip_end_position = round((clip->Position() + clip->Duration()) * info.fps.ToDouble()) + 1;
-
- bool does_clip_intersect = (clip_start_position <= frame_number && clip_end_position >= frame_number);
-
- // Debug output
- ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Does clip intersect)", "frame_number", frame_number, "clip->Position()", clip->Position(), "clip->Duration()", clip->Duration(), "does_clip_intersect", does_clip_intersect);
-
- // Clip is visible
- if (does_clip_intersect)
- {
- // Determine if clip is "top" clip on this layer (only happens when multiple clips are overlapping)
- bool is_top_clip = true;
- float max_volume = 0.0;
- for (auto nearby_clip : nearby_clips)
- {
- long nearby_clip_start_position = round(nearby_clip->Position() * info.fps.ToDouble()) + 1;
- long nearby_clip_end_position = round((nearby_clip->Position() + nearby_clip->Duration()) * info.fps.ToDouble()) + 1;
- long nearby_clip_start_frame = (nearby_clip->Start() * info.fps.ToDouble()) + 1;
- long nearby_clip_frame_number = frame_number - nearby_clip_start_position + nearby_clip_start_frame;
-
- // Determine if top clip
- if (clip->Id() != nearby_clip->Id() && clip->Layer() == nearby_clip->Layer() &&
- nearby_clip_start_position <= frame_number && nearby_clip_end_position >= frame_number &&
- nearby_clip_start_position > clip_start_position && is_top_clip == true) {
- is_top_clip = false;
- }
-
- // Determine max volume of overlapping clips
- if (nearby_clip->Reader() && nearby_clip->Reader()->info.has_audio &&
- nearby_clip->has_audio.GetInt(nearby_clip_frame_number) != 0 &&
- nearby_clip_start_position <= frame_number && nearby_clip_end_position >= frame_number) {
- max_volume += nearby_clip->volume.GetValue(nearby_clip_frame_number);
- }
- }
-
- // Determine the frame needed for this clip (based on the position on the timeline)
- long clip_start_frame = (clip->Start() * info.fps.ToDouble()) + 1;
- long clip_frame_number = frame_number - clip_start_position + clip_start_frame;
-
- // Debug output
- ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Calculate clip's frame #)", "clip->Position()", clip->Position(), "clip->Start()", clip->Start(), "info.fps.ToFloat()", info.fps.ToFloat(), "clip_frame_number", clip_frame_number);
-
- // Add clip's frame as layer
- add_layer(new_frame, clip, clip_frame_number, frame_number, is_top_clip, max_volume);
-
- } else
- // Debug output
- ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (clip does not intersect)", "frame_number", frame_number, "does_clip_intersect", does_clip_intersect);
-
- } // end clip loop
-
- // Debug output
- ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Add frame to cache)", "frame_number", frame_number, "info.width", info.width, "info.height", info.height);
-
- // Set frame # on mapped frame
- #pragma omp ordered
- {
- new_frame->SetFrameNumber(frame_number);
-
- // Add final frame to cache
- final_cache->Add(new_frame);
- }
-
- } // end frame loop
- } // end parallel
-
- // Debug output
- ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (end parallel region)", "requested_frame", requested_frame, "omp_get_thread_num()", omp_get_thread_num());
+ nearby_clips = find_intersecting_clips(requested_frame, 1, true);
+
+ // Debug output
+ ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (processing frame)", "requested_frame", requested_frame, "omp_get_thread_num()", omp_get_thread_num());
+
+ // Init some basic properties about this frame
+ int samples_in_frame = Frame::GetSamplesPerFrame(requested_frame, info.fps, info.sample_rate, info.channels);
+
+ // Create blank frame (which will become the requested frame)
+ std::shared_ptr new_frame(std::make_shared(requested_frame, preview_width, preview_height, "#000000", samples_in_frame, info.channels));
+ new_frame->AddAudioSilence(samples_in_frame);
+ new_frame->SampleRate(info.sample_rate);
+ new_frame->ChannelsLayout(info.channel_layout);
+
+ // Debug output
+ ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Adding solid color)", "requested_frame", requested_frame, "info.width", info.width, "info.height", info.height);
+
+ // Add Background Color to 1st layer (if animated or not black)
+ if ((color.red.GetCount() > 1 || color.green.GetCount() > 1 || color.blue.GetCount() > 1) ||
+ (color.red.GetValue(requested_frame) != 0.0 || color.green.GetValue(requested_frame) != 0.0 || color.blue.GetValue(requested_frame) != 0.0))
+ new_frame->AddColor(preview_width, preview_height, color.GetColorHex(requested_frame));
+
+ // Debug output
+ ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Loop through clips)", "requested_frame", requested_frame, "clips.size()", clips.size(), "nearby_clips.size()", nearby_clips.size());
+
+ // Find Clips near this time
+ for (auto clip : nearby_clips)
+ {
+ long clip_start_position = round(clip->Position() * info.fps.ToDouble()) + 1;
+ long clip_end_position = round((clip->Position() + clip->Duration()) * info.fps.ToDouble()) + 1;
+
+ bool does_clip_intersect = (clip_start_position <= requested_frame && clip_end_position >= requested_frame);
+
+ // Debug output
+ ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Does clip intersect)", "requested_frame", requested_frame, "clip->Position()", clip->Position(), "clip->Duration()", clip->Duration(), "does_clip_intersect", does_clip_intersect);
+
+ // Clip is visible
+ if (does_clip_intersect)
+ {
+ // Determine if clip is "top" clip on this layer (only happens when multiple clips are overlapping)
+ bool is_top_clip = true;
+ float max_volume = 0.0;
+ for (auto nearby_clip : nearby_clips)
+ {
+ long nearby_clip_start_position = round(nearby_clip->Position() * info.fps.ToDouble()) + 1;
+ long nearby_clip_end_position = round((nearby_clip->Position() + nearby_clip->Duration()) * info.fps.ToDouble()) + 1;
+ long nearby_clip_start_frame = (nearby_clip->Start() * info.fps.ToDouble()) + 1;
+ long nearby_clip_frame_number = requested_frame - nearby_clip_start_position + nearby_clip_start_frame;
+
+ // Determine if top clip
+ if (clip->Id() != nearby_clip->Id() && clip->Layer() == nearby_clip->Layer() &&
+ nearby_clip_start_position <= requested_frame && nearby_clip_end_position >= requested_frame &&
+ nearby_clip_start_position > clip_start_position && is_top_clip == true) {
+ is_top_clip = false;
+ }
+
+ // Determine max volume of overlapping clips
+ if (nearby_clip->Reader() && nearby_clip->Reader()->info.has_audio &&
+ nearby_clip->has_audio.GetInt(nearby_clip_frame_number) != 0 &&
+ nearby_clip_start_position <= requested_frame && nearby_clip_end_position >= requested_frame) {
+ max_volume += nearby_clip->volume.GetValue(nearby_clip_frame_number);
+ }
+ }
+
+ // Determine the frame needed for this clip (based on the position on the timeline)
+ long clip_start_frame = (clip->Start() * info.fps.ToDouble()) + 1;
+ long clip_frame_number = requested_frame - clip_start_position + clip_start_frame;
+
+ // Debug output
+ ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Calculate clip's frame #)", "clip->Position()", clip->Position(), "clip->Start()", clip->Start(), "info.fps.ToFloat()", info.fps.ToFloat(), "clip_frame_number", clip_frame_number);
+
+ // Add clip's frame as layer
+ add_layer(new_frame, clip, clip_frame_number, requested_frame, is_top_clip, max_volume);
+
+ } else {
+ // Debug output
+ ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (clip does not intersect)",
+ "requested_frame", requested_frame, "does_clip_intersect",
+ does_clip_intersect);
+ }
+
+ } // end clip loop
+
+ // Debug output
+ ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Add frame to cache)", "requested_frame", requested_frame, "info.width", info.width, "info.height", info.height);
+
+ // Set frame # on mapped frame
+ new_frame->SetFrameNumber(requested_frame);
+
+ // Add final frame to cache
+ final_cache->Add(new_frame);
// Return frame (or blank frame)
return final_cache->GetFrame(requested_frame);
@@ -1033,7 +931,6 @@ std::vector Timeline::find_intersecting_clips(int64_t requested_frame, in
ZmqLogger::Instance()->AppendDebugMethod("Timeline::find_intersecting_clips (Is clip near or intersecting)", "requested_frame", requested_frame, "min_requested_frame", min_requested_frame, "max_requested_frame", max_requested_frame, "clip->Position()", clip->Position(), "does_clip_intersect", does_clip_intersect);
// Open (or schedule for closing) this clip, based on if it's intersecting or not
- #pragma omp critical (reader_lock)
update_open_clips(clip, does_clip_intersect);
// Clip is visible
@@ -1424,12 +1321,6 @@ void Timeline::apply_json_to_effects(Json::Value change, EffectBase* existing_ef
// Add Effect to Timeline
AddEffect(e);
-
- // Clear cache on parent clip (if any)
- Clip* parent_clip = (Clip*) e->ParentClip();
- if (parent_clip && parent_clip->GetCache()) {
- parent_clip->GetCache()->Clear();
- }
}
} else if (change_type == "update") {
@@ -1442,12 +1333,6 @@ void Timeline::apply_json_to_effects(Json::Value change, EffectBase* existing_ef
int64_t old_ending_frame = ((existing_effect->Position() + existing_effect->Duration()) * info.fps.ToDouble()) + 1;
final_cache->Remove(old_starting_frame - 8, old_ending_frame + 8);
- // Clear cache on parent clip (if any)
- Clip* parent_clip = (Clip*) existing_effect->ParentClip();
- if (parent_clip && parent_clip->GetCache()) {
- parent_clip->GetCache()->Clear();
- }
-
// Update effect properties from JSON
existing_effect->SetJsonValue(change["value"]);
}
@@ -1462,12 +1347,6 @@ void Timeline::apply_json_to_effects(Json::Value change, EffectBase* existing_ef
int64_t old_ending_frame = ((existing_effect->Position() + existing_effect->Duration()) * info.fps.ToDouble()) + 1;
final_cache->Remove(old_starting_frame - 8, old_ending_frame + 8);
- // Clear cache on parent clip (if any)
- Clip* parent_clip = (Clip*) existing_effect->ParentClip();
- if (parent_clip && parent_clip->GetCache()) {
- parent_clip->GetCache()->Clear();
- }
-
// Remove effect from timeline
RemoveEffect(existing_effect);
}
@@ -1611,7 +1490,6 @@ void Timeline::ClearAllCache() {
for (auto clip : clips)
{
// Clear cache on clip
- clip->GetCache()->Clear();
clip->Reader()->GetCache()->Clear();
// Clear nested Reader (if any)
@@ -1637,4 +1515,7 @@ void Timeline::SetMaxSize(int width, int height) {
// Update preview settings
preview_width = display_ratio_size.width();
preview_height = display_ratio_size.height();
+
+ // Update timeline cache size
+ final_cache->SetMaxBytesFromInfo(max_concurrent_frames * 4, preview_width, preview_height, info.sample_rate, info.channels);
}
\ No newline at end of file
diff --git a/src/Timeline.h b/src/Timeline.h
index afc7f8053..16ceda8ed 100644
--- a/src/Timeline.h
+++ b/src/Timeline.h
@@ -179,6 +179,8 @@ namespace openshot {
bool managed_cache; ///< Does this timeline instance manage the cache object
std::string path; ///< Optional path of loaded UTF-8 OpenShot JSON project file
std::mutex get_frame_mutex; ///< Mutex to protect GetFrame method from different threads calling it
+ int max_concurrent_frames; ///< Max concurrent frames to process at one time
+
std::map> tracked_objects; ///< map of TrackedObjectBBoxes and their IDs
/// Process a new layer of video or audio
@@ -204,8 +206,8 @@ namespace openshot {
/// @param include Include or Exclude intersecting clips
std::vector find_intersecting_clips(int64_t requested_frame, int number_of_frames, bool include);
- /// Get or generate a blank frame
- std::shared_ptr GetOrCreateFrame(openshot::Clip* clip, int64_t number);
+ /// Get a clip's frame or generate a blank frame
+ std::shared_ptr GetOrCreateFrame(std::shared_ptr background_frame, openshot::Clip* clip, int64_t number);
/// Apply effects to the source frame (if any)
std::shared_ptr apply_effects(std::shared_ptr frame, int64_t timeline_frame_number, int layer);
diff --git a/tests/Clip_Tests.cpp b/tests/Clip_Tests.cpp
index 87272427c..5d8ab43ce 100644
--- a/tests/Clip_Tests.cpp
+++ b/tests/Clip_Tests.cpp
@@ -258,7 +258,7 @@ TEST(Verify_Parent_Timeline)
// Check size of frame image (with an associated timeline)
CHECK_EQUAL(c1.GetFrame(1)->GetImage()->width(), 640);
- CHECK_EQUAL(c1.GetFrame(1)->GetImage()->height(), 480);
+ CHECK_EQUAL(c1.GetFrame(1)->GetImage()->height(), 360);
}
} // SUITE
diff --git a/tests/Settings_Tests.cpp b/tests/Settings_Tests.cpp
index 62dd2e23f..ce5dc2824 100644
--- a/tests/Settings_Tests.cpp
+++ b/tests/Settings_Tests.cpp
@@ -43,7 +43,6 @@ TEST(Settings_Default_Constructor)
CHECK_EQUAL(12, s->OMP_THREADS);
CHECK_EQUAL(false, s->HIGH_QUALITY_SCALING);
- CHECK_EQUAL(false, s->WAIT_FOR_VIDEO_PROCESSING_TASK);
}
TEST(Settings_Change_Settings)
@@ -52,13 +51,10 @@ TEST(Settings_Change_Settings)
Settings *s = Settings::Instance();
s->OMP_THREADS = 8;
s->HIGH_QUALITY_SCALING = true;
- s->WAIT_FOR_VIDEO_PROCESSING_TASK = true;
CHECK_EQUAL(8, s->OMP_THREADS);
CHECK_EQUAL(true, s->HIGH_QUALITY_SCALING);
- CHECK_EQUAL(true, s->WAIT_FOR_VIDEO_PROCESSING_TASK);
CHECK_EQUAL(8, Settings::Instance()->OMP_THREADS);
CHECK_EQUAL(true, Settings::Instance()->HIGH_QUALITY_SCALING);
- CHECK_EQUAL(true, Settings::Instance()->WAIT_FOR_VIDEO_PROCESSING_TASK);
}