From 3daa5bdb7bf5dcf670dec23a9f789f48a8e54af6 Mon Sep 17 00:00:00 2001
From: Jonathan Thomas
Date: Wed, 17 Feb 2021 19:44:44 -0600
Subject: [PATCH] Large refactor of OpenMP integration (or rather, the removal
 of OpenMP from the Timeline and FFmpeg-related classes)

The logic behind this decision was based on profiling libopenshot and the
amount of wasted CPU idle time across its many threads: the slow code paths
are still synchronous, so all the threads must wait on each other, adding
extra overhead. This change therefore removes many unneeded threads and
simplifies the underlying Timeline->Clip->FFmpegReader flow. It also removes
2 calls to QPainter::drawImage by improving the flexibility of Clip->GetFrame.
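Note: the profiling referenced above amounts to timing GetFrame() end to end.
A minimal sketch of that measurement (illustrative only, not part of the
commit; it assumes the public FFmpegReader API used elsewhere in this diff
and a hypothetical input path):

    #include <chrono>
    #include <iostream>
    #include "FFmpegReader.h"

    int main() {
        openshot::FFmpegReader reader("test.mp4");  // hypothetical input file
        reader.Open();
        for (int64_t n = 1; n <= 250; n++) {
            auto t0 = std::chrono::high_resolution_clock::now();
            auto frame = reader.GetFrame(n);        // synchronous decode path
            auto t1 = std::chrono::high_resolution_clock::now();
            std::chrono::duration<double, std::milli> dt = t1 - t0;
            std::cout << "frame " << n << ": " << dt.count() << " ms\n";
        }
        reader.Close();
        return 0;
    }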
---
 .gitignore               |   2 +-
 examples/Example.cpp     |  71 +--
 src/Clip.cpp             | 173 +++----
 src/Clip.h               |  11 +-
 src/FFmpegReader.cpp     | 568 +++++++++++-----------
 src/FFmpegWriter.cpp     | 991 +++++++++++++++++++--------------------
 src/Frame.cpp            | 130 +++--
 src/KeyFrame.cpp         |   1 +
 src/Settings.cpp         |   1 -
 src/Settings.h           |   9 -
 src/Timeline.cpp         | 292 ++++--------
 src/Timeline.h           |   4 +-
 src/ZmqLogger.cpp        |  20 +-
 tests/Clip_Tests.cpp     |   2 +-
 tests/Settings_Tests.cpp |   4 -
 15 files changed, 1037 insertions(+), 1242 deletions(-)

diff --git a/.gitignore b/.gitignore
index 5d00d580c..3f706e28c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -6,6 +6,6 @@
 .project
 .cproject
 /.metadata/
+cmake-build-debug/*
 tags
 *~
-

diff --git a/examples/Example.cpp b/examples/Example.cpp
index 2cd770712..16d6a675d 100644
--- a/examples/Example.cpp
+++ b/examples/Example.cpp
@@ -39,51 +39,52 @@ using namespace openshot;
 
 int main(int argc, char* argv[]) {
 
-    Settings *s = Settings::Instance();
-    s->HARDWARE_DECODER = 2; // 1 VA-API, 2 NVDEC, 6 VDPAU
-    s->HW_DE_DEVICE_SET = 0;
+    // Types for storing time durations in whole and fractional milliseconds
+    using ms = std::chrono::milliseconds;
+    using s = std::chrono::seconds;
+    using double_ms = std::chrono::duration<double, std::milli>;
 
-    std::string input_filepath = TEST_MEDIA_PATH;
-    input_filepath += "sintel_trailer-720p.mp4";
+    // Track total time
+    const auto total_time = double_ms(0.0);
 
-    FFmpegReader r9(input_filepath);
+    // FFmpeg Reader performance test
+    const auto total_1 = std::chrono::high_resolution_clock::now();
+    FFmpegReader r9("/home/jonathan/Videos/sintel_trailer-1080p.mp4");
     r9.Open();
-    r9.DisplayInfo();
-
-    /* WRITER ---------------- */
-    FFmpegWriter w9("metadata.mp4");
+    for (long int frame = 1; frame <= 1000; frame++)
+    {
+        const auto time1 = std::chrono::high_resolution_clock::now();
+        std::shared_ptr<Frame> f = r9.GetFrame(frame);
+        const auto time2 = std::chrono::high_resolution_clock::now();
+        std::cout << "FFmpegReader: " << frame << " (" << double_ms(time2 - time1).count() << " ms)" << std::endl;
+    }
+    const auto total_2 = std::chrono::high_resolution_clock::now();
+    auto total_sec = std::chrono::duration_cast<ms>(total_2 - total_1);
+    std::cout << "FFmpegReader TOTAL: " << total_sec.count() << " ms" << std::endl;
+    r9.Close();
 
-    // Set options
-    w9.SetAudioOptions(true, "libmp3lame", r9.info.sample_rate, r9.info.channels, r9.info.channel_layout, 128000);
-    w9.SetVideoOptions(true, "libx264", r9.info.fps, 1024, 576, Fraction(1,1), false, false, 3000000);
-    w9.info.metadata["title"] = "testtest";
-    w9.info.metadata["artist"] = "aaa";
-    w9.info.metadata["album"] = "bbb";
-    w9.info.metadata["year"] = "2015";
-    w9.info.metadata["description"] = "ddd";
-    w9.info.metadata["comment"] = "eee";
-    w9.info.metadata["comment"] = "comment";
-    w9.info.metadata["copyright"] = "copyright OpenShot!";
-
-    // Open writer
-    w9.Open();
+    // Timeline Reader performance test
+    Timeline tm(r9.info.width, r9.info.height, r9.info.fps, r9.info.sample_rate, r9.info.channels, r9.info.channel_layout);
+    Clip *c = new Clip(&r9);
+    tm.AddClip(c);
+    tm.Open();
 
-    for (long int frame = 1; frame <= 100; frame++)
+    const auto total_3 = std::chrono::high_resolution_clock::now();
+    for (long int frame = 1; frame <= 1000; frame++)
     {
-        //int frame_number = (rand() % 750) + 1;
-        int frame_number = frame;
-        std::shared_ptr<Frame> f = r9.GetFrame(frame_number);
-        w9.WriteFrame(f);
+        const auto time1 = std::chrono::high_resolution_clock::now();
+        std::shared_ptr<Frame> f = tm.GetFrame(frame);
+        const auto time2 = std::chrono::high_resolution_clock::now();
+        std::cout << "Timeline: " << frame << " (" << double_ms(time2 - time1).count() << " ms)" << std::endl;
     }
+    const auto total_4 = std::chrono::high_resolution_clock::now();
+    total_sec = std::chrono::duration_cast<ms>(total_4 - total_3);
+    std::cout << "Timeline TOTAL: " << total_sec.count() << " ms" << std::endl;
+    tm.Close();
 
-    // Close writer & reader
-    w9.Close();
-
-    // Close timeline
-    r9.Close();
-
-    std::cout << "Completed successfully!" << std::endl;
+    std::cout << "Completed successfully!" << std::endl;
 
     return 0;
 }
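Note on the duration aliases above: double_ms needs a floating-point
representation to keep sub-millisecond precision, while duration_cast<ms>
truncates to whole milliseconds. A standalone sketch of the distinction,
using only the standard <chrono> library:

    #include <chrono>
    #include <iostream>

    int main() {
        using ms = std::chrono::milliseconds;
        using double_ms = std::chrono::duration<double, std::milli>;

        auto t0 = std::chrono::steady_clock::now();
        // ... timed work would go here ...
        auto t1 = std::chrono::steady_clock::now();

        // Fractional milliseconds survive in double_ms (e.g. 0.042 ms)...
        std::cout << double_ms(t1 - t0).count() << " ms\n";
        // ...but duration_cast<ms> truncates toward zero (e.g. 0 ms).
        std::cout << std::chrono::duration_cast<ms>(t1 - t0).count() << " ms\n";
        return 0;
    }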
diff --git a/src/Clip.cpp b/src/Clip.cpp
index b98df04b7..b82703a19 100644
--- a/src/Clip.cpp
+++ b/src/Clip.cpp
@@ -346,7 +346,7 @@ std::shared_ptr<Frame> Clip::GetFrame(int64_t frame_number)
 }
 
 // Use an existing openshot::Frame object and draw this Clip's frame onto it
-std::shared_ptr<Frame> Clip::GetFrame(std::shared_ptr<Frame> frame, int64_t frame_number)
+std::shared_ptr<Frame> Clip::GetFrame(std::shared_ptr<Frame> background_frame, int64_t frame_number)
 {
     // Check for open reader (or throw exception)
     if (!is_open)
@@ -376,7 +376,7 @@ std::shared_ptr<Frame> Clip::GetFrame(std::shared_ptr<Frame> frame, in
     int enabled_video = has_video.GetInt(frame_number);
     if (enabled_video == -1 && reader && reader->info.has_video)
         enabled_video = 1;
-    else if (enabled_video == -1 && reader && !reader->info.has_audio)
+    else if (enabled_video == -1 && reader && !reader->info.has_video)
         enabled_video = 0;
 
     // Is a time map detected
@@ -386,27 +386,14 @@ std::shared_ptr<Frame> Clip::GetFrame(std::shared_ptr<Frame> frame, in
         new_frame_number = time_mapped_number;
 
     // Now that we have re-mapped what frame number is needed, go and get the frame pointer
-    std::shared_ptr<Frame> original_frame;
-    original_frame = GetOrCreateFrame(new_frame_number);
-
-    // Copy the image from the odd field
-    if (enabled_video)
-        frame->AddImage(std::make_shared<QImage>(*original_frame->GetImage()));
-
-    // Loop through each channel, add audio
-    if (enabled_audio && reader->info.has_audio)
-        for (int channel = 0; channel < original_frame->GetAudioChannelsCount(); channel++)
-            frame->AddAudio(true, channel, 0, original_frame->GetAudioSamples(channel), original_frame->GetAudioSamplesCount(), 1.0);
+    std::shared_ptr<Frame> original_frame = GetOrCreateFrame(new_frame_number);
 
     // Get time mapped frame number (used to increase speed, change direction, etc...)
     // TODO: Handle variable # of samples, since this resamples audio for different speeds (only when time curve is set)
-    get_time_mapped_frame(frame, new_frame_number);
-
-    // Adjust # of samples to match requested (the interaction with time curves will make this tricky)
-    // TODO: Implement move samples to/from next frame
+    get_time_mapped_frame(original_frame, new_frame_number);
 
     // Apply effects to the frame (if any)
-    apply_effects(frame);
+    apply_effects(original_frame);
 
     // Determine size of image (from Timeline or Reader)
     int width = 0;
@@ -422,13 +409,13 @@ std::shared_ptr<Frame> Clip::GetFrame(std::shared_ptr<Frame> frame, in
     }
 
     // Apply keyframe / transforms
-    apply_keyframes(frame, width, height);
+    apply_keyframes(original_frame, background_frame->GetImage());
 
-    // Cache frame
-    cache.Add(frame);
+    // Cache frame
+    cache.Add(original_frame);
 
     // Return processed 'frame'
-    return frame;
+    return original_frame;
 }
 else
     // Throw error if reader not initialized
@@ -709,7 +696,6 @@ std::shared_ptr<Frame> Clip::GetOrCreateFrame(int64_t number)
     // Create a new copy of reader frame
     // This allows a clip to modify the pixels and audio of this frame without
     // changing the underlying reader's frame data
-    //std::shared_ptr<Frame> reader_copy(new Frame(number, 1, 1, "#000000", reader_frame->GetAudioSamplesCount(), reader_frame->GetAudioChannelsCount()));
     auto reader_copy = std::make_shared<Frame>(*reader_frame.get());
     reader_copy->SampleRate(reader_frame->SampleRate());
     reader_copy->ChannelsLayout(reader_frame->ChannelsLayout());
@@ -1126,18 +1112,84 @@ bool Clip::isEqual(double a, double b)
     return fabs(a - b) < 0.000001;
 }
 
+// Apply keyframes to the source frame (if any)
+void Clip::apply_keyframes(std::shared_ptr<Frame> frame, std::shared_ptr<QImage> background_canvas) {
+    // Skip out if video was disabled or only an audio frame (no visualisation in use)
+    if (has_video.GetInt(frame->number) == 0 ||
+        (!Waveform() && !Reader()->info.has_video))
+        // Skip the rest of the image processing for performance reasons
+        return;
+
+    // Get image from clip
+    std::shared_ptr<QImage> source_image = frame->GetImage();
+
+    // Size of final image
+    int width = background_canvas->width();
+    int height = background_canvas->height();
+
+    // Get transform from clip's keyframes
+    QTransform transform = get_transform(frame, width, height);
+
+    // Debug output
+    ZmqLogger::Instance()->AppendDebugMethod("Clip::ApplyKeyframes (Transform: Composite Image Layer: Prepare)", "frame->number", frame->number);
+
+    // Load timeline's new frame image into a QPainter
+    QPainter painter(background_canvas.get());
+    painter.setRenderHints(QPainter::Antialiasing | QPainter::SmoothPixmapTransform | QPainter::TextAntialiasing, true);
+
+    // Apply transform (translate, rotate, scale)
+    painter.setTransform(transform);
+
+    // Composite a new layer onto the image
+    painter.setCompositionMode(QPainter::CompositionMode_SourceOver);
+    painter.drawImage(0, 0, *source_image);
+
+    if (timeline) {
+        Timeline *t = (Timeline *) timeline;
+
+        // Draw frame #'s on top of image (if needed)
+        if (display != FRAME_DISPLAY_NONE) {
+            std::stringstream frame_number_str;
+            switch (display) {
+                case (FRAME_DISPLAY_NONE):
+                    // This is only here to prevent unused-enum warnings
+                    break;
+
+                case (FRAME_DISPLAY_CLIP):
+                    frame_number_str << frame->number;
+                    break;
+
+                case (FRAME_DISPLAY_TIMELINE):
+                    frame_number_str << (position * t->info.fps.ToFloat()) + frame->number;
+                    break;
+
+                case (FRAME_DISPLAY_BOTH):
+                    frame_number_str << (position * t->info.fps.ToFloat()) + frame->number << " (" << frame->number << ")";
+                    break;
+            }
+
+            // Draw frame number on top of image
+            painter.setPen(QColor("#ffffff"));
+            painter.drawText(20, 20, QString(frame_number_str.str().c_str()));
+        }
+    }
+    painter.end();
+
+    // Add new QImage to frame
+    frame->AddImage(background_canvas);
+}
 
 // Apply keyframes to the source frame (if any)
-void Clip::apply_keyframes(std::shared_ptr<Frame> frame, int width, int height)
+QTransform Clip::get_transform(std::shared_ptr<Frame> frame, int width, int height)
 {
-    // Get actual frame image data
-    std::shared_ptr<QImage> source_image = frame->GetImage();
+    // Get image from clip
+    std::shared_ptr<QImage> source_image = frame->GetImage();
 
     /* REPLACE IMAGE WITH WAVEFORM IMAGE (IF NEEDED) */
     if (Waveform()) {
         // Debug output
-        ZmqLogger::Instance()->AppendDebugMethod("Clip::apply_keyframes (Generate Waveform Image)", "frame->number", frame->number, "Waveform()", Waveform());
+        ZmqLogger::Instance()->AppendDebugMethod("Clip::get_transform (Generate Waveform Image)", "frame->number", frame->number, "Waveform()", Waveform());
 
         // Get the color of the waveform
         int red = wave_color.red.GetInt(frame->number);
@@ -1170,7 +1222,7 @@ void Clip::apply_keyframes(std::shared_ptr<Frame> frame, int width, int height)
     }
 
     // Debug output
-    ZmqLogger::Instance()->AppendDebugMethod("Clip::apply_keyframes (Set Alpha & Opacity)", "alpha_value", alpha_value, "frame->number", frame->number);
+    ZmqLogger::Instance()->AppendDebugMethod("Clip::get_transform (Set Alpha & Opacity)", "alpha_value", alpha_value, "frame->number", frame->number);
 }
 
 /* RESIZE SOURCE IMAGE - based on scale type */
@@ -1181,21 +1233,21 @@
     switch (scale) {
         case (SCALE_FIT): {
             source_size.scale(width, height, Qt::KeepAspectRatio);
 
             // Debug output
-            ZmqLogger::Instance()->AppendDebugMethod("Clip::apply_keyframes (Scale: SCALE_FIT)", "frame->number", frame->number, "source_width", source_size.width(), "source_height", source_size.height());
+            ZmqLogger::Instance()->AppendDebugMethod("Clip::get_transform (Scale: SCALE_FIT)", "frame->number", frame->number, "source_width", source_size.width(), "source_height", source_size.height());
             break;
         }
         case (SCALE_STRETCH): {
             source_size.scale(width, height, Qt::IgnoreAspectRatio);
 
             // Debug output
-            ZmqLogger::Instance()->AppendDebugMethod("Clip::apply_keyframes (Scale: SCALE_STRETCH)", "frame->number", frame->number, "source_width", source_size.width(), "source_height", source_size.height());
+            ZmqLogger::Instance()->AppendDebugMethod("Clip::get_transform (Scale: SCALE_STRETCH)", "frame->number", frame->number, "source_width", source_size.width(), "source_height", source_size.height());
             break;
         }
         case (SCALE_CROP): {
             source_size.scale(width, height, Qt::KeepAspectRatioByExpanding);
 
             // Debug output
-            ZmqLogger::Instance()->AppendDebugMethod("Clip::apply_keyframes (Scale: SCALE_CROP)", "frame->number", frame->number, "source_width", source_size.width(), "source_height", source_size.height());
+            ZmqLogger::Instance()->AppendDebugMethod("Clip::get_transform (Scale: SCALE_CROP)", "frame->number", frame->number, "source_width", source_size.width(), "source_height", source_size.height());
             break;
         }
         case (SCALE_NONE): {
@@ -1207,7 +1259,7 @@ void Clip::apply_keyframes(std::shared_ptr<Frame> frame, int width, int height)
             source_size.scale(width * source_width_ratio, height * source_height_ratio, Qt::KeepAspectRatio);
 
             // Debug output
-            ZmqLogger::Instance()->AppendDebugMethod("Clip::apply_keyframes (Scale: SCALE_NONE)", "frame->number", frame->number, "source_width", source_size.width(), "source_height", source_size.height());
+            ZmqLogger::Instance()->AppendDebugMethod("Clip::get_transform (Scale: SCALE_NONE)", "frame->number", frame->number, "source_width", source_size.width(), "source_height", source_size.height());
             break;
         }
     }
@@ -1258,7 +1310,7 @@
     }
 
     // Debug output
-    ZmqLogger::Instance()->AppendDebugMethod("Clip::apply_keyframes (Gravity)", "frame->number", frame->number, "source_clip->gravity", gravity, "scaled_source_width", scaled_source_width, "scaled_source_height", scaled_source_height);
+    ZmqLogger::Instance()->AppendDebugMethod("Clip::get_transform (Gravity)", "frame->number", frame->number, "source_clip->gravity", gravity, "scaled_source_width", scaled_source_width, "scaled_source_height", scaled_source_height);
 
     /* LOCATION, ROTATION, AND SCALE */
     float r = rotation.GetValue(frame->number); // rotate in degrees
@@ -1272,7 +1324,7 @@
     QTransform transform;
 
     // Transform source image (if needed)
-    ZmqLogger::Instance()->AppendDebugMethod("Clip::apply_keyframes (Build QTransform - if needed)", "frame->number", frame->number, "x", x, "y", y, "r", r, "sx", sx, "sy", sy);
+    ZmqLogger::Instance()->AppendDebugMethod("Clip::get_transform (Build QTransform - if needed)", "frame->number", frame->number, "x", x, "y", y, "r", r, "sx", sx, "sy", sy);
 
     if (!isEqual(x, 0) || !isEqual(y, 0)) {
         // TRANSLATE/MOVE CLIP
@@ -1297,56 +1349,5 @@
         transform.scale(source_width_scale, source_height_scale);
     }
 
-    // Debug output
-    ZmqLogger::Instance()->AppendDebugMethod("Clip::apply_keyframes (Transform: Composite Image Layer: Prepare)", "frame->number", frame->number);
-
-    /* COMPOSITE SOURCE IMAGE (LAYER) ONTO FINAL IMAGE */
-    auto new_image = std::make_shared<QImage>(QSize(width, height), source_image->format());
-    new_image->fill(QColor(QString::fromStdString("#00000000")));
-
-    // Load timeline's new frame image into a QPainter
-    QPainter painter(new_image.get());
-    painter.setRenderHints(QPainter::Antialiasing | QPainter::SmoothPixmapTransform | QPainter::TextAntialiasing, true);
-
-    // Apply transform (translate, rotate, scale)
-    painter.setTransform(transform);
-
-    // Composite a new layer onto the image
-    painter.setCompositionMode(QPainter::CompositionMode_SourceOver);
-    painter.drawImage(0, 0, *source_image);
-
-    if (timeline) {
-        Timeline *t = (Timeline *) timeline;
-
-        // Draw frame #'s on top of image (if needed)
-        if (display != FRAME_DISPLAY_NONE) {
-            std::stringstream frame_number_str;
-            switch (display) {
-                case (FRAME_DISPLAY_NONE):
-                    // This is only here to prevent unused-enum warnings
-                    break;
-
-                case (FRAME_DISPLAY_CLIP):
-                    frame_number_str << frame->number;
-                    break;
-
-                case (FRAME_DISPLAY_TIMELINE):
-                    frame_number_str << (position * t->info.fps.ToFloat()) + frame->number;
-                    break;
-
-                case (FRAME_DISPLAY_BOTH):
-                    frame_number_str << (position * t->info.fps.ToFloat()) + frame->number << " (" << frame->number << ")";
-                    break;
-            }
-
-            // Draw frame number on top of image
-            painter.setPen(QColor("#ffffff"));
-            painter.drawText(20, 20, QString(frame_number_str.str().c_str()));
-        }
-    }
-
-    painter.end();
-
-    // Add new QImage to frame
-    frame->AddImage(new_image);
+    return transform;
 }
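Note: the apply_keyframes()/get_transform() split above removes the
intermediate QImage and one of the two QPainter::drawImage calls; each clip
now paints straight onto the caller's canvas. A minimal, illustrative sketch
of that pattern in plain Qt (the function and parameter names here are
hypothetical, not part of the patch):

    #include <QImage>
    #include <QPainter>
    #include <QTransform>

    // Composite 'source' onto 'canvas' in place with a single drawImage() call.
    void composite_layer(QImage &canvas, const QImage &source, const QTransform &transform) {
        QPainter painter(&canvas);
        painter.setRenderHints(QPainter::Antialiasing | QPainter::SmoothPixmapTransform, true);
        painter.setTransform(transform);  // translate / rotate / scale from keyframes
        painter.setCompositionMode(QPainter::CompositionMode_SourceOver);
        painter.drawImage(0, 0, source);
        painter.end();
    }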
diff --git a/src/Clip.h b/src/Clip.h
index ca303c16b..104103941 100644
--- a/src/Clip.h
+++ b/src/Clip.h
@@ -139,8 +139,11 @@ namespace openshot {
         /// Apply effects to the source frame (if any)
         void apply_effects(std::shared_ptr<Frame> frame);
 
-        /// Apply keyframes to the source frame (if any)
-        void apply_keyframes(std::shared_ptr<Frame> frame, int width, int height);
+        /// Apply keyframes to an openshot::Frame and use an existing QImage as a background image (if any)
+        void apply_keyframes(std::shared_ptr<Frame> frame, std::shared_ptr<QImage> background_canvas);
+
+        /// Get QTransform from keyframes
+        QTransform get_transform(std::shared_ptr<Frame> frame, int width, int height);
 
         /// Get file extension
         std::string get_file_extension(std::string path);
@@ -226,9 +229,9 @@
         /// rendered.
         ///
         /// @returns The modified openshot::Frame object
-        /// @param frame This is ignored on Clip, due to caching optimizations. This frame instance is clobbered with the source frame.
+        /// @param background_frame The frame object to use as a background canvas (i.e. an existing Timeline openshot::Frame instance)
         /// @param frame_number The frame number (starting at 1) of the clip or effect on the timeline.
-        std::shared_ptr<Frame> GetFrame(std::shared_ptr<Frame> frame, int64_t frame_number) override;
+        std::shared_ptr<Frame> GetFrame(std::shared_ptr<Frame> background_frame, int64_t frame_number);
 
         /// Open the internal reader
         void Open() override;
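Note: with the signature above, a compositor can hand every clip the same
timeline frame and let each layer paint onto it in turn. A hedged sketch of
such a compositing loop (the helper and its inputs are hypothetical, shown
only to illustrate the calling convention):

    // Each clip draws onto the shared canvas frame, bottom layer first.
    std::shared_ptr<openshot::Frame> composite_layers(
            std::vector<openshot::Clip*> &clips,            // sorted by layer
            std::shared_ptr<openshot::Frame> canvas_frame,  // blank timeline frame
            int64_t frame_number) {
        for (auto *clip : clips)
            canvas_frame = clip->GetFrame(canvas_frame, frame_number);
        return canvas_frame;
    }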
diff --git a/src/FFmpegReader.cpp b/src/FFmpegReader.cpp
index 766d90473..a5e18003d 100644
--- a/src/FFmpegReader.cpp
+++ b/src/FFmpegReader.cpp
@@ -95,12 +95,6 @@ FFmpegReader::FFmpegReader(const std::string& path, bool inspect_reader)
       current_video_frame(0), has_missing_frames(false),
       num_packets_since_video_frame(0), num_checks_since_final(0), packet(NULL), max_concurrent_frames(OPEN_MP_NUM_PROCESSORS) {
 
-    // Configure OpenMP parallelism
-    // Default number of threads per section
-    omp_set_num_threads(max_concurrent_frames);
-    // Allow nested parallel sections as deeply as supported
-    omp_set_max_active_levels(OPEN_MP_MAX_ACTIVE);
-
     // Initialize FFMpeg, and register all formats and codecs
     AV_REGISTER_ALL
     AVCODEC_REGISTER_ALL
@@ -847,47 +841,44 @@ std::shared_ptr<Frame> FFmpegReader::GetFrame(int64_t requested_frame) {
         // Return the cached frame
         return frame;
     } else {
-#pragma omp critical (ReadStream)
-        {
-            // Check the cache a 2nd time (due to a potential previous lock)
-            frame = final_cache.GetFrame(requested_frame);
-            if (frame) {
-                // Debug output
-                ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::GetFrame", "returned cached frame on 2nd look", requested_frame);
-
-                // Return the cached frame
-            } else {
-                // Frame is not in cache
-                // Reset seek count
-                seek_count = 0;
-
-                // Check for first frame (always need to get frame 1 before other frames, to correctly calculate offsets)
-                if (last_frame == 0 && requested_frame != 1)
-                    // Get first frame
-                    ReadStream(1);
-
-                // Are we within X frames of the requested frame?
-                int64_t diff = requested_frame - last_frame;
-                if (diff >= 1 && diff <= 20) {
-                    // Continue walking the stream
-                    frame = ReadStream(requested_frame);
-                } else {
-                    // Greater than 30 frames away, or backwards, we need to seek to the nearest key frame
-                    if (enable_seek)
-                        // Only seek if enabled
-                        Seek(requested_frame);
-
-                    else if (!enable_seek && diff < 0) {
-                        // Start over, since we can't seek, and the requested frame is smaller than our position
-                        Close();
-                        Open();
-                    }
-
-                    // Then continue walking the stream
-                    frame = ReadStream(requested_frame);
-                }
-            }
-        } //omp critical
+        // Check the cache a 2nd time (due to a potential previous lock)
+        frame = final_cache.GetFrame(requested_frame);
+        if (frame) {
+            // Debug output
+            ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::GetFrame", "returned cached frame on 2nd look", requested_frame);
+
+            // Return the cached frame
+        } else {
+            // Frame is not in cache
+            // Reset seek count
+            seek_count = 0;
+
+            // Check for first frame (always need to get frame 1 before other frames, to correctly calculate offsets)
+            if (last_frame == 0 && requested_frame != 1)
+                // Get first frame
+                ReadStream(1);
+
+            // Are we within X frames of the requested frame?
+            int64_t diff = requested_frame - last_frame;
+            if (diff >= 1 && diff <= 20) {
+                // Continue walking the stream
+                frame = ReadStream(requested_frame);
+            } else {
+                // More than 20 frames away (or backwards), so we need to seek to the nearest key frame
+                if (enable_seek)
+                    // Only seek if enabled
+                    Seek(requested_frame);
+
+                else if (!enable_seek && diff < 0) {
+                    // Start over, since we can't seek, and the requested frame is smaller than our position
+                    Close();
+                    Open();
+                }
+
+                // Then continue walking the stream
+                frame = ReadStream(requested_frame);
+            }
+        }
         return frame;
     }
 }
@@ -902,141 +893,129 @@ std::shared_ptr<Frame> FFmpegReader::ReadStream(int64_t requested_frame) {
     // Minimum number of packets to process (for performance reasons)
     int packets_processed = 0;
-    int minimum_packets = max_concurrent_frames;
+    int minimum_packets = 1;
     int max_packets = 4096;
 
     // Debug output
     ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ReadStream", "requested_frame", requested_frame, "max_concurrent_frames", max_concurrent_frames);
 
-#pragma omp parallel
-    {
-#pragma omp single
-        {
-            // Loop through the stream until the correct frame is found
-            while (true) {
-                // Get the next packet into a local variable called packet
-                packet_error = GetNextPacket();
-
-                int processing_video_frames_size = 0;
-                int processing_audio_frames_size = 0;
-                {
-                    const GenericScopedLock<CriticalSection> lock(processingCriticalSection);
-                    processing_video_frames_size = processing_video_frames.size();
-                    processing_audio_frames_size = processing_audio_frames.size();
-                }
-
-                // Wait if too many frames are being processed
-                while (processing_video_frames_size + processing_audio_frames_size >= minimum_packets) {
-                    std::this_thread::sleep_for(std::chrono::milliseconds(3));
-                    const GenericScopedLock<CriticalSection> lock(processingCriticalSection);
-                    processing_video_frames_size = processing_video_frames.size();
-                    processing_audio_frames_size = processing_audio_frames.size();
-                }
+    // Loop through the stream until the correct frame is found
+    while (true) {
+        // Get the next packet into a local variable called packet
+        packet_error = GetNextPacket();
+
+        int processing_video_frames_size = 0;
+        int processing_audio_frames_size = 0;
+        {
+            const GenericScopedLock<CriticalSection> lock(processingCriticalSection);
+            processing_video_frames_size = processing_video_frames.size();
+            processing_audio_frames_size = processing_audio_frames.size();
+        }
 
-                // Get the next packet (if any)
-                if (packet_error < 0) {
-                    // Break loop when no more packets found
-                    end_of_stream = true;
-                    break;
-                }
+        // Wait if too many frames are being processed
+        while (processing_video_frames_size + processing_audio_frames_size >= minimum_packets) {
+            std::this_thread::sleep_for(std::chrono::milliseconds(3));
+            const GenericScopedLock<CriticalSection> lock(processingCriticalSection);
+            processing_video_frames_size = processing_video_frames.size();
+            processing_audio_frames_size = processing_audio_frames.size();
+        }
 
-                // Debug output
-                ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ReadStream (GetNextPacket)", "requested_frame", requested_frame, "processing_video_frames_size", processing_video_frames_size, "processing_audio_frames_size", processing_audio_frames_size, "minimum_packets", minimum_packets, "packets_processed", packets_processed, "is_seeking", is_seeking);
+        // Get the next packet (if any)
+        if (packet_error < 0) {
+            // Break loop when no more packets found
+            end_of_stream = true;
+            break;
+        }
 
-                // Video packet
-                if (info.has_video && packet->stream_index == videoStream) {
-                    // Reset this counter, since we have a video packet
-                    num_packets_since_video_frame = 0;
+        // Debug output
+        ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ReadStream (GetNextPacket)", "requested_frame", requested_frame, "processing_video_frames_size", processing_video_frames_size, "processing_audio_frames_size", processing_audio_frames_size, "minimum_packets", minimum_packets, "packets_processed", packets_processed, "is_seeking", is_seeking);
-                    // Check the status of a seek (if any)
-                    if (is_seeking)
-#pragma omp critical (openshot_seek)
-                        check_seek = CheckSeek(true);
-                    else
-                        check_seek = false;
+        // Video packet
+        if (info.has_video && packet->stream_index == videoStream) {
+            // Reset this counter, since we have a video packet
+            num_packets_since_video_frame = 0;
 
-                    if (check_seek) {
-                        // Jump to the next iteration of this loop
-                        continue;
-                    }
+            // Check the status of a seek (if any)
+            if (is_seeking) {
+                check_seek = CheckSeek(true);
+            } else {
+                check_seek = false;
+            }
 
-                    // Packet may become NULL on Close inside Seek if CheckSeek returns false
-                    if (!packet)
-                        // Jump to the next iteration of this loop
-                        continue;
+            if (check_seek) {
+                // Jump to the next iteration of this loop
+                continue;
+            }
 
-                    // Get the AVFrame from the current packet
-                    frame_finished = GetAVFrame();
+            // Packet may become NULL on Close inside Seek if CheckSeek returns false
+            if (!packet) {
+                // Jump to the next iteration of this loop
+                continue;
+            }
 
-                    // Check if the AVFrame is finished and set it
-                    if (frame_finished) {
-                        // Update PTS / Frame Offset (if any)
-                        UpdatePTSOffset(true);
+            // Get the AVFrame from the current packet
+            frame_finished = GetAVFrame();
 
-                        // Process Video Packet
-                        ProcessVideoPacket(requested_frame);
+            // Check if the AVFrame is finished and set it
+            if (frame_finished) {
+                // Update PTS / Frame Offset (if any)
+                UpdatePTSOffset(true);
 
-                        if (openshot::Settings::Instance()->WAIT_FOR_VIDEO_PROCESSING_TASK) {
-                            // Wait on each OMP task to complete before moving on to the next one. This slows
-                            // down processing considerably, but might be more stable on some systems.
-#pragma omp taskwait
-                        }
-                    }
+                // Process Video Packet
+                ProcessVideoPacket(requested_frame);
+            }
 
-                }
-                // Audio packet
-                else if (info.has_audio && packet->stream_index == audioStream) {
-                    // Increment this (to track # of packets since the last video packet)
-                    num_packets_since_video_frame++;
-
-                    // Check the status of a seek (if any)
-                    if (is_seeking)
-#pragma omp critical (openshot_seek)
-                        check_seek = CheckSeek(false);
-                    else
-                        check_seek = false;
-
-                    if (check_seek) {
-                        // Jump to the next iteration of this loop
-                        continue;
-                    }
+        }
+        // Audio packet
+        else if (info.has_audio && packet->stream_index == audioStream) {
+            // Increment this (to track # of packets since the last video packet)
+            num_packets_since_video_frame++;
+
+            // Check the status of a seek (if any)
+            if (is_seeking) {
+                check_seek = CheckSeek(false);
+            } else {
+                check_seek = false;
+            }
 
-                    // Packet may become NULL on Close inside Seek if CheckSeek returns false
-                    if (!packet)
-                        // Jump to the next iteration of this loop
-                        continue;
+            if (check_seek) {
+                // Jump to the next iteration of this loop
+                continue;
+            }
 
-                    // Update PTS / Frame Offset (if any)
-                    UpdatePTSOffset(false);
+            // Packet may become NULL on Close inside Seek if CheckSeek returns false
+            if (!packet) {
+                // Jump to the next iteration of this loop
+                continue;
+            }
 
-                    // Determine related video frame and starting sample # from audio PTS
-                    AudioLocation location = GetAudioPTSLocation(packet->pts);
+            // Update PTS / Frame Offset (if any)
+            UpdatePTSOffset(false);
 
-                    // Process Audio Packet
-                    ProcessAudioPacket(requested_frame, location.frame, location.sample_start);
-                }
+            // Determine related video frame and starting sample # from audio PTS
+            AudioLocation location = GetAudioPTSLocation(packet->pts);
 
-                // Check if working frames are 'finished'
-                if (!is_seeking) {
-                    // Check for final frames
-                    CheckWorkingFrames(false, requested_frame);
-                }
+            // Process Audio Packet
+            ProcessAudioPacket(requested_frame, location.frame, location.sample_start);
+        }
 
-                // Check if requested 'final' frame is available
-                bool is_cache_found = (final_cache.GetFrame(requested_frame) != NULL);
+        // Check if working frames are 'finished'
+        if (!is_seeking) {
+            // Check for final frames
+            CheckWorkingFrames(false, requested_frame);
+        }
 
-                // Increment frames processed
-                packets_processed++;
+        // Check if requested 'final' frame is available
+        bool is_cache_found = (final_cache.GetFrame(requested_frame) != NULL);
 
-                // Break once the frame is found
-                if ((is_cache_found && packets_processed >= minimum_packets) || packets_processed > max_packets)
-                    break;
+        // Increment frames processed
+        packets_processed++;
 
-            } // end while
+        // Break once the frame is found
+        if ((is_cache_found && packets_processed >= minimum_packets) || packets_processed > max_packets)
+            break;
 
-        } // end omp single
+    } // end while
 
-    } // end omp parallel
 
     // Debug output
     ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ReadStream (Completed)", "packets_processed", packets_processed, "end_of_stream", end_of_stream, "largest_frame_processed", largest_frame_processed, "Working Cache Count", working_cache.Count());
@@ -1072,24 +1051,19 @@
 int FFmpegReader::GetNextPacket() {
     int found_packet = 0;
     AVPacket *next_packet;
-#pragma omp critical(getnextpacket)
-    {
-        next_packet = new AVPacket();
-        found_packet = av_read_frame(pFormatCtx, next_packet);
+    next_packet = new AVPacket();
+    found_packet = av_read_frame(pFormatCtx, next_packet);
 
-        if (packet) {
-            // Remove previous packet before getting next one
-            RemoveAVPacket(packet);
-            packet = NULL;
-        }
-
-        if (found_packet >= 0) {
-            // Update current packet pointer
-            packet = next_packet;
-        }
-        else
-            delete next_packet;
+    if (packet) {
+        // Remove previous packet before getting next one
+        RemoveAVPacket(packet);
+        packet = NULL;
+    }
+    if (found_packet >= 0) {
+        // Update current packet pointer
+        packet = next_packet;
+    } else {
+        delete next_packet;
     }
     // Return if packet was found (or error number)
     return found_packet;
@@ -1102,12 +1076,10 @@ bool FFmpegReader::GetAVFrame() {
     // Decode video frame
     AVFrame *next_frame = AV_ALLOCATE_FRAME();
 
-#pragma omp critical (packet_cache)
-    {
-#if IS_FFMPEG_3_2
-        frameFinished = 0;
-        ret = avcodec_send_packet(pCodecCtx, packet);
+#if IS_FFMPEG_3_2
+    frameFinished = 0;
+    ret = avcodec_send_packet(pCodecCtx, packet);
 
 #if HAVE_HW_ACCEL
     // Get the format from the variables set in get_hw_dec_format
@@ -1186,7 +1158,6 @@
             info.height);
     }
 #endif // IS_FFMPEG_3_2
-    }
 
     // deallocate the frame
     AV_FREE_FRAME(&next_frame);
@@ -1271,142 +1242,135 @@ void FFmpegReader::ProcessVideoPacket(int64_t requested_frame) {
     const GenericScopedLock<CriticalSection> lock(processingCriticalSection);
     processing_video_frames[current_frame] = current_frame;
 
-#pragma omp task firstprivate(current_frame, my_frame, height, width, video_length, pix_fmt)
-    {
-        // Create variables for a RGB Frame (since most videos are not in RGB, we must convert it)
-        AVFrame *pFrameRGB = NULL;
-        int numBytes;
-        uint8_t *buffer = NULL;
-
-        // Allocate an AVFrame structure
-        pFrameRGB = AV_ALLOCATE_FRAME();
-        if (pFrameRGB == NULL)
-            throw OutOfBoundsFrame("Convert Image Broke!", current_frame, video_length);
-
-        // Determine the max size of this source image (based on the timeline's size, the scaling mode,
-        // and the scaling keyframes). This is a performance improvement, to keep the images as small as possible,
-        // without losing quality. NOTE: We cannot go smaller than the timeline itself, or the add_layer timeline
-        // method will scale it back to timeline size before scaling it smaller again. This needs to be fixed in
-        // the future.
-        int max_width = info.width;
-        int max_height = info.height;
-
-        Clip *parent = (Clip *) ParentClip();
-        if (parent) {
-            if (parent->ParentTimeline()) {
-                // Set max width/height based on parent clip's timeline (if attached to a timeline)
-                max_width = parent->ParentTimeline()->preview_width;
-                max_height = parent->ParentTimeline()->preview_height;
-            }
-            if (parent->scale == SCALE_FIT || parent->scale == SCALE_STRETCH) {
-                // Best fit or Stretch scaling (based on max timeline size * scaling keyframes)
-                float max_scale_x = parent->scale_x.GetMaxPoint().co.Y;
-                float max_scale_y = parent->scale_y.GetMaxPoint().co.Y;
-                max_width = std::max(float(max_width), max_width * max_scale_x);
-                max_height = std::max(float(max_height), max_height * max_scale_y);
-
-            } else if (parent->scale == SCALE_CROP) {
-                // Cropping scale mode (based on max timeline size * cropped size * scaling keyframes)
-                float max_scale_x = parent->scale_x.GetMaxPoint().co.Y;
-                float max_scale_y = parent->scale_y.GetMaxPoint().co.Y;
-                QSize width_size(max_width * max_scale_x,
-                                 round(max_width / (float(info.width) / float(info.height))));
-                QSize height_size(round(max_height / (float(info.height) / float(info.width))),
-                                  max_height * max_scale_y);
-                // respect aspect ratio
-                if (width_size.width() >= max_width && width_size.height() >= max_height) {
-                    max_width = std::max(max_width, width_size.width());
-                    max_height = std::max(max_height, width_size.height());
-                } else {
-                    max_width = std::max(max_width, height_size.width());
-                    max_height = std::max(max_height, height_size.height());
                }
-
-            } else {
-                // No scaling, use original image size (slower)
-                max_width = info.width;
-                max_height = info.height;
-            }
-        }
-
-        // Determine if image needs to be scaled (for performance reasons)
-        int original_height = height;
-        if (max_width != 0 && max_height != 0 && max_width < width && max_height < height) {
-            // Override width and height (but maintain aspect ratio)
-            float ratio = float(width) / float(height);
-            int possible_width = round(max_height * ratio);
-            int possible_height = round(max_width / ratio);
-
-            if (possible_width <= max_width) {
-                // use calculated width, and max_height
-                width = possible_width;
-                height = max_height;
-            } else {
-                // use max_width, and calculated height
-                width = max_width;
-                height = possible_height;
-            }
-        }
-
-        // Determine required buffer size and allocate buffer
-        numBytes = AV_GET_IMAGE_SIZE(PIX_FMT_RGBA, width, height);
-
-#pragma omp critical (video_buffer)
-        buffer = (uint8_t *) av_malloc(numBytes * sizeof(uint8_t));
-
-        // Copy picture data from one AVFrame (or AVPicture) to another one.
-        AV_COPY_PICTURE_DATA(pFrameRGB, buffer, PIX_FMT_RGBA, width, height);
-
-        int scale_mode = SWS_FAST_BILINEAR;
-        if (openshot::Settings::Instance()->HIGH_QUALITY_SCALING) {
-            scale_mode = SWS_BICUBIC;
-        }
-        SwsContext *img_convert_ctx = sws_getContext(info.width, info.height, AV_GET_CODEC_PIXEL_FORMAT(pStream, pCodecCtx), width,
-                                                     height, PIX_FMT_RGBA, scale_mode, NULL, NULL, NULL);
-
-        // Resize / Convert to RGB
-        sws_scale(img_convert_ctx, my_frame->data, my_frame->linesize, 0,
-                  original_height, pFrameRGB->data, pFrameRGB->linesize);
-
-        // Create or get the existing frame object
-        std::shared_ptr<Frame> f = CreateFrame(current_frame);
-
-        // Add Image data to frame
-        if (!ffmpeg_has_alpha(AV_GET_CODEC_PIXEL_FORMAT(pStream, pCodecCtx))) {
-            // Add image with no alpha channel, Speed optimization
-            f->AddImage(width, height, 4, QImage::Format_RGBA8888_Premultiplied, buffer);
-        } else {
-            // Add image with alpha channel (this will be converted to premultipled when needed, but is slower)
-            f->AddImage(width, height, 4, QImage::Format_RGBA8888, buffer);
-        }
-
-        // Update working cache
-        working_cache.Add(f);
-
-        // Keep track of last last_video_frame
-#pragma omp critical (video_buffer)
-        last_video_frame = f;
-
-        // Free the RGB image
-        av_free(buffer);
-        AV_FREE_FRAME(&pFrameRGB);
-
-        // Remove frame and packet
-        RemoveAVFrame(my_frame);
-        sws_freeContext(img_convert_ctx);
-
-        // Remove video frame from list of processing video frames
-        {
-            const GenericScopedLock<CriticalSection> lock(processingCriticalSection);
-            processing_video_frames.erase(current_frame);
-            processed_video_frames[current_frame] = current_frame;
-        }
-
-        // Debug output
-        ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ProcessVideoPacket (After)", "requested_frame", requested_frame, "current_frame", current_frame, "f->number", f->number);
-
-    } // end omp task
+    // Create variables for a RGB Frame (since most videos are not in RGB, we must convert it)
+    AVFrame *pFrameRGB = NULL;
+    int numBytes;
+    uint8_t *buffer = NULL;
+
+    // Allocate an AVFrame structure
+    pFrameRGB = AV_ALLOCATE_FRAME();
+    if (pFrameRGB == NULL)
+        throw OutOfBoundsFrame("Convert Image Broke!", current_frame, video_length);
+
+    // Determine the max size of this source image (based on the timeline's size, the scaling mode,
+    // and the scaling keyframes). This is a performance improvement, to keep the images as small as possible,
+    // without losing quality. NOTE: We cannot go smaller than the timeline itself, or the add_layer timeline
+    // method will scale it back to timeline size before scaling it smaller again. This needs to be fixed in
+    // the future.
+    int max_width = info.width;
+    int max_height = info.height;
+
+    Clip *parent = (Clip *) ParentClip();
+    if (parent) {
+        if (parent->ParentTimeline()) {
+            // Set max width/height based on parent clip's timeline (if attached to a timeline)
+            max_width = parent->ParentTimeline()->preview_width;
+            max_height = parent->ParentTimeline()->preview_height;
+        }
+        if (parent->scale == SCALE_FIT || parent->scale == SCALE_STRETCH) {
+            // Best fit or Stretch scaling (based on max timeline size * scaling keyframes)
+            float max_scale_x = parent->scale_x.GetMaxPoint().co.Y;
+            float max_scale_y = parent->scale_y.GetMaxPoint().co.Y;
+            max_width = std::max(float(max_width), max_width * max_scale_x);
+            max_height = std::max(float(max_height), max_height * max_scale_y);
+
+        } else if (parent->scale == SCALE_CROP) {
+            // Cropping scale mode (based on max timeline size * cropped size * scaling keyframes)
+            float max_scale_x = parent->scale_x.GetMaxPoint().co.Y;
+            float max_scale_y = parent->scale_y.GetMaxPoint().co.Y;
+            QSize width_size(max_width * max_scale_x,
+                             round(max_width / (float(info.width) / float(info.height))));
+            QSize height_size(round(max_height / (float(info.height) / float(info.width))),
+                              max_height * max_scale_y);
+            // respect aspect ratio
+            if (width_size.width() >= max_width && width_size.height() >= max_height) {
+                max_width = std::max(max_width, width_size.width());
+                max_height = std::max(max_height, width_size.height());
+            } else {
+                max_width = std::max(max_width, height_size.width());
+                max_height = std::max(max_height, height_size.height());
+            }
+
+        } else {
+            // No scaling, use original image size (slower)
+            max_width = info.width;
+            max_height = info.height;
+        }
+    }
+
+    // Determine if image needs to be scaled (for performance reasons)
+    int original_height = height;
+    if (max_width != 0 && max_height != 0 && max_width < width && max_height < height) {
+        // Override width and height (but maintain aspect ratio)
+        float ratio = float(width) / float(height);
+        int possible_width = round(max_height * ratio);
+        int possible_height = round(max_width / ratio);
+
+        if (possible_width <= max_width) {
+            // use calculated width, and max_height
+            width = possible_width;
+            height = max_height;
+        } else {
+            // use max_width, and calculated height
+            width = max_width;
+            height = possible_height;
+        }
+    }
+    // Determine required buffer size and allocate buffer
+    numBytes = AV_GET_IMAGE_SIZE(PIX_FMT_RGBA, width, height);
+    const int bytes_per_pixel = 4;
+    int buffer_size = width * height * bytes_per_pixel;
+    buffer = new unsigned char[buffer_size]();
+
+    // Copy picture data from one AVFrame (or AVPicture) to another one.
+    AV_COPY_PICTURE_DATA(pFrameRGB, buffer, PIX_FMT_RGBA, width, height);
+
+    int scale_mode = SWS_FAST_BILINEAR;
+    if (openshot::Settings::Instance()->HIGH_QUALITY_SCALING) {
+        scale_mode = SWS_BICUBIC;
+    }
+    SwsContext *img_convert_ctx = sws_getContext(info.width, info.height, AV_GET_CODEC_PIXEL_FORMAT(pStream, pCodecCtx), width,
+                                                 height, PIX_FMT_RGBA, scale_mode, NULL, NULL, NULL);
+
+    // Resize / Convert to RGB
+    sws_scale(img_convert_ctx, my_frame->data, my_frame->linesize, 0,
+              original_height, pFrameRGB->data, pFrameRGB->linesize);
+
+    // Create or get the existing frame object
+    std::shared_ptr<Frame> f = CreateFrame(current_frame);
+
+    // Add Image data to frame
+    if (!ffmpeg_has_alpha(AV_GET_CODEC_PIXEL_FORMAT(pStream, pCodecCtx))) {
+        // Add image with no alpha channel, Speed optimization
+        f->AddImage(width, height, bytes_per_pixel, QImage::Format_RGBA8888_Premultiplied, buffer);
+    } else {
+        // Add image with alpha channel (this will be converted to premultiplied when needed, but is slower)
+        f->AddImage(width, height, bytes_per_pixel, QImage::Format_RGBA8888, buffer);
+    }
+    // Update working cache
+    working_cache.Add(f);
+
+    // Keep track of last last_video_frame
+    last_video_frame = f;
+
+    // Free the RGB image
+    AV_FREE_FRAME(&pFrameRGB);
+
+    // Remove frame and packet
+    RemoveAVFrame(my_frame);
+    sws_freeContext(img_convert_ctx);
+
+    // Remove video frame from list of processing video frames
+    {
+        const GenericScopedLock<CriticalSection> lock(processingCriticalSection);
+        processing_video_frames.erase(current_frame);
+        processed_video_frames[current_frame] = current_frame;
+    }
+
+    // Debug output
+    ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ProcessVideoPacket (After)", "requested_frame", requested_frame, "current_frame", current_frame, "f->number", f->number);
 }
 
 // Process an audio packet
@@ -1435,8 +1399,6 @@ void FFmpegReader::ProcessAudioPacket(int64_t requested_frame, int64_t target_fr
     int packet_samples = 0;
     int data_size = 0;
 
-#pragma omp critical (ProcessAudioPacket)
-    {
 #if IS_FFMPEG_3_2
     int ret = 0;
     frame_finished = 1;
@@ -1467,7 +1429,6 @@
 #else
     int used = avcodec_decode_audio4(aCodecCtx, audio_frame, &frame_finished, packet);
 #endif
-    }
 
     if (frame_finished) {
@@ -2416,13 +2377,10 @@ void FFmpegReader::RemoveAVFrame(AVFrame *remove_frame) {
     // Remove pFrame (if exists)
    if (remove_frame) {
         // Free memory
-#pragma omp critical (packet_cache)
-        {
-            av_freep(&remove_frame->data[0]);
+        av_freep(&remove_frame->data[0]);
 #ifndef WIN32
-            AV_FREE_FRAME(&remove_frame);
+        AV_FREE_FRAME(&remove_frame);
 #endif
-        }
     }
 }
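Note: with the OpenMP tasks removed, FFmpegReader follows a plain
single-threaded send/receive decode loop. For reference, the core shape of
that pattern in raw FFmpeg (a simplified sketch, error handling elided;
fmt_ctx, codec_ctx, and video_stream_index are assumed to be set up
elsewhere):

    AVPacket pkt;
    AVFrame *frm = av_frame_alloc();
    while (av_read_frame(fmt_ctx, &pkt) >= 0) {
        if (pkt.stream_index == video_stream_index) {
            avcodec_send_packet(codec_ctx, &pkt);              // feed one packet
            while (avcodec_receive_frame(codec_ctx, frm) == 0) {
                // convert/scale the frame (e.g. sws_scale) and cache it
            }
        }
        av_packet_unref(&pkt);
    }
    av_frame_free(&frm);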
diff --git a/src/FFmpegWriter.cpp b/src/FFmpegWriter.cpp
index 210a6e435..0cc4daa2d 100644
--- a/src/FFmpegWriter.cpp
+++ b/src/FFmpegWriter.cpp
@@ -98,12 +98,6 @@ FFmpegWriter::FFmpegWriter(const std::string& path) :
     info.has_audio = false;
     info.has_video = false;
 
-    // Configure OpenMP parallelism
-    // Default number of threads per block
-    omp_set_num_threads(OPEN_MP_NUM_PROCESSORS);
-    // Allow nested parallel sections as deeply as supported
-    omp_set_max_active_levels(OPEN_MP_MAX_ACTIVE);
-
     // Initialize FFMpeg, and register all formats and codecs
     AV_REGISTER_ALL
 
@@ -728,85 +722,75 @@ void FFmpegWriter::write_queued_frames() {
     // Create blank exception
     bool has_error_encoding_video = false;
 
-#pragma omp parallel
-    {
-#pragma omp single
-        {
-            // Process all audio frames (in a separate thread)
-            if (info.has_audio && audio_st && !queued_audio_frames.empty())
-                write_audio_packets(false);
+    // Process all audio frames (in a separate thread)
+    if (info.has_audio && audio_st && !queued_audio_frames.empty())
+        write_audio_packets(false);
 
-            // Loop through each queued image frame
-            while (!queued_video_frames.empty()) {
-                // Get front frame (from the queue)
-                std::shared_ptr<Frame> frame = queued_video_frames.front();
+    // Loop through each queued image frame
+    while (!queued_video_frames.empty()) {
+        // Get front frame (from the queue)
+        std::shared_ptr<Frame> frame = queued_video_frames.front();
 
-                // Add to processed queue
-                processed_frames.push_back(frame);
+        // Add to processed queue
+        processed_frames.push_back(frame);
 
-                // Encode and add the frame to the output file
-                if (info.has_video && video_st)
-                    process_video_packet(frame);
+        // Encode and add the frame to the output file
+        if (info.has_video && video_st)
+            process_video_packet(frame);
 
-                // Remove front item
-                queued_video_frames.pop_front();
+        // Remove front item
+        queued_video_frames.pop_front();
 
-            } // end while
-        } // end omp single
+    } // end while
 
-#pragma omp single
-        {
-            // Loop back through the frames (in order), and write them to the video file
-            while (!processed_frames.empty()) {
-                // Get front frame (from the queue)
-                std::shared_ptr<Frame> frame = processed_frames.front();
-
-                if (info.has_video && video_st) {
-                    // Add to deallocate queue (so we can remove the AVFrames when we are done)
-                    deallocate_frames.push_back(frame);
-
-                    // Does this frame's AVFrame still exist
-                    if (av_frames.count(frame)) {
-                        // Get AVFrame
-                        AVFrame *frame_final = av_frames[frame];
-
-                        // Write frame to video file
-                        bool success = write_video_packet(frame, frame_final);
-                        if (!success)
-                            has_error_encoding_video = true;
-                    }
-                }
-                // Remove front item
-                processed_frames.pop_front();
-            }
+    // Loop back through the frames (in order), and write them to the video file
+    while (!processed_frames.empty()) {
+        // Get front frame (from the queue)
+        std::shared_ptr<Frame> frame = processed_frames.front();
 
-            // Loop through, and deallocate AVFrames
-            while (!deallocate_frames.empty()) {
-                // Get front frame (from the queue)
-                std::shared_ptr<Frame> frame = deallocate_frames.front();
+        if (info.has_video && video_st) {
+            // Add to deallocate queue (so we can remove the AVFrames when we are done)
+            deallocate_frames.push_back(frame);
 
-                // Does this frame's AVFrame still exist
-                if (av_frames.count(frame)) {
-                    // Get AVFrame
-                    AVFrame *av_frame = av_frames[frame];
+            // Does this frame's AVFrame still exist
+            if (av_frames.count(frame)) {
+                // Get AVFrame
+                AVFrame *frame_final = av_frames[frame];
 
-                    // Deallocate AVPicture and AVFrame
-                    av_freep(&(av_frame->data[0]));
-                    AV_FREE_FRAME(&av_frame);
-                    av_frames.erase(frame);
-                }
+                // Write frame to video file
+                bool success = write_video_packet(frame, frame_final);
+                if (!success)
+                    has_error_encoding_video = true;
+            }
+        }
 
-                // Remove front item
-                deallocate_frames.pop_front();
-            }
+        // Remove front item
+        processed_frames.pop_front();
+    }
+
+    // Loop through, and deallocate AVFrames
+    while (!deallocate_frames.empty()) {
+        // Get front frame (from the queue)
+        std::shared_ptr<Frame> frame = deallocate_frames.front();
+
+        // Does this frame's AVFrame still exist
+        if (av_frames.count(frame)) {
+            // Get AVFrame
+            AVFrame *av_frame = av_frames[frame];
 
-            // Done writing
-            is_writing = false;
+            // Deallocate AVPicture and AVFrame
+            av_freep(&(av_frame->data[0]));
+            AV_FREE_FRAME(&av_frame);
+            av_frames.erase(frame);
+        }
 
-        } // end omp single
+        // Remove front item
+        deallocate_frames.pop_front();
+    }
 
-    } // end omp parallel
+    // Done writing
+    is_writing = false;
 
     // Raise exception from main thread
     if (has_error_encoding_video)
@@ -881,29 +865,26 @@ void FFmpegWriter::flush_encoders() {
     int error_code = 0;
 
 #if IS_FFMPEG_3_2
-    #pragma omp critical (write_video_packet)
-    {
-        // Encode video packet (latest version of FFmpeg)
-        error_code = avcodec_send_frame(video_codec_ctx, NULL);
-        got_packet = 0;
-        while (error_code >= 0) {
-            error_code = avcodec_receive_packet(video_codec_ctx, &pkt);
-            if (error_code == AVERROR(EAGAIN) || error_code == AVERROR_EOF) {
-                got_packet = 0;
-                // Write packet
-                avcodec_flush_buffers(video_codec_ctx);
-                break;
-            }
-            if (pkt.pts != AV_NOPTS_VALUE)
-                pkt.pts = av_rescale_q(pkt.pts, video_codec_ctx->time_base, video_st->time_base);
-            if (pkt.dts != AV_NOPTS_VALUE)
-                pkt.dts = av_rescale_q(pkt.dts, video_codec_ctx->time_base, video_st->time_base);
-            if (pkt.duration > 0)
-                pkt.duration = av_rescale_q(pkt.duration, video_codec_ctx->time_base, video_st->time_base);
-            pkt.stream_index = video_st->index;
-            error_code = av_interleaved_write_frame(oc, &pkt);
-        }
-    }
+    // Encode video packet (latest version of FFmpeg)
+    error_code = avcodec_send_frame(video_codec_ctx, NULL);
+    got_packet = 0;
+    while (error_code >= 0) {
+        error_code = avcodec_receive_packet(video_codec_ctx, &pkt);
+        if (error_code == AVERROR(EAGAIN) || error_code == AVERROR_EOF) {
+            got_packet = 0;
+            // Write packet
+            avcodec_flush_buffers(video_codec_ctx);
+            break;
+        }
+        if (pkt.pts != AV_NOPTS_VALUE)
+            pkt.pts = av_rescale_q(pkt.pts, video_codec_ctx->time_base, video_st->time_base);
+        if (pkt.dts != AV_NOPTS_VALUE)
+            pkt.dts = av_rescale_q(pkt.dts, video_codec_ctx->time_base, video_st->time_base);
+        if (pkt.duration > 0)
+            pkt.duration = av_rescale_q(pkt.duration, video_codec_ctx->time_base, video_st->time_base);
+        pkt.stream_index = video_st->index;
+        error_code = av_interleaved_write_frame(oc, &pkt);
+    }
 #else // IS_FFMPEG_3_2
 
     // Encode video packet (older than FFmpeg 3.2)
@@ -1554,408 +1535,404 @@ void FFmpegWriter::open_video(AVFormatContext *oc, AVStream *st) {
 
 // write all queued frames' audio to the video file
 void FFmpegWriter::write_audio_packets(bool is_final) {
-#pragma omp task firstprivate(is_final)
-    {
-        // Init audio buffers / variables
-        int total_frame_samples = 0;
-        int frame_position = 0;
-        int channels_in_frame = 0;
-        int sample_rate_in_frame = 0;
-        int samples_in_frame = 0;
-        ChannelLayout channel_layout_in_frame = LAYOUT_MONO; // default channel layout
-
-        // Create a new array (to hold all S16 audio samples, for the current queued frames
-        unsigned int all_queued_samples_size = sizeof(int16_t) * (queued_audio_frames.size() * AVCODEC_MAX_AUDIO_FRAME_SIZE);
-        int16_t *all_queued_samples = (int16_t *) av_malloc(all_queued_samples_size);
-        int16_t *all_resampled_samples = NULL;
-        int16_t *final_samples_planar = NULL;
-        int16_t *final_samples = NULL;
-
-        // Loop through each queued audio frame
-        while (!queued_audio_frames.empty()) {
-            // Get front frame (from the queue)
-            std::shared_ptr<Frame> frame = queued_audio_frames.front();
-
-            // Get the audio details from this frame
-            sample_rate_in_frame = frame->SampleRate();
-            samples_in_frame = frame->GetAudioSamplesCount();
-            channels_in_frame = frame->GetAudioChannelsCount();
-            channel_layout_in_frame = frame->ChannelsLayout();
-
-            // Get audio sample array
-            float *frame_samples_float = NULL;
-            // Get samples interleaved together (c1 c2 c1 c2 c1 c2)
-            frame_samples_float = frame->GetInterleavedAudioSamples(sample_rate_in_frame, NULL, &samples_in_frame);
-
-            // Calculate total samples
-            total_frame_samples = samples_in_frame * channels_in_frame;
-
-            // Translate audio sample values back to 16 bit integers with saturation
-            const int16_t max16 = 32767;
-            const int16_t min16 = -32768;
-            for (int s = 0; s < total_frame_samples; s++, frame_position++) {
-                float valF = frame_samples_float[s] * (1 << 15);
-                int16_t conv;
-                if (valF > max16) {
-                    conv = max16;
-                } else if (valF < min16) {
-                    conv = min16;
-                } else {
-                    conv = int(valF + 32768.5) - 32768; // +0.5 is for rounding
-                }
-
-                // Copy into buffer
-                all_queued_samples[frame_position] = conv;
-            }
-
-            // Deallocate float array
-            delete[] frame_samples_float;
-
-            // Remove front item
-            queued_audio_frames.pop_front();
-
-        } // end while
+    // Init audio buffers / variables
+    int total_frame_samples = 0;
+    int frame_position = 0;
+    int channels_in_frame = 0;
+    int sample_rate_in_frame = 0;
+    int samples_in_frame = 0;
+    ChannelLayout channel_layout_in_frame = LAYOUT_MONO; // default channel layout
+
+    // Create a new array (to hold all S16 audio samples) for the current queued frames
+    unsigned int all_queued_samples_size = sizeof(int16_t) * (queued_audio_frames.size() * AVCODEC_MAX_AUDIO_FRAME_SIZE);
+    int16_t *all_queued_samples = (int16_t *) av_malloc(all_queued_samples_size);
+    int16_t *all_resampled_samples = NULL;
+    int16_t *final_samples_planar = NULL;
+    int16_t *final_samples = NULL;
+
+    // Loop through each queued audio frame
+    while (!queued_audio_frames.empty()) {
+        // Get front frame (from the queue)
+        std::shared_ptr<Frame> frame = queued_audio_frames.front();
+
+        // Get the audio details from this frame
+        sample_rate_in_frame = frame->SampleRate();
+        samples_in_frame = frame->GetAudioSamplesCount();
+        channels_in_frame = frame->GetAudioChannelsCount();
+        channel_layout_in_frame = frame->ChannelsLayout();
+
+        // Get audio sample array
+        float *frame_samples_float = NULL;
+        // Get samples interleaved together (c1 c2 c1 c2 c1 c2)
+        frame_samples_float = frame->GetInterleavedAudioSamples(sample_rate_in_frame, NULL, &samples_in_frame);
+
+        // Calculate total samples
+        total_frame_samples = samples_in_frame * channels_in_frame;
+
+        // Translate audio sample values back to 16 bit integers with saturation
+        const int16_t max16 = 32767;
+        const int16_t min16 = -32768;
+        for (int s = 0; s < total_frame_samples; s++, frame_position++) {
+            float valF = frame_samples_float[s] * (1 << 15);
+            int16_t conv;
+            if (valF > max16) {
+                conv = max16;
+            } else if (valF < min16) {
+                conv = min16;
+            } else {
+                conv = int(valF + 32768.5) - 32768; // +0.5 is for rounding
+            }
+
+            // Copy into buffer
+            all_queued_samples[frame_position] = conv;
+        }
+        // Deallocate float array
+        delete[] frame_samples_float;
-        // Update total samples (since we've combined all queued frames)
-        total_frame_samples = frame_position;
-        int remaining_frame_samples = total_frame_samples;
-        int samples_position = 0;
+        // Remove front item
+        queued_audio_frames.pop_front();
+    } // end while
 
-        ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_audio_packets", "is_final", is_final, "total_frame_samples", total_frame_samples, "channel_layout_in_frame", channel_layout_in_frame, "channels_in_frame", channels_in_frame, "samples_in_frame", samples_in_frame, "LAYOUT_MONO", LAYOUT_MONO);
-
-        // Keep track of the original sample format
-        AVSampleFormat output_sample_fmt = audio_codec_ctx->sample_fmt;
+    // Update total samples (since we've combined all queued frames)
+    total_frame_samples = frame_position;
+    int remaining_frame_samples = total_frame_samples;
+    int samples_position = 0;
 
-        AVFrame *audio_frame = NULL;
-        if (!is_final) {
-            // Create input frame (and allocate arrays)
-            audio_frame = AV_ALLOCATE_FRAME();
-            AV_RESET_FRAME(audio_frame);
-            audio_frame->nb_samples = total_frame_samples / channels_in_frame;
-            // Fill input frame with sample data
-            int error_code = avcodec_fill_audio_frame(audio_frame, channels_in_frame, AV_SAMPLE_FMT_S16, (uint8_t *) all_queued_samples, all_queued_samples_size, 0);
-            if (error_code < 0) {
-                ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_audio_packets ERROR [" + (std::string) av_err2str(error_code) + "]", "error_code", error_code);
-            }
+    ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_audio_packets", "is_final", is_final, "total_frame_samples", total_frame_samples, "channel_layout_in_frame", channel_layout_in_frame, "channels_in_frame", channels_in_frame, "samples_in_frame", samples_in_frame, "LAYOUT_MONO", LAYOUT_MONO);
 
-            // Do not convert audio to planar format (yet). We need to keep everything interleaved at this point.
-            switch (audio_codec_ctx->sample_fmt) {
-                case AV_SAMPLE_FMT_FLTP: {
-                    output_sample_fmt = AV_SAMPLE_FMT_FLT;
-                    break;
-                }
-                case AV_SAMPLE_FMT_S32P: {
-                    output_sample_fmt = AV_SAMPLE_FMT_S32;
-                    break;
-                }
-                case AV_SAMPLE_FMT_S16P: {
-                    output_sample_fmt = AV_SAMPLE_FMT_S16;
-                    break;
-                }
-                case AV_SAMPLE_FMT_U8P: {
-                    output_sample_fmt = AV_SAMPLE_FMT_U8;
-                    break;
-                }
-                default: {
-                    // This is only here to silence unused-enum warnings
-                    break;
-                }
-            }
-
-            // Update total samples & input frame size (due to bigger or smaller data types)
-            total_frame_samples *= (float(info.sample_rate) / sample_rate_in_frame); // adjust for different byte sizes
-            total_frame_samples *= (float(info.channels) / channels_in_frame); // adjust for different # of channels
-
-            // Create output frame (and allocate arrays)
-            AVFrame *audio_converted = AV_ALLOCATE_FRAME();
-            AV_RESET_FRAME(audio_converted);
-            audio_converted->nb_samples = total_frame_samples / channels_in_frame;
-            av_samples_alloc(audio_converted->data, audio_converted->linesize, info.channels, audio_converted->nb_samples, output_sample_fmt, 0);
-
-            ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_audio_packets (1st resampling)", "in_sample_fmt", AV_SAMPLE_FMT_S16, "out_sample_fmt", output_sample_fmt, "in_sample_rate", sample_rate_in_frame, "out_sample_rate", info.sample_rate, "in_channels", channels_in_frame, "out_channels", info.channels);
-
-            // setup resample context
-            if (!avr) {
-                avr = SWR_ALLOC();
-                av_opt_set_int(avr, "in_channel_layout", channel_layout_in_frame, 0);
-                av_opt_set_int(avr, "out_channel_layout", info.channel_layout, 0);
-                av_opt_set_int(avr, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
-                av_opt_set_int(avr, "out_sample_fmt", output_sample_fmt, 0); // planar not allowed here
-                av_opt_set_int(avr, "in_sample_rate", sample_rate_in_frame, 0);
-                av_opt_set_int(avr, "out_sample_rate", info.sample_rate, 0);
-                av_opt_set_int(avr, "in_channels", channels_in_frame, 0);
-                av_opt_set_int(avr, "out_channels", info.channels, 0);
-                SWR_INIT(avr);
-            }
-            int nb_samples = 0;
(0 if unknown) - audio_converted->nb_samples, // maximum number of samples that the output buffer can hold - audio_frame->data, // input data pointers - audio_frame->linesize[0], // input plane size, in bytes (0 if unknown) - audio_frame->nb_samples // number of input samples to convert - ); - - // Set remaining samples - remaining_frame_samples = nb_samples * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16); - - // Create a new array (to hold all resampled S16 audio samples) - all_resampled_samples = (int16_t *) av_malloc( - sizeof(int16_t) * nb_samples * info.channels - * (av_get_bytes_per_sample(output_sample_fmt) / - av_get_bytes_per_sample(AV_SAMPLE_FMT_S16) ) - ); - - // Copy audio samples over original samples - memcpy(all_resampled_samples, audio_converted->data[0], nb_samples * info.channels * av_get_bytes_per_sample(output_sample_fmt)); - - // Remove converted audio - av_freep(&(audio_frame->data[0])); - AV_FREE_FRAME(&audio_frame); - av_freep(&audio_converted->data[0]); - AV_FREE_FRAME(&audio_converted); - all_queued_samples = NULL; // this array cleared with above call - - ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_audio_packets (Successfully completed 1st resampling)", "nb_samples", nb_samples, "remaining_frame_samples", remaining_frame_samples); - } - - // Loop until no more samples - while (remaining_frame_samples > 0 || is_final) { - // Get remaining samples needed for this packet - int remaining_packet_samples = (audio_input_frame_size * info.channels) - audio_input_position; - - // Determine how many samples we need - int diff = 0; - if (remaining_frame_samples >= remaining_packet_samples) { - diff = remaining_packet_samples; - } else { - diff = remaining_frame_samples; - } - - // Copy frame samples into the packet samples array - if (!is_final) - //TODO: Make this more sane - memcpy( - samples + (audio_input_position - * (av_get_bytes_per_sample(output_sample_fmt) / - av_get_bytes_per_sample(AV_SAMPLE_FMT_S16) ) - ), - all_resampled_samples + samples_position, - diff * av_get_bytes_per_sample(output_sample_fmt) - ); - - // Increment counters - audio_input_position += diff; - samples_position += diff * (av_get_bytes_per_sample(output_sample_fmt) / av_get_bytes_per_sample(AV_SAMPLE_FMT_S16)); - remaining_frame_samples -= diff; - - // Do we have enough samples to proceed? - if (audio_input_position < (audio_input_frame_size * info.channels) && !is_final) - // Not enough samples to encode... 
so wait until the next frame - break; + // Keep track of the original sample format + AVSampleFormat output_sample_fmt = audio_codec_ctx->sample_fmt; - // Convert to planar (if needed by audio codec) - AVFrame *frame_final = AV_ALLOCATE_FRAME(); - AV_RESET_FRAME(frame_final); - if (av_sample_fmt_is_planar(audio_codec_ctx->sample_fmt)) { - ZmqLogger::Instance()->AppendDebugMethod( - "FFmpegWriter::write_audio_packets (2nd resampling for Planar formats)", - "in_sample_fmt", output_sample_fmt, - "out_sample_fmt", audio_codec_ctx->sample_fmt, - "in_sample_rate", info.sample_rate, - "out_sample_rate", info.sample_rate, - "in_channels", info.channels, - "out_channels", info.channels - ); - - // setup resample context - if (!avr_planar) { - avr_planar = SWR_ALLOC(); - av_opt_set_int(avr_planar, "in_channel_layout", info.channel_layout, 0); - av_opt_set_int(avr_planar, "out_channel_layout", info.channel_layout, 0); - av_opt_set_int(avr_planar, "in_sample_fmt", output_sample_fmt, 0); - av_opt_set_int(avr_planar, "out_sample_fmt", audio_codec_ctx->sample_fmt, 0); // planar not allowed here - av_opt_set_int(avr_planar, "in_sample_rate", info.sample_rate, 0); - av_opt_set_int(avr_planar, "out_sample_rate", info.sample_rate, 0); - av_opt_set_int(avr_planar, "in_channels", info.channels, 0); - av_opt_set_int(avr_planar, "out_channels", info.channels, 0); - SWR_INIT(avr_planar); - } + AVFrame *audio_frame = NULL; + if (!is_final) { + // Create input frame (and allocate arrays) + audio_frame = AV_ALLOCATE_FRAME(); + AV_RESET_FRAME(audio_frame); + audio_frame->nb_samples = total_frame_samples / channels_in_frame; - // Create input frame (and allocate arrays) - audio_frame = AV_ALLOCATE_FRAME(); - AV_RESET_FRAME(audio_frame); - audio_frame->nb_samples = audio_input_position / info.channels; - - // Create a new array - final_samples_planar = (int16_t *) av_malloc( - sizeof(int16_t) * audio_frame->nb_samples * info.channels - * (av_get_bytes_per_sample(output_sample_fmt) / - av_get_bytes_per_sample(AV_SAMPLE_FMT_S16) ) - ); - - // Copy audio into buffer for frame - memcpy(final_samples_planar, samples, audio_frame->nb_samples * info.channels * av_get_bytes_per_sample(output_sample_fmt)); - - // Fill input frame with sample data - avcodec_fill_audio_frame(audio_frame, info.channels, output_sample_fmt, - (uint8_t *) final_samples_planar, audio_encoder_buffer_size, 0); - - // Create output frame (and allocate arrays) - frame_final->nb_samples = audio_input_frame_size; - av_samples_alloc(frame_final->data, frame_final->linesize, info.channels, - frame_final->nb_samples, audio_codec_ctx->sample_fmt, 0); - - // Convert audio samples - int nb_samples = SWR_CONVERT( - avr_planar, // audio resample context - frame_final->data, // output data pointers - frame_final->linesize[0], // output plane size, in bytes. 
(0 if unknown) - frame_final->nb_samples, // maximum number of samples that the output buffer can hold - audio_frame->data, // input data pointers - audio_frame->linesize[0], // input plane size, in bytes (0 if unknown) - audio_frame->nb_samples // number of input samples to convert - ); - - // Copy audio samples over original samples - if (nb_samples > 0) { - memcpy(samples, frame_final->data[0], - nb_samples * av_get_bytes_per_sample(audio_codec_ctx->sample_fmt) * info.channels); + // Fill input frame with sample data + int error_code = avcodec_fill_audio_frame(audio_frame, channels_in_frame, AV_SAMPLE_FMT_S16, (uint8_t *) all_queued_samples, all_queued_samples_size, 0); + if (error_code < 0) { + ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_audio_packets ERROR [" + (std::string) av_err2str(error_code) + "]", "error_code", error_code); } - // deallocate AVFrame - av_freep(&(audio_frame->data[0])); - AV_FREE_FRAME(&audio_frame); - all_queued_samples = NULL; // this array cleared with above call - - ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_audio_packets (Successfully completed 2nd resampling for Planar formats)", "nb_samples", nb_samples); - - } else { - // Create a new array - final_samples = (int16_t *) av_malloc( - sizeof(int16_t) * audio_input_position - * (av_get_bytes_per_sample(audio_codec_ctx->sample_fmt) / - av_get_bytes_per_sample(AV_SAMPLE_FMT_S16) ) - ); - - // Copy audio into buffer for frame - memcpy(final_samples, samples, - audio_input_position * av_get_bytes_per_sample(audio_codec_ctx->sample_fmt)); + // Do not convert audio to planar format (yet). We need to keep everything interleaved at this point. + switch (audio_codec_ctx->sample_fmt) { + case AV_SAMPLE_FMT_FLTP: { + output_sample_fmt = AV_SAMPLE_FMT_FLT; + break; + } + case AV_SAMPLE_FMT_S32P: { + output_sample_fmt = AV_SAMPLE_FMT_S32; + break; + } + case AV_SAMPLE_FMT_S16P: { + output_sample_fmt = AV_SAMPLE_FMT_S16; + break; + } + case AV_SAMPLE_FMT_U8P: { + output_sample_fmt = AV_SAMPLE_FMT_U8; + break; + } + default: { + // This is only here to silence unused-enum warnings + break; + } + } - // Init the nb_samples property - frame_final->nb_samples = audio_input_frame_size; + // Update total samples & input frame size (due to bigger or smaller data types) + total_frame_samples *= (float(info.sample_rate) / sample_rate_in_frame); // adjust for different byte sizes + total_frame_samples *= (float(info.channels) / channels_in_frame); // adjust for different # of channels + + // Create output frame (and allocate arrays) + AVFrame *audio_converted = AV_ALLOCATE_FRAME(); + AV_RESET_FRAME(audio_converted); + audio_converted->nb_samples = total_frame_samples / channels_in_frame; + av_samples_alloc(audio_converted->data, audio_converted->linesize, info.channels, audio_converted->nb_samples, output_sample_fmt, 0); + + ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_audio_packets (1st resampling)", "in_sample_fmt", AV_SAMPLE_FMT_S16, "out_sample_fmt", output_sample_fmt, "in_sample_rate", sample_rate_in_frame, "out_sample_rate", info.sample_rate, "in_channels", channels_in_frame, "out_channels", info.channels); + + // setup resample context + if (!avr) { + avr = SWR_ALLOC(); + av_opt_set_int(avr, "in_channel_layout", channel_layout_in_frame, 0); + av_opt_set_int(avr, "out_channel_layout", info.channel_layout, 0); + av_opt_set_int(avr, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0); + av_opt_set_int(avr, "out_sample_fmt", output_sample_fmt, 0); // planar not allowed here + 
av_opt_set_int(avr, "in_sample_rate", sample_rate_in_frame, 0); + av_opt_set_int(avr, "out_sample_rate", info.sample_rate, 0); + av_opt_set_int(avr, "in_channels", channels_in_frame, 0); + av_opt_set_int(avr, "out_channels", info.channels, 0); + SWR_INIT(avr); + } + int nb_samples = 0; + + // Convert audio samples + nb_samples = SWR_CONVERT( + avr, // audio resample context + audio_converted->data, // output data pointers + audio_converted->linesize[0], // output plane size, in bytes. (0 if unknown) + audio_converted->nb_samples, // maximum number of samples that the output buffer can hold + audio_frame->data, // input data pointers + audio_frame->linesize[0], // input plane size, in bytes (0 if unknown) + audio_frame->nb_samples // number of input samples to convert + ); + + // Set remaining samples + remaining_frame_samples = nb_samples * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16); + + // Create a new array (to hold all resampled S16 audio samples) + all_resampled_samples = (int16_t *) av_malloc( + sizeof(int16_t) * nb_samples * info.channels + * (av_get_bytes_per_sample(output_sample_fmt) / + av_get_bytes_per_sample(AV_SAMPLE_FMT_S16) ) + ); + + // Copy audio samples over original samples + memcpy(all_resampled_samples, audio_converted->data[0], nb_samples * info.channels * av_get_bytes_per_sample(output_sample_fmt)); + + // Remove converted audio + av_freep(&(audio_frame->data[0])); + AV_FREE_FRAME(&audio_frame); + av_freep(&audio_converted->data[0]); + AV_FREE_FRAME(&audio_converted); + all_queued_samples = NULL; // this array cleared with above call + + ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_audio_packets (Successfully completed 1st resampling)", "nb_samples", nb_samples, "remaining_frame_samples", remaining_frame_samples); + } + + // Loop until no more samples + while (remaining_frame_samples > 0 || is_final) { + // Get remaining samples needed for this packet + int remaining_packet_samples = (audio_input_frame_size * info.channels) - audio_input_position; + + // Determine how many samples we need + int diff = 0; + if (remaining_frame_samples >= remaining_packet_samples) { + diff = remaining_packet_samples; + } else { + diff = remaining_frame_samples; + } - // Fill the final_frame AVFrame with audio (non planar) - avcodec_fill_audio_frame(frame_final, audio_codec_ctx->channels, - audio_codec_ctx->sample_fmt, (uint8_t *) final_samples, - audio_encoder_buffer_size, 0); - } + // Copy frame samples into the packet samples array + if (!is_final) + //TODO: Make this more sane + memcpy( + samples + (audio_input_position + * (av_get_bytes_per_sample(output_sample_fmt) / + av_get_bytes_per_sample(AV_SAMPLE_FMT_S16) ) + ), + all_resampled_samples + samples_position, + diff * av_get_bytes_per_sample(output_sample_fmt) + ); + + // Increment counters + audio_input_position += diff; + samples_position += diff * (av_get_bytes_per_sample(output_sample_fmt) / av_get_bytes_per_sample(AV_SAMPLE_FMT_S16)); + remaining_frame_samples -= diff; + + // Do we have enough samples to proceed? + if (audio_input_position < (audio_input_frame_size * info.channels) && !is_final) + // Not enough samples to encode... 
so wait until the next frame + break; + + // Convert to planar (if needed by audio codec) + AVFrame *frame_final = AV_ALLOCATE_FRAME(); + AV_RESET_FRAME(frame_final); + if (av_sample_fmt_is_planar(audio_codec_ctx->sample_fmt)) { + ZmqLogger::Instance()->AppendDebugMethod( + "FFmpegWriter::write_audio_packets (2nd resampling for Planar formats)", + "in_sample_fmt", output_sample_fmt, + "out_sample_fmt", audio_codec_ctx->sample_fmt, + "in_sample_rate", info.sample_rate, + "out_sample_rate", info.sample_rate, + "in_channels", info.channels, + "out_channels", info.channels + ); + + // setup resample context + if (!avr_planar) { + avr_planar = SWR_ALLOC(); + av_opt_set_int(avr_planar, "in_channel_layout", info.channel_layout, 0); + av_opt_set_int(avr_planar, "out_channel_layout", info.channel_layout, 0); + av_opt_set_int(avr_planar, "in_sample_fmt", output_sample_fmt, 0); + av_opt_set_int(avr_planar, "out_sample_fmt", audio_codec_ctx->sample_fmt, 0); // planar not allowed here + av_opt_set_int(avr_planar, "in_sample_rate", info.sample_rate, 0); + av_opt_set_int(avr_planar, "out_sample_rate", info.sample_rate, 0); + av_opt_set_int(avr_planar, "in_channels", info.channels, 0); + av_opt_set_int(avr_planar, "out_channels", info.channels, 0); + SWR_INIT(avr_planar); + } + + // Create input frame (and allocate arrays) + audio_frame = AV_ALLOCATE_FRAME(); + AV_RESET_FRAME(audio_frame); + audio_frame->nb_samples = audio_input_position / info.channels; + + // Create a new array + final_samples_planar = (int16_t *) av_malloc( + sizeof(int16_t) * audio_frame->nb_samples * info.channels + * (av_get_bytes_per_sample(output_sample_fmt) / + av_get_bytes_per_sample(AV_SAMPLE_FMT_S16) ) + ); + + // Copy audio into buffer for frame + memcpy(final_samples_planar, samples, audio_frame->nb_samples * info.channels * av_get_bytes_per_sample(output_sample_fmt)); + + // Fill input frame with sample data + avcodec_fill_audio_frame(audio_frame, info.channels, output_sample_fmt, + (uint8_t *) final_samples_planar, audio_encoder_buffer_size, 0); + + // Create output frame (and allocate arrays) + frame_final->nb_samples = audio_input_frame_size; + av_samples_alloc(frame_final->data, frame_final->linesize, info.channels, + frame_final->nb_samples, audio_codec_ctx->sample_fmt, 0); + + // Convert audio samples + int nb_samples = SWR_CONVERT( + avr_planar, // audio resample context + frame_final->data, // output data pointers + frame_final->linesize[0], // output plane size, in bytes. 
(0 if unknown) + frame_final->nb_samples, // maximum number of samples that the output buffer can hold + audio_frame->data, // input data pointers + audio_frame->linesize[0], // input plane size, in bytes (0 if unknown) + audio_frame->nb_samples // number of input samples to convert + ); + + // Copy audio samples over original samples + if (nb_samples > 0) { + memcpy(samples, frame_final->data[0], + nb_samples * av_get_bytes_per_sample(audio_codec_ctx->sample_fmt) * info.channels); + } + + // deallocate AVFrame + av_freep(&(audio_frame->data[0])); + AV_FREE_FRAME(&audio_frame); + all_queued_samples = NULL; // this array cleared with above call + + ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_audio_packets (Successfully completed 2nd resampling for Planar formats)", "nb_samples", nb_samples); + + } else { + // Create a new array + final_samples = (int16_t *) av_malloc( + sizeof(int16_t) * audio_input_position + * (av_get_bytes_per_sample(audio_codec_ctx->sample_fmt) / + av_get_bytes_per_sample(AV_SAMPLE_FMT_S16) ) + ); + + // Copy audio into buffer for frame + memcpy(final_samples, samples, + audio_input_position * av_get_bytes_per_sample(audio_codec_ctx->sample_fmt)); + + // Init the nb_samples property + frame_final->nb_samples = audio_input_frame_size; + + // Fill the final_frame AVFrame with audio (non planar) + avcodec_fill_audio_frame(frame_final, audio_codec_ctx->channels, + audio_codec_ctx->sample_fmt, (uint8_t *) final_samples, + audio_encoder_buffer_size, 0); + } - // Increment PTS (in samples) - write_audio_count += FFMIN(audio_input_frame_size, audio_input_position); - frame_final->pts = write_audio_count; // Set the AVFrame's PTS + // Increment PTS (in samples) + write_audio_count += FFMIN(audio_input_frame_size, audio_input_position); + frame_final->pts = write_audio_count; // Set the AVFrame's PTS - // Init the packet - AVPacket pkt; - av_init_packet(&pkt); - pkt.data = audio_encoder_buffer; - pkt.size = audio_encoder_buffer_size; + // Init the packet + AVPacket pkt; + av_init_packet(&pkt); + pkt.data = audio_encoder_buffer; + pkt.size = audio_encoder_buffer_size; - // Set the packet's PTS prior to encoding - pkt.pts = pkt.dts = write_audio_count; + // Set the packet's PTS prior to encoding + pkt.pts = pkt.dts = write_audio_count; - /* encode the audio samples */ - int got_packet_ptr = 0; + /* encode the audio samples */ + int got_packet_ptr = 0; #if IS_FFMPEG_3_2 - // Encode audio (latest version of FFmpeg) - int error_code; - int ret = 0; - int frame_finished = 0; - error_code = ret = avcodec_send_frame(audio_codec_ctx, frame_final); - if (ret < 0 && ret != AVERROR(EINVAL) && ret != AVERROR_EOF) { - avcodec_send_frame(audio_codec_ctx, NULL); - } - else { - if (ret >= 0) - pkt.size = 0; - ret = avcodec_receive_packet(audio_codec_ctx, &pkt); - if (ret >= 0) - frame_finished = 1; - if(ret == AVERROR(EINVAL) || ret == AVERROR_EOF) { - avcodec_flush_buffers(audio_codec_ctx); - ret = 0; - } - if (ret >= 0) { - ret = frame_finished; - } - } - if (!pkt.data && !frame_finished) - { - ret = -1; - } - got_packet_ptr = ret; + // Encode audio (latest version of FFmpeg) + int error_code; + int ret = 0; + int frame_finished = 0; + error_code = ret = avcodec_send_frame(audio_codec_ctx, frame_final); + if (ret < 0 && ret != AVERROR(EINVAL) && ret != AVERROR_EOF) { + avcodec_send_frame(audio_codec_ctx, NULL); + } + else { + if (ret >= 0) + pkt.size = 0; + ret = avcodec_receive_packet(audio_codec_ctx, &pkt); + if (ret >= 0) + frame_finished = 1; + if(ret == AVERROR(EINVAL) || 
ret == AVERROR_EOF) { + avcodec_flush_buffers(audio_codec_ctx); + ret = 0; + } + if (ret >= 0) { + ret = frame_finished; + } + } + if (!pkt.data && !frame_finished) + { + ret = -1; + } + got_packet_ptr = ret; #else - // Encode audio (older versions of FFmpeg) - int error_code = avcodec_encode_audio2(audio_codec_ctx, &pkt, frame_final, &got_packet_ptr); + // Encode audio (older versions of FFmpeg) + int error_code = avcodec_encode_audio2(audio_codec_ctx, &pkt, frame_final, &got_packet_ptr); #endif - /* if zero size, it means the image was buffered */ - if (error_code == 0 && got_packet_ptr) { - - // Since the PTS can change during encoding, set the value again. This seems like a huge hack, - // but it fixes lots of PTS related issues when I do this. - pkt.pts = pkt.dts = write_audio_count; - - // Scale the PTS to the audio stream timebase (which is sometimes different than the codec's timebase) - if (pkt.pts != AV_NOPTS_VALUE) - pkt.pts = av_rescale_q(pkt.pts, audio_codec_ctx->time_base, audio_st->time_base); - if (pkt.dts != AV_NOPTS_VALUE) - pkt.dts = av_rescale_q(pkt.dts, audio_codec_ctx->time_base, audio_st->time_base); - if (pkt.duration > 0) - pkt.duration = av_rescale_q(pkt.duration, audio_codec_ctx->time_base, audio_st->time_base); - - // set stream - pkt.stream_index = audio_st->index; - pkt.flags |= AV_PKT_FLAG_KEY; - - /* write the compressed frame in the media file */ - error_code = av_interleaved_write_frame(oc, &pkt); - } - - if (error_code < 0) { - ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_audio_packets ERROR [" + (std::string) av_err2str(error_code) + "]", "error_code", error_code); - } - - // deallocate AVFrame - av_freep(&(frame_final->data[0])); - AV_FREE_FRAME(&frame_final); - - // deallocate memory for packet - AV_FREE_PACKET(&pkt); - - // Reset position - audio_input_position = 0; - is_final = false; - } + /* if zero size, it means the image was buffered */ + if (error_code == 0 && got_packet_ptr) { + + // Since the PTS can change during encoding, set the value again. This seems like a huge hack, + // but it fixes lots of PTS related issues when I do this. 
+ pkt.pts = pkt.dts = write_audio_count; + + // Scale the PTS to the audio stream timebase (which is sometimes different than the codec's timebase) + if (pkt.pts != AV_NOPTS_VALUE) + pkt.pts = av_rescale_q(pkt.pts, audio_codec_ctx->time_base, audio_st->time_base); + if (pkt.dts != AV_NOPTS_VALUE) + pkt.dts = av_rescale_q(pkt.dts, audio_codec_ctx->time_base, audio_st->time_base); + if (pkt.duration > 0) + pkt.duration = av_rescale_q(pkt.duration, audio_codec_ctx->time_base, audio_st->time_base); + + // set stream + pkt.stream_index = audio_st->index; + pkt.flags |= AV_PKT_FLAG_KEY; + + /* write the compressed frame in the media file */ + error_code = av_interleaved_write_frame(oc, &pkt); + } - // Delete arrays (if needed) - if (all_resampled_samples) { - av_freep(&all_resampled_samples); - all_resampled_samples = NULL; - } - if (all_queued_samples) { - av_freep(&all_queued_samples); - all_queued_samples = NULL; - } + if (error_code < 0) { + ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_audio_packets ERROR [" + (std::string) av_err2str(error_code) + "]", "error_code", error_code); + } - } // end task + // deallocate AVFrame + av_freep(&(frame_final->data[0])); + AV_FREE_FRAME(&frame_final); + + // deallocate memory for packet + AV_FREE_PACKET(&pkt); + + // Reset position + audio_input_position = 0; + is_final = false; + } + + // Delete arrays (if needed) + if (all_resampled_samples) { + av_freep(&all_resampled_samples); + all_resampled_samples = NULL; + } + if (all_queued_samples) { + av_freep(&all_queued_samples); + all_queued_samples = NULL; + } } // Allocate an AVFrame object @@ -2006,53 +1983,47 @@ void FFmpegWriter::process_video_packet(std::shared_ptr frame) { if (rescaler_position == num_of_rescalers) rescaler_position = 0; -#pragma omp task firstprivate(frame, scaler, source_image_width, source_image_height) - { - // Allocate an RGB frame & final output frame - int bytes_source = 0; - int bytes_final = 0; - AVFrame *frame_source = NULL; - const uchar *pixels = NULL; + // Allocate an RGB frame & final output frame + int bytes_source = 0; + int bytes_final = 0; + AVFrame *frame_source = NULL; + const uchar *pixels = NULL; - // Get a list of pixels from source image - pixels = frame->GetPixels(); + // Get a list of pixels from source image + pixels = frame->GetPixels(); - // Init AVFrame for source image & final (converted image) - frame_source = allocate_avframe(PIX_FMT_RGBA, source_image_width, source_image_height, &bytes_source, (uint8_t *) pixels); + // Init AVFrame for source image & final (converted image) + frame_source = allocate_avframe(PIX_FMT_RGBA, source_image_width, source_image_height, &bytes_source, (uint8_t *) pixels); #if IS_FFMPEG_3_2 - AVFrame *frame_final; - #if HAVE_HW_ACCEL - if (hw_en_on && hw_en_supported) { - frame_final = allocate_avframe(AV_PIX_FMT_NV12, info.width, info.height, &bytes_final, NULL); - } else - #endif // HAVE_HW_ACCEL - { - frame_final = allocate_avframe( - (AVPixelFormat)(video_st->codecpar->format), - info.width, info.height, &bytes_final, NULL - ); - } + AVFrame *frame_final; +#if HAVE_HW_ACCEL + if (hw_en_on && hw_en_supported) { + frame_final = allocate_avframe(AV_PIX_FMT_NV12, info.width, info.height, &bytes_final, NULL); + } else +#endif // HAVE_HW_ACCEL + { + frame_final = allocate_avframe( + (AVPixelFormat)(video_st->codecpar->format), + info.width, info.height, &bytes_final, NULL + ); + } #else - AVFrame *frame_final = allocate_avframe(video_codec_ctx->pix_fmt, info.width, info.height, &bytes_final, NULL); + AVFrame 
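[Editor's note — illustration only, not part of the patch. The heart of the now-synchronous write_audio_packets() above is the saturating float-to-S16 conversion with round-half-up. A minimal standalone sketch of the same technique, with hypothetical names:]

    #include <cstdint>
    #include <vector>

    // Convert normalized float samples [-1.0, 1.0] to signed 16-bit integers,
    // clamping out-of-range values and rounding to nearest (mirrors the loop above).
    std::vector<int16_t> to_s16(const std::vector<float>& in) {
        std::vector<int16_t> out;
        out.reserve(in.size());
        for (float v : in) {
            float scaled = v * (1 << 15);
            if (scaled > 32767.0f)
                out.push_back(32767);
            else if (scaled < -32768.0f)
                out.push_back(-32768);
            else
                out.push_back(int(scaled + 32768.5f) - 32768); // the 0.5 bias turns truncation into rounding
        }
        return out;
    }
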
diff --git a/src/Frame.cpp b/src/Frame.cpp
index d25def6dd..d0c9cf1b2 100644
--- a/src/Frame.cpp
+++ b/src/Frame.cpp
@@ -754,13 +754,11 @@ void Frame::AddColor(int new_width, int new_height, std::string new_color)
 	// Create new image object, and fill with pixel data
 	const GenericScopedLock<CriticalSection> lock(addingImageSection);
-	#pragma omp critical (AddImage)
-	{
-		image = std::make_shared<QImage>(new_width, new_height, QImage::Format_RGBA8888_Premultiplied);
+	image = std::make_shared<QImage>(new_width, new_height, QImage::Format_RGBA8888_Premultiplied);
+
+	// Fill with solid color
+	image->fill(QColor(QString::fromStdString(color)));
-
-		// Fill with solid color
-		image->fill(QColor(QString::fromStdString(color)));
-	}
 	// Update height and width
 	width = image->width();
 	height = image->height();
@@ -775,12 +773,7 @@ void Frame::AddImage(
 	// Create new buffer
 	{
 		const GenericScopedLock<CriticalSection> lock(addingImageSection);
-		int buffer_size = new_width * new_height * bytes_per_pixel;
-		qbuffer = new unsigned char[buffer_size]();
-
-		// Copy buffer data
-		memcpy((unsigned char*)qbuffer, pixels_, buffer_size);
-
+		qbuffer = pixels_;
 	} // Release addingImageSection lock
 
 	// Create new image object from pixel data
@@ -804,19 +797,16 @@ void Frame::AddImage(std::shared_ptr<QImage> new_image)
 	// assign image data
 	const GenericScopedLock<CriticalSection> lock(addingImageSection);
-	#pragma omp critical (AddImage)
-	{
-		image = new_image;
+	image = new_image;
 
-		// Always convert to Format_RGBA8888_Premultiplied (if different)
-		if (image->format() != QImage::Format_RGBA8888_Premultiplied)
-			*image = image->convertToFormat(QImage::Format_RGBA8888_Premultiplied);
+	// Always convert to Format_RGBA8888_Premultiplied (if different)
+	if (image->format() != QImage::Format_RGBA8888_Premultiplied)
+		*image = image->convertToFormat(QImage::Format_RGBA8888_Premultiplied);
 
-		// Update height and width
-		width = image->width();
-		height = image->height();
-		has_image_data = true;
-	}
+	// Update height and width
+	width = image->width();
+	height = image->height();
+	has_image_data = true;
 }
 
 // Add (or replace) pixel data to the frame (for only the odd or even lines)
@@ -834,15 +824,12 @@ void Frame::AddImage(std::shared_ptr<QImage> new_image, bool only_odd_lines)
 	}
 	else {
 		// Ignore image of different sizes or formats
 		bool ret=false;
-		#pragma omp critical (AddImage)
-		{
-			if (image == new_image || image->size() != new_image->size()) {
-				ret = true;
-			}
-			else if (new_image->format() != QImage::Format_RGBA8888_Premultiplied) {
-				new_image = std::make_shared<QImage>(
+		if (image == new_image || image->size() != new_image->size()) {
+			ret = true;
+		}
+		else if (new_image->format() != QImage::Format_RGBA8888_Premultiplied) {
+			new_image = std::make_shared<QImage>(
 				new_image->convertToFormat(QImage::Format_RGBA8888_Premultiplied));
-			}
 		}
 		if (ret) {
 			return;
@@ -850,26 +837,23 @@ void Frame::AddImage(std::shared_ptr<QImage> new_image, bool only_odd_lines)
 
 		// Get the frame's image
 		const GenericScopedLock<CriticalSection> lock(addingImageSection);
-		#pragma omp critical (AddImage)
-		{
-			unsigned char *pixels = image->bits();
-			const unsigned char *new_pixels = new_image->constBits();
+		unsigned char *pixels = image->bits();
+		const unsigned char *new_pixels = new_image->constBits();
 
-			// Loop through the scanlines of the image (even or odd)
-			int start = 0;
-			if (only_odd_lines)
-				start = 1;
+		// Loop through the scanlines of the image (even or odd)
+		int start = 0;
+		if (only_odd_lines)
+			start = 1;
 
-			for (int row = start; row < image->height(); row += 2) {
-				int offset = row * image->bytesPerLine();
-				memcpy(pixels + offset, new_pixels + offset, image->bytesPerLine());
-			}
-
-			// Update height and width
-			height = image->height();
-			width = image->width();
-			has_image_data = true;
+		for (int row = start; row < image->height(); row += 2) {
+			int offset = row * image->bytesPerLine();
+			memcpy(pixels + offset, new_pixels + offset, image->bytesPerLine());
 		}
+
+		// Update height and width
+		height = image->height();
+		width = image->width();
+		has_image_data = true;
 	}
 }
 
@@ -891,31 +875,29 @@ void Frame::ResizeAudio(int channels, int length, int rate, ChannelLayout layout
 
 // Add audio samples to a specific channel
 void Frame::AddAudio(bool replaceSamples, int destChannel, int destStartSample, const float* source, int numSamples, float gainToApplyToSource = 1.0f) {
 	const GenericScopedLock<CriticalSection> lock(addingAudioSection);
-	#pragma omp critical (adding_audio)
-	{
-		// Clamp starting sample to 0
-		int destStartSampleAdjusted = max(destStartSample, 0);
-
-		// Extend audio container to hold more (or less) samples and channels.. if needed
-		int new_length = destStartSampleAdjusted + numSamples;
-		int new_channel_length = audio->getNumChannels();
-		if (destChannel >= new_channel_length)
-			new_channel_length = destChannel + 1;
-		if (new_length > audio->getNumSamples() || new_channel_length > audio->getNumChannels())
-			audio->setSize(new_channel_length, new_length, true, true, false);
-
-		// Clear the range of samples first (if needed)
-		if (replaceSamples)
-			audio->clear(destChannel, destStartSampleAdjusted, numSamples);
-
-		// Add samples to frame's audio buffer
-		audio->addFrom(destChannel, destStartSampleAdjusted, source, numSamples, gainToApplyToSource);
-		has_audio_data = true;
-
-		// Calculate max audio sample added
-		if (new_length > max_audio_sample)
-			max_audio_sample = new_length;
-	}
+
+	// Clamp starting sample to 0
+	int destStartSampleAdjusted = max(destStartSample, 0);
+
+	// Extend audio container to hold more (or less) samples and channels... if needed
+	int new_length = destStartSampleAdjusted + numSamples;
+	int new_channel_length = audio->getNumChannels();
+	if (destChannel >= new_channel_length)
+		new_channel_length = destChannel + 1;
+	if (new_length > audio->getNumSamples() || new_channel_length > audio->getNumChannels())
+		audio->setSize(new_channel_length, new_length, true, true, false);
+
+	// Clear the range of samples first (if needed)
+	if (replaceSamples)
+		audio->clear(destChannel, destStartSampleAdjusted, numSamples);
+
+	// Add samples to frame's audio buffer
+	audio->addFrom(destChannel, destStartSampleAdjusted, source, numSamples, gainToApplyToSource);
+	has_audio_data = true;
+
+	// Calculate max audio sample added
+	if (new_length > max_audio_sample)
+		max_audio_sample = new_length;
 }
 
 // Apply gain ramp (i.e. fading volume)
@@ -958,7 +940,7 @@ cv::Mat Frame::GetImageCV()
 	if (!image)
 		// Fill with black
 		AddColor(width, height, color);
-	
+	// if (imagecv.empty())
 	// Convert Qimage to Mat
 	imagecv = Qimage2mat(image);
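[Editor's note — illustration only, not part of the patch. Frame::AddImage(new_image, only_odd_lines) above merges interlaced fields by copying every other scanline between two same-sized RGBA buffers. A minimal sketch of that row-interleaved copy, with hypothetical names:]

    #include <cstdint>
    #include <cstring>

    // Copy every other scanline (odd or even rows) from src into dst. Both
    // buffers must share the same height and bytes-per-line, as the size/format
    // guard in AddImage enforces before this point.
    void merge_fields(uint8_t* dst, const uint8_t* src,
                      int height, int bytes_per_line, bool only_odd_lines) {
        for (int row = only_odd_lines ? 1 : 0; row < height; row += 2) {
            int offset = row * bytes_per_line;
            std::memcpy(dst + offset, src + offset, bytes_per_line);
        }
    }
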
diff --git a/src/KeyFrame.cpp b/src/KeyFrame.cpp
index 2ac57cf80..8e4cfa69c 100644
--- a/src/KeyFrame.cpp
+++ b/src/KeyFrame.cpp
@@ -97,6 +97,7 @@ namespace {
 			case CONSTANT: return left.co.Y;
 			case LINEAR: return InterpolateLinearCurve(left, right, target);
 			case BEZIER: return InterpolateBezierCurve(left, right, target, allowed_error);
+			default: return InterpolateLinearCurve(left, right, target);
 		}
 	}
 
diff --git a/src/Settings.cpp b/src/Settings.cpp
index 9cf16fbbd..cfbe2e2ca 100644
--- a/src/Settings.cpp
+++ b/src/Settings.cpp
@@ -45,7 +45,6 @@ Settings *Settings::Instance()
 		m_pInstance = new Settings;
 		m_pInstance->HARDWARE_DECODER = 0;
 		m_pInstance->HIGH_QUALITY_SCALING = false;
-		m_pInstance->WAIT_FOR_VIDEO_PROCESSING_TASK = false;
 		m_pInstance->OMP_THREADS = 12;
 		m_pInstance->FF_THREADS = 8;
 		m_pInstance->DE_LIMIT_HEIGHT_MAX = 1100;
diff --git a/src/Settings.h b/src/Settings.h
index da85b89e0..4d16f6b17 100644
--- a/src/Settings.h
+++ b/src/Settings.h
@@ -94,15 +94,6 @@ namespace openshot {
 		/// Scale mode used in FFmpeg decoding and encoding (used as an optimization for faster previews)
 		bool HIGH_QUALITY_SCALING = false;
 
-		/// Maximum width for image data (useful for optimzing for a smaller preview or render)
-		int MAX_WIDTH = 0;
-
-		/// Maximum height for image data (useful for optimzing for a smaller preview or render)
-		int MAX_HEIGHT = 0;
-
-		/// Wait for OpenMP task to finish before continuing (used to limit threads on slower systems)
-		bool WAIT_FOR_VIDEO_PROCESSING_TASK = false;
-
 		/// Number of threads of OpenMP
 		int OMP_THREADS = 12;
 
diff --git a/src/Timeline.cpp b/src/Timeline.cpp
index b4d13b04e..ad510a038 100644
--- a/src/Timeline.cpp
+++ b/src/Timeline.cpp
@@ -74,12 +74,6 @@ Timeline::Timeline(int width, int height, Fraction fps, int sample_rate, int cha
 	// Init cache
 	final_cache = new CacheMemory();
 
-	// Configure OpenMP parallelism
-	// Default number of threads per block
-	omp_set_num_threads(max_concurrent_frames);
-	// Allow nested parallel sections as deeply as supported
-	omp_set_max_active_levels(OPEN_MP_MAX_ACTIVE);
-
 	// Init max image size
 	SetMaxSize(info.width, info.height);
 }
@@ -207,12 +201,6 @@ Timeline::Timeline(const std::string& projectPath, bool convert_absolute_paths)
 	// Init cache
 	final_cache = new CacheMemory();
 
-	// Configure OpenMP parallelism
-	// Default number of threads per section
-	omp_set_num_threads(max_concurrent_frames);
-	// Allow nested parallel sections as deeply as supported
-	omp_set_max_active_levels(OPEN_MP_MAX_ACTIVE);
-
 	// Init max image size
 	SetMaxSize(info.width, info.height);
 }
@@ -442,7 +430,7 @@ std::shared_ptr<Frame> Timeline::apply_effects(std::shared_ptr<Frame> frame, int
 }
 
 // Get or generate a blank frame
-std::shared_ptr<Frame> Timeline::GetOrCreateFrame(Clip* clip, int64_t number)
+std::shared_ptr<Frame> Timeline::GetOrCreateFrame(std::shared_ptr<Frame> background_frame, Clip* clip, int64_t number)
 {
 	std::shared_ptr<Frame> new_frame;
 
@@ -454,8 +442,7 @@ std::shared_ptr<Frame> Timeline::GetOrCreateFrame(Clip* clip, int64_t number)
 		ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetOrCreateFrame (from reader)", "number", number, "samples_in_frame", samples_in_frame);
 
 		// Attempt to get a frame (but this could fail if a reader has just been closed)
-		#pragma omp critical (T_GetOtCreateFrame)
-		new_frame = std::shared_ptr<Frame>(clip->GetFrame(number));
+		new_frame = std::shared_ptr<Frame>(clip->GetFrame(background_frame, number));
 
 		// Return real frame
 		return new_frame;
@@ -470,23 +457,15 @@ std::shared_ptr<Frame> Timeline::GetOrCreateFrame(Clip* clip, int64_t number)
 	ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetOrCreateFrame (create blank)", "number", number, "samples_in_frame", samples_in_frame);
 
 	// Create blank frame
-	new_frame = std::make_shared<Frame>(number, preview_width, preview_height, "#000000", samples_in_frame, info.channels);
-	#pragma omp critical (T_GetOtCreateFrame)
-	{
-		new_frame->AddAudioSilence(samples_in_frame);
-		new_frame->SampleRate(info.sample_rate);
-		new_frame->ChannelsLayout(info.channel_layout);
-	}
 	return new_frame;
 }
 
 // Process a new layer of video or audio
 void Timeline::add_layer(std::shared_ptr<Frame> new_frame, Clip* source_clip, int64_t clip_frame_number, int64_t timeline_frame_number, bool is_top_clip, float max_volume)
 {
-	// Get the clip's frame & image
+	// Get the clip's frame, composited on top of the current timeline frame
 	std::shared_ptr<Frame> source_frame;
-	#pragma omp critical (T_addLayer)
-	source_frame = GetOrCreateFrame(source_clip, clip_frame_number);
+	source_frame = GetOrCreateFrame(new_frame, source_clip, clip_frame_number);
 
 	// No frame found... so bail
 	if (!source_frame)
@@ -498,13 +477,9 @@ void Timeline::add_layer(std::shared_ptr<Frame> new_frame, Clip* source_clip, in
 	/* Apply effects to the source frame (if any). If multiple clips are overlapping, only process the
 	 * effects on the top clip. */
 	if (is_top_clip) {
-		#pragma omp critical (T_addLayer)
 		source_frame = apply_effects(source_frame, timeline_frame_number, source_clip->Layer());
 	}
 
-	// Declare an image to hold the source frame's image
-	std::shared_ptr<QImage> source_image;
-
 	/* COPY AUDIO - with correct volume */
 	if (source_clip->Reader()->info.has_audio) {
 		// Debug output
@@ -553,51 +528,17 @@ void Timeline::add_layer(std::shared_ptr<Frame> new_frame, Clip* source_clip, in
 				// This is a crude solution at best. =)
 				if (new_frame->GetAudioSamplesCount() != source_frame->GetAudioSamplesCount()){
 					// Force timeline frame to match the source frame
-					#pragma omp critical (T_addLayer)
 					new_frame->ResizeAudio(info.channels, source_frame->GetAudioSamplesCount(), info.sample_rate, info.channel_layout);
 				}
 
 				// Copy audio samples (and set initial volume). Mix samples with existing audio samples. The gains are added together, to
 				// be sure to set the gains correctly, so the sum does not exceed 1.0 (or audio distortion will happen).
-				#pragma omp critical (T_addLayer)
 				new_frame->AddAudio(false, channel_mapping, 0, source_frame->GetAudioSamples(channel), source_frame->GetAudioSamplesCount(), 1.0);
-
 			} else
 				// Debug output
 				ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (No Audio Copied - Wrong # of Channels)", "source_clip->Reader()->info.has_audio", source_clip->Reader()->info.has_audio, "source_frame->GetAudioChannelsCount()", source_frame->GetAudioChannelsCount(), "info.channels", info.channels, "clip_frame_number", clip_frame_number, "timeline_frame_number", timeline_frame_number);
 	}
 
-	// Skip out if video was disabled or only an audio frame (no visualisation in use)
-	if (source_clip->has_video.GetInt(clip_frame_number) == 0 ||
-		(!source_clip->Waveform() && !source_clip->Reader()->info.has_video))
-		// Skip the rest of the image processing for performance reasons
-		return;
-
-	// Debug output
-	ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Get Source Image)", "source_frame->number", source_frame->number, "source_clip->Waveform()", source_clip->Waveform(), "clip_frame_number", clip_frame_number);
-
-	// Get actual frame image data
-	source_image = source_frame->GetImage();
-
-	// Debug output
-	ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Transform: Composite Image Layer: Prepare)", "source_frame->number", source_frame->number, "new_frame->GetImage()->width()", new_frame->GetImage()->width(), "source_image->width()", source_image->width());
-
-	/* COMPOSITE SOURCE IMAGE (LAYER) ONTO FINAL IMAGE */
-	std::shared_ptr<QImage> new_image;
-	new_image = new_frame->GetImage();
-
-	// Load timeline's new frame image into a QPainter
-	QPainter painter(new_image.get());
-
-	// Composite a new layer onto the image
-	painter.setCompositionMode(QPainter::CompositionMode_SourceOver);
-	painter.drawImage(0, 0, *source_image, 0, 0, source_image->width(), source_image->height());
-	painter.end();
-
-	// Add new QImage to frame
-	new_frame->AddImage(new_image);
-
 	// Debug output
 	ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Transform: Composite Image Layer: Completed)", "source_frame->number", source_frame->number, "new_frame->GetImage()->width()", new_frame->GetImage()->width());
 }
@@ -696,7 +637,6 @@ std::shared_ptr<Frame> Timeline::GetFrame(int64_t requested_frame)
 	// Check cache
 	std::shared_ptr<Frame> frame;
 	std::lock_guard<std::mutex> guard(get_frame_mutex);
-	#pragma omp critical (T_GetFrame)
 	frame = final_cache->GetFrame(requested_frame);
 	if (frame) {
 		// Debug output
@@ -715,7 +655,6 @@ std::shared_ptr<Frame> Timeline::GetFrame(int64_t requested_frame)
 		throw ReaderClosed("The Timeline is closed. Call Open() before calling this method.");
 
 	// Check cache again (due to locking)
-	#pragma omp critical (T_GetFrame)
 	frame = final_cache->GetFrame(requested_frame);
 	if (frame) {
 		// Debug output
@@ -725,146 +664,100 @@ std::shared_ptr<Frame> Timeline::GetFrame(int64_t requested_frame)
 		return frame;
 	}
 
-	// Minimum number of frames to process (for performance reasons)
-	// Too many causes stuttering, too few causes stuttering
-	int minimum_frames = std::min(max_concurrent_frames / 2, 4);
-
 	// Get a list of clips that intersect with the requested section of timeline
 	// This also opens the readers for intersecting clips, and marks non-intersecting clips as 'needs closing'
 	std::vector<Clip*> nearby_clips;
-	#pragma omp critical (T_GetFrame)
-	nearby_clips = find_intersecting_clips(requested_frame, minimum_frames, true);
+	nearby_clips = find_intersecting_clips(requested_frame, 1, true);
 
-	// Debug output
-	ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame", "requested_frame", requested_frame, "minimum_frames", minimum_frames, "max_concurrent_frames", max_concurrent_frames);
+	// Debug output
+	ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (processing frame)", "requested_frame", requested_frame, "omp_get_thread_num()", omp_get_thread_num());
 
-	// GENERATE CACHE FOR CLIPS (IN FRAME # SEQUENCE)
-	// Determine all clip frames, and request them in order (to keep resampled audio in sequence)
-	for (int64_t frame_number = requested_frame; frame_number < requested_frame + minimum_frames; frame_number++)
-	{
-		// Loop through clips
-		for (auto clip : nearby_clips)
-		{
-			long clip_start_position = round(clip->Position() * info.fps.ToDouble()) + 1;
-			long clip_end_position = round((clip->Position() + clip->Duration()) * info.fps.ToDouble()) + 1;
+	// Init some basic properties about this frame
+	int samples_in_frame = Frame::GetSamplesPerFrame(requested_frame, info.fps, info.sample_rate, info.channels);
 
-			bool does_clip_intersect = (clip_start_position <= frame_number && clip_end_position >= frame_number);
-			if (does_clip_intersect)
-			{
-				// Get clip frame #
-				long clip_start_frame = (clip->Start() * info.fps.ToDouble()) + 1;
-				long clip_frame_number = frame_number - clip_start_position + clip_start_frame;
-
-				// Cache clip object
-				clip->GetFrame(clip_frame_number);
-			}
-		}
-	}
-
-	#pragma omp parallel
-	{
-		// Loop through all requested frames
-		#pragma omp for ordered firstprivate(nearby_clips, requested_frame, minimum_frames) schedule(static,1)
-		for (int64_t frame_number = requested_frame; frame_number < requested_frame + minimum_frames; frame_number++)
-		{
-			// Debug output
-			ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (processing frame)", "frame_number", frame_number, "omp_get_thread_num()", omp_get_thread_num());
-
-			// Init some basic properties about this frame
-			int samples_in_frame = Frame::GetSamplesPerFrame(frame_number, info.fps, info.sample_rate, info.channels);
-
-			// Create blank frame (which will become the requested frame)
-			std::shared_ptr<Frame> new_frame(std::make_shared<Frame>(frame_number, preview_width, preview_height, "#000000", samples_in_frame, info.channels));
-			#pragma omp critical (T_GetFrame)
-			{
-				new_frame->AddAudioSilence(samples_in_frame);
-				new_frame->SampleRate(info.sample_rate);
-				new_frame->ChannelsLayout(info.channel_layout);
-			}
-
-			// Debug output
-			ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Adding solid color)", "frame_number", frame_number, "info.width", info.width, "info.height", info.height);
-
-			// Add Background Color to 1st layer (if animated or not black)
-			if ((color.red.GetCount() > 1 || color.green.GetCount() > 1 || color.blue.GetCount() > 1) ||
-				(color.red.GetValue(frame_number) != 0.0 || color.green.GetValue(frame_number) != 0.0 || color.blue.GetValue(frame_number) != 0.0))
-				new_frame->AddColor(preview_width, preview_height, color.GetColorHex(frame_number));
-
-			// Debug output
-			ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Loop through clips)", "frame_number", frame_number, "clips.size()", clips.size(), "nearby_clips.size()", nearby_clips.size());
-
-			// Find Clips near this time
-			for (auto clip : nearby_clips)
-			{
-				long clip_start_position = round(clip->Position() * info.fps.ToDouble()) + 1;
-				long clip_end_position = round((clip->Position() + clip->Duration()) * info.fps.ToDouble()) + 1;
-
-				bool does_clip_intersect = (clip_start_position <= frame_number && clip_end_position >= frame_number);
-
-				// Debug output
-				ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Does clip intersect)", "frame_number", frame_number, "clip->Position()", clip->Position(), "clip->Duration()", clip->Duration(), "does_clip_intersect", does_clip_intersect);
-
-				// Clip is visible
-				if (does_clip_intersect)
-				{
-					// Determine if clip is "top" clip on this layer (only happens when multiple clips are overlapping)
-					bool is_top_clip = true;
-					float max_volume = 0.0;
-					for (auto nearby_clip : nearby_clips)
-					{
-						long nearby_clip_start_position = round(nearby_clip->Position() * info.fps.ToDouble()) + 1;
-						long nearby_clip_end_position = round((nearby_clip->Position() + nearby_clip->Duration()) * info.fps.ToDouble()) + 1;
-						long nearby_clip_start_frame = (nearby_clip->Start() * info.fps.ToDouble()) + 1;
-						long nearby_clip_frame_number = frame_number - nearby_clip_start_position + nearby_clip_start_frame;
-
-						// Determine if top clip
-						if (clip->Id() != nearby_clip->Id() && clip->Layer() == nearby_clip->Layer() &&
-							nearby_clip_start_position <= frame_number && nearby_clip_end_position >= frame_number &&
-							nearby_clip_start_position > clip_start_position && is_top_clip == true) {
-							is_top_clip = false;
-						}
-
-						// Determine max volume of overlapping clips
-						if (nearby_clip->Reader() && nearby_clip->Reader()->info.has_audio &&
-							nearby_clip->has_audio.GetInt(nearby_clip_frame_number) != 0 &&
-							nearby_clip_start_position <= frame_number && nearby_clip_end_position >= frame_number) {
-							max_volume += nearby_clip->volume.GetValue(nearby_clip_frame_number);
-						}
-					}
-
-					// Determine the frame needed for this clip (based on the position on the timeline)
-					long clip_start_frame = (clip->Start() * info.fps.ToDouble()) + 1;
-					long clip_frame_number = frame_number - clip_start_position + clip_start_frame;
-
-					// Debug output
-					ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Calculate clip's frame #)", "clip->Position()", clip->Position(), "clip->Start()", clip->Start(), "info.fps.ToFloat()", info.fps.ToFloat(), "clip_frame_number", clip_frame_number);
-
-					// Add clip's frame as layer
-					add_layer(new_frame, clip, clip_frame_number, frame_number, is_top_clip, max_volume);
-
-				} else
-					// Debug output
-					ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (clip does not intersect)", "frame_number", frame_number, "does_clip_intersect", does_clip_intersect);
-
-			} // end clip loop
-
-			// Debug output
-			ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Add frame to cache)", "frame_number", frame_number, "info.width", info.width, "info.height", info.height);
-
-			// Set frame # on mapped frame
-			#pragma omp ordered
-			{
-				new_frame->SetFrameNumber(frame_number);
-
-				// Add final frame to cache
-				final_cache->Add(new_frame);
-			}
-
-		} // end frame loop
-	} // end parallel
-
-	// Debug output
-	ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (end parallel region)", "requested_frame", requested_frame, "omp_get_thread_num()", omp_get_thread_num());
+	// Create blank frame (which will become the requested frame)
+	std::shared_ptr<Frame> new_frame(std::make_shared<Frame>(requested_frame, preview_width, preview_height, "#000000", samples_in_frame, info.channels));
+	new_frame->AddAudioSilence(samples_in_frame);
+	new_frame->SampleRate(info.sample_rate);
+	new_frame->ChannelsLayout(info.channel_layout);
+
+	// Debug output
+	ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Adding solid color)", "requested_frame", requested_frame, "info.width", info.width, "info.height", info.height);
+
+	// Add Background Color to 1st layer (if animated or not black)
+	if ((color.red.GetCount() > 1 || color.green.GetCount() > 1 || color.blue.GetCount() > 1) ||
+		(color.red.GetValue(requested_frame) != 0.0 || color.green.GetValue(requested_frame) != 0.0 || color.blue.GetValue(requested_frame) != 0.0))
+		new_frame->AddColor(preview_width, preview_height, color.GetColorHex(requested_frame));
+
+	// Debug output
+	ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Loop through clips)", "requested_frame", requested_frame, "clips.size()", clips.size(), "nearby_clips.size()", nearby_clips.size());
+
+	// Find Clips near this time
+	for (auto clip : nearby_clips)
+	{
+		long clip_start_position = round(clip->Position() * info.fps.ToDouble()) + 1;
+		long clip_end_position = round((clip->Position() + clip->Duration()) * info.fps.ToDouble()) + 1;
+
+		bool does_clip_intersect = (clip_start_position <= requested_frame && clip_end_position >= requested_frame);
+
+		// Debug output
+		ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Does clip intersect)", "requested_frame", requested_frame, "clip->Position()", clip->Position(), "clip->Duration()", clip->Duration(), "does_clip_intersect", does_clip_intersect);
+
+		// Clip is visible
+		if (does_clip_intersect)
+		{
+			// Determine if clip is "top" clip on this layer (only happens when multiple clips are overlapping)
+			bool is_top_clip = true;
+			float max_volume = 0.0;
+			for (auto nearby_clip : nearby_clips)
+			{
+				long nearby_clip_start_position = round(nearby_clip->Position() * info.fps.ToDouble()) + 1;
+				long nearby_clip_end_position = round((nearby_clip->Position() + nearby_clip->Duration()) * info.fps.ToDouble()) + 1;
+				long nearby_clip_start_frame = (nearby_clip->Start() * info.fps.ToDouble()) + 1;
+				long nearby_clip_frame_number = requested_frame - nearby_clip_start_position + nearby_clip_start_frame;
+
+				// Determine if top clip
+				if (clip->Id() != nearby_clip->Id() && clip->Layer() == nearby_clip->Layer() &&
+					nearby_clip_start_position <= requested_frame && nearby_clip_end_position >= requested_frame &&
+					nearby_clip_start_position > clip_start_position && is_top_clip == true) {
+					is_top_clip = false;
+				}
+
+				// Determine max volume of overlapping clips
+				if (nearby_clip->Reader() && nearby_clip->Reader()->info.has_audio &&
+					nearby_clip->has_audio.GetInt(nearby_clip_frame_number) != 0 &&
+					nearby_clip_start_position <= requested_frame && nearby_clip_end_position >= requested_frame) {
+					max_volume += nearby_clip->volume.GetValue(nearby_clip_frame_number);
+				}
+			}
+
+			// Determine the frame needed for this clip (based on the position on the timeline)
+			long clip_start_frame = (clip->Start() * info.fps.ToDouble()) + 1;
+			long clip_frame_number = requested_frame - clip_start_position + clip_start_frame;
+
+			// Debug output
+			ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Calculate clip's frame #)", "clip->Position()", clip->Position(), "clip->Start()", clip->Start(), "info.fps.ToFloat()", info.fps.ToFloat(), "clip_frame_number", clip_frame_number);
+
+			// Add clip's frame as layer
+			add_layer(new_frame, clip, clip_frame_number, requested_frame, is_top_clip, max_volume);
+
+		} else {
+			// Debug output
+			ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (clip does not intersect)",
+				"requested_frame", requested_frame, "does_clip_intersect",
+				does_clip_intersect);
+		}
+
+	} // end clip loop
+
+	// Debug output
+	ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Add frame to cache)", "requested_frame", requested_frame, "info.width", info.width, "info.height", info.height);
+
+	// Set frame # on mapped frame
+	new_frame->SetFrameNumber(requested_frame);
+
+	// Add final frame to cache
+	final_cache->Add(new_frame);
 
 	// Return frame (or blank frame)
 	return final_cache->GetFrame(requested_frame);
@@ -900,7 +793,6 @@ std::vector<Clip*> Timeline::find_intersecting_clips(int64_t requested_frame, in
 		ZmqLogger::Instance()->AppendDebugMethod("Timeline::find_intersecting_clips (Is clip near or intersecting)", "requested_frame", requested_frame, "min_requested_frame", min_requested_frame, "max_requested_frame", max_requested_frame, "clip->Position()", clip->Position(), "does_clip_intersect", does_clip_intersect);
 
 		// Open (or schedule for closing) this clip, based on if it's intersecting or not
-		#pragma omp critical (reader_lock)
 		update_open_clips(clip, does_clip_intersect);
 
 		// Clip is visible
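[Editor's note — illustration only, not part of the patch. The now-serial Timeline::GetFrame above maps the requested timeline frame onto each intersecting clip's own 1-based frame numbering. A minimal sketch of that arithmetic, with hypothetical names:]

    #include <cmath>

    // Map a timeline frame number to a clip frame number (both 1-based), as in
    // Timeline::GetFrame above; 'fps' is the timeline frame rate.
    long clip_frame_for(long timeline_frame, double position_sec, double start_sec, double fps) {
        long clip_start_position = static_cast<long>(std::round(position_sec * fps)) + 1; // first timeline frame the clip covers
        long clip_start_frame = static_cast<long>(start_sec * fps) + 1;                   // first clip frame used (trim offset)
        return timeline_frame - clip_start_position + clip_start_frame;
    }

    // Example: a clip positioned at 2.0s with Start() == 0.0 on a 30 fps timeline:
    // clip_frame_for(61, 2.0, 0.0, 30.0) == 1, i.e. timeline frame 61 shows the clip's first frame.
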
diff --git a/src/Timeline.h b/src/Timeline.h
index ddce4568b..8d90f5770 100644
--- a/src/Timeline.h
+++ b/src/Timeline.h
@@ -202,8 +202,8 @@ namespace openshot {
 		/// @param include Include or Exclude intersecting clips
 		std::vector<openshot::Clip*> find_intersecting_clips(int64_t requested_frame, int number_of_frames, bool include);
 
-		/// Get or generate a blank frame
-		std::shared_ptr<openshot::Frame> GetOrCreateFrame(openshot::Clip* clip, int64_t number);
+		/// Get a clip's frame or generate a blank frame
+		std::shared_ptr<openshot::Frame> GetOrCreateFrame(std::shared_ptr<openshot::Frame> background_frame, openshot::Clip* clip, int64_t number);
 
 		/// Apply effects to the source frame (if any)
 		std::shared_ptr<openshot::Frame> apply_effects(std::shared_ptr<openshot::Frame> frame, int64_t timeline_frame_number, int layer);
 
diff --git a/src/ZmqLogger.cpp b/src/ZmqLogger.cpp
index 4d8d8be62..ff93b39a9 100644
--- a/src/ZmqLogger.cpp
+++ b/src/ZmqLogger.cpp
@@ -126,16 +126,16 @@ void ZmqLogger::Log(std::string message)
 	// Create a scoped lock, allowing only a single thread to run the following code at one time
 	const juce::GenericScopedLock<juce::CriticalSection> lock(loggerCriticalSection);
 
-	// Send message over socket (ZeroMQ)
-	zmq::message_t reply (message.length());
-	std::memcpy (reply.data(), message.c_str(), message.length());
-
-#if ZMQ_VERSION > ZMQ_MAKE_VERSION(4, 3, 1)
-	// Set flags for immediate delivery (new API)
-	publisher->send(reply, zmq::send_flags::dontwait);
-#else
-	publisher->send(reply);
-#endif
+//	// Send message over socket (ZeroMQ)
+//	zmq::message_t reply (message.length());
+//	std::memcpy (reply.data(), message.c_str(), message.length());
+//
+//#if ZMQ_VERSION > ZMQ_MAKE_VERSION(4, 3, 1)
+//	// Set flags for immediate delivery (new API)
+//	publisher->send(reply, zmq::send_flags::dontwait);
+//#else
+//	publisher->send(reply);
+//#endif
 
 	// Also log to file, if open
 	LogToFile(message);
diff --git a/tests/Clip_Tests.cpp b/tests/Clip_Tests.cpp
index 87272427c..5d8ab43ce 100644
--- a/tests/Clip_Tests.cpp
+++ b/tests/Clip_Tests.cpp
@@ -258,7 +258,7 @@ TEST(Verify_Parent_Timeline)
 
 	// Check size of frame image (with an associated timeline)
 	CHECK_EQUAL(c1.GetFrame(1)->GetImage()->width(), 640);
-	CHECK_EQUAL(c1.GetFrame(1)->GetImage()->height(), 480);
+	CHECK_EQUAL(c1.GetFrame(1)->GetImage()->height(), 360);
 }
 
 } // SUITE
diff --git a/tests/Settings_Tests.cpp b/tests/Settings_Tests.cpp
index 62dd2e23f..ce5dc2824 100644
--- a/tests/Settings_Tests.cpp
+++ b/tests/Settings_Tests.cpp
@@ -43,7 +43,6 @@ TEST(Settings_Default_Constructor)
 
 	CHECK_EQUAL(12, s->OMP_THREADS);
 	CHECK_EQUAL(false, s->HIGH_QUALITY_SCALING);
-	CHECK_EQUAL(false, s->WAIT_FOR_VIDEO_PROCESSING_TASK);
 }
 
 TEST(Settings_Change_Settings)
@@ -52,13 +51,10 @@ TEST(Settings_Change_Settings)
 	Settings *s = Settings::Instance();
 	s->OMP_THREADS = 8;
 	s->HIGH_QUALITY_SCALING = true;
-	s->WAIT_FOR_VIDEO_PROCESSING_TASK = true;
 
 	CHECK_EQUAL(8, s->OMP_THREADS);
 	CHECK_EQUAL(true, s->HIGH_QUALITY_SCALING);
-	CHECK_EQUAL(true, s->WAIT_FOR_VIDEO_PROCESSING_TASK);
 
 	CHECK_EQUAL(8, Settings::Instance()->OMP_THREADS);
 	CHECK_EQUAL(true, Settings::Instance()->HIGH_QUALITY_SCALING);
-	CHECK_EQUAL(true, Settings::Instance()->WAIT_FOR_VIDEO_PROCESSING_TASK);
 }
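
[Editor's note — illustration only, not part of the patch. The new Clip::GetFrame(background_frame, frame_number) overload (see the Clip.cpp, Timeline.cpp and Timeline.h hunks) lets the Timeline hand its working frame down so the clip draws directly onto it, which is what removed the two extra QPainter::drawImage passes. A hedged usage sketch, assuming a Clip* named 'clip' with an open reader:]

    // Build a background frame the clip will composite onto: frame 1, 1280x720,
    // black, 1470 audio samples, stereo (values here are illustrative).
    std::shared_ptr<openshot::Frame> background = std::make_shared<openshot::Frame>(
        1, 1280, 720, "#000000", 1470, 2);
    background->AddAudioSilence(1470);

    // The clip draws its image and mixes its audio into 'background' and
    // returns the same frame object, now carrying the composited result.
    std::shared_ptr<openshot::Frame> composited = clip->GetFrame(background, 1);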