Skip to content

Commit

Permalink
Large refactor of OpenMP integration (or rather the removal of OpenMP…
Browse files Browse the repository at this point in the history
… on the Timeline and FFmpeg-related classes). The logic behind this decision was based on profiling libopenshot, which revealed a large amount of wasted CPU idle time across the various threads. The slow code is still synchronous, and all the threads must wait on each other, adding additional overhead. So, this change removes many unneeded threads and simplifies the underlying Timeline->Clip->FFmpegReader flow. Also, it removes 2 calls to QPainter::drawImage by improving the flexibility of Clip->GetFrame.
  • Loading branch information
jonoomph committed Feb 18, 2021
1 parent 15695e3 commit 3daa5bd
Show file tree
Hide file tree
Showing 15 changed files with 1,037 additions and 1,242 deletions.
2 changes: 1 addition & 1 deletion .gitignore
Expand Up @@ -6,6 +6,6 @@
.project
.cproject
/.metadata/
cmake-build-debug/*
tags
*~

71 changes: 36 additions & 35 deletions examples/Example.cpp
Expand Up @@ -39,51 +39,52 @@ using namespace openshot;

int main(int argc, char* argv[]) {

Settings *s = Settings::Instance();
s->HARDWARE_DECODER = 2; // 1 VA-API, 2 NVDEC, 6 VDPAU
s->HW_DE_DEVICE_SET = 0;
// Types for storing time durations in whole and fractional milliseconds
using ms = std::chrono::milliseconds;
using s = std::chrono::seconds;
using double_ms = std::chrono::duration<double, ms::period>;

std::string input_filepath = TEST_MEDIA_PATH;
input_filepath += "sintel_trailer-720p.mp4";
// Track total time
const auto total_time = double_ms(0.0);

FFmpegReader r9(input_filepath);
// FFmpeg Reader performance test
const auto total_1 = std::chrono::high_resolution_clock::now();
FFmpegReader r9("/home/jonathan/Videos/sintel_trailer-1080p.mp4");
r9.Open();
r9.DisplayInfo();

/* WRITER ---------------- */
FFmpegWriter w9("metadata.mp4");
for (long int frame = 1; frame <= 1000; frame++)
{
const auto time1 = std::chrono::high_resolution_clock::now();
std::shared_ptr<Frame> f = r9.GetFrame(frame);
const auto time2 = std::chrono::high_resolution_clock::now();
std::cout << "FFmpegReader: " << frame << " (" << double_ms(time2 - time1).count() << " ms)" << std::endl;
}
const auto total_2 = std::chrono::high_resolution_clock::now();
auto total_sec = std::chrono::duration_cast<ms>(total_2 - total_1);
std::cout << "FFmpegReader TOTAL: " << total_sec.count() << " ms" << std::endl;
r9.Close();

// Set options
w9.SetAudioOptions(true, "libmp3lame", r9.info.sample_rate, r9.info.channels, r9.info.channel_layout, 128000);
w9.SetVideoOptions(true, "libx264", r9.info.fps, 1024, 576, Fraction(1,1), false, false, 3000000);

w9.info.metadata["title"] = "testtest";
w9.info.metadata["artist"] = "aaa";
w9.info.metadata["album"] = "bbb";
w9.info.metadata["year"] = "2015";
w9.info.metadata["description"] = "ddd";
w9.info.metadata["comment"] = "eee";
w9.info.metadata["comment"] = "comment";
w9.info.metadata["copyright"] = "copyright OpenShot!";

// Open writer
w9.Open();
// Timeline Reader performance test
Timeline tm(r9.info.width, r9.info.height, r9.info.fps, r9.info.sample_rate, r9.info.channels, r9.info.channel_layout);
Clip *c = new Clip(&r9);
tm.AddClip(c);
tm.Open();

for (long int frame = 1; frame <= 100; frame++)
const auto total_3 = std::chrono::high_resolution_clock::now();
for (long int frame = 1; frame <= 1000; frame++)
{
//int frame_number = (rand() % 750) + 1;
int frame_number = frame;
std::shared_ptr<Frame> f = r9.GetFrame(frame_number);
w9.WriteFrame(f);
const auto time1 = std::chrono::high_resolution_clock::now();
std::shared_ptr<Frame> f = tm.GetFrame(frame);
const auto time2 = std::chrono::high_resolution_clock::now();
std::cout << "Timeline: " << frame << " (" << double_ms(time2 - time1).count() << " ms)" << std::endl;
}
const auto total_4 = std::chrono::high_resolution_clock::now();
total_sec = std::chrono::duration_cast<ms>(total_4 - total_3);
std::cout << "Timeline TOTAL: " << total_sec.count() << " ms" << std::endl;
tm.Close();

// Close writer & reader
w9.Close();

// Close timeline
r9.Close();

std::cout << "Completed successfully!" << std::endl;
std::cout << "Completed successfully!" << std::endl;

return 0;
}
173 changes: 87 additions & 86 deletions src/Clip.cpp
Expand Up @@ -346,7 +346,7 @@ std::shared_ptr<Frame> Clip::GetFrame(int64_t frame_number)
}

// Use an existing openshot::Frame object and draw this Clip's frame onto it
std::shared_ptr<Frame> Clip::GetFrame(std::shared_ptr<openshot::Frame> frame, int64_t frame_number)
std::shared_ptr<Frame> Clip::GetFrame(std::shared_ptr<openshot::Frame> background_frame, int64_t frame_number)
{
// Check for open reader (or throw exception)
if (!is_open)
Expand Down Expand Up @@ -376,7 +376,7 @@ std::shared_ptr<Frame> Clip::GetFrame(std::shared_ptr<openshot::Frame> frame, in
int enabled_video = has_video.GetInt(frame_number);
if (enabled_video == -1 && reader && reader->info.has_video)
enabled_video = 1;
else if (enabled_video == -1 && reader && !reader->info.has_audio)
else if (enabled_video == -1 && reader && !reader->info.has_video)
enabled_video = 0;

// Is a time map detected
Expand All @@ -386,27 +386,14 @@ std::shared_ptr<Frame> Clip::GetFrame(std::shared_ptr<openshot::Frame> frame, in
new_frame_number = time_mapped_number;

// Now that we have re-mapped what frame number is needed, go and get the frame pointer
std::shared_ptr<Frame> original_frame;
original_frame = GetOrCreateFrame(new_frame_number);

// Copy the image from the odd field
if (enabled_video)
frame->AddImage(std::make_shared<QImage>(*original_frame->GetImage()));

// Loop through each channel, add audio
if (enabled_audio && reader->info.has_audio)
for (int channel = 0; channel < original_frame->GetAudioChannelsCount(); channel++)
frame->AddAudio(true, channel, 0, original_frame->GetAudioSamples(channel), original_frame->GetAudioSamplesCount(), 1.0);
std::shared_ptr<Frame> original_frame = GetOrCreateFrame(new_frame_number);

// Get time mapped frame number (used to increase speed, change direction, etc...)
// TODO: Handle variable # of samples, since this resamples audio for different speeds (only when time curve is set)
get_time_mapped_frame(frame, new_frame_number);

// Adjust # of samples to match requested (the interaction with time curves will make this tricky)
// TODO: Implement move samples to/from next frame
get_time_mapped_frame(original_frame, new_frame_number);

// Apply effects to the frame (if any)
apply_effects(frame);
apply_effects(original_frame);

// Determine size of image (from Timeline or Reader)
int width = 0;
Expand All @@ -422,13 +409,13 @@ std::shared_ptr<Frame> Clip::GetFrame(std::shared_ptr<openshot::Frame> frame, in
}

// Apply keyframe / transforms
apply_keyframes(frame, width, height);
apply_keyframes(original_frame, background_frame->GetImage());

// Cache frame
cache.Add(frame);
// Cache frame
cache.Add(original_frame);

// Return processed 'frame'
return frame;
return original_frame;
}
else
// Throw error if reader not initialized
Expand Down Expand Up @@ -709,7 +696,6 @@ std::shared_ptr<Frame> Clip::GetOrCreateFrame(int64_t number)
// Create a new copy of reader frame
// This allows a clip to modify the pixels and audio of this frame without
// changing the underlying reader's frame data
//std::shared_ptr<Frame> reader_copy(new Frame(number, 1, 1, "#000000", reader_frame->GetAudioSamplesCount(), reader_frame->GetAudioChannelsCount()));
auto reader_copy = std::make_shared<Frame>(*reader_frame.get());
reader_copy->SampleRate(reader_frame->SampleRate());
reader_copy->ChannelsLayout(reader_frame->ChannelsLayout());
Expand Down Expand Up @@ -1126,18 +1112,84 @@ bool Clip::isEqual(double a, double b)
return fabs(a - b) < 0.000001;
}

// Apply keyframes to the source frame (if any)
void Clip::apply_keyframes(std::shared_ptr<Frame> frame, std::shared_ptr<QImage> background_canvas) {
	// Nothing to composite when video is disabled for this frame, or when the
	// clip has neither a waveform nor any video stream (audio-only, no visualization).
	if (has_video.GetInt(frame->number) == 0 ||
		(!Waveform() && !Reader()->info.has_video)) {
		// Skip all image processing for performance reasons
		return;
	}

	// The clip's own image, which gets painted onto the canvas
	std::shared_ptr<QImage> clip_image = frame->GetImage();

	// Final output dimensions come from the background canvas
	const int canvas_width = background_canvas->width();
	const int canvas_height = background_canvas->height();

	// Build the composite transform (translate / rotate / scale) from keyframes
	QTransform xform = get_transform(frame, canvas_width, canvas_height);

	// Debug output
	ZmqLogger::Instance()->AppendDebugMethod("Clip::ApplyKeyframes (Transform: Composite Image Layer: Prepare)", "frame->number", frame->number);

	// Paint directly onto the shared background canvas
	QPainter canvas_painter(background_canvas.get());
	canvas_painter.setRenderHints(QPainter::Antialiasing | QPainter::SmoothPixmapTransform | QPainter::TextAntialiasing, true);

	// Apply transform (translate, rotate, scale), then composite this clip's
	// image as a new layer over whatever is already on the canvas
	canvas_painter.setTransform(xform);
	canvas_painter.setCompositionMode(QPainter::CompositionMode_SourceOver);
	canvas_painter.drawImage(0, 0, *clip_image);

	// Optionally overlay frame numbers for debugging (requires a parent timeline)
	if (timeline && display != FRAME_DISPLAY_NONE) {
		Timeline *parent_timeline = (Timeline *) timeline;

		// Format the requested frame-number label
		std::stringstream label;
		if (display == FRAME_DISPLAY_CLIP) {
			// Clip-relative frame number
			label << frame->number;
		} else if (display == FRAME_DISPLAY_TIMELINE) {
			// Timeline-relative frame number (clip position offset applied)
			label << (position * parent_timeline->info.fps.ToFloat()) + frame->number;
		} else if (display == FRAME_DISPLAY_BOTH) {
			// Timeline frame number with the clip frame number in parentheses
			label << (position * parent_timeline->info.fps.ToFloat()) + frame->number << " (" << frame->number << ")";
		}

		// Draw the label in white on top of the composited image
		canvas_painter.setPen(QColor("#ffffff"));
		canvas_painter.drawText(20, 20, QString(label.str().c_str()));
	}
	canvas_painter.end();

	// Replace the frame's image with the updated canvas
	frame->AddImage(background_canvas);
}

// Apply keyframes to the source frame (if any)
void Clip::apply_keyframes(std::shared_ptr<Frame> frame, int width, int height)
QTransform Clip::get_transform(std::shared_ptr<Frame> frame, int width, int height)
{
// Get actual frame image data
std::shared_ptr<QImage> source_image = frame->GetImage();
// Get image from clip
std::shared_ptr<QImage> source_image = frame->GetImage();

/* REPLACE IMAGE WITH WAVEFORM IMAGE (IF NEEDED) */
if (Waveform())
{
// Debug output
ZmqLogger::Instance()->AppendDebugMethod("Clip::apply_keyframes (Generate Waveform Image)", "frame->number", frame->number, "Waveform()", Waveform());
ZmqLogger::Instance()->AppendDebugMethod("Clip::get_transform (Generate Waveform Image)", "frame->number", frame->number, "Waveform()", Waveform());

// Get the color of the waveform
int red = wave_color.red.GetInt(frame->number);
Expand Down Expand Up @@ -1170,7 +1222,7 @@ void Clip::apply_keyframes(std::shared_ptr<Frame> frame, int width, int height)
}

// Debug output
ZmqLogger::Instance()->AppendDebugMethod("Clip::apply_keyframes (Set Alpha & Opacity)", "alpha_value", alpha_value, "frame->number", frame->number);
ZmqLogger::Instance()->AppendDebugMethod("Clip::get_transform (Set Alpha & Opacity)", "alpha_value", alpha_value, "frame->number", frame->number);
}

/* RESIZE SOURCE IMAGE - based on scale type */
Expand All @@ -1181,21 +1233,21 @@ void Clip::apply_keyframes(std::shared_ptr<Frame> frame, int width, int height)
source_size.scale(width, height, Qt::KeepAspectRatio);

// Debug output
ZmqLogger::Instance()->AppendDebugMethod("Clip::apply_keyframes (Scale: SCALE_FIT)", "frame->number", frame->number, "source_width", source_size.width(), "source_height", source_size.height());
ZmqLogger::Instance()->AppendDebugMethod("Clip::get_transform (Scale: SCALE_FIT)", "frame->number", frame->number, "source_width", source_size.width(), "source_height", source_size.height());
break;
}
case (SCALE_STRETCH): {
source_size.scale(width, height, Qt::IgnoreAspectRatio);

// Debug output
ZmqLogger::Instance()->AppendDebugMethod("Clip::apply_keyframes (Scale: SCALE_STRETCH)", "frame->number", frame->number, "source_width", source_size.width(), "source_height", source_size.height());
ZmqLogger::Instance()->AppendDebugMethod("Clip::get_transform (Scale: SCALE_STRETCH)", "frame->number", frame->number, "source_width", source_size.width(), "source_height", source_size.height());
break;
}
case (SCALE_CROP): {
source_size.scale(width, height, Qt::KeepAspectRatioByExpanding);

// Debug output
ZmqLogger::Instance()->AppendDebugMethod("Clip::apply_keyframes (Scale: SCALE_CROP)", "frame->number", frame->number, "source_width", source_size.width(), "source_height", source_size.height());
ZmqLogger::Instance()->AppendDebugMethod("Clip::get_transform (Scale: SCALE_CROP)", "frame->number", frame->number, "source_width", source_size.width(), "source_height", source_size.height());
break;
}
case (SCALE_NONE): {
Expand All @@ -1207,7 +1259,7 @@ void Clip::apply_keyframes(std::shared_ptr<Frame> frame, int width, int height)
source_size.scale(width * source_width_ratio, height * source_height_ratio, Qt::KeepAspectRatio);

// Debug output
ZmqLogger::Instance()->AppendDebugMethod("Clip::apply_keyframes (Scale: SCALE_NONE)", "frame->number", frame->number, "source_width", source_size.width(), "source_height", source_size.height());
ZmqLogger::Instance()->AppendDebugMethod("Clip::get_transform (Scale: SCALE_NONE)", "frame->number", frame->number, "source_width", source_size.width(), "source_height", source_size.height());
break;
}
}
Expand Down Expand Up @@ -1258,7 +1310,7 @@ void Clip::apply_keyframes(std::shared_ptr<Frame> frame, int width, int height)
}

// Debug output
ZmqLogger::Instance()->AppendDebugMethod("Clip::apply_keyframes (Gravity)", "frame->number", frame->number, "source_clip->gravity", gravity, "scaled_source_width", scaled_source_width, "scaled_source_height", scaled_source_height);
ZmqLogger::Instance()->AppendDebugMethod("Clip::get_transform (Gravity)", "frame->number", frame->number, "source_clip->gravity", gravity, "scaled_source_width", scaled_source_width, "scaled_source_height", scaled_source_height);

/* LOCATION, ROTATION, AND SCALE */
float r = rotation.GetValue(frame->number); // rotate in degrees
Expand All @@ -1272,7 +1324,7 @@ void Clip::apply_keyframes(std::shared_ptr<Frame> frame, int width, int height)
QTransform transform;

// Transform source image (if needed)
ZmqLogger::Instance()->AppendDebugMethod("Clip::apply_keyframes (Build QTransform - if needed)", "frame->number", frame->number, "x", x, "y", y, "r", r, "sx", sx, "sy", sy);
ZmqLogger::Instance()->AppendDebugMethod("Clip::get_transform (Build QTransform - if needed)", "frame->number", frame->number, "x", x, "y", y, "r", r, "sx", sx, "sy", sy);

if (!isEqual(x, 0) || !isEqual(y, 0)) {
// TRANSLATE/MOVE CLIP
Expand All @@ -1297,56 +1349,5 @@ void Clip::apply_keyframes(std::shared_ptr<Frame> frame, int width, int height)
transform.scale(source_width_scale, source_height_scale);
}

// Debug output
ZmqLogger::Instance()->AppendDebugMethod("Clip::apply_keyframes (Transform: Composite Image Layer: Prepare)", "frame->number", frame->number);

/* COMPOSITE SOURCE IMAGE (LAYER) ONTO FINAL IMAGE */
auto new_image = std::make_shared<QImage>(QSize(width, height), source_image->format());
new_image->fill(QColor(QString::fromStdString("#00000000")));

// Load timeline's new frame image into a QPainter
QPainter painter(new_image.get());
painter.setRenderHints(QPainter::Antialiasing | QPainter::SmoothPixmapTransform | QPainter::TextAntialiasing, true);

// Apply transform (translate, rotate, scale)
painter.setTransform(transform);

// Composite a new layer onto the image
painter.setCompositionMode(QPainter::CompositionMode_SourceOver);
painter.drawImage(0, 0, *source_image);

if (timeline) {
Timeline *t = (Timeline *) timeline;

// Draw frame #'s on top of image (if needed)
if (display != FRAME_DISPLAY_NONE) {
std::stringstream frame_number_str;
switch (display) {
case (FRAME_DISPLAY_NONE):
// This is only here to prevent unused-enum warnings
break;

case (FRAME_DISPLAY_CLIP):
frame_number_str << frame->number;
break;

case (FRAME_DISPLAY_TIMELINE):
frame_number_str << (position * t->info.fps.ToFloat()) + frame->number;
break;

case (FRAME_DISPLAY_BOTH):
frame_number_str << (position * t->info.fps.ToFloat()) + frame->number << " (" << frame->number << ")";
break;
}

// Draw frame number on top of image
painter.setPen(QColor("#ffffff"));
painter.drawText(20, 20, QString(frame_number_str.str().c_str()));
}
}

painter.end();

// Add new QImage to frame
frame->AddImage(new_image);
return transform;
}

0 comments on commit 3daa5bd

Please sign in to comment.