diff --git a/src/CVStabilization.cpp b/src/CVStabilization.cpp index 864f9b583..12b124c9d 100644 --- a/src/CVStabilization.cpp +++ b/src/CVStabilization.cpp @@ -90,9 +90,6 @@ void CVStabilization::stabilizeClip(openshot::Clip& video, size_t _start, size_t // Update progress processingController->SetProgress(uint(100*(frame_number-start)/(end-start))); } - // Show average and max transformation parameters - std::cout<<"\nAVERAGE DX: "< trajectory = ComputeFramesTrajectory(); @@ -194,9 +191,6 @@ bool CVStabilization::TrackFrameFeatures(cv::Mat frame, size_t frameNum){ prev_to_cur_transform.push_back(TransformParam(dx, dy, da)); frame.copyTo(prev_grey); - // Show processing info - cout << "Frame: " << frameNum << " - good optical flow: " << prev_corner2.size() << endl; - return true; } diff --git a/src/FFmpegWriter.cpp b/src/FFmpegWriter.cpp index 0cc4daa2d..b3965ff66 100644 --- a/src/FFmpegWriter.cpp +++ b/src/FFmpegWriter.cpp @@ -1669,10 +1669,8 @@ void FFmpegWriter::write_audio_packets(bool is_final) { av_opt_set_int(avr, "out_channels", info.channels, 0); SWR_INIT(avr); } - int nb_samples = 0; - // Convert audio samples - nb_samples = SWR_CONVERT( + int nb_samples = SWR_CONVERT( avr, // audio resample context audio_converted->data, // output data pointers audio_converted->linesize[0], // output plane size, in bytes. 
(0 if unknown) @@ -1683,7 +1681,7 @@ void FFmpegWriter::write_audio_packets(bool is_final) { ); // Set remaining samples - remaining_frame_samples = nb_samples * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16); + remaining_frame_samples = total_frame_samples; // Create a new array (to hold all resampled S16 audio samples) all_resampled_samples = (int16_t *) av_malloc( diff --git a/src/FrameMapper.cpp b/src/FrameMapper.cpp index 98b0c112c..0e3b0272e 100644 --- a/src/FrameMapper.cpp +++ b/src/FrameMapper.cpp @@ -36,7 +36,7 @@ using namespace std; using namespace openshot; FrameMapper::FrameMapper(ReaderBase *reader, Fraction target, PulldownType target_pulldown, int target_sample_rate, int target_channels, ChannelLayout target_channel_layout) : - reader(reader), target(target), pulldown(target_pulldown), is_dirty(true), avr(NULL) + reader(reader), target(target), pulldown(target_pulldown), is_dirty(true), avr(NULL), parent_position(0.0) { // Set the original frame rate from the reader original = Fraction(reader->info.fps.num, reader->info.fps.den); @@ -112,6 +112,16 @@ void FrameMapper::Init() fields.clear(); frames.clear(); + // Find parent position (if any) + Clip *parent = (Clip *) ParentClip(); + if (parent) { + parent_position = parent->Position(); + parent_start = parent->Start(); + } else { + parent_position = 0.0; + parent_start = 0.0; + } + // Mark as not dirty is_dirty = false; @@ -263,8 +273,9 @@ void FrameMapper::Init() while (remaining_samples > 0) { - // get original samples - int original_samples = Frame::GetSamplesPerFrame(AdjustFrameNumber(end_samples_frame), original, reader->info.sample_rate, reader->info.channels) - end_samples_position; + // Get original samples (with NO framerate adjustments) + // This is the original reader's frame numbers + int original_samples = Frame::GetSamplesPerFrame(end_samples_frame, original, reader->info.sample_rate, reader->info.channels) - end_samples_position; // Enough samples if (original_samples >= remaining_samples) 
@@ -395,9 +406,20 @@ std::shared_ptr FrameMapper::GetFrame(int64_t requested_frame) // Create a scoped lock, allowing only a single thread to run the following code at one time const GenericScopedLock lock(getFrameCriticalSection); - // Check if mappings are dirty (and need to be recalculated) + // Find parent properties (if any) + Clip *parent = (Clip *) ParentClip(); + if (parent) { + float position = parent->Position(); + float start = parent->Start(); + if (parent_position != position || parent_start != start) { + // Force dirty if parent clip has moved or been trimmed + // since this heavily affects frame #s and audio mappings + is_dirty = true; + } + } + + // Check if mappings are dirty (and need to be recalculated) if (is_dirty) - // Recalculate mappings Init(); // Check final cache a 2nd time (due to potential lock already generating this frame) diff --git a/src/FrameMapper.h b/src/FrameMapper.h index ac333ddfa..de3d8f9ba 100644 --- a/src/FrameMapper.h +++ b/src/FrameMapper.h @@ -144,6 +144,8 @@ namespace openshot ReaderBase *reader; // The source video reader CacheMemory final_cache; // Cache of actual Frame objects bool is_dirty; // When this is true, the next call to GetFrame will re-init the mapping + float parent_position; // Position of parent clip (which is used to generate the audio mapping) + float parent_start; // Start of parent clip (which is used to generate the audio mapping) SWRCONTEXT *avr; // Audio resampling context object // Internal methods used by init diff --git a/tests/FrameMapper_Tests.cpp b/tests/FrameMapper_Tests.cpp index d586c30c2..59fcfbb36 100644 --- a/tests/FrameMapper_Tests.cpp +++ b/tests/FrameMapper_Tests.cpp @@ -368,7 +368,7 @@ TEST(redistribute_samples_per_frame) { // (i.e. same exact audio sample data). We use a Timeline to overlap these clips // (and offset 1 clip by 1 frame), and we verify that the correct # of samples is returned by each // Clip Frame instance. 
In the past, FrameMappers would sometimes generate the wrong # of samples - // in a frame, and the Timeline recieve mismatching # of audio samples from 2 or more clips... + // in a frame, and the Timeline receive mismatching # of audio samples from 2 or more clips... // causing audio data to be truncated and lost (i.e. creating a pop). // Create cache object to hold test frames @@ -485,6 +485,135 @@ TEST(redistribute_samples_per_frame) { r.Close(); } +TEST(distribute_samples) { + // This test verifies that audio data can be redistributed correctly + // between common and uncommon frame rates + int sample_rate = 48000; + int channels = 2; + int num_seconds = 1; + + // Source frame rates (varies the # of samples per frame) + vector rates = { openshot::Fraction(30,1), + openshot::Fraction(24,1) , + openshot::Fraction(119,4), + openshot::Fraction(30000,1001) }; + for (auto& frame_rate : rates) { + // Init sin wave variables + int OFFSET = 0; + float AMPLITUDE = 0.75; + double ANGLE = 0.0; + int NUM_SAMPLES = 100; + + // Create cache object to hold test frames + CacheMemory cache; + + // Let's create some test frames + for (int64_t frame_number = 1; frame_number <= (frame_rate.ToFloat() * num_seconds * 2); frame_number++) { + // Create blank frame (with specific frame #, samples, and channels) + int sample_count = openshot::Frame::GetSamplesPerFrame(frame_number, frame_rate, sample_rate, channels); + std::shared_ptr f(new openshot::Frame(frame_number, sample_count, channels)); + f->SampleRate(sample_rate); + + // Create test samples with sin wave (predictable values) + float *audio_buffer = new float[sample_count * 2]; + for (int sample_number = 0; sample_number < sample_count; sample_number++) { + // Calculate sin wave + float sample_value = float(AMPLITUDE * sin(ANGLE) + OFFSET); + audio_buffer[sample_number] = abs(sample_value); + ANGLE += (2 * M_PI) / NUM_SAMPLES; + } + + // Add custom audio samples to Frame (bool replaceSamples, int destChannel, int 
destStartSample, const float* source, + f->AddAudio(true, 0, 0, audio_buffer, sample_count, 1.0); // add channel 1 + f->AddAudio(true, 1, 0, audio_buffer, sample_count, 1.0); // add channel 2 + + // Add test frame to dummy reader + cache.Add(f); + } + + // Create a default fraction (should be 1/1) + openshot::DummyReader r(frame_rate, 1920, 1080, sample_rate, channels, 30.0, &cache); + r.Open(); // Open the reader + + // Target frame rates + vector mapped_rates = { openshot::Fraction(30,1), + openshot::Fraction(24,1) , + openshot::Fraction(119,4), + openshot::Fraction(30000,1001) }; + for (auto &mapped_rate : mapped_rates) { + // Reset SIN wave + ANGLE = 0.0; + + // Map to different fps + FrameMapper map(&r, mapped_rate, PULLDOWN_NONE, sample_rate, channels, LAYOUT_STEREO); + map.info.has_audio = true; + map.Open(); + + // Loop through samples, and verify FrameMapper didn't mess up individual sample values + int num_samples = 0; + for (int frame_index = 1; frame_index <= (map.info.fps.ToInt() * num_seconds); frame_index++) { + int sample_count = map.GetFrame(frame_index)->GetAudioSamplesCount(); + for (int sample_index = 0; sample_index < sample_count; sample_index++) { + + // Calculate sin wave + float predicted_value = abs(float(AMPLITUDE * sin(ANGLE) + OFFSET)); + ANGLE += (2 * M_PI) / NUM_SAMPLES; + + // Verify each mapped sample value is correct (after being redistributed by the FrameMapper) + float mapped_value = map.GetFrame(frame_index)->GetAudioSample(0, sample_index, 1.0); + CHECK_CLOSE(predicted_value, mapped_value, 0.001); + } + // Increment sample value + num_samples += map.GetFrame(frame_index)->GetAudioSamplesCount(); + } + + float clip_position = 3.77; + int starting_clip_frame = round(clip_position * map.info.fps.ToFloat()) + 1; + + // Create Timeline (same specs as reader) + Timeline t1(map.info.width, map.info.height, map.info.fps, map.info.sample_rate, map.info.channels, + map.info.channel_layout); + + Clip c1; + c1.Reader(&map); + c1.Layer(1); 
+ c1.Position(clip_position); + c1.Start(0.0); + c1.End(10.0); + + // Add clips + t1.AddClip(&c1); + t1.Open(); + + // Reset SIN wave + ANGLE = 0.0; + + for (int frame_index = starting_clip_frame; frame_index < (starting_clip_frame + (t1.info.fps.ToFloat() * num_seconds)); frame_index++) { + for (int sample_index = 0; sample_index < t1.GetFrame(frame_index)->GetAudioSamplesCount(); sample_index++) { + // Calculate sin wave + float predicted_value = abs(float(AMPLITUDE * sin(ANGLE) + OFFSET)); + ANGLE += (2 * M_PI) / NUM_SAMPLES; + + // Verify each mapped sample value is correct (after being redistributed by the FrameMapper) + float timeline_value = t1.GetFrame(frame_index)->GetAudioSample(0, sample_index, 1.0); + + // Verify the timeline value matches the predicted sin wave value (only 1 clip on this timeline) + CHECK_CLOSE(predicted_value, timeline_value, 0.001); + } + } + + // Close mapper + map.Close(); + t1.Close(); + } + + // Clean up reader + r.Close(); + cache.Clear(); + + } // for rates +} + TEST(Json) { DummyReader r(Fraction(30,1), 1280, 720, 48000, 2, 5.0);