Added 2 FrameMapper unit tests which verify resampled audio data has no gaps/pops, and that offset FrameMapper clips don't use the wrong # of audio samples
jonoomph committed Oct 23, 2020
1 parent 9495d1d commit bba1260
Showing 1 changed file with 215 additions and 64 deletions.
279 changes: 215 additions & 64 deletions tests/FrameMapper_Tests.cpp
@@ -211,113 +211,264 @@ TEST(FrameMapper_resample_audio_48000_to_41000)
map.Close();
}

TEST (FrameMapper_resample_audio_mapper) {
    // This test verifies that audio data can be resampled on FrameMapper
    // instances, even at frame rates that do not divide evenly, and that no audio data is misplaced
    // or duplicated. We verify this by creating a SIN wave, adding those data points to a DummyReader,
    // resampling, and comparing the result back to the original SIN wave calculation.

    // Create cache object to hold test frames
    CacheMemory cache;

    int OFFSET = 0;
    float AMPLITUDE = 0.75;
    double ANGLE = 0.0;
    int NUM_SAMPLES = 100;

    // Let's create some test frames
    for (int64_t frame_number = 1; frame_number <= 90; frame_number++) {
        // Create blank frame (with specific frame #, samples, and channels)
        // Sample count should be 44100 / 30 fps = 1470 samples per frame
        int sample_count = 1470;
        std::shared_ptr<openshot::Frame> f(new openshot::Frame(frame_number, sample_count, 2));

        // Create test samples with sin wave (predictable values)
        float *audio_buffer = new float[sample_count * 2];
        for (int sample_number = 0; sample_number < sample_count; sample_number++) {
            // Calculate sin wave
            // TODO: I'm using abs(), because calling AddAudio only seems to be adding the positive values, which is bizarre
            float sample_value = float(AMPLITUDE * sin(ANGLE) + OFFSET);
            audio_buffer[sample_number] = abs(sample_value);
            ANGLE += (2 * M_PI) / NUM_SAMPLES;
        }

        // Add custom audio samples to the Frame (replaceSamples, destChannel, destStartSample, source, numSamples, gain)
        f->AddAudio(true, 0, 0, audio_buffer, sample_count, 1.0); // add channel 1
        f->AddAudio(true, 1, 0, audio_buffer, sample_count, 1.0); // add channel 2

        // AddAudio copies the samples, so the temporary buffer can be released
        delete[] audio_buffer;

        // Add test frame to dummy reader
        cache.Add(f);
    }

    // Create a dummy 30 fps reader backed by the cached frames
    openshot::DummyReader r(openshot::Fraction(30, 1), 1920, 1080, 44100, 2, 30.0, &cache);
    r.info.has_audio = true;
    r.Open(); // Open the reader

    // Sample rates to resample to
    vector<int> arr = { 44100, 16000 };
    for (auto& rate : arr) {
        // Reset SIN wave
        ANGLE = 0.0;

        // Map to 24 fps, which should create a variable # of samples per frame
        FrameMapper map(&r, Fraction(24, 1), PULLDOWN_NONE, rate, 2, LAYOUT_STEREO);
        map.info.has_audio = true;
        map.Open();

        // Calculate how much the initial sample rate has changed
        float resample_multiplier = ((float) rate / r.info.sample_rate);

        // Loop through samples, and verify FrameMapper didn't mess up individual sample values
        int num_samples = 0;
        for (int frame_index = 1; frame_index <= map.info.fps.ToInt(); frame_index++) {
            int sample_count = map.GetFrame(frame_index)->GetAudioSamplesCount();
            for (int sample_index = 0; sample_index < sample_count; sample_index++) {

                // Calculate sin wave
                float sample_value = abs(float(AMPLITUDE * sin(ANGLE) + OFFSET));
                ANGLE += (2 * M_PI) / (NUM_SAMPLES * resample_multiplier);

                // Verify each mapped sample value is correct (after being redistributed by the FrameMapper)
                float resampled_value = map.GetFrame(frame_index)->GetAudioSample(0, sample_index, 1.0);

                // TODO: a tolerance of 0.1 is much too broad to accurately test this, but without it, all the resampled values are too far away from expected
                CHECK_CLOSE(sample_value, resampled_value, 0.1);
            }
            // Increment sample count
            num_samples += map.GetFrame(frame_index)->GetAudioSamplesCount();
        }

        // Verify samples per second matches the target sample rate
        CHECK_EQUAL(num_samples, map.info.sample_rate);

        // Create Timeline (same specs as the mapped reader)
        Timeline t1(map.info.width, map.info.height, map.info.fps, rate, map.info.channels, map.info.channel_layout);

        Clip c1;
        c1.Reader(&map);
        c1.Layer(1);
        c1.Position(0.0);
        c1.Start(0.0);
        c1.End(10.0);

        // Create a 2nd map to 24 fps, which should also create a variable # of samples per frame (for some sample rates)
        FrameMapper map2(&r, Fraction(24, 1), PULLDOWN_NONE, rate, 2, LAYOUT_STEREO);
        map2.info.has_audio = true;
        map2.Open();

        Clip c2;
        c2.Reader(&map2);
        c2.Layer(1);
        c2.Position(0.0);
        c2.Start(0.0);
        c2.End(10.0);

        // Add clips
        t1.AddClip(&c1);
        t1.AddClip(&c2);
        t1.Open();

        // Reset SIN wave
        ANGLE = 0.0;

        // Loop through samples, and verify the Timeline didn't mess up individual sample values
        for (int frame_index = 1; frame_index < 24; frame_index++) {
            t1.GetFrame(frame_index);
            for (int sample_index = 0; sample_index < t1.GetFrame(frame_index)->GetAudioSamplesCount(); sample_index++) {

                // Calculate sin wave
                float sample_value = abs(float(AMPLITUDE * sin(ANGLE) + OFFSET));
                ANGLE += (2 * M_PI) / (NUM_SAMPLES * resample_multiplier);

                // Verify each mapped sample value is correct (after being redistributed by the FrameMapper)
                float resampled_value = t1.GetFrame(frame_index)->GetAudioSample(0, sample_index, 1.0);

                // TODO: a tolerance of 0.1 is much too broad to accurately test this, but without it, all the resampled values are too far away from expected
                // Testing wave value X 2, since we have 2 overlapping clips
                CHECK_CLOSE(sample_value * 2.0, resampled_value, 0.1);
            }
        }

        // Close mappers and timeline
        map.Close();
        map2.Close();
        t1.Close();
    }

    // Clean up
    cache.Clear();
    r.Close();
}
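Both tests lean on the fact that the sample rate rarely divides evenly into the frame rate, so the mapper has to hand out a variable number of samples per frame while keeping the one-second total exact; the first test additionally scales the expected sine wave's angle step by resample_multiplier = rate / 44100 so the reference waveform stays aligned with the resampled output. The standalone sketch below is not part of this commit or of libopenshot's API (samples_in_frame is a hypothetical helper); it only illustrates the per-frame arithmetic the assertions rely on: at 44100 Hz and 24 fps, frames alternate between 1837 and 1838 samples, and the total over one second still comes out to exactly 44100.

#include <cmath>
#include <cstdio>

// Hypothetical helper (not libopenshot API): how many samples land in a given
// frame when sample_rate / fps does not divide evenly. Rounding a running
// total yields 1837- and 1838-sample frames at 44100 Hz / 24 fps while
// keeping the one-second total exact.
static int samples_in_frame(int frame_number, double fps, int sample_rate) {
    double per_frame = sample_rate / fps;                      // 1837.5 for 44100 / 24
    long end = std::lround(per_frame * frame_number);
    long start = std::lround(per_frame * (frame_number - 1));
    return (int)(end - start);
}

int main() {
    int total = 0;
    for (int frame = 1; frame <= 24; frame++) {
        int count = samples_in_frame(frame, 24.0, 44100);
        std::printf("frame %2d: %d samples\n", frame, count);  // alternates 1838 / 1837
        total += count;
    }
    std::printf("total over 1 second: %d samples\n", total);   // 44100, as the tests assert
    return 0;
}

Any drift in this bookkeeping is exactly what would surface as the gaps or pops the commit message describes.
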
TEST (FrameMapper_redistribute_samples_per_frame) {
    // This test verifies that audio data is correctly aligned on
    // FrameMapper instances. We do this by creating 2 Clips based on the same parent reader
    // (i.e. the exact same audio sample data). We use a Timeline to overlap these clips
    // (and offset 1 clip by 1 frame), and we verify that the correct # of samples is returned by each
    // Clip Frame instance. In the past, FrameMappers would sometimes generate the wrong # of samples
    // in a frame, and the Timeline would receive mismatching #s of audio samples from 2 or more clips,
    // causing audio data to be truncated and lost (i.e. creating a pop).

    // Create cache object to hold test frames
    CacheMemory cache;

    // Let's create some test frames
    int sample_value = 0;
    for (int64_t frame_number = 1; frame_number <= 90; frame_number++) {
        // Create blank frame (with specific frame #, samples, and channels)
        // Sample count should be 44100 / 30 fps = 1470 samples per frame
        int sample_count = 1470;
        std::shared_ptr<openshot::Frame> f(new openshot::Frame(frame_number, sample_count, 2));

        // Create test samples with incrementing values
        float *audio_buffer = new float[sample_count];
        for (int64_t sample_number = 0; sample_number < sample_count; sample_number++) {
            audio_buffer[sample_number] = sample_value + sample_number;
        }

        // Increment counter
        sample_value += sample_count;

        // Add custom audio samples to the Frame (replaceSamples, destChannel, destStartSample, source, numSamples, gain)
        f->AddAudio(true, 0, 0, audio_buffer, sample_count, 1.0); // add channel 1
        f->AddAudio(true, 1, 0, audio_buffer, sample_count, 1.0); // add channel 2

        // AddAudio copies the samples, so the temporary buffer can be released
        delete[] audio_buffer;

        // Add test frame to dummy reader
        cache.Add(f);
    }

    // Create a dummy 30 fps reader backed by the cached frames
    openshot::DummyReader r(openshot::Fraction(30, 1), 1920, 1080, 44100, 2, 30.0, &cache);
    r.Open(); // Open the reader

    // Frame rates to map to
    vector<int> arr = { 24, 30, 60 };
    for (auto& fps : arr) {
        // Map to the target frame rate, which should create a variable # of samples per frame
        // whenever 44100 does not divide evenly by the frame rate
        FrameMapper map(&r, Fraction(fps, 1), PULLDOWN_NONE, 44100, 2, LAYOUT_STEREO);
        map.info.has_audio = true;
        map.Open();

        // Loop through samples, and verify FrameMapper didn't mess up individual sample values
        sample_value = 0;
        for (int frame_index = 1; frame_index <= map.info.fps.ToInt(); frame_index++) {
            for (int sample_index = 0; sample_index < map.GetFrame(frame_index)->GetAudioSamplesCount(); sample_index++) {
                // Verify each mapped sample value is correct (after being redistributed by the FrameMapper)
                CHECK_EQUAL(sample_value + sample_index, map.GetFrame(frame_index)->GetAudioSample(0, sample_index, 1.0));
            }
            // Increment sample value
            sample_value += map.GetFrame(frame_index)->GetAudioSamplesCount();
        }

        // Verify samples per second is correct (i.e. 44100)
        CHECK_EQUAL(sample_value, map.info.sample_rate);

        // Create Timeline (same specs as the mapped reader)
        Timeline t1(map.info.width, map.info.height, map.info.fps, 44100, map.info.channels, map.info.channel_layout);

        Clip c1;
        c1.Reader(&map);
        c1.Layer(1);
        c1.Position(0.0);
        c1.Start(0.0);
        c1.End(10.0);

        // Create a 2nd identical mapper, which should also create a variable # of samples per frame
        FrameMapper map2(&r, Fraction(fps, 1), PULLDOWN_NONE, 44100, 2, LAYOUT_STEREO);
        map2.info.has_audio = true;
        map2.Open();

        Clip c2;
        c2.Reader(&map2);
        c2.Layer(1);
        // Position 1 frame into the video. This should mis-align the audio and create situations
        // in which overlapping Frame instances have different #s of samples for the Timeline.
        c2.Position(map2.info.video_timebase.ToFloat());
        c2.Start(0.0);
        c2.End(10.0);

        // Add clips
        t1.AddClip(&c1);
        t1.AddClip(&c2);
        t1.Open();

        // Loop through samples, and verify the Timeline didn't mess up individual sample values
        int previous_sample_value = 0;
        for (int frame_index = 2; frame_index < 24; frame_index++) {
            t1.GetFrame(frame_index);
            for (int sample_index = 0; sample_index < t1.GetFrame(frame_index)->GetAudioSamplesCount(); sample_index++) {
                int sample_diff = t1.GetFrame(frame_index)->GetAudioSample(0, sample_index, 1.0) - previous_sample_value;
                if (previous_sample_value == 0) {
                    // No previous sample yet, so skip the very first comparison
                    sample_diff = 2;
                }

                // Check that sample_value - previous_sample_value == 2.
                // This should be true, because the same DummyReader is added to the Timeline twice and overlaps itself,
                // so the mixed audio is an ever-increasing linear ramp, increasing by 2 each sample on the Timeline.
                CHECK_EQUAL(2, sample_diff);

                // Set previous sample value
                previous_sample_value = t1.GetFrame(frame_index)->GetAudioSample(0, sample_index, 1.0);
            }
        }

        // Close mappers and timeline
        map.Close();
        map2.Close();
        t1.Close();
    }

    // Clean up
    cache.Clear();
    r.Close();
}
}
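The "increases by 2 each sample" assertion in the second test is plain arithmetic: summing two copies of the same linear ramp, one delayed by a fixed number of samples, gives a mix that still advances by 2 per sample regardless of the delay, since each copy advances by 1. A minimal standalone check of that reasoning (unrelated to libopenshot's classes; the 1837-sample offset is only an illustrative stand-in for roughly one 24 fps frame at 44100 Hz):

#include <cassert>
#include <vector>

int main() {
    const int total = 10000;
    const int offset = 1837;                 // roughly one 24 fps frame at 44100 Hz

    // Mix two copies of the ramp 0, 1, 2, ..., the second delayed by `offset` samples
    std::vector<int> mix(total, 0);
    for (int i = 0; i < total; i++) {
        mix[i] += i;                         // clip 1
        if (i >= offset)
            mix[i] += i - offset;            // clip 2, shifted by one frame
    }

    // Past the offset, consecutive mixed samples always differ by exactly 2,
    // which is what the test asserts frame by frame on the Timeline
    for (int i = offset + 1; i < total; i++) {
        assert(mix[i] - mix[i - 1] == 2);
    }
    return 0;
}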
