diff --git a/include/FFmpegUtilities.h b/include/FFmpegUtilities.h
index b4ec951f8..7daf0bc56 100644
--- a/include/FFmpegUtilities.h
+++ b/include/FFmpegUtilities.h
@@ -40,7 +40,7 @@
#ifndef IS_FFMPEG_3_2
#define IS_FFMPEG_3_2 (LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(57, 64, 101))
#endif
 
#ifndef HAVE_HW_ACCEL
#define HAVE_HW_ACCEL (LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(57, 107, 100))
#endif
@@ -177,13 +177,13 @@
#define AV_OUTPUT_CONTEXT(output_context, path) avformat_alloc_output_context2( output_context, NULL, NULL, path)
#define AV_OPTION_FIND(priv_data, name) av_opt_find(priv_data, name, NULL, 0, 0)
#define AV_OPTION_SET( av_stream, priv_data, name, value, avcodec) av_opt_set(priv_data, name, value, 0); avcodec_parameters_from_context(av_stream->codecpar, avcodec);
- #define AV_FORMAT_NEW_STREAM(oc, st_codec, av_codec, av_st) av_st = avformat_new_stream(oc, NULL);\
+ #define AV_FORMAT_NEW_STREAM(oc, st_codec_ctx, av_codec, av_st) av_st = avformat_new_stream(oc, NULL);\
if (!av_st) \
throw OutOfMemory("Could not allocate memory for the video stream.", path); \
c = avcodec_alloc_context3(av_codec); \
- st_codec = c; \
+ st_codec_ctx = c; \
av_st->codecpar->codec_id = av_codec->id;
- #define AV_COPY_PARAMS_FROM_CONTEXT(av_stream, av_codec) avcodec_parameters_from_context(av_stream->codecpar, av_codec);
+ #define AV_COPY_PARAMS_FROM_CONTEXT(av_stream, av_codec_ctx) avcodec_parameters_from_context(av_stream->codecpar, av_codec_ctx);
#elif IS_FFMPEG_3_2
#define AV_REGISTER_ALL av_register_all();
#define AVCODEC_REGISTER_ALL avcodec_register_all();
diff --git a/include/FFmpegWriter.h b/include/FFmpegWriter.h
index 37fa22dce..98fbbb590 100644
--- a/include/FFmpegWriter.h
+++ b/include/FFmpegWriter.h
@@ -164,8 +164,8 @@ namespace openshot {
AVOutputFormat *fmt;
AVFormatContext *oc;
AVStream *audio_st, *video_st;
- AVCodecContext *video_codec;
- AVCodecContext *audio_codec;
+ AVCodecContext *video_codec_ctx;
+ AVCodecContext *audio_codec_ctx;
SwsContext *img_convert_ctx;
int16_t *samples;
uint8_t *audio_outbuf;
diff --git a/include/FrameMapper.h b/include/FrameMapper.h
index c98d7b719..35dc8fb71 100644
--- a/include/FrameMapper.h
+++ b/include/FrameMapper.h
@@ -138,7 +138,6 @@ namespace openshot
*/
class FrameMapper : public ReaderBase {
private:
- bool is_open;
bool field_toggle; // Internal odd / even toggle (used when building the mapping)
Fraction original; // The original frame rate
Fraction target; // The target frame rate
diff --git a/src/FFmpegWriter.cpp b/src/FFmpegWriter.cpp
index 158ef6620..5797d09e1 100644
--- a/src/FFmpegWriter.cpp
+++ b/src/FFmpegWriter.cpp
@@ -87,7 +87,7 @@ FFmpegWriter::FFmpegWriter(std::string path) :
path(path), fmt(NULL), oc(NULL), audio_st(NULL), video_st(NULL), samples(NULL),
audio_outbuf(NULL), audio_outbuf_size(0), audio_input_frame_size(0), audio_input_position(0),
initial_audio_input_frame_size(0), img_convert_ctx(NULL), cache_size(8), num_of_rescalers(32),
- rescaler_position(0), video_codec(NULL), audio_codec(NULL), is_writing(false), write_video_count(0), write_audio_count(0),
+ rescaler_position(0), video_codec_ctx(NULL), audio_codec_ctx(NULL), is_writing(false), write_video_count(0), write_audio_count(0),
original_sample_rate(0), original_channels(0), avr(NULL), avr_planar(NULL), is_open(false), prepare_streams(false),
write_header(false), write_trailer(false), audio_encoder_buffer_size(0), audio_encoder_buffer(NULL) {
@@ -339,11 +339,11 @@ void FFmpegWriter::SetOption(StreamType stream, std::string name, std::string va
if (info.has_video && stream == VIDEO_STREAM && video_st) {
st = video_st;
// Get codec context
- c = AV_GET_CODEC_PAR_CONTEXT(st, video_codec);
+ c = AV_GET_CODEC_PAR_CONTEXT(st, video_codec_ctx);
} else if (info.has_audio && stream == AUDIO_STREAM && audio_st) {
st = audio_st;
// Get codec context
- c = AV_GET_CODEC_PAR_CONTEXT(st, audio_codec);
+ c = AV_GET_CODEC_PAR_CONTEXT(st, audio_codec_ctx);
} else
throw NoStreamsFound("The stream was not found. Be sure to call PrepareStreams() first.", path);
@@ -839,13 +839,13 @@ void FFmpegWriter::WriteTrailer() {
// Flush encoders
void FFmpegWriter::flush_encoders() {
- if (info.has_audio && audio_codec && AV_GET_CODEC_TYPE(audio_st) == AVMEDIA_TYPE_AUDIO && AV_GET_CODEC_ATTRIBUTES(audio_st, audio_codec)->frame_size <= 1)
+ if (info.has_audio && audio_codec_ctx && AV_GET_CODEC_TYPE(audio_st) == AVMEDIA_TYPE_AUDIO && AV_GET_CODEC_ATTRIBUTES(audio_st, audio_codec_ctx)->frame_size <= 1)
return;
#if (LIBAVFORMAT_VERSION_MAJOR < 58)
- if (info.has_video && video_codec && AV_GET_CODEC_TYPE(video_st) == AVMEDIA_TYPE_VIDEO && (oc->oformat->flags & AVFMT_RAWPICTURE) && AV_FIND_DECODER_CODEC_ID(video_st) == AV_CODEC_ID_RAWVIDEO)
+ if (info.has_video && video_codec_ctx && AV_GET_CODEC_TYPE(video_st) == AVMEDIA_TYPE_VIDEO && (oc->oformat->flags & AVFMT_RAWPICTURE) && AV_FIND_DECODER_CODEC_ID(video_st) == AV_CODEC_ID_RAWVIDEO)
return;
#else
- if (info.has_video && video_codec && AV_GET_CODEC_TYPE(video_st) == AVMEDIA_TYPE_VIDEO && AV_FIND_DECODER_CODEC_ID(video_st) == AV_CODEC_ID_RAWVIDEO)
+ if (info.has_video && video_codec_ctx && AV_GET_CODEC_TYPE(video_st) == AVMEDIA_TYPE_VIDEO && AV_FIND_DECODER_CODEC_ID(video_st) == AV_CODEC_ID_RAWVIDEO)
return;
#endif
@@ -854,7 +854,7 @@ void FFmpegWriter::flush_encoders() {
for (;;) {
// Increment PTS (in frames and scaled to the codec's timebase)
- write_video_count += av_rescale_q(1, (AVRational) {info.fps.den, info.fps.num}, video_codec->time_base);
+ write_video_count += av_rescale_q(1, av_make_q(info.fps.den, info.fps.num), video_codec_ctx->time_base);
AVPacket pkt;
av_init_packet(&pkt);
@@ -872,22 +872,22 @@ void FFmpegWriter::flush_encoders() {
#pragma omp critical (write_video_packet)
{
// Encode video packet (latest version of FFmpeg)
- error_code = avcodec_send_frame(video_codec, NULL);
+ error_code = avcodec_send_frame(video_codec_ctx, NULL);
got_packet = 0;
while (error_code >= 0) {
- error_code = avcodec_receive_packet(video_codec, &pkt);
+ error_code = avcodec_receive_packet(video_codec_ctx, &pkt);
if (error_code == AVERROR(EAGAIN)|| error_code == AVERROR_EOF) {
got_packet = 0;
// Write packet
- avcodec_flush_buffers(video_codec);
+ avcodec_flush_buffers(video_codec_ctx);
break;
}
if (pkt.pts != AV_NOPTS_VALUE)
- pkt.pts = av_rescale_q(pkt.pts, video_codec->time_base, video_st->time_base);
+ pkt.pts = av_rescale_q(pkt.pts, video_codec_ctx->time_base, video_st->time_base);
if (pkt.dts != AV_NOPTS_VALUE)
- pkt.dts = av_rescale_q(pkt.dts, video_codec->time_base, video_st->time_base);
+ pkt.dts = av_rescale_q(pkt.dts, video_codec_ctx->time_base, video_st->time_base);
if (pkt.duration > 0)
- pkt.duration = av_rescale_q(pkt.duration, video_codec->time_base, video_st->time_base);
+ pkt.duration = av_rescale_q(pkt.duration, video_codec_ctx->time_base, video_st->time_base);
pkt.stream_index = video_st->index;
error_code = av_interleaved_write_frame(oc, &pkt);
}
@@ -896,18 +896,18 @@ void FFmpegWriter::flush_encoders() {
#if LIBAVFORMAT_VERSION_MAJOR >= 54
// Encode video packet (older than FFmpeg 3.2)
- error_code = avcodec_encode_video2(video_codec, &pkt, NULL, &got_packet);
+ error_code = avcodec_encode_video2(video_codec_ctx, &pkt, NULL, &got_packet);
#else
// Encode video packet (even older version of FFmpeg)
int video_outbuf_size = 0;
/* encode the image */
- int out_size = avcodec_encode_video(video_codec, NULL, video_outbuf_size, NULL);
+ int out_size = avcodec_encode_video(video_codec_ctx, NULL, video_outbuf_size, NULL);
/* if zero size, it means the image was buffered */
if (out_size > 0) {
- if(video_codec->coded_frame->key_frame)
+ if(video_codec_ctx->coded_frame->key_frame)
pkt.flags |= AV_PKT_FLAG_KEY;
pkt.data= video_outbuf;
pkt.size= out_size;
@@ -927,11 +927,11 @@ void FFmpegWriter::flush_encoders() {
// set the timestamp
if (pkt.pts != AV_NOPTS_VALUE)
- pkt.pts = av_rescale_q(pkt.pts, video_codec->time_base, video_st->time_base);
+ pkt.pts = av_rescale_q(pkt.pts, video_codec_ctx->time_base, video_st->time_base);
if (pkt.dts != AV_NOPTS_VALUE)
- pkt.dts = av_rescale_q(pkt.dts, video_codec->time_base, video_st->time_base);
+ pkt.dts = av_rescale_q(pkt.dts, video_codec_ctx->time_base, video_st->time_base);
if (pkt.duration > 0)
- pkt.duration = av_rescale_q(pkt.duration, video_codec->time_base, video_st->time_base);
+ pkt.duration = av_rescale_q(pkt.duration, video_codec_ctx->time_base, video_st->time_base);
pkt.stream_index = video_st->index;
// Write packet
@@ -948,9 +948,9 @@ void FFmpegWriter::flush_encoders() {
// Increment PTS (in samples and scaled to the codec's timebase)
#if LIBAVFORMAT_VERSION_MAJOR >= 54
// for some reason, it requires me to multiply channels X 2
- write_audio_count += av_rescale_q(audio_input_position / (audio_codec->channels * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16)), (AVRational){1, info.sample_rate}, audio_codec->time_base);
+ write_audio_count += av_rescale_q(audio_input_position / (audio_codec_ctx->channels * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16)), av_make_q(1, info.sample_rate), audio_codec_ctx->time_base);
#else
- write_audio_count += av_rescale_q(audio_input_position / audio_codec->channels, (AVRational){1, info.sample_rate}, audio_codec->time_base);
+ write_audio_count += av_rescale_q(audio_input_position / audio_codec_ctx->channels, av_make_q(1, info.sample_rate), audio_codec_ctx->time_base);
#endif
AVPacket pkt;
@@ -963,9 +963,9 @@ void FFmpegWriter::flush_encoders() {
int error_code = 0;
int got_packet = 0;
#if IS_FFMPEG_3_2
- error_code = avcodec_send_frame(audio_codec, NULL);
+ error_code = avcodec_send_frame(audio_codec_ctx, NULL);
#else
- error_code = avcodec_encode_audio2(audio_codec, &pkt, NULL, &got_packet);
+ error_code = avcodec_encode_audio2(audio_codec_ctx, &pkt, NULL, &got_packet);
#endif
if (error_code < 0) {
ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::flush_encoders ERROR [" + (std::string)av_err2str(error_code) + "]", "error_code", error_code);
@@ -980,11 +980,11 @@ void FFmpegWriter::flush_encoders() {
// Scale the PTS to the audio stream timebase (which is sometimes different than the codec's timebase)
if (pkt.pts != AV_NOPTS_VALUE)
- pkt.pts = av_rescale_q(pkt.pts, audio_codec->time_base, audio_st->time_base);
+ pkt.pts = av_rescale_q(pkt.pts, audio_codec_ctx->time_base, audio_st->time_base);
if (pkt.dts != AV_NOPTS_VALUE)
- pkt.dts = av_rescale_q(pkt.dts, audio_codec->time_base, audio_st->time_base);
+ pkt.dts = av_rescale_q(pkt.dts, audio_codec_ctx->time_base, audio_st->time_base);
if (pkt.duration > 0)
- pkt.duration = av_rescale_q(pkt.duration, audio_codec->time_base, audio_st->time_base);
+ pkt.duration = av_rescale_q(pkt.duration, audio_codec_ctx->time_base, audio_st->time_base);
// set stream
pkt.stream_index = audio_st->index;
@@ -1101,8 +1101,13 @@ AVStream *FFmpegWriter::add_audio_stream() {
if (codec == NULL)
throw InvalidCodec("A valid audio codec could not be found for this file.", path);
+ // Free any previous memory allocations
+ if (audio_codec_ctx != NULL) {
+ AV_FREE_CONTEXT(audio_codec_ctx);
+ }
+
// Create a new audio stream
- AV_FORMAT_NEW_STREAM(oc, audio_codec, codec, st)
+ AV_FORMAT_NEW_STREAM(oc, audio_codec_ctx, codec, st)
c->codec_id = codec->id;
#if LIBAVFORMAT_VERSION_MAJOR >= 53
@@ -1185,7 +1190,7 @@ AVStream *FFmpegWriter::add_video_stream() {
throw InvalidCodec("A valid video codec could not be found for this file.", path);
// Create a new video stream
- AV_FORMAT_NEW_STREAM(oc, video_codec, codec, st)
+ AV_FORMAT_NEW_STREAM(oc, video_codec_ctx, codec, st)
c->codec_id = codec->id;
#if LIBAVFORMAT_VERSION_MAJOR >= 53
@@ -1363,15 +1368,15 @@ AVStream *FFmpegWriter::add_video_stream() {
// open audio codec
void FFmpegWriter::open_audio(AVFormatContext *oc, AVStream *st) {
AVCodec *codec;
- AV_GET_CODEC_FROM_STREAM(st, audio_codec)
+ AV_GET_CODEC_FROM_STREAM(st, audio_codec_ctx)
// Set number of threads equal to number of processors (not to exceed 16)
- audio_codec->thread_count = std::min(FF_NUM_PROCESSORS, 16);
+ audio_codec_ctx->thread_count = std::min(FF_NUM_PROCESSORS, 16);
// Find the audio encoder
codec = avcodec_find_encoder_by_name(info.acodec.c_str());
if (!codec)
- codec = avcodec_find_encoder(audio_codec->codec_id);
+ codec = avcodec_find_encoder(audio_codec_ctx->codec_id);
if (!codec)
throw InvalidCodec("Could not find codec", path);
@@ -1380,16 +1385,16 @@ void FFmpegWriter::open_audio(AVFormatContext *oc, AVStream *st) {
av_dict_set(&opts, "strict", "experimental", 0);
// Open the codec
- if (avcodec_open2(audio_codec, codec, &opts) < 0)
+ if (avcodec_open2(audio_codec_ctx, codec, &opts) < 0)
throw InvalidCodec("Could not open audio codec", path);
- AV_COPY_PARAMS_FROM_CONTEXT(st, audio_codec);
+ AV_COPY_PARAMS_FROM_CONTEXT(st, audio_codec_ctx);
// Free options
av_dict_free(&opts);
// Calculate the size of the input frame (i..e how many samples per packet), and the output buffer
// TODO: Ugly hack for PCM codecs (will be removed ASAP with new PCM support to compute the input frame size in samples
- if (audio_codec->frame_size <= 1) {
+ if (audio_codec_ctx->frame_size <= 1) {
// No frame size found... so calculate
audio_input_frame_size = 50000 / info.channels;
@@ -1406,7 +1411,7 @@ void FFmpegWriter::open_audio(AVFormatContext *oc, AVStream *st) {
}
} else {
// Set frame size based on the codec
- audio_input_frame_size = audio_codec->frame_size;
+ audio_input_frame_size = audio_codec_ctx->frame_size;
}
// Set the initial frame size (since it might change during resampling)
@@ -1428,16 +1433,16 @@ void FFmpegWriter::open_audio(AVFormatContext *oc, AVStream *st) {
av_dict_set(&st->metadata, iter->first.c_str(), iter->second.c_str(), 0);
}
- ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::open_audio", "audio_codec->thread_count", audio_codec->thread_count, "audio_input_frame_size", audio_input_frame_size, "buffer_size", AVCODEC_MAX_AUDIO_FRAME_SIZE + MY_INPUT_BUFFER_PADDING_SIZE);
+ ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::open_audio", "audio_codec_ctx->thread_count", audio_codec_ctx->thread_count, "audio_input_frame_size", audio_input_frame_size, "buffer_size", AVCODEC_MAX_AUDIO_FRAME_SIZE + MY_INPUT_BUFFER_PADDING_SIZE);
}
// open video codec
void FFmpegWriter::open_video(AVFormatContext *oc, AVStream *st) {
AVCodec *codec;
- AV_GET_CODEC_FROM_STREAM(st, video_codec)
+ AV_GET_CODEC_FROM_STREAM(st, video_codec_ctx)
// Set number of threads equal to number of processors (not to exceed 16)
- video_codec->thread_count = std::min(FF_NUM_PROCESSORS, 16);
+ video_codec_ctx->thread_count = std::min(FF_NUM_PROCESSORS, 16);
#if HAVE_HW_ACCEL
if (hw_en_on && hw_en_supported) {
@@ -1492,8 +1497,8 @@ void FFmpegWriter::open_video(AVFormatContext *oc, AVStream *st) {
throw InvalidCodec("Could not find codec", path);
/* Force max_b_frames to 0 in some cases (i.e. for mjpeg image sequences */
- if (video_codec->max_b_frames && video_codec->codec_id != AV_CODEC_ID_MPEG4 && video_codec->codec_id != AV_CODEC_ID_MPEG1VIDEO && video_codec->codec_id != AV_CODEC_ID_MPEG2VIDEO)
- video_codec->max_b_frames = 0;
+ if (video_codec_ctx->max_b_frames && video_codec_ctx->codec_id != AV_CODEC_ID_MPEG4 && video_codec_ctx->codec_id != AV_CODEC_ID_MPEG1VIDEO && video_codec_ctx->codec_id != AV_CODEC_ID_MPEG2VIDEO)
+ video_codec_ctx->max_b_frames = 0;
// Init options
AVDictionary *opts = NULL;
@@ -1501,7 +1506,7 @@ void FFmpegWriter::open_video(AVFormatContext *oc, AVStream *st) {
#if HAVE_HW_ACCEL
if (hw_en_on && hw_en_supported) {
- video_codec->pix_fmt = hw_en_av_pix_fmt;
+ video_codec_ctx->pix_fmt = hw_en_av_pix_fmt;
// for the list of possible options, see the list of codec-specific options:
// e.g. ffmpeg -h encoder=h264_vaapi or ffmpeg -h encoder=hevc_vaapi
@@ -1511,23 +1516,23 @@ void FFmpegWriter::open_video(AVFormatContext *oc, AVStream *st) {
// which is ffmpeg version-specific.
if (hw_en_av_pix_fmt == AV_PIX_FMT_VAAPI) {
int64_t qp;
- if (av_opt_get_int(video_codec->priv_data, "qp", 0, &qp) != 0 || qp == 0) {
+ if (av_opt_get_int(video_codec_ctx->priv_data, "qp", 0, &qp) != 0 || qp == 0) {
// unless "qp" was set for CQP, switch to VBR RC mode
- av_opt_set(video_codec->priv_data, "rc_mode", "VBR", 0);
+ av_opt_set(video_codec_ctx->priv_data, "rc_mode", "VBR", 0);
// In the current state (ffmpeg-4.2-4 libva-mesa-driver-19.1.5-1) to use VBR,
// one has to specify both bit_rate and maxrate, otherwise a small low quality file is generated on Intel iGPU).
- video_codec->rc_max_rate = video_codec->bit_rate;
+ video_codec_ctx->rc_max_rate = video_codec_ctx->bit_rate;
}
}
- switch (video_codec->codec_id) {
+ switch (video_codec_ctx->codec_id) {
case AV_CODEC_ID_H264:
- video_codec->max_b_frames = 0; // At least this GPU doesn't support b-frames
- video_codec->profile = FF_PROFILE_H264_BASELINE | FF_PROFILE_H264_CONSTRAINED;
- av_opt_set(video_codec->priv_data, "preset", "slow", 0);
- av_opt_set(video_codec->priv_data, "tune", "zerolatency", 0);
- av_opt_set(video_codec->priv_data, "vprofile", "baseline", AV_OPT_SEARCH_CHILDREN);
+ video_codec_ctx->max_b_frames = 0; // At least this GPU doesn't support b-frames
+ video_codec_ctx->profile = FF_PROFILE_H264_BASELINE | FF_PROFILE_H264_CONSTRAINED;
+ av_opt_set(video_codec_ctx->priv_data, "preset", "slow", 0);
+ av_opt_set(video_codec_ctx->priv_data, "tune", "zerolatency", 0);
+ av_opt_set(video_codec_ctx->priv_data, "vprofile", "baseline", AV_OPT_SEARCH_CHILDREN);
break;
case AV_CODEC_ID_HEVC:
// tested to work with defaults
@@ -1537,13 +1542,13 @@ void FFmpegWriter::open_video(AVFormatContext *oc, AVStream *st) {
break;
default:
ZmqLogger::Instance()->AppendDebugMethod("No codec-specific options defined for this codec. HW encoding may fail",
- "codec_id", video_codec->codec_id);
+ "codec_id", video_codec_ctx->codec_id);
break;
}
// set hw_frames_ctx for encoder's AVCodecContext
int err;
- if ((err = set_hwframe_ctx(video_codec, hw_device_ctx, info.width, info.height)) < 0) {
+ if ((err = set_hwframe_ctx(video_codec_ctx, hw_device_ctx, info.width, info.height)) < 0) {
ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::open_video (set_hwframe_ctx) ERROR faled to set hwframe context",
"width", info.width, "height", info.height, av_err2str(err), -1);
}
@@ -1551,9 +1556,9 @@ void FFmpegWriter::open_video(AVFormatContext *oc, AVStream *st) {
#endif // HAVE_HW_ACCEL
/* open the codec */
- if (avcodec_open2(video_codec, codec, &opts) < 0)
+ if (avcodec_open2(video_codec_ctx, codec, &opts) < 0)
throw InvalidCodec("Could not open video codec", path);
- AV_COPY_PARAMS_FROM_CONTEXT(st, video_codec);
+ AV_COPY_PARAMS_FROM_CONTEXT(st, video_codec_ctx);
// Free options
av_dict_free(&opts);
@@ -1563,7 +1568,7 @@ void FFmpegWriter::open_video(AVFormatContext *oc, AVStream *st) {
av_dict_set(&st->metadata, iter->first.c_str(), iter->second.c_str(), 0);
}
- ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::open_video", "video_codec->thread_count", video_codec->thread_count);
+ ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::open_video", "video_codec_ctx->thread_count", video_codec_ctx->thread_count);
}
@@ -1641,7 +1646,7 @@ void FFmpegWriter::write_audio_packets(bool is_final) {
ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_audio_packets", "is_final", is_final, "total_frame_samples", total_frame_samples, "channel_layout_in_frame", channel_layout_in_frame, "channels_in_frame", channels_in_frame, "samples_in_frame", samples_in_frame, "LAYOUT_MONO", LAYOUT_MONO);
// Keep track of the original sample format
- AVSampleFormat output_sample_fmt = audio_codec->sample_fmt;
+ AVSampleFormat output_sample_fmt = audio_codec_ctx->sample_fmt;
AVFrame *audio_frame = NULL;
if (!is_final) {
@@ -1657,7 +1662,7 @@ void FFmpegWriter::write_audio_packets(bool is_final) {
}
// Do not convert audio to planar format (yet). We need to keep everything interleaved at this point.
- switch (audio_codec->sample_fmt) {
+ switch (audio_codec_ctx->sample_fmt) {
case AV_SAMPLE_FMT_FLTP: {
output_sample_fmt = AV_SAMPLE_FMT_FLT;
break;
@@ -1768,8 +1773,8 @@ void FFmpegWriter::write_audio_packets(bool is_final) {
// Convert to planar (if needed by audio codec)
AVFrame *frame_final = AV_ALLOCATE_FRAME();
AV_RESET_FRAME(frame_final);
- if (av_sample_fmt_is_planar(audio_codec->sample_fmt)) {
- ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_audio_packets (2nd resampling for Planar formats)", "in_sample_fmt", output_sample_fmt, "out_sample_fmt", audio_codec->sample_fmt, "in_sample_rate", info.sample_rate, "out_sample_rate", info.sample_rate, "in_channels", info.channels, "out_channels", info.channels);
+ if (av_sample_fmt_is_planar(audio_codec_ctx->sample_fmt)) {
+ ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_audio_packets (2nd resampling for Planar formats)", "in_sample_fmt", output_sample_fmt, "out_sample_fmt", audio_codec_ctx->sample_fmt, "in_sample_rate", info.sample_rate, "out_sample_rate", info.sample_rate, "in_channels", info.channels, "out_channels", info.channels);
// setup resample context
if (!avr_planar) {
@@ -1777,7 +1782,7 @@ void FFmpegWriter::write_audio_packets(bool is_final) {
av_opt_set_int(avr_planar, "in_channel_layout", info.channel_layout, 0);
av_opt_set_int(avr_planar, "out_channel_layout", info.channel_layout, 0);
av_opt_set_int(avr_planar, "in_sample_fmt", output_sample_fmt, 0);
- av_opt_set_int(avr_planar, "out_sample_fmt", audio_codec->sample_fmt, 0); // planar not allowed here
+ av_opt_set_int(avr_planar, "out_sample_fmt", audio_codec_ctx->sample_fmt, 0); // planar not allowed here
av_opt_set_int(avr_planar, "in_sample_rate", info.sample_rate, 0);
av_opt_set_int(avr_planar, "out_sample_rate", info.sample_rate, 0);
av_opt_set_int(avr_planar, "in_channels", info.channels, 0);
@@ -1803,7 +1808,7 @@ void FFmpegWriter::write_audio_packets(bool is_final) {
// Create output frame (and allocate arrays)
frame_final->nb_samples = audio_input_frame_size;
- av_samples_alloc(frame_final->data, frame_final->linesize, info.channels, frame_final->nb_samples, audio_codec->sample_fmt, 0);
+ av_samples_alloc(frame_final->data, frame_final->linesize, info.channels, frame_final->nb_samples, audio_codec_ctx->sample_fmt, 0);
// Convert audio samples
int nb_samples = SWR_CONVERT(avr_planar, // audio resample context
@@ -1816,7 +1821,7 @@ void FFmpegWriter::write_audio_packets(bool is_final) {
// Copy audio samples over original samples
if (nb_samples > 0)
- memcpy(samples, frame_final->data[0], nb_samples * av_get_bytes_per_sample(audio_codec->sample_fmt) * info.channels);
+ memcpy(samples, frame_final->data[0], nb_samples * av_get_bytes_per_sample(audio_codec_ctx->sample_fmt) * info.channels);
// deallocate AVFrame
av_freep(&(audio_frame->data[0]));
@@ -1828,16 +1833,16 @@ void FFmpegWriter::write_audio_packets(bool is_final) {
} else {
// Create a new array
final_samples = (int16_t *) av_malloc(
- sizeof(int16_t) * audio_input_position * (av_get_bytes_per_sample(audio_codec->sample_fmt) / av_get_bytes_per_sample(AV_SAMPLE_FMT_S16)));
+ sizeof(int16_t) * audio_input_position * (av_get_bytes_per_sample(audio_codec_ctx->sample_fmt) / av_get_bytes_per_sample(AV_SAMPLE_FMT_S16)));
// Copy audio into buffer for frame
- memcpy(final_samples, samples, audio_input_position * av_get_bytes_per_sample(audio_codec->sample_fmt));
+ memcpy(final_samples, samples, audio_input_position * av_get_bytes_per_sample(audio_codec_ctx->sample_fmt));
// Init the nb_samples property
frame_final->nb_samples = audio_input_frame_size;
// Fill the final_frame AVFrame with audio (non planar)
- avcodec_fill_audio_frame(frame_final, audio_codec->channels, audio_codec->sample_fmt, (uint8_t *) final_samples,
+ avcodec_fill_audio_frame(frame_final, audio_codec_ctx->channels, audio_codec_ctx->sample_fmt, (uint8_t *) final_samples,
audio_encoder_buffer_size, 0);
}
@@ -1862,18 +1867,18 @@ void FFmpegWriter::write_audio_packets(bool is_final) {
int error_code;
int ret = 0;
int frame_finished = 0;
- error_code = ret = avcodec_send_frame(audio_codec, frame_final);
+ error_code = ret = avcodec_send_frame(audio_codec_ctx, frame_final);
if (ret < 0 && ret != AVERROR(EINVAL) && ret != AVERROR_EOF) {
- avcodec_send_frame(audio_codec, NULL);
+ avcodec_send_frame(audio_codec_ctx, NULL);
}
else {
if (ret >= 0)
pkt.size = 0;
- ret = avcodec_receive_packet(audio_codec, &pkt);
+ ret = avcodec_receive_packet(audio_codec_ctx, &pkt);
if (ret >= 0)
frame_finished = 1;
if(ret == AVERROR(EINVAL) || ret == AVERROR_EOF) {
- avcodec_flush_buffers(audio_codec);
+ avcodec_flush_buffers(audio_codec_ctx);
ret = 0;
}
if (ret >= 0) {
@@ -1887,7 +1892,7 @@ void FFmpegWriter::write_audio_packets(bool is_final) {
got_packet_ptr = ret;
#else
// Encode audio (older versions of FFmpeg)
- int error_code = avcodec_encode_audio2(audio_codec, &pkt, frame_final, &got_packet_ptr);
+ int error_code = avcodec_encode_audio2(audio_codec_ctx, &pkt, frame_final, &got_packet_ptr);
#endif
/* if zero size, it means the image was buffered */
if (error_code == 0 && got_packet_ptr) {
@@ -1898,11 +1903,11 @@ void FFmpegWriter::write_audio_packets(bool is_final) {
// Scale the PTS to the audio stream timebase (which is sometimes different than the codec's timebase)
if (pkt.pts != AV_NOPTS_VALUE)
- pkt.pts = av_rescale_q(pkt.pts, audio_codec->time_base, audio_st->time_base);
+ pkt.pts = av_rescale_q(pkt.pts, audio_codec_ctx->time_base, audio_st->time_base);
if (pkt.dts != AV_NOPTS_VALUE)
- pkt.dts = av_rescale_q(pkt.dts, audio_codec->time_base, audio_st->time_base);
+ pkt.dts = av_rescale_q(pkt.dts, audio_codec_ctx->time_base, audio_st->time_base);
if (pkt.duration > 0)
- pkt.duration = av_rescale_q(pkt.duration, audio_codec->time_base, audio_st->time_base);
+ pkt.duration = av_rescale_q(pkt.duration, audio_codec_ctx->time_base, audio_st->time_base);
// set stream
pkt.stream_index = audio_st->index;
@@ -2016,7 +2021,7 @@ void FFmpegWriter::process_video_packet(std::shared_ptr frame) {
frame_final = allocate_avframe((AVPixelFormat)(video_st->codecpar->format), info.width, info.height, &bytes_final, NULL);
}
#else
- AVFrame *frame_final = allocate_avframe(video_codec->pix_fmt, info.width, info.height, &bytes_final, NULL);
+ AVFrame *frame_final = allocate_avframe(video_codec_ctx->pix_fmt, info.width, info.height, &bytes_final, NULL);
#endif // IS_FFMPEG_3_2
// Fill with data
@@ -2059,7 +2064,7 @@ bool FFmpegWriter::write_video_packet(std::shared_ptr frame, AVFrame *fra
pkt.size = sizeof(AVPicture);
// Increment PTS (in frames and scaled to the codec's timebase)
- write_video_count += av_rescale_q(1, (AVRational) {info.fps.den, info.fps.num}, video_codec->time_base);
+ write_video_count += av_rescale_q(1, av_make_q(info.fps.den, info.fps.num), video_codec_ctx->time_base);
pkt.pts = write_video_count;
/* write the compressed frame in the media file */
@@ -2085,7 +2090,7 @@ bool FFmpegWriter::write_video_packet(std::shared_ptr frame, AVFrame *fra
uint8_t *video_outbuf = NULL;
// Increment PTS (in frames and scaled to the codec's timebase)
- write_video_count += av_rescale_q(1, (AVRational) {info.fps.den, info.fps.num}, video_codec->time_base);
+ write_video_count += av_rescale_q(1, av_make_q(info.fps.den, info.fps.num), video_codec_ctx->time_base);
// Assign the initial AVFrame PTS from the frame counter
frame_final->pts = write_video_count;
@@ -2094,7 +2099,7 @@ bool FFmpegWriter::write_video_packet(std::shared_ptr frame, AVFrame *fra
if (!(hw_frame = av_frame_alloc())) {
fprintf(stderr, "Error code: av_hwframe_alloc\n");
}
- if (av_hwframe_get_buffer(video_codec->hw_frames_ctx, hw_frame, 0) < 0) {
+ if (av_hwframe_get_buffer(video_codec_ctx->hw_frames_ctx, hw_frame, 0) < 0) {
fprintf(stderr, "Error code: av_hwframe_get_buffer\n");
}
if (!hw_frame->hw_frames_ctx) {
@@ -2116,11 +2121,11 @@ bool FFmpegWriter::write_video_packet(std::shared_ptr frame, AVFrame *fra
#if HAVE_HW_ACCEL
if (hw_en_on && hw_en_supported) {
- ret = avcodec_send_frame(video_codec, hw_frame); //hw_frame!!!
+ ret = avcodec_send_frame(video_codec_ctx, hw_frame); //hw_frame!!!
} else
#endif // HAVE_HW_ACCEL
{
- ret = avcodec_send_frame(video_codec, frame_final);
+ ret = avcodec_send_frame(video_codec_ctx, frame_final);
}
error_code = ret;
if (ret < 0 ) {
@@ -2131,14 +2136,14 @@ bool FFmpegWriter::write_video_packet(std::shared_ptr frame, AVFrame *fra
if (ret == AVERROR_EOF ) {
std::cerr << "Frame AVERROR_EOF" << "\n";
}
- avcodec_send_frame(video_codec, NULL);
+ avcodec_send_frame(video_codec_ctx, NULL);
}
else {
while (ret >= 0) {
- ret = avcodec_receive_packet(video_codec, &pkt);
+ ret = avcodec_receive_packet(video_codec_ctx, &pkt);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
- avcodec_flush_buffers(video_codec);
+ avcodec_flush_buffers(video_codec_ctx);
got_packet_ptr = 0;
break;
}
@@ -2151,7 +2156,7 @@ bool FFmpegWriter::write_video_packet(std::shared_ptr frame, AVFrame *fra
#else
#if LIBAVFORMAT_VERSION_MAJOR >= 54
// Write video packet (older than FFmpeg 3.2)
- error_code = avcodec_encode_video2(video_codec, &pkt, frame_final, &got_packet_ptr);
+ error_code = avcodec_encode_video2(video_codec_ctx, &pkt, frame_final, &got_packet_ptr);
if (error_code != 0) {
std::cerr << "Frame AVERROR_EOF" << "\n";
}
@@ -2164,11 +2169,11 @@ bool FFmpegWriter::write_video_packet(std::shared_ptr frame, AVFrame *fra
video_outbuf = (uint8_t*) av_malloc(200000);
/* encode the image */
- int out_size = avcodec_encode_video(video_codec, video_outbuf, video_outbuf_size, frame_final);
+ int out_size = avcodec_encode_video(video_codec_ctx, video_outbuf, video_outbuf_size, frame_final);
/* if zero size, it means the image was buffered */
if (out_size > 0) {
- if(video_codec->coded_frame->key_frame)
+ if(video_codec_ctx->coded_frame->key_frame)
pkt.flags |= AV_PKT_FLAG_KEY;
pkt.data= video_outbuf;
pkt.size= out_size;
@@ -2188,11 +2193,11 @@ bool FFmpegWriter::write_video_packet(std::shared_ptr frame, AVFrame *fra
// set the timestamp
if (pkt.pts != AV_NOPTS_VALUE)
- pkt.pts = av_rescale_q(pkt.pts, video_codec->time_base, video_st->time_base);
+ pkt.pts = av_rescale_q(pkt.pts, video_codec_ctx->time_base, video_st->time_base);
if (pkt.dts != AV_NOPTS_VALUE)
- pkt.dts = av_rescale_q(pkt.dts, video_codec->time_base, video_st->time_base);
+ pkt.dts = av_rescale_q(pkt.dts, video_codec_ctx->time_base, video_st->time_base);
if (pkt.duration > 0)
- pkt.duration = av_rescale_q(pkt.duration, video_codec->time_base, video_st->time_base);
+ pkt.duration = av_rescale_q(pkt.duration, video_codec_ctx->time_base, video_st->time_base);
pkt.stream_index = video_st->index;
/* write the compressed frame in the media file */
diff --git a/src/FrameMapper.cpp b/src/FrameMapper.cpp
index 4b213f81c..e1e5700c2 100644
--- a/src/FrameMapper.cpp
+++ b/src/FrameMapper.cpp
@@ -61,9 +61,9 @@ FrameMapper::FrameMapper(ReaderBase *reader, Fraction target, PulldownType targe
// Destructor
FrameMapper::~FrameMapper() {
- if (is_open)
- // Auto Close if not already
- Close();
+
+ // Auto Close if not already
+ Close();
reader = NULL;
}