VideoBuffers: Optimise number of buffers for reference frames in use
- parse the number of reference frames from the AVCodecContext when
initialising the video stream and pass the value through to the
VideoOutput classes via the player.
- use the H.264 parser to check the number of ref frames (but don't use
it to trigger a stream change)
- use the number of ref frames to determine the number of video buffers.
Currently only impacts VAAPI and software decode.
- HEVC, VP8 and VP9 default to their maximum values (i.e. they are not
parsed - not even sure they can be)
- otherwise the default is 2

This significantly reduces the number of video buffers for a lot of
content - and hence reduces memory consumption.
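As a rough, back-of-the-envelope illustration of that saving (not part of the commit itself, and the stream values are assumed): a 1920x1080 YV12 frame occupies roughly 3 MB, so for a typical broadcast H.264 stream whose SPS declares 2 reference frames the software pool shrinks from the old fixed 31 buffers to 2 + 14 = 16 (see the videobuffers.cpp change below), roughly halving the frame-buffer footprint.

// Illustrative sketch only - the buffer counts mirror this commit, the stream is assumed.
#include <cstdio>

int main()
{
    const double frame_mb = (1920.0 * 1080 * 12 / 8) / (1024 * 1024); // YV12 is 12 bits per pixel (~2.97 MB per frame)
    const int old_buffers = 31;     // previous fixed FMT_YV12 pool size
    const int new_buffers = 2 + 14; // 2 parsed reference frames plus 14 of headroom
    std::printf("old: %.0f MB, new: %.0f MB\n",
                old_buffers * frame_mb, new_buffers * frame_mb); // ~92 MB vs ~47 MB
    return 0;
}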
mark-kendall committed Jun 22, 2019
1 parent 0b0d7d2 commit af37064
Showing 16 changed files with 147 additions and 74 deletions.
73 changes: 51 additions & 22 deletions mythtv/libs/libmythtv/decoders/avformatdecoder.cpp
@@ -1474,14 +1474,49 @@ enum AVPixelFormat get_format_dxva2(struct AVCodecContext *avctx,
}
#endif

int AvFormatDecoder::GetMaxReferenceFrames(AVCodecContext *Context)
{
switch (Context->codec_id)
{
case AV_CODEC_ID_H264:
{
int result = 16;
if (Context->extradata && (Context->extradata_size >= 7))
{
uint8_t offset = 0;
if (Context->extradata[0] == 1)
offset = 9; // avcC
else if (AV_RB24(Context->extradata) == 0x01) // Annex B - 3 byte startcode 0x000001
offset = 4;
else if (AV_RB32(Context->extradata) == 0x01) // Annex B - 4 byte startcode 0x00000001
offset = 5;

if (offset)
{
H264Parser parser;
bool dummy;
parser.parse_SPS(Context->extradata + offset,
static_cast<uint>(Context->extradata_size - offset), dummy, result);
}
}
return result;
}
case AV_CODEC_ID_H265: return 16;
case AV_CODEC_ID_VP9: return 8;
case AV_CODEC_ID_VP8: return 3;
default: break;
}
return 2;
}

void AvFormatDecoder::InitVideoCodec(AVStream *stream, AVCodecContext *enc,
bool selectedStream)
{
LOG(VB_PLAYBACK, LOG_INFO, LOC +
QString("InitVideoCodec() 0x%1 id(%2) type (%3).")
.arg((uint64_t)enc,0,16)
QString("InitVideoCodec ID:%1 Type:%2 Size:%3x%4")
.arg(ff_codec_id_string(enc->codec_id))
.arg(ff_codec_type_string(enc->codec_type)));
.arg(ff_codec_type_string(enc->codec_type))
.arg(enc->width).arg(enc->height));

if (ringBuffer && ringBuffer->IsDVD())
m_directrendering = false;
@@ -1655,7 +1690,9 @@ void AvFormatDecoder::InitVideoCodec(AVStream *stream, AVCodecContext *enc,
QString codecName;
if (codec2)
codecName = codec2->name;
m_parent->SetVideoParams(width, height, m_fps, m_current_aspect, kScan_Detect, codecName);
m_parent->SetVideoParams(width, height, static_cast<double>(m_fps),
m_current_aspect, GetMaxReferenceFrames(enc),
kScan_Detect, codecName);
if (LCD *lcd = LCD::Get())
{
LCDVideoFormatSet video_format;
@@ -2618,12 +2655,12 @@ int AvFormatDecoder::ScanStreams(bool novideo)
tvformat == "pal-m" || tvformat == "atsc")
{
m_fps = 29.97;
m_parent->SetVideoParams(-1, -1, 29.97, 1.0f);
m_parent->SetVideoParams(-1, -1, 29.97, 1.0f, 16);
}
else
{
m_fps = 25.0;
m_parent->SetVideoParams(-1, -1, 25.0, 1.0f);
m_parent->SetVideoParams(-1, -1, 25.0, 1.0f, 16);
}
}

@@ -3284,7 +3321,7 @@ void AvFormatDecoder::MpegPreProcessPkt(AVStream *stream, AVPacket *pkt)
// as for H.264, if a decoder deinterlacer is in operation - the stream must be progressive
bool doublerate = false;
bool decoderdeint = m_mythcodecctx->IsDeinterlacing(doublerate, true);
m_parent->SetVideoParams(width, height, static_cast<double>(seqFPS), m_current_aspect,
m_parent->SetVideoParams(width, height, static_cast<double>(seqFPS), m_current_aspect, 2,
decoderdeint ? kScan_Progressive : kScan_Ignore);

m_current_width = width;
@@ -3335,22 +3372,13 @@ int AvFormatDecoder::H264PreProcessPkt(AVStream *stream, AVPacket *pkt)
const uint8_t *buf_end = pkt->data + pkt->size;
int num_frames = 0;

// crude NAL unit vs Annex B detection.
// the parser only understands Annex B
if (context->extradata && context->extradata_size >= 4)
// The parser only understands Annex B/bytestream format - so check for avcC
// format (starts with 0x01) and rely on FFmpeg keyframe detection
if (context->extradata && (context->extradata_size >= 7) && (context->extradata[0] == 0x01))
{
int nal_size = 0;
int size_length = (context->extradata[4] & 0x3) + 1;

for (int i = 0; i < size_length; i++)
nal_size += buf[i];

if (nal_size)
{
if (pkt->flags & AV_PKT_FLAG_KEY)
HandleGopStart(pkt, false);
return 1;
}
if (pkt->flags & AV_PKT_FLAG_KEY)
HandleGopStart(pkt, false);
return 1;
}

while (buf < buf_end)
@@ -3397,6 +3425,7 @@ int AvFormatDecoder::H264PreProcessPkt(AVStream *stream, AVPacket *pkt)
bool doublerate = false;
bool decoderdeint = m_mythcodecctx->IsDeinterlacing(doublerate, true);
m_parent->SetVideoParams(width, height, seqFPS, m_current_aspect,
static_cast<int>(m_h264_parser->getRefFrames()),
decoderdeint ? kScan_Progressive : kScan_Ignore);

m_current_width = width;
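As a side note on the extradata probe in GetMaxReferenceFrames above, a minimal standalone sketch of the same avcC versus Annex B detection looks like this (assumes FFmpeg's intreadwrite.h for AV_RB24/AV_RB32; the offsets simply mirror the ones used in the commit):

// Illustrative sketch only - not part of the commit.
extern "C" {
#include <libavutil/intreadwrite.h>
}
#include <cstdint>

// Returns the byte offset of the SPS payload inside AVCodecContext::extradata,
// or 0 if the layout is not recognised.
static uint8_t SpsOffset(const uint8_t *Extradata, int Size)
{
    if (!Extradata || Size < 7)
        return 0;
    if (Extradata[0] == 1)          // avcC: configurationVersion is always 1
        return 9;                   // skip the avcC header and the SPS NAL header byte
    if (AV_RB24(Extradata) == 0x01) // Annex B - 3 byte start code 0x000001
        return 4;
    if (AV_RB32(Extradata) == 0x01) // Annex B - 4 byte start code 0x00000001
        return 5;
    return 0;
}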
2 changes: 2 additions & 0 deletions mythtv/libs/libmythtv/decoders/avformatdecoder.h
@@ -187,6 +187,8 @@ class AvFormatDecoder : public DecoderBase
virtual int GetAudioLanguage(uint audio_index, uint stream_index);
virtual AudioTrackType GetAudioTrackType(uint stream_index);

static int GetMaxReferenceFrames(AVCodecContext *Context);

private:
AvFormatDecoder(const AvFormatDecoder &) = delete; // not copyable
AvFormatDecoder &operator=(const AvFormatDecoder &) = delete; // not copyable
4 changes: 2 additions & 2 deletions mythtv/libs/libmythtv/decoders/nuppeldecoder.cpp
@@ -207,7 +207,7 @@ int NuppelDecoder::OpenFile(RingBuffer *rbuffer, bool novideo,

GetPlayer()->SetKeyframeDistance(m_fileheader.keyframedist);
GetPlayer()->SetVideoParams(m_fileheader.width, m_fileheader.height,
m_fileheader.fps, m_current_aspect);
m_fileheader.fps, m_current_aspect, 16);

m_video_width = m_fileheader.width;
m_video_height = m_fileheader.height;
@@ -1338,7 +1338,7 @@ bool NuppelDecoder::GetFrame(DecodeType decodetype, bool&)

GetPlayer()->SetKeyframeDistance(m_fileheader.keyframedist);
GetPlayer()->SetVideoParams(m_fileheader.width, m_fileheader.height,
m_fileheader.fps, m_current_aspect);
m_fileheader.fps, m_current_aspect, 2);
}
}
}
5 changes: 5 additions & 0 deletions mythtv/libs/libmythtv/mpeg/H264Parser.cpp
@@ -1316,6 +1316,11 @@ void H264Parser::getFrameRate(FrameRate &result) const
result = FrameRate(timeScale / 2, unitsInTick);
}

uint H264Parser::getRefFrames(void) const
{
return num_ref_frames;
}

uint H264Parser::aspectRatio(void) const
{

1 change: 1 addition & 0 deletions mythtv/libs/libmythtv/mpeg/H264Parser.h
@@ -164,6 +164,7 @@ class H264Parser {
uint aspectRatio(void) const;
double frameRate(void) const;
void getFrameRate(FrameRate &result) const;
uint getRefFrames(void) const;

uint64_t frameAUstreamOffset(void) const {return frame_start_offset;}
uint64_t keyframeAUstreamOffset(void) const {return keyframe_start_offset;}
16 changes: 12 additions & 4 deletions mythtv/libs/libmythtv/mythplayer.cpp
@@ -188,6 +188,7 @@ MythPlayer::MythPlayer(PlayerFlags flags)
m_lastFrameCodec(FMT_NONE),
// Input Video Attributes
video_disp_dim(0,0), video_dim(0,0),
m_maxReferenceFrames(16),
video_frame_rate(29.97F), video_aspect(4.0F / 3.0F),
forced_video_aspect(-1),
resetScan(kScan_Ignore), m_scan(kScan_Interlaced),
@@ -516,7 +517,7 @@ bool MythPlayer::InitVideo(void)
decoder->GetVideoCodecID(),
pipState, video_dim, video_disp_dim, video_aspect,
parentWidget, embedRect,
video_frame_rate, (uint)playerFlags, m_codecName);
video_frame_rate, (uint)playerFlags, m_codecName, m_maxReferenceFrames);

if (!videoOutput)
{
@@ -594,7 +595,8 @@ void MythPlayer::ReinitVideo(void)
videoOutput->SetVideoFrameRate(static_cast<float>(video_frame_rate));
float aspect = (forced_video_aspect > 0) ? forced_video_aspect : video_aspect;
if (!videoOutput->InputChanged(video_dim, video_disp_dim, aspect,
decoder->GetVideoCodecID(), aspect_only, &locker))
decoder->GetVideoCodecID(), aspect_only, &locker,
m_maxReferenceFrames))
{
LOG(VB_GENERAL, LOG_ERR, LOC +
"Failed to Reinitialize Video. Exiting..");
@@ -758,7 +760,7 @@ void MythPlayer::SetScanType(FrameScanType scan)
}

void MythPlayer::SetVideoParams(int width, int height, double fps, float aspect,
FrameScanType scan, const QString& codecName)
int ReferenceFrames, FrameScanType scan, const QString& codecName)
{
bool paramsChanged = false;

@@ -793,6 +795,12 @@ void MythPlayer::SetVideoParams(int width, int height, double fps, float aspect,
paramsChanged = true;
}

if (ReferenceFrames > 0)
{
m_maxReferenceFrames = ReferenceFrames;
paramsChanged = true;
}

if (!paramsChanged)
return;

@@ -838,7 +846,7 @@ void MythPlayer::OpenDummy(void)
if (!videoOutput)
{
SetKeyframeDistance(15);
SetVideoParams(720, 576, 25.00, 1.25f);
SetVideoParams(720, 576, 25.00, 1.25f, 2);
}

player_ctx->LockPlayingInfo(__FILE__, __LINE__);
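A hypothetical caller, just to show how the extra parameter behaves (the stream values are made up): a decoder that has parsed a 1080i H.264 stream with 4 reference frames passes the count straight through, and because of the ReferenceFrames > 0 guard above, passing 0 or a negative value leaves the previously stored m_maxReferenceFrames untouched.

// Illustrative only - matches the new SetVideoParams signature in mythplayer.h.
m_parent->SetVideoParams(1920, 1080, 25.0, 1.778F, 4, kScan_Interlaced, QString("h264"));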
3 changes: 2 additions & 1 deletion mythtv/libs/libmythtv/mythplayer.h
@@ -172,7 +172,7 @@ class MTV_PUBLIC MythPlayer
void SetWatchingRecording(bool mode);
void SetWatched(bool forceWatched = false);
void SetKeyframeDistance(int keyframedistance);
void SetVideoParams(int w, int h, double fps, float aspect,
void SetVideoParams(int w, int h, double fps, float aspect, int ReferenceFrames,
FrameScanType scan = kScan_Ignore, const QString& codecName = QString());
void SetFileLength(int total, int frames);
void SetDuration(int duration);
@@ -733,6 +733,7 @@ class MTV_PUBLIC MythPlayer
// Input Video Attributes
QSize video_disp_dim; ///< Video (input) width & height
QSize video_dim; ///< Video (input) buffer width & height
int m_maxReferenceFrames; ///< Number of reference frames used in the video stream
double video_frame_rate;///< Video (input) Frame Rate (often inaccurate)
float video_aspect; ///< Video (input) Apect Ratio
float forced_video_aspect;
17 changes: 11 additions & 6 deletions mythtv/libs/libmythtv/mythvaapicontext.cpp
@@ -6,6 +6,7 @@
#include "mythcontext.h"
#include "mythmainwindow.h"
#include "mythlogging.h"
#include "decoders/avformatdecoder.h"
#include "mythrender_opengl.h"
#include "videobuffers.h"
#include "mythvaapiinterop.h"
@@ -387,7 +388,8 @@ int MythVAAPIContext::InitialiseContext(AVCodecContext *Context)
vaapi_frames_ctx->attributes = prefs;
vaapi_frames_ctx->nb_attributes = 3;
hw_frames_ctx->sw_format = FramesFormat(Context->sw_pix_fmt);
hw_frames_ctx->initial_pool_size = static_cast<int>(VideoBuffers::GetNumBuffers(FMT_VAAPI, true));
int referenceframes = AvFormatDecoder::GetMaxReferenceFrames(Context);
hw_frames_ctx->initial_pool_size = static_cast<int>(VideoBuffers::GetNumBuffers(FMT_VAAPI, referenceframes, true));
hw_frames_ctx->format = AV_PIX_FMT_VAAPI;
hw_frames_ctx->width = Context->coded_width;
hw_frames_ctx->height = Context->coded_height;
@@ -402,8 +404,9 @@ int MythVAAPIContext::InitialiseContext(AVCodecContext *Context)
return res;
}

LOG(VB_PLAYBACK, LOG_INFO, LOC + QString("VAAPI FFmpeg buffer pool created with %1 surfaces")
.arg(vaapi_frames_ctx->nb_surfaces));
LOG(VB_PLAYBACK, LOG_INFO, LOC + QString("VAAPI FFmpeg buffer pool created with %1 %2x%3 surfaces (%4 references)")
.arg(vaapi_frames_ctx->nb_surfaces).arg(Context->coded_width).arg(Context->coded_height)
.arg(referenceframes));
av_buffer_unref(&hwdeviceref);
return 0;
}
@@ -430,13 +433,14 @@ int MythVAAPIContext::InitialiseContext2(AVCodecContext *Context)
return -1;
}

int referenceframes = AvFormatDecoder::GetMaxReferenceFrames(Context);
AVHWFramesContext* hw_frames_ctx = reinterpret_cast<AVHWFramesContext*>(Context->hw_frames_ctx->data);
AVVAAPIFramesContext* vaapi_frames_ctx = reinterpret_cast<AVVAAPIFramesContext*>(hw_frames_ctx->hwctx);
hw_frames_ctx->sw_format = FramesFormat(Context->sw_pix_fmt);
hw_frames_ctx->format = AV_PIX_FMT_VAAPI;
hw_frames_ctx->width = Context->coded_width;
hw_frames_ctx->height = Context->coded_height;
hw_frames_ctx->initial_pool_size = static_cast<int>(VideoBuffers::GetNumBuffers(FMT_VAAPI, true));
hw_frames_ctx->initial_pool_size = static_cast<int>(VideoBuffers::GetNumBuffers(FMT_VAAPI, referenceframes));
hw_frames_ctx->free = &MythCodecContext::FramesContextFinished;
if (av_hwframe_ctx_init(Context->hw_frames_ctx) < 0)
{
@@ -446,8 +450,9 @@ int MythVAAPIContext::InitialiseContext2(AVCodecContext *Context)
return -1;
}

LOG(VB_PLAYBACK, LOG_INFO, LOC + QString("VAAPI FFmpeg buffer pool created with %1 %2x%3 surfaces")
.arg(vaapi_frames_ctx->nb_surfaces).arg(Context->coded_width).arg(Context->coded_height));
LOG(VB_PLAYBACK, LOG_INFO, LOC + QString("VAAPI FFmpeg buffer pool created with %1 %2x%3 surfaces (%4 references)")
.arg(vaapi_frames_ctx->nb_surfaces).arg(Context->coded_width).arg(Context->coded_height)
.arg(referenceframes));
av_buffer_unref(&device);
return 0;
}
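A worked example of the new VAAPI pool sizing (the reference count is assumed): if GetMaxReferenceFrames returns 4 for an H.264 stream, InitialiseContext (which passes Decoder = true) now requests 4 + 14 = 18 surfaces and InitialiseContext2 requests 4 + 12 = 16, in place of the previous fixed 30 and 28 (see the FMT_VAAPI case in videobuffers.cpp below).

// Illustrative only - the arithmetic comes from GetNumBuffers in this commit.
int refs = AvFormatDecoder::GetMaxReferenceFrames(Context);         // e.g. 4 from the SPS
uint pool1 = VideoBuffers::GetNumBuffers(FMT_VAAPI, refs, true);    // 4 + 14 = 18 (was 30)
uint pool2 = VideoBuffers::GetNumBuffers(FMT_VAAPI, refs);          // 4 + 12 = 16 (was 28)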
23 changes: 16 additions & 7 deletions mythtv/libs/libmythtv/videobuffers.cpp
@@ -136,21 +136,27 @@ YUVInfo::YUVInfo(uint Width, uint Height, uint Size, const int *Pitches,
* \see VideoOutput
*/

uint VideoBuffers::GetNumBuffers(int PixelFormat, bool Decoder /*=false*/)
uint VideoBuffers::GetNumBuffers(int PixelFormat, int MaxReferenceFrames, bool Decoder /*=false*/)
{
uint refs = static_cast<uint>(MaxReferenceFrames);
switch (PixelFormat)
{
case FMT_DXVA2: return 30;
case FMT_VDPAU: return 28;
case FMT_NVDEC: return 8;
case FMT_VTB: return 24;
// Max 16 ref frames, 12 headroom and allocate 2 extra in the VAAPI frames
// context for additional references held by the VPP deinterlacer (i.e.
// prevent buffer starvation in the decoder)
// This covers the 'worst case' samples.
case FMT_VAAPI: return Decoder ? 30 : 28;
case FMT_DXVA2: return 30;
case FMT_YV12: return 31;
case FMT_VAAPI: return Decoder ? (refs + 14) : (refs + 12);
// Copyback of hardware frames. These decoders are buffering internally
// already - so no need for a large presentation buffer
case FMT_NONE: return 8;
// As for copyback, these decoders buffer internally
case FMT_NVDEC: return 8;
case FMT_MEDIACODEC: return 8;
// Standard software decode
case FMT_YV12: return refs + 14;
default: break;
}
return 30;
@@ -848,9 +854,10 @@ void VideoBuffers::ClearAfterSeek(void)

bool VideoBuffers::CreateBuffers(VideoFrameType Type, QSize Size, bool ExtraForPause,
uint NeedFree, uint NeedprebufferNormal,
uint NeedPrebufferSmall, uint KeepPrebuffer)
uint NeedPrebufferSmall, uint KeepPrebuffer,
int MaxReferenceFrames)
{
Init(GetNumBuffers(Type), ExtraForPause, NeedFree, NeedprebufferNormal,
Init(GetNumBuffers(Type, MaxReferenceFrames), ExtraForPause, NeedFree, NeedprebufferNormal,
NeedPrebufferSmall, KeepPrebuffer);
return CreateBuffers(Type, Size.width(), Size.height());
}
@@ -862,6 +869,8 @@ bool VideoBuffers::CreateBuffers(VideoFrameType Type, int Width, int Height)
bool success = true;
for (uint i = 0; i < Size(); i++)
success &= CreateBuffer(Width, Height, i, nullptr, Type);
LOG(VB_PLAYBACK, LOG_INFO, QString("Created %1 %2 (%3x%4) video buffers")
.arg(Size()).arg(format_description(Type)).arg(Width).arg(Height));
return success;
}

4 changes: 2 additions & 2 deletions mythtv/libs/libmythtv/videobuffers.h
@@ -56,13 +56,13 @@ class MTV_PUBLIC VideoBuffers
VideoBuffers() = default;
virtual ~VideoBuffers();

static uint GetNumBuffers(int PixelFormat, bool Decoder = false);
static uint GetNumBuffers(int PixelFormat, int MaxReferenceFrames = 16, bool Decoder = false);
void Init(uint NumDecode, bool ExtraForPause,
uint NeedFree, uint NeedprebufferNormal,
uint NeedPrebufferSmall, uint KeepPrebuffer);
bool CreateBuffers(VideoFrameType Type, QSize Size, bool ExtraForPause,
uint NeedFree, uint NeedprebufferNormal,
uint NeedPrebufferSmall, uint KeepPrebuffer);
uint NeedPrebufferSmall, uint KeepPrebuffer, int MaxReferenceFrames = 16);
bool CreateBuffers(VideoFrameType Type, int Width, int Height,
vector<YUVInfo> YUVInfos);
bool CreateBuffers(VideoFrameType Type, int Width, int Height);
