22 changes: 4 additions & 18 deletions mythtv/libs/libmythmetadata/metadatafactory.h
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ class META_PUBLIC MetadataFactoryMultiResult : public QEvent
public:
explicit MetadataFactoryMultiResult(const MetadataLookupList& res)
: QEvent(kEventType), m_results(res) {}
~MetadataFactoryMultiResult() override = default;
~MetadataFactoryMultiResult() override;

MetadataLookupList m_results;

Expand All @@ -40,14 +40,7 @@ class META_PUBLIC MetadataFactorySingleResult : public QEvent
m_result->IncrRef();
}
}
~MetadataFactorySingleResult() override
{
if (m_result)
{
m_result->DecrRef();
m_result = nullptr;
}
}
~MetadataFactorySingleResult() override;

MetadataLookup *m_result {nullptr};

Expand All @@ -65,14 +58,7 @@ class META_PUBLIC MetadataFactoryNoResult : public QEvent
m_result->IncrRef();
}
}
~MetadataFactoryNoResult() override
{
if (m_result)
{
m_result->DecrRef();
m_result = nullptr;
}
}
~MetadataFactoryNoResult() override;

MetadataLookup *m_result {nullptr};

Expand All @@ -87,7 +73,7 @@ class META_PUBLIC MetadataFactoryVideoChanges : public QEvent
m_additions(std::move(adds)),
m_moved(std::move(movs)),
m_deleted(std::move(dels)) {}
~MetadataFactoryVideoChanges() override = default;
~MetadataFactoryVideoChanges() override;

QList<int> m_additions; // newly added intids
QList<int> m_moved; // intids moved to new filename
Expand Down
2 changes: 2 additions & 0 deletions mythtv/libs/libmythtv/HLS/httplivestreambuffer.h
Original file line number Diff line number Diff line change
Expand Up @@ -110,7 +110,9 @@ class HLSRingBuffer : public RingBuffer
mutable QMutex m_lock; // protect general class members
bool m_meta {false}; // meta playlist
bool m_error {false}; // parsing error
#ifdef USING_LIBCRYPTO
bool m_aesmsg {false}; // only print one time that the media is encrypted
#endif
int m_startup {0}; // starting segment (where seek start)
/**
* assumed bitrate of playback
Expand Down
25 changes: 22 additions & 3 deletions mythtv/libs/libmythtv/decoders/avformatdecoder.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1387,20 +1387,39 @@ float AvFormatDecoder::GetVideoFrameRate(AVStream *Stream, AVCodecContext *Conte
};

// If the first choice rate is unusual, see if there is something more 'usual'
if (Sanitise && !IsStandard(rates.front()))
double detected = rates.front();
if (Sanitise && !IsStandard(detected))
{
for (auto rate : rates)
{
if (IsStandard(rate))
{
LOG(VB_GENERAL, LOG_INFO, LOC + QString("%1 is non-standard. Selecting %2 instead.")
LOG(VB_GENERAL, LOG_INFO, LOC + QString("%1 is non-standard - using %2 instead.")
.arg(rates.front()).arg(rate));

// The most common problem here is mpegts files where the average
// rate is slightly out and the estimated rate is the fallback.
// As noted above, however, the estimated rate is sometimes twice
// the actual for interlaced content. Try and detect and fix this
// so that we don't throw out deinterlacing and video mode switching.
// Assume anything under 30 may be interlaced - with +-10% error.
if (rate > 33.0 && detected < 33.0)
{
double half = rate / 2.0;
if (qAbs(half - detected) < (half * 0.1))
{
LOG(VB_GENERAL, LOG_INFO, LOC +
QString("Assuming %1 is a better choice than %2")
.arg(half).arg(rate));
return static_cast<float>(half);
}
}
return static_cast<float>(rate);
}
}
}

return static_cast<float>(rates.front());
return static_cast<float>(detected);
}

#ifdef USING_DXVA2
Expand Down
35 changes: 28 additions & 7 deletions mythtv/libs/libmythtv/decoders/mythcodeccontext.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -115,11 +115,23 @@ QStringList MythCodecContext::GetDecoderDescription(void)
{
QStringList decoders;

#ifdef USING_VDPAU
MythVDPAUHelper::GetDecoderList(decoders);
#endif
#ifdef USING_VAAPI
MythVAAPIContext::GetDecoderList(decoders);
#endif
#ifdef USING_MEDIACODEC
MythMediaCodecContext::GetDecoderList(decoders);
#endif
#ifdef USING_NVDEC
MythNVDECContext::GetDecoderList(decoders);
#endif
#ifdef USING_MMAL
MythMMALContext::GetDecoderList(decoders);
#endif
#ifdef USING_V4L2
MythV4L2M2MContext::GetDecoderList(decoders);
#endif
return decoders;
}
Expand Down Expand Up @@ -195,12 +207,15 @@ void MythCodecContext::GetDecoders(RenderOptions &Opts)
}
#endif
#ifdef USING_MMAL
Opts.decoders->append("mmal-dec");
(*Opts.equiv_decoders)["mmal-dec"].append("dummy");
if (MythOpenGLInterop::GetInteropType(FMT_MMAL, nullptr) != MythOpenGLInterop::Unsupported)
if (MythMMALContext::HaveMMAL())
{
Opts.decoders->append("mmal");
(*Opts.equiv_decoders)["mmal"].append("dummy");
Opts.decoders->append("mmal-dec");
(*Opts.equiv_decoders)["mmal-dec"].append("dummy");
if (MythOpenGLInterop::GetInteropType(FMT_MMAL, nullptr) != MythOpenGLInterop::Unsupported)
{
Opts.decoders->append("mmal");
(*Opts.equiv_decoders)["mmal"].append("dummy");
}
}
#endif
}
Expand Down Expand Up @@ -628,6 +643,7 @@ MythCodecContext::CodecProfile MythCodecContext::FFmpegToMythProfile(AVCodecID C
{
switch (CodecID)
{
case AV_CODEC_ID_MPEG1VIDEO: return MPEG1;
case AV_CODEC_ID_MPEG2VIDEO:
switch (Profile)
{
Expand Down Expand Up @@ -726,12 +742,14 @@ MythCodecContext::CodecProfile MythCodecContext::FFmpegToMythProfile(AVCodecID C
return NoProfile;
}

QString MythCodecContext::GetProfileDescription(CodecProfile Profile, QSize Size)
QString MythCodecContext::GetProfileDescription(CodecProfile Profile, QSize Size,
VideoFrameType Format, uint ColorDepth)
{
QString profile;
switch (Profile)
{
case NoProfile: profile = QObject::tr("Unknown/Unsupported"); break;
case MPEG1: profile = "MPEG1"; break;
case MPEG2: profile = "MPEG2"; break;
case MPEG2Simple: profile = "MPEG2 Simple"; break;
case MPEG2Main: profile = "MPEG2 Main"; break;
Expand Down Expand Up @@ -800,5 +818,8 @@ QString MythCodecContext::GetProfileDescription(CodecProfile Profile, QSize Size
if (Size.isEmpty())
return profile;

return QObject::tr("%1 (Max size: %2x%3)").arg(profile).arg(Size.width()).arg(Size.height());
return QObject::tr("%1%2%3 (Max size: %4x%5)")
.arg(profile).arg(Format != FMT_NONE ? QString(" %1").arg(format_description(Format)) : "")
.arg(ColorDepth > 8 ? QString(" %1Bit").arg(ColorDepth) : "")
.arg(Size.width()).arg(Size.height());
}
4 changes: 3 additions & 1 deletion mythtv/libs/libmythtv/decoders/mythcodeccontext.h
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,7 @@ class MTV_PUBLIC MythCodecContext
enum CodecProfile
{
NoProfile = 0,
MPEG1,
MPEG2,
MPEG2Simple,
MPEG2Main,
Expand Down Expand Up @@ -137,7 +138,8 @@ class MTV_PUBLIC MythCodecContext
static void CreateDecoderCallback (void *Wait, void *Context, void *Callback);
static AVBufferRef* CreateDevice (AVHWDeviceType Type, MythOpenGLInterop *Interop, const QString &Device = QString());
static bool IsUnsupportedProfile (AVCodecContext *Context);
static QString GetProfileDescription (CodecProfile Profile, QSize Size);
static QString GetProfileDescription (CodecProfile Profile, QSize Size,
VideoFrameType Format = FMT_NONE, uint ColorDepth = 0);
static CodecProfile FFmpegToMythProfile(AVCodecID CodecID, int Profile);

virtual void InitVideoCodec (AVCodecContext *Context,
Expand Down
2 changes: 1 addition & 1 deletion mythtv/libs/libmythtv/decoders/mythmediacodeccontext.h
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ extern "C" {
#include "libavcodec/avcodec.h"
}

typedef QList<QPair<MythCodecContext::CodecProfile,QSize> > MCProfiles;
using MCProfiles = QList<QPair<MythCodecContext::CodecProfile,QSize>>;

class MythMediaCodecContext : public MythCodecContext
{
Expand Down
146 changes: 101 additions & 45 deletions mythtv/libs/libmythtv/decoders/mythmmalcontext.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -2,11 +2,6 @@
#include "decoders/avformatdecoder.h"
#include "mythmmalcontext.h"

// Broadcom
extern "C" {
#include "interface/vmcs_host/vc_vchi_gencmd.h"
}

// FFmpeg
extern "C" {
#include "libavutil/opt.h"
Expand Down Expand Up @@ -39,54 +34,28 @@ MythCodecID MythMMALContext::GetSupportedCodec(AVCodecContext **Context,
return failure;

// Only MPEG2, MPEG4, VC1 and H264 supported (and HEVC will never be supported)
QString codecstr;
MythCodecContext::CodecProfile mythprofile = MythCodecContext::NoProfile;
switch ((*Codec)->id)
{
case AV_CODEC_ID_MPEG2VIDEO: codecstr = "MPG2"; break;
case AV_CODEC_ID_MPEG4: codecstr = "MPG4"; break;
case AV_CODEC_ID_VC1: codecstr = "WVC1"; break;
case AV_CODEC_ID_H264: codecstr = "H264"; break;
case AV_CODEC_ID_MPEG2VIDEO: mythprofile = MythCodecContext::MPEG2; break;
case AV_CODEC_ID_MPEG4: mythprofile = MythCodecContext::MPEG4; break;
case AV_CODEC_ID_VC1: mythprofile = MythCodecContext::VC1; break;
case AV_CODEC_ID_H264:
if ((*Context)->profile == FF_PROFILE_H264_HIGH_10 ||
(*Context)->profile == FF_PROFILE_H264_HIGH_10_INTRA)
{
return failure;
}
mythprofile = MythCodecContext::H264; break;
default: break;
}

if (codecstr.isEmpty())
if (mythprofile == MythCodecContext::NoProfile)
return failure;

// check actual decoder support
vcos_init();
VCHI_INSTANCE_T vchi_instance;
if (vchi_initialise(&vchi_instance) != 0)
return failure;
if (vchi_connect(nullptr, 0, vchi_instance) != 0)
return failure;
VCHI_CONNECTION_T *vchi_connection = nullptr;
vc_vchi_gencmd_init(vchi_instance, &vchi_connection, 1 );

bool found = false;
char command[32];
char* response = nullptr;
int responsesize = 0;
QString msg = QString("codec_enabled %1").arg(codecstr);
if (!vc_gencmd(command, sizeof(command), msg.toLocal8Bit().constData()))
vc_gencmd_string_property(command, codecstr.toLocal8Bit().constData(), &response, &responsesize);

if (!response || responsesize < 1)
{
LOG(VB_GENERAL, LOG_ERR, LOC + "Failed to query codec support");
}
else
{
if (qstrcmp(response, "enabled") != 0)
LOG(VB_GENERAL, LOG_INFO, LOC +QString("Codec '%1' not supported (no license?)")
.arg(avcodec_get_name((*Codec)->id)));
else
found = true;
}

vc_gencmd_stop();
vchi_disconnect(vchi_instance);

if (!found)
const MMALProfiles& profiles = MythMMALContext::GetProfiles();
if (!profiles.contains(mythprofile))
return failure;

if (!decodeonly)
Expand Down Expand Up @@ -266,3 +235,90 @@ AVPixelFormat MythMMALContext::GetFormat(AVCodecContext*, const AVPixelFormat *P
}
return AV_PIX_FMT_NONE;
}

bool MythMMALContext::HaveMMAL(void)
{
static QMutex lock(QMutex::Recursive);
QMutexLocker locker(&lock);
static bool s_checked = false;
static bool s_available = false;

if (s_checked)
return s_available;
s_checked = true;

const MMALProfiles& profiles = MythMMALContext::GetProfiles();
if (profiles.isEmpty())
return s_available;

LOG(VB_GENERAL, LOG_INFO, LOC + "Supported/available MMAL decoders:");
s_available = true;
QSize size{0, 0};
foreach (auto profile, profiles)
LOG(VB_GENERAL, LOG_INFO, LOC + MythCodecContext::GetProfileDescription(profile, size));
return s_available;
}

void MythMMALContext::GetDecoderList(QStringList &Decoders)
{
    // Append a human-readable summary of the available MMAL decoder
    // profiles to Decoders; appends nothing when MMAL offers no codecs.
    const MMALProfiles& found = MythMMALContext::GetProfiles();
    if (found.isEmpty())
        return;

    Decoders.append("MMAL:");
    QSize nosize(0, 0);
    for (auto codec : found)
        Decoders.append(MythCodecContext::GetProfileDescription(codec, nosize));
}

// Broadcom
extern "C" {
#include "interface/vmcs_host/vc_vchi_gencmd.h"
}

const MMALProfiles& MythMMALContext::GetProfiles(void)
{
    // Lazily query the VideoCore firmware (once per process) for which of
    // the candidate codecs are enabled, caching the result in s_profiles.
    static QMutex lock(QMutex::Recursive);
    static bool s_initialised = false;
    static MMALProfiles s_profiles;

    QMutexLocker locker(&lock);
    if (s_initialised)
        return s_profiles;
    s_initialised = true;

    // Firmware 'codec_enabled' query names paired with the MythTV profile
    // they correspond to.
    static const QPair<QString, MythCodecContext::CodecProfile> s_map[] =
    {
        { "MPG2", MythCodecContext::MPEG2 },
        { "MPG4", MythCodecContext::MPEG4 },
        { "WVC1", MythCodecContext::VC1 },
        { "H264", MythCodecContext::H264 }
    };

    // Bring up the VCHI/gencmd interface to the firmware.
    // NOTE(review): the early returns below skip vc_gencmd_stop/vchi_disconnect;
    // presumably there is nothing to tear down when initialise/connect fail -
    // confirm against the VCHI API.
    vcos_init();
    VCHI_INSTANCE_T vchi_instance;
    if (vchi_initialise(&vchi_instance) != 0)
        return s_profiles;
    if (vchi_connect(nullptr, 0, vchi_instance) != 0)
        return s_profiles;
    VCHI_CONNECTION_T *vchi_connection = nullptr;
    vc_vchi_gencmd_init(vchi_instance, &vchi_connection, 1 );

    for (auto profile : s_map)
    {
        // Ask the firmware e.g. "codec_enabled MPG2"; the reply buffer is
        // then parsed for the named property's value.
        char command[32];
        char* response = nullptr;
        int responsesize = 0;
        QString msg = QString("codec_enabled %1").arg(profile.first);
        if (!vc_gencmd(command, sizeof(command), msg.toLocal8Bit().constData()))
            vc_gencmd_string_property(command, profile.first.toLocal8Bit().constData(), &response, &responsesize);

        // Only an explicit "enabled" reply (codec present and licensed)
        // counts as supported.
        if (response && responsesize && qstrcmp(response, "enabled") == 0)
            s_profiles.append(profile.second);
    }

    vc_gencmd_stop();
    vchi_disconnect(vchi_instance);

    return s_profiles;
}
5 changes: 5 additions & 0 deletions mythtv/libs/libmythtv/decoders/mythmmalcontext.h
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,8 @@
#include "mythcodeccontext.h"
#include "mythmmalinterop.h"

using MMALProfiles = QList<MythCodecContext::CodecProfile>;

class MythMMALContext : public MythCodecContext
{
public:
Expand All @@ -22,8 +24,11 @@ class MythMMALContext : public MythCodecContext
static bool GetBuffer (AVCodecContext *Context, VideoFrame *Frame, AVFrame *AvFrame, int);
bool GetBuffer2 (AVCodecContext *Context, VideoFrame *Frame, AVFrame *AvFrame, int);
static enum AVPixelFormat GetFormat (AVCodecContext*, const AVPixelFormat *PixFmt);
static void GetDecoderList (QStringList &Decoders);
static bool HaveMMAL (void);

protected:
static const MMALProfiles& GetProfiles(void);
MythMMALInterop* m_interop { nullptr };
};

Expand Down
198 changes: 106 additions & 92 deletions mythtv/libs/libmythtv/decoders/mythnvdeccontext.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -7,14 +7,11 @@
#include "mythnvdeccontext.h"

extern "C" {
#include "libavutil/opt.h"
#include "libavutil/opt.h"
}

#define LOC QString("NVDEC: ")

QMutex* MythNVDECContext::s_NVDECLock = new QMutex(QMutex::Recursive);
bool MythNVDECContext::s_NVDECAvailable = false;
vector<MythNVDECContext::MythNVDECCaps> MythNVDECContext::s_NVDECDecoderCaps = vector<MythNVDECContext::MythNVDECCaps>();

MythNVDECContext::MythNVDECContext(DecoderBase *Parent, MythCodecID CodecID)
: MythCodecContext(Parent, CodecID)
Expand Down Expand Up @@ -47,8 +44,6 @@ MythCodecID MythNVDECContext::GetSupportedCodec(AVCodecContext **Context,
QString profile = avcodec_profile_name((*Context)->codec_id, (*Context)->profile);
QString pixfmt = av_get_pix_fmt_name((*Context)->pix_fmt);

// Check actual decoder capabilities. These are loaded statically and in a thread safe
// manner in HaveNVDEC
cudaVideoCodec cudacodec = cudaVideoCodec_NumCodecs;
switch ((*Context)->codec_id)
{
Expand Down Expand Up @@ -77,39 +72,24 @@ MythCodecID MythNVDECContext::GetSupportedCodec(AVCodecContext **Context,
uint depth = static_cast<uint>(ColorDepth(type) - 8);
bool supported = false;

if ((cudacodec != cudaVideoCodec_NumCodecs) && (cudaformat != cudaVideoChromaFormat_Monochrome))
if ((cudacodec == cudaVideoCodec_NumCodecs) || (cudaformat == cudaVideoChromaFormat_Monochrome))
return failure;

// iterate over known decoder capabilities
const std::vector<MythNVDECCaps>& profiles = MythNVDECContext::GetProfiles();
for (auto cap : profiles)
{
// iterate over known decoder capabilities
s_NVDECLock->lock();
for (const auto & cap : s_NVDECDecoderCaps)
if (cap.Supports(cudacodec, cudaformat, depth, (*Context)->width, (*Context)->width))
{
if ((cap.m_codec == cudacodec) && (cap.m_depth == depth) && (cap.m_format == cudaformat))
{
// match - now check restrictions
int width = (*Context)->width;
int height = (*Context)->height;
uint mblocks = static_cast<uint>((width * height) / 256);
if ((cap.m_maximum.width() >= width) && (cap.m_maximum.height() >= height) &&
(cap.m_minimum.width() <= width) && (cap.m_minimum.height() <= height) &&
(cap.m_macroBlocks >= mblocks))
{
supported = true;
}
else
{
LOG(VB_PLAYBACK, LOG_INFO, LOC +
QString("Codec '%9' failed size constraints: source: %1x%2 min: %3x%4 max: %5x%6 mbs: %7, max %8")
.arg(width).arg(height).arg(cap.m_minimum.width()).arg(cap.m_minimum.height())
.arg(cap.m_maximum.width()).arg(cap.m_maximum.height()).arg(mblocks).arg(cap.m_macroBlocks)
.arg(get_encoding_type(success)));

}
break;
}
supported = true;
break;
}
s_NVDECLock->unlock();
}

QString desc = QString("'%1 %2 %3 Depth:%4 %5x%6'")
.arg(codecstr).arg(profile).arg(pixfmt).arg(depth + 8)
.arg((*Context)->width).arg((*Context)->height);

// and finally try and retrieve the actual FFmpeg decoder
if (supported)
{
Expand All @@ -128,9 +108,7 @@ MythCodecID MythNVDECContext::GetSupportedCodec(AVCodecContext **Context,
AVCodec *codec = avcodec_find_decoder_by_name(name.toLocal8Bit());
if (codec)
{
LOG(VB_PLAYBACK, LOG_INFO, LOC + QString("HW device type '%1' supports decoding '%2 %3 %4' depth %5")
.arg(av_hwdevice_get_type_name(AV_HWDEVICE_TYPE_CUDA)).arg(codecstr)
.arg(profile).arg(pixfmt).arg(depth + 8));
LOG(VB_PLAYBACK, LOG_INFO, LOC + QString("NVDEC supports decoding %1").arg(desc));
*Codec = codec;
gCodecMap->freeCodecContext(Stream);
*Context = gCodecMap->getCodecContext(Stream, *Codec);
Expand All @@ -141,9 +119,7 @@ MythCodecID MythNVDECContext::GetSupportedCodec(AVCodecContext **Context,
}
}

LOG(VB_PLAYBACK, LOG_INFO, LOC + QString("HW device type '%1' does not support decoding '%2 %3 %4' depth %5")
.arg(av_hwdevice_get_type_name(AV_HWDEVICE_TYPE_CUDA)).arg(codecstr)
.arg(profile).arg(pixfmt).arg(depth + 8));
LOG(VB_PLAYBACK, LOG_INFO, LOC + QString("NVDEC does NOT support %1").arg(desc));
return failure;
}

Expand Down Expand Up @@ -465,77 +441,122 @@ MythNVDECContext::MythNVDECCaps::MythNVDECCaps(cudaVideoCodec Codec, uint Depth,
m_maximum(Maximum),
m_macroBlocks(MacroBlocks)
{
auto ToMythProfile = [](cudaVideoCodec CudaCodec)
{
switch (CudaCodec)
{
case cudaVideoCodec_MPEG1: return MythCodecContext::MPEG1;
case cudaVideoCodec_MPEG2: return MythCodecContext::MPEG2;
case cudaVideoCodec_MPEG4: return MythCodecContext::MPEG4;
case cudaVideoCodec_VC1: return MythCodecContext::VC1;
case cudaVideoCodec_H264: return MythCodecContext::H264;
case cudaVideoCodec_HEVC: return MythCodecContext::HEVC;
case cudaVideoCodec_VP8: return MythCodecContext::VP8;
case cudaVideoCodec_VP9: return MythCodecContext::VP9;
default: break;
}
return MythCodecContext::NoProfile;
};

auto ToMythFormat = [](cudaVideoChromaFormat CudaFormat)
{
switch (CudaFormat)
{
case cudaVideoChromaFormat_420: return FMT_YV12;
case cudaVideoChromaFormat_422: return FMT_YUV422P;
case cudaVideoChromaFormat_444: return FMT_YUV444P;
default: break;
}
return FMT_NONE;
};
m_profile = ToMythProfile(m_codec);
m_type = ToMythFormat(m_format);
}

bool MythNVDECContext::MythNVDECCaps::Supports(cudaVideoCodec Codec, cudaVideoChromaFormat Format,
                                               uint Depth, int Width, int Height)
{
    // True when this capability entry matches the requested codec, chroma
    // format and bit depth AND the video dimensions fit within its limits.
    if ((Codec != m_codec) || (Format != m_format) || (Depth != m_depth))
        return false;
    if ((Width > m_maximum.width()) || (Height > m_maximum.height()))
        return false;
    if ((Width < m_minimum.width()) || (Height < m_minimum.height()))
        return false;
    // One 16x16 macroblock per 256 pixels - must not exceed the decoder cap.
    uint blocks = static_cast<uint>((Width * Height) / 256);
    return m_macroBlocks >= blocks;
}

bool MythNVDECContext::HaveNVDEC(void)
{
QMutexLocker locker(s_NVDECLock);
static QMutex lock(QMutex::Recursive);
QMutexLocker locker(&lock);
static bool s_checked = false;
static bool s_available = false;
if (!s_checked)
{
if (gCoreContext->IsUIThread())
NVDECCheck();
{
const std::vector<MythNVDECCaps>& profiles = MythNVDECContext::GetProfiles();
if (profiles.empty())
{
LOG(VB_GENERAL, LOG_INFO, LOC + "No NVDEC decoders found");
}
else
{
s_available = true;
LOG(VB_GENERAL, LOG_INFO, LOC + "Supported/available NVDEC decoders:");
for (auto profile : profiles)
{
LOG(VB_GENERAL, LOG_INFO, LOC +
MythCodecContext::GetProfileDescription(profile.m_profile,profile.m_maximum,
profile.m_type, profile.m_depth + 8));
}
}
}
else
{
LOG(VB_GENERAL, LOG_WARNING, LOC + "HaveNVDEC must be initialised from the main thread");
}
}
s_checked = true;
return s_NVDECAvailable;
return s_available;
}

inline MythCodecID cuda_to_myth(cudaVideoCodec Codec)
{
    // Translate a CUVID codec identifier into the equivalent MythTV codec
    // id, returning kCodec_NONE for anything unrecognised.
    if (Codec == cudaVideoCodec_MPEG1) return kCodec_MPEG1;
    if (Codec == cudaVideoCodec_MPEG2) return kCodec_MPEG2;
    if (Codec == cudaVideoCodec_MPEG4) return kCodec_MPEG4;
    if (Codec == cudaVideoCodec_VC1)   return kCodec_VC1;
    if (Codec == cudaVideoCodec_H264)  return kCodec_H264;
    if (Codec == cudaVideoCodec_HEVC)  return kCodec_HEVC;
    if (Codec == cudaVideoCodec_VP8)   return kCodec_VP8;
    if (Codec == cudaVideoCodec_VP9)   return kCodec_VP9;
    return kCodec_NONE;
}

inline VideoFrameType cuda_to_myth(cudaVideoChromaFormat Format)
void MythNVDECContext::GetDecoderList(QStringList &Decoders)
{
switch (Format)
{
case cudaVideoChromaFormat_420: return FMT_YV12;
case cudaVideoChromaFormat_422: return FMT_YUV422P;
case cudaVideoChromaFormat_444: return FMT_YUV444P;
default: break;
}
return FMT_NONE;
const std::vector<MythNVDECCaps>& profiles = MythNVDECContext::GetProfiles();
if (profiles.empty())
return;
Decoders.append("NVDEC:");
for (auto profile : profiles)
if (!(profile.m_depth % 2)) // Ignore 9/11bit etc
Decoders.append(MythCodecContext::GetProfileDescription(profile.m_profile, profile.m_maximum,
profile.m_type, profile.m_depth + 8));
}

/*! \brief Perform the actual NVDEC availability and capability check
* \note lock is held in HaveNVDEC
*/
void MythNVDECContext::NVDECCheck(void)
const std::vector<MythNVDECContext::MythNVDECCaps> &MythNVDECContext::GetProfiles(void)
{
static QMutex lock(QMutex::Recursive);
static bool s_initialised = false;
static std::vector<MythNVDECContext::MythNVDECCaps> s_profiles;

QMutexLocker locker(&lock);
if (s_initialised)
return s_profiles;
s_initialised = true;

MythRenderOpenGL *opengl = MythRenderOpenGL::GetOpenGLRender();
CUcontext context = nullptr;
CudaFunctions *cuda = nullptr;
if (MythNVDECInterop::CreateCUDAContext(opengl, cuda, context))
{
OpenGLLocker locker(opengl);
OpenGLLocker gllocker(opengl);
CuvidFunctions *cuvid = nullptr;
CUcontext dummy = nullptr;
cuda->cuCtxPushCurrent(context);

if (cuvid_load_functions(&cuvid, nullptr) == 0)
{
// basic check passed
LOG(VB_GENERAL, LOG_INFO, LOC + "NVDEC is available");
s_NVDECAvailable = true;
s_NVDECDecoderCaps.clear();

if (cuvid->cuvidGetDecoderCaps)
LOG(VB_PLAYBACK, LOG_INFO, LOC + "Decoder support check:");
else
if (!cuvid->cuvidGetDecoderCaps)
LOG(VB_GENERAL, LOG_WARNING, LOC + "Old driver - cannot check decoder capabilities");

// now iterate over codecs, depths and formats to check support
Expand All @@ -557,24 +578,18 @@ void MythNVDECContext::NVDECCheck(void)
if (cuvid->cuvidGetDecoderCaps && (cuvid->cuvidGetDecoderCaps(&caps) == CUDA_SUCCESS) &&
caps.bIsSupported)
{
s_NVDECDecoderCaps.emplace_back(
s_profiles.emplace_back(
MythNVDECCaps(cudacodec, depth, cudaformat,
QSize(caps.nMinWidth, caps.nMinHeight),
QSize(static_cast<int>(caps.nMaxWidth), static_cast<int>(caps.nMaxHeight)),
caps.nMaxMBCount));
LOG(VB_PLAYBACK, LOG_INFO, LOC +
QString("Codec: %1: Depth: %2 Format: %3 Min: %4x%5 Max: %6x%7 MBs: %8")
.arg(toString(cuda_to_myth(cudacodec))).arg(depth + 8)
.arg(format_description(cuda_to_myth(cudaformat)))
.arg(caps.nMinWidth).arg(caps.nMinHeight)
.arg(caps.nMaxWidth).arg(caps.nMaxHeight).arg(caps.nMaxMBCount));
}
else if (!cuvid->cuvidGetDecoderCaps)
{
// dummy - just support everything:)
s_NVDECDecoderCaps.emplace_back(MythNVDECCaps(cudacodec, depth, cudaformat,
QSize(32, 32), QSize(8192, 8192),
(8192 * 8192) / 256));
s_profiles.emplace_back(MythNVDECCaps(cudacodec, depth, cudaformat,
QSize(32, 32), QSize(8192, 8192),
(8192 * 8192) / 256));
}
}
}
Expand All @@ -585,6 +600,5 @@ void MythNVDECContext::NVDECCheck(void)
}
MythNVDECInterop::CleanupContext(opengl, cuda, context);

if (!s_NVDECAvailable)
LOG(VB_GENERAL, LOG_INFO, LOC + "NVDEC functionality checked failed");
return s_profiles;
}
23 changes: 12 additions & 11 deletions mythtv/libs/libmythtv/decoders/mythnvdeccontext.h
Original file line number Diff line number Diff line change
Expand Up @@ -42,28 +42,29 @@ class MythNVDECContext : public MythCodecContext
AVFrame *AvFrame, int Flags);
static int InitialiseDecoder (AVCodecContext *Context);
static bool HaveNVDEC (void);
static void GetDecoderList (QStringList &Decoders);

private:
class MythNVDECCaps
{
public:
MythNVDECCaps(cudaVideoCodec Codec, uint Depth, cudaVideoChromaFormat Format,
QSize Minimum, QSize Maximum, uint MacroBlocks);
bool Supports(cudaVideoCodec Codec, cudaVideoChromaFormat Format, uint Depth,
int Width, int Height);

cudaVideoCodec m_codec;
uint m_depth;
cudaVideoChromaFormat m_format;
QSize m_minimum;
QSize m_maximum;
uint m_macroBlocks;
MythCodecContext::CodecProfile m_profile { MythCodecContext::NoProfile };
VideoFrameType m_type { FMT_NONE };
cudaVideoCodec m_codec { cudaVideoCodec_NumCodecs };
uint m_depth { 0 };
cudaVideoChromaFormat m_format { cudaVideoChromaFormat_Monochrome };
QSize m_minimum { };
QSize m_maximum { };
uint m_macroBlocks { 0 };
};

static QMutex* s_NVDECLock;
static bool s_NVDECAvailable;
static std::vector<MythNVDECCaps> s_NVDECDecoderCaps;
static void NVDECCheck (void);

private:
static const std::vector<MythNVDECCaps>& GetProfiles(void);
MythDeintType m_deinterlacer { DEINT_NONE };
bool m_deinterlacer2x { false };
};
Expand Down
301 changes: 163 additions & 138 deletions mythtv/libs/libmythtv/decoders/mythv4l2m2mcontext.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -36,24 +36,6 @@ MythV4L2M2MContext::MythV4L2M2MContext(DecoderBase *Parent, MythCodecID CodecID)
{
}

inline uint32_t V4L2CodecType(AVCodecID Id)
{
switch (Id)
{
case AV_CODEC_ID_MPEG1VIDEO: return V4L2_PIX_FMT_MPEG1;
case AV_CODEC_ID_MPEG2VIDEO: return V4L2_PIX_FMT_MPEG2;
case AV_CODEC_ID_MPEG4: return V4L2_PIX_FMT_MPEG4;
case AV_CODEC_ID_H263: return V4L2_PIX_FMT_H263;
case AV_CODEC_ID_H264: return V4L2_PIX_FMT_H264;
case AV_CODEC_ID_VC1: return V4L2_PIX_FMT_VC1_ANNEX_G;
case AV_CODEC_ID_VP8: return V4L2_PIX_FMT_VP8;
case AV_CODEC_ID_VP9: return V4L2_PIX_FMT_VP9;
case AV_CODEC_ID_HEVC: return V4L2_PIX_FMT_HEVC;
default: break;
}
return 0;
}

bool MythV4L2M2MContext::DecoderWillResetOnFlush(void)
{
return codec_is_v4l2(m_codecID);
Expand All @@ -73,13 +55,27 @@ MythCodecID MythV4L2M2MContext::GetSupportedCodec(AVCodecContext **Context,
if (!Decoder.startsWith("v4l2"))
return failure;

// unknown codec
uint32_t v4l2_fmt = V4L2CodecType((*Codec)->id);
if (!v4l2_fmt)
// supported by device driver?
MythCodecContext::CodecProfile mythprofile = MythCodecContext::NoProfile;
switch ((*Codec)->id)
{
case AV_CODEC_ID_MPEG1VIDEO: mythprofile = MythCodecContext::MPEG1; break;
case AV_CODEC_ID_MPEG2VIDEO: mythprofile = MythCodecContext::MPEG2; break;
case AV_CODEC_ID_MPEG4: mythprofile = MythCodecContext::MPEG4; break;
case AV_CODEC_ID_H263: mythprofile = MythCodecContext::H263; break;
case AV_CODEC_ID_H264: mythprofile = MythCodecContext::H264; break;
case AV_CODEC_ID_VC1: mythprofile = MythCodecContext::VC1; break;
case AV_CODEC_ID_VP8: mythprofile = MythCodecContext::VP8; break;
case AV_CODEC_ID_VP9: mythprofile = MythCodecContext::VP9; break;
case AV_CODEC_ID_HEVC: mythprofile = MythCodecContext::HEVC; break;
default: break;
}

if (mythprofile == MythCodecContext::NoProfile)
return failure;

// supported by device driver?
if (!HaveV4L2Codecs((*Codec)->id))
const V4L2Profiles& profiles = MythV4L2M2MContext::GetProfiles();
if (!profiles.contains(mythprofile))
return failure;

if (s_useV4L2Request && !decodeonly)
Expand Down Expand Up @@ -199,147 +195,176 @@ bool MythV4L2M2MContext::GetBuffer(AVCodecContext *Context, VideoFrame *Frame, A
return true;
}

bool MythV4L2M2MContext::HaveV4L2Codecs(AVCodecID Codec /* = AV_CODEC_ID_NONE */)
const V4L2Profiles& MythV4L2M2MContext::GetProfiles(void)
{
static QVector<AVCodecID> s_avcodecs({AV_CODEC_ID_MPEG1VIDEO, AV_CODEC_ID_MPEG2VIDEO,
AV_CODEC_ID_MPEG4, AV_CODEC_ID_H263,
AV_CODEC_ID_H264, AV_CODEC_ID_VC1,
AV_CODEC_ID_VP8, AV_CODEC_ID_VP9,
AV_CODEC_ID_HEVC});

static bool s_needscheck = true;
static QVector<AVCodecID> s_supportedV4L2Codecs;

QMutexLocker locker(&s_drmPrimeLock);
static const QPair<uint32_t, MythCodecContext::CodecProfile> s_map[] =
{
{ V4L2_PIX_FMT_MPEG1, MythCodecContext::MPEG1 },
{ V4L2_PIX_FMT_MPEG2, MythCodecContext::MPEG2 },
{ V4L2_PIX_FMT_MPEG4, MythCodecContext::MPEG4 },
{ V4L2_PIX_FMT_H263, MythCodecContext::H263 },
{ V4L2_PIX_FMT_H264, MythCodecContext::H264 },
{ V4L2_PIX_FMT_VC1_ANNEX_G, MythCodecContext::VC1 },
{ V4L2_PIX_FMT_VP8, MythCodecContext::VP8 },
{ V4L2_PIX_FMT_VP9, MythCodecContext::VP9 },
{ V4L2_PIX_FMT_HEVC, MythCodecContext::HEVC }
};

static QMutex lock(QMutex::Recursive);
static bool s_initialised = false;
static V4L2Profiles s_profiles;

QMutexLocker locker(&lock);
if (s_initialised)
return s_profiles;
s_initialised = true;

// this a temporary workaround for v4l2_request support - assume available
if (s_useV4L2Request)
{
s_needscheck = false;
s_supportedV4L2Codecs = s_avcodecs;
LOG(VB_GENERAL, LOG_INFO, LOC + "V4L2Request support endabled - assuming all available");
for (auto profile : s_map)
s_profiles.append(profile.second);
return s_profiles;
}

if (s_needscheck)
const QString root("/dev/");
QDir dir(root);
QStringList namefilters;
namefilters.append("video*");
QStringList devices = dir.entryList(namefilters, QDir::Files |QDir::System);
foreach (QString device, devices)
{
s_needscheck = false;
s_supportedV4L2Codecs.clear();

// Iterate over /dev/videoXX and check for generic codecs support
// We don't care what device is used or actually try a given format (which
// would require width/height etc) - but simply check for capture and output
// support for the given codecs, whether multiplanar or not.
const QString root("/dev/");
QDir dir(root);
QStringList namefilters;
namefilters.append("video*");
QStringList devices = dir.entryList(namefilters, QDir::Files |QDir::System);
foreach (QString device, devices)
V4L2util v4l2dev(root + device);
uint32_t caps = v4l2dev.GetCapabilities();
LOG(VB_PLAYBACK, LOG_INFO, LOC + QString("Device: %1 Driver: '%2' Capabilities: 0x%3")
.arg(v4l2dev.GetDeviceName()).arg(v4l2dev.GetDriverName()).arg(caps, 0, 16));

// check capture and output support
// these mimic the device checks in v4l2_m2m.c
bool mplanar = (caps & (V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_VIDEO_OUTPUT_MPLANE) &&
caps & V4L2_CAP_STREAMING);
bool mplanarm2m = caps & V4L2_CAP_VIDEO_M2M_MPLANE;
bool splanar = (caps & (V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT) &&
caps & V4L2_CAP_STREAMING);
bool splanarm2m = caps & V4L2_CAP_VIDEO_M2M;

if (!(mplanar || mplanarm2m || splanar || splanarm2m))
continue;

v4l2_buf_type capturetype = V4L2_BUF_TYPE_VIDEO_CAPTURE;
v4l2_buf_type outputtype = V4L2_BUF_TYPE_VIDEO_OUTPUT;

if (mplanar || mplanarm2m)
{
V4L2util v4l2dev(root + device);
uint32_t caps = v4l2dev.GetCapabilities();
LOG(VB_PLAYBACK, LOG_INFO, LOC + QString("Device: %1 Driver: '%2' Capabilities: 0x%3")
.arg(v4l2dev.GetDeviceName()).arg(v4l2dev.GetDriverName()).arg(caps, 0, 16));

// check capture and output support
// these mimic the device checks in v4l2_m2m.c
bool mplanar = (caps & (V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_VIDEO_OUTPUT_MPLANE) &&
caps & V4L2_CAP_STREAMING);
bool mplanarm2m = caps & V4L2_CAP_VIDEO_M2M_MPLANE;
bool splanar = (caps & (V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT) &&
caps & V4L2_CAP_STREAMING);
bool splanarm2m = caps & V4L2_CAP_VIDEO_M2M;

if (!(mplanar || mplanarm2m || splanar || splanarm2m))
continue;

v4l2_buf_type capturetype = V4L2_BUF_TYPE_VIDEO_CAPTURE;
v4l2_buf_type outputtype = V4L2_BUF_TYPE_VIDEO_OUTPUT;

if (mplanar || mplanarm2m)
capturetype = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
outputtype = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
}

// check codec support
QStringList debug;
QSize dummy{0, 0};
for (auto profile : s_map)
{
bool found = false;
uint32_t v4l2pixfmt = profile.first;
MythCodecContext::CodecProfile mythprofile = profile.second;
struct v4l2_fmtdesc fdesc {};
memset(&fdesc, 0, sizeof(fdesc));

// check output first
fdesc.type = outputtype;
while (!found)
{
capturetype = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
outputtype = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
int res = ioctl(v4l2dev.FD(), VIDIOC_ENUM_FMT, &fdesc);
if (res)
break;
if (fdesc.pixelformat == v4l2pixfmt)
found = true;
fdesc.index++;
}

// check codec support
QStringList debug;
foreach (AVCodecID codec, s_avcodecs)
if (found)
{
bool found = false;
uint32_t v4l2pixfmt = V4L2CodecType(codec);
struct v4l2_fmtdesc fdesc {};
QStringList pixformats;
bool foundfmt = false;
// check capture
memset(&fdesc, 0, sizeof(fdesc));

// check output first
fdesc.type = outputtype;
while (!found)
fdesc.type = capturetype;
while (true)
{
int res = ioctl(v4l2dev.FD(), VIDIOC_ENUM_FMT, &fdesc);
if (res)
break;
if (fdesc.pixelformat == v4l2pixfmt)
found = true;
pixformats.append(fourcc_str(static_cast<int>(fdesc.pixelformat)));

// this is a bit of a shortcut
if (fdesc.pixelformat == V4L2_PIX_FMT_YUV420 ||
fdesc.pixelformat == V4L2_PIX_FMT_YVU420 ||
fdesc.pixelformat == V4L2_PIX_FMT_YUV420M ||
fdesc.pixelformat == V4L2_PIX_FMT_YVU420M ||
fdesc.pixelformat == V4L2_PIX_FMT_NV12 ||
fdesc.pixelformat == V4L2_PIX_FMT_NV12M ||
fdesc.pixelformat == V4L2_PIX_FMT_NV21 ||
fdesc.pixelformat == V4L2_PIX_FMT_NV21M)
{
if (!s_profiles.contains(mythprofile))
s_profiles.append(mythprofile);
foundfmt = true;
break;
}
fdesc.index++;
}

if (found)
if (!foundfmt)
{
QStringList pixformats;
bool foundfmt = false;
// check capture
memset(&fdesc, 0, sizeof(fdesc));
fdesc.type = capturetype;
while (true)
{
int res = ioctl(v4l2dev.FD(), VIDIOC_ENUM_FMT, &fdesc);
if (res)
break;
pixformats.append(fourcc_str(static_cast<int>(fdesc.pixelformat)));

// this is a bit of a shortcut
if (fdesc.pixelformat == V4L2_PIX_FMT_YUV420 ||
fdesc.pixelformat == V4L2_PIX_FMT_YVU420 ||
fdesc.pixelformat == V4L2_PIX_FMT_YUV420M ||
fdesc.pixelformat == V4L2_PIX_FMT_YVU420M ||
fdesc.pixelformat == V4L2_PIX_FMT_NV12 ||
fdesc.pixelformat == V4L2_PIX_FMT_NV12M ||
fdesc.pixelformat == V4L2_PIX_FMT_NV21 ||
fdesc.pixelformat == V4L2_PIX_FMT_NV21M)
{
if (!s_supportedV4L2Codecs.contains(codec))
s_supportedV4L2Codecs.append(codec);
debug.append(avcodec_get_name(codec));
foundfmt = true;
break;
}
fdesc.index++;
}

if (!foundfmt)
{
if (pixformats.isEmpty())
pixformats.append("None");
LOG(VB_PLAYBACK, LOG_INFO, LOC + QString("Codec '%1' has no supported formats (Supported: %2)")
.arg(codec).arg(pixformats.join((","))));
}
if (pixformats.isEmpty())
pixformats.append("None");
LOG(VB_PLAYBACK, LOG_INFO, LOC + QString("Codec '%1' has no supported formats (Supported: %2)")
.arg(MythCodecContext::GetProfileDescription(mythprofile, dummy)).arg(pixformats.join((","))));
}
}
if (debug.isEmpty())
debug.append("None");
LOG(VB_PLAYBACK, LOG_INFO, LOC + QString("Device: %1 Supported codecs: '%2'")
.arg(v4l2dev.GetDeviceName()).arg(debug.join(",")));
}
QStringList gdebug;
foreach (AVCodecID codec, s_supportedV4L2Codecs)
gdebug.append(avcodec_get_name(codec));
if (gdebug.isEmpty())
gdebug.append("None");
LOG(VB_GENERAL, LOG_INFO, LOC + QString("V4L2 codecs supported: %1").arg(gdebug.join(",")));
}

if (!Codec)
return !s_supportedV4L2Codecs.isEmpty();
return s_supportedV4L2Codecs.contains(Codec);
return s_profiles;
}

void MythV4L2M2MContext::GetDecoderList(QStringList &Decoders)
{
const V4L2Profiles& profiles = MythV4L2M2MContext::GetProfiles();
if (profiles.isEmpty())
return;

QSize size(0, 0);
Decoders.append("V4L2:");
for (MythCodecContext::CodecProfile profile : profiles)
Decoders.append(MythCodecContext::GetProfileDescription(profile, size));
}

bool MythV4L2M2MContext::HaveV4L2Codecs(void)
{
static QMutex lock(QMutex::Recursive);
QMutexLocker locker(&lock);
static bool s_checked = false;
static bool s_available = false;

if (s_checked)
return s_available;
s_checked = true;

const V4L2Profiles& profiles = MythV4L2M2MContext::GetProfiles();
if (profiles.isEmpty())
{
LOG(VB_GENERAL, LOG_INFO, LOC + "No V4L2 decoders found");
return s_available;
}

LOG(VB_GENERAL, LOG_INFO, LOC + "Supported/available V4L2 decoders:");
s_available = true;
QSize size{0, 0};
foreach (auto profile, profiles)
LOG(VB_GENERAL, LOG_INFO, LOC + MythCodecContext::GetProfileDescription(profile, size));
return s_available;
}

AVPixelFormat MythV4L2M2MContext::GetV4L2RequestFormat(AVCodecContext *Context, const AVPixelFormat *PixFmt)
Expand Down
8 changes: 7 additions & 1 deletion mythtv/libs/libmythtv/decoders/mythv4l2m2mcontext.h
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,8 @@
// MythTV
#include "mythdrmprimecontext.h"

using V4L2Profiles = QList<MythCodecContext::CodecProfile>;

class MythV4L2M2MContext : public MythDRMPRIMEContext
{
public:
Expand All @@ -20,10 +22,14 @@ class MythV4L2M2MContext : public MythDRMPRIMEContext
int HwDecoderInit (AVCodecContext *Context) override;
bool DecoderWillResetOnFlush (void) override;
static bool GetBuffer (AVCodecContext *Context, VideoFrame *Frame, AVFrame *AvFrame, int/*Flags*/);
static bool HaveV4L2Codecs (AVCodecID Codec = AV_CODEC_ID_NONE);
static bool HaveV4L2Codecs (void);
static void GetDecoderList (QStringList &Decoders);

static enum AVPixelFormat GetV4L2RequestFormat(AVCodecContext *Context, const AVPixelFormat *PixFmt);
static int InitialiseV4L2RequestContext(AVCodecContext *Context);

protected:
static const V4L2Profiles& GetProfiles(void);
};

#endif // MYTHV4L2M2MCONTEXT_H
14 changes: 14 additions & 0 deletions mythtv/libs/libmythtv/decoders/mythvaapicontext.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -593,6 +593,20 @@ const VAAPIProfiles &MythVAAPIContext::GetProfiles(void)
av_freep(&profilelist);
av_buffer_unref(&hwdevicectx);

// Once only check for EGL support for best performance
MythRenderOpenGL* render = MythRenderOpenGL::GetOpenGLRender();
if (!s_profiles.isEmpty() && render)
{
if (render->IsEGL())
{
LOG(VB_GENERAL, LOG_INFO, LOC + "EGL DMABUF available for best VAAPI performance");
}
else
{
LOG(VB_GENERAL, LOG_WARNING, LOC + "No EGL support. VAAPI performance will be reduced");
LOG(VB_GENERAL, LOG_WARNING, LOC + "Consider setting MYTHTV_FORCE_EGL=1 to try and enable");
}
}
return s_profiles;
}

Expand Down
2 changes: 1 addition & 1 deletion mythtv/libs/libmythtv/decoders/mythvaapicontext.h
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ extern "C" {
#include "libavfilter/buffersrc.h"
}

typedef QPair<MythCodecContext::CodecProfile,QPair<QSize,QSize>> VAAPIProfile;
using VAAPIProfile = QPair<MythCodecContext::CodecProfile,QPair<QSize,QSize>>;
using VAAPIProfiles = QList<VAAPIProfile>;

class MTV_PUBLIC MythVAAPIContext : public MythCodecContext
Expand Down
34 changes: 24 additions & 10 deletions mythtv/libs/libmythtv/decoders/mythvdpaucontext.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -159,24 +159,38 @@ MythCodecID MythVDPAUContext::GetSupportedCodec(AVCodecContext **Context,
VideoFrameType type = PixelFormatToFrameType((*Context)->pix_fmt);
bool vdpau = (type == FMT_YV12) && MythVDPAUHelper::HaveVDPAU() &&
(decodeonly ? codec_is_vdpau_dechw(success) : codec_is_vdpau_hw(success));
if (vdpau && (success == kCodec_MPEG4_VDPAU || success == kCodec_MPEG4_VDPAU_DEC))
vdpau = MythVDPAUHelper::HaveMPEG4Decode();

if (vdpau)
{
MythCodecContext::CodecProfile mythprofile =
MythCodecContext::FFmpegToMythProfile((*Context)->codec_id, (*Context)->profile);
const VDPAUProfiles& profiles = MythVDPAUHelper::GetProfiles();
vdpau = false;
for (auto vdpauprofile : profiles)
{
if (vdpauprofile.first == mythprofile &&
vdpauprofile.second.Supported((*Context)->width, (*Context)->height, (*Context)->level))
{
vdpau = true;
break;
}
}
}

// H264 needs additional checks for old hardware
if (vdpau && (success == kCodec_H264_VDPAU || success == kCodec_H264_VDPAU_DEC))
vdpau = MythVDPAUHelper::CheckH264Decode(*Context);
if (vdpau && (success == kCodec_HEVC_VDPAU || success == kCodec_HEVC_VDPAU_DEC))
vdpau = MythVDPAUHelper::CheckHEVCDecode(*Context);

QString desc = QString("'%1 %2 %3 %4x%5'")
.arg(codec).arg(profile).arg(pixfmt).arg((*Context)->width).arg((*Context)->height);

if (!vdpau)
{
LOG(VB_PLAYBACK, LOG_INFO, LOC + QString("HW device type '%1' does not support decoding '%2 %3 %4'")
.arg(av_hwdevice_get_type_name(AV_HWDEVICE_TYPE_VDPAU)).arg(codec)
.arg(profile).arg(pixfmt));
LOG(VB_PLAYBACK, LOG_INFO, LOC + QString("VDPAU does not support decoding %1").arg(desc));
return failure;
}

LOG(VB_PLAYBACK, LOG_INFO, LOC + QString("HW device type '%1' supports decoding '%2 %3 %4'")
.arg(av_hwdevice_get_type_name(AV_HWDEVICE_TYPE_VDPAU)).arg(codec)
.arg(profile).arg(pixfmt));
LOG(VB_PLAYBACK, LOG_INFO, LOC + QString("VDPAU supports decoding %1").arg(desc));
(*Context)->pix_fmt = AV_PIX_FMT_VDPAU;
return success;
}
Expand Down
268 changes: 160 additions & 108 deletions mythtv/libs/libmythtv/decoders/mythvdpauhelper.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -7,10 +7,6 @@
// Std
#include <cmath>

QMutex MythVDPAUHelper::gVDPAULock(QMutex::Recursive);
bool MythVDPAUHelper::gVDPAUAvailable = false;
bool MythVDPAUHelper::gVDPAUMPEG4Available = false;

#define LOC QString("VDPAUHelp: ")

#define INIT_ST \
Expand All @@ -29,31 +25,170 @@ if (!ok) \
#define GET_PROC(FUNC_ID, PROC) \
status = m_vdpGetProcAddress(m_device, FUNC_ID, reinterpret_cast<void **>(&(PROC))); CHECK_ST

VDPAUCodec::VDPAUCodec(MythCodecContext::CodecProfile Profile, QSize Size, uint32_t Macroblocks, uint32_t Level)
: m_maxSize(Size),
m_maxMacroBlocks(Macroblocks),
m_maxLevel(Level)
{
// Levels don't work for MPEG1/2
if (MythCodecContext::MPEG1 <= Profile && Profile <= MythCodecContext::MPEG2SNR)
m_maxLevel = 1000;
}

bool VDPAUCodec::Supported(int Width, int Height, int Level)
{
uint32_t macros = static_cast<uint32_t>(((Width + 15) & ~15) * ((Height + 15) & ~15)) / 256;
return (Width <= m_maxSize.width()) && (Height <= m_maxSize.height()) &&
(macros <= m_maxMacroBlocks) && (static_cast<uint32_t>(Level) <= m_maxLevel);
}

bool MythVDPAUHelper::HaveVDPAU(void)
{
QMutexLocker locker(&gVDPAULock);
static QMutex s_mutex;
static bool s_checked = false;
static bool s_available = false;

QMutexLocker locker(&s_mutex);
if (s_checked)
return gVDPAUAvailable;
return s_available;

{
MythVDPAUHelper vdpau;
s_available = vdpau.IsValid();
}

MythVDPAUHelper vdpau;
gVDPAUAvailable = vdpau.IsValid();
s_checked = true;
if (gVDPAUAvailable)
if (s_available)
{
LOG(VB_GENERAL, LOG_INFO, LOC + "VDPAU is available");
gVDPAUMPEG4Available = vdpau.CheckMPEG4();
LOG(VB_GENERAL, LOG_INFO, LOC + "Supported/available VDPAU decoders:");
const VDPAUProfiles& profiles = MythVDPAUHelper::GetProfiles();
foreach (auto profile, profiles)
LOG(VB_GENERAL, LOG_INFO, LOC +
MythCodecContext::GetProfileDescription(profile.first, profile.second.m_maxSize));
}
else
{
LOG(VB_GENERAL, LOG_INFO, LOC + "VDPAU is NOT available");
}
return gVDPAUAvailable;
return s_available;
}

bool MythVDPAUHelper::ProfileCheck(VdpDecoderProfile Profile, uint32_t &Level,
uint32_t &Macros, uint32_t &Width, uint32_t &Height)
{
if (!m_device)
return false;

INIT_ST
VdpBool supported = 0;
status = m_vdpDecoderQueryCapabilities(m_device, Profile, &supported,
&Level, &Macros, &Width, &Height);
CHECK_ST
return supported > 0;
}

const VDPAUProfiles& MythVDPAUHelper::GetProfiles(void)
{
static const VdpDecoderProfile MainProfiles[] =
{
VDP_DECODER_PROFILE_MPEG1, VDP_DECODER_PROFILE_MPEG2_SIMPLE, VDP_DECODER_PROFILE_MPEG2_MAIN,
VDP_DECODER_PROFILE_MPEG4_PART2_SP, VDP_DECODER_PROFILE_MPEG4_PART2_ASP,
VDP_DECODER_PROFILE_VC1_SIMPLE, VDP_DECODER_PROFILE_VC1_MAIN, VDP_DECODER_PROFILE_VC1_ADVANCED,
VDP_DECODER_PROFILE_H264_BASELINE, VDP_DECODER_PROFILE_H264_MAIN, VDP_DECODER_PROFILE_H264_HIGH,
VDP_DECODER_PROFILE_H264_EXTENDED, VDP_DECODER_PROFILE_H264_CONSTRAINED_BASELINE,
VDP_DECODER_PROFILE_H264_CONSTRAINED_HIGH, VDP_DECODER_PROFILE_H264_HIGH_444_PREDICTIVE
};

static const VdpDecoderProfile HEVCProfiles[] =
{
VDP_DECODER_PROFILE_HEVC_MAIN, VDP_DECODER_PROFILE_HEVC_MAIN_10,
VDP_DECODER_PROFILE_HEVC_MAIN_STILL, VDP_DECODER_PROFILE_HEVC_MAIN_444
};

auto VDPAUToMythProfile = [](VdpDecoderProfile Profile)
{
switch (Profile)
{
case VDP_DECODER_PROFILE_MPEG1: return MythCodecContext::MPEG1;
case VDP_DECODER_PROFILE_MPEG2_SIMPLE: return MythCodecContext::MPEG2Simple;
case VDP_DECODER_PROFILE_MPEG2_MAIN: return MythCodecContext::MPEG2Main;

case VDP_DECODER_PROFILE_MPEG4_PART2_SP: return MythCodecContext::MPEG4Simple;
case VDP_DECODER_PROFILE_MPEG4_PART2_ASP: return MythCodecContext::MPEG4AdvancedSimple;

case VDP_DECODER_PROFILE_VC1_SIMPLE: return MythCodecContext::VC1Simple;
case VDP_DECODER_PROFILE_VC1_MAIN: return MythCodecContext::VC1Main;
case VDP_DECODER_PROFILE_VC1_ADVANCED: return MythCodecContext::VC1Advanced;

case VDP_DECODER_PROFILE_H264_BASELINE: return MythCodecContext::H264Baseline;
case VDP_DECODER_PROFILE_H264_MAIN: return MythCodecContext::H264Main;
case VDP_DECODER_PROFILE_H264_HIGH: return MythCodecContext::H264High;
case VDP_DECODER_PROFILE_H264_EXTENDED: return MythCodecContext::H264Extended;
case VDP_DECODER_PROFILE_H264_CONSTRAINED_BASELINE: return MythCodecContext::H264ConstrainedBaseline;
case VDP_DECODER_PROFILE_H264_CONSTRAINED_HIGH: return MythCodecContext::H264ConstrainedHigh;
case VDP_DECODER_PROFILE_H264_HIGH_444_PREDICTIVE: return MythCodecContext::H264High444; // ?

case VDP_DECODER_PROFILE_HEVC_MAIN: return MythCodecContext::HEVCMain;
case VDP_DECODER_PROFILE_HEVC_MAIN_10: return MythCodecContext::HEVCMain10;
case VDP_DECODER_PROFILE_HEVC_MAIN_STILL: return MythCodecContext::HEVCMainStill;
case VDP_DECODER_PROFILE_HEVC_MAIN_444: return MythCodecContext::HEVCRext;
}
return MythCodecContext::NoProfile;
};

static QMutex lock(QMutex::Recursive);
static bool s_initialised = false;
static VDPAUProfiles s_profiles;

QMutexLocker locker(&lock);
if (s_initialised)
return s_profiles;
s_initialised = true;

MythVDPAUHelper helper;
if (!helper.IsValid())
return s_profiles;

uint32_t level = 0;
uint32_t macros = 0;
uint32_t width = 0;
uint32_t height = 0;
for (VdpDecoderProfile profile : MainProfiles)
{
if (helper.ProfileCheck(profile, level, macros, width, height))
{
MythCodecContext::CodecProfile prof = VDPAUToMythProfile(profile);
s_profiles.append(VDPAUProfile(prof,
VDPAUCodec(prof, QSize(static_cast<int>(width), static_cast<int>(height)), macros, level)));
}
}

if (helper.HEVCSupported())
{
for (VdpDecoderProfile profile : HEVCProfiles)
{
if (helper.ProfileCheck(profile, level, macros, width, height))
{
MythCodecContext::CodecProfile prof = VDPAUToMythProfile(profile);
s_profiles.append(VDPAUProfile(prof,
VDPAUCodec(prof, QSize(static_cast<int>(width), static_cast<int>(height)), macros, level)));
}
}
}

return s_profiles;
}

bool MythVDPAUHelper::HaveMPEG4Decode(void)
void MythVDPAUHelper::GetDecoderList(QStringList &Decoders)
{
return gVDPAUMPEG4Available;
const VDPAUProfiles& profiles = MythVDPAUHelper::GetProfiles();
if (profiles.isEmpty())
return;

Decoders.append("VDPAU:");
foreach (auto profile, profiles)
if (profile.first != MythCodecContext::MJPEG)
Decoders.append(MythCodecContext::GetProfileDescription(profile.first, profile.second.m_maxSize));
}

static void vdpau_preemption_callback(VdpDevice /*unused*/, void* Opaque)
Expand Down Expand Up @@ -155,44 +290,11 @@ void MythVDPAUHelper::SetPreempted(void)
emit DisplayPreempted();
}

bool MythVDPAUHelper::CheckMPEG4(void)
bool MythVDPAUHelper::HEVCSupported(void)
{
if (!m_valid)
return false;

#ifdef VDP_DECODER_PROFILE_MPEG4_PART2_ASP
INIT_ST
VdpBool supported = false;
uint32_t tmp1 = 0;
uint32_t tmp2 = 0;
uint32_t tmp3 = 0;
uint32_t tmp4 = 0;
status = m_vdpDecoderQueryCapabilities(m_device,
VDP_DECODER_PROFILE_MPEG4_PART2_ASP, &supported,
&tmp1, &tmp2, &tmp3, &tmp4);
CHECK_ST
return supported;
#else
return false;
#endif
}

bool MythVDPAUHelper::CheckHEVCDecode(AVCodecContext *Context)
{
if (!Context)
return false;

MythVDPAUHelper vdpau;
if (vdpau.IsValid())
return vdpau.HEVCProfileCheck(Context);
return false;
}

bool MythVDPAUHelper::HEVCProfileCheck(AVCodecContext *Context)
{
if (!m_valid || !Context)
return false;

// FFmpeg will disallow HEVC VDPAU for driver versions < 410
const char* infostring = nullptr;
INIT_ST
Expand All @@ -203,44 +305,21 @@ bool MythVDPAUHelper::HEVCProfileCheck(AVCodecContext *Context)

int driver = 0;
sscanf(infostring, "NVIDIA VDPAU Driver Shared Library %d", &driver);
if (driver < 410)
return false;

VdpDecoderProfile profile = 0;
switch (Context->profile)
{
#ifdef VDP_DECODER_PROFILE_HEVC_MAIN
case FF_PROFILE_HEVC_MAIN: profile = VDP_DECODER_PROFILE_HEVC_MAIN; break;
#endif
#ifdef VDP_DECODER_PROFILE_HEVC_MAIN_10
case FF_PROFILE_HEVC_MAIN_10: profile = VDP_DECODER_PROFILE_HEVC_MAIN_10; break;
#endif
#ifdef VDP_DECODER_PROFILE_HEVC_MAIN_STILL
case FF_PROFILE_HEVC_MAIN_STILL_PICTURE: profile = VDP_DECODER_PROFILE_HEVC_MAIN_STILL; break;
#endif
default: return false;
}

VdpBool supported = false;
uint32_t level = 0;
uint32_t macros = 0;
uint32_t width = 0;
uint32_t height = 0;
status = m_vdpDecoderQueryCapabilities(m_device, profile, &supported, &level, &macros, &width, &height);
CHECK_ST
if (!supported)
return false;

return (width >= static_cast<uint>(Context->width)) &&
(height >= static_cast<uint>(Context->height)) &&
(level >= static_cast<uint>(Context->level));
return !(driver < 410);
}

bool MythVDPAUHelper::CheckH264Decode(AVCodecContext *Context)
{
if (!Context)
return false;

int mbs = static_cast<int>(ceil(static_cast<double>(Context->width) / 16.0));
if (!(mbs == 49 ) || (mbs == 54 ) || (mbs == 59 ) || (mbs == 64) ||
(mbs == 113) || (mbs == 118) || (mbs == 123) || (mbs == 128))
{
return true;
}

VdpDecoderProfile profile = 0;
switch (Context->profile & ~FF_PROFILE_H264_INTRA)
{
Expand All @@ -262,17 +341,11 @@ bool MythVDPAUHelper::CheckH264Decode(AVCodecContext *Context)
default: return false;
}

int mbs = static_cast<int>(ceil(static_cast<double>(Context->width) / 16.0));
int check = (mbs == 49 ) || (mbs == 54 ) || (mbs == 59 ) || (mbs == 64) ||
(mbs == 113) || (mbs == 118) || (mbs == 123) || (mbs == 128);

// Create an instance
MythVDPAUHelper helper;
if (!helper.IsValid())
return false;
if (check)
if (helper.IsValid())
return helper.H264DecodeCheck(profile, Context);
return helper.H264ProfileCheck(profile, Context);
return false;
}

bool MythVDPAUHelper::H264DecodeCheck(VdpDecoderProfile Profile, AVCodecContext *Context)
Expand All @@ -295,27 +368,6 @@ bool MythVDPAUHelper::H264DecodeCheck(VdpDecoderProfile Profile, AVCodecContext
return ok;
}

bool MythVDPAUHelper::H264ProfileCheck(VdpDecoderProfile Profile, AVCodecContext *Context)
{
if (!m_valid || !Context)
return false;

VdpBool supported = false;
uint32_t level = 0;
uint32_t macros = 0;
uint32_t width = 0;
uint32_t height = 0;
INIT_ST
status = m_vdpDecoderQueryCapabilities(m_device, Profile, &supported, &level, &macros, &width, &height);
CHECK_ST
if (!supported)
return false;

return (width >= static_cast<uint>(Context->width)) &&
(height >= static_cast<uint>(Context->height)) &&
(level >= static_cast<uint>(Context->level));
}

VdpOutputSurface MythVDPAUHelper::CreateOutputSurface(QSize Size)
{
if (!m_valid)
Expand Down
31 changes: 21 additions & 10 deletions mythtv/libs/libmythtv/decoders/mythvdpauhelper.h
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@

// MythTV
#include "mythframe.h"
#include "mythcodeccontext.h"
#include "videoouttypes.h"

// FFmpeg
Expand All @@ -20,6 +21,20 @@ extern "C" {
class MythXDisplay;
class VideoColourSpace;

class VDPAUCodec
{
public:
VDPAUCodec(MythCodecContext::CodecProfile Profile, QSize Size,
uint32_t Macroblocks, uint32_t Level);
bool Supported(int Width, int Height, int Level);
QSize m_maxSize { 0, 0 };
uint32_t m_maxMacroBlocks { 0 };
uint32_t m_maxLevel { 0 };
};

using VDPAUProfile = QPair<MythCodecContext::CodecProfile, VDPAUCodec>;
using VDPAUProfiles = QList<VDPAUProfile>;

class MythVDPAUHelper : public QObject
{
Q_OBJECT
Expand All @@ -35,9 +50,9 @@ class MythVDPAUHelper : public QObject
Q_DECLARE_FLAGS(VDPMixerFeatures, VDPMixerFeature)

static bool HaveVDPAU(void);
static bool HaveMPEG4Decode(void);
static bool CheckH264Decode(AVCodecContext *Context);
static bool CheckHEVCDecode(AVCodecContext *Context);
static const VDPAUProfiles& GetProfiles(void);
static void GetDecoderList (QStringList &Decoders);

explicit MythVDPAUHelper(AVVDPAUDeviceContext *Context);
~MythVDPAUHelper(void) override;
Expand All @@ -61,14 +76,10 @@ class MythVDPAUHelper : public QObject
protected:
MythVDPAUHelper(void);

bool CheckMPEG4(void);
bool H264DecodeCheck(VdpDecoderProfile Profile, AVCodecContext *Context);
bool H264ProfileCheck(VdpDecoderProfile Profile, AVCodecContext *Context);
bool HEVCProfileCheck(AVCodecContext *Context);

static QMutex gVDPAULock;
static bool gVDPAUAvailable;
static bool gVDPAUMPEG4Available;
bool H264DecodeCheck (VdpDecoderProfile Profile, AVCodecContext *Context);
bool HEVCSupported (void);
bool ProfileCheck (VdpDecoderProfile Profile, uint32_t &Level,
uint32_t &Macros, uint32_t &Width, uint32_t &Height);

private:
bool InitProcs(void);
Expand Down
4 changes: 2 additions & 2 deletions mythtv/libs/libmythtv/libmythtv.pro
Original file line number Diff line number Diff line change
Expand Up @@ -385,7 +385,7 @@ using_frontend {

# Video output
HEADERS += mythvideoout.h mythvideooutnull.h
HEADERS += videobuffers.h vsync.h
HEADERS += videobuffers.h
HEADERS += jitterometer.h
HEADERS += videodisplayprofile.h mythcodecid.h
HEADERS += videoouttypes.h
Expand All @@ -394,7 +394,7 @@ using_frontend {
HEADERS += visualisations/videovisualdefs.h
HEADERS += mythdeinterlacer.h
SOURCES += mythvideoout.cpp mythvideooutnull.cpp
SOURCES += videobuffers.cpp vsync.cpp
SOURCES += videobuffers.cpp
SOURCES += jitterometer.cpp
SOURCES += videodisplayprofile.cpp mythcodecid.cpp
SOURCES += videooutwindow.cpp
Expand Down
2 changes: 1 addition & 1 deletion mythtv/libs/libmythtv/mpeg/mpegdescriptors.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -221,7 +221,7 @@ const char *descriptor_tag_strings[256] =
/* 0x0C */ "Multiplex Buffer Utilization", /* 0x0D */ "Copyright",
/* 0x0E */ "Maximum Bitrate", /* 0x0F */ "Private Data Indicator",

/* 0x10 */ "Smooting Buffer", /* 0x11 */ "STD",
/* 0x10 */ "Smoothing Buffer", /* 0x11 */ "STD",
/* 0x12 */ "IBP", /* 0x13 */ "DSM-CC Carousel Identifier",
/* 0x14 */ "DSM-CC Association Tag",
/* 0x15 */ "DSM-CC Deferred Association Tag",
Expand Down
22 changes: 20 additions & 2 deletions mythtv/libs/libmythtv/mpeg/sctetables.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -305,10 +305,18 @@ bool ShortVirtualChannelTable::Parse(void)
bool descriptors_included = (pesdata()[7] & 0x20) != 0;
uint number_of_vc_records = pesdata()[13];
const unsigned char *next = pesdata() + 14;
const unsigned char *end = pesdata()+Length();
bool ok = true;

if (!descriptors_included)
{
for (uint i = 0; i < number_of_vc_records; i++)
for (uint i = 0; i < number_of_vc_records && ok; i++)
{
if (next + 10 >= end)
{
ok = false;
break;
}
m_ptrs.push_back(next);
next += 9;
}
Expand All @@ -323,7 +331,12 @@ bool ShortVirtualChannelTable::Parse(void)
next += 10;
for (uint j = 0; j < desc_count; j++)
{
MPEGDescriptor desc(next);
if (next >= end)
{
ok = false;
break;
}
MPEGDescriptor desc(next, end-next);
if (!desc.IsValid())
{
m_ptrs.clear();
Expand All @@ -334,6 +347,11 @@ bool ShortVirtualChannelTable::Parse(void)
}
}
m_ptrs.push_back(next);
if (!ok || next >= end)
{
m_ptrs.clear();
return false;
}
}
else if (kInverseChannelMap == TableSubtype())
{
Expand Down
2 changes: 1 addition & 1 deletion mythtv/libs/libmythtv/mythframe.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@ const char* format_description(VideoFrameType Type)
{
case FMT_NONE: return "None";
case FMT_RGB24: return "RGB24";
case FMT_YV12: return "YV12";
case FMT_YV12: return "YUV420P";
case FMT_RGB32: return "RGB32";
case FMT_ARGB32: return "ARGB32";
case FMT_RGBA32: return "RGBA32";
Expand Down
95 changes: 19 additions & 76 deletions mythtv/libs/libmythtv/mythplayer.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,6 @@ using namespace std;
#include <mythmainwindow.h>

extern "C" {
#include "vsync.h"
#include "libavcodec/avcodec.h"
}

Expand Down Expand Up @@ -199,9 +198,6 @@ MythPlayer::~MythPlayer(void)
delete m_decoderThread;
m_decoderThread = nullptr;

delete m_videoSync;
m_videoSync = nullptr;

delete m_videoOutput;
m_videoOutput = nullptr;

Expand Down Expand Up @@ -469,20 +465,6 @@ void MythPlayer::ReinitVideo(bool ForceUpdate)
return;
}

// the display refresh rate may have been changed by VideoOutput
if (m_videoSync)
{
int ri = m_display->GetRefreshInterval(m_frameInterval);
if (ri != m_videoSync->getRefreshInterval())
{
LOG(VB_PLAYBACK, LOG_INFO, LOC +
QString("Refresh rate has changed from %1 to %2")
.arg(m_videoSync->getRefreshInterval())
.arg(ri));
m_videoSync->setRefreshInterval(ri);
}
}

if (m_osd)
m_osd->SetPainter(m_videoOutput->GetOSDPainter());
ReinitOSD();
Expand Down Expand Up @@ -590,7 +572,7 @@ void MythPlayer::SetScanType(FrameScanType scan)
return;
}

if (!m_videoOutput || !m_videoSync)
if (!m_videoOutput)
return; // hopefully this will be called again later...

m_resetScan = kScan_Ignore;
Expand Down Expand Up @@ -716,14 +698,14 @@ void MythPlayer::OpenDummy(void)

void MythPlayer::CreateDecoder(char *TestBuffer, int TestSize)
{
if (NuppelDecoder::CanHandle(TestBuffer, TestSize))
if (AvFormatDecoder::CanHandle(TestBuffer, m_playerCtx->m_buffer->GetFilename(), TestSize))
{
SetDecoder(new NuppelDecoder(this, *m_playerCtx->m_playingInfo));
SetDecoder(new AvFormatDecoder(this, *m_playerCtx->m_playingInfo, m_playerFlags));
return;
}

if (AvFormatDecoder::CanHandle(TestBuffer, m_playerCtx->m_buffer->GetFilename(), TestSize))
SetDecoder(new AvFormatDecoder(this, *m_playerCtx->m_playingInfo, m_playerFlags));
if (NuppelDecoder::CanHandle(TestBuffer, TestSize))
SetDecoder(new NuppelDecoder(this, *m_playerCtx->m_playingInfo));
}

int MythPlayer::OpenFile(int Retries)
Expand Down Expand Up @@ -1519,8 +1501,6 @@ void MythPlayer::SetFrameInterval(FrameScanType scan, double frame_period)

LOG(VB_PLAYBACK, LOG_INFO, LOC + QString("SetFrameInterval Interval:%1 Speed:%2 Scan:%3 (Multiplier: %4)")
.arg(m_frameInterval).arg(static_cast<double>(m_playSpeed)).arg(toQString(scan)).arg(m_fpsMultiplier));
if (m_playSpeed < 1 || m_playSpeed > 2 || m_refreshRate <= 0)
return;
}

void MythPlayer::ResetAVSync(void)
Expand All @@ -1536,28 +1516,18 @@ void MythPlayer::ResetAVSync(void)

void MythPlayer::InitAVSync(void)
{
m_videoSync->Start();

m_refreshRate = m_display->GetRefreshInterval(m_frameInterval);

m_rtcBase = 0;
m_priorAudioTimecode = 0;
m_priorVideoTimecode = 0;
m_lastFix = 0.0;

if (!FlagIsSet(kVideoIsNull))
{
QString timing_type = m_videoSync->getName();

QString msg = QString("Video timing method: %1").arg(timing_type);
LOG(VB_GENERAL, LOG_INFO, LOC + msg);
msg = QString("Display Refresh Rate: %1 Video Frame Rate: %2")
.arg(1000000.0 / m_refreshRate, 0, 'f', 3)
.arg(1000000.0 / m_frameInterval, 0, 'f', 3);
LOG(VB_PLAYBACK, LOG_INFO, LOC + msg);
LOG(VB_PLAYBACK, LOG_INFO, LOC + QString("Display Refresh Rate: %1 Video Frame Rate: %2")
.arg(1000000.0 / m_display->GetRefreshInterval(m_frameInterval), 0, 'f', 3)
.arg(1000000.0 / m_frameInterval, 0, 'f', 3));

SetFrameInterval(m_scan,
1.0 / (m_videoFrameRate * static_cast<double>(m_playSpeed)));
SetFrameInterval(m_scan, 1.0 / (m_videoFrameRate * static_cast<double>(m_playSpeed)));

// try to get preferential scheduling, but ignore if we fail to.
myth_nice(-19);
Expand Down Expand Up @@ -1838,7 +1808,7 @@ void MythPlayer::RefreshPauseFrame(void)

void MythPlayer::DisplayPauseFrame(void)
{
if (!m_videoOutput || ! m_videoSync)
if (!m_videoOutput)
return;

if (m_videoOutput->IsErrored())
Expand All @@ -1859,7 +1829,6 @@ void MythPlayer::DisplayPauseFrame(void)
m_videoOutput->PrepareFrame(nullptr, scan, m_osd);
m_osdLock.unlock();
m_videoOutput->Show(scan);
m_videoSync->Start();
}

void MythPlayer::SetBuffering(bool new_buffering)
Expand Down Expand Up @@ -1903,6 +1872,11 @@ bool MythPlayer::PrebufferEnoughFrames(int min_buffers)
{
uint64_t frameCount = GetCurrentFrameCount();
uint64_t framesLeft = frameCount - m_framesPlayed;
// Sometimes m_framesPlayed > frameCount. Until that can
// be fixed, set framesLeft = 0 so the forced pause below
// is performed.
if (m_framesPlayed > frameCount)
framesLeft = 0;
auto margin = (uint64_t) (m_videoFrameRate * 3);
if (framesLeft < margin)
{
Expand Down Expand Up @@ -1985,8 +1959,6 @@ bool MythPlayer::PrebufferEnoughFrames(int min_buffers)
"Waited too long for decoder to fill video buffers. Exiting..");
SetErrored(tr("Video frame buffering failed too many times."));
}
if (m_normalSpeed)
m_videoSync->Start();
return false;
}

Expand Down Expand Up @@ -2085,15 +2057,8 @@ void MythPlayer::PreProcessNormalFrame(void)
bool MythPlayer::CanSupportDoubleRate(void)
{
int refreshinterval = 1;
if (m_videoSync)
{
refreshinterval = m_videoSync->getRefreshInterval();
}
else if (m_display)
{
// used by the decoder before m_videoSync is created
if (m_display)
refreshinterval = m_display->GetRefreshInterval(m_frameInterval);
}

// At this point we may not have the correct frame rate.
// Since interlaced is always at 25 or 30 fps, if the interval
Expand Down Expand Up @@ -2176,14 +2141,8 @@ void MythPlayer::VideoStart(void)
ClearAfterSeek(false);

m_avsyncAvg = 0; // Frames till next sync check
m_refreshRate = 0;

EnableFrameRateMonitor();
m_refreshRate = m_frameInterval;

float temp_speed = (m_playSpeed == 0.0F) ? m_audio.GetStretchFactor() : m_playSpeed;
int fr_int = static_cast<int>(1000000.0 / m_videoFrameRate / static_cast<double>(temp_speed));
int displayinterval = m_display->GetRefreshInterval(fr_int);

// Default to interlaced playback but set the tracker to progressive
// Enable autodetection of interlaced/progressive from video stream
Expand All @@ -2198,27 +2157,13 @@ void MythPlayer::VideoStart(void)
m_doubleFramerate = false;
m_scanTracker = -2;

if (m_playerCtx->IsPIP() && FlagIsSet(kVideoIsNull))
if (!FlagIsSet(kVideoIsNull) && m_videoOutput)
{
m_videoSync = new DummyVideoSync(m_videoOutput, 0);
}
else if (FlagIsSet(kVideoIsNull))
{
m_videoSync = new USleepVideoSync(m_videoOutput, 0);
}
else if (m_videoOutput)
{
m_videoSync = VideoSync::BestMethod(m_videoOutput, static_cast<uint>(displayinterval));
m_doubleFramerate = CanSupportDoubleRate(); // needs m_videoSync
m_doubleFramerate = CanSupportDoubleRate();
m_videoOutput->SetDeinterlacing(true, m_doubleFramerate);
}

if (!m_videoSync)
m_videoSync = new BusyWaitVideoSync(m_videoOutput, displayinterval);

InitAVSync();
m_videoSync->Start();

AutoVisualise();
}

Expand Down Expand Up @@ -2258,10 +2203,8 @@ void MythPlayer::VideoEnd(void)
m_osdLock.lock();
m_vidExitLock.lock();
delete m_osd;
delete m_videoSync;
delete m_videoOutput;
m_osd = nullptr;
m_videoSync = nullptr;
m_videoOutput = nullptr;
m_vidExitLock.unlock();
m_osdLock.unlock();
Expand Down Expand Up @@ -3638,7 +3581,7 @@ void MythPlayer::ChangeSpeed(void)
.arg(m_videoFrameRate).arg(static_cast<double>(m_playSpeed))
.arg(m_ffrewSkip).arg(m_frameInterval));

if (m_videoOutput && m_videoSync)
if (m_videoOutput)
m_videoOutput->SetVideoFrameRate(static_cast<float>(m_videoFrameRate));

// ensure we re-check double rate support following a speed change
Expand Down
3 changes: 0 additions & 3 deletions mythtv/libs/libmythtv/mythplayer.h
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,6 @@ class RemoteEncoder;
class MythSqlDatabase;
class ProgramInfo;
class DecoderBase;
class VideoSync;
class LiveTVChain;
class TV;
struct SwsContext;
Expand Down Expand Up @@ -851,10 +850,8 @@ class MTV_PUBLIC MythPlayer
bool m_fileChanged {false};

// Audio and video synchronization stuff
VideoSync *m_videoSync {nullptr};
int m_avsyncAvg {0};
int m_avsyncPredictor {0};
int m_refreshRate {0};
int64_t m_dispTimecode {0};
bool m_avsyncAudioPaused {false};
int64_t m_rtcBase {0}; // real time clock base for presentation time (microsecs)
Expand Down
38 changes: 25 additions & 13 deletions mythtv/libs/libmythtv/opengl/mythdrmprimeinterop.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -113,34 +113,37 @@ vector<MythVideoTexture*> MythDRMPRIMEInterop::Acquire(MythRenderOpenGL *Context

bool firstpass = m_openglTextures.isEmpty();
bool interlaced = is_interlaced(Scan);
bool composed = static_cast<uint>(drmdesc->nb_layers) == 1;
bool composed = static_cast<uint>(drmdesc->nb_layers) == 1 && m_composable;
auto id = reinterpret_cast<unsigned long long>(drmdesc);

// Separate texture for each plane
if (!composed)
auto Separate = [=]()
{
vector<MythVideoTexture*> textures;
if (!m_openglTextures.contains(id))
{
result = CreateTextures(drmdesc, m_context, Frame);
m_openglTextures.insert(id, result);
textures = CreateTextures(drmdesc, m_context, Frame, true);
m_openglTextures.insert(id, textures);
}
else
{
result = m_openglTextures[id];
textures = m_openglTextures[id];
}

if (!result.empty() ? format_is_yuv(result[0]->m_frameType) : false)
if (textures.empty() ? false : format_is_yuv(textures[0]->m_frameFormat))
{
// YUV frame - enable picture attributes
// Enable colour controls for YUV frame
if (firstpass)
ColourSpace->SetSupportedAttributes(ALL_PICTURE_ATTRIBUTES);
ColourSpace->UpdateColourSpace(Frame);

// Enable shader based deinterlacing for YUV frames
// and shader based deinterlacing
Frame->deinterlace_allowed = Frame->deinterlace_allowed | DEINT_SHADER;
}
return result;
}
return textures;
};

// Separate texture for each plane
if (!composed)
return Separate();

// Single RGB texture
// Disable picture attributes
Expand Down Expand Up @@ -173,8 +176,17 @@ vector<MythVideoTexture*> MythDRMPRIMEInterop::Acquire(MythRenderOpenGL *Context
{
// This will create 2 half height textures representing the top and bottom fields
// if deinterlacing
result = CreateTextures(drmdesc, m_context, Frame,
result = CreateTextures(drmdesc, m_context, Frame, false,
m_deinterlacing ? kScan_Interlaced : kScan_Progressive);
// Fallback to separate textures if the driver does not support composition
if (result.empty())
{
m_composable = false;
m_deinterlacing = false;
DeleteTextures();
LOG(VB_GENERAL, LOG_INFO, LOC + "YUV composition failed. Trying separate textures.");
return Separate();
}
m_openglTextures.insert(id, result);
}
else
Expand Down
3 changes: 2 additions & 1 deletion mythtv/libs/libmythtv/opengl/mythdrmprimeinterop.h
Original file line number Diff line number Diff line change
Expand Up @@ -24,8 +24,9 @@ class MythDRMPRIMEInterop : public MythOpenGLInterop, public MythEGLDMABUF
static Type GetInteropType(VideoFrameType Format);

private:
AVDRMFrameDescriptor* VerifyBuffer(MythRenderOpenGL *Context, VideoFrame *Frame);
AVDRMFrameDescriptor* VerifyBuffer(MythRenderOpenGL *Context, VideoFrame *Frame);
bool m_deinterlacing { false };
bool m_composable { true };
};

#endif // MYTHDRMPRIMEINTEROP_H
163 changes: 158 additions & 5 deletions mythtv/libs/libmythtv/opengl/mythegldmabuf.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,6 @@ MythEGLDMABUF::MythEGLDMABUF(MythRenderOpenGL *Context)
{
OpenGLLocker locker(Context);
m_useModifiers = Context->IsEGL() && Context->HasEGLExtension("EGL_EXT_image_dma_buf_import_modifiers");
QSurfaceFormat fmt = Context->format();
}
}

Expand Down Expand Up @@ -55,6 +54,11 @@ static void inline DebugDRMFrame(AVDRMFrameDescriptor* Desc)
.arg(i).arg(Desc->objects[i].fd).arg(Desc->objects[i].format_modifier, 0 , 16));
}

/*! \brief Create a single RGBA32 texture using the provided AVDRMFrameDescriptor.
*
* \note This assumes one layer with multiple planes, typically where the layer
* is a YUV format.
*/
inline vector<MythVideoTexture*> MythEGLDMABUF::CreateComposed(AVDRMFrameDescriptor* Desc,
MythRenderOpenGL *Context,
VideoFrame *Frame, FrameScanType Scan) const
Expand All @@ -68,8 +72,13 @@ inline vector<MythVideoTexture*> MythEGLDMABUF::CreateComposed(AVDRMFrameDescrip
vector<MythVideoTexture*> textures =
MythVideoTexture::CreateTextures(Context, Frame->codec, FMT_RGBA32, sizes,
GL_TEXTURE_EXTERNAL_OES);
for (auto & texture : textures)
texture->m_allowGLSLDeint = false;
if (textures.empty())
{
ClearDMATextures(Context, result);
return result;
}

textures[0]->m_allowGLSLDeint = false;

EGLint colourspace = EGL_ITU_REC709_EXT;
switch (Frame->colorspace)
Expand Down Expand Up @@ -144,7 +153,13 @@ inline vector<MythVideoTexture*> MythEGLDMABUF::CreateComposed(AVDRMFrameDescrip
EGLImageKHR image = Context->eglCreateImageKHR(Context->GetEGLDisplay(), EGL_NO_CONTEXT,
EGL_LINUX_DMA_BUF_EXT, nullptr, attribs.data());
if (!image)
{
LOG(VB_GENERAL, LOG_ERR, LOC + QString("No EGLImage '%1'").arg(Context->GetEGLError()));
// Ensure we release anything already created and return nothing
ClearDMATextures(Context, result);
return result;
}

MythVideoTexture *texture = textures[0];
Context->glBindTexture(texture->m_target, texture->m_textureId);
Context->eglImageTargetTexture2DOES(texture->m_target, image);
Expand All @@ -156,6 +171,10 @@ inline vector<MythVideoTexture*> MythEGLDMABUF::CreateComposed(AVDRMFrameDescrip
return result;
}

/*! \brief Create multiple textures that represent the planes for the given AVDRMFrameDescriptor
*
* \note This assumes multiple layers each with one plane.
*/
inline vector<MythVideoTexture*> MythEGLDMABUF::CreateSeparate(AVDRMFrameDescriptor* Desc,
MythRenderOpenGL *Context,
VideoFrame *Frame) const
Expand All @@ -173,11 +192,12 @@ inline vector<MythVideoTexture*> MythEGLDMABUF::CreateSeparate(AVDRMFrameDescrip
vector<MythVideoTexture*> result =
MythVideoTexture::CreateTextures(Context, Frame->codec, format, sizes,
QOpenGLTexture::Target2D);
for (auto & texture : result)
texture->m_allowGLSLDeint = true;
if (result.empty())
return result;

for (uint plane = 0; plane < result.size(); ++plane)
{
result[plane]->m_allowGLSLDeint = true;
AVDRMLayerDescriptor* layer = &Desc->layers[plane];
AVDRMPlaneDescriptor* drmplane = &layer->planes[0];
QVector<EGLint> attribs = {
Expand All @@ -202,8 +222,121 @@ inline vector<MythVideoTexture*> MythEGLDMABUF::CreateSeparate(AVDRMFrameDescrip
EGLImageKHR image = Context->eglCreateImageKHR(Context->GetEGLDisplay(), EGL_NO_CONTEXT,
EGL_LINUX_DMA_BUF_EXT, nullptr, attribs.data());
if (!image)
{
LOG(VB_GENERAL, LOG_ERR, LOC + QString("No EGLImage for plane %1 %2")
.arg(plane).arg(Context->GetEGLError()));
ClearDMATextures(Context, result);
return result;
}

Context->glBindTexture(result[plane]->m_target, result[plane]->m_textureId);
Context->eglImageTargetTexture2DOES(result[plane]->m_target, image);
Context->glBindTexture(result[plane]->m_target, 0);
result[plane]->m_data = static_cast<unsigned char *>(image);
}

return result;
}

#ifndef DRM_FORMAT_R8
#define MKTAG2(a,b,c,d) ((a) | ((b) << 8) | ((c) << 16) | (static_cast<unsigned>(d) << 24))
#define DRM_FORMAT_R8 MKTAG2('R', '8', ' ', ' ')
#define DRM_FORMAT_GR88 MKTAG2('G', 'R', '8', '8')
#define DRM_FORMAT_R16 MKTAG2('R', '1', '6', ' ')
#define DRM_FORMAT_GR32 MKTAG2('G', 'R', '3', '2')
#define DRM_FORMAT_NV12 MKTAG2('N', 'V', '1', '2')
#define DRM_FORMAT_NV21 MKTAG2('N', 'V', '2', '1')
#define DRM_FORMAT_YUV420 MKTAG2('Y', 'U', '1', '2')
#define DRM_FORMAT_YVU420 MKTAG2('Y', 'V', '1', '2')
#define DRM_FORMAT_P010 MKTAG2('P', '0', '1', '0')
#endif

/*! \brief Create multiple textures that represent the planes for the given AVDRMFrameDescriptor
*
* \note This assumes one layer with multiple planes that represent a YUV format.
*
* It is used where the OpenGL DMA BUF implementation does not support composing YUV formats.
* It offers better feature support (as we can enable colour controls, shader
* deinterlacing etc) but may not be as fast on low end hardware; it might not
* use hardware accelerated paths and shader deinterlacing may not be as fast as the
* simple EGL based onefield/bob deinterlacer. It is essentially the same as
* CreateSeparate but the DRM descriptor uses a different layout and we have
* to 'guess' the correct DRM_FORMATs for each plane.
*
* \todo Add support for simple onefield/bob with YUV textures.
*/
inline vector<MythVideoTexture*> MythEGLDMABUF::CreateSeparate2(AVDRMFrameDescriptor* Desc,
MythRenderOpenGL *Context,
VideoFrame *Frame)
{
// As for CreateSeparate - may not work for some formats
AVDRMLayerDescriptor* layer = &Desc->layers[0];
vector<QSize> sizes;
for (int plane = 0 ; plane < layer->nb_planes; ++plane)
{
int width = Frame->width >> ((plane > 0) ? 1 : 0);
int height = Frame->height >> ((plane > 0) ? 1 : 0);
sizes.emplace_back(QSize(width, height));
}

// TODO - the v4l2_m2m decoder is not setting the correct sw_fmt - so we
// need to deduce the frame format from the fourcc
VideoFrameType format = FMT_YV12;
EGLint fourcc1 = DRM_FORMAT_R8;
EGLint fourcc2 = DRM_FORMAT_R8;
if (layer->format == DRM_FORMAT_NV12 || layer->format == DRM_FORMAT_NV21)
{
format = FMT_NV12;
fourcc2 = DRM_FORMAT_GR88;
}
else if (layer->format == DRM_FORMAT_P010)
{
format = FMT_P010;
fourcc1 = DRM_FORMAT_R16;
fourcc2 = DRM_FORMAT_GR32;
}

vector<MythVideoTexture*> result =
MythVideoTexture::CreateTextures(Context, Frame->codec, format, sizes,
QOpenGLTexture::Target2D);
if (result.empty())
return result;

for (uint plane = 0; plane < result.size(); ++plane)
{
result[plane]->m_allowGLSLDeint = true;
EGLint fourcc = fourcc1;
if (plane > 0)
fourcc = fourcc2;
AVDRMPlaneDescriptor* drmplane = &layer->planes[plane];
QVector<EGLint> attribs = {
EGL_LINUX_DRM_FOURCC_EXT, fourcc,
EGL_WIDTH, result[plane]->m_size.width(),
EGL_HEIGHT, result[plane]->m_size.height(),
EGL_DMA_BUF_PLANE0_FD_EXT, Desc->objects[drmplane->object_index].fd,
EGL_DMA_BUF_PLANE0_OFFSET_EXT, static_cast<EGLint>(drmplane->offset),
EGL_DMA_BUF_PLANE0_PITCH_EXT, static_cast<EGLint>(drmplane->pitch)
};

if (m_useModifiers && (Desc->objects[drmplane->object_index].format_modifier != 0 /* DRM_FORMAT_MOD_NONE*/))
{
attribs << EGL_DMA_BUF_PLANE0_MODIFIER_LO_EXT
<< static_cast<EGLint>(Desc->objects[drmplane->object_index].format_modifier & 0xffffffff)
<< EGL_DMA_BUF_PLANE0_MODIFIER_HI_EXT
<< static_cast<EGLint>(Desc->objects[drmplane->object_index].format_modifier >> 32);
}

attribs << EGL_NONE;

EGLImageKHR image = Context->eglCreateImageKHR(Context->GetEGLDisplay(), EGL_NO_CONTEXT,
EGL_LINUX_DMA_BUF_EXT, nullptr, attribs.data());
if (!image)
{
LOG(VB_GENERAL, LOG_ERR, LOC + QString("No EGLImage for plane %1 %2")
.arg(plane).arg(Context->GetEGLError()));
ClearDMATextures(Context, result);
return result;
}

Context->glBindTexture(result[plane]->m_target, result[plane]->m_textureId);
Context->eglImageTargetTexture2DOES(result[plane]->m_target, image);
Expand All @@ -214,9 +347,25 @@ inline vector<MythVideoTexture*> MythEGLDMABUF::CreateSeparate(AVDRMFrameDescrip
return result;
}

/*! \brief Destroy DMA BUF backed textures and their associated EGL images.
 *
 * For each texture: the EGLImageKHR stashed in m_data (stored there when the
 * texture was created from the DRM frame descriptor) is destroyed first, then
 * the raw GL texture object is deleted, and finally the MythVideoTexture
 * wrapper is released. The vector is cleared so callers can safely retry
 * texture creation with a different layout (e.g. falling back from a composed
 * RGB texture to separate per-plane textures).
 *
 * \param Context  render context that owns the EGL display and GL state
 * \param Textures textures to destroy; emptied on return
 */
void MythEGLDMABUF::ClearDMATextures(MythRenderOpenGL* Context,
                                     vector<MythVideoTexture *> &Textures) const
{
    for (auto & texture : Textures)
    {
        // m_data carries the EGLImageKHR created at setup time - release it
        // before deleting the GL texture that was bound to it.
        if (texture->m_data)
            Context->eglDestroyImageKHR(Context->GetEGLDisplay(), texture->m_data);
        texture->m_data = nullptr;
        if (texture->m_textureId)
            Context->glDeleteTextures(1, &texture->m_textureId);
        MythVideoTexture::DeleteTexture(Context, texture);
    }
    Textures.clear();
}

vector<MythVideoTexture*> MythEGLDMABUF::CreateTextures(AVDRMFrameDescriptor* Desc,
MythRenderOpenGL *Context,
VideoFrame *Frame,
bool UseSeparate,
FrameScanType Scan)
{
vector<MythVideoTexture*> result;
Expand Down Expand Up @@ -246,7 +395,11 @@ vector<MythVideoTexture*> MythEGLDMABUF::CreateTextures(AVDRMFrameDescriptor* De

// One layer with X planes
if (numlayers == 1)
{
if (UseSeparate)
return CreateSeparate2(Desc, Context, Frame);
return CreateComposed(Desc, Context, Frame, Scan);
}
// X layers with one plane each
return CreateSeparate(Desc, Context, Frame);
}
6 changes: 6 additions & 0 deletions mythtv/libs/libmythtv/opengl/mythegldmabuf.h
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,10 @@ class MythEGLDMABUF
vector<MythVideoTexture*> CreateTextures(AVDRMFrameDescriptor* Desc,
MythRenderOpenGL *Context,
VideoFrame *Frame,
bool UseSeparate,
FrameScanType Scan = kScan_Progressive);
void ClearDMATextures(MythRenderOpenGL *Context,
vector<MythVideoTexture*>& Textures) const;

private:
vector<MythVideoTexture*> CreateComposed(AVDRMFrameDescriptor* Desc,
Expand All @@ -32,6 +35,9 @@ class MythEGLDMABUF
vector<MythVideoTexture*> CreateSeparate(AVDRMFrameDescriptor* Desc,
MythRenderOpenGL *Context,
VideoFrame *Frame) const;
vector<MythVideoTexture*> CreateSeparate2(AVDRMFrameDescriptor* Desc,
MythRenderOpenGL *Context,
VideoFrame *Frame);
bool m_useModifiers { false };
};

Expand Down
2 changes: 1 addition & 1 deletion mythtv/libs/libmythtv/opengl/mythnvdecinterop.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -316,7 +316,7 @@ bool MythNVDECInterop::CreateCUDAContext(MythRenderOpenGL *GLContext, CudaFuncti
// retrieve CUDA entry points
if (cuda_load_functions(&CudaFuncs, nullptr) != 0)
{
LOG(VB_GENERAL, LOG_ERR, LOC + "Failed to load functions");
LOG(VB_PLAYBACK, LOG_ERR, LOC + "Failed to load functions");
return false;
}

Expand Down
16 changes: 4 additions & 12 deletions mythtv/libs/libmythtv/opengl/mythvaapidrminterop.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -324,7 +324,7 @@ vector<MythVideoTexture*> MythVAAPIInteropDRM::AcquireVAAPI(VASurfaceID Id,
drmdesc.layers[i].planes[0].offset = vaimage.offsets[i];
}

result = CreateTextures(&drmdesc, Context, Frame);
result = CreateTextures(&drmdesc, Context, Frame, false);
}
}

Expand Down Expand Up @@ -412,7 +412,7 @@ vector<MythVideoTexture*> MythVAAPIInteropDRM::AcquirePrime(VASurfaceID Id,

if (!m_drmFrames.contains(Id))
return result;
result = CreateTextures(m_drmFrames[Id], Context, Frame);
result = CreateTextures(m_drmFrames[Id], Context, Frame, false);
#else
(void)Id;
(void)Context;
Expand Down Expand Up @@ -471,22 +471,14 @@ bool MythVAAPIInteropDRM::TestPrimeInterop(void)
AVDRMFrameDescriptor drmdesc;
memset(&drmdesc, 0, sizeof(drmdesc));
VADRMtoPRIME(&vadesc, &drmdesc);
vector<MythVideoTexture*> textures = CreateTextures(&drmdesc, m_context, &frame);
vector<MythVideoTexture*> textures = CreateTextures(&drmdesc, m_context, &frame, false);

if (!textures.empty())
{
s_supported = true;
for (auto & texture : textures)
{
s_supported &= texture->m_data && texture->m_textureId;
if (texture->m_data)
m_context->eglDestroyImageKHR(m_context->GetEGLDisplay(), texture->m_data);
texture->m_data = nullptr;
if (texture->m_textureId)
m_context->glDeleteTextures(1, &texture->m_textureId);
MythVideoTexture::DeleteTexture(m_context, texture);
}
textures.clear();
ClearDMATextures(m_context, textures);
}
for (uint32_t i = 0; i < vadesc.num_objects; ++i)
close(vadesc.objects[i].fd);
Expand Down
2 changes: 1 addition & 1 deletion mythtv/libs/libmythtv/opengl/mythvideooutopengl.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -259,7 +259,7 @@ void MythVideoOutputOpenGL::SetVideoFrameRate(float NewRate)
if (qFuzzyCompare(m_dbDisplayProfile->GetOutput() + 1.0F, NewRate + 1.0F))
return;

LOG(VB_PLAYBACK, LOG_INFO, LOC + QString("Video frame rate changed: %1->%2)")
LOG(VB_PLAYBACK, LOG_INFO, LOC + QString("Video frame rate changed: %1->%2")
.arg(static_cast<double>(m_dbDisplayProfile->GetOutput())).arg(static_cast<double>(NewRate)));
m_dbDisplayProfile->SetOutput(NewRate);
m_newFrameRate = true;
Expand Down
2 changes: 1 addition & 1 deletion mythtv/libs/libmythtv/recorders/ExternalStreamHandler.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -204,7 +204,7 @@ int ExternIO::Write(const QByteArray & buffer)
}

LOG(VB_RECORD, LOG_DEBUG, QString("ExternIO::Write('%1')")
.arg(QString(buffer)));
.arg(QString(buffer).simplified()));

int len = write(m_appIn, buffer.constData(), buffer.size());
if (len != buffer.size())
Expand Down
14 changes: 7 additions & 7 deletions mythtv/libs/libmythtv/videosource.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -2273,9 +2273,9 @@ ExternalConfigurationGroup::ExternalConfigurationGroup(CaptureCard &a_parent,
setVisible(false);
auto *device = new CommandPath(m_parent);
device->setLabel(tr("Command path"));
device->setHelpText(tr("A 'black box' application controlled via "
"stdin, status on stderr and TransportStream "
"read from stdout"));
device->setHelpText(tr("A 'black box' application controlled via stdin, status on "
"stderr and TransportStream read from stdout.\n"
"Use absolute path or path relative to the current directory."));
a_cardtype.addTargetedChild("EXTERNAL", device);

m_info->setLabel(tr("File info"));
Expand All @@ -2301,17 +2301,17 @@ void ExternalConfigurationGroup::probeApp(const QString & path)

if (fileInfo.exists())
{
ci = tr("'%1' is valid.").arg(fileInfo.absoluteFilePath());
ci = tr("File '%1' is valid.").arg(fileInfo.absoluteFilePath());
if (!fileInfo.isReadable() || !fileInfo.isFile())
ci = tr("WARNING: '%1' is not readable.")
ci = tr("WARNING: File '%1' is not readable.")
.arg(fileInfo.absoluteFilePath());
if (!fileInfo.isExecutable())
ci = tr("WARNING: '%1' is not executable.")
ci = tr("WARNING: File '%1' is not executable.")
.arg(fileInfo.absoluteFilePath());
}
else
{
ci = tr("WARNING: '%1' does not exist.")
ci = tr("WARNING: File '%1' does not exist.")
.arg(fileInfo.absoluteFilePath());
}

Expand Down
Loading