all: Update AVPacket API usage
AVPackets must now all be allocated on the heap, since the AVPacket
struct is no longer part of the public libav* ABI.

Signed-off-by: Derek Buitenhuis <derek.buitenhuis@gmail.com>
dwbuiten committed May 5, 2021
1 parent 55c2af5 · commit 96cbf38
Showing 6 changed files with 64 additions and 56 deletions.
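For context on the pattern the diffs below apply (editorial illustration, not code from the commit): with the AVPacket struct removed from the public ABI, callers obtain packets from av_packet_alloc(), recycle them with av_packet_unref(), and release them with av_packet_free(). A minimal sketch assuming only those documented FFmpeg calls; the function and variable names (DecodeOnePacket, Format, Codec, Frame) are invented:

    extern "C" {
    #include <libavcodec/avcodec.h>
    #include <libavformat/avformat.h>
    }

    // Hypothetical helper (names invented): read one packet, feed it to the
    // decoder, and fetch a frame, using only heap-allocated AVPackets.
    static int DecodeOnePacket(AVFormatContext *Format, AVCodecContext *Codec, AVFrame *Frame) {
        AVPacket *Packet = av_packet_alloc();     // replaces the old stack AVPacket + InitNullPacket()
        if (!Packet)
            return AVERROR(ENOMEM);

        int Ret = av_read_frame(Format, Packet);  // fills Packet on success
        if (Ret >= 0) {
            Ret = avcodec_send_packet(Codec, Packet);
            av_packet_unref(Packet);              // drop the payload; the struct stays reusable
            if (Ret >= 0)
                Ret = avcodec_receive_frame(Codec, Frame);
        }

        av_packet_free(&Packet);                  // free the struct itself and NULL the pointer
        return Ret;
    }

av_packet_unref() only drops the packet's payload reference, so the same heap packet can be reused across a read loop; av_packet_free() releases the AVPacket itself, which is why the diffs add it on every early-exit and error path.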
src/core/audiosource.cpp: 16 changes (10 additions, 6 deletions)
@@ -285,10 +285,15 @@ FFMS_AudioSource::AudioBlock *FFMS_AudioSource::CacheBlock(CacheIterator &pos) {
int FFMS_AudioSource::DecodeNextBlock(CacheIterator *pos) {
CurrentFrame = &Frames[PacketNumber];

-AVPacket Packet;
-if (!ReadPacket(&Packet))
+AVPacket *Packet = av_packet_alloc();
+if (!Packet)
+throw FFMS_Exception(FFMS_ERROR_PARSER, FFMS_ERROR_ALLOCATION_FAILED,
+"Could not allocate packet.");
+if (!ReadPacket(Packet)) {
+av_packet_free(&Packet);
throw FFMS_Exception(FFMS_ERROR_PARSER, FFMS_ERROR_UNKNOWN,
"ReadPacket unexpectedly failed to read a packet");
+}

// ReadPacket may have changed the packet number
CurrentFrame = &Frames[PacketNumber];
@@ -297,8 +302,9 @@ int FFMS_AudioSource::DecodeNextBlock(CacheIterator *pos) {
int NumberOfSamples = 0;
AudioBlock *CachedBlock = nullptr;

-int Ret = avcodec_send_packet(CodecContext, &Packet);
-av_packet_unref(&Packet);
+int Ret = avcodec_send_packet(CodecContext, Packet);
+av_packet_unref(Packet);
+av_packet_free(&Packet);

av_frame_unref(DecodeFrame);
Ret = avcodec_receive_frame(CodecContext, DecodeFrame);
@@ -513,8 +519,6 @@ void FFMS_AudioSource::Seek() {
}

bool FFMS_AudioSource::ReadPacket(AVPacket *Packet) {
-InitNullPacket(*Packet);
-
while (av_read_frame(FormatContext, Packet) >= 0) {
if (Packet->stream_index == TrackNumber) {
// Required because not all audio packets, especially in ogg, have a pts. Use the previous valid packet's pts instead.
src/core/indexing.cpp: 63 changes (35 additions, 28 deletions)
@@ -320,7 +320,7 @@ void FFMS_Indexer::CheckAudioProperties(int Track, AVCodecContext *Context) {
}
}

-void FFMS_Indexer::ParseVideoPacket(SharedAVContext &VideoContext, AVPacket &pkt, int *RepeatPict,
+void FFMS_Indexer::ParseVideoPacket(SharedAVContext &VideoContext, AVPacket *pkt, int *RepeatPict,
int *FrameType, bool *Invisible, enum AVPictureStructure *LastPicStruct) {
if (VideoContext.Parser) {
uint8_t *OB;
@@ -330,8 +330,8 @@ void FFMS_Indexer::ParseVideoPacket(SharedAVContext &VideoContext, AVPacket &pkt
av_parser_parse2(VideoContext.Parser,
VideoContext.CodecContext,
&OB, &OBSize,
-pkt.data, pkt.size,
-pkt.pts, pkt.dts, pkt.pos);
+pkt->data, pkt->size,
+pkt->pts, pkt->dts, pkt->pos);

// H.264 (PAFF) and HEVC may have one field per packet, so we need to track
// when we have a full or half frame available, and mark one of them as
@@ -351,15 +351,15 @@

*RepeatPict = VideoContext.Parser->repeat_pict;
*FrameType = VideoContext.Parser->pict_type;
-*Invisible = (IncompleteFrame || VideoContext.Parser->repeat_pict < 0 || (pkt.flags & AV_PKT_FLAG_DISCARD));
+*Invisible = (IncompleteFrame || VideoContext.Parser->repeat_pict < 0 || (pkt->flags & AV_PKT_FLAG_DISCARD));
} else {
-*Invisible = !!(pkt.flags & AV_PKT_FLAG_DISCARD);
+*Invisible = !!(pkt->flags & AV_PKT_FLAG_DISCARD);
}

if (VideoContext.CodecContext->codec_id == AV_CODEC_ID_VP8)
-ParseVP8(pkt.data[0], Invisible, FrameType);
+ParseVP8(pkt->data[0], Invisible, FrameType);
else if (VideoContext.CodecContext->codec_id == AV_CODEC_ID_VP9)
-ParseVP9(pkt.data[0], Invisible, FrameType);
+ParseVP9(pkt->data[0], Invisible, FrameType);
}

void FFMS_Indexer::Free() {
@@ -458,32 +458,36 @@ FFMS_Index *FFMS_Indexer::DoIndexing() {
}
}

-AVPacket Packet;
-InitNullPacket(Packet);
+AVPacket *Packet = av_packet_alloc();
+if (!Packet)
+throw FFMS_Exception(FFMS_ERROR_CODEC, FFMS_ERROR_ALLOCATION_FAILED,
+"Could not allocate packet.");
std::vector<int64_t> LastValidTS(FormatContext->nb_streams, AV_NOPTS_VALUE);

int64_t filesize = avio_size(FormatContext->pb);
enum AVPictureStructure LastPicStruct = AV_PICTURE_STRUCTURE_UNKNOWN;
-while (av_read_frame(FormatContext, &Packet) >= 0) {
+while (av_read_frame(FormatContext, Packet) >= 0) {
// Update progress
// FormatContext->pb can apparently be NULL when opening images.
if (IC && FormatContext->pb) {
-if ((*IC)(FormatContext->pb->pos, filesize, ICPrivate))
+if ((*IC)(FormatContext->pb->pos, filesize, ICPrivate)) {
+av_packet_free(&Packet);
throw FFMS_Exception(FFMS_ERROR_CANCELLED, FFMS_ERROR_USER,
"Cancelled by user");
+}
}
-if (!IndexMask.count(Packet.stream_index)) {
-av_packet_unref(&Packet);
+if (!IndexMask.count(Packet->stream_index)) {
+av_packet_unref(Packet);
continue;
}

-int Track = Packet.stream_index;
+int Track = Packet->stream_index;
FFMS_Track &TrackInfo = (*TrackIndices)[Track];
-bool KeyFrame = !!(Packet.flags & AV_PKT_FLAG_KEY);
+bool KeyFrame = !!(Packet->flags & AV_PKT_FLAG_KEY);
ReadTS(Packet, LastValidTS[Track], (*TrackIndices)[Track].UseDTS);

if (FormatContext->streams[Track]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
-int64_t PTS = TrackInfo.UseDTS ? Packet.dts : Packet.pts;
+int64_t PTS = TrackInfo.UseDTS ? Packet->dts : Packet->pts;
if (PTS == AV_NOPTS_VALUE) {
// VPx alt-refs are output as packets which lack timestmps or durations, since
// they are invisible. Currently, the timestamp mangling code in libavformat
@@ -495,9 +499,11 @@ FFMS_Index *FFMS_Indexer::DoIndexing() {
// FFMS2 currently sorts packets by PTS, which will break decoding, otherwise.
bool HasAltRefs = (FormatContext->streams[Track]->codecpar->codec_id == AV_CODEC_ID_VP8 ||
FormatContext->streams[Track]->codecpar->codec_id == AV_CODEC_ID_VP9);
-if (Packet.duration == 0 && !HasAltRefs)
+if (Packet->duration == 0 && !HasAltRefs) {
+av_packet_free(&Packet);
throw FFMS_Exception(FFMS_ERROR_INDEXING, FFMS_ERROR_PARSER,
"Invalid packet pts, dts, and duration");
+}

if (TrackInfo.empty())
PTS = 0;
@@ -513,7 +519,7 @@ FFMS_Index *FFMS_Indexer::DoIndexing() {
ParseVideoPacket(AVContexts[Track], Packet, &RepeatPict, &FrameType, &Invisible, &LastPicStruct);

TrackInfo.AddVideoFrame(PTS, RepeatPict, KeyFrame,
-FrameType, Packet.pos, Invisible);
+FrameType, Packet->pos, Invisible);
} else if (FormatContext->streams[Track]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
// For video seeking timestamps are used only if all packets have
// timestamps, while for audio they're used if any have timestamps,
@@ -522,28 +528,29 @@ FFMS_Index *FFMS_Indexer::DoIndexing() {
TrackInfo.HasTS = true;

int64_t StartSample = AVContexts[Track].CurrentSample;
-uint32_t SampleCount = IndexAudioPacket(Track, &Packet, AVContexts[Track], *TrackIndices);
+uint32_t SampleCount = IndexAudioPacket(Track, Packet, AVContexts[Track], *TrackIndices);
TrackInfo.SampleRate = AVContexts[Track].CodecContext->sample_rate;

TrackInfo.AddAudioFrame(LastValidTS[Track],
-StartSample, SampleCount, KeyFrame, Packet.pos, Packet.flags & AV_PKT_FLAG_DISCARD);
+StartSample, SampleCount, KeyFrame, Packet->pos, Packet->flags & AV_PKT_FLAG_DISCARD);
}

-if (!(Packet.flags & AV_PKT_FLAG_DISCARD))
-TrackInfo.LastDuration = Packet.duration;
+if (!(Packet->flags & AV_PKT_FLAG_DISCARD))
+TrackInfo.LastDuration = Packet->duration;

-av_packet_unref(&Packet);
+av_packet_unref(Packet);
}
+av_packet_free(&Packet);

TrackIndices->Finalize(AVContexts, FormatContext->iformat->name);
return TrackIndices.release();
}

-void FFMS_Indexer::ReadTS(const AVPacket &Packet, int64_t &TS, bool &UseDTS) {
-if (!UseDTS && Packet.pts != AV_NOPTS_VALUE)
-TS = Packet.pts;
+void FFMS_Indexer::ReadTS(const AVPacket *Packet, int64_t &TS, bool &UseDTS) {
+if (!UseDTS && Packet->pts != AV_NOPTS_VALUE)
+TS = Packet->pts;
if (TS == AV_NOPTS_VALUE)
UseDTS = true;
-if (UseDTS && Packet.dts != AV_NOPTS_VALUE)
-TS = Packet.dts;
+if (UseDTS && Packet->dts != AV_NOPTS_VALUE)
+TS = Packet->dts;
}
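A side effect visible in DoIndexing() above: because the packet now lives on the heap, every early exit (user cancellation, invalid timestamps) has to call av_packet_free() by hand before throwing. A scoped owner would handle that automatically; the following is a generic C++ sketch of that alternative, not FFMS2 API (PacketDeleter, PacketPtr, and AllocPacket are invented names):

    extern "C" {
    #include <libavcodec/avcodec.h>
    }
    #include <memory>
    #include <new>

    // Generic sketch, not part of this commit: an RAII owner that would free
    // the packet on every exit path, including the throws above.
    struct PacketDeleter {
        void operator()(AVPacket *Packet) const { av_packet_free(&Packet); }
    };
    using PacketPtr = std::unique_ptr<AVPacket, PacketDeleter>;

    static PacketPtr AllocPacket() {
        PacketPtr Packet(av_packet_alloc());
        if (!Packet)
            throw std::bad_alloc(); // stands in for the FFMS_ERROR_ALLOCATION_FAILED throws
        return Packet;
    }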
src/core/indexing.h: 4 changes (2 additions, 2 deletions)
@@ -80,10 +80,10 @@ struct FFMS_Indexer {
int64_t Filesize;
uint8_t Digest[20];

-void ReadTS(const AVPacket &Packet, int64_t &TS, bool &UseDTS);
+void ReadTS(const AVPacket *Packet, int64_t &TS, bool &UseDTS);
void CheckAudioProperties(int Track, AVCodecContext *Context);
uint32_t IndexAudioPacket(int Track, AVPacket *Packet, SharedAVContext &Context, FFMS_Index &TrackIndices);
-void ParseVideoPacket(SharedAVContext &VideoContext, AVPacket &pkt, int *RepeatPict, int *FrameType, bool *Invisible, enum AVPictureStructure *LastPicStruct);
+void ParseVideoPacket(SharedAVContext &VideoContext, AVPacket *pkt, int *RepeatPict, int *FrameType, bool *Invisible, enum AVPictureStructure *LastPicStruct);
void Free();
public:
FFMS_Indexer(const char *Filename);
src/core/utils.cpp: 6 changes (0 additions, 6 deletions)
@@ -62,12 +62,6 @@ void ClearErrorInfo(FFMS_ErrorInfo *ErrorInfo) {
}
}

-void InitNullPacket(AVPacket &pkt) {
-av_init_packet(&pkt);
-pkt.data = nullptr;
-pkt.size = 0;
-}
-
void FillAP(FFMS_AudioProperties &AP, AVCodecContext *CTX, FFMS_Track &Frames) {
AP.SampleFormat = static_cast<FFMS_SampleFormat>(av_get_packed_sample_fmt(CTX->sample_fmt));
AP.BitsPerSample = av_get_bytes_per_sample(CTX->sample_fmt) * 8;
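The deleted helper only put caller-owned packet storage into a known empty state. Under the heap API there is nothing left for it to do: av_packet_alloc() is documented to return a packet with all fields set to their default values (data == NULL, size == 0), and av_init_packet() has since been deprecated upstream. A short illustrative comparison, not code from this commit (the helper name is invented):

    extern "C" {
    #include <libavcodec/avcodec.h>
    }

    // Illustrative only: what the removed InitNullPacket() used to do by hand
    // versus what av_packet_alloc() now does in a single call.
    static AVPacket *AllocateBlankPacket() {
        // Old pattern (caller-owned storage, now deprecated upstream):
        //     AVPacket pkt;
        //     av_init_packet(&pkt);
        //     pkt.data = nullptr;
        //     pkt.size = 0;
        //
        // New pattern: the returned packet already has default fields
        // (data == NULL, size == 0); returns NULL on allocation failure.
        return av_packet_alloc();
    }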
src/core/utils.h: 1 change (0 additions, 1 deletion)
@@ -58,7 +58,6 @@ std::unique_ptr<T> make_unique(Args&&... args) {
}

void ClearErrorInfo(FFMS_ErrorInfo *ErrorInfo);
-void InitNullPacket(AVPacket &pkt);
void FillAP(FFMS_AudioProperties &AP, AVCodecContext *CTX, FFMS_Track &Frames);

void LAVFOpenFile(const char *SourceFile, AVFormatContext *&FormatContext, int Track);
src/core/videosource.cpp: 30 changes (17 additions, 13 deletions)
@@ -673,30 +673,34 @@ void FFMS_VideoSource::DecodeNextFrame(int64_t &AStartTime, int64_t &Pos) {
if (HasPendingDelayedFrames())
return;

-AVPacket Packet;
-InitNullPacket(Packet);
-
-while (ReadFrame(&Packet) >= 0) {
-if (Packet.stream_index != VideoTrack) {
-av_packet_unref(&Packet);
+AVPacket *Packet = av_packet_alloc();
+if (!Packet)
+throw FFMS_Exception(FFMS_ERROR_DECODING, FFMS_ERROR_ALLOCATION_FAILED,
+"Could not allocate packet.");
+
+while (ReadFrame(Packet) >= 0) {
+if (Packet->stream_index != VideoTrack) {
+av_packet_unref(Packet);
continue;
}

if (AStartTime < 0)
-AStartTime = Frames.UseDTS ? Packet.dts : Packet.pts;
+AStartTime = Frames.UseDTS ? Packet->dts : Packet->pts;

if (Pos < 0)
-Pos = Packet.pos;
+Pos = Packet->pos;

-bool FrameFinished = DecodePacket(&Packet);
-av_packet_unref(&Packet);
-if (FrameFinished)
+bool FrameFinished = DecodePacket(Packet);
+av_packet_unref(Packet);
+if (FrameFinished) {
+av_packet_free(&Packet);
return;
+}
}

// Flush final frames
-InitNullPacket(Packet);
-DecodePacket(&Packet);
+DecodePacket(Packet);
+av_packet_free(&Packet);
}

bool FFMS_VideoSource::SeekTo(int n, int SeekOffset) {
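The flush path above ends up handing DecodePacket() the heap packet after av_packet_unref() has emptied it, which FFmpeg's send/receive API treats as the end-of-stream signal. As a generic illustration of that drain step (not FFMS2's actual DecodePacket; only documented avcodec_send_packet / avcodec_receive_frame / av_frame_unref calls are assumed):

    extern "C" {
    #include <libavcodec/avcodec.h>
    }

    // Generic drain sketch: a NULL packet (or one with data == NULL and
    // size == 0) puts the decoder into draining mode; buffered frames are
    // then returned until avcodec_receive_frame() reports AVERROR_EOF.
    static void FlushDecoder(AVCodecContext *Codec, AVFrame *Frame) {
        avcodec_send_packet(Codec, nullptr);                // signal end of stream
        while (avcodec_receive_frame(Codec, Frame) >= 0) {  // collect delayed frames
            // ... hand Frame to the caller here ...
            av_frame_unref(Frame);
        }
    }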
