mpegvideo: split the encoding-only parts of frame_start() into a separate function

This introduces some code duplication. However, much of it should go
away once the decoders stop using MpegEncContext.
elenril committed Jan 3, 2014
1 parent b318106 commit aec25b1
Showing 2 changed files with 139 additions and 74 deletions.
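
Before the diffs, a minimal, self-contained C sketch of the pattern the commit applies. This is not Libav code: the Toy* names are invented and the real functions carry far more state. It only shows the shape of the change: a function that used to branch on s->encoding becomes a decoder-only version plus a private encoder-side copy, with the shared tail duplicated for the time being.

#include <stdio.h>

/* Toy stand-in for MpegEncContext: just enough state for the sketch. */
typedef struct ToyContext {
    int encoding;               /* 1 when the context belongs to an encoder */
    int coded_picture_number;
} ToyContext;

/* Work both paths still need; in the real commit this duplication is
 * accepted until the decoders stop using MpegEncContext. */
static void toy_setup_current_picture(ToyContext *s)
{
    s->coded_picture_number++;
    printf("picture #%d prepared\n", s->coded_picture_number);
}

/* Decoder-only variant: the old "if (!s->encoding)" branches disappear. */
static int toy_decode_frame_start(ToyContext *s)
{
    printf("decoder: releasing unused reference pictures\n");
    toy_setup_current_picture(s);
    return 0;
}

/* Encoder-only static copy, analogous to frame_start() in mpegvideo_enc.c. */
static int toy_encode_frame_start(ToyContext *s)
{
    printf("encoder: updating noise-reduction tables\n");
    toy_setup_current_picture(s);
    return 0;
}

int main(void)
{
    ToyContext dec = { .encoding = 0 };
    ToyContext enc = { .encoding = 1 };

    toy_decode_frame_start(&dec);   /* was ff_MPV_frame_start() for both */
    toy_encode_frame_start(&enc);   /* now a private helper of the encoder */
    return 0;
}
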
114 changes: 42 additions & 72 deletions libavcodec/mpegvideo.c
@@ -1401,30 +1401,9 @@ int ff_find_unused_picture(MpegEncContext *s, int shared)
return ret;
}

static void update_noise_reduction(MpegEncContext *s)
{
int intra, i;

for (intra = 0; intra < 2; intra++) {
if (s->dct_count[intra] > (1 << 16)) {
for (i = 0; i < 64; i++) {
s->dct_error_sum[intra][i] >>= 1;
}
s->dct_count[intra] >>= 1;
}

for (i = 0; i < 64; i++) {
s->dct_offset[intra][i] = (s->avctx->noise_reduction *
s->dct_count[intra] +
s->dct_error_sum[intra][i] / 2) /
(s->dct_error_sum[intra][i] + 1);
}
}
}

/**
* generic function for encode/decode called after coding/decoding
* the header and before a frame is coded/decoded.
* generic function called after decoding
* the header and before a frame is decoded.
*/
int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
{
@@ -1441,62 +1420,58 @@ int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)

/* release forgotten pictures */
/* if (mpeg124/h263) */
if (!s->encoding) {
for (i = 0; i < MAX_PICTURE_COUNT; i++) {
if (&s->picture[i] != s->last_picture_ptr &&
&s->picture[i] != s->next_picture_ptr &&
s->picture[i].reference && !s->picture[i].needs_realloc) {
if (!(avctx->active_thread_type & FF_THREAD_FRAME))
av_log(avctx, AV_LOG_ERROR,
"releasing zombie picture\n");
ff_mpeg_unref_picture(s, &s->picture[i]);
}
for (i = 0; i < MAX_PICTURE_COUNT; i++) {
if (&s->picture[i] != s->last_picture_ptr &&
&s->picture[i] != s->next_picture_ptr &&
s->picture[i].reference && !s->picture[i].needs_realloc) {
if (!(avctx->active_thread_type & FF_THREAD_FRAME))
av_log(avctx, AV_LOG_ERROR,
"releasing zombie picture\n");
ff_mpeg_unref_picture(s, &s->picture[i]);
}
}

ff_mpeg_unref_picture(s, &s->current_picture);

if (!s->encoding) {
release_unused_pictures(s);
release_unused_pictures(s);

if (s->current_picture_ptr &&
s->current_picture_ptr->f.buf[0] == NULL) {
// we already have a unused image
// (maybe it was set before reading the header)
pic = s->current_picture_ptr;
} else {
i = ff_find_unused_picture(s, 0);
if (i < 0) {
av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
return i;
}
pic = &s->picture[i];
if (s->current_picture_ptr &&
s->current_picture_ptr->f.buf[0] == NULL) {
// we already have a unused image
// (maybe it was set before reading the header)
pic = s->current_picture_ptr;
} else {
i = ff_find_unused_picture(s, 0);
if (i < 0) {
av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
return i;
}
pic = &s->picture[i];
}

pic->reference = 0;
if (!s->droppable) {
if (s->pict_type != AV_PICTURE_TYPE_B)
pic->reference = 3;
}
pic->reference = 0;
if (!s->droppable) {
if (s->pict_type != AV_PICTURE_TYPE_B)
pic->reference = 3;
}

pic->f.coded_picture_number = s->coded_picture_number++;
pic->f.coded_picture_number = s->coded_picture_number++;

if (ff_alloc_picture(s, pic, 0) < 0)
return -1;
if (ff_alloc_picture(s, pic, 0) < 0)
return -1;

s->current_picture_ptr = pic;
// FIXME use only the vars from current_pic
s->current_picture_ptr->f.top_field_first = s->top_field_first;
if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
if (s->picture_structure != PICT_FRAME)
s->current_picture_ptr->f.top_field_first =
(s->picture_structure == PICT_TOP_FIELD) == s->first_field;
}
s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
!s->progressive_sequence;
s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
s->current_picture_ptr = pic;
// FIXME use only the vars from current_pic
s->current_picture_ptr->f.top_field_first = s->top_field_first;
if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
if (s->picture_structure != PICT_FRAME)
s->current_picture_ptr->f.top_field_first =
(s->picture_structure == PICT_TOP_FIELD) == s->first_field;
}
s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
!s->progressive_sequence;
s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;

s->current_picture_ptr->f.pict_type = s->pict_type;
// if (s->flags && CODEC_FLAG_QSCALE)
@@ -1634,11 +1609,6 @@ int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
}

if (s->dct_error_sum) {
assert(s->avctx->noise_reduction && s->encoding);
update_noise_reduction(s);
}

#if FF_API_XVMC
FF_DISABLE_DEPRECATION_WARNINGS
if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
99 changes: 97 additions & 2 deletions libavcodec/mpegvideo_enc.c
@@ -1385,6 +1385,99 @@ static void frame_end(MpegEncContext *s)

}

static void update_noise_reduction(MpegEncContext *s)
{
int intra, i;

for (intra = 0; intra < 2; intra++) {
if (s->dct_count[intra] > (1 << 16)) {
for (i = 0; i < 64; i++) {
s->dct_error_sum[intra][i] >>= 1;
}
s->dct_count[intra] >>= 1;
}

for (i = 0; i < 64; i++) {
s->dct_offset[intra][i] = (s->avctx->noise_reduction *
s->dct_count[intra] +
s->dct_error_sum[intra][i] / 2) /
(s->dct_error_sum[intra][i] + 1);
}
}
}

static int frame_start(MpegEncContext *s)
{
int ret;

/* mark & release old frames */
if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
s->last_picture_ptr != s->next_picture_ptr &&
s->last_picture_ptr->f.buf[0]) {
ff_mpeg_unref_picture(s, s->last_picture_ptr);
}

s->current_picture_ptr->f.pict_type = s->pict_type;
s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;

ff_mpeg_unref_picture(s, &s->current_picture);
if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
s->current_picture_ptr)) < 0)
return ret;

if (s->pict_type != AV_PICTURE_TYPE_B) {
s->last_picture_ptr = s->next_picture_ptr;
if (!s->droppable)
s->next_picture_ptr = s->current_picture_ptr;
}

if (s->last_picture_ptr) {
ff_mpeg_unref_picture(s, &s->last_picture);
if (s->last_picture_ptr->f.buf[0] &&
(ret = ff_mpeg_ref_picture(s, &s->last_picture,
s->last_picture_ptr)) < 0)
return ret;
}
if (s->next_picture_ptr) {
ff_mpeg_unref_picture(s, &s->next_picture);
if (s->next_picture_ptr->f.buf[0] &&
(ret = ff_mpeg_ref_picture(s, &s->next_picture,
s->next_picture_ptr)) < 0)
return ret;
}

if (s->picture_structure!= PICT_FRAME) {
int i;
for (i = 0; i < 4; i++) {
if (s->picture_structure == PICT_BOTTOM_FIELD) {
s->current_picture.f.data[i] +=
s->current_picture.f.linesize[i];
}
s->current_picture.f.linesize[i] *= 2;
s->last_picture.f.linesize[i] *= 2;
s->next_picture.f.linesize[i] *= 2;
}
}

if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
} else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
} else {
s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
}

if (s->dct_error_sum) {
assert(s->avctx->noise_reduction && s->encoding);
update_noise_reduction(s);
}

return 0;
}

int ff_MPV_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
const AVFrame *pic_arg, int *got_packet)
{
@@ -1425,7 +1518,9 @@ int ff_MPV_encode_picture(AVCodecContext *avctx, AVPacket *pkt,

s->pict_type = s->new_picture.f.pict_type;
//emms_c();
ff_MPV_frame_start(s, avctx);
ret = frame_start(s);
if (ret < 0)
return ret;
vbv_retry:
if (encode_picture(s, s->picture_number) < 0)
return -1;
@@ -1461,7 +1556,7 @@ int ff_MPV_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
s->lambda_table[i] * (s->qscale + 1) /
s->qscale);
}
s->mb_skipped = 0; // done in MPV_frame_start()
s->mb_skipped = 0; // done in frame_start()
// done in encode_picture() so we must undo it
if (s->pict_type == AV_PICTURE_TYPE_P) {
if (s->flipflop_rounding ||
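
One note on the moved update_noise_reduction() helper above: per DCT coefficient it computes, roughly, noise_reduction * dct_count / dct_error_sum, with dct_error_sum / 2 added to the numerator for rounding and + 1 in the denominator so the division stays defined while no error has accumulated yet. A tiny stand-alone check of that integer expression with made-up numbers (again not Libav code):

#include <stdio.h>

/* Same integer expression as in update_noise_reduction(), applied to
 * arbitrary sample values: (nr * count + err / 2) / (err + 1). */
static unsigned toy_offset(unsigned nr, unsigned count, unsigned err)
{
    return (nr * count + err / 2) / (err + 1);
}

int main(void)
{
    printf("%u\n", toy_offset(4, 100, 0));    /* 400 / 1    = 400 */
    printf("%u\n", toy_offset(4, 100, 800));  /* 800 / 801  = 0   */
    printf("%u\n", toy_offset(4, 1000, 800)); /* 4400 / 801 = 5   */
    return 0;
}
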
