From aff4aaa19327c1dd2bb8d8a20e006481120d562b Mon Sep 17 00:00:00 2001 From: Hendrik Leppkes Date: Mon, 12 Oct 2015 21:07:35 +0200 Subject: [PATCH] vp9: expose bitstream header and reference frames through vp9.h --- libavcodec/vp9.c | 728 +++++++++++++++++------------------ libavcodec/vp9.h | 30 +- libavcodec/vp9_mc_template.c | 4 +- 3 files changed, 386 insertions(+), 376 deletions(-) diff --git a/libavcodec/vp9.c b/libavcodec/vp9.c index c3bcac1b7598a..1397de33014a8 100644 --- a/libavcodec/vp9.c +++ b/libavcodec/vp9.c @@ -35,19 +35,6 @@ #define VP9_SYNCCODE 0x498342 -struct VP9mvrefPair { - VP56mv mv[2]; - int8_t ref[2]; -}; - -typedef struct VP9Frame { - ThreadFrame tf; - AVBufferRef *extradata; - uint8_t *segmentation_map; - struct VP9mvrefPair *mv; - int uses_2pass; -} VP9Frame; - struct VP9Filter { uint8_t level[8 * 8]; uint8_t /* bit=col */ mask[2 /* 0=y, 1=uv */][2 /* 0=col, 1=row */] @@ -65,6 +52,8 @@ typedef struct VP9Block { } VP9Block; typedef struct VP9Context { + VP9SharedContext s; + VP9DSPContext dsp; VideoDSPContext vdsp; GetBitContext gb; @@ -77,15 +66,10 @@ typedef struct VP9Context { uint8_t *dst[3]; ptrdiff_t y_stride, uv_stride; - struct Vp9BitstreamHeader h; uint8_t ss_h, ss_v; uint8_t last_bpp, bpp, bpp_index, bytesperpixel; uint8_t last_keyframe; - ThreadFrame refs[8], next_refs[8]; -#define CUR_FRAME 0 -#define REF_FRAME_MVPAIR 1 -#define REF_FRAME_SEGMAP 2 - VP9Frame frames[3]; + ThreadFrame next_refs[8]; struct { uint8_t lim_lut[64]; @@ -293,14 +277,14 @@ static int update_block_buffers(AVCodecContext *ctx) VP9Context *s = ctx->priv_data; int chroma_blocks, chroma_eobs, bytesperpixel = s->bytesperpixel; - if (s->b_base && s->block_base && s->block_alloc_using_2pass == s->frames[CUR_FRAME].uses_2pass) + if (s->b_base && s->block_base && s->block_alloc_using_2pass == s->s.frames[CUR_FRAME].uses_2pass) return 0; av_free(s->b_base); av_free(s->block_base); chroma_blocks = 64 * 64 >> (s->ss_h + s->ss_v); chroma_eobs = 16 * 16 >> (s->ss_h + s->ss_v); - if (s->frames[CUR_FRAME].uses_2pass) { + if (s->s.frames[CUR_FRAME].uses_2pass) { int sbs = s->sb_cols * s->sb_rows; s->b_base = av_malloc_array(s->cols * s->rows, sizeof(VP9Block)); @@ -325,7 +309,7 @@ static int update_block_buffers(AVCodecContext *ctx) s->uveob_base[0] = s->eob_base + 16 * 16; s->uveob_base[1] = s->uveob_base[0] + chroma_eobs; } - s->block_alloc_using_2pass = s->frames[CUR_FRAME].uses_2pass; + s->block_alloc_using_2pass = s->s.frames[CUR_FRAME].uses_2pass; return 0; } @@ -488,18 +472,18 @@ static int decode_frame_header(AVCodecContext *ctx, av_log(ctx, AV_LOG_ERROR, "Profile %d is not yet supported\n", ctx->profile); return AVERROR_INVALIDDATA; } - s->h.profile = ctx->profile; + s->s.h.profile = ctx->profile; if (get_bits1(&s->gb)) { *ref = get_bits(&s->gb, 3); return 0; } - s->last_keyframe = s->h.keyframe; - s->h.keyframe = !get_bits1(&s->gb); - last_invisible = s->h.invisible; - s->h.invisible = !get_bits1(&s->gb); - s->h.errorres = get_bits1(&s->gb); - s->h.use_last_frame_mvs = !s->h.errorres && !last_invisible; - if (s->h.keyframe) { + s->last_keyframe = s->s.h.keyframe; + s->s.h.keyframe = !get_bits1(&s->gb); + last_invisible = s->s.h.invisible; + s->s.h.invisible = !get_bits1(&s->gb); + s->s.h.errorres = get_bits1(&s->gb); + s->s.h.use_last_frame_mvs = !s->s.h.errorres && !last_invisible; + if (s->s.h.keyframe) { if (get_bits_long(&s->gb, 24) != VP9_SYNCCODE) { // synccode av_log(ctx, AV_LOG_ERROR, "Invalid sync code\n"); return AVERROR_INVALIDDATA; @@ -507,15 +491,15 @@ static int 
decode_frame_header(AVCodecContext *ctx, if ((fmt = read_colorspace_details(ctx)) < 0) return fmt; // for profile 1, here follows the subsampling bits - s->h.refreshrefmask = 0xff; + s->s.h.refreshrefmask = 0xff; w = get_bits(&s->gb, 16) + 1; h = get_bits(&s->gb, 16) + 1; if (get_bits1(&s->gb)) // display size skip_bits(&s->gb, 32); } else { - s->h.intraonly = s->h.invisible ? get_bits1(&s->gb) : 0; - s->h.resetctx = s->h.errorres ? 0 : get_bits(&s->gb, 2); - if (s->h.intraonly) { + s->s.h.intraonly = s->s.h.invisible ? get_bits1(&s->gb) : 0; + s->s.h.resetctx = s->s.h.errorres ? 0 : get_bits(&s->gb, 2); + if (s->s.h.intraonly) { if (get_bits_long(&s->gb, 24) != VP9_SYNCCODE) { // synccode av_log(ctx, AV_LOG_ERROR, "Invalid sync code\n"); return AVERROR_INVALIDDATA; @@ -532,34 +516,34 @@ static int decode_frame_header(AVCodecContext *ctx, ctx->colorspace = AVCOL_SPC_BT470BG; ctx->color_range = AVCOL_RANGE_JPEG; } - s->h.refreshrefmask = get_bits(&s->gb, 8); + s->s.h.refreshrefmask = get_bits(&s->gb, 8); w = get_bits(&s->gb, 16) + 1; h = get_bits(&s->gb, 16) + 1; if (get_bits1(&s->gb)) // display size skip_bits(&s->gb, 32); } else { - s->h.refreshrefmask = get_bits(&s->gb, 8); - s->h.refidx[0] = get_bits(&s->gb, 3); - s->h.signbias[0] = get_bits1(&s->gb) && !s->h.errorres; - s->h.refidx[1] = get_bits(&s->gb, 3); - s->h.signbias[1] = get_bits1(&s->gb) && !s->h.errorres; - s->h.refidx[2] = get_bits(&s->gb, 3); - s->h.signbias[2] = get_bits1(&s->gb) && !s->h.errorres; - if (!s->refs[s->h.refidx[0]].f->data[0] || - !s->refs[s->h.refidx[1]].f->data[0] || - !s->refs[s->h.refidx[2]].f->data[0]) { + s->s.h.refreshrefmask = get_bits(&s->gb, 8); + s->s.h.refidx[0] = get_bits(&s->gb, 3); + s->s.h.signbias[0] = get_bits1(&s->gb) && !s->s.h.errorres; + s->s.h.refidx[1] = get_bits(&s->gb, 3); + s->s.h.signbias[1] = get_bits1(&s->gb) && !s->s.h.errorres; + s->s.h.refidx[2] = get_bits(&s->gb, 3); + s->s.h.signbias[2] = get_bits1(&s->gb) && !s->s.h.errorres; + if (!s->s.refs[s->s.h.refidx[0]].f->data[0] || + !s->s.refs[s->s.h.refidx[1]].f->data[0] || + !s->s.refs[s->s.h.refidx[2]].f->data[0]) { av_log(ctx, AV_LOG_ERROR, "Not all references are available\n"); return AVERROR_INVALIDDATA; } if (get_bits1(&s->gb)) { - w = s->refs[s->h.refidx[0]].f->width; - h = s->refs[s->h.refidx[0]].f->height; + w = s->s.refs[s->s.h.refidx[0]].f->width; + h = s->s.refs[s->s.h.refidx[0]].f->height; } else if (get_bits1(&s->gb)) { - w = s->refs[s->h.refidx[1]].f->width; - h = s->refs[s->h.refidx[1]].f->height; + w = s->s.refs[s->s.h.refidx[1]].f->width; + h = s->s.refs[s->s.h.refidx[1]].f->height; } else if (get_bits1(&s->gb)) { - w = s->refs[s->h.refidx[2]].f->width; - h = s->refs[s->h.refidx[2]].f->height; + w = s->s.refs[s->s.h.refidx[2]].f->width; + h = s->s.refs[s->s.h.refidx[2]].f->height; } else { w = get_bits(&s->gb, 16) + 1; h = get_bits(&s->gb, 16) + 1; @@ -567,33 +551,33 @@ static int decode_frame_header(AVCodecContext *ctx, // Note that in this code, "CUR_FRAME" is actually before we // have formally allocated a frame, and thus actually represents // the _last_ frame - s->h.use_last_frame_mvs &= s->frames[CUR_FRAME].tf.f->width == w && - s->frames[CUR_FRAME].tf.f->height == h; + s->s.h.use_last_frame_mvs &= s->s.frames[CUR_FRAME].tf.f->width == w && + s->s.frames[CUR_FRAME].tf.f->height == h; if (get_bits1(&s->gb)) // display size skip_bits(&s->gb, 32); - s->h.highprecisionmvs = get_bits1(&s->gb); - s->h.filtermode = get_bits1(&s->gb) ? 
FILTER_SWITCHABLE : + s->s.h.highprecisionmvs = get_bits1(&s->gb); + s->s.h.filtermode = get_bits1(&s->gb) ? FILTER_SWITCHABLE : get_bits(&s->gb, 2); - s->h.allowcompinter = s->h.signbias[0] != s->h.signbias[1] || - s->h.signbias[0] != s->h.signbias[2]; - if (s->h.allowcompinter) { - if (s->h.signbias[0] == s->h.signbias[1]) { - s->h.fixcompref = 2; - s->h.varcompref[0] = 0; - s->h.varcompref[1] = 1; - } else if (s->h.signbias[0] == s->h.signbias[2]) { - s->h.fixcompref = 1; - s->h.varcompref[0] = 0; - s->h.varcompref[1] = 2; + s->s.h.allowcompinter = s->s.h.signbias[0] != s->s.h.signbias[1] || + s->s.h.signbias[0] != s->s.h.signbias[2]; + if (s->s.h.allowcompinter) { + if (s->s.h.signbias[0] == s->s.h.signbias[1]) { + s->s.h.fixcompref = 2; + s->s.h.varcompref[0] = 0; + s->s.h.varcompref[1] = 1; + } else if (s->s.h.signbias[0] == s->s.h.signbias[2]) { + s->s.h.fixcompref = 1; + s->s.h.varcompref[0] = 0; + s->s.h.varcompref[1] = 2; } else { - s->h.fixcompref = 0; - s->h.varcompref[0] = 1; - s->h.varcompref[1] = 2; + s->s.h.fixcompref = 0; + s->s.h.varcompref[0] = 1; + s->s.h.varcompref[1] = 2; } } for (i = 0; i < 3; i++) { - AVFrame *ref = s->refs[s->h.refidx[i]].f; + AVFrame *ref = s->s.refs[s->s.h.refidx[i]].f; int refw = ref->width, refh = ref->height; if (ref->format != fmt) { @@ -619,56 +603,56 @@ static int decode_frame_header(AVCodecContext *ctx, } } } - s->h.refreshctx = s->h.errorres ? 0 : get_bits1(&s->gb); - s->h.parallelmode = s->h.errorres ? 1 : get_bits1(&s->gb); - s->h.framectxid = c = get_bits(&s->gb, 2); + s->s.h.refreshctx = s->s.h.errorres ? 0 : get_bits1(&s->gb); + s->s.h.parallelmode = s->s.h.errorres ? 1 : get_bits1(&s->gb); + s->s.h.framectxid = c = get_bits(&s->gb, 2); /* loopfilter header data */ - if (s->h.keyframe || s->h.errorres || s->h.intraonly) { + if (s->s.h.keyframe || s->s.h.errorres || s->s.h.intraonly) { // reset loopfilter defaults - s->h.lf_delta.ref[0] = 1; - s->h.lf_delta.ref[1] = 0; - s->h.lf_delta.ref[2] = -1; - s->h.lf_delta.ref[3] = -1; - s->h.lf_delta.mode[0] = 0; - s->h.lf_delta.mode[1] = 0; - memset(s->h.segmentation.feat, 0, sizeof(s->h.segmentation.feat)); + s->s.h.lf_delta.ref[0] = 1; + s->s.h.lf_delta.ref[1] = 0; + s->s.h.lf_delta.ref[2] = -1; + s->s.h.lf_delta.ref[3] = -1; + s->s.h.lf_delta.mode[0] = 0; + s->s.h.lf_delta.mode[1] = 0; + memset(s->s.h.segmentation.feat, 0, sizeof(s->s.h.segmentation.feat)); } - s->h.filter.level = get_bits(&s->gb, 6); + s->s.h.filter.level = get_bits(&s->gb, 6); sharp = get_bits(&s->gb, 3); // if sharpness changed, reinit lim/mblim LUTs. if it didn't change, keep // the old cache values since they are still valid - if (s->h.filter.sharpness != sharp) + if (s->s.h.filter.sharpness != sharp) memset(s->filter_lut.lim_lut, 0, sizeof(s->filter_lut.lim_lut)); - s->h.filter.sharpness = sharp; - if ((s->h.lf_delta.enabled = get_bits1(&s->gb))) { - if ((s->h.lf_delta.updated = get_bits1(&s->gb))) { + s->s.h.filter.sharpness = sharp; + if ((s->s.h.lf_delta.enabled = get_bits1(&s->gb))) { + if ((s->s.h.lf_delta.updated = get_bits1(&s->gb))) { for (i = 0; i < 4; i++) if (get_bits1(&s->gb)) - s->h.lf_delta.ref[i] = get_sbits_inv(&s->gb, 6); + s->s.h.lf_delta.ref[i] = get_sbits_inv(&s->gb, 6); for (i = 0; i < 2; i++) if (get_bits1(&s->gb)) - s->h.lf_delta.mode[i] = get_sbits_inv(&s->gb, 6); + s->s.h.lf_delta.mode[i] = get_sbits_inv(&s->gb, 6); } } /* quantization header data */ - s->h.yac_qi = get_bits(&s->gb, 8); - s->h.ydc_qdelta = get_bits1(&s->gb) ? 
get_sbits_inv(&s->gb, 4) : 0; - s->h.uvdc_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0; - s->h.uvac_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0; - s->h.lossless = s->h.yac_qi == 0 && s->h.ydc_qdelta == 0 && - s->h.uvdc_qdelta == 0 && s->h.uvac_qdelta == 0; - if (s->h.lossless) + s->s.h.yac_qi = get_bits(&s->gb, 8); + s->s.h.ydc_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0; + s->s.h.uvdc_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0; + s->s.h.uvac_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0; + s->s.h.lossless = s->s.h.yac_qi == 0 && s->s.h.ydc_qdelta == 0 && + s->s.h.uvdc_qdelta == 0 && s->s.h.uvac_qdelta == 0; + if (s->s.h.lossless) ctx->properties |= FF_CODEC_PROPERTY_LOSSLESS; /* segmentation header info */ - if ((s->h.segmentation.enabled = get_bits1(&s->gb))) { - if ((s->h.segmentation.update_map = get_bits1(&s->gb))) { + if ((s->s.h.segmentation.enabled = get_bits1(&s->gb))) { + if ((s->s.h.segmentation.update_map = get_bits1(&s->gb))) { for (i = 0; i < 7; i++) s->prob.seg[i] = get_bits1(&s->gb) ? get_bits(&s->gb, 8) : 255; - if ((s->h.segmentation.temporal = get_bits1(&s->gb))) { + if ((s->s.h.segmentation.temporal = get_bits1(&s->gb))) { for (i = 0; i < 3; i++) s->prob.segpred[i] = get_bits1(&s->gb) ? get_bits(&s->gb, 8) : 255; @@ -676,65 +660,65 @@ static int decode_frame_header(AVCodecContext *ctx, } if (get_bits1(&s->gb)) { - s->h.segmentation.absolute_vals = get_bits1(&s->gb); + s->s.h.segmentation.absolute_vals = get_bits1(&s->gb); for (i = 0; i < 8; i++) { - if ((s->h.segmentation.feat[i].q_enabled = get_bits1(&s->gb))) - s->h.segmentation.feat[i].q_val = get_sbits_inv(&s->gb, 8); - if ((s->h.segmentation.feat[i].lf_enabled = get_bits1(&s->gb))) - s->h.segmentation.feat[i].lf_val = get_sbits_inv(&s->gb, 6); - if ((s->h.segmentation.feat[i].ref_enabled = get_bits1(&s->gb))) - s->h.segmentation.feat[i].ref_val = get_bits(&s->gb, 2); - s->h.segmentation.feat[i].skip_enabled = get_bits1(&s->gb); + if ((s->s.h.segmentation.feat[i].q_enabled = get_bits1(&s->gb))) + s->s.h.segmentation.feat[i].q_val = get_sbits_inv(&s->gb, 8); + if ((s->s.h.segmentation.feat[i].lf_enabled = get_bits1(&s->gb))) + s->s.h.segmentation.feat[i].lf_val = get_sbits_inv(&s->gb, 6); + if ((s->s.h.segmentation.feat[i].ref_enabled = get_bits1(&s->gb))) + s->s.h.segmentation.feat[i].ref_val = get_bits(&s->gb, 2); + s->s.h.segmentation.feat[i].skip_enabled = get_bits1(&s->gb); } } } // set qmul[] based on Y/UV, AC/DC and segmentation Q idx deltas - for (i = 0; i < (s->h.segmentation.enabled ? 8 : 1); i++) { + for (i = 0; i < (s->s.h.segmentation.enabled ? 
8 : 1); i++) { int qyac, qydc, quvac, quvdc, lflvl, sh; - if (s->h.segmentation.enabled && s->h.segmentation.feat[i].q_enabled) { - if (s->h.segmentation.absolute_vals) - qyac = av_clip_uintp2(s->h.segmentation.feat[i].q_val, 8); + if (s->s.h.segmentation.enabled && s->s.h.segmentation.feat[i].q_enabled) { + if (s->s.h.segmentation.absolute_vals) + qyac = av_clip_uintp2(s->s.h.segmentation.feat[i].q_val, 8); else - qyac = av_clip_uintp2(s->h.yac_qi + s->h.segmentation.feat[i].q_val, 8); + qyac = av_clip_uintp2(s->s.h.yac_qi + s->s.h.segmentation.feat[i].q_val, 8); } else { - qyac = s->h.yac_qi; + qyac = s->s.h.yac_qi; } - qydc = av_clip_uintp2(qyac + s->h.ydc_qdelta, 8); - quvdc = av_clip_uintp2(qyac + s->h.uvdc_qdelta, 8); - quvac = av_clip_uintp2(qyac + s->h.uvac_qdelta, 8); + qydc = av_clip_uintp2(qyac + s->s.h.ydc_qdelta, 8); + quvdc = av_clip_uintp2(qyac + s->s.h.uvdc_qdelta, 8); + quvac = av_clip_uintp2(qyac + s->s.h.uvac_qdelta, 8); qyac = av_clip_uintp2(qyac, 8); - s->h.segmentation.feat[i].qmul[0][0] = vp9_dc_qlookup[s->bpp_index][qydc]; - s->h.segmentation.feat[i].qmul[0][1] = vp9_ac_qlookup[s->bpp_index][qyac]; - s->h.segmentation.feat[i].qmul[1][0] = vp9_dc_qlookup[s->bpp_index][quvdc]; - s->h.segmentation.feat[i].qmul[1][1] = vp9_ac_qlookup[s->bpp_index][quvac]; + s->s.h.segmentation.feat[i].qmul[0][0] = vp9_dc_qlookup[s->bpp_index][qydc]; + s->s.h.segmentation.feat[i].qmul[0][1] = vp9_ac_qlookup[s->bpp_index][qyac]; + s->s.h.segmentation.feat[i].qmul[1][0] = vp9_dc_qlookup[s->bpp_index][quvdc]; + s->s.h.segmentation.feat[i].qmul[1][1] = vp9_ac_qlookup[s->bpp_index][quvac]; - sh = s->h.filter.level >= 32; - if (s->h.segmentation.enabled && s->h.segmentation.feat[i].lf_enabled) { - if (s->h.segmentation.absolute_vals) - lflvl = av_clip_uintp2(s->h.segmentation.feat[i].lf_val, 6); + sh = s->s.h.filter.level >= 32; + if (s->s.h.segmentation.enabled && s->s.h.segmentation.feat[i].lf_enabled) { + if (s->s.h.segmentation.absolute_vals) + lflvl = av_clip_uintp2(s->s.h.segmentation.feat[i].lf_val, 6); else - lflvl = av_clip_uintp2(s->h.filter.level + s->h.segmentation.feat[i].lf_val, 6); + lflvl = av_clip_uintp2(s->s.h.filter.level + s->s.h.segmentation.feat[i].lf_val, 6); } else { - lflvl = s->h.filter.level; + lflvl = s->s.h.filter.level; } - if (s->h.lf_delta.enabled) { - s->h.segmentation.feat[i].lflvl[0][0] = - s->h.segmentation.feat[i].lflvl[0][1] = - av_clip_uintp2(lflvl + (s->h.lf_delta.ref[0] << sh), 6); + if (s->s.h.lf_delta.enabled) { + s->s.h.segmentation.feat[i].lflvl[0][0] = + s->s.h.segmentation.feat[i].lflvl[0][1] = + av_clip_uintp2(lflvl + (s->s.h.lf_delta.ref[0] << sh), 6); for (j = 1; j < 4; j++) { - s->h.segmentation.feat[i].lflvl[j][0] = - av_clip_uintp2(lflvl + ((s->h.lf_delta.ref[j] + - s->h.lf_delta.mode[0]) * (1 << sh)), 6); - s->h.segmentation.feat[i].lflvl[j][1] = - av_clip_uintp2(lflvl + ((s->h.lf_delta.ref[j] + - s->h.lf_delta.mode[1]) * (1 << sh)), 6); + s->s.h.segmentation.feat[i].lflvl[j][0] = + av_clip_uintp2(lflvl + ((s->s.h.lf_delta.ref[j] + + s->s.h.lf_delta.mode[0]) * (1 << sh)), 6); + s->s.h.segmentation.feat[i].lflvl[j][1] = + av_clip_uintp2(lflvl + ((s->s.h.lf_delta.ref[j] + + s->s.h.lf_delta.mode[1]) * (1 << sh)), 6); } } else { - memset(s->h.segmentation.feat[i].lflvl, lflvl, - sizeof(s->h.segmentation.feat[i].lflvl)); + memset(s->s.h.segmentation.feat[i].lflvl, lflvl, + sizeof(s->s.h.segmentation.feat[i].lflvl)); } } @@ -743,30 +727,30 @@ static int decode_frame_header(AVCodecContext *ctx, av_log(ctx, AV_LOG_ERROR, "Failed to initialize 
decoder for %dx%d @ %d\n", w, h, fmt); return res; } - for (s->h.tiling.log2_tile_cols = 0; - s->sb_cols > (64 << s->h.tiling.log2_tile_cols); - s->h.tiling.log2_tile_cols++) ; + for (s->s.h.tiling.log2_tile_cols = 0; + s->sb_cols > (64 << s->s.h.tiling.log2_tile_cols); + s->s.h.tiling.log2_tile_cols++) ; for (max = 0; (s->sb_cols >> max) >= 4; max++) ; max = FFMAX(0, max - 1); - while (max > s->h.tiling.log2_tile_cols) { + while (max > s->s.h.tiling.log2_tile_cols) { if (get_bits1(&s->gb)) - s->h.tiling.log2_tile_cols++; + s->s.h.tiling.log2_tile_cols++; else break; } - s->h.tiling.log2_tile_rows = decode012(&s->gb); - s->h.tiling.tile_rows = 1 << s->h.tiling.log2_tile_rows; - if (s->h.tiling.tile_cols != (1 << s->h.tiling.log2_tile_cols)) { - s->h.tiling.tile_cols = 1 << s->h.tiling.log2_tile_cols; + s->s.h.tiling.log2_tile_rows = decode012(&s->gb); + s->s.h.tiling.tile_rows = 1 << s->s.h.tiling.log2_tile_rows; + if (s->s.h.tiling.tile_cols != (1 << s->s.h.tiling.log2_tile_cols)) { + s->s.h.tiling.tile_cols = 1 << s->s.h.tiling.log2_tile_cols; s->c_b = av_fast_realloc(s->c_b, &s->c_b_size, - sizeof(VP56RangeCoder) * s->h.tiling.tile_cols); + sizeof(VP56RangeCoder) * s->s.h.tiling.tile_cols); if (!s->c_b) { av_log(ctx, AV_LOG_ERROR, "Ran out of memory during range coder init\n"); return AVERROR(ENOMEM); } } - if (s->h.keyframe || s->h.errorres || (s->h.intraonly && s->h.resetctx == 3)) { + if (s->s.h.keyframe || s->s.h.errorres || (s->s.h.intraonly && s->s.h.resetctx == 3)) { s->prob_ctx[0].p = s->prob_ctx[1].p = s->prob_ctx[2].p = s->prob_ctx[3].p = vp9_default_probs; memcpy(s->prob_ctx[0].coef, vp9_default_coef_probs, @@ -777,7 +761,7 @@ static int decode_frame_header(AVCodecContext *ctx, sizeof(vp9_default_coef_probs)); memcpy(s->prob_ctx[3].coef, vp9_default_coef_probs, sizeof(vp9_default_coef_probs)); - } else if (s->h.intraonly && s->h.resetctx == 2) { + } else if (s->s.h.intraonly && s->s.h.resetctx == 2) { s->prob_ctx[c].p = vp9_default_probs; memcpy(s->prob_ctx[c].coef, vp9_default_coef_probs, sizeof(vp9_default_coef_probs)); @@ -796,7 +780,7 @@ static int decode_frame_header(AVCodecContext *ctx, return AVERROR_INVALIDDATA; } - if (s->h.keyframe || s->h.intraonly) { + if (s->s.h.keyframe || s->s.h.intraonly) { memset(s->counts.coef, 0, sizeof(s->counts.coef)); memset(s->counts.eob, 0, sizeof(s->counts.eob)); } else { @@ -808,14 +792,14 @@ static int decode_frame_header(AVCodecContext *ctx, s->prob.p = s->prob_ctx[c].p; // txfm updates - if (s->h.lossless) { - s->h.txfmmode = TX_4X4; + if (s->s.h.lossless) { + s->s.h.txfmmode = TX_4X4; } else { - s->h.txfmmode = vp8_rac_get_uint(&s->c, 2); - if (s->h.txfmmode == 3) - s->h.txfmmode += vp8_rac_get(&s->c); + s->s.h.txfmmode = vp8_rac_get_uint(&s->c, 2); + if (s->s.h.txfmmode == 3) + s->s.h.txfmmode += vp8_rac_get(&s->c); - if (s->h.txfmmode == TX_SWITCHABLE) { + if (s->s.h.txfmmode == TX_SWITCHABLE) { for (i = 0; i < 2; i++) if (vp56_rac_get_prob_branchy(&s->c, 252)) s->prob.p.tx8p[i] = update_prob(&s->c, s->prob.p.tx8p[i]); @@ -866,7 +850,7 @@ static int decode_frame_header(AVCodecContext *ctx, p[3] = 0; } } - if (s->h.txfmmode == i) + if (s->s.h.txfmmode == i) break; } @@ -874,14 +858,14 @@ static int decode_frame_header(AVCodecContext *ctx, for (i = 0; i < 3; i++) if (vp56_rac_get_prob_branchy(&s->c, 252)) s->prob.p.skip[i] = update_prob(&s->c, s->prob.p.skip[i]); - if (!s->h.keyframe && !s->h.intraonly) { + if (!s->s.h.keyframe && !s->s.h.intraonly) { for (i = 0; i < 7; i++) for (j = 0; j < 3; j++) if 
(vp56_rac_get_prob_branchy(&s->c, 252)) s->prob.p.mv_mode[i][j] = update_prob(&s->c, s->prob.p.mv_mode[i][j]); - if (s->h.filtermode == FILTER_SWITCHABLE) + if (s->s.h.filtermode == FILTER_SWITCHABLE) for (i = 0; i < 4; i++) for (j = 0; j < 2; j++) if (vp56_rac_get_prob_branchy(&s->c, 252)) @@ -892,20 +876,20 @@ static int decode_frame_header(AVCodecContext *ctx, if (vp56_rac_get_prob_branchy(&s->c, 252)) s->prob.p.intra[i] = update_prob(&s->c, s->prob.p.intra[i]); - if (s->h.allowcompinter) { - s->h.comppredmode = vp8_rac_get(&s->c); - if (s->h.comppredmode) - s->h.comppredmode += vp8_rac_get(&s->c); - if (s->h.comppredmode == PRED_SWITCHABLE) + if (s->s.h.allowcompinter) { + s->s.h.comppredmode = vp8_rac_get(&s->c); + if (s->s.h.comppredmode) + s->s.h.comppredmode += vp8_rac_get(&s->c); + if (s->s.h.comppredmode == PRED_SWITCHABLE) for (i = 0; i < 5; i++) if (vp56_rac_get_prob_branchy(&s->c, 252)) s->prob.p.comp[i] = update_prob(&s->c, s->prob.p.comp[i]); } else { - s->h.comppredmode = PRED_SINGLEREF; + s->s.h.comppredmode = PRED_SINGLEREF; } - if (s->h.comppredmode != PRED_COMPREF) { + if (s->s.h.comppredmode != PRED_COMPREF) { for (i = 0; i < 5; i++) { if (vp56_rac_get_prob_branchy(&s->c, 252)) s->prob.p.single_ref[i][0] = @@ -916,7 +900,7 @@ static int decode_frame_header(AVCodecContext *ctx, } } - if (s->h.comppredmode != PRED_SINGLEREF) { + if (s->s.h.comppredmode != PRED_SINGLEREF) { for (i = 0; i < 5; i++) if (vp56_rac_get_prob_branchy(&s->c, 252)) s->prob.p.comp_ref[i] = @@ -972,7 +956,7 @@ static int decode_frame_header(AVCodecContext *ctx, (vp8_rac_get_uint(&s->c, 7) << 1) | 1; } - if (s->h.highprecisionmvs) { + if (s->s.h.highprecisionmvs) { for (i = 0; i < 2; i++) { if (vp56_rac_get_prob_branchy(&s->c, 252)) s->prob.p.mv_comp[i].class0_hp = @@ -1097,7 +1081,7 @@ static void find_ref_mvs(VP9Context *s, } while (0) if (row > 0) { - struct VP9mvrefPair *mv = &s->frames[CUR_FRAME].mv[(row - 1) * s->sb_cols * 8 + col]; + struct VP9mvrefPair *mv = &s->s.frames[CUR_FRAME].mv[(row - 1) * s->sb_cols * 8 + col]; if (mv->ref[0] == ref) { RETURN_MV(s->above_mv_ctx[2 * col + (sb & 1)][0]); } else if (mv->ref[1] == ref) { @@ -1105,7 +1089,7 @@ static void find_ref_mvs(VP9Context *s, } } if (col > s->tile_col_start) { - struct VP9mvrefPair *mv = &s->frames[CUR_FRAME].mv[row * s->sb_cols * 8 + col - 1]; + struct VP9mvrefPair *mv = &s->s.frames[CUR_FRAME].mv[row * s->sb_cols * 8 + col - 1]; if (mv->ref[0] == ref) { RETURN_MV(s->left_mv_ctx[2 * row7 + (sb >> 1)][0]); } else if (mv->ref[1] == ref) { @@ -1122,7 +1106,7 @@ static void find_ref_mvs(VP9Context *s, int c = p[i][0] + col, r = p[i][1] + row; if (c >= s->tile_col_start && c < s->cols && r >= 0 && r < s->rows) { - struct VP9mvrefPair *mv = &s->frames[CUR_FRAME].mv[r * s->sb_cols * 8 + c]; + struct VP9mvrefPair *mv = &s->s.frames[CUR_FRAME].mv[r * s->sb_cols * 8 + c]; if (mv->ref[0] == ref) { RETURN_MV(mv->mv[0]); @@ -1133,11 +1117,11 @@ static void find_ref_mvs(VP9Context *s, } // MV at this position in previous frame, using same reference frame - if (s->h.use_last_frame_mvs) { - struct VP9mvrefPair *mv = &s->frames[REF_FRAME_MVPAIR].mv[row * s->sb_cols * 8 + col]; + if (s->s.h.use_last_frame_mvs) { + struct VP9mvrefPair *mv = &s->s.frames[REF_FRAME_MVPAIR].mv[row * s->sb_cols * 8 + col]; - if (!s->frames[REF_FRAME_MVPAIR].uses_2pass) - ff_thread_await_progress(&s->frames[REF_FRAME_MVPAIR].tf, row >> 3, 0); + if (!s->s.frames[REF_FRAME_MVPAIR].uses_2pass) + ff_thread_await_progress(&s->s.frames[REF_FRAME_MVPAIR].tf, row >> 3, 0); if 
(mv->ref[0] == ref) { RETURN_MV(mv->mv[0]); } else if (mv->ref[1] == ref) { @@ -1160,33 +1144,33 @@ static void find_ref_mvs(VP9Context *s, int c = p[i][0] + col, r = p[i][1] + row; if (c >= s->tile_col_start && c < s->cols && r >= 0 && r < s->rows) { - struct VP9mvrefPair *mv = &s->frames[CUR_FRAME].mv[r * s->sb_cols * 8 + c]; + struct VP9mvrefPair *mv = &s->s.frames[CUR_FRAME].mv[r * s->sb_cols * 8 + c]; if (mv->ref[0] != ref && mv->ref[0] >= 0) { - RETURN_SCALE_MV(mv->mv[0], s->h.signbias[mv->ref[0]] != s->h.signbias[ref]); + RETURN_SCALE_MV(mv->mv[0], s->s.h.signbias[mv->ref[0]] != s->s.h.signbias[ref]); } if (mv->ref[1] != ref && mv->ref[1] >= 0 && // BUG - libvpx has this condition regardless of whether // we used the first ref MV and pre-scaling AV_RN32A(&mv->mv[0]) != AV_RN32A(&mv->mv[1])) { - RETURN_SCALE_MV(mv->mv[1], s->h.signbias[mv->ref[1]] != s->h.signbias[ref]); + RETURN_SCALE_MV(mv->mv[1], s->s.h.signbias[mv->ref[1]] != s->s.h.signbias[ref]); } } } // MV at this position in previous frame, using different reference frame - if (s->h.use_last_frame_mvs) { - struct VP9mvrefPair *mv = &s->frames[REF_FRAME_MVPAIR].mv[row * s->sb_cols * 8 + col]; + if (s->s.h.use_last_frame_mvs) { + struct VP9mvrefPair *mv = &s->s.frames[REF_FRAME_MVPAIR].mv[row * s->sb_cols * 8 + col]; // no need to await_progress, because we already did that above if (mv->ref[0] != ref && mv->ref[0] >= 0) { - RETURN_SCALE_MV(mv->mv[0], s->h.signbias[mv->ref[0]] != s->h.signbias[ref]); + RETURN_SCALE_MV(mv->mv[0], s->s.h.signbias[mv->ref[0]] != s->s.h.signbias[ref]); } if (mv->ref[1] != ref && mv->ref[1] >= 0 && // BUG - libvpx has this condition regardless of whether // we used the first ref MV and pre-scaling AV_RN32A(&mv->mv[0]) != AV_RN32A(&mv->mv[1])) { - RETURN_SCALE_MV(mv->mv[1], s->h.signbias[mv->ref[1]] != s->h.signbias[ref]); + RETURN_SCALE_MV(mv->mv[1], s->s.h.signbias[mv->ref[1]] != s->s.h.signbias[ref]); } } @@ -1265,7 +1249,7 @@ static void fill_mv(VP9Context *s, mode == NEWMV ? -1 : sb); // FIXME maybe move this code into find_ref_mvs() if ((mode == NEWMV || sb == -1) && - !(hp = s->h.highprecisionmvs && abs(mv[0].x) < 64 && abs(mv[0].y) < 64)) { + !(hp = s->s.h.highprecisionmvs && abs(mv[0].x) < 64 && abs(mv[0].y) < 64)) { if (mv[0].y & 1) { if (mv[0].y < 0) mv[0].y++; @@ -1295,7 +1279,7 @@ static void fill_mv(VP9Context *s, find_ref_mvs(s, &mv[1], b->ref[1], 1, mode == NEARMV, mode == NEWMV ? -1 : sb); if ((mode == NEWMV || sb == -1) && - !(hp = s->h.highprecisionmvs && abs(mv[1].x) < 64 && abs(mv[1].y) < 64)) { + !(hp = s->s.h.highprecisionmvs && abs(mv[1].x) < 64 && abs(mv[1].y) < 64)) { if (mv[1].y & 1) { if (mv[1].y < 0) mv[1].y++; @@ -1390,22 +1374,22 @@ static void decode_mode(AVCodecContext *ctx) int have_a = row > 0, have_l = col > s->tile_col_start; int vref, filter_id; - if (!s->h.segmentation.enabled) { + if (!s->s.h.segmentation.enabled) { b->seg_id = 0; - } else if (s->h.keyframe || s->h.intraonly) { - b->seg_id = !s->h.segmentation.update_map ? 0 : + } else if (s->s.h.keyframe || s->s.h.intraonly) { + b->seg_id = !s->s.h.segmentation.update_map ? 
0 : vp8_rac_get_tree(&s->c, vp9_segmentation_tree, s->prob.seg); - } else if (!s->h.segmentation.update_map || - (s->h.segmentation.temporal && + } else if (!s->s.h.segmentation.update_map || + (s->s.h.segmentation.temporal && vp56_rac_get_prob_branchy(&s->c, s->prob.segpred[s->above_segpred_ctx[col] + s->left_segpred_ctx[row7]]))) { - if (!s->h.errorres && s->frames[REF_FRAME_SEGMAP].segmentation_map) { + if (!s->s.h.errorres && s->s.frames[REF_FRAME_SEGMAP].segmentation_map) { int pred = 8, x; - uint8_t *refsegmap = s->frames[REF_FRAME_SEGMAP].segmentation_map; + uint8_t *refsegmap = s->s.frames[REF_FRAME_SEGMAP].segmentation_map; - if (!s->frames[REF_FRAME_SEGMAP].uses_2pass) - ff_thread_await_progress(&s->frames[REF_FRAME_SEGMAP].tf, row >> 3, 0); + if (!s->s.frames[REF_FRAME_SEGMAP].uses_2pass) + ff_thread_await_progress(&s->s.frames[REF_FRAME_SEGMAP].tf, row >> 3, 0); for (y = 0; y < h4; y++) { int idx_base = (y + row) * 8 * s->sb_cols + col; for (x = 0; x < w4; x++) @@ -1426,24 +1410,24 @@ static void decode_mode(AVCodecContext *ctx) memset(&s->above_segpred_ctx[col], 0, w4); memset(&s->left_segpred_ctx[row7], 0, h4); } - if (s->h.segmentation.enabled && - (s->h.segmentation.update_map || s->h.keyframe || s->h.intraonly)) { - setctx_2d(&s->frames[CUR_FRAME].segmentation_map[row * 8 * s->sb_cols + col], + if (s->s.h.segmentation.enabled && + (s->s.h.segmentation.update_map || s->s.h.keyframe || s->s.h.intraonly)) { + setctx_2d(&s->s.frames[CUR_FRAME].segmentation_map[row * 8 * s->sb_cols + col], bw4, bh4, 8 * s->sb_cols, b->seg_id); } - b->skip = s->h.segmentation.enabled && - s->h.segmentation.feat[b->seg_id].skip_enabled; + b->skip = s->s.h.segmentation.enabled && + s->s.h.segmentation.feat[b->seg_id].skip_enabled; if (!b->skip) { int c = s->left_skip_ctx[row7] + s->above_skip_ctx[col]; b->skip = vp56_rac_get_prob(&s->c, s->prob.p.skip[c]); s->counts.skip[c][b->skip]++; } - if (s->h.keyframe || s->h.intraonly) { + if (s->s.h.keyframe || s->s.h.intraonly) { b->intra = 1; - } else if (s->h.segmentation.enabled && s->h.segmentation.feat[b->seg_id].ref_enabled) { - b->intra = !s->h.segmentation.feat[b->seg_id].ref_val; + } else if (s->s.h.segmentation.enabled && s->s.h.segmentation.feat[b->seg_id].ref_enabled) { + b->intra = !s->s.h.segmentation.feat[b->seg_id].ref_val; } else { int c, bit; @@ -1459,7 +1443,7 @@ static void decode_mode(AVCodecContext *ctx) b->intra = !bit; } - if ((b->intra || !b->skip) && s->h.txfmmode == TX_SWITCHABLE) { + if ((b->intra || !b->skip) && s->s.h.txfmmode == TX_SWITCHABLE) { int c; if (have_a) { if (have_l) { @@ -1502,10 +1486,10 @@ static void decode_mode(AVCodecContext *ctx) break; } } else { - b->tx = FFMIN(max_tx, s->h.txfmmode); + b->tx = FFMIN(max_tx, s->s.h.txfmmode); } - if (s->h.keyframe || s->h.intraonly) { + if (s->s.h.keyframe || s->s.h.intraonly) { uint8_t *a = &s->above_mode_ctx[col * 2]; uint8_t *l = &s->left_mode_ctx[(row7) << 1]; @@ -1607,14 +1591,14 @@ static void decode_mode(AVCodecContext *ctx) { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3, 3, 3, 4 }, }; - if (s->h.segmentation.enabled && s->h.segmentation.feat[b->seg_id].ref_enabled) { - av_assert2(s->h.segmentation.feat[b->seg_id].ref_val != 0); + if (s->s.h.segmentation.enabled && s->s.h.segmentation.feat[b->seg_id].ref_enabled) { + av_assert2(s->s.h.segmentation.feat[b->seg_id].ref_val != 0); b->comp = 0; - b->ref[0] = s->h.segmentation.feat[b->seg_id].ref_val - 1; + b->ref[0] = s->s.h.segmentation.feat[b->seg_id].ref_val - 1; } else { // read comp_pred flag - if (s->h.comppredmode != 
PRED_SWITCHABLE) { - b->comp = s->h.comppredmode == PRED_COMPREF; + if (s->s.h.comppredmode != PRED_SWITCHABLE) { + b->comp = s->s.h.comppredmode == PRED_COMPREF; } else { int c; @@ -1625,23 +1609,23 @@ static void decode_mode(AVCodecContext *ctx) c = 4; } else if (s->above_comp_ctx[col]) { c = 2 + (s->left_intra_ctx[row7] || - s->left_ref_ctx[row7] == s->h.fixcompref); + s->left_ref_ctx[row7] == s->s.h.fixcompref); } else if (s->left_comp_ctx[row7]) { c = 2 + (s->above_intra_ctx[col] || - s->above_ref_ctx[col] == s->h.fixcompref); + s->above_ref_ctx[col] == s->s.h.fixcompref); } else { c = (!s->above_intra_ctx[col] && - s->above_ref_ctx[col] == s->h.fixcompref) ^ + s->above_ref_ctx[col] == s->s.h.fixcompref) ^ (!s->left_intra_ctx[row7] && - s->left_ref_ctx[row & 7] == s->h.fixcompref); + s->left_ref_ctx[row & 7] == s->s.h.fixcompref); } } else { c = s->above_comp_ctx[col] ? 3 : - (!s->above_intra_ctx[col] && s->above_ref_ctx[col] == s->h.fixcompref); + (!s->above_intra_ctx[col] && s->above_ref_ctx[col] == s->s.h.fixcompref); } } else if (have_l) { c = s->left_comp_ctx[row7] ? 3 : - (!s->left_intra_ctx[row7] && s->left_ref_ctx[row7] == s->h.fixcompref); + (!s->left_intra_ctx[row7] && s->left_ref_ctx[row7] == s->s.h.fixcompref); } else { c = 1; } @@ -1653,9 +1637,9 @@ static void decode_mode(AVCodecContext *ctx) // FIXME probably cache a few variables here to prevent repetitive // memory accesses below if (b->comp) /* two references */ { - int fix_idx = s->h.signbias[s->h.fixcompref], var_idx = !fix_idx, c, bit; + int fix_idx = s->s.h.signbias[s->s.h.fixcompref], var_idx = !fix_idx, c, bit; - b->ref[fix_idx] = s->h.fixcompref; + b->ref[fix_idx] = s->s.h.fixcompref; // FIXME can this codeblob be replaced by some sort of LUT? if (have_a) { if (have_l) { @@ -1663,35 +1647,35 @@ static void decode_mode(AVCodecContext *ctx) if (s->left_intra_ctx[row7]) { c = 2; } else { - c = 1 + 2 * (s->left_ref_ctx[row7] != s->h.varcompref[1]); + c = 1 + 2 * (s->left_ref_ctx[row7] != s->s.h.varcompref[1]); } } else if (s->left_intra_ctx[row7]) { - c = 1 + 2 * (s->above_ref_ctx[col] != s->h.varcompref[1]); + c = 1 + 2 * (s->above_ref_ctx[col] != s->s.h.varcompref[1]); } else { int refl = s->left_ref_ctx[row7], refa = s->above_ref_ctx[col]; - if (refl == refa && refa == s->h.varcompref[1]) { + if (refl == refa && refa == s->s.h.varcompref[1]) { c = 0; } else if (!s->left_comp_ctx[row7] && !s->above_comp_ctx[col]) { - if ((refa == s->h.fixcompref && refl == s->h.varcompref[0]) || - (refl == s->h.fixcompref && refa == s->h.varcompref[0])) { + if ((refa == s->s.h.fixcompref && refl == s->s.h.varcompref[0]) || + (refl == s->s.h.fixcompref && refa == s->s.h.varcompref[0])) { c = 4; } else { c = (refa == refl) ? 3 : 1; } } else if (!s->left_comp_ctx[row7]) { - if (refa == s->h.varcompref[1] && refl != s->h.varcompref[1]) { + if (refa == s->s.h.varcompref[1] && refl != s->s.h.varcompref[1]) { c = 1; } else { - c = (refl == s->h.varcompref[1] && - refa != s->h.varcompref[1]) ? 2 : 4; + c = (refl == s->s.h.varcompref[1] && + refa != s->s.h.varcompref[1]) ? 2 : 4; } } else if (!s->above_comp_ctx[col]) { - if (refl == s->h.varcompref[1] && refa != s->h.varcompref[1]) { + if (refl == s->s.h.varcompref[1] && refa != s->s.h.varcompref[1]) { c = 1; } else { - c = (refa == s->h.varcompref[1] && - refl != s->h.varcompref[1]) ? 2 : 4; + c = (refa == s->s.h.varcompref[1] && + refl != s->s.h.varcompref[1]) ? 2 : 4; } } else { c = (refl == refa) ? 
4 : 2; @@ -1701,24 +1685,24 @@ static void decode_mode(AVCodecContext *ctx) if (s->above_intra_ctx[col]) { c = 2; } else if (s->above_comp_ctx[col]) { - c = 4 * (s->above_ref_ctx[col] != s->h.varcompref[1]); + c = 4 * (s->above_ref_ctx[col] != s->s.h.varcompref[1]); } else { - c = 3 * (s->above_ref_ctx[col] != s->h.varcompref[1]); + c = 3 * (s->above_ref_ctx[col] != s->s.h.varcompref[1]); } } } else if (have_l) { if (s->left_intra_ctx[row7]) { c = 2; } else if (s->left_comp_ctx[row7]) { - c = 4 * (s->left_ref_ctx[row7] != s->h.varcompref[1]); + c = 4 * (s->left_ref_ctx[row7] != s->s.h.varcompref[1]); } else { - c = 3 * (s->left_ref_ctx[row7] != s->h.varcompref[1]); + c = 3 * (s->left_ref_ctx[row7] != s->s.h.varcompref[1]); } } else { c = 2; } bit = vp56_rac_get_prob(&s->c, s->prob.p.comp_ref[c]); - b->ref[var_idx] = s->h.varcompref[bit]; + b->ref[var_idx] = s->s.h.varcompref[bit]; s->counts.comp_ref[c][bit]++; } else /* single reference */ { int bit, c; @@ -1727,22 +1711,22 @@ static void decode_mode(AVCodecContext *ctx) if (have_l && !s->left_intra_ctx[row7]) { if (s->left_comp_ctx[row7]) { if (s->above_comp_ctx[col]) { - c = 1 + (!s->h.fixcompref || !s->left_ref_ctx[row7] || + c = 1 + (!s->s.h.fixcompref || !s->left_ref_ctx[row7] || !s->above_ref_ctx[col]); } else { c = (3 * !s->above_ref_ctx[col]) + - (!s->h.fixcompref || !s->left_ref_ctx[row7]); + (!s->s.h.fixcompref || !s->left_ref_ctx[row7]); } } else if (s->above_comp_ctx[col]) { c = (3 * !s->left_ref_ctx[row7]) + - (!s->h.fixcompref || !s->above_ref_ctx[col]); + (!s->s.h.fixcompref || !s->above_ref_ctx[col]); } else { c = 2 * !s->left_ref_ctx[row7] + 2 * !s->above_ref_ctx[col]; } } else if (s->above_intra_ctx[col]) { c = 2; } else if (s->above_comp_ctx[col]) { - c = 1 + (!s->h.fixcompref || !s->above_ref_ctx[col]); + c = 1 + (!s->s.h.fixcompref || !s->above_ref_ctx[col]); } else { c = 4 * (!s->above_ref_ctx[col]); } @@ -1750,7 +1734,7 @@ static void decode_mode(AVCodecContext *ctx) if (s->left_intra_ctx[row7]) { c = 2; } else if (s->left_comp_ctx[row7]) { - c = 1 + (!s->h.fixcompref || !s->left_ref_ctx[row7]); + c = 1 + (!s->s.h.fixcompref || !s->left_ref_ctx[row7]); } else { c = 4 * (!s->left_ref_ctx[row7]); } @@ -1769,7 +1753,7 @@ static void decode_mode(AVCodecContext *ctx) if (s->above_intra_ctx[col]) { c = 2; } else if (s->above_comp_ctx[col]) { - c = 1 + 2 * (s->h.fixcompref == 1 || + c = 1 + 2 * (s->s.h.fixcompref == 1 || s->above_ref_ctx[col] == 1); } else if (!s->above_ref_ctx[col]) { c = 3; @@ -1780,7 +1764,7 @@ static void decode_mode(AVCodecContext *ctx) if (s->left_intra_ctx[row7]) { c = 2; } else if (s->left_comp_ctx[row7]) { - c = 1 + 2 * (s->h.fixcompref == 1 || + c = 1 + 2 * (s->s.h.fixcompref == 1 || s->left_ref_ctx[row7] == 1); } else if (!s->left_ref_ctx[row7]) { c = 3; @@ -1790,25 +1774,25 @@ static void decode_mode(AVCodecContext *ctx) } else if (s->above_comp_ctx[col]) { if (s->left_comp_ctx[row7]) { if (s->left_ref_ctx[row7] == s->above_ref_ctx[col]) { - c = 3 * (s->h.fixcompref == 1 || + c = 3 * (s->s.h.fixcompref == 1 || s->left_ref_ctx[row7] == 1); } else { c = 2; } } else if (!s->left_ref_ctx[row7]) { - c = 1 + 2 * (s->h.fixcompref == 1 || + c = 1 + 2 * (s->s.h.fixcompref == 1 || s->above_ref_ctx[col] == 1); } else { c = 3 * (s->left_ref_ctx[row7] == 1) + - (s->h.fixcompref == 1 || s->above_ref_ctx[col] == 1); + (s->s.h.fixcompref == 1 || s->above_ref_ctx[col] == 1); } } else if (s->left_comp_ctx[row7]) { if (!s->above_ref_ctx[col]) { - c = 1 + 2 * (s->h.fixcompref == 1 || + c = 1 + 2 * 
(s->s.h.fixcompref == 1 || s->left_ref_ctx[row7] == 1); } else { c = 3 * (s->above_ref_ctx[col] == 1) + - (s->h.fixcompref == 1 || s->left_ref_ctx[row7] == 1); + (s->s.h.fixcompref == 1 || s->left_ref_ctx[row7] == 1); } } else if (!s->above_ref_ctx[col]) { if (!s->left_ref_ctx[row7]) { @@ -1827,7 +1811,7 @@ static void decode_mode(AVCodecContext *ctx) (!s->above_comp_ctx[col] && !s->above_ref_ctx[col])) { c = 2; } else if (s->above_comp_ctx[col]) { - c = 3 * (s->h.fixcompref == 1 || s->above_ref_ctx[col] == 1); + c = 3 * (s->s.h.fixcompref == 1 || s->above_ref_ctx[col] == 1); } else { c = 4 * (s->above_ref_ctx[col] == 1); } @@ -1837,7 +1821,7 @@ static void decode_mode(AVCodecContext *ctx) (!s->left_comp_ctx[row7] && !s->left_ref_ctx[row7])) { c = 2; } else if (s->left_comp_ctx[row7]) { - c = 3 * (s->h.fixcompref == 1 || s->left_ref_ctx[row7] == 1); + c = 3 * (s->s.h.fixcompref == 1 || s->left_ref_ctx[row7] == 1); } else { c = 4 * (s->left_ref_ctx[row7] == 1); } @@ -1852,7 +1836,7 @@ static void decode_mode(AVCodecContext *ctx) } if (b->bs <= BS_8x8) { - if (s->h.segmentation.enabled && s->h.segmentation.feat[b->seg_id].skip_enabled) { + if (s->s.h.segmentation.enabled && s->s.h.segmentation.feat[b->seg_id].skip_enabled) { b->mode[0] = b->mode[1] = b->mode[2] = b->mode[3] = ZEROMV; } else { static const uint8_t off[10] = { @@ -1871,7 +1855,7 @@ static void decode_mode(AVCodecContext *ctx) } } - if (s->h.filtermode == FILTER_SWITCHABLE) { + if (s->s.h.filtermode == FILTER_SWITCHABLE) { int c; if (have_a && s->above_mode_ctx[col] >= NEARESTMV) { @@ -1892,7 +1876,7 @@ static void decode_mode(AVCodecContext *ctx) s->counts.filter[c][filter_id]++; b->filter = vp9_filter_lut[filter_id]; } else { - b->filter = s->h.filtermode; + b->filter = s->s.h.filtermode; } if (b->bs > BS_8x8) { @@ -1948,7 +1932,7 @@ static void decode_mode(AVCodecContext *ctx) AV_COPY32(&b->mv[3][1], &b->mv[0][1]); } - vref = b->ref[b->comp ? s->h.signbias[s->h.varcompref[0]] : 0]; + vref = b->ref[b->comp ? 
s->s.h.signbias[s->s.h.varcompref[0]] : 0]; } #if HAVE_FAST_64BIT @@ -1994,13 +1978,13 @@ static void decode_mode(AVCodecContext *ctx) SPLAT_CTX(s->dir##_skip_ctx[off], b->skip, n); \ SPLAT_CTX(s->dir##_txfm_ctx[off], b->tx, n); \ SPLAT_CTX(s->dir##_partition_ctx[off], dir##_ctx[b->bs], n); \ - if (!s->h.keyframe && !s->h.intraonly) { \ + if (!s->s.h.keyframe && !s->s.h.intraonly) { \ SPLAT_CTX(s->dir##_intra_ctx[off], b->intra, n); \ SPLAT_CTX(s->dir##_comp_ctx[off], b->comp, n); \ SPLAT_CTX(s->dir##_mode_ctx[off], b->mode[3], n); \ if (!b->intra) { \ SPLAT_CTX(s->dir##_ref_ctx[off], vref, n); \ - if (s->h.filtermode == FILTER_SWITCHABLE) { \ + if (s->s.h.filtermode == FILTER_SWITCHABLE) { \ SPLAT_CTX(s->dir##_filter_ctx[off], filter_id, n); \ } \ } \ @@ -2020,7 +2004,7 @@ static void decode_mode(AVCodecContext *ctx) #undef SPLAT_CTX #undef SET_CTXS - if (!s->h.keyframe && !s->h.intraonly) { + if (!s->s.h.keyframe && !s->s.h.intraonly) { if (b->bs > BS_8x8) { int mv0 = AV_RN32A(&b->mv[3][0]), mv1 = AV_RN32A(&b->mv[3][1]); @@ -2049,7 +2033,7 @@ static void decode_mode(AVCodecContext *ctx) // FIXME kinda ugly for (y = 0; y < h4; y++) { int x, o = (row + y) * s->sb_cols * 8 + col; - struct VP9mvrefPair *mv = &s->frames[CUR_FRAME].mv[o]; + struct VP9mvrefPair *mv = &s->s.frames[CUR_FRAME].mv[o]; if (b->intra) { for (x = 0; x < w4; x++) { @@ -2250,8 +2234,8 @@ static av_always_inline int decode_coeffs(AVCodecContext *ctx, int is8bitsperpix int end_x = FFMIN(2 * (s->cols - col), w4); int end_y = FFMIN(2 * (s->rows - row), h4); int n, pl, x, y, res; - int16_t (*qmul)[2] = s->h.segmentation.feat[b->seg_id].qmul; - int tx = 4 * s->h.lossless + b->tx; + int16_t (*qmul)[2] = s->s.h.segmentation.feat[b->seg_id].qmul; + int tx = 4 * s->s.h.lossless + b->tx; const int16_t * const *yscans = vp9_scans[tx]; const int16_t (* const *ynbs)[2] = vp9_scans_nb[tx]; const int16_t *uvscan = vp9_scans[b->uvtx][DCT_DCT]; @@ -2606,9 +2590,9 @@ static av_always_inline void intra_recon(AVCodecContext *ctx, ptrdiff_t y_off, int h4 = bwh_tab[1][b->bs][1] << 1, x, y, step = 1 << (b->tx * 2); int end_x = FFMIN(2 * (s->cols - col), w4); int end_y = FFMIN(2 * (s->rows - row), h4); - int tx = 4 * s->h.lossless + b->tx, uvtx = b->uvtx + 4 * s->h.lossless; + int tx = 4 * s->s.h.lossless + b->tx, uvtx = b->uvtx + 4 * s->s.h.lossless; int uvstep1d = 1 << b->uvtx, p; - uint8_t *dst = s->dst[0], *dst_r = s->frames[CUR_FRAME].tf.f->data[0] + y_off; + uint8_t *dst = s->dst[0], *dst_r = s->s.frames[CUR_FRAME].tf.f->data[0] + y_off; LOCAL_ALIGNED_32(uint8_t, a_buf, [96]); LOCAL_ALIGNED_32(uint8_t, l, [64]); @@ -2623,7 +2607,7 @@ static av_always_inline void intra_recon(AVCodecContext *ctx, ptrdiff_t y_off, int eob = b->skip ? 0 : b->tx > TX_8X8 ? 
AV_RN16A(&s->eob[n]) : s->eob[n]; mode = check_intra_mode(s, mode, &a, ptr_r, - s->frames[CUR_FRAME].tf.f->linesize[0], + s->s.frames[CUR_FRAME].tf.f->linesize[0], ptr, s->y_stride, l, col, x, w4, row, y, b->tx, 0, 0, 0, bytesperpixel); s->dsp.intra_pred[b->tx][mode](ptr, s->y_stride, l, a); @@ -2631,7 +2615,7 @@ static av_always_inline void intra_recon(AVCodecContext *ctx, ptrdiff_t y_off, s->dsp.itxfm_add[tx][txtp](ptr, s->y_stride, s->block + 16 * n * bytesperpixel, eob); } - dst_r += 4 * step1d * s->frames[CUR_FRAME].tf.f->linesize[0]; + dst_r += 4 * step1d * s->s.frames[CUR_FRAME].tf.f->linesize[0]; dst += 4 * step1d * s->y_stride; } @@ -2642,7 +2626,7 @@ static av_always_inline void intra_recon(AVCodecContext *ctx, ptrdiff_t y_off, step = 1 << (b->uvtx * 2); for (p = 0; p < 2; p++) { dst = s->dst[1 + p]; - dst_r = s->frames[CUR_FRAME].tf.f->data[1 + p] + uv_off; + dst_r = s->s.frames[CUR_FRAME].tf.f->data[1 + p] + uv_off; for (n = 0, y = 0; y < end_y; y += uvstep1d) { uint8_t *ptr = dst, *ptr_r = dst_r; for (x = 0; x < end_x; x += uvstep1d, ptr += 4 * uvstep1d * bytesperpixel, @@ -2652,7 +2636,7 @@ static av_always_inline void intra_recon(AVCodecContext *ctx, ptrdiff_t y_off, int eob = b->skip ? 0 : b->uvtx > TX_8X8 ? AV_RN16A(&s->uveob[p][n]) : s->uveob[p][n]; mode = check_intra_mode(s, mode, &a, ptr_r, - s->frames[CUR_FRAME].tf.f->linesize[1], + s->s.frames[CUR_FRAME].tf.f->linesize[1], ptr, s->uv_stride, l, col, x, w4, row, y, b->uvtx, p + 1, s->ss_h, s->ss_v, bytesperpixel); s->dsp.intra_pred[b->uvtx][mode](ptr, s->uv_stride, l, a); @@ -2660,7 +2644,7 @@ static av_always_inline void intra_recon(AVCodecContext *ctx, ptrdiff_t y_off, s->dsp.itxfm_add[uvtx][DCT_DCT](ptr, s->uv_stride, s->uvblock[p] + 16 * n * bytesperpixel, eob); } - dst_r += 4 * uvstep1d * s->frames[CUR_FRAME].tf.f->linesize[1]; + dst_r += 4 * uvstep1d * s->s.frames[CUR_FRAME].tf.f->linesize[1]; dst += 4 * uvstep1d * s->uv_stride; } } @@ -2786,8 +2770,8 @@ static av_always_inline void mc_luma_scaled(VP9Context *s, vp9_scaled_mc_func sm int bw, int bh, int w, int h, int bytesperpixel, const uint16_t *scale, const uint8_t *step) { - if (s->frames[CUR_FRAME].tf.f->width == ref_frame->f->width && - s->frames[CUR_FRAME].tf.f->height == ref_frame->f->height) { + if (s->s.frames[CUR_FRAME].tf.f->width == ref_frame->f->width && + s->s.frames[CUR_FRAME].tf.f->height == ref_frame->f->height) { mc_luma_unscaled(s, mc, dst, dst_stride, ref, ref_stride, ref_frame, y, x, in_mv, bw, bh, w, h, bytesperpixel); } else { @@ -2842,8 +2826,8 @@ static av_always_inline void mc_chroma_scaled(VP9Context *s, vp9_scaled_mc_func int bw, int bh, int w, int h, int bytesperpixel, const uint16_t *scale, const uint8_t *step) { - if (s->frames[CUR_FRAME].tf.f->width == ref_frame->f->width && - s->frames[CUR_FRAME].tf.f->height == ref_frame->f->height) { + if (s->s.frames[CUR_FRAME].tf.f->width == ref_frame->f->width && + s->s.frames[CUR_FRAME].tf.f->height == ref_frame->f->height) { mc_chroma_unscaled(s, mc, dst_u, dst_v, dst_stride, ref_u, src_stride_u, ref_v, src_stride_v, ref_frame, y, x, in_mv, bw, bh, w, h, bytesperpixel); @@ -2957,7 +2941,7 @@ static av_always_inline void inter_recon(AVCodecContext *ctx, int bytesperpixel) int h4 = bwh_tab[1][b->bs][1] << 1, x, y, step = 1 << (b->tx * 2); int end_x = FFMIN(2 * (s->cols - col), w4); int end_y = FFMIN(2 * (s->rows - row), h4); - int tx = 4 * s->h.lossless + b->tx, uvtx = b->uvtx + 4 * s->h.lossless; + int tx = 4 * s->s.h.lossless + b->tx, uvtx = b->uvtx + 4 * s->s.h.lossless; int uvstep1d = 1 
<< b->uvtx, p; uint8_t *dst = s->dst[0]; @@ -3139,7 +3123,7 @@ static void decode_b(AVCodecContext *ctx, int row, int col, int bytesperpixel = s->bytesperpixel; int w4 = bwh_tab[1][bs][0], h4 = bwh_tab[1][bs][1], lvl; int emu[2]; - AVFrame *f = s->frames[CUR_FRAME].tf.f; + AVFrame *f = s->s.frames[CUR_FRAME].tf.f; s->row = row; s->row7 = row & 7; @@ -3289,8 +3273,8 @@ static void decode_b(AVCodecContext *ctx, int row, int col, } // pick filter level and find edges to apply filter to - if (s->h.filter.level && - (lvl = s->h.segmentation.feat[b->seg_id].lflvl[b->intra ? 0 : b->ref[0] + 1] + if (s->s.h.filter.level && + (lvl = s->s.h.segmentation.feat[b->seg_id].lflvl[b->intra ? 0 : b->ref[0] + 1] [b->mode[3] != ZEROMV]) > 0) { int x_end = FFMIN(s->cols - col, w4), y_end = FFMIN(s->rows - row, h4); int skip_inter = !b->intra && b->skip, col7 = s->col7, row7 = s->row7; @@ -3304,7 +3288,7 @@ static void decode_b(AVCodecContext *ctx, int row, int col, b->uvtx, skip_inter); if (!s->filter_lut.lim_lut[lvl]) { - int sharp = s->h.filter.sharpness; + int sharp = s->s.h.filter.sharpness; int limit = lvl; if (sharp > 0) { @@ -3335,11 +3319,11 @@ static void decode_sb(AVCodecContext *ctx, int row, int col, struct VP9Filter *l VP9Context *s = ctx->priv_data; int c = ((s->above_partition_ctx[col] >> (3 - bl)) & 1) | (((s->left_partition_ctx[row & 0x7] >> (3 - bl)) & 1) << 1); - const uint8_t *p = s->h.keyframe || s->h.intraonly ? vp9_default_kf_partition_probs[bl][c] : + const uint8_t *p = s->s.h.keyframe || s->s.h.intraonly ? vp9_default_kf_partition_probs[bl][c] : s->prob.p.partition[bl][c]; enum BlockPartition bp; ptrdiff_t hbs = 4 >> bl; - AVFrame *f = s->frames[CUR_FRAME].tf.f; + AVFrame *f = s->s.frames[CUR_FRAME].tf.f; ptrdiff_t y_stride = f->linesize[0], uv_stride = f->linesize[1]; int bytesperpixel = s->bytesperpixel; @@ -3414,7 +3398,7 @@ static void decode_sb_mem(AVCodecContext *ctx, int row, int col, struct VP9Filte VP9Context *s = ctx->priv_data; VP9Block *b = s->b; ptrdiff_t hbs = 4 >> bl; - AVFrame *f = s->frames[CUR_FRAME].tf.f; + AVFrame *f = s->s.frames[CUR_FRAME].tf.f; ptrdiff_t y_stride = f->linesize[0], uv_stride = f->linesize[1]; int bytesperpixel = s->bytesperpixel; @@ -3613,7 +3597,7 @@ static void loopfilter_sb(AVCodecContext *ctx, struct VP9Filter *lflvl, int row, int col, ptrdiff_t yoff, ptrdiff_t uvoff) { VP9Context *s = ctx->priv_data; - AVFrame *f = s->frames[CUR_FRAME].tf.f; + AVFrame *f = s->s.frames[CUR_FRAME].tf.f; uint8_t *dst = f->data[0] + yoff; ptrdiff_t ls_y = f->linesize[0], ls_uv = f->linesize[1]; uint8_t (*uv_masks)[8][4] = lflvl->mask[s->ss_h | s->ss_v]; @@ -3664,8 +3648,8 @@ static av_always_inline void adapt_prob(uint8_t *p, unsigned ct0, unsigned ct1, static void adapt_probs(VP9Context *s) { int i, j, k, l, m; - prob_context *p = &s->prob_ctx[s->h.framectxid].p; - int uf = (s->h.keyframe || s->h.intraonly || !s->last_keyframe) ? 112 : 128; + prob_context *p = &s->prob_ctx[s->s.h.framectxid].p; + int uf = (s->s.h.keyframe || s->s.h.intraonly || !s->last_keyframe) ? 
112 : 128; // coefficients for (i = 0; i < 4; i++) @@ -3673,7 +3657,7 @@ static void adapt_probs(VP9Context *s) for (k = 0; k < 2; k++) for (l = 0; l < 6; l++) for (m = 0; m < 6; m++) { - uint8_t *pp = s->prob_ctx[s->h.framectxid].coef[i][j][k][l][m]; + uint8_t *pp = s->prob_ctx[s->s.h.framectxid].coef[i][j][k][l][m]; unsigned *e = s->counts.eob[i][j][k][l][m]; unsigned *c = s->counts.coef[i][j][k][l][m]; @@ -3685,7 +3669,7 @@ static void adapt_probs(VP9Context *s) adapt_prob(&pp[2], c[1], c[2], 24, uf); } - if (s->h.keyframe || s->h.intraonly) { + if (s->s.h.keyframe || s->s.h.intraonly) { memcpy(p->skip, s->prob.p.skip, sizeof(p->skip)); memcpy(p->tx32p, s->prob.p.tx32p, sizeof(p->tx32p)); memcpy(p->tx16p, s->prob.p.tx16p, sizeof(p->tx16p)); @@ -3702,19 +3686,19 @@ static void adapt_probs(VP9Context *s) adapt_prob(&p->intra[i], s->counts.intra[i][0], s->counts.intra[i][1], 20, 128); // comppred flag - if (s->h.comppredmode == PRED_SWITCHABLE) { + if (s->s.h.comppredmode == PRED_SWITCHABLE) { for (i = 0; i < 5; i++) adapt_prob(&p->comp[i], s->counts.comp[i][0], s->counts.comp[i][1], 20, 128); } // reference frames - if (s->h.comppredmode != PRED_SINGLEREF) { + if (s->s.h.comppredmode != PRED_SINGLEREF) { for (i = 0; i < 5; i++) adapt_prob(&p->comp_ref[i], s->counts.comp_ref[i][0], s->counts.comp_ref[i][1], 20, 128); } - if (s->h.comppredmode != PRED_COMPREF) { + if (s->s.h.comppredmode != PRED_COMPREF) { for (i = 0; i < 5; i++) { uint8_t *pp = p->single_ref[i]; unsigned (*c)[2] = s->counts.single_ref[i]; @@ -3736,7 +3720,7 @@ static void adapt_probs(VP9Context *s) } // tx size - if (s->h.txfmmode == TX_SWITCHABLE) { + if (s->s.h.txfmmode == TX_SWITCHABLE) { for (i = 0; i < 2; i++) { unsigned *c16 = s->counts.tx16p[i], *c32 = s->counts.tx32p[i]; @@ -3750,7 +3734,7 @@ static void adapt_probs(VP9Context *s) } // interpolation filter - if (s->h.filtermode == FILTER_SWITCHABLE) { + if (s->s.h.filtermode == FILTER_SWITCHABLE) { for (i = 0; i < 4; i++) { uint8_t *pp = p->filter[i]; unsigned *c = s->counts.filter[i]; @@ -3826,7 +3810,7 @@ static void adapt_probs(VP9Context *s) adapt_prob(&pp[1], c[1], c[2] + c[3], 20, 128); adapt_prob(&pp[2], c[2], c[3], 20, 128); - if (s->h.highprecisionmvs) { + if (s->s.h.highprecisionmvs) { adapt_prob(&p->mv_comp[i].class0_hp, s->counts.mv_comp[i].class0_hp[0], s->counts.mv_comp[i].class0_hp[1], 20, 128); adapt_prob(&p->mv_comp[i].hp, s->counts.mv_comp[i].hp[0], @@ -3896,14 +3880,14 @@ static av_cold int vp9_decode_free(AVCodecContext *ctx) int i; for (i = 0; i < 3; i++) { - if (s->frames[i].tf.f->data[0]) - vp9_unref_frame(ctx, &s->frames[i]); - av_frame_free(&s->frames[i].tf.f); + if (s->s.frames[i].tf.f->data[0]) + vp9_unref_frame(ctx, &s->s.frames[i]); + av_frame_free(&s->s.frames[i].tf.f); } for (i = 0; i < 8; i++) { - if (s->refs[i].f->data[0]) - ff_thread_release_buffer(ctx, &s->refs[i]); - av_frame_free(&s->refs[i].f); + if (s->s.refs[i].f->data[0]) + ff_thread_release_buffer(ctx, &s->s.refs[i]); + av_frame_free(&s->s.refs[i].f); if (s->next_refs[i].f->data[0]) ff_thread_release_buffer(ctx, &s->next_refs[i]); av_frame_free(&s->next_refs[i].f); @@ -3923,8 +3907,8 @@ static int vp9_decode_frame(AVCodecContext *ctx, void *frame, int size = pkt->size; VP9Context *s = ctx->priv_data; int res, tile_row, tile_col, i, ref, row, col; - int retain_segmap_ref = s->frames[REF_FRAME_SEGMAP].segmentation_map && - (!s->h.segmentation.enabled || !s->h.segmentation.update_map); + int retain_segmap_ref = s->s.frames[REF_FRAME_SEGMAP].segmentation_map && + 
(!s->s.h.segmentation.enabled || !s->s.h.segmentation.update_map);
     ptrdiff_t yoff, uvoff, ls_y, ls_uv;
     AVFrame *f;
     int bytesperpixel;
@@ -3932,19 +3916,19 @@ static int vp9_decode_frame(AVCodecContext *ctx, void *frame,
     if ((res = decode_frame_header(ctx, data, size, &ref)) < 0) {
         return res;
     } else if (res == 0) {
-        if (!s->refs[ref].f->data[0]) {
+        if (!s->s.refs[ref].f->data[0]) {
             av_log(ctx, AV_LOG_ERROR, "Requested reference %d not available\n", ref);
             return AVERROR_INVALIDDATA;
         }
-        if ((res = av_frame_ref(frame, s->refs[ref].f)) < 0)
+        if ((res = av_frame_ref(frame, s->s.refs[ref].f)) < 0)
             return res;
         ((AVFrame *)frame)->pkt_pts = pkt->pts;
         ((AVFrame *)frame)->pkt_dts = pkt->dts;
         for (i = 0; i < 8; i++) {
             if (s->next_refs[i].f->data[0])
                 ff_thread_release_buffer(ctx, &s->next_refs[i]);
-            if (s->refs[i].f->data[0] &&
-                (res = ff_thread_ref_frame(&s->next_refs[i], &s->refs[i])) < 0)
+            if (s->s.refs[i].f->data[0] &&
+                (res = ff_thread_ref_frame(&s->next_refs[i], &s->s.refs[i])) < 0)
                 return res;
         }
         *got_frame = 1;
@@ -3953,42 +3937,42 @@ static int vp9_decode_frame(AVCodecContext *ctx, void *frame,
     data += res;
     size -= res;
 
-    if (!retain_segmap_ref || s->h.keyframe || s->h.intraonly) {
-        if (s->frames[REF_FRAME_SEGMAP].tf.f->data[0])
-            vp9_unref_frame(ctx, &s->frames[REF_FRAME_SEGMAP]);
-        if (!s->h.keyframe && !s->h.intraonly && !s->h.errorres && s->frames[CUR_FRAME].tf.f->data[0] &&
-            (res = vp9_ref_frame(ctx, &s->frames[REF_FRAME_SEGMAP], &s->frames[CUR_FRAME])) < 0)
+    if (!retain_segmap_ref || s->s.h.keyframe || s->s.h.intraonly) {
+        if (s->s.frames[REF_FRAME_SEGMAP].tf.f->data[0])
+            vp9_unref_frame(ctx, &s->s.frames[REF_FRAME_SEGMAP]);
+        if (!s->s.h.keyframe && !s->s.h.intraonly && !s->s.h.errorres && s->s.frames[CUR_FRAME].tf.f->data[0] &&
+            (res = vp9_ref_frame(ctx, &s->s.frames[REF_FRAME_SEGMAP], &s->s.frames[CUR_FRAME])) < 0)
             return res;
     }
-    if (s->frames[REF_FRAME_MVPAIR].tf.f->data[0])
-        vp9_unref_frame(ctx, &s->frames[REF_FRAME_MVPAIR]);
-    if (!s->h.intraonly && !s->h.keyframe && !s->h.errorres && s->frames[CUR_FRAME].tf.f->data[0] &&
-        (res = vp9_ref_frame(ctx, &s->frames[REF_FRAME_MVPAIR], &s->frames[CUR_FRAME])) < 0)
+    if (s->s.frames[REF_FRAME_MVPAIR].tf.f->data[0])
+        vp9_unref_frame(ctx, &s->s.frames[REF_FRAME_MVPAIR]);
+    if (!s->s.h.intraonly && !s->s.h.keyframe && !s->s.h.errorres && s->s.frames[CUR_FRAME].tf.f->data[0] &&
+        (res = vp9_ref_frame(ctx, &s->s.frames[REF_FRAME_MVPAIR], &s->s.frames[CUR_FRAME])) < 0)
         return res;
-    if (s->frames[CUR_FRAME].tf.f->data[0])
-        vp9_unref_frame(ctx, &s->frames[CUR_FRAME]);
-    if ((res = vp9_alloc_frame(ctx, &s->frames[CUR_FRAME])) < 0)
+    if (s->s.frames[CUR_FRAME].tf.f->data[0])
+        vp9_unref_frame(ctx, &s->s.frames[CUR_FRAME]);
+    if ((res = vp9_alloc_frame(ctx, &s->s.frames[CUR_FRAME])) < 0)
         return res;
-    f = s->frames[CUR_FRAME].tf.f;
-    f->key_frame = s->h.keyframe;
-    f->pict_type = (s->h.keyframe || s->h.intraonly) ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
+    f = s->s.frames[CUR_FRAME].tf.f;
+    f->key_frame = s->s.h.keyframe;
+    f->pict_type = (s->s.h.keyframe || s->s.h.intraonly) ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
     ls_y = f->linesize[0];
     ls_uv =f->linesize[1];
 
-    if (s->frames[REF_FRAME_SEGMAP].tf.f->data[0] &&
-        (s->frames[REF_FRAME_MVPAIR].tf.f->width != s->frames[CUR_FRAME].tf.f->width ||
-         s->frames[REF_FRAME_MVPAIR].tf.f->height != s->frames[CUR_FRAME].tf.f->height)) {
-        vp9_unref_frame(ctx, &s->frames[REF_FRAME_SEGMAP]);
+    if (s->s.frames[REF_FRAME_SEGMAP].tf.f->data[0] &&
+        (s->s.frames[REF_FRAME_MVPAIR].tf.f->width != s->s.frames[CUR_FRAME].tf.f->width ||
+         s->s.frames[REF_FRAME_MVPAIR].tf.f->height != s->s.frames[CUR_FRAME].tf.f->height)) {
+        vp9_unref_frame(ctx, &s->s.frames[REF_FRAME_SEGMAP]);
     }
 
     // ref frame setup
     for (i = 0; i < 8; i++) {
         if (s->next_refs[i].f->data[0])
             ff_thread_release_buffer(ctx, &s->next_refs[i]);
-        if (s->h.refreshrefmask & (1 << i)) {
-            res = ff_thread_ref_frame(&s->next_refs[i], &s->frames[CUR_FRAME].tf);
-        } else if (s->refs[i].f->data[0]) {
-            res = ff_thread_ref_frame(&s->next_refs[i], &s->refs[i]);
+        if (s->s.h.refreshrefmask & (1 << i)) {
+            res = ff_thread_ref_frame(&s->next_refs[i], &s->s.frames[CUR_FRAME].tf);
+        } else if (s->s.refs[i].f->data[0]) {
+            res = ff_thread_ref_frame(&s->next_refs[i], &s->s.refs[i]);
         }
         if (res < 0)
             return res;
@@ -3998,7 +3982,7 @@ static int vp9_decode_frame(AVCodecContext *ctx, void *frame,
     bytesperpixel = s->bytesperpixel;
     memset(s->above_partition_ctx, 0, s->cols);
     memset(s->above_skip_ctx, 0, s->cols);
-    if (s->h.keyframe || s->h.intraonly) {
+    if (s->s.h.keyframe || s->s.h.intraonly) {
         memset(s->above_mode_ctx, DC_PRED, s->cols * 2);
     } else {
         memset(s->above_mode_ctx, NEARESTMV, s->cols);
@@ -4007,14 +3991,14 @@ static int vp9_decode_frame(AVCodecContext *ctx, void *frame,
     memset(s->above_uv_nnz_ctx[0], 0, s->sb_cols * 16 >> s->ss_h);
     memset(s->above_uv_nnz_ctx[1], 0, s->sb_cols * 16 >> s->ss_h);
     memset(s->above_segpred_ctx, 0, s->cols);
-    s->pass = s->frames[CUR_FRAME].uses_2pass =
-        ctx->active_thread_type == FF_THREAD_FRAME && s->h.refreshctx && !s->h.parallelmode;
+    s->pass = s->s.frames[CUR_FRAME].uses_2pass =
+        ctx->active_thread_type == FF_THREAD_FRAME && s->s.h.refreshctx && !s->s.h.parallelmode;
     if ((res = update_block_buffers(ctx)) < 0) {
         av_log(ctx, AV_LOG_ERROR, "Failed to allocate block buffers\n");
         return res;
     }
 
-    if (s->h.refreshctx && s->h.parallelmode) {
+    if (s->s.h.refreshctx && s->s.h.parallelmode) {
         int j, k, l, m;
 
         for (i = 0; i < 4; i++) {
@@ -4022,14 +4006,14 @@ static int vp9_decode_frame(AVCodecContext *ctx, void *frame,
                 for (k = 0; k < 2; k++)
                     for (l = 0; l < 6; l++)
                         for (m = 0; m < 6; m++)
-                            memcpy(s->prob_ctx[s->h.framectxid].coef[i][j][k][l][m],
+                            memcpy(s->prob_ctx[s->s.h.framectxid].coef[i][j][k][l][m],
                                    s->prob.coef[i][j][k][l][m], 3);
-            if (s->h.txfmmode == i)
+            if (s->s.h.txfmmode == i)
                 break;
         }
-        s->prob_ctx[s->h.framectxid].p = s->prob.p;
+        s->prob_ctx[s->s.h.framectxid].p = s->prob.p;
         ff_thread_finish_setup(ctx);
-    } else if (!s->h.refreshctx) {
+    } else if (!s->s.h.refreshctx) {
         ff_thread_finish_setup(ctx);
     }
 
@@ -4043,15 +4027,15 @@ static int vp9_decode_frame(AVCodecContext *ctx, void *frame,
         s->uveob[0] = s->uveob_base[0];
         s->uveob[1] = s->uveob_base[1];
 
-        for (tile_row = 0; tile_row < s->h.tiling.tile_rows; tile_row++) {
+        for (tile_row = 0; tile_row < s->s.h.tiling.tile_rows; tile_row++) {
             set_tile_offset(&s->tile_row_start, &s->tile_row_end,
-                            tile_row, s->h.tiling.log2_tile_rows, s->sb_rows);
+                            tile_row, s->s.h.tiling.log2_tile_rows, s->sb_rows);
 
             if (s->pass != 2) {
-                for (tile_col = 0; tile_col < s->h.tiling.tile_cols; tile_col++) {
+                for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) {
                     int64_t tile_size;
-                    if (tile_col == s->h.tiling.tile_cols - 1 &&
-                        tile_row == s->h.tiling.tile_rows - 1) {
+                    if (tile_col == s->s.h.tiling.tile_cols - 1 &&
+                        tile_row == s->s.h.tiling.tile_rows - 1) {
                         tile_size = size;
                     } else {
                         tile_size = AV_RB32(data);
@@ -4059,12 +4043,12 @@ static int vp9_decode_frame(AVCodecContext *ctx, void *frame,
                         size -= 4;
                     }
                     if (tile_size > size) {
-                        ff_thread_report_progress(&s->frames[CUR_FRAME].tf, INT_MAX, 0);
+                        ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, INT_MAX, 0);
                         return AVERROR_INVALIDDATA;
                     }
                     ff_vp56_init_range_decoder(&s->c_b[tile_col], data, tile_size);
                     if (vp56_rac_get_prob_branchy(&s->c_b[tile_col], 128)) { // marker bit
-                        ff_thread_report_progress(&s->frames[CUR_FRAME].tf, INT_MAX, 0);
+                        ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, INT_MAX, 0);
                         return AVERROR_INVALIDDATA;
                     }
                     data += tile_size;
@@ -4077,14 +4061,14 @@ static int vp9_decode_frame(AVCodecContext *ctx, void *frame,
                 struct VP9Filter *lflvl_ptr = s->lflvl;
                 ptrdiff_t yoff2 = yoff, uvoff2 = uvoff;
 
-                for (tile_col = 0; tile_col < s->h.tiling.tile_cols; tile_col++) {
+                for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) {
                     set_tile_offset(&s->tile_col_start, &s->tile_col_end,
-                                    tile_col, s->h.tiling.log2_tile_cols, s->sb_cols);
+                                    tile_col, s->s.h.tiling.log2_tile_cols, s->sb_cols);
 
                     if (s->pass != 2) {
                         memset(s->left_partition_ctx, 0, 8);
                         memset(s->left_skip_ctx, 0, 8);
-                        if (s->h.keyframe || s->h.intraonly) {
+                        if (s->s.h.keyframe || s->s.h.intraonly) {
                             memset(s->left_mode_ctx, DC_PRED, 16);
                         } else {
                             memset(s->left_mode_ctx, NEARESTMV, 8);
@@ -4138,7 +4122,7 @@ static int vp9_decode_frame(AVCodecContext *ctx, void *frame,
                 }
 
                 // loopfilter one row
-                if (s->h.filter.level) {
+                if (s->s.h.filter.level) {
                     yoff2 = yoff;
                     uvoff2 = uvoff;
                     lflvl_ptr = s->lflvl;
@@ -4152,28 +4136,28 @@ static int vp9_decode_frame(AVCodecContext *ctx, void *frame,
                 // FIXME maybe we can make this more finegrained by running the
                 // loopfilter per-block instead of after each sbrow
                 // In fact that would also make intra pred left preparation easier?
-                ff_thread_report_progress(&s->frames[CUR_FRAME].tf, row >> 3, 0);
+                ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, row >> 3, 0);
             }
         }
 
-        if (s->pass < 2 && s->h.refreshctx && !s->h.parallelmode) {
+        if (s->pass < 2 && s->s.h.refreshctx && !s->s.h.parallelmode) {
             adapt_probs(s);
             ff_thread_finish_setup(ctx);
         }
     } while (s->pass++ == 1);
-    ff_thread_report_progress(&s->frames[CUR_FRAME].tf, INT_MAX, 0);
+    ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, INT_MAX, 0);
 
     // ref frame setup
     for (i = 0; i < 8; i++) {
-        if (s->refs[i].f->data[0])
-            ff_thread_release_buffer(ctx, &s->refs[i]);
+        if (s->s.refs[i].f->data[0])
+            ff_thread_release_buffer(ctx, &s->s.refs[i]);
         if (s->next_refs[i].f->data[0] &&
-            (res = ff_thread_ref_frame(&s->refs[i], &s->next_refs[i])) < 0)
+            (res = ff_thread_ref_frame(&s->s.refs[i], &s->next_refs[i])) < 0)
             return res;
     }
 
-    if (!s->h.invisible) {
-        if ((res = av_frame_ref(frame, s->frames[CUR_FRAME].tf.f)) < 0)
+    if (!s->s.h.invisible) {
+        if ((res = av_frame_ref(frame, s->s.frames[CUR_FRAME].tf.f)) < 0)
             return res;
         *got_frame = 1;
     }
@@ -4187,9 +4171,9 @@ static void vp9_decode_flush(AVCodecContext *ctx)
     int i;
 
     for (i = 0; i < 3; i++)
-        vp9_unref_frame(ctx, &s->frames[i]);
+        vp9_unref_frame(ctx, &s->s.frames[i]);
    for (i = 0; i < 8; i++)
-        ff_thread_release_buffer(ctx, &s->refs[i]);
+        ff_thread_release_buffer(ctx, &s->s.refs[i]);
 }
 
 static int init_frames(AVCodecContext *ctx)
@@ -4198,17 +4182,17 @@ static int init_frames(AVCodecContext *ctx)
     int i;
 
     for (i = 0; i < 3; i++) {
-        s->frames[i].tf.f = av_frame_alloc();
-        if (!s->frames[i].tf.f) {
+        s->s.frames[i].tf.f = av_frame_alloc();
+        if (!s->s.frames[i].tf.f) {
             vp9_decode_free(ctx);
             av_log(ctx, AV_LOG_ERROR, "Failed to allocate frame buffer %d\n", i);
             return AVERROR(ENOMEM);
         }
     }
     for (i = 0; i < 8; i++) {
-        s->refs[i].f = av_frame_alloc();
+        s->s.refs[i].f = av_frame_alloc();
         s->next_refs[i].f = av_frame_alloc();
-        if (!s->refs[i].f || !s->next_refs[i].f) {
+        if (!s->s.refs[i].f || !s->next_refs[i].f) {
             vp9_decode_free(ctx);
             av_log(ctx, AV_LOG_ERROR, "Failed to allocate frame buffer %d\n", i);
             return AVERROR(ENOMEM);
@@ -4224,7 +4208,7 @@ static av_cold int vp9_decode_init(AVCodecContext *ctx)
 
     ctx->internal->allocate_progress = 1;
     s->last_bpp = 0;
-    s->h.filter.sharpness = -1;
+    s->s.h.filter.sharpness = -1;
 
     return init_frames(ctx);
 }
@@ -4248,37 +4232,37 @@ static int vp9_decode_update_thread_context(AVCodecContext *dst, const AVCodecCo
     }
 
     for (i = 0; i < 3; i++) {
-        if (s->frames[i].tf.f->data[0])
-            vp9_unref_frame(dst, &s->frames[i]);
-        if (ssrc->frames[i].tf.f->data[0]) {
-            if ((res = vp9_ref_frame(dst, &s->frames[i], &ssrc->frames[i])) < 0)
+        if (s->s.frames[i].tf.f->data[0])
+            vp9_unref_frame(dst, &s->s.frames[i]);
+        if (ssrc->s.frames[i].tf.f->data[0]) {
+            if ((res = vp9_ref_frame(dst, &s->s.frames[i], &ssrc->s.frames[i])) < 0)
                 return res;
         }
     }
     for (i = 0; i < 8; i++) {
-        if (s->refs[i].f->data[0])
-            ff_thread_release_buffer(dst, &s->refs[i]);
+        if (s->s.refs[i].f->data[0])
+            ff_thread_release_buffer(dst, &s->s.refs[i]);
         if (ssrc->next_refs[i].f->data[0]) {
-            if ((res = ff_thread_ref_frame(&s->refs[i], &ssrc->next_refs[i])) < 0)
+            if ((res = ff_thread_ref_frame(&s->s.refs[i], &ssrc->next_refs[i])) < 0)
                 return res;
         }
     }
 
-    s->h.invisible = ssrc->h.invisible;
-    s->h.keyframe = ssrc->h.keyframe;
-    s->h.intraonly = ssrc->h.intraonly;
+    s->s.h.invisible = ssrc->s.h.invisible;
+    s->s.h.keyframe = ssrc->s.h.keyframe;
+    s->s.h.intraonly = ssrc->s.h.intraonly;
     s->ss_v = ssrc->ss_v;
     s->ss_h = ssrc->ss_h;
-    s->h.segmentation.enabled = ssrc->h.segmentation.enabled;
-    s->h.segmentation.update_map = ssrc->h.segmentation.update_map;
-    s->h.segmentation.absolute_vals = ssrc->h.segmentation.absolute_vals;
+    s->s.h.segmentation.enabled = ssrc->s.h.segmentation.enabled;
+    s->s.h.segmentation.update_map = ssrc->s.h.segmentation.update_map;
+    s->s.h.segmentation.absolute_vals = ssrc->s.h.segmentation.absolute_vals;
     s->bytesperpixel = ssrc->bytesperpixel;
     s->bpp = ssrc->bpp;
     s->bpp_index = ssrc->bpp_index;
     memcpy(&s->prob_ctx, &ssrc->prob_ctx, sizeof(s->prob_ctx));
-    memcpy(&s->h.lf_delta, &ssrc->h.lf_delta, sizeof(s->h.lf_delta));
-    memcpy(&s->h.segmentation.feat, &ssrc->h.segmentation.feat,
-           sizeof(s->h.segmentation.feat));
+    memcpy(&s->s.h.lf_delta, &ssrc->s.h.lf_delta, sizeof(s->s.h.lf_delta));
+    memcpy(&s->s.h.segmentation.feat, &ssrc->s.h.segmentation.feat,
+           sizeof(s->s.h.segmentation.feat));
 
     return 0;
 }
diff --git a/libavcodec/vp9.h b/libavcodec/vp9.h
index 189acaccc43b5..1ee98b70b61ef 100644
--- a/libavcodec/vp9.h
+++ b/libavcodec/vp9.h
@@ -26,6 +26,9 @@
 
 #include <stdint.h>
 
+#include "thread.h"
+#include "vp56.h"
+
 enum BlockLevel {
     BL_64X64,
     BL_32X32,
@@ -115,7 +118,20 @@ enum CompPredMode {
     PRED_SWITCHABLE,
 };
 
-struct Vp9BitstreamHeader {
+struct VP9mvrefPair {
+    VP56mv mv[2];
+    int8_t ref[2];
+};
+
+typedef struct VP9Frame {
+    ThreadFrame tf;
+    AVBufferRef *extradata;
+    uint8_t *segmentation_map;
+    struct VP9mvrefPair *mv;
+    int uses_2pass;
+} VP9Frame;
+
+typedef struct VP9BitstreamHeader {
     // bitstream header
     uint8_t profile;
     uint8_t keyframe;
@@ -172,6 +188,16 @@ struct Vp9BitstreamHeader {
         unsigned log2_tile_cols, log2_tile_rows;
         unsigned tile_cols, tile_rows;
     } tiling;
-};
+} VP9BitstreamHeader;
+
+typedef struct VP9SharedContext {
+    VP9BitstreamHeader h;
+
+    ThreadFrame refs[8];
+#define CUR_FRAME 0
+#define REF_FRAME_MVPAIR 1
+#define REF_FRAME_SEGMAP 2
+    VP9Frame frames[3];
+} VP9SharedContext;
 
 #endif /* AVCODEC_VP9_H */
diff --git a/libavcodec/vp9_mc_template.c b/libavcodec/vp9_mc_template.c
index 1aa993ebbed2b..38d9a6da9fa48 100644
--- a/libavcodec/vp9_mc_template.c
+++ b/libavcodec/vp9_mc_template.c
@@ -36,14 +36,14 @@ static void FN(inter_pred)(AVCodecContext *ctx)
     VP9Context *s = ctx->priv_data;
     VP9Block *b = s->b;
     int row = s->row, col = s->col;
-    ThreadFrame *tref1 = &s->refs[s->h.refidx[b->ref[0]]], *tref2;
+    ThreadFrame *tref1 = &s->s.refs[s->s.h.refidx[b->ref[0]]], *tref2;
     AVFrame *ref1 = tref1->f, *ref2;
     int w1 = ref1->width, h1 = ref1->height, w2, h2;
     ptrdiff_t ls_y = s->y_stride, ls_uv = s->uv_stride;
     int bytesperpixel = BYTES_PER_PIXEL;
 
     if (b->comp) {
-        tref2 = &s->refs[s->h.refidx[b->ref[1]]];
+        tref2 = &s->s.refs[s->s.h.refidx[b->ref[1]]];
         ref2 = tref2->f;
         w2 = ref2->width;
         h2 = ref2->height;