/*
 * DXVA2 VP9 HW acceleration.
 *
 * copyright (c) 2015 Hendrik Leppkes
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/avassert.h"
#include "libavutil/pixdesc.h"

#include "vp9.h"

#include "dxva2_internal.h"

#include "compat/windows/dxva_vpx.h"

struct vp9_dxva2_picture_context {
    DXVA_PicParams_VP9   pp;
    DXVA_Slice_VPx_Short slice;
    const uint8_t        *bitstream;
    unsigned             bitstream_size;
};

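/* DXVA picture entries carry a 7-bit surface index in the low bits and an
 * "associated" flag in bit 7; the assert below enforces those ranges. */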
static void fill_picture_entry(DXVA_PicEntry_VPx *pic,
                               unsigned index, unsigned flag)
{
    av_assert0((index & 0x7f) == index && (flag & 0x01) == flag);
    pic->bPicEntry = index | (flag << 7);
}

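/* Translate the parsed VP9 frame header from the decoder's shared context
 * into the DXVA_PicParams_VP9 structure consumed by the driver. */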
static int fill_picture_parameters(const AVCodecContext *avctx, AVDXVAContext *ctx, const VP9SharedContext *h,
                                   DXVA_PicParams_VP9 *pp)
{
    int i;
    const AVPixFmtDescriptor *pixdesc = av_pix_fmt_desc_get(avctx->sw_pix_fmt);

    if (!pixdesc)
        return -1;

    memset(pp, 0, sizeof(*pp));

    fill_picture_entry(&pp->CurrPic, ff_dxva2_get_surface_index(avctx, ctx, h->frames[CUR_FRAME].tf.f), 0);

    pp->profile = h->h.profile;
    pp->wFormatAndPictureInfoFlags = ((h->h.keyframe == 0)  <<  0) |
                                     ((h->h.invisible == 0) <<  1) |
                                     (h->h.errorres         <<  2) |
                                     (1                     <<  3) | /* subsampling_x */
                                     (1                     <<  4) | /* subsampling_y */
                                     (0                     <<  5) | /* extra_plane */
                                     (h->h.refreshctx       <<  6) |
                                     (h->h.parallelmode     <<  7) |
                                     (h->h.intraonly        <<  8) |
                                     (h->h.framectxid       <<  9) |
                                     (h->h.resetctx         << 11) |
                                     ((h->h.keyframe ? 0 : h->h.highprecisionmvs) << 13) |
                                     (0                     << 14); /* ReservedFormatInfo2Bits */

    pp->width  = avctx->width;
    pp->height = avctx->height;
    pp->BitDepthMinus8Luma   = pixdesc->comp[0].depth - 8;
    pp->BitDepthMinus8Chroma = pixdesc->comp[0].depth - 8;
    /* swap 0/1 to match the reference */
    pp->interp_filter = (h->h.filtermode > 1) ? h->h.filtermode : (h->h.filtermode == 1 ? 0 : 1);
    pp->Reserved8Bits = 0;

    for (i = 0; i < 8; i++) {
        if (h->refs[i].f->data[0]) {
            fill_picture_entry(&pp->ref_frame_map[i], ff_dxva2_get_surface_index(avctx, ctx, h->refs[i].f), 0);
            pp->ref_frame_coded_width[i]  = h->refs[i].f->width;
            pp->ref_frame_coded_height[i] = h->refs[i].f->height;
        } else
            pp->ref_frame_map[i].bPicEntry = 0xFF;
    }

    for (i = 0; i < 3; i++) {
        uint8_t refidx = h->h.refidx[i];
        if (h->refs[refidx].f->data[0])
            fill_picture_entry(&pp->frame_refs[i], ff_dxva2_get_surface_index(avctx, ctx, h->refs[refidx].f), 0);
        else
            pp->frame_refs[i].bPicEntry = 0xFF;

        pp->ref_frame_sign_bias[i + 1] = h->h.signbias[i];
    }

    pp->filter_level    = h->h.filter.level;
    pp->sharpness_level = h->h.filter.sharpness;

    pp->wControlInfoFlags = (h->h.lf_delta.enabled   << 0) |
                            (h->h.lf_delta.updated   << 1) |
                            (h->h.use_last_frame_mvs << 2) |
                            (0                       << 3); /* ReservedControlInfo5Bits */

    for (i = 0; i < 4; i++)
        pp->ref_deltas[i] = h->h.lf_delta.ref[i];

    for (i = 0; i < 2; i++)
        pp->mode_deltas[i] = h->h.lf_delta.mode[i];

    pp->base_qindex   = h->h.yac_qi;
    pp->y_dc_delta_q  = h->h.ydc_qdelta;
    pp->uv_dc_delta_q = h->h.uvdc_qdelta;
    pp->uv_ac_delta_q = h->h.uvac_qdelta;

    /* segmentation data */
    pp->stVP9Segments.wSegmentInfoFlags = (h->h.segmentation.enabled       << 0) |
                                          (h->h.segmentation.update_map    << 1) |
                                          (h->h.segmentation.temporal      << 2) |
                                          (h->h.segmentation.absolute_vals << 3) |
                                          (0                               << 4); /* ReservedSegmentFlags4Bits */

    for (i = 0; i < 7; i++)
        pp->stVP9Segments.tree_probs[i] = h->h.segmentation.prob[i];

    if (h->h.segmentation.temporal)
        for (i = 0; i < 3; i++)
            pp->stVP9Segments.pred_probs[i] = h->h.segmentation.pred_prob[i];
    else
        memset(pp->stVP9Segments.pred_probs, 255, sizeof(pp->stVP9Segments.pred_probs));

    for (i = 0; i < 8; i++) {
        pp->stVP9Segments.feature_mask[i] = (h->h.segmentation.feat[i].q_enabled    << 0) |
                                            (h->h.segmentation.feat[i].lf_enabled   << 1) |
                                            (h->h.segmentation.feat[i].ref_enabled  << 2) |
                                            (h->h.segmentation.feat[i].skip_enabled << 3);

        pp->stVP9Segments.feature_data[i][0] = h->h.segmentation.feat[i].q_val;
        pp->stVP9Segments.feature_data[i][1] = h->h.segmentation.feat[i].lf_val;
        pp->stVP9Segments.feature_data[i][2] = h->h.segmentation.feat[i].ref_val;
        pp->stVP9Segments.feature_data[i][3] = 0; /* no data for skip */
    }

    pp->log2_tile_cols = h->h.tiling.log2_tile_cols;
    pp->log2_tile_rows = h->h.tiling.log2_tile_rows;

    pp->uncompressed_header_size_byte_aligned = h->h.uncompressed_header_size;
    pp->first_partition_size = h->h.compressed_header_size;

    pp->StatusReportFeedbackNumber = 1 + DXVA_CONTEXT_REPORT_ID(avctx, ctx)++;
    return 0;
}

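/* VP9 is not sliced; the whole compressed frame is described by a single
 * short-format slice entry covering the buffer from the given offset. */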
static void fill_slice_short(DXVA_Slice_VPx_Short *slice,
                             unsigned position, unsigned size)
{
    memset(slice, 0, sizeof(*slice));
    slice->BSNALunitDataLocation = position;
    slice->SliceBytesInBuffer    = size;
    slice->wBadSliceChopping     = 0;
}

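/* Copy the compressed frame into the hardware bitstream buffer and fill the
 * matching buffer descriptors; invoked as the commit callback from
 * ff_dxva2_common_end_frame(). */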
static int commit_bitstream_and_slice_buffer(AVCodecContext *avctx,
                                             DECODER_BUFFER_DESC *bs,
                                             DECODER_BUFFER_DESC *sc)
{
    const VP9SharedContext *h = avctx->priv_data;
    AVDXVAContext *ctx = avctx->hwaccel_context;
    struct vp9_dxva2_picture_context *ctx_pic = h->frames[CUR_FRAME].hwaccel_picture_private;
    void     *dxva_data_ptr;
    uint8_t  *dxva_data;
    unsigned dxva_size;
    unsigned padding;
    unsigned type;

    /* Acquire the compressed-data (bitstream) buffer from the hardware decoder */
#if CONFIG_D3D11VA
    if (avctx->pix_fmt == AV_PIX_FMT_D3D11VA_VLD) {
        type = D3D11_VIDEO_DECODER_BUFFER_BITSTREAM;
        if (FAILED(ID3D11VideoContext_GetDecoderBuffer(D3D11VA_CONTEXT(ctx)->video_context,
                                                       D3D11VA_CONTEXT(ctx)->decoder,
                                                       type,
                                                       &dxva_size, &dxva_data_ptr)))
            return -1;
    }
#endif
#if CONFIG_DXVA2
    if (avctx->pix_fmt == AV_PIX_FMT_DXVA2_VLD) {
        type = DXVA2_BitStreamDateBufferType;
        if (FAILED(IDirectXVideoDecoder_GetBuffer(DXVA2_CONTEXT(ctx)->decoder,
                                                  type,
                                                  &dxva_data_ptr, &dxva_size)))
            return -1;
    }
#endif

    dxva_data = dxva_data_ptr;

    if (ctx_pic->slice.SliceBytesInBuffer > dxva_size) {
        av_log(avctx, AV_LOG_ERROR, "Failed to build bitstream");
        return -1;
    }

    memcpy(dxva_data, ctx_pic->bitstream, ctx_pic->slice.SliceBytesInBuffer);

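    /* Zero-pad the data so its size is a multiple of 128 bytes (capped at the
     * remaining buffer space); hardware decoders commonly expect an aligned
     * buffer size. */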
    padding = FFMIN(128 - ((ctx_pic->slice.SliceBytesInBuffer) & 127), dxva_size - ctx_pic->slice.SliceBytesInBuffer);
    if (padding > 0) {
        memset(dxva_data + ctx_pic->slice.SliceBytesInBuffer, 0, padding);
        ctx_pic->slice.SliceBytesInBuffer += padding;
    }

#if CONFIG_D3D11VA
    if (avctx->pix_fmt == AV_PIX_FMT_D3D11VA_VLD)
        if (FAILED(ID3D11VideoContext_ReleaseDecoderBuffer(D3D11VA_CONTEXT(ctx)->video_context, D3D11VA_CONTEXT(ctx)->decoder, type)))
            return -1;
#endif
#if CONFIG_DXVA2
    if (avctx->pix_fmt == AV_PIX_FMT_DXVA2_VLD)
        if (FAILED(IDirectXVideoDecoder_ReleaseBuffer(DXVA2_CONTEXT(ctx)->decoder, type)))
            return -1;
#endif

#if CONFIG_D3D11VA
    if (avctx->pix_fmt == AV_PIX_FMT_D3D11VA_VLD) {
        D3D11_VIDEO_DECODER_BUFFER_DESC *dsc11 = bs;
        memset(dsc11, 0, sizeof(*dsc11));
        dsc11->BufferType     = type;
        dsc11->DataSize       = ctx_pic->slice.SliceBytesInBuffer;
        dsc11->NumMBsInBuffer = 0;

        type = D3D11_VIDEO_DECODER_BUFFER_SLICE_CONTROL;
    }
#endif
#if CONFIG_DXVA2
    if (avctx->pix_fmt == AV_PIX_FMT_DXVA2_VLD) {
        DXVA2_DecodeBufferDesc *dsc2 = bs;
        memset(dsc2, 0, sizeof(*dsc2));
        dsc2->CompressedBufferType = type;
        dsc2->DataSize             = ctx_pic->slice.SliceBytesInBuffer;
        dsc2->NumMBsInBuffer       = 0;

        type = DXVA2_SliceControlBufferType;
    }
#endif

    return ff_dxva2_commit_buffer(avctx, ctx, sc,
                                  type,
                                  &ctx_pic->slice, sizeof(ctx_pic->slice), 0);
}

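/* Called once per frame before any bitstream data is passed in: validate the
 * accelerator state and build the picture parameters for the current frame. */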
static int dxva2_vp9_start_frame(AVCodecContext *avctx,
                                 av_unused const uint8_t *buffer,
                                 av_unused uint32_t size)
{
    const VP9SharedContext *h = avctx->priv_data;
    AVDXVAContext *ctx = avctx->hwaccel_context;
    struct vp9_dxva2_picture_context *ctx_pic = h->frames[CUR_FRAME].hwaccel_picture_private;

    if (DXVA_CONTEXT_DECODER(avctx, ctx) == NULL ||
        DXVA_CONTEXT_CFG(avctx, ctx) == NULL ||
        DXVA_CONTEXT_COUNT(avctx, ctx) <= 0)
        return -1;
    av_assert0(ctx_pic);

    /* Fill up DXVA_PicParams_VP9 */
    if (fill_picture_parameters(avctx, ctx, h, &ctx_pic->pp) < 0)
        return -1;

    ctx_pic->bitstream_size = 0;
    ctx_pic->bitstream      = NULL;
    return 0;
}

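/* The decoder hands over the compressed frame data here; remember where it
 * starts and record it as a single short-format slice. */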
static int dxva2_vp9_decode_slice(AVCodecContext *avctx,
                                  const uint8_t *buffer,
                                  uint32_t size)
{
    const VP9SharedContext *h = avctx->priv_data;
    struct vp9_dxva2_picture_context *ctx_pic = h->frames[CUR_FRAME].hwaccel_picture_private;
    unsigned position;

    if (!ctx_pic->bitstream)
        ctx_pic->bitstream = buffer;
    ctx_pic->bitstream_size += size;

    position = buffer - ctx_pic->bitstream;
    fill_slice_short(&ctx_pic->slice, position, size);

    return 0;
}

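/* Submit the picture parameters and the accumulated bitstream to the
 * accelerator once the frame has been fully parsed. */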
static int dxva2_vp9_end_frame(AVCodecContext *avctx)
{
    VP9SharedContext *h = avctx->priv_data;
    struct vp9_dxva2_picture_context *ctx_pic = h->frames[CUR_FRAME].hwaccel_picture_private;
    int ret;

    if (ctx_pic->bitstream_size <= 0)
        return -1;

    av_log(avctx, AV_LOG_DEBUG, "Frame end, size: %u, uncompressed header: %d, compressed header: %d\n",
           ctx_pic->bitstream_size,
           ctx_pic->pp.uncompressed_header_size_byte_aligned,
           ctx_pic->pp.first_partition_size);

    ret = ff_dxva2_common_end_frame(avctx, h->frames[CUR_FRAME].tf.f,
                                    &ctx_pic->pp, sizeof(ctx_pic->pp),
                                    NULL, 0,
                                    commit_bitstream_and_slice_buffer);
    return ret;
}

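/* Hardware accelerator descriptors: one entry per supported output pixel
 * format (DXVA2 and D3D11VA), sharing the same per-frame callbacks. */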
#if CONFIG_VP9_DXVA2_HWACCEL
AVHWAccel ff_vp9_dxva2_hwaccel = {
    .name           = "vp9_dxva2",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_VP9,
    .pix_fmt        = AV_PIX_FMT_DXVA2_VLD,
    .start_frame    = dxva2_vp9_start_frame,
    .decode_slice   = dxva2_vp9_decode_slice,
    .end_frame      = dxva2_vp9_end_frame,
    .frame_priv_data_size = sizeof(struct vp9_dxva2_picture_context),
};
#endif

#if CONFIG_VP9_D3D11VA_HWACCEL
AVHWAccel ff_vp9_d3d11va_hwaccel = {
    .name           = "vp9_d3d11va",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_VP9,
    .pix_fmt        = AV_PIX_FMT_D3D11VA_VLD,
    .start_frame    = dxva2_vp9_start_frame,
    .decode_slice   = dxva2_vp9_decode_slice,
    .end_frame      = dxva2_vp9_end_frame,
    .frame_priv_data_size = sizeof(struct vp9_dxva2_picture_context),
};
#endif