/*
 * Copyright (c) 2017 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavcodec/avfft.h"
#include "avfilter.h"
#include "formats.h"
#include "framesync2.h"
#include "internal.h"
#include "video.h"
typedef struct ConvolveContext {
    const AVClass *class;

    FFFrameSync fs;                   // synchronizes the "main" and "impulse" inputs

    FFTContext *hfft;                 // forward FFT over rows
    FFTContext *vfft;                 // forward FFT over columns
    FFTContext *ihfft;                // inverse FFT over rows
    FFTContext *ivfft;                // inverse FFT over columns

    int fft_hbits[4];                 // per-plane log2 of the horizontal FFT length
    int fft_vbits[4];                 // per-plane log2 of the vertical FFT length
    int fft_hlen[4];                  // per-plane horizontal FFT length (1 << fft_hbits)
    int fft_vlen[4];                  // per-plane vertical FFT length (1 << fft_vbits)
    FFTComplex *fft_hdata[4];         // row-transform buffer, main input (h x fft_hlen)
    FFTComplex *fft_vdata[4];         // column-transform buffer, main input (fft_hlen x fft_vlen)
    FFTComplex *fft_hdata_impulse[4]; // row-transform buffer, impulse input
    FFTComplex *fft_vdata_impulse[4]; // column-transform buffer, impulse input

    int planes;                       // "planes" option: bitmask of planes to convolve
} ConvolveContext;
#define OFFSET(x) offsetof(ConvolveContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

static const AVOption convolve_options[] = {
    /* bitmask of planes to process; default 15 selects all four planes */
    { "planes", "set planes to convolve", OFFSET(planes), AV_OPT_TYPE_INT, {.i64 = 15}, 0, 15, FLAGS },
    { NULL },
};

/* defines convolve_class and convolve_framesync_preinit, merging the
 * framesync options (via the fs member) into the filter's option table */
FRAMESYNC_DEFINE_CLASS(convolve, ConvolveContext, fs);
| static int query_formats(AVFilterContext *ctx) | |
| { | |
| static const enum AVPixelFormat pixel_fmts_fftfilt[] = { | |
| AV_PIX_FMT_GRAY8, | |
| AV_PIX_FMT_GBRP, | |
| AV_PIX_FMT_YUV444P, | |
| AV_PIX_FMT_NONE | |
| }; | |
| AVFilterFormats *fmts_list = ff_make_format_list(pixel_fmts_fftfilt); | |
| if (!fmts_list) | |
| return AVERROR(ENOMEM); | |
| return ff_set_common_formats(ctx, fmts_list); | |
| } | |
/* Per-plane setup for the main input: pick FFT sizes and allocate the
 * transform buffers.
 *
 * For each plane the horizontal/vertical FFT length is the smallest power
 * of two >= twice the plane width/height: the data is padded to 2*w / 2*h
 * before transforming (see fft_horizontal()/fft_vertical()), which avoids
 * circular-convolution wrap-around.
 *
 * All planes use the full frame dimensions here; that matches do_convolve()
 * because the supported formats (see query_formats()) have no chroma
 * subsampling.
 *
 * NOTE(review): buffers are only freed in uninit(); if this config_props
 * callback were invoked more than once the earlier buffers would leak —
 * confirm reconfiguration cannot happen here.
 */
static int config_input_main(AVFilterLink *inlink)
{
    ConvolveContext *s = inlink->dst->priv;
    int fft_hbits, fft_vbits, i;
    const AVPixFmtDescriptor *desc;

    desc = av_pix_fmt_desc_get(inlink->format);
    for (i = 0; i < desc->nb_components; i++) {
        int w = inlink->w;
        int h = inlink->h;

        /* smallest power of two >= 2*w (resp. 2*h) */
        for (fft_hbits = 1; 1 << fft_hbits < w * 2; fft_hbits++);
        for (fft_vbits = 1; 1 << fft_vbits < h * 2; fft_vbits++);
        s->fft_hbits[i] = fft_hbits;
        s->fft_vbits[i] = fft_vbits;
        s->fft_hlen[i] = 1 << s->fft_hbits[i];
        s->fft_vlen[i] = 1 << s->fft_vbits[i];
        av_log(inlink->dst, AV_LOG_DEBUG, "%d: %dx%d - %dx%d\n", i, s->fft_hlen[i], s->fft_vlen[i], w, h);

        /* h rows of fft_hlen complex samples (row transforms) */
        if (!(s->fft_hdata[i] = av_calloc(h, s->fft_hlen[i] * sizeof(FFTComplex))))
            return AVERROR(ENOMEM);
        /* fft_hlen columns of fft_vlen complex samples (column transforms) */
        if (!(s->fft_vdata[i] = av_calloc(s->fft_hlen[i], s->fft_vlen[i] * sizeof(FFTComplex))))
            return AVERROR(ENOMEM);
        if (!(s->fft_hdata_impulse[i] = av_calloc(h, s->fft_hlen[i] * sizeof(FFTComplex))))
            return AVERROR(ENOMEM);
        if (!(s->fft_vdata_impulse[i] = av_calloc(s->fft_hlen[i], s->fft_vlen[i] * sizeof(FFTComplex))))
            return AVERROR(ENOMEM);
    }

    return 0;
}
| static int config_input_impulse(AVFilterLink *inlink) | |
| { | |
| AVFilterContext *ctx = inlink->dst; | |
| if (ctx->inputs[0]->w != ctx->inputs[1]->w || | |
| ctx->inputs[0]->h != ctx->inputs[1]->h) { | |
| av_log(ctx, AV_LOG_ERROR, "Width and height of input videos must be same.\n"); | |
| return AVERROR(EINVAL); | |
| } | |
| if (ctx->inputs[0]->format != ctx->inputs[1]->format) { | |
| av_log(ctx, AV_LOG_ERROR, "Inputs must be of same pixel format.\n"); | |
| return AVERROR(EINVAL); | |
| } | |
| return 0; | |
| } | |
/* Load one image plane into the row buffer and run a forward FFT over
 * every row.
 *
 * Each padded row of fft_hlen complex samples is filled as:
 *   [0, w)         the plane's pixels scaled by 'scale', imaginary part 0
 *   [w, 2*w)       a periodic repeat of the same row (reads pixel x - w)
 *   [2*w, hlen)    values copied from index fft_hlen - x of the row, i.e. a
 *                  mirror of the already-written leading samples
 */
static void fft_horizontal(ConvolveContext *s, FFTComplex *fft_hdata, int fft_hlen,
                           AVFrame *in, int w, int h, int plane, float scale)
{
    int y, x;

    for (y = 0; y < h; y++) {
        for (x = 0; x < w; x++) {
            fft_hdata[y * fft_hlen + x].re = in->data[plane][in->linesize[plane]*y+x] * scale;
            fft_hdata[y * fft_hlen + x].im = 0;
        }
        for (; x < w*2; x++) {
            fft_hdata[y * fft_hlen + x].re = in->data[plane][in->linesize[plane]*y+x-w] * scale;
            fft_hdata[y * fft_hlen + x].im = 0;
        }
        for (; x < fft_hlen; x++) {
            fft_hdata[y * fft_hlen + x].re = fft_hdata[y * fft_hlen + fft_hlen - x].re;
            fft_hdata[y * fft_hlen + x].im = 0;
        }

        av_fft_permute(s->hfft, fft_hdata + y * fft_hlen);
        av_fft_calc(s->hfft, fft_hdata + y * fft_hlen);
    }
}
/* Transpose the row-transformed data into the column buffer, pad each
 * column to fft_vlen with the same scheme as fft_horizontal() (periodic
 * repeat over [h, 2*h), then a mirror of the already-written samples),
 * and run a forward FFT over every column.
 *
 * Note the transpose: fft_vdata row y holds column y of fft_hdata. */
static void fft_vertical(ConvolveContext *s, FFTComplex *fft_hdata, FFTComplex *fft_vdata,
                         int fft_hlen, int fft_vlen, int h, int plane)
{
    int y, x;

    for (y = 0; y < fft_hlen; y++) {
        for (x = 0; x < h; x++) {
            fft_vdata[y * fft_vlen + x].re = fft_hdata[x * fft_hlen + y].re;
            fft_vdata[y * fft_vlen + x].im = fft_hdata[x * fft_hlen + y].im;
        }
        for (; x < h*2; x++) {
            fft_vdata[y * fft_vlen + x].re = fft_hdata[(x-h) * fft_hlen + y].re;
            fft_vdata[y * fft_vlen + x].im = fft_hdata[(x-h) * fft_hlen + y].im;
        }
        for (; x < fft_vlen; x++) {
            fft_vdata[y * fft_vlen + x].re = fft_vdata[y * fft_vlen + fft_vlen - x].re;
            fft_vdata[y * fft_vlen + x].im = fft_vdata[y * fft_vlen + fft_vlen - x].im;
        }

        av_fft_permute(s->vfft, fft_vdata + y * fft_vlen);
        av_fft_calc(s->vfft, fft_vdata + y * fft_vlen);
    }
}
| static void ifft_vertical(ConvolveContext *s, int fft_hlen, int fft_vlen, int h, int plane) | |
| { | |
| int y, x; | |
| for (y = 0; y < fft_hlen; y++) { | |
| av_fft_permute(s->ivfft, s->fft_vdata[plane] + y * fft_vlen); | |
| av_fft_calc(s->ivfft, s->fft_vdata[plane] + y * fft_vlen); | |
| for (x = 0; x < h; x++) { | |
| s->fft_hdata[plane][x * fft_hlen + y].re = s->fft_vdata[plane][y * fft_vlen + x].re; | |
| s->fft_hdata[plane][x * fft_hlen + y].im = s->fft_vdata[plane][y * fft_vlen + x].im; | |
| } | |
| } | |
| } | |
| static void ifft_horizontal(ConvolveContext *s, AVFrame *out, int fft_hlen, int fft_vlen, | |
| int w, int h, int plane) | |
| { | |
| int y, x; | |
| for (y = 0; y < h; y++) { | |
| av_fft_permute(s->ihfft, s->fft_hdata[plane] + y * fft_hlen); | |
| av_fft_calc(s->ihfft, s->fft_hdata[plane] + y * fft_hlen); | |
| for (x = 0; x < w; x++) | |
| out->data[plane][out->linesize[plane] * y + x] = | |
| av_clip(s->fft_hdata[plane][y * fft_hlen + x + w].re * 1 / (fft_hlen * fft_vlen * 4), 0, 255); | |
| } | |
| } | |
/* Copy one source row into dst with the filter's quadrant-swap layout:
 * dst[1..w-1] take src[hw..w-1] then src[0..w-hw-2], and dst[0] wraps to
 * src[w-1-hw] (the output is additionally rotated right by one sample). */
static void shift_row(uint8_t *dst, const uint8_t *src, int w, int hw)
{
    int x;

    for (x = 0; x < hw; x++)
        dst[x + 1] = src[x + hw];
    for (; x < w - 1; x++)
        dst[x + 1] = src[x - hw];
    dst[0] = src[x - hw];
}

/* Swap the image quadrants (DC to the center, as after an FFT-domain
 * convolution) while copying from 'in' to 'out'. Rows are likewise
 * rotated by one: output row 0 comes from input row h-hh, then rows
 * hh..h-1 followed by rows 0..h-hh-2. */
static void fftshift(uint8_t *out, ptrdiff_t out_linesize,
                     const uint8_t *in, ptrdiff_t in_linesize,
                     int w, int h)
{
    const int hw = w >> 1, hh = h >> 1;
    int y;

    shift_row(out, in + (h - hh) * in_linesize, w, hw);
    out += out_linesize;

    for (y = 0; y < hh; y++) {
        shift_row(out, in + (y + hh) * in_linesize, w, hw);
        out += out_linesize;
    }
    for (; y < h - 1; y++) {
        shift_row(out, in + (y - hh) * in_linesize, w, hw);
        out += out_linesize;
    }
}
| static int do_convolve(FFFrameSync *fs) | |
| { | |
| AVFilterContext *ctx = fs->parent; | |
| AVFilterLink *outlink = ctx->outputs[0]; | |
| const AVPixFmtDescriptor *desc; | |
| ConvolveContext *s = ctx->priv; | |
| AVFrame *out, *mainpic = NULL, *impulsepic = NULL; | |
| int ret, y, x, plane; | |
| ret = ff_framesync2_dualinput_get(fs, &mainpic, &impulsepic); | |
| if (ret < 0) | |
| return ret; | |
| if (!impulsepic) | |
| return ff_filter_frame(ctx->outputs[0], mainpic); | |
| out = ff_get_video_buffer(outlink, outlink->w, outlink->h); | |
| if (!out) { | |
| av_frame_free(&mainpic); | |
| return AVERROR(ENOMEM); | |
| } | |
| av_frame_copy_props(out, mainpic); | |
| desc = av_pix_fmt_desc_get(outlink->format); | |
| for (plane = 0; plane < desc->nb_components; plane++) { | |
| const int fft_hlen = s->fft_hlen[plane]; | |
| const int fft_vlen = s->fft_vlen[plane]; | |
| int w = outlink->w; | |
| int h = outlink->h; | |
| float total = 0; | |
| if (plane == 1 || plane == 2) { | |
| w = AV_CEIL_RSHIFT(w, desc->log2_chroma_w); | |
| h = AV_CEIL_RSHIFT(h, desc->log2_chroma_h); | |
| } | |
| fft_horizontal(s, s->fft_hdata[plane], fft_hlen, mainpic, w, h, plane, 1.f); | |
| fft_vertical(s, s->fft_hdata[plane], s->fft_vdata[plane], | |
| fft_hlen, fft_vlen, h, plane); | |
| for (y = 0; y < h; y++) { | |
| for (x = 0; x < w; x++) { | |
| total += impulsepic->data[plane][y * impulsepic->linesize[plane] + x]; | |
| } | |
| } | |
| total = FFMAX(1, total); | |
| fft_horizontal(s, s->fft_hdata_impulse[plane], fft_hlen, impulsepic, w, h, plane, 1 / total); | |
| fft_vertical(s, s->fft_hdata_impulse[plane], s->fft_vdata_impulse[plane], | |
| fft_hlen, fft_vlen, h, plane); | |
| for (y = 0; y < fft_hlen; y++) { | |
| for (x = 0; x < fft_vlen; x++) { | |
| FFTSample re, im, ire, iim; | |
| re = s->fft_vdata[plane][y*fft_vlen + x].re; | |
| im = s->fft_vdata[plane][y*fft_vlen + x].im; | |
| ire = s->fft_vdata_impulse[plane][y*fft_vlen + x].re; | |
| iim = s->fft_vdata_impulse[plane][y*fft_vlen + x].im; | |
| s->fft_vdata[plane][y*fft_vlen + x].re = ire * re - iim * im; | |
| s->fft_vdata[plane][y*fft_vlen + x].im = iim * re + ire * im; | |
| } | |
| } | |
| ifft_vertical(s, fft_hlen, fft_vlen, h, plane); | |
| ifft_horizontal(s, mainpic, fft_hlen, fft_vlen, w, h, plane); | |
| fftshift(out->data[plane], out->linesize[plane], | |
| mainpic->data[plane], mainpic->linesize[plane], w, h); | |
| } | |
| av_frame_free(&mainpic); | |
| return ff_filter_frame(outlink, out); | |
| } | |
| static int config_output(AVFilterLink *outlink) | |
| { | |
| AVFilterContext *ctx = outlink->src; | |
| ConvolveContext *s = ctx->priv; | |
| AVFilterLink *mainlink = ctx->inputs[0]; | |
| int ret; | |
| s->fs.on_event = do_convolve; | |
| ret = ff_framesync2_init_dualinput(&s->fs, ctx); | |
| if (ret < 0) | |
| return ret; | |
| outlink->w = mainlink->w; | |
| outlink->h = mainlink->h; | |
| outlink->time_base = mainlink->time_base; | |
| outlink->sample_aspect_ratio = mainlink->sample_aspect_ratio; | |
| outlink->frame_rate = mainlink->frame_rate; | |
| if ((ret = ff_framesync2_configure(&s->fs)) < 0) | |
| return ret; | |
| s->hfft = av_fft_init(s->fft_hbits[0], 0); | |
| s->vfft = av_fft_init(s->fft_vbits[0], 0); | |
| s->ihfft = av_fft_init(s->fft_hbits[0], 1); | |
| s->ivfft = av_fft_init(s->fft_vbits[0], 1); | |
| return 0; | |
| } | |
/* Standard framesync-driven activation: all scheduling is delegated to
 * the framesync, which invokes do_convolve() when both inputs line up. */
static int activate(AVFilterContext *ctx)
{
    ConvolveContext *s = ctx->priv;
    return ff_framesync2_activate(&s->fs);
}
| static av_cold void uninit(AVFilterContext *ctx) | |
| { | |
| ConvolveContext *s = ctx->priv; | |
| int i; | |
| for (i = 0; i < 4; i++) { | |
| av_freep(&s->fft_hdata[i]); | |
| av_freep(&s->fft_vdata[i]); | |
| av_freep(&s->fft_hdata_impulse[i]); | |
| av_freep(&s->fft_vdata_impulse[i]); | |
| } | |
| av_fft_end(s->hfft); | |
| av_fft_end(s->vfft); | |
| av_fft_end(s->ihfft); | |
| av_fft_end(s->ivfft); | |
| ff_framesync2_uninit(&s->fs); | |
| } | |
/* Two inputs: the stream to be filtered and the convolution kernel. */
static const AVFilterPad convolve_inputs[] = {
    {
        .name          = "main",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_input_main,     /* FFT sizes + buffers */
    },{
        .name          = "impulse",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_input_impulse,  /* must match "main" */
    },
    { NULL }
};
/* Single video output, configured from the main input in config_output(). */
static const AVFilterPad convolve_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
    },
    { NULL }
};
/* Filter registration; preinit comes from FRAMESYNC_DEFINE_CLASS so the
 * framesync options are available before init. */
AVFilter ff_vf_convolve = {
    .name          = "convolve",
    .description   = NULL_IF_CONFIG_SMALL("Convolve first video stream with second video stream."),
    .preinit       = convolve_framesync_preinit,
    .uninit        = uninit,
    .query_formats = query_formats,
    .activate      = activate,
    .priv_size     = sizeof(ConvolveContext),
    .priv_class    = &convolve_class,
    .inputs        = convolve_inputs,
    .outputs       = convolve_outputs,
};