from vapoursynth import core
from functools import partial
import vapoursynth as vs
import havsfunc as haf
import mvsfunc as mvf
import muvsfunc as muf
import nnedi3_resample as nnrs
import math, mvmulti
"""
Copyright 2019 Pao-Ming Lin
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
Main functions:
FineSharp
psharpen
MCDegrainSharp
LSFmod
SeeSaw
QTGMC
SMDegrain
TemporalDegrain2
mClean
STPressoHD
MLDegrain
NonlinUSM
DetailSharpen
Hysteria
SuperToon
EdgeDetect
JohnFPS
SpotLess
HQDeringmod
MaskedDHA
daamod
LUSM
"""
def FineSharp(clip, mode=1, sstr=2.5, cstr=None, xstr=0, lstr=1.5, pstr=1.28, ldmp=None, hdmp=0.01, rep=12):
"""
Original author: Didée (https://forum.doom9.org/showthread.php?t=166082)
Small and relatively fast realtime-sharpening function, for 1080p,
or after scaling 720p → 1080p during playback.
(to make 720p look more like real 1080p)
It's a generic sharpener. Only for good quality sources!
(If the source is crap, FineSharp will happily sharpen the crap) :)
Noise/grain will be enhanced, too. The method is GENERIC.
Modus operandi: A basic nonlinear sharpening method is performed,
then the *blurred* sharp-difference gets subtracted again.
Args:
mode (int) - 1 to 3, weakest to strongest. When negative -1 to -3,
a broader kernel for equalisation is used.
sstr (float) - strength of sharpening.
cstr (float) - strength of equalisation (recommended 0.5 to 1.25)
xstr (float) - strength of XSharpen-style final sharpening, 0.0 to 1.0.
(but, better don't go beyond 0.25...)
lstr (float) - modifier for non-linear sharpening.
pstr (float) - exponent for non-linear sharpening.
ldmp (float) - "low damp", to not over-enhance very small differences.
(noise coming out of flat areas)
hdmp (float) - "high damp", this damping term has a larger effect than ldmp
when the sharp-difference is larger than 1, and vice versa.
rep (int) - repair mode used in final sharpening, recommended modes are 1/12/13.
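Usage (a minimal sketch; 'src' is assumed to be a YUV clip loaded elsewhere):
    sharp = FineSharp(src, mode=1, sstr=2.5)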
"""
if not isinstance(clip, vs.VideoNode):
raise TypeError("FineSharp: This is not a clip!")
if clip.format.color_family == vs.COMPAT:
raise TypeError("FineSharp: COMPAT color family is not supported!")
color = clip.format.color_family
bd = clip.format.bits_per_sample
isFLOAT = clip.format.sample_type == vs.FLOAT
mid = 0 if isFLOAT else 1 << (bd - 1)
i = 0.00392 if isFLOAT else 1 << (bd - 8)
xy = 'x y - {} /'.format(i) if bd != 8 else 'x y -'
R = core.rgsf.Repair if isFLOAT else core.rgvs.Repair
mat1 = [1, 2, 1, 2, 4, 2, 1, 2, 1]
mat2 = [1, 1, 1, 1, 1, 1, 1, 1, 1]
if cstr is None:
cstr = spline(sstr, {0: 0, 0.5: 0.1, 1: 0.6, 2: 0.9, 2.5: 1, 3: 1.1, 3.5: 1.15, 4: 1.2, 8: 1.25, 255: 1.5})
cstr **= 0.8 if mode > 0 else cstr
if ldmp is None:
ldmp = sstr
sstr = max(sstr, 0)
cstr = max(cstr, 0)
xstr = min(max(xstr, 0), 1)
ldmp = max(ldmp, 0)
hdmp = max(hdmp, 0)
if sstr < 0.01 and cstr < 0.01 and xstr < 0.01:
return clip
tmp = core.std.ShufflePlanes(clip, [0], vs.GRAY) if color in [vs.YUV, vs.YCOCG] else clip
if abs(mode) == 1:
c2 = core.std.Convolution(tmp, matrix=mat1).std.Median()
else:
c2 = core.std.Median(tmp).std.Convolution(matrix=mat1)
if abs(mode) == 3:
c2 = c2.std.Median()
if sstr >= 0.01:
expr = 'x y = x dup {} dup dup dup abs {} / {} pow swap3 abs {} + / swap dup * dup {} + / * * {} * + ?'
shrp = core.std.Expr([tmp, c2], [expr.format(xy, lstr, 1/pstr, hdmp, ldmp, sstr*i)])
if cstr >= 0.01:
diff = core.std.MakeDiff(shrp, tmp)
if cstr != 1:
expr = 'x {} *'.format(cstr) if isFLOAT else 'x {} - {} * {} +'.format(mid, cstr, mid)
diff = core.std.Expr([diff], [expr])
diff = core.std.Convolution(diff, matrix=mat1) if mode > 0 else core.std.Convolution(diff, matrix=mat2)
shrp = core.std.MakeDiff(shrp, diff)
if xstr >= 0.01:
xyshrp = core.std.Expr([shrp, core.std.Convolution(shrp, matrix=mat2)], ['x dup y - 9.69 * +'])
rpshrp = R(xyshrp, shrp, [rep])
shrp = core.std.Merge(shrp, rpshrp, [xstr])
return core.std.ShufflePlanes([shrp, clip], [0, 1, 2], color) if color in [vs.YUV, vs.YCOCG] else shrp
def psharpen(clip, strength=25, threshold=75, ssx=1, ssy=1, dw=None, dh=None):
"""
From https://forum.doom9.org/showthread.php?p=683344 by ilpippo80.
Sharpening function similar to LimitedSharpenFaster;
performs two-point sharpening to avoid overshoot.
Args:
strength (float) - Strength of the sharpening, 0 to 100.
threshold (float) - Controls how much is sharpened, 0 to 100.
ssx, ssy (float) - Supersampling factor (reduces aliasing on edges).
dw, dh (int) - Output resolution after sharpening.
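Usage (a minimal sketch; 'src' is assumed to be an integer-format YUV clip loaded elsewhere):
    sharp = psharpen(src, strength=25, threshold=75, ssx=1.5, ssy=1.5)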
"""
if not isinstance(clip, vs.VideoNode):
raise TypeError("psharpen: This is not a clip!")
if clip.format.sample_type != vs.INTEGER:
raise TypeError("psharpen: clip must be of integer sample type")
if clip.format.color_family == vs.COMPAT:
raise TypeError("psharpen: COMPAT color family is not supported!")
color = clip.format.color_family
ow = clip.width
oh = clip.height
ssx = max(ssx, 1.0)
ssy = max(ssy, 1.0)
strength = min(max(strength, 0), 100)
threshold = min(max(threshold, 0), 100)
xss = m4(ow * ssx)
yss = m4(oh * ssy)
if dw is None:
dw = ow
if dh is None:
dh = oh
# oversampling
if ssx > 1 or ssy > 1:
clip = core.resize.Spline36(clip, xss, yss)
tmp = core.std.ShufflePlanes(clip, [0], vs.GRAY) if color in [vs.YUV, vs.YCOCG] else clip
# calculating the max and min in every 3*3 square
maxi = core.std.Maximum(tmp)
mini = core.std.Minimum(tmp)
# normalizing max and val to values from 0 to (max-min)
nmax = core.std.Expr([maxi, mini], ['x y -'])
nval = core.std.Expr([tmp, mini], ['x y -'])
# initializing expression used to obtain the output luma value
s = strength / 100
t = threshold / 100
st = 1 - (s / (s + t - s * t))
expr = 'x y / 2 * 1 - abs {} < {} 1 = x y 2 / = 0 y 2 / ? x y / 2 * 1 - abs 1 {} - / ? x y / 2 * 1 - abs 1 {} - * {} + ? x y 2 / > 1 -1 ? * 1 + y * 2 /'
expr = expr.format(st, s, s, t, t)
# calculates the new luma value pushing it towards min or max
nval = core.std.Expr([nval, nmax], [expr])
# normalizing val to values from min to max
tmp = core.std.Expr([nval, mini], ['x y +'])
# resizing the image to the output resolution
# applying the new luma value to clip
if dw != ow or dh != oh:
if color in [vs.YUV, vs.YCOCG]:
tmp = core.std.ShufflePlanes([tmp, clip], [0, 1, 2], color)
return core.resize.Spline36(tmp, dw, dh)
elif ssx > 1 or ssy > 1:
if color in [vs.YUV, vs.YCOCG]:
tmp = core.std.ShufflePlanes([tmp, clip], [0, 1, 2], color)
return core.resize.Spline36(tmp, dw, dh)
elif color in [vs.YUV, vs.YCOCG]:
return core.std.ShufflePlanes([tmp, clip], [0, 1, 2], color)
else:
return tmp
def MCDegrainSharp(clip, tr=3, bblur=.69, csharp=.69, thSAD=400, rec=False, chroma=True, analyse_args=None, recalculate_args=None):
"""
Based on MCDegrain By Didée:
https://forum.doom9.org/showthread.php?t=161594
Also based on Didée observations in this thread:
https://forum.doom9.org/showthread.php?t=161580
Denoise with MDegrainX, do slight sharpening where the motion match
is good, and slight blurring where the motion match is bad.
In areas where MAnalyse cannot find good matches,
the Blur() will be dominant.
In areas where good matches are found,
the Sharpen()'ed pixels will overweight the Blur()'ed pixels
when the pixel averaging is performed.
Args:
bblur, csharp (int, float or function) - The method used to smooth/sharpen the image.
If it's an int or a float, it specifies the amount in Blur() and Sharpen().
If it's a function, it will use the specified function to smooth/sharpen the image.
tr (int) - Strength of the denoising (1-24).
thSAD (int) - Soft threshold of block sum of absolute differences.
A low value can result in staggered denoising,
a high value can result in ghosting and artifacts.
rec (bool) - Recalculate the motion vectors to obtain more precision.
chroma (bool) - Whether to process chroma.
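Usage (a minimal sketch; 'src' is assumed to be a YUV clip loaded elsewhere):
    out = MCDegrainSharp(src, tr=3, thSAD=400)
    # bblur/csharp also accept a custom function, e.g. a 3x3 binomial blur:
    out = MCDegrainSharp(src, bblur=lambda c: core.std.Convolution(c, matrix=[1, 2, 1, 2, 4, 2, 1, 2, 1]))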
"""
if not isinstance(clip, vs.VideoNode) or clip.format.color_family not in [vs.GRAY, vs.YUV]:
raise TypeError("MCDegrainSharp: This is not a GRAY or YUV clip!")
isFLOAT = clip.format.sample_type == vs.FLOAT
isGRAY = clip.format.color_family == vs.GRAY
chroma = False if isGRAY else chroma
planes = [0, 1, 2] if chroma else [0]
plane = 4 if chroma else 0
bs = 32 if clip.width > 2400 else 16 if clip.width > 960 else 8
pel = 1 if clip.width > 960 else 2
truemotion = False if clip.width > 960 else True
A = core.mvsf.Analyse if isFLOAT else core.mv.Analyse
S = core.mvsf.Super if isFLOAT else core.mv.Super
R = core.mvsf.Recalculate if isFLOAT else core.mv.Recalculate
D1 = core.mvsf.Degrain1 if isFLOAT else core.mv.Degrain1
D2 = core.mvsf.Degrain2 if isFLOAT else core.mv.Degrain2
D3 = core.mvsf.Degrain3 if isFLOAT else core.mv.Degrain3
if isinstance(bblur, (int, float)):
c2 = muf.Blur(clip, amountH=bblur, planes=planes)
elif callable(bblur):
c2 = bblur(clip)
else:
raise TypeError("MCDegrainSharp: bblur must be an int, a float or a function!")
if isinstance(csharp, (int, float)):
c4 = muf.Sharpen(clip, amountH=csharp, planes=planes)
elif callable(csharp):
c4 = csharp(clip)
else:
raise TypeError("MCDegrainSharp: csharp must be an int, a float or a function!")
if analyse_args is None:
analyse_args = dict(blksize=bs, overlap=bs//2, search=5, chroma=chroma, truemotion=truemotion)
if recalculate_args is None:
recalculate_args = dict(blksize=bs//2, overlap=bs//4, search=5, chroma=chroma, truemotion=truemotion)
if tr > 3 and not isFLOAT:
raise TypeError("MCDegrainSharp: DegrainN is only available in float")
super_b = S(DitherLumaRebuild(c2, 1), hpad=bs, vpad=bs, pel=pel, sharp=1, rfilter=4)
super_rend = S(c4, hpad=bs, vpad=bs, pel=pel, levels=1, rfilter=1)
if tr < 4:
bv1 = A(super_b, isb=True, delta=1, **analyse_args)
fv1 = A(super_b, isb=False, delta=1, **analyse_args)
if tr > 1:
bv2 = A(super_b, isb=True, delta=2, **analyse_args)
fv2 = A(super_b, isb=False, delta=2, **analyse_args)
if tr > 2:
bv3 = A(super_b, isb=True, delta=3, **analyse_args)
fv3 = A(super_b, isb=False, delta=3, **analyse_args)
else:
vec = mvmulti.Analyze(super_b, tr=tr, **analyse_args)
if rec:
if tr < 4:
bv1 = R(super_b, bv1, **recalculate_args)
fv1 = R(super_b, fv1, **recalculate_args)
if tr > 1:
bv2 = R(super_b, bv2, **recalculate_args)
fv2 = R(super_b, fv2, **recalculate_args)
if tr > 2:
bv3 = R(super_b, bv3, **recalculate_args)
fv3 = R(super_b, fv3, **recalculate_args)
else:
vec = mvmulti.Recalculate(super_b, vec, tr=tr, **recalculate_args)
if tr <= 1:
return D1(c2, super_rend, bv1, fv1, thSAD, plane=plane)
elif tr == 2:
return D2(c2, super_rend, bv1, fv1, bv2, fv2, thSAD, plane=plane)
elif tr == 3:
return D3(c2, super_rend, bv1, fv1, bv2, fv2, bv3, fv3, thSAD, plane=plane)
else:
return mvmulti.DegrainN(c2, super_rend, vec, tr=tr, thsad=thSAD, plane=plane)
def LSFmod(clip, strength=100, Smode=None, Smethod=None, kernel=11, preblur=False, secure=None, source=None,
Szrp=16, Spwr=None, SdmpLo=None, SdmpHi=None, Lmode=None, overshoot=None, undershoot=None, overshoot2=None, undershoot2=None,
soft=None, soothe=None, keep=None, edgemode=0, edgemaskHQ=None, ssx=None, ssy=None, dw=None, dh=None, defaults='slow'):
"""
LimitedSharpenFaster Modded Version by LaTo INV.
+--------------+
| DEPENDENCIES |
+--------------+
→ RGVS / RGSF
+---------+
| GENERAL |
+---------+
strength [int]
--------------
Strength of the sharpening
Smode [int]
----------------------
Sharpen mode:
= 1 : Range sharpening
= 2 : Nonlinear sharpening (corrected version)
= 3 : Nonlinear sharpening (original version)
Smethod [int]
--------------------
Sharpen method:
= 1 : 3x3 kernel
= 2 : Min/Max
= 3 : Min/Max + 3x3 kernel
kernel [int]
-------------------------
Kernel used in Smethod=1&3
In strength order: + 19 >> 20 > 11/12 -
Negative: absolute value specifies the MinBlur radius used.
+---------+
| SPECIAL |
+---------+
preblur [bool]
--------------------------------
Mode to avoid noise sharpening & ringing
secure [bool]
-------------
Mode to avoid banding & oil painting (or face wax) effect of sharpening
source [clip]
-------------
If source is defined, LSFmod won't sharpen a denoised clip beyond this source clip.
In this mode, you can safely set Lmode = 0 & PP = OFF.
+----------------------+
| NONLINEAR SHARPENING |
+----------------------+
Szrp [int]
----------
Zero Point:
- differences below Szrp are amplified (overdrive sharpening)
- differences above Szrp are reduced (reduced sharpening)
Spwr [int]
----------
Power: exponent for sharpener
SdmpLo [int]
------------
Damp Low: reduce sharpening for small changes [0:disable]
SdmpHi [int]
------------
Damp High: reduce sharpening for big changes [0:disable]
+----------+
| LIMITING |
+----------+
Lmode [int]
--------------------------
Limit mode:
< 0 : Limit with Repair (ex: Lmode = -1 → Repair(1), Lmode = -5 → Repair(5)...)
= 0 : No limit
= 1 : Limit to over/undershoot
= 2 : Limit to over/undershoot on edges and no limit on not-edges
= 3 : Limit to zero on edges and to over/undershoot on not-edges
= 4 : Limit to over/undershoot on edges and to over/undershoot2 on not-edges
overshoot [int]
---------------
Limit for pixels that get brighter during sharpening
undershoot [int]
----------------
Limit for pixels that get darker during sharpening
overshoot2 [int]
----------------
Same as overshoot, only for Lmode = 4
undershoot2 [int]
-----------------
Same as undershoot, only for Lmode = 4
+-----------------+
| POST-PROCESSING |
+-----------------+
soft [int]
-------------------------
Softens the sharpening effect (negative: new auto-calculate)
soothe [bool]
-------------
= True : Enable soothe temporal stabilization
= False : Disable soothe temporal stabilization
keep [int]
-------------------
Minimum percent of the original sharpening to keep (only with soothe=True)
+-------+
| EDGES |
+-------+
edgemode [int]
------------------------
=-1 : Show edgemask
= 0 : Sharpening all
= 1 : Sharpening only edges
= 2 : Sharpening only not-edges
edgemaskHQ [bool]
-----------------
= True : Original edgemask
= False : Faster edgemask
+------------+
| UPSAMPLING |
+------------+
ssx ssy [float]
-------------------
Supersampling factor (reduce aliasing on edges)
dw dh [int]
---------------------
Output resolution after sharpening (avoid a resizing step)
+----------+
| SETTINGS |
+----------+
defaults [string]
--------------------------------------------
= "old" : Reset settings to original version (output will be THE SAME AS LSF)
= "slow" : Enable SLOW modded version settings
= "fast" : Enable FAST modded version settings
defaults = "old" : - strength = 100
---------------- - Smode = 1
- Smethod = Smode == 1 ? 2 : 1
- kernel = 11
- preblur = False
- secure = False
- source = undefined
- Szrp = 16
- Spwr = 2
- SdmpLo = strength / 25
- SdmpHi = 0
- Lmode = 1
- overshoot = 1
- undershoot = overshoot
- overshoot2 = overshoot * 2
- undershoot2 = overshoot2
- soft = 0
- soothe = False
- keep = 25
- edgemode = 0
- edgemaskHQ = True
- ssx = Smode == 1 ? 1.50 : 1.25
- ssy = ssx
- dw = ow
- dh = oh
defaults = "slow" : - strength = 100
----------------- - Smode = 2
- Smethod = 3
- kernel = 11
- preblur = False
- secure = True
- source = undefined
- Szrp = 16
- Spwr = 4
- SdmpLo = 4
- SdmpHi = 48
- Lmode = 4
- overshoot = strength / 100
- undershoot = overshoot
- overshoot2 = overshoot * 2
- undershoot2 = overshoot2
- soft = -2
- soothe = True
- keep = 20
- edgemode = 0
- edgemaskHQ = True
- ssx = 1.50
- ssy = ssx
- dw = ow
- dh = oh
defaults = "fast" : - strength = 100
----------------- - Smode = 1
- Smethod = 2
- kernel = 11
- preblur = False
- secure = True
- source = undefined
- Szrp = 16
- Spwr = 4
- SdmpLo = 4
- SdmpHi = 48
- Lmode = 1
- overshoot = strength / 100
- undershoot = overshoot
- overshoot2 = overshoot * 2
- undershoot2 = overshoot2
- soft = 0
- soothe = True
- keep = 20
- edgemode = 0
- edgemaskHQ = False
- ssx = 1.25
- ssy = ssx
- dw = ow
- dh = oh
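Usage (a minimal sketch; 'src' is assumed to be a YUV clip loaded elsewhere):
    sharp = LSFmod(src)                                # "slow" defaults
    sharp = LSFmod(src, strength=80, defaults='fast')  # lighter, faster variant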
"""
# Modified from havsfunc: https://github.com/HomeOfVapourSynthEvolution/havsfunc/blob/r31/havsfunc.py#L4492
if not isinstance(clip, vs.VideoNode):
raise TypeError('LSFmod: This is not a clip!')
if source is not None and (not isinstance(source, vs.VideoNode) or source.format.id != clip.format.id):
raise TypeError("LSFmod: source must be the same format as clip")
if source is not None and (source.width != clip.width or source.height != clip.height):
raise TypeError("LSFmod: source must be the same size as clip")
if clip.format.color_family == vs.COMPAT:
raise TypeError("LSFmod: COMPAT color family is not supported!")
color = clip.format.color_family
bd = clip.format.bits_per_sample
isFLOAT = clip.format.sample_type == vs.FLOAT
mid = 0 if isFLOAT else 1 << (bd - 1)
i = 0.00392 if isFLOAT else 1 << (bd - 8)
R = core.rgsf.Repair if isFLOAT else core.rgvs.Repair
ow = clip.width
oh = clip.height
csrc = clip
### DEFAULTS
try:
num = ['old', 'slow', 'fast'].index(defaults.lower())
except:
raise ValueError('LSFmod: Defaults must be "old" or "slow" or "fast"')
if Smode is None:
Smode = [1, 2, 1][num]
if Smethod is None:
Smethod = [2 if Smode == 1 else 1, 3, 2][num]
if secure is None:
secure = [False, True, True][num]
if Spwr is None:
Spwr = [2, 4, 4][num]
if SdmpLo is None:
SdmpLo = [strength/25, 4, 4][num]
if SdmpHi is None:
SdmpHi = [0, 48, 48][num]
if Lmode is None:
Lmode = [1, 4, 1][num]
if overshoot is None:
overshoot = [1, strength/100, strength/100][num]
if undershoot is None:
undershoot = overshoot
if overshoot2 is None:
overshoot2 = overshoot * 2
if undershoot2 is None:
undershoot2 = overshoot2
if soft is None:
soft = [0, -2, 0][num]
if soothe is None:
soothe = [False, True, True][num]
if keep is None:
keep = [25, 20, 20][num]
if edgemaskHQ is None:
edgemaskHQ = [True, True, False][num]
if ssx is None:
ssx = [1.5 if Smode == 1 else 1.25, 1.5, 1.25][num]
if ssy is None:
ssy = ssx
if dw is None:
dw = ow
if dh is None:
dh = oh
if kernel <= 0:
Filter = partial(MinBlur, r=abs(kernel))
elif kernel == 4:
Filter = partial(core.std.Median)
elif kernel in [11, 12]:
Filter = partial(core.std.Convolution, matrix=[1, 2, 1, 2, 4, 2, 1, 2, 1])
elif kernel == 19:
Filter = partial(core.std.Convolution, matrix=[1, 1, 1, 1, 0, 1, 1, 1, 1])
elif kernel == 20:
Filter = partial(core.std.Convolution, matrix=[1, 1, 1, 1, 1, 1, 1, 1, 1])
else:
if isFLOAT:
Filter = partial(core.rgsf.RemoveGrain, mode=[kernel])
else:
Filter = partial(core.rgvs.RemoveGrain, mode=[kernel])
if soft < 0:
soft = (1 + (2 / (ssx + ssy))) * math.sqrt(strength)
Spwr = max(Spwr, 1)
SdmpLo = max(SdmpLo, 0)
SdmpHi = max(SdmpHi, 0)
ssx = max(ssx, 1)
ssy = max(ssy, 1)
soft = max(min(soft/100, 1), 0)
keep = max(min(keep/100, 1), 0)
xss = m4(ow * ssx)
yss = m4(oh * ssy)
strength = max(strength, 0)
Str = strength / 100
### SHARP
if ssx > 1 or ssy > 1:
clip = core.resize.Spline36(clip, xss, yss)
tmp = core.std.ShufflePlanes(clip, [0], vs.GRAY) if color in [vs.YUV, vs.YCOCG] else clip
pre = MinBlur(tmp) if preblur else tmp
darklimit = core.std.Minimum(pre)
brightlimit = core.std.Maximum(pre)
if Smethod <= 1:
method = Filter(pre)
elif Smethod == 2:
method = core.std.Merge(darklimit, brightlimit)
else:
method = Filter(core.std.Merge(darklimit, brightlimit))
if secure:
method = core.std.Expr([method, pre], ['x y < x {} + x y > x {} - x ? ?'.format(i, i)])
if preblur:
method = core.std.Expr([method, tmp, pre], ['x y + z -'])
if Smode <= 1:
normsharp = core.std.Expr([tmp, method], ['x dup y - {} * +'.format(Str)])
elif Smode == 2:
xy = 'x y - abs {} /'.format(i) if bd != 8 else 'x y - abs'
Hi = 1 if SdmpHi == 0 else '1 {} {} / 4 pow + 1 {} {} / 4 pow + /'.format(Szrp, SdmpHi, xy, SdmpHi)
expr1 = 'x y = x dup {} dup {} / {} pow swap dup * dup {} {} + * swap {} + {} * / * {} * {} * x y > 1 -1 ? * + ?'
normsharp = core.std.Expr([tmp, method], [expr1.format(xy, Szrp, 1/Spwr, Szrp**2, SdmpLo, SdmpLo, Szrp**2, Hi, Szrp*Str*i)])
else:
xy = 'x y - abs {} /'.format(i) if bd != 8 else 'x y - abs'
Hi = 1 if SdmpHi == 0 else '1 {} {} / 4 pow +'.format(xy, SdmpHi)
expr1 = 'x y = x dup {} dup {} / {} pow swap dup * dup {} + / * {} / {} * x y > 1 -1 ? * + ?'
normsharp = core.std.Expr([tmp, method], [expr1.format(xy, Szrp, 1/Spwr, SdmpLo, Hi, Szrp*Str*i)])
### LIMIT
normal = haf.Clamp(normsharp, brightlimit, darklimit, max(overshoot, 0)*i, max(undershoot, 0)*i)
if edgemaskHQ:
edge = core.std.Expr([core.std.Sobel(tmp, scale=2)], 'x {} / 0.87 pow {} *'.format(128*i, 255*i))
else:
edge = core.std.Expr([core.std.Maximum(tmp), core.std.Minimum(tmp)], ['x y - {} / 0.87 pow {} *'.format(32*i, 255*i)])
inflate = core.std.Inflate(edge)
if Lmode == 0:
limit1 = normsharp
elif Lmode == 1:
limit1 = normal
elif Lmode == 2:
limit1 = core.std.MaskedMerge(normsharp, normal, inflate)
elif Lmode == 3:
zero = haf.Clamp(normsharp, brightlimit, darklimit, 0, 0)
limit1 = core.std.MaskedMerge(normal, zero, inflate)
elif Lmode == 4:
second = haf.Clamp(normsharp, brightlimit, darklimit, max(overshoot2, 0)*i, max(undershoot2, 0)*i)
limit1 = core.std.MaskedMerge(second, normal, inflate)
else:
limit1 = R(normsharp, tmp, abs(Lmode))
if edgemode < 0:
return core.resize.Spline36(edge, dw, dh)
if edgemode == 0:
limit2 = limit1
elif edgemode == 1:
limit2 = core.std.MaskedMerge(tmp, limit1, inflate.std.Inflate().std.Convolution(matrix=[1, 2, 1, 2, 4, 2, 1, 2, 1]))
else:
limit2 = core.std.MaskedMerge(limit1, tmp, inflate.std.Inflate().std.Convolution(matrix=[1, 2, 1, 2, 4, 2, 1, 2, 1]))
### SOFT
if soft == 0:
PP1 = limit2
else:
sharpdiff = core.std.MakeDiff(tmp, limit2)
if isFLOAT:
expr2 = 'x abs y abs > y {} * x {} * + x ?'.format(soft, 1-soft)
else:
expr2 = 'x {} - abs y {} - abs > y {} * x {} * + x ?'.format(mid, mid, soft, 1-soft)
sharpdiff = core.std.Expr([sharpdiff, core.std.Convolution(sharpdiff, matrix=[1]*9)],[expr2])
PP1 = core.std.MakeDiff(tmp, sharpdiff)
### SOOTHE
if soothe:
diff = core.std.MakeDiff(tmp, PP1)
if isFLOAT:
expr3 = 'x y * 0 < x {} * x abs y abs > x {} * y {} * + x ? ?'.format(keep, keep, 1-keep)
else:
expr3 = 'x {m} - y {m} - * 0 < x {m} - {k} * {m} + x {m} - abs y {m} - abs > x {k} * y {j} * + x ? ?'.format(m=mid, k=keep, j=1-keep)
diff = core.std.Expr([diff, haf.AverageFrames(diff, [1, 1, 1], 0.125)],[expr3])
PP2 = core.std.MakeDiff(tmp, diff)
else:
PP2 = PP1
### OUTPUT
if dw != ow or dh != oh:
if color in [vs.YUV, vs.YCOCG]:
PP2 = core.std.ShufflePlanes([PP2, clip], [0, 1, 2], color)
PPP = core.resize.Spline36(PP2, dw, dh)
elif ssx > 1 or ssy > 1:
if color in [vs.YUV, vs.YCOCG]:
PP2 = core.std.ShufflePlanes([PP2, clip], [0, 1, 2], color)
PPP = core.resize.Spline36(PP2, dw, dh)
elif color in [vs.YUV, vs.YCOCG]:
PPP = core.std.ShufflePlanes([PP2, clip], [0, 1, 2], color)
else:
PPP = PP2
if source is not None:
if dw != ow or dh != oh:
src = core.resize.Spline36(source, dw, dh)
In = core.resize.Spline36(csrc, dw, dh)
else:
src = source
In = csrc
shrpD = core.std.MakeDiff(In, PPP, [0]) if color in [vs.YUV, vs.YCOCG] else core.std.MakeDiff(In, PPP)
expr4 = 'x abs y abs < x y ?' if isFLOAT else 'x {} - abs y {} - abs < x y ?'.format(mid, mid)
if color in [vs.YUV, vs.YCOCG]:
shrpL = core.std.Expr([R(shrpD, core.std.MakeDiff(In, src, [0]), [1, 0]), shrpD], [expr4, ''])
else:
shrpL = core.std.Expr([R(shrpD, core.std.MakeDiff(In, src), [1]), shrpD], [expr4])
return core.std.MakeDiff(In, shrpL, [0]) if color in [vs.YUV, vs.YCOCG] else core.std.MakeDiff(In, shrpL)
else:
return PPP
def SeeSaw(clip, denoised=None, NRlimit=2, NRlimit2=None, sstr=2, Slimit=None, Spower=4, SdampLo=None, SdampHi=24, Szp=16, bias=50,
Smode=None, NSmode=1, sootheT=50, sootheS=0, ssx=1, ssy=None, diff=False):
"""
Author: Didée (https://avisynth.nl/images/SeeSaw.avs)
(Full Name: "Denoiser-and-Sharpener-are-riding-the-SeeSaw")
This function provides a (simple) implementation of the "crystality sharpen" principle.
In conjunction with a user-specified denoised clip, the aim is to enhance weak detail,
hopefully without oversharpening or creating jaggies on strong detail,
and produce a result that is temporally stable without detail shimmering,
while keeping everything within reasonable bitrate requirements.
This is done by intermixing source, denoised source and a modified sharpening process, in a seesaw-like manner.
You're very much encouraged to feed your own custom denoised clip into SeeSaw.
If the "denoised" clip parameter is omitted, a simple "spatial pressdown" filter is used.
Args:
NRlimit (int) - Absolute limit for pixel change by denoising.
NRlimit2 (int) - Limit for intermediate denoising.
sstr (float) - Sharpening strength (don't touch this too much).
Slimit (int) - Positive: absolute limit for pixel change by sharpening.
Negative: pixel's sharpening difference is reduced to pow(diff, 1/abs(Slimit)).
Spower (float) - Exponent for modified sharpener.
Szp (float) - Zero point, below: overdrive sharpening, above: reduced sharpening.
SdampLo (float) - Reduces overdrive sharpening for very small changes.
SdampHi (float) - Further reduces sharpening for big sharpening changes. Try 15~30. "0" disables.
bias (float) - Bias towards detail (> 50), or towards calm result (< 50).
Smode (int) - RemoveGrain mode used in the modified sharpening function (sharpen2).
Negative: absolute value specifies the MinBlur radius used.
NSmode (int) - 0 = corrected, 1 = original nonlinear sharpening.
sootheT (int) - 0 = minimum, 100 = maximum soothing of sharpener's temporal instability.
Negative: will chain 2 instances of temporal soothing.
sootheS (int) - 0 = minimum, 100 = maximum smoothing of sharpener's spatial effect.
ssx, ssy (float) - SeeSaw doesn't urgently require supersampling; if used at all, small values around 1.25 seem to be enough.
diff (bool) - When True, limit the sharp-difference instead of the sharpened clip.
Relative limiting is safer, less aliasing, but also less sharpening.
This parameter has no effect when Smode <= 0.
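Usage (a minimal sketch; 'src' is assumed to be a YUV clip loaded elsewhere; any denoiser
can supply the 'denoised' clip, e.g. SMDegrain from this script):
    den = SMDegrain(src, tr=2)
    out = SeeSaw(src, denoised=den, NRlimit=2, sstr=1.5)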
"""
# Modified from muvsfunc: https://github.com/WolframRhodium/muvsfunc/blob/v0.2.0/muvsfunc.py#L2007
if not isinstance(clip, vs.VideoNode):
raise TypeError("SeeSaw: This is not a clip!")
if clip.format.color_family == vs.COMPAT:
raise TypeError("SeeSaw: COMPAT color family is not supported!")
if NRlimit2 is None:
NRlimit2 = NRlimit + 1
if Slimit is None:
Slimit = NRlimit + 2
if SdampLo is None:
SdampLo = Spower + 1
if Smode is None:
if ssx <= 1.25:
Smode = 11
elif ssx <= 1.6:
Smode = 20
else:
Smode = 19
if ssy is None:
ssy = ssx
color = clip.format.color_family
Spower = max(Spower, 1)
SdampLo = max(SdampLo, 0)
SdampHi = max(SdampHi, 0)
ssx = max(ssx, 1)
ssy = max(ssy, 1)
ow = clip.width
oh = clip.height
xss = m4(ow * ssx)
yss = m4(oh * ssy)
isFLOAT = clip.format.sample_type == vs.FLOAT
bd = clip.format.bits_per_sample
i = 0.00392 if isFLOAT else 1 << (bd - 8)
mid = 0 if isFLOAT else 1 << (bd - 1)
peak = 1.0 if isFLOAT else (1 << bd) - 1
bias = max(min(bias/100, 1), 0)
NRL = scale(NRlimit, peak)
NRL2 = scale(NRlimit2, peak)
NRLL = scale(NRlimit2/bias - 1, peak)
SLIM = scale(Slimit, peak) if Slimit >= 0 else 1/abs(Slimit)
R = core.rgsf.Repair if isFLOAT else core.rgvs.Repair
if denoised is None:
dnexpr = 'x {N} + y < x {N} + x {N} - y > x {N} - y ? ?'.format(N=NRL)
denoised = core.std.Expr([clip, core.std.Median(clip, [0])], [dnexpr, '']) if color in [vs.YUV, vs.YCOCG] else core.std.Expr([clip, core.std.Median(clip)], [dnexpr])
else:
if not isinstance(denoised, vs.VideoNode) or denoised.format.id != clip.format.id:
raise TypeError("SeeSaw: denoised must be the same format as clip!")
if denoised.width != clip.width or denoised.height != clip.height:
raise TypeError("SeeSaw: denoised must be the same size as clip!")
if color in [vs.YUV, vs.YCOCG]:
tmp = core.std.ShufflePlanes(clip, [0], vs.GRAY)
tmp2 = core.std.ShufflePlanes(denoised, [0], vs.GRAY) if clip != denoised else tmp
else:
tmp = clip
tmp2 = denoised
tameexpr = 'x {N} + y < x {N2} + x {N} - y > x {N2} - x {B} * y {j} * + ? ?'.format(N=NRLL, N2=NRL2, B=bias, j=1-bias)
tame = core.std.Expr([tmp, tmp2], [tameexpr])
if Smode > 0:
head = Sharpen2(tame, sstr, Spower, Szp, SdampLo, SdampHi, 4, NSmode, diff)
head = core.std.MaskedMerge(head, tame, tame.std.Sobel().std.Maximum().std.Convolution(matrix=[1]*9))
if ssx == 1 and ssy == 1:
if Smode <= 0:
sharp = Sharpen2(tame, sstr, Spower, Szp, SdampLo, SdampHi, Smode, NSmode, diff)
else:
sharp = R(Sharpen2(tame, sstr, Spower, Szp, SdampLo, SdampHi, Smode, NSmode, diff), head, [1])
else:
if Smode <= 0:
sharp = Sharpen2(tame.resize.Spline36(xss, yss), sstr, Spower, Szp, SdampLo, SdampHi, Smode, NSmode, diff).resize.Spline36(ow, oh)
else:
sharp = R(Sharpen2(tame.resize.Spline36(xss, yss), sstr, Spower, Szp, SdampLo, SdampHi, Smode, NSmode, diff), head.resize.Spline36(xss, yss), [1]).resize.Spline36(ow, oh)
if diff and Smode > 0:
sharp = core.std.MergeDiff(tame, sharp)
soothed = SootheSS(sharp, tame, sootheT, sootheS)
sharpdiff = core.std.MakeDiff(tame, soothed)
if NRlimit == 0 or clip == denoised:
calm = tmp
else:
NRdiff = core.std.MakeDiff(tmp, tmp2)
if isFLOAT:
expr = 'y {N} > x {N} - y -{N} < x {N} + x y - ? ?'.format(N=NRL)
else:
expr = 'y {m} {N} + > x {N} - y {m} {N} - < x {N} + x y {m} - - ? ?'.format(m=mid, N=NRL)
calm = core.std.Expr([tmp, NRdiff], [expr])
if Slimit >= 0:
if isFLOAT:
limitexpr = 'y {S} > x {S} - y -{S} < x {S} + x y - ? ?'.format(S=SLIM)
else:
limitexpr = 'y {m} {S} + > x {S} - y {m} {S} - < x {S} + x y {m} - - ? ?'.format(m=mid, S=SLIM)
limited = core.std.Expr([calm, sharpdiff], [limitexpr])
else:
if isFLOAT:
limitexpr = 'y 0 = x dup y abs {i} / {S} pow {i} * y 0 > 1 -1 ? * - ?'.format(S=SLIM, i=i)
else:
limitexpr = 'y {m} = x dup y {m} - abs {i} / {S} pow {i} * y {m} > 1 -1 ? * - ?'.format(m=mid, S=SLIM, i=i)
limited = core.std.Expr([calm, sharpdiff], [limitexpr])
return core.std.ShufflePlanes([limited, clip], [0, 1, 2], color) if color in [vs.YUV, vs.YCOCG] else limited
def Sharpen2(clip, sstr, power, zp, ldmp, hdmp, rg, mode, diff):
"""Modified sharpening function from SeeSaw()"""
color = clip.format.color_family
isFLOAT = clip.format.sample_type == vs.FLOAT
bd = clip.format.bits_per_sample
i = 0.00392 if isFLOAT else 1 << (bd - 8)
R = core.rgsf.RemoveGrain if isFLOAT else core.rgvs.RemoveGrain
if not isinstance(clip, vs.VideoNode):
raise TypeError("Sharpen2: This is not a clip!")
if color == vs.COMPAT:
raise TypeError("Sharpen2: COMPAT color family is not supported!")
tmp = core.std.ShufflePlanes(clip, [0], vs.GRAY) if color in [vs.YUV, vs.YCOCG] else clip
if rg <= 0:
diff = False
method = MinBlur(tmp, abs(rg))
elif rg == 4:
method = tmp.std.Median()
elif rg in [11, 12]:
method = tmp.std.Convolution(matrix=[1, 2, 1, 2, 4, 2, 1, 2, 1])
elif rg == 19:
method = tmp.std.Convolution(matrix=[1, 1, 1, 1, 0, 1, 1, 1, 1])
elif rg == 20:
method = tmp.std.Convolution(matrix=[1, 1, 1, 1, 1, 1, 1, 1, 1])
else:
method = R(tmp, mode=[rg])
if mode == 0:
xy = 'x y - abs {} /'.format(i) if bd != 8 else 'x y - abs'
Hi = 1 if hdmp == 0 else '1 {} {} / 4 pow + 1 {} {} / 4 pow + /'.format(zp, hdmp, xy, hdmp)
expr = 'x y = x dup {} dup {} / {} pow swap dup * dup {} {} + * swap {} + {} * / * {} * {} * x y > 1 -1 ? * + ?'
expr = expr.format(xy, zp, 1/power, zp**2, ldmp, ldmp, zp**2, Hi, zp*sstr*i)
normsharp = core.std.Expr([tmp, method], [expr])
else:
xy = 'x y - abs {} /'.format(i) if bd != 8 else 'x y - abs'
Hi = 1 if hdmp == 0 else '1 {} {} / 4 pow +'.format(xy, hdmp)
expr = 'x y = x dup {} dup {} / {} pow swap dup * dup {} + / * {} / {} * x y > 1 -1 ? * + ?'
normsharp = core.std.Expr([tmp, method], [expr.format(xy, zp, 1/power, ldmp, Hi, zp*sstr*i)])
if color in [vs.YUV, vs.YCOCG]:
normsharp = core.std.ShufflePlanes([normsharp, clip], [0, 1, 2], color)
return normsharp if not diff else core.std.MakeDiff(normsharp, clip)
def SootheSS(sharp, src, sootheT=50, sootheS=0):
"""Soothe() function to stabilze sharpening from SeeSaw()"""
if not isinstance(sharp, vs.VideoNode):
raise TypeError("SootheSS: sharp must be a clip!")
if sharp.format.color_family == vs.COMPAT:
raise TypeError("Sharpen2: COMPAT color family is not supported!")
if not isinstance(src, vs.VideoNode) or src.format.id != sharp.format.id:
raise TypeError("SootheSS: src must be of the same format as sharp!")
if src.width != sharp.width or src.height != sharp.height:
raise TypeError("SootheSS: src must be of the same size as sharp!")
ssrc = src
ST = min(abs(sootheT)/100, 1)
SS = min(abs(sootheS)/100, 1)
color = sharp.format.color_family
isFLOAT = sharp.format.sample_type == vs.FLOAT
mid = 0 if isFLOAT else 1 << (sharp.format.bits_per_sample - 1)
if color in [vs.YUV, vs.YCOCG]:
sharp = core.std.ShufflePlanes(sharp, [0], vs.GRAY)
src = core.std.ShufflePlanes(src, [0], vs.GRAY)
diff = core.std.MakeDiff(src, sharp)
if isFLOAT:
expr1 = 'x y * 0 < x {S} * x abs y abs > x {S} * y {SS} * + x ? ?'.format(S=1-SS, SS=SS)
else:
expr1 = 'x {m} - y {m} - * 0 < x {m} - {SS} * {m} + x {m} - abs y {m} - abs > x {S} * y {SS} * + x ? ?'.format(m=mid, S=1-SS, SS=SS)
if isFLOAT:
expr2 = 'x y * 0 < x {T} * x abs y abs > x {T} * y {ST} * + x ? ?'.format(T=1-ST, ST=ST)
else:
expr2 = 'x {m} - y {m} - * 0 < x {m} - {T} * {m} + x {m} - abs y {m} - abs > x {T} * y {ST} * + x ? ?'.format(m=mid, T=1-ST, ST=ST)
soothed = diff
if SS > 0:
soothed = core.std.Expr([diff, core.std.Convolution(diff, [1]*9)], [expr1])
if ST > 0:
soothed = core.std.Expr([diff, haf.AverageFrames(diff, [1, 1, 1], 0.125)], [expr2])
if sootheT < 0:
soothed = core.std.Expr([diff, haf.AverageFrames(diff, [1, 1, 1], 0.125)], [expr2])
soothed = core.std.MakeDiff(src, soothed)
return core.std.ShufflePlanes([soothed, ssrc], [0, 1, 2], color) if color in [vs.YUV, vs.YCOCG] else soothed
def QTGMC(clip, Preset='Slower', TR0=None, TR1=None, TR2=None, Rep0=None, Rep1=0, Rep2=None, EdiMode=None, RepChroma=True, NNSize=None, NNeurons=None, EdiQual=1, EdiMaxD=None, ChromaEdi='',
EdiExt=None, Sharpness=None, SMode=None, SLMode=None, SLRad=None, SOvs=0, SVThin=0, Sbb=None, SrchClipPP=None, SubPel=None, SubPelInterp=2, BlockSize=None, Overlap=None, Search=None,
SearchParam=None, PelSearch=None, ChromaMotion=None, TrueMotion=False, Lambda=None, LSAD=None, PNew=None, PLevel=None, GlobalMotion=True, DCT=0, ThSAD1=625, ThSAD2=256, ThSCD1=180, ThSCD2=100,
SourceMatch=0, MatchPreset=None, MatchEdi=None, MatchPreset2=None, MatchEdi2=None, MatchTR2=1, MatchEnhance=0.5, Lossless=0, NoiseProcess=None, EZDenoise=None, EZKeepGrain=None,
NoisePreset='Medium', Denoiser=None, FftThreads=1, DenoiseMC=None, NoiseTR=None, Sigma=None, ChromaNoise=False, ShowNoise=0, GrainRestore=None, NoiseRestore=None, NoiseDeint=None,
StabilizeNoise=None, InputType=0, ProgSADMask=None, FPSDivisor=1, ShutterBlur=0, ShutterAngleSrc=180, ShutterAngleOut=180, SBlurLimit=4, Border=False, Precise=None, RefineMotion=False,
ShowSettings=False, ForceTR=0, TFF=None, pscrn=None, int16_prescreener=None, int16_predictor=None, exp=None, alpha=None, beta=None, gamma=None, nrad=None, vcheck=None, opencl=False, device=None):
"""
QTGMC by Vit
A high quality deinterlacer using motion-compensated temporal smoothing, with a range of features for quality and convenience
Originally based on TempGaussMC by Didée
--- REQUIREMENTS ---
Core plugins:
MVTools / MVTools-sf
Miscellaneous Filters
znedi3 / nnedi3
RGVS / RGSF
fmtconv
Additional plugins:
eedi3m - if selected directly or via a source-match preset
FFT3DFilter - if selected for noise processing
DFTTest - if selected for noise processing
KNLMeansCL - if selected for noise processing
AddGrain - if NoiseDeint = "Generate" selected for noise bypass
For FFT3DFilter & DFTTest you also need the FFTW3 library (FFTW.org). On Windows the file needed for both is libfftw3f-3.dll.
--- GETTING STARTED ---
The "Preset" used selects sensible settings for a given encoding speed. Choose a preset from:
"Placebo", "Very Slow", "Slower", "Slow", "Medium", "Fast", "Faster", "Very Fast", "Super Fast", "Ultra Fast" & "Draft"
Don't be obsessed with using slower settings as the differences can be small. HD material benefits little from extreme settings (and will be very slow)
There are many settings for tweaking the script, full details in the main documentation. You can display settings currently being used with QTGMC(ShowSettings=True)
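Usage (a minimal sketch; 'src' is assumed to be an interlaced, top-field-first YUV clip loaded elsewhere):
    deint = QTGMC(src, Preset='Slower', TFF=True)               # double-rate output
    deint = QTGMC(src, Preset='Fast', FPSDivisor=2, TFF=True)   # keep the original frame rate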
"""
# Modified from havsfunc: https://github.com/HomeOfVapourSynthEvolution/havsfunc/blob/r31/havsfunc.py#L846
if not isinstance(clip, vs.VideoNode) or clip.format.color_family not in [vs.GRAY, vs.YUV]:
raise TypeError("QTGMC: This is not a GRAY or YUV clip!")
# Presets
# Select presets / tuning
presets = ['placebo', 'very slow', 'slower', 'slow', 'medium', 'fast', 'faster', 'very fast', 'super fast', 'ultra fast', 'draft']
try:
pNum = presets.index(Preset.lower())
except:
raise ValueError("QTGMC: Preset choice is invalid")
if MatchPreset is None:
mpNum1 = min(pNum + 2, 9)
MatchPreset = presets[mpNum1]
else:
try:
mpNum1 = presets[:10].index(MatchPreset.lower())
except:
raise ValueError("QTGMC: MatchPreset choice is invalid")
if MatchPreset2 is None:
mpNum2 = min(mpNum1 + 2, 9)
MatchPreset2 = presets[mpNum2]
else:
try:
mpNum2 = presets[:10].index(MatchPreset2.lower())
except:
raise ValueError("QTGMC: MatchPreset2 choice is invalid")
try:
npNum = presets[2:7].index(NoisePreset.lower())
except:
raise ValueError("QTGMC: NoisePreset choice is invalid")
bs = 16 if BlockSize is None else BlockSize
bs2 = 32 if BlockSize is None else BlockSize
# Very Very Super Ultra
# Preset groups: Placebo Slow Slower Slow Medium Fast Faster Fast Fast Fast Draft
if TR0 is None: TR0 = [ 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 0 ][pNum]
if TR1 is None: TR1 = [ 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1 ][pNum]
if TR2 is None: TR2 = [ 3, 2, 1, 1, 1, 0, 0, 0, 0, 0, 0 ][pNum]
if Rep0 is None: Rep0 = [ 4, 4, 4, 4, 3, 3, 0, 0, 0, 0, 0 ][pNum]
if Rep2 is None: Rep2 = [ 4, 4, 4, 4, 4, 4, 4, 4, 3, 3, 0 ][pNum]
if EdiMode is None: EdiMode = ['nnedi3', 'nnedi3', 'nnedi3', 'nnedi3', 'nnedi3', 'nnedi3', 'nnedi3', 'nnedi3', 'nnedi3', 'nnedi3', 'bob' ][pNum]
if NNSize is None: NNSize = [ 1, 1, 1, 1, 5, 5, 4, 4, 4, 4, 4 ][pNum]
if NNeurons is None: NNeurons = [ 3, 2, 1, 1, 1, 0, 0, 0, 0, 0, 0 ][pNum]
if EdiMaxD is None: EdiMaxD = [ 12, 10, 8, 7, 7, 6, 6, 5, 4, 4, 4 ][pNum]
if SMode is None: SMode = [ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0 ][pNum]
if SLMode is None: SLMod = [ 2, 2, 2, 2, 2, 2, 2, 2, 0, 0, 0 ][pNum]
if SLRad is None: SLRad = [ 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ][pNum]
if Sbb is None: Sbb = [ 3, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0 ][pNum]
if SrchClipPP is None: SrchClipPP = [ 3, 3, 3, 3, 3, 2, 2, 2, 1, 1, 0 ][pNum]
if SubPel is None: SubPel = [ 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1 ][pNum]
if BlockSize is None: BlockSize = [ bs, bs, bs, bs, bs, bs, bs2, bs2, bs2, bs2, bs2 ][pNum]
if Overlap is None: Overlap = [ bs//2, bs//2, bs//2, bs//2, bs//2, bs//2, bs2//2, bs2//4, bs2//4, bs2//4, bs2//4][pNum]
if Search is None: Search = [ 3, 5, 5, 4, 4, 4, 4, 4, 0, 0, 0 ][pNum]
if SearchParam is None: SearchParam = [ 3, 3, 2, 2, 2, 2, 2, 1, 1, 1, 1 ][pNum]
if PelSearch is None: PelSearch = [ 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1 ][pNum]
if ChromaMotion is None: ChromaMotion = [ True, True, True, False, False, False, False, False, False, False, False ][pNum]
if Precise is None: Precise = [ True, True, False, False, False, False, False, False, False, False, False ][pNum]
if ProgSADMask is None: ProgSADMask = [ 10, 10, 10, 10, 10, 0, 0, 0, 0, 0, 0 ][pNum]
# Noise presets Slower Slow Medium Fast Faster
if Denoiser is None: Denoiser = ['dfttest', 'dfttest', 'dfttest', 'fft3d', 'fft3d'][npNum]
if DenoiseMC is None: DenoiseMC = [ True, True, False, False, False ][npNum]
if NoiseTR is None: NoiseTR = [ 2, 1, 1, 1, 0 ][npNum]
if NoiseDeint is None: NoiseDeint = ['generate', 'bob', '', '', '' ][npNum]
if StabilizeNoise is None: StabilizeNoise = [ True, True, True, False, False ][npNum]
# The basic source-match step corrects and re-runs the interpolation of the input clip. So it initially uses the same interpolation settings as the main preset
MatchEdi = EdiMode
MatchNNSize = NNSize
MatchNNeurons = NNeurons
MatchEdiQual = EdiQual
MatchEdiMaxD = EdiMaxD
# However, a faster initial interpolation can be used with source-match, allowing the basic source-match step to "correct" it with higher quality settings
if SourceMatch > 0 and (mpNum1 < pNum or mpNum2 < pNum):
raise ValueError("QTGMC: MatchPreset cannot use a slower setting than Preset")
# Basic source-match presets
# Main interpolation is actually done by basic-source match step when enabled, so a little swap and wriggle is needed
if SourceMatch > 0:
# Very Very Super Ultra
# Placebo Slow Slower Slow Medium Fast Faster Fast Fast Fast
EdiMode = ['nnedi3', 'nnedi3', 'nnedi3', 'nnedi3', 'nnedi3', 'nnedi3', 'nnedi3', 'nnedi3', 'nnedi3', ''][mpNum1]
NNSize = [ 1, 1, 1, 1, 5, 5, 4, 4, 4, 4 ][mpNum1]
NNeurons = [ 3, 2, 1, 1, 1, 0, 0, 0, 0, 0 ][mpNum1]
EdiQual = [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ][mpNum1]
EdiMaxD = [ 12, 10, 8, 7, 7, 6, 6, 5, 4, 4 ][mpNum1]
# Refined source-match presets Very Very Super Ultra
# Placebo Slow Slower Slow Medium Fast Faster Fast Fast Fast
MatchEdi2 = ['nnedi3', 'nnedi3', 'nnedi3', 'nnedi3', 'nnedi3', 'nnedi3', 'nnedi3', 'nnedi3', 'nnedi3', ''][mpNum2]
MatchNNSize2 = [ 1, 1, 1, 1, 5, 5, 4, 4, 4, 4 ][mpNum2]
MatchNNeurons2 = [ 3, 2, 1, 1, 1, 0, 0, 0, 0, 0 ][mpNum2]
MatchEdiQual2 = [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 ][mpNum2]
MatchEdiMaxD2 = [ 12, 10, 8, 7, 7, 6, 6, 5, 4, 4 ][mpNum2]
EdiMode = EdiMode.lower()
ChromaEdi = ChromaEdi.lower()
MatchEdi = MatchEdi.lower()
MatchEdi2 = MatchEdi2.lower()
Denoiser = Denoiser.lower()
NoiseDeint = NoiseDeint.lower()
#---------------------------------------
# Settings
if EdiExt is not None and (not isinstance(EdiExt, vs.VideoNode) or EdiExt.format.id != clip.format.id):
raise TypeError("QTGMC: EdiExt must be the same format as clip")
if InputType != 1 and not isinstance(TFF, bool):
raise TypeError("QTGMC: TFF must be set when InputType is not 1. Setting TFF to True means top field first and False means bottom field first")
bd = clip.format.bits_per_sample
isFLOAT = clip.format.sample_type == vs.FLOAT
isGRAY = clip.format.color_family == vs.GRAY
mid = 0 if isFLOAT else 1 << (bd - 1)
i = 0.00392 if isFLOAT else 1 << (bd - 8)
S = core.mvsf.Super if isFLOAT else core.mv.Super
A = core.mvsf.Analyse if isFLOAT else core.mv.Analyse
C = core.mvsf.Compensate if isFLOAT else core.mv.Compensate
R = core.mvsf.Recalculate if isFLOAT else core.mv.Recalculate
M = core.mvsf.Mask if isFLOAT else core.mv.Mask
D1 = core.mvsf.Degrain1 if isFLOAT else core.mv.Degrain1
D2 = core.mvsf.Degrain2 if isFLOAT else core.mv.Degrain2
D3 = core.mvsf.Degrain3 if isFLOAT else core.mv.Degrain3
FB = core.mvsf.FlowBlur if isFLOAT else core.mv.FlowBlur
RE = core.rgsf.Repair if isFLOAT else core.rgvs.Repair
V = core.rgsf.VerticalCleaner if isFLOAT else core.rgvs.VerticalCleaner
mat = [1, 2, 1, 2, 4, 2, 1, 2, 1]
hpad = BlockSize
vpad = BlockSize
# Core defaults
if SourceMatch > 0:
TR2 = max(TR2, 1) # TR2 defaults always at least 1 when using source-match
# Source-match defaults
MatchTR1 = TR1
# Sharpness defaults. Sharpness default is always 1.0 (0.2 with source-match), but adjusted to give roughly the same sharpness for all settings
if SLMode is None:
SLMode = SLMod
if SourceMatch > 0:
SLMode = 0 # Sharpness limiting disabled by default for source-match
if SLRad <= 0:
SLMode = 0
if Sharpness is None:
Sharpness = 0 if SMode <= 0 else 0.2 if SourceMatch > 0 else 1 # Default sharpness is 1.0, or 0.2 if using source-match
if Sharpness <= 0:
SMode = 0
if SMode <= 0:
Sbb = 0
sharpMul = 2 if SLMode in [2, 4] else 1.5 if SLMode in [1, 3] else 1 # Adjust sharpness based on other settings
sharpAdj = Sharpness * (sharpMul * (TR1 + TR2 + 1) / 4 + (0.1 if SMode == 1 else 0)) # [This needs a bit more refinement]
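# Illustrative arithmetic (assuming the "Slower" preset defaults: Sharpness=1, SLMode=2, TR1=2, TR2=1, SMode=2):
# sharpMul = 2, so sharpAdj = 1 * (2 * (2 + 1 + 1) / 4 + 0) = 2.0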
# Noise processing settings
if not (EZDenoise is None or EZKeepGrain is None) and EZDenoise > 0 and EZKeepGrain > 0:
raise ValueError("QTGMC: EZDenoise and EZKeepGrain cannot be used together")
if NoiseProcess is None:
if EZDenoise is not None and EZDenoise > 0:
NoiseProcess = 1
elif (EZKeepGrain is not None and EZKeepGrain > 0) or Preset in ['placebo', 'very slow']:
NoiseProcess = 2
else:
NoiseProcess = 0
if GrainRestore is None:
if EZDenoise is not None and EZDenoise > 0:
GrainRestore = 0
elif EZKeepGrain is not None and EZKeepGrain > 0:
GrainRestore = 0.3 * math.sqrt(EZKeepGrain)
else:
GrainRestore = [0, 0.7, 0.3][NoiseProcess]
if NoiseRestore is None:
if EZDenoise is not None and EZDenoise > 0:
NoiseRestore = 0
elif EZKeepGrain is not None and EZKeepGrain > 0:
NoiseRestore = 0.1 * math.sqrt(EZKeepGrain)
else:
NoiseRestore = [0, 0.3, 0.1][NoiseProcess]
if Sigma is None:
if EZDenoise is not None and EZDenoise > 0:
Sigma = EZDenoise
elif EZKeepGrain is not None and EZKeepGrain > 0:
Sigma = 4 * EZKeepGrain
else:
Sigma = 2
if isinstance(ShowNoise, bool):
ShowNoise = 10 if ShowNoise else 0
if ShowNoise > 0:
NoiseProcess = 2
NoiseRestore = 1
if NoiseProcess <= 0:
NoiseTR = 0
GrainRestore = 0
NoiseRestore = 0
totalRestore = GrainRestore + NoiseRestore
if totalRestore <= 0:
StabilizeNoise = False
noiseTD = [1, 3, 5][NoiseTR]
noiseCentre = mid if Denoiser in ['dfttest', 'knlmeanscl'] else 128.5*i
# MVTools settings
if Lambda is None:
Lambda = (1000 if TrueMotion else 100) * (BlockSize ** 2) // 64
if LSAD is None:
LSAD = 1200 if TrueMotion else 400
if PNew is None:
PNew = 50 if TrueMotion else 25
if PLevel is None:
PLevel = 1 if TrueMotion else 0
# Motion blur settings
if ShutterAngleOut * FPSDivisor == ShutterAngleSrc:
ShutterBlur = 0 # If motion blur output is same as clip
# Miscellaneous
if InputType < 2:
ProgSADMask = 0
if isGRAY:
ChromaMotion = False
ChromaNoise = False
RepChroma = False
# Get maximum temporal radius needed
maxTR = SLRad if SLMode in [2, 4] else 0
maxTR = max(maxTR, MatchTR2, TR1, TR2, NoiseTR)
maxTR = max(maxTR, ForceTR)
if ProgSADMask > 0 or StabilizeNoise or ShutterBlur > 0:
maxTR = max(maxTR, 1)
# Show settings
if ShowSettings:
text = "TR0={} | TR1={} | TR2={} | Rep0={} | Rep1={} | Rep2={} | RepChroma={} | EdiMode='{}' | NNSize={} | NNeurons={} | EdiQual={} | EdiMaxD={} | " + \
"ChromaEdi='{}' | Sharpness={} | SMode={} | SLMode={} | SLRad={} | SOvs={} | SVThin={} | Sbb={} | SrchClipPP={} | SubPel={} | " + \
"SubPelInterp={} | BlockSize={} | Overlap={} | Search={} | SearchParam={} | PelSearch={} | ChromaMotion={} | TrueMotion={} | Lambda={} | " + \
"LSAD={} | PNew={} | PLevel={} | GlobalMotion={} | DCT={} | ThSAD1={} | ThSAD2={} | ThSCD1={} | ThSCD2={} | SourceMatch={} | " + \
"MatchPreset='{}' | MatchEdi='{}' | MatchPreset2='{}' | MatchEdi2='{}' | MatchTR2={} | MatchEnhance={} | Lossless={} | NoiseProcess={} | " + \
"Denoiser='{}' | FftThreads={} | DenoiseMC={} | NoiseTR={} | Sigma={} | ChromaNoise={} | ShowNoise={} | GrainRestore={} | NoiseRestore={} | " + \
"NoiseDeint='{}' | StabilizeNoise={} | InputType={} | ProgSADMask={} | FPSDivisor={} | ShutterBlur={} | ShutterAngleSrc={} | " + \
"ShutterAngleOut={} | SBlurLimit={} | Border={} | Precise={} | RefineMotion={} | Preset='{}' | ForceTR={}"
text = text.format(TR0, TR1, TR2, Rep0, Rep1, Rep2, RepChroma, EdiMode, NNSize, NNeurons, EdiQual, EdiMaxD, ChromaEdi, Sharpness, SMode,
SLMode, SLRad, SOvs, SVThin, Sbb, SrchClipPP, SubPel, SubPelInterp, BlockSize, Overlap, Search, SearchParam, PelSearch,
ChromaMotion, TrueMotion, Lambda, LSAD, PNew, PLevel, GlobalMotion, DCT, ThSAD1, ThSAD2, ThSCD1, ThSCD2, SourceMatch,
MatchPreset, MatchEdi, MatchPreset2, MatchEdi2, MatchTR2, MatchEnhance, Lossless, NoiseProcess, Denoiser, FftThreads,
DenoiseMC, NoiseTR, Sigma, ChromaNoise, ShowNoise, GrainRestore, NoiseRestore, NoiseDeint, StabilizeNoise, InputType,
ProgSADMask, FPSDivisor, ShutterBlur, ShutterAngleSrc, ShutterAngleOut, SBlurLimit, Border, Precise, RefineMotion, Preset, ForceTR)
return core.text.Text(clip, text)
#---------------------------------------
# Pre-Processing
w = clip.width
h = clip.height
# Reverse "field" dominance for progressive repair mode 3 (only difference from mode 2)
if InputType >= 3:
TFF = not TFF
# Pad vertically during processing (to prevent artifacts at top & bottom edges)
if Border:
h += 8
clip = core.resize.Point(clip, w, h, src_top=-4, src_height=h)
#---------------------------------------
# Motion Analysis
# Bob the input as a starting point for motion search clip
if InputType <= 0:
bobbed = haf.Bob(clip, 0, 0.5, TFF)
elif InputType == 1:
bobbed = clip
else:
bobbed = core.std.Convolution(clip, matrix=[1, 2, 1], mode='v')
CMplanes = [0, 1, 2] if ChromaMotion else [0]
# The bobbed clip will shimmer due to being derived from alternating fields. Temporally smooth over the neighboring frames using a binomial kernel.
# Binomial kernels give equal weight to even and odd frames and hence average away the shimmer. The two kernels used are [1 2 1] and [1 4 6 4 1] for radius 1 and 2.
# These kernels are approximately Gaussian kernels, which work well as a prefilter before motion analysis (hence the original name for this script)
# Create linear weightings of neighbors first -2 -1 0 1 2
if TR0 > 0: ts1 = haf.AverageFrames(bobbed, weights=[1]*3, scenechange=0.11, planes=CMplanes) # 0.00 0.33 0.33 0.33 0.00
if TR0 > 1: ts2 = haf.AverageFrames(bobbed, weights=[1]*5, scenechange=0.11, planes=CMplanes) # 0.20 0.20 0.20 0.20 0.20
# Combine linear weightings to give binomial weightings - TR0 = 0: (1), TR0 = 1: (1:2:1), TR0 = 2: (1:4:6:4:1)
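# Illustrative arithmetic: with TR0=1, Merge(ts1, bobbed, 0.25) = 0.75*[1 1 1]/3 + 0.25*[0 1 0] = [0.25 0.5 0.25],
# i.e. the [1 2 1]/4 binomial kernel; TR0=2 similarly yields the [1 4 6 4 1]/16 kernel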
if TR0 <= 0:
binomial0 = bobbed
elif TR0 == 1:
binomial0 = core.std.Merge(ts1, bobbed, [0.25] if ChromaMotion or isGRAY else [0.25, 0])
else:
binomial0 = core.std.Merge(core.std.Merge(ts1, ts2, [0.357] if ChromaMotion or isGRAY else [0.357, 0]), bobbed, [0.125] if ChromaMotion or isGRAY else [0.125, 0])
# Remove areas of difference between temporal blurred motion search clip and bob that are not due to bob-shimmer - removes general motion blur
repair0 = binomial0 if Rep0 <= 0 else QTGMC_KeepOnlyBobShimmerFixes(binomial0, bobbed, Rep0, RepChroma)
# Blur image and soften edges to assist in motion matching of edge blocks. Blocks are matched by SAD (sum of absolute differences between blocks),
# but even a slight change in an edge from frame to frame will give a high SAD due to the higher contrast of edges
if SrchClipPP == 1:
spatialBlur = core.resize.Bilinear(repair0, m4(w/2), m4(h/2)).std.Convolution(matrix=mat, planes=CMplanes).resize.Bilinear(w, h)
elif SrchClipPP > 1:
spatialBlur = core.tcanny.TCanny(repair0, sigma=2, mode=-1, planes=CMplanes)
spatialBlur = core.std.Merge(spatialBlur, repair0, [0.1] if ChromaMotion or isGRAY else [0.1, 0])
if SrchClipPP <= 0:
srchClip = repair0
elif SrchClipPP < 3:
srchClip = spatialBlur
else:
expr1 = 'x {a} + y < x {a} + x {a} - y > x {a} - y ? ?'.format(a=3*i)
tweaked = core.std.Expr([repair0, bobbed], [expr1] if ChromaMotion or isGRAY else [expr1, ''])
expr2 = 'x {a} + y < x {b} + x {a} - y > x {b} - x y + 2 / ? ?'.format(a=7*i, b=2*i)
srchClip = core.std.Expr([spatialBlur, tweaked], [expr2] if ChromaMotion or isGRAY else [expr2, ''])
# Calculate forward and backward motion vectors from motion search clip
analyse_args = dict(blksize=BlockSize, overlap=Overlap, search=Search, searchparam=SearchParam, pelsearch=PelSearch, truemotion=TrueMotion, _lambda=Lambda, lsad=LSAD, pnew=PNew, plevel=PLevel, _global=GlobalMotion, dct=DCT, chroma=ChromaMotion)
srchSuper = S(DitherLumaRebuild(srchClip, s0=1, chroma=ChromaMotion), pel=SubPel, sharp=1, rfilter=4, hpad=hpad, vpad=vpad, chroma=ChromaMotion) if maxTR > 0 else None
bVec1 = A(srchSuper, isb=True, delta=1, **analyse_args) if maxTR > 0 else None
fVec1 = A(srchSuper, isb=False, delta=1, **analyse_args) if maxTR > 0 else None
bVec2 = A(srchSuper, isb=True, delta=2, **analyse_args) if maxTR > 1 else None
fVec2 = A(srchSuper, isb=False, delta=2, **analyse_args) if maxTR > 1 else None
bVec3 = A(srchSuper, isb=True, delta=3, **analyse_args) if maxTR > 2 else None
fVec3 = A(srchSuper, isb=False, delta=3, **analyse_args) if maxTR > 2 else None
if RefineMotion and maxTR > 0:
recalculate_args = dict(blksize=BlockSize/2, overlap=Overlap/2, search=Search, searchparam=SearchParam, truemotion=TrueMotion, _lambda=Lambda/4, pnew=PNew, dct=DCT, chroma=ChromaMotion)
bVec1 = R(srchSuper, bVec1, **recalculate_args)
fVec1 = R(srchSuper, fVec1, **recalculate_args)
bVec2 = R(srchSuper, bVec2, **recalculate_args) if maxTR > 1 else bVec2
fVec2 = R(srchSuper, fVec2, **recalculate_args) if maxTR > 1 else fVec2
bVec3 = R(srchSuper, bVec3, **recalculate_args) if maxTR > 2 else bVec3
fVec3 = R(srchSuper, fVec3, **recalculate_args) if maxTR > 2 else fVec3
#---------------------------------------
# Noise Processing
# Expand fields to full frame size before extracting noise (allows use of motion vectors which are frame-sized)
if NoiseProcess > 0:
fullClip = clip if InputType > 0 else haf.Bob(clip, 0, 1, TFF)
if NoiseTR > 0:
fullSuper = S(fullClip, pel=SubPel, sharp=SubPelInterp, levels=1, rfilter=1, hpad=hpad, vpad=vpad, chroma=ChromaNoise)
CNplanes = [0, 1, 2] if ChromaNoise else [0]
# Create a motion compensated temporal window around current frame and use to guide denoisers
if NoiseProcess > 0:
if not DenoiseMC or NoiseTR <= 0:
noiseWindow = fullClip
elif NoiseTR == 1:
noiseWindow = core.std.Interleave([C(fullClip, fullSuper, fVec1, thscd1=ThSCD1, thscd2=ThSCD2), fullClip,
C(fullClip, fullSuper, bVec1, thscd1=ThSCD1, thscd2=ThSCD2)])
else:
noiseWindow = core.std.Interleave([C(fullClip, fullSuper, fVec2, thscd1=ThSCD1, thscd2=ThSCD2),
C(fullClip, fullSuper, fVec1, thscd1=ThSCD1, thscd2=ThSCD2), fullClip,
C(fullClip, fullSuper, bVec1, thscd1=ThSCD1, thscd2=ThSCD2),
C(fullClip, fullSuper, bVec2, thscd1=ThSCD1, thscd2=ThSCD2)])
if Denoiser == 'dfttest':
dnWindow = core.dfttest.DFTTest(noiseWindow, sigma=Sigma * 4, tbsize=noiseTD, planes=CNplanes)
elif Denoiser == 'knlmeanscl':
dnWindow = haf.KNLMeansCL(noiseWindow, d=NoiseTR, h=Sigma) if ChromaNoise else noiseWindow.knlm.KNLMeansCL(d=NoiseTR, h=Sigma)
else:
dnWindow = core.fft3dfilter.FFT3DFilter(noiseWindow, sigma=Sigma * i, planes=CNplanes, bt=noiseTD, ncpu=FftThreads)
# Rework denoised clip to match source format - various code paths here: discard the motion compensation window, discard doubled lines (from PointResize)
# Also reweave to get interlaced noise if source was interlaced (could keep the full frame of noise, but it will be poor quality from PointResize)
if not DenoiseMC:
denoised = dnWindow if InputType > 0 else haf.Weave(core.std.SeparateFields(dnWindow, TFF).std.SelectEvery(4, [0, 3]), TFF)
elif InputType > 0:
denoised = dnWindow if NoiseTR <= 0 else dnWindow[NoiseTR::noiseTD]
else:
denoised = haf.Weave(core.std.SeparateFields(dnWindow, TFF).std.SelectEvery(noiseTD * 4, [NoiseTR * 2, NoiseTR * 6 + 3]), TFF)
# Get actual noise from difference. Then "deinterlace" where we have weaved noise - create the missing lines of noise in various ways
if NoiseProcess > 0 and totalRestore > 0:
noise = core.std.MakeDiff(clip, denoised, planes=CNplanes)
if InputType > 0:
deintNoise = noise
elif NoiseDeint == 'bob':
deintNoise = haf.Bob(noise, 0, 0.5, TFF)
elif NoiseDeint == 'generate':
deintNoise = QTGMC_Generate2ndFieldNoise(noise, denoised, ChromaNoise, TFF)
else:
deintNoise = core.std.SeparateFields(noise, TFF).std.DoubleWeave(TFF)
# Motion-compensated stabilization of generated noise
if StabilizeNoise:
noiseSuper = S(deintNoise, pel=SubPel, sharp=SubPelInterp, levels=1, rfilter=1, hpad=hpad, vpad=vpad, chroma=ChromaNoise)
mcNoise = C(deintNoise, noiseSuper, bVec1, thscd1=ThSCD1, thscd2=ThSCD2)
expr3 = 'x abs y abs > x y ? 0.6 * x y + 0.2 * +' if isFLOAT else 'x {} - abs y {} - abs > x y ? 0.6 * x y + 0.2 * +'.format(mid, mid)
finalNoise = core.std.Expr([deintNoise, mcNoise], [expr3] if ChromaNoise or isGRAY else [expr3, ''])
else:
finalNoise = deintNoise
if ShowNoise > 0:
expr4 = 'x {} *'.format(ShowNoise) if isFLOAT else 'x {} - {} * {} +'.format(mid, ShowNoise, mid)
return core.std.Expr([finalNoise], [expr4] if ChromaNoise or isGRAY else [expr4, repr(mid)])
# If NoiseProcess == 1 denoise input clip. If NoiseProcess == 2 leave noise in the clip and let the temporal blurs "denoise" it for a stronger effect
innerClip = denoised if NoiseProcess == 1 else clip
#---------------------------------------
# Interpolation
# Support badly deinterlaced progressive content - drop half the fields and reweave to get 1/2fps interlaced stream appropriate for QTGMC processing
ediInput = haf.Weave(core.std.SeparateFields(innerClip, TFF).std.SelectEvery(4, [0, 3]), TFF) if InputType > 1 else innerClip
# Create interpolated image as starting point for output
if EdiExt is not None:
edi1 = core.resize.Point(EdiExt, w, h, src_top=(EdiExt.height - h) / 2, src_height=h)
else:
edi1 = QTGMC_Interpolate(ediInput, InputType, EdiMode, NNSize, NNeurons, EdiQual, EdiMaxD, pscrn, int16_prescreener, int16_predictor, exp, alpha, beta, gamma, nrad, vcheck, bobbed, ChromaEdi, TFF, opencl, device)
# InputType == 2 or 3: use motion mask to blend luma between original clip & reweaved clip based on ProgSADMask setting. Use chroma from original clip in any case
if InputType < 2:
edi = edi1
elif ProgSADMask <= 0:
edi = edi1 if isGRAY else core.std.ShufflePlanes([edi1, innerClip], [0, 1, 2], clip.format.color_family)
else:
inputTypeBlend = M(srchClip.fmtc.bitdepth(bits=8, dmode=1), bVec1, kind=1, ml=ProgSADMask).fmtc.bitdepth(bits=bd) if 8 < bd <= 16 else M(srchClip, bVec1, kind=1, ml=ProgSADMask)
edi = core.std.MaskedMerge(innerClip, edi1, inputTypeBlend, [0])
# Get the min/max value for each pixel over neighboring motion-compensated frames - used for temporal sharpness limiting
if TR1 > 0 or SLMode in [2, 4]:
ediSuper = S(edi, pel=SubPel, sharp=SubPelInterp, levels=1, rfilter=1, hpad=hpad, vpad=vpad)
if SLMode in [2, 4]:
bComp1 = C(edi, ediSuper, bVec1, thscd1=ThSCD1, thscd2=ThSCD2)
fComp1 = C(edi, ediSuper, fVec1, thscd1=ThSCD1, thscd2=ThSCD2)
tMax = core.std.Expr([core.std.Expr([edi, fComp1], ['x y max']), bComp1], ['x y max'])
tMin = core.std.Expr([core.std.Expr([edi, fComp1], ['x y min']), bComp1], ['x y min'])
if SLRad > 1:
bComp3 = C(edi, ediSuper, bVec3, thscd1=ThSCD1, thscd2=ThSCD2)
fComp3 = C(edi, ediSuper, fVec3, thscd1=ThSCD1, thscd2=ThSCD2)
tMax = core.std.Expr([core.std.Expr([tMax, fComp3], ['x y max']), bComp3], ['x y max'])
tMin = core.std.Expr([core.std.Expr([tMin, fComp3], ['x y min']), bComp3], ['x y min'])
#---------------------------------------
# Create basic output
# Use motion vectors to blur interpolated image (edi) with motion-compensated previous and next frames. As above, this is done to remove shimmer
# from alternate frames so the same binomial kernels are used. However, by using motion-compensated smoothing this time we avoid motion blur.
# The use of MDegrain1 (motion compensated) rather than TemporalSmooth makes the weightings *look* different, but they evaluate to the same values
# Create linear weightings of neighbors first -2 -1 0 1 2
if TR1 > 0: degrain1 = D1(edi, ediSuper, bVec1, fVec1, thsad=ThSAD1, thscd1=ThSCD1, thscd2=ThSCD2) # 0.00 0.33 0.33 0.33 0.00
if TR1 > 1: degrain2 = D1(edi, ediSuper, bVec2, fVec2, thsad=ThSAD1, thscd1=ThSCD1, thscd2=ThSCD2) # 0.33 0.00 0.33 0.00 0.33
# Combine linear weightings to give binomial weightings - TR1 == 0: (1), TR1 == 1: (1:2:1), TR1 == 2: (1:4:6:4:1)
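    # For example, with TR1 == 1: Merge(degrain1, edi, 0.25) = 0.75*(prev + cur + next)/3 + 0.25*cur
    #                                                        = 0.25*prev + 0.5*cur + 0.25*next, i.e. the (1:2:1)/4 binomial kernel.
    # The TR1 == 2 case combines degrain1, degrain2 and edi the same way to give (1:4:6:4:1)/16.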
if TR1 <= 0:
binomial1 = edi
elif TR1 == 1:
binomial1 = core.std.Merge(degrain1, edi, [0.25])
else:
binomial1 = core.std.Merge(core.std.Merge(degrain1, degrain2, [0.2]), edi, [0.0625])
# Remove areas of difference between smoothed image and interpolated image that are not bob-shimmer fixes: repairs residual motion blur from temporal smooth
repair1 = binomial1 if Rep1 <= 0 else QTGMC_KeepOnlyBobShimmerFixes(binomial1, edi, Rep1, RepChroma)
    # Apply source match - use difference between output and source to successively refine output [extracted to function to clarify main code path]
if SourceMatch <= 0:
match = repair1
else:
match = QTGMC_ApplySourceMatch(repair1, InputType, ediInput, bVec1, fVec1, bVec2, fVec2, SubPel, SubPelInterp, hpad, vpad, ThSAD1, ThSCD1, ThSCD2, SourceMatch, MatchTR1,
MatchEdi, MatchNNSize, MatchNNeurons, MatchEdiQual, MatchEdiMaxD, MatchTR2, MatchEdi2, MatchNNSize2, MatchNNeurons2, MatchEdiQual2,
MatchEdiMaxD2, MatchEnhance, pscrn, int16_prescreener, int16_predictor, exp, alpha, beta, gamma, nrad, vcheck, TFF, opencl, device)
# Lossless == 2 - after preparing an interpolated, de-shimmered clip, restore the original source fields into it and clean up any artifacts
# This mode will not give a true lossless result because the resharpening and final temporal smooth are still to come, but it will add further detail
# However, it can introduce minor combing. This setting is best used together with source-match (it's effectively the final source-match stage)
lossed1 = QTGMC_MakeLossless(match, innerClip, InputType, TFF) if Lossless >= 2 else match
#---------------------------------------
# Resharpen / retouch output
# Resharpen to counteract temporal blurs. Little sharpening needed for source-match mode since it has already recovered sharpness from source
if SMode <= 0:
resharp = lossed1
elif SMode == 1:
resharp = core.std.Expr([lossed1, MinBlur(lossed1)], ['x dup y - {} * +'.format(sharpAdj)])
else:
vresharp = core.std.Merge(core.std.Maximum(lossed1, coordinates=[0, 1, 0, 0, 0, 0, 1, 0]), core.std.Minimum(lossed1, coordinates=[0, 1, 0, 0, 0, 0, 1, 0]))
if Precise:
vresharp = core.std.Expr([vresharp, lossed1], ['x y < x {} + x y > x {} - x ? ?'.format(i, i)]) # Precise mode: reduce tiny overshoot
resharp = core.std.Expr([lossed1, MinBlur(vresharp)], ['x dup y - {} * +'.format(sharpAdj)])
    # Slightly thin down 1-pixel high horizontal edges that have been widened into neighboring field lines by the interpolator
SVThinSc = SVThin * 6
if SVThin > 0:
expr5 = 'y x - {} *'.format(SVThinSc) if isFLOAT else 'y x - {} * {} +'.format(SVThinSc, mid)
vertMedD = core.std.Expr([lossed1, V(lossed1, [1] if isGRAY else [1, 0])], [expr5] if isGRAY else [expr5, ''])
vertMedD = core.std.Convolution(vertMedD, matrix=[1, 2, 1], planes=[0], mode='h')
expr6 = 'y abs x abs > y 0 ?' if isFLOAT else 'y {m} - abs x {m} - abs > y {m} ?'.format(m=mid)
neighborD = core.std.Expr([vertMedD, core.std.Convolution(vertMedD, matrix=mat, planes=[0])], [expr6] if isGRAY else [expr6, ''])
thin = core.std.MergeDiff(resharp, neighborD, [0])
else:
thin = resharp
# Back blend the blurred difference between sharpened & unsharpened clip, before (1st) sharpness limiting (Sbb == 1 or 3). A small fidelity improvement
if Sbb not in [1, 3]:
backBlend1 = thin
else:
backBlend1 = core.std.MakeDiff(thin, core.std.MakeDiff(thin, lossed1, [0]).tcanny.TCanny(sigma=1.4, mode=-1, planes=[0]), [0])
# Limit over-sharpening by clamping to neighboring (spatial or temporal) min/max values in original
# Occurs here (before final temporal smooth) if SLMode == 1 or 2. This location will restrict sharpness more, but any artifacts introduced will be smoothed
if SLMode == 1:
if SLRad <= 1:
sharpLimit1 = RE(backBlend1, edi, 1)
else:
sharpLimit1 = RE(backBlend1, RE(backBlend1, edi, 12), 1)
elif SLMode == 2:
sharpLimit1 = haf.Clamp(backBlend1, tMax, tMin, SOvs*i, SOvs*i)
else:
sharpLimit1 = backBlend1
# Back blend the blurred difference between sharpened & unsharpened clip, after (1st) sharpness limiting (Sbb == 2 or 3). A small fidelity improvement
if Sbb < 2:
backBlend2 = sharpLimit1
else:
backBlend2 = core.std.MakeDiff(sharpLimit1, core.std.MakeDiff(sharpLimit1, lossed1, [0]).tcanny.TCanny(sigma=1.4, mode=-1, planes=[0]), [0])
# Add back any extracted noise, prior to final temporal smooth - this will restore detail that was removed as "noise" without restoring the noise itself
# Average luma of FFT3DFilter extracted noise is 128.5, so deal with that too
if GrainRestore <= 0:
addNoise1 = backBlend2
else:
expr7 = 'x {} *'.format(GrainRestore) if isFLOAT else 'x {} - {} * {} +'.format(noiseCentre, GrainRestore, mid)
addNoise1 = core.std.MergeDiff(backBlend2, core.std.Expr([finalNoise], [expr7] if ChromaNoise or isGRAY else [expr7, '']), planes=CNplanes)
# Final light linear temporal smooth for denoising
if TR2 > 0:
stableSuper = S(addNoise1, pel=SubPel, sharp=SubPelInterp, levels=1, rfilter=1, hpad=hpad, vpad=vpad)
if TR2 <= 0:
stable = addNoise1
elif TR2 == 1:
stable = D1(addNoise1, stableSuper, bVec1, fVec1, thsad=ThSAD2, thscd1=ThSCD1, thscd2=ThSCD2)
elif TR2 == 2:
stable = D2(addNoise1, stableSuper, bVec1, fVec1, bVec2, fVec2, thsad=ThSAD2, thscd1=ThSCD1, thscd2=ThSCD2)
else:
stable = D3(addNoise1, stableSuper, bVec1, fVec1, bVec2, fVec2, bVec3, fVec3, thsad=ThSAD2, thscd1=ThSCD1, thscd2=ThSCD2)
# Remove areas of difference between final output & basic interpolated image that are not bob-shimmer fixes: repairs motion blur caused by temporal smooth
repair2 = stable if Rep2 <= 0 else QTGMC_KeepOnlyBobShimmerFixes(stable, edi, Rep2, RepChroma)
# Limit over-sharpening by clamping to neighboring (spatial or temporal) min/max values in original
# Occurs here (after final temporal smooth) if SLMode == 3 or 4. Allows more sharpening here, but more prone to introducing minor artifacts
if SLMode == 3:
if SLRad <= 1:
sharpLimit2 = RE(repair2, edi, 1)
else:
sharpLimit2 = RE(repair2, RE(repair2, edi, 12), 1)
elif SLMode >= 4:
sharpLimit2 = haf.Clamp(repair2, tMax, tMin, SOvs*i, SOvs*i)
else:
sharpLimit2 = repair2
# Lossless == 1 - inject source fields into result and clean up inevitable artifacts. Provided NoiseRestore == 0.0 or 1.0, this mode will make the script
# result properly lossless, but this will retain source artifacts and cause some combing (where the smoothed deinterlace doesn't quite match the source)
lossed2 = QTGMC_MakeLossless(sharpLimit2, innerClip, InputType, TFF) if Lossless == 1 else sharpLimit2
# Add back any extracted noise, after final temporal smooth. This will appear as noise/grain in the output
# Average luma of FFT3DFilter extracted noise is 128.5, so deal with that too
if NoiseRestore <= 0:
addNoise2 = lossed2
else:
expr8 = 'x {} *'.format(NoiseRestore) if isFLOAT else 'x {} - {} * {} +'.format(noiseCentre, NoiseRestore, mid)
addNoise2 = core.std.MergeDiff(lossed2, core.std.Expr([finalNoise], [expr8] if ChromaNoise or isGRAY else [expr8, '']), planes=CNplanes)
#---------------------------------------
# Post-Processing
# Shutter motion blur - get level of blur depending on output framerate and blur already in source
blurLevel = (ShutterAngleOut * FPSDivisor - ShutterAngleSrc) / 3.6
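    # Purely illustrative numbers: with ShutterAngleSrc=180, ShutterAngleOut=180 and FPSDivisor=1 the level is
    # (180*1 - 180)/3.6 = 0 (no extra blur); ShutterAngleOut=360 with ShutterAngleSrc=0 gives (360 - 0)/3.6 = 100.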
if blurLevel < 0:
raise ValueError('QTGMC: Cannot reduce motion blur already in source: increase ShutterAngleOut or FPSDivisor')
elif blurLevel > 200:
raise ValueError('QTGMC: Exceeded maximum motion blur level: decrease ShutterAngleOut or FPSDivisor')
# Shutter motion blur - get finer resolution motion vectors to reduce blur "bleeding" into static areas
rBlockDivide = [1, 1, 2, 4][ShutterBlur]
rBlockSize = max(BlockSize // rBlockDivide, 4)
rOverlap = max(Overlap // rBlockDivide, 2)
rBlockDivide = BlockSize // rBlockSize
rLambda = Lambda // (rBlockDivide ** 2)
if ShutterBlur > 1:
recalculate_args = dict(blksize=rBlockSize, overlap=rOverlap, search=Search, searchparam=SearchParam, truemotion=TrueMotion, _lambda=rLambda, pnew=PNew, dct=DCT, chroma=ChromaMotion)
sbBVec1 = R(srchSuper, bVec1, **recalculate_args)
sbFVec1 = R(srchSuper, fVec1, **recalculate_args)
elif ShutterBlur > 0:
sbBVec1 = bVec1
sbFVec1 = fVec1
# Shutter motion blur - use MFlowBlur to blur along motion vectors
if ShutterBlur > 0:
sblurSuper = S(addNoise2, pel=SubPel, sharp=SubPelInterp, levels=1, rfilter=1, hpad=hpad, vpad=vpad)
sblur = FB(addNoise2, sblurSuper, sbBVec1, sbFVec1, blur=blurLevel, thscd1=ThSCD1, thscd2=ThSCD2)
# Shutter motion blur - use motion mask to reduce blurring in areas of low motion - also helps reduce blur "bleeding" into static areas, then select blur type
if ShutterBlur <= 0:
sblurred = addNoise2
elif SBlurLimit <= 0:
sblurred = sblur
else:
sbMotionMask = M(srchClip.fmtc.bitdepth(bits=8, dmode=1), bVec1, kind=0, ml=SBlurLimit).fmtc.bitdepth(bits=bd) if 8 < bd <= 16 else M(srchClip, bVec1, kind=0, ml=SBlurLimit)
sblurred = core.std.MaskedMerge(addNoise2, sblur, sbMotionMask)
# Reduce frame rate
decimated = sblurred[::FPSDivisor] if FPSDivisor > 1 else sblurred
# Crop off temporary vertical padding
# Show output of choice
return decimated.std.Crop(top=4, bottom=4) if Border else decimated
#---------------------------------------
# Helper function: Interpolate input clip using method given in EdiMode. Use Fallback or Bob as result if mode not in list. If ChromaEdi is set then interpolate
# chroma separately with that method (only really useful for EEDIx). The function is used as main algorithm starting point and for first two source-match stages.
def QTGMC_Interpolate(clip, InputType, EdiMode, NNSize, NNeurons, EdiQual, EdiMaxD, pscrn, int16_prescreener, int16_predictor, exp, alpha, beta, gamma, nrad, vcheck, Fallback, ChromaEdi, TFF, opencl, device):
if opencl:
NNEDI3 = core.nnedi3cl.NNEDI3CL
EEDI3 = core.eedi3m.EEDI3CL
nnedi3_args = dict(nsize=NNSize, nns=NNeurons, qual=EdiQual, pscrn=pscrn, device=device)
eedi3_args = dict(alpha=alpha, beta=beta, gamma=gamma, nrad=nrad, mdis=EdiMaxD, vcheck=vcheck, device=device)
else:
NNEDI3 = core.znedi3.nnedi3 if hasattr(core, 'znedi3') and clip.format.sample_type != vs.FLOAT else core.nnedi3.nnedi3
EEDI3 = core.eedi3m.EEDI3 if hasattr(core, 'eedi3m') else core.eedi3.eedi3
nnedi3_args = dict(nsize=NNSize, nns=NNeurons, qual=EdiQual, pscrn=pscrn, int16_prescreener=int16_prescreener, int16_predictor=int16_predictor, exp=exp)
eedi3_args = dict(alpha=alpha, beta=beta, gamma=gamma, nrad=nrad, mdis=EdiMaxD, vcheck=vcheck)
isGRAY = clip.format.color_family == vs.GRAY
ChromaEdi = '' if isGRAY else ChromaEdi
planes = [0] if ChromaEdi or isGRAY else [0, 1, 2]
field = 3 if TFF else 2
if InputType == 1:
return clip
elif EdiMode == 'nnedi3':
interp = NNEDI3(clip, field=field, planes=planes, **nnedi3_args)
elif EdiMode == 'eedi3+nnedi3':
interp = EEDI3(clip, field=field, planes=planes, **eedi3_args, sclip=NNEDI3(clip, field=field, planes=planes, **nnedi3_args))
elif EdiMode == 'eedi3':
interp = EEDI3(clip, field=field, planes=planes, **eedi3_args)
else:
interp = Fallback if isinstance(Fallback, vs.VideoNode) else haf.Bob(clip, 0, 0.5, TFF)
if ChromaEdi == 'nnedi3':
interpuv = NNEDI3(clip, field=field, planes=[1, 2], **nnedi3_args)
elif ChromaEdi == 'eedi3':
interpuv = EEDI3(clip, field=field, planes=[1, 2], **eedi3_args)
elif ChromaEdi == 'bob':
interpuv = haf.Bob(clip, 0, 0.5, TFF)
else:
return interp
return core.std.ShufflePlanes([interp, interpuv], [0, 1, 2], clip.format.color_family)
# Helper function: Compare processed clip with reference clip: only allow thin, horizontal areas of difference, i.e. bob shimmer fixes
# Rough algorithm: Get difference, deflate vertically by a couple of pixels or so, then inflate again. Thin regions will be removed by this process.
# Restore remaining areas of difference back to as they were in reference clip.
def QTGMC_KeepOnlyBobShimmerFixes(clip, Ref, Rep=1, Chroma=True):
bd = clip.format.bits_per_sample
isFLOAT = clip.format.sample_type == vs.FLOAT
isGRAY = clip.format.color_family == vs.GRAY
mid = 0 if isFLOAT else 1 << (bd - 1)
i = 0.00392 if isFLOAT else 1 << (bd - 8)
planes = [0, 1, 2] if Chroma and not isGRAY else [0]
diff = core.std.MakeDiff(Ref, clip)
# ed is the erosion distance - how much to deflate then reflate to remove thin areas of interest: 0 = minimum to 7 = maximum
# od is over-dilation level - extra inflation to ensure areas to restore back are fully caught: 0 = none to 3 = one full pixel
# If Rep < 10, then ed = Rep and od = 0, otherwise ed = 10s digit and od = 1s digit (nasty method, but kept for compatibility with original TGMC)
ed = Rep if Rep < 10 else Rep // 10
od = 0 if Rep < 10 else Rep % 10
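    # e.g. Rep = 3  -> ed = 3, od = 0 (single digit: erosion setting only)
    #      Rep = 12 -> ed = 1, od = 2 (tens digit = erosion, ones digit = over-dilation)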
# Areas of positive difference # ed = 0 1 2 3 4 5 6 7
choke1 = core.std.Minimum(diff, planes, coordinates=[0, 1, 0, 0, 0, 0, 1, 0]) # x x x x x x x x 1 pixel \
if ed > 2: choke1 = core.std.Minimum(choke1, planes, coordinates=[0, 1, 0, 0, 0, 0, 1, 0]) # . . . x x x x x 1 pixel | Deflate to remove thin areas
if ed > 5: choke1 = core.std.Minimum(choke1, planes, coordinates=[0, 1, 0, 0, 0, 0, 1, 0]) # . . . . . . x x 1 pixel /
if ed % 3 != 0: choke1 = core.std.Deflate(choke1, planes) # . x x . x x . x A bit more deflate & some horizonal effect
if ed in [2, 5]: choke1 = core.std.Median(choke1, planes) # . . x . . x . . Local median
choke1 = core.std.Maximum(choke1, planes, coordinates=[0, 1, 0, 0, 0, 0, 1, 0]) # x x x x x x x x 1 pixel \
if ed > 1: choke1 = core.std.Maximum(choke1, planes, coordinates=[0, 1, 0, 0, 0, 0, 1, 0]) # . . x x x x x x 1 pixel | Reflate again
if ed > 4: choke1 = core.std.Maximum(choke1, planes, coordinates=[0, 1, 0, 0, 0, 0, 1, 0]) # . . . . . x x x 1 pixel /
# Over-dilation - extra reflation up to about 1 pixel
if od == 1:
choke1 = core.std.Inflate(choke1, planes)
elif od == 2:
choke1 = core.std.Inflate(choke1, planes).std.Inflate(planes)
elif od >= 3:
choke1 = core.std.Maximum(choke1, planes)
# Areas of negative difference (similar to above)
choke2 = core.std.Maximum(diff, planes, coordinates=[0, 1, 0, 0, 0, 0, 1, 0])
if ed > 2: choke2 = core.std.Maximum(choke2, planes, coordinates=[0, 1, 0, 0, 0, 0, 1, 0])
if ed > 5: choke2 = core.std.Maximum(choke2, planes, coordinates=[0, 1, 0, 0, 0, 0, 1, 0])
if ed % 3 != 0: choke2 = core.std.Inflate(choke2, planes)
if ed in [2, 5]: choke2 = core.std.Median(choke2, planes)
choke2 = core.std.Minimum(choke2, planes, coordinates=[0, 1, 0, 0, 0, 0, 1, 0])
if ed > 1: choke2 = core.std.Minimum(choke2, planes, coordinates=[0, 1, 0, 0, 0, 0, 1, 0])
if ed > 4: choke2 = core.std.Minimum(choke2, planes, coordinates=[0, 1, 0, 0, 0, 0, 1, 0])
if od == 1:
choke2 = core.std.Deflate(choke2, planes)
elif od == 2:
choke2 = core.std.Deflate(choke2, planes).std.Deflate(planes)
elif od >= 3:
choke2 = core.std.Minimum(choke2, planes)
# Combine above areas to find those areas of difference to restore
expr1 = 'x 0.00392 < x y 0 < 0 y ? ?' if isFLOAT else 'x {} < x y {} < {} y ? ?'.format(129*i, mid, mid)
expr2 = 'x -0.00392 > x y 0 > 0 y ? ?' if isFLOAT else 'x {} > x y {} > {} y ? ?'.format(127*i, mid, mid)
restore = core.std.Expr([core.std.Expr([diff, choke1], [expr1] if Chroma or isGRAY else [expr1, '']), choke2], [expr2] if Chroma or isGRAY else [expr2, ''])
return core.std.MergeDiff(clip, restore, planes)
# Given noise extracted from an interlaced source (i.e. the noise is interlaced), generate "progressive" noise with a new "field" of noise injected.
# The new noise is centered on a weighted local average and uses the difference between local min & max as an estimate of local variance.
def QTGMC_Generate2ndFieldNoise(clip, InterleavedClip, ChromaNoise, TFF):
bd = clip.format.bits_per_sample
isFLOAT = clip.format.sample_type == vs.FLOAT
isGRAY = clip.format.color_family == vs.GRAY
mid = 0 if isFLOAT else 1 << (bd - 1)
GRAY = [0.5] if isFLOAT and isGRAY else [0.5, 0, 0] if isFLOAT else [mid] * clip.format.num_planes
planes = [0, 1, 2] if ChromaNoise and not isGRAY else [0]
origNoise = core.std.SeparateFields(clip, TFF)
noiseMax = core.std.Maximum(origNoise, planes).std.Maximum(planes, coordinates=[0, 0, 0, 1, 1, 0, 0, 0])
noiseMin = core.std.Minimum(origNoise, planes).std.Minimum(planes, coordinates=[0, 0, 0, 1, 1, 0, 0, 0])
random = core.std.SeparateFields(InterleavedClip, TFF).std.BlankClip(color=GRAY).grain.Add(var=1800, uvar=1800 if ChromaNoise else 0)
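    # The grain clip provides random values centered on the mid level; the expression below scales them by the
    # local (max - min) range and MergeDiff adds the result onto the local minimum, so the synthesized "field"
    # of noise stays within the amplitude observed in the surrounding original noise.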
expr = 'x y *' if isFLOAT else 'x {} - y * {} / {} +'.format(mid, 1 << bd, mid)
varRandom = core.std.Expr([core.std.MakeDiff(noiseMax, noiseMin, planes), random], [expr] if ChromaNoise or isGRAY else [expr, ''])
newNoise = core.std.MergeDiff(noiseMin, varRandom, planes)
return haf.Weave(core.std.Interleave([origNoise, newNoise]), TFF)
# Insert the source lines into the result to create a true lossless output. However, the other lines in the result have had considerable processing
# and won't exactly match source lines. There will be some slight residual combing. Use vertical medians to clean a little of this away.
def QTGMC_MakeLossless(clip, Source, InputType, TFF):
isFLOAT = clip.format.sample_type == vs.FLOAT
mid = 0 if isFLOAT else 1 << (clip.format.bits_per_sample - 1)
V = core.rgsf.VerticalCleaner if isFLOAT else core.rgvs.VerticalCleaner
RE = core.rgsf.Repair if isFLOAT else core.rgvs.Repair
RG = core.rgsf.RemoveGrain if isFLOAT else core.rgvs.RemoveGrain
if InputType == 1:
raise ValueError('QTGMC: Lossless modes are incompatible with InputType = 1')
    # Weave the source fields and the "new" fields that have been generated in the clip
srcFields = core.std.SeparateFields(Source, TFF) if InputType <= 0 else core.std.SeparateFields(Source, TFF).std.SelectEvery(4, [0, 3])
newFields = core.std.SeparateFields(clip, TFF).std.SelectEvery(4, [1, 2])
processed = haf.Weave(core.std.Interleave([srcFields, newFields]).std.SelectEvery(4, [0, 1, 3, 2]), TFF)
# Clean some of the artifacts caused by the above - creating a second version of the "new" fields
vertMedian = V(processed, 1)
vertMedDiff = core.std.MakeDiff(processed, vertMedian)
vmNewDiff1 = core.std.SeparateFields(vertMedDiff, TFF).std.SelectEvery(4, [1, 2])
expr = 'x y * 0 < 0 x abs y abs < x y ? ?' if isFLOAT else 'x {m} - y {m} - * 0 < {m} x {m} - abs y {m} - abs < x y ? ?'.format(m=mid)
vmNewDiff2 = core.std.Expr([V(vmNewDiff1, 1), vmNewDiff1], [expr])
vmNewDiff3 = RE(vmNewDiff2, RG(vmNewDiff2, 2), 1)
# Reweave final result
return haf.Weave(core.std.Interleave([srcFields, core.std.MakeDiff(newFields, vmNewDiff3)]).std.SelectEvery(4, [0, 1, 3, 2]), TFF)
# Source-match, a three stage process that takes the difference between deinterlaced clip and the original interlaced source,
# to shift the clip more towards the source without introducing shimmer. All other arguments defined in main script.
def QTGMC_ApplySourceMatch(Deinterlace, InputType, Source, bVec1, fVec1, bVec2, fVec2, SubPel, SubPelInterp, hpad, vpad, ThSAD1, ThSCD1, ThSCD2, SourceMatch,
MatchTR1, MatchEdi, MatchNNSize, MatchNNeurons, MatchEdiQual, MatchEdiMaxD, MatchTR2, MatchEdi2, MatchNNSize2, MatchNNeurons2, MatchEdiQual2, MatchEdiMaxD2,
MatchEnhance, pscrn, int16_prescreener, int16_predictor, exp, alpha, beta, gamma, nrad, vcheck, TFF, opencl, device):
# Basic source-match. Find difference between source clip & equivalent fields in interpolated/smoothed clip (called the "error" in formula below).
# Ideally there should be no difference, we want the fields in the output to be as close as possible to the source whilst remaining shimmer-free.
# So adjust the *source* in such a way that smoothing it will give a result closer to the unadjusted source.
# Then rerun the interpolation (edi) and binomial smooth with this new source. Result will still be shimmer-free and closer to the original source.
# Formula used for correction is P0' = P0 + (P0-P1)/(k+S(1-k)), where P0 is original image, P1 is the 1st attempt at interpolation/smoothing,
# P0' is the revised image to use as new source for interpolation/smoothing, k is the weighting given to the current frame in the smooth,
# and S is a factor indicating "temporal similarity" of the error from frame to frame, i.e. S = average over all pixels of [neighbor frame error / current frame error]
# Decreasing S will make the result sharper, sensible range is about -0.25 to 1.0. Empirically, S = 0.5 is effective.
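    # Worked through for the errorAdjust values below: the binomial smooth weights the current frame
    # k = 1 (MatchTR = 0), k = 1/2 (MatchTR = 1) or k = 6/16 (MatchTR = 2), so 1/(k + S*(1-k)) evaluates to
    # 1.0, 2/(1+S) and 16/(6+10S) = 8/(3+5S) respectively - exactly the three entries in errorAdjust1/errorAdjust2.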
errorTemporalSimilarity = 0.4 # S in formula described above
errorAdjust1 = [1.0, 2 / (1 + errorTemporalSimilarity), 8 / (3 + 5 * errorTemporalSimilarity)][MatchTR1]
isFLOAT = (Deinterlace.format.sample_type == vs.FLOAT)
S = core.mvsf.Super if isFLOAT else core.mv.Super
D1 = core.mvsf.Degrain1 if isFLOAT else core.mv.Degrain1
match1Clip = Deinterlace if SourceMatch < 1 or InputType == 1 else haf.Weave(core.std.SeparateFields(Deinterlace, TFF).std.SelectEvery(4, [0, 3]), TFF)
match1Update = Source if SourceMatch < 1 or MatchTR1 <= 0 else core.std.Expr([Source, match1Clip], ['x {} * y {} * -'.format(errorAdjust1+1, errorAdjust1)])
if SourceMatch > 0:
match1Edi = QTGMC_Interpolate(match1Update, InputType, MatchEdi, MatchNNSize, MatchNNeurons, MatchEdiQual, MatchEdiMaxD, pscrn, int16_prescreener, int16_predictor, exp, alpha, beta, gamma, nrad, vcheck, None, '', TFF, opencl, device)
if MatchTR1 > 0:
match1Super = S(match1Edi, pel=SubPel, sharp=SubPelInterp, levels=1, rfilter=1, hpad=hpad, vpad=vpad)
match1Degrain1 = D1(match1Edi, match1Super, bVec1, fVec1, thsad=ThSAD1, thscd1=ThSCD1, thscd2=ThSCD2)
if MatchTR1 > 1:
match1Degrain2 = D1(match1Edi, match1Super, bVec2, fVec2, thsad=ThSAD1, thscd1=ThSCD1, thscd2=ThSCD2)
if SourceMatch < 1:
match1 = Deinterlace
elif MatchTR1 <= 0:
match1 = match1Edi
elif MatchTR1 == 1:
match1 = core.std.Merge(match1Degrain1, match1Edi, [0.25])
else:
match1 = core.std.Merge(core.std.Merge(match1Degrain1, match1Degrain2, [0.2]), match1Edi, [0.0625])
if SourceMatch < 2:
return match1
# Enhance effect of source-match stages 2 & 3 by sharpening clip prior to refinement (source-match tends to underestimate so this will leave result sharper)
match1Shp = core.std.Expr([match1, MinBlur(match1)], ['x dup y - {} * +'.format(MatchEnhance)]) if SourceMatch > 1 and MatchEnhance > 0 else match1
# Source-match refinement. Find difference between source clip & equivalent fields in (updated) interpolated/smoothed clip.
# Interpolate & binomially smooth this difference then add it back to output. Helps restore differences that the basic match missed.
# However, as this pass works on a difference rather than the source image it can be prone to occasional artifacts (difference images are not ideal for interpolation).
# In fact a lower quality interpolation such as a simple bob often performs nearly as well as advanced, slower methods (e.g. NNEDI3)
match2Clip = match1Shp if SourceMatch < 2 or InputType == 1 else haf.Weave(core.std.SeparateFields(match1Shp, TFF).std.SelectEvery(4, [0, 3]), TFF)
if SourceMatch > 1:
match2Diff = core.std.MakeDiff(Source, match2Clip)
match2Edi = QTGMC_Interpolate(match2Diff, InputType, MatchEdi2, MatchNNSize2, MatchNNeurons2, MatchEdiQual2, MatchEdiMaxD2, pscrn, int16_prescreener, int16_predictor, exp, alpha, beta, gamma, nrad, vcheck, None, '', TFF, opencl, device)
if MatchTR2 > 0:
match2Super = S(match2Edi, pel=SubPel, sharp=SubPelInterp, levels=1, rfilter=1, hpad=hpad, vpad=vpad)
match2Degrain1 = D1(match2Edi, match2Super, bVec1, fVec1, thsad=ThSAD1, thscd1=ThSCD1, thscd2=ThSCD2)
if MatchTR2 > 1:
match2Degrain2 = D1(match2Edi, match2Super, bVec2, fVec2, thsad=ThSAD1, thscd1=ThSCD1, thscd2=ThSCD2)
if SourceMatch < 2:
match2 = match1
elif MatchTR2 <= 0:
match2 = match2Edi
elif MatchTR2 == 1:
match2 = core.std.Merge(match2Degrain1, match2Edi, [0.25])
else:
match2 = core.std.Merge(core.std.Merge(match2Degrain1, match2Degrain2, [0.2]), match2Edi, [0.0625])
# Source-match second refinement - correct error introduced in the refined difference by temporal smoothing. Similar to error correction from basic step
errorAdjust2 = [1.0, 2 / (1 + errorTemporalSimilarity), 8 / (3 + 5 * errorTemporalSimilarity)][MatchTR2]
match3Update = match2Edi if SourceMatch < 3 or MatchTR2 <= 0 else core.std.Expr([match2Edi, match2], ['x {} * y {} * -'.format(errorAdjust2+1, errorAdjust2)])
if SourceMatch > 2:
if MatchTR2 > 0:
match3Super = S(match3Update, pel=SubPel, sharp=SubPelInterp, levels=1, rfilter=1, hpad=hpad, vpad=vpad)
match3Degrain1 = D1(match3Update, match3Super, bVec1, fVec1, thsad=ThSAD1, thscd1=ThSCD1, thscd2=ThSCD2)
if MatchTR2 > 1:
match3Degrain2 = D1(match3Update, match3Super, bVec2, fVec2, thsad=ThSAD1, thscd1=ThSCD1, thscd2=ThSCD2)
if SourceMatch < 3:
match3 = match2
elif MatchTR2 <= 0:
match3 = match3Update
elif MatchTR2 == 1:
match3 = core.std.Merge(match3Degrain1, match3Update, [0.25])
else:
match3 = core.std.Merge(core.std.Merge(match3Degrain1, match3Degrain2, [0.2]), match3Update, [0.0625])
# Apply difference calculated in source-match refinement
return core.std.MergeDiff(match1Shp, match3)
def SMDegrain(clip, tr=2, thSAD=314, thSADC=None, RefineMotion=False, contrasharp=None, CClip=None, interlaced=False, TFF=None,
pel=None, subpixel=2, prefilter=-1, mfilter=None, blksize=None, overlap=None, search=5, truemotion=None, DCT=0,
MVglobal=None, limit=255, limitc=None, thSCD1=400, thSCD2=128, chroma=True, searchparam=2, Str=1, Amp=0.0625):
"""
Simple MDegrain Mod - SMDegrain()
Mod by Dogway - Original idea by Caroliano
Special Thanks: Didée, cretindesalpes, Sagekilla, Gavino and MVTools people
General purpose simple degrain function. Pure temporal denoiser.
Basically a wrapper(function)/frontend of MVTools2 + MDegrain with some added common related options.
    The goal is accessibility and quality, but it is not targeted at any specific kind of source.
    The reasoning behind it is to keep it simple, so you will only need MVTools2.
Check documentation for deep explanation on settings and defaults.
VideoHelp thread: (https://forum.videohelp.com/threads/369142)
"""
# tr > 3 uses mvsf hence requires float input, also requires mvmulti module.
# Modified from havsfunc: https://github.com/HomeOfVapourSynthEvolution/havsfunc/blob/r31/havsfunc.py#L3186
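    # Illustrative usage (a sketch only; "src" is a hypothetical clip, not defined here):
    #   out = SMDegrain(src)                            # defaults: tr=2, thSAD=314
    #   out = SMDegrain(src, tr=3, RefineMotion=True)   # stronger denoise with recalculated vectors
    #   out = SMDegrain(src, interlaced=True, TFF=True) # interlaced source, top field first
    #   out = SMDegrain(src.fmtc.bitdepth(flt=1), tr=6) # tr > 3 needs float input (mvsf + mvmulti)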
bd = clip.format.bits_per_sample
isFLOAT = clip.format.sample_type == vs.FLOAT
isGRAY = clip.format.color_family == vs.GRAY
chroma = False if isGRAY else chroma
peak = 1.0 if isFLOAT else (1 << bd) - 1
i = 0.00392 if isFLOAT else 1 << (bd - 8)
A = core.mvsf.Analyse if isFLOAT else core.mv.Analyse
S = core.mvsf.Super if isFLOAT else core.mv.Super
R = core.mvsf.Recalculate if isFLOAT else core.mv.Recalculate
D1 = core.mvsf.Degrain1 if isFLOAT else core.mv.Degrain1
D2 = core.mvsf.Degrain2 if isFLOAT else core.mv.Degrain2
D3 = core.mvsf.Degrain3 if isFLOAT else core.mv.Degrain3
w = clip.width
h = clip.height
preclip = isinstance(prefilter, vs.VideoNode)
ifC = isinstance(contrasharp, bool)
if1 = isinstance(CClip, vs.VideoNode)
planes = [0, 1, 2] if chroma else [0]
plane = 4 if chroma else 0
limit = scale(limit, peak)
limitc = limit if limitc is None else scale(limitc, peak)
# Defaults & Conditionals
if thSADC is None:
thSADC = thSAD // 2
if contrasharp is None:
contrasharp = if1
if pel is None:
pel = 1 if w > 960 else 2
if pel < 2:
subpixel = min(subpixel, 2)
ppp = pel > 1 and subpixel > 2
if blksize is None:
blksize = 16 if w > 960 else 8
if overlap is None:
overlap = blksize // 2
if truemotion is None:
truemotion = w <= 960
# Error Report
if not isinstance(clip, vs.VideoNode) or clip.format.color_family not in [vs.GRAY, vs.YUV]:
raise TypeError("SMDegrain: This is not a GRAY or YUV clip!")
if not (ifC or isinstance(contrasharp, int)):
raise TypeError("SMDegrain: contrasharp only accepts bool and integer inputs")
if if1 and (CClip.format.id != clip.format.id):
raise TypeError("SMDegrain: CClip must be the same format as clip")
if if1 and (CClip.width != clip.width or CClip.height != clip.height):
raise TypeError("SMDegrain: CClip must be the same size as clip")
if interlaced and h & 3:
raise ValueError('SMDegrain: Interlaced source requires mod 4 height sizes')
if interlaced and not isinstance(TFF, bool):
raise TypeError("SMDegrain: TFF must be set if source is interlaced. Setting TFF to True means top field first or False means bottom field first")
if not (isinstance(prefilter, int) or preclip):
raise TypeError("SMDegrain: prefilter only accepts integer and clip inputs")
if preclip and prefilter.format.id != clip.format.id:
raise TypeError("SMDegrain: prefilter must be the same format as clip")
if preclip and (prefilter.width != clip.width or prefilter.height != clip.height):
raise TypeError("SMDegrain: prefilter must be the same size as clip")
if mfilter is not None and (not isinstance(mfilter, vs.VideoNode) or mfilter.format.id != clip.format.id):
raise TypeError("SMDegrain: mfilter must be the same format as clip")
if mfilter is not None and (mfilter.width != clip.width or mfilter.height != clip.height):
raise TypeError("SMDegrain: mfilter must be the same size as clip")
if RefineMotion and blksize < 8:
raise ValueError('SMDegrain: For RefineMotion you need a blksize of at least 8')
if not isFLOAT and tr > 3:
raise ValueError("SMDegrain: tr > 3 requires input of float sample type")
# RefineMotion Variables
if RefineMotion:
halfblksize = blksize // 2 # MRecalculate works with half block size
halfoverlap = max(overlap // 2, 2) # Halve the overlap to suit the halved block size
halfthSAD = thSAD // 2 # MRecalculate uses a more strict thSAD, which defaults to 157 (half of function's default of 314)
# clip preparation for Interlacing
inpu = clip if not interlaced else core.std.SeparateFields(clip, TFF)
# Prefilter & Motion Filter
if mfilter is None:
mfilter = inpu
if preclip:
pref = prefilter
elif 0 <= prefilter < 3:
pref = MinBlur(inpu, prefilter, planes)
elif prefilter == 3:
expr = 'x {a} < {p} x {b} > 0 {p} x {a} - {p} {b} {a} - / * - ? ?'.format(a=16*i, b=75*i, p=peak)
mask = core.std.Expr([core.std.ShufflePlanes(inpu, [0], vs.GRAY)], [expr])
pref = core.std.MaskedMerge(core.dfttest.DFTTest(inpu, tbsize=1, slocation=[0.0,4.0, 0.2,9.0, 1.0,15.0], planes=planes), inpu, mask)
elif prefilter == 4:
pref = haf.KNLMeansCL(inpu, d=1, a=1, h=7) if chroma else inpu.knlm.KNLMeansCL(d=1, a=1, h=7)
elif prefilter == 5:
pref = SpotLess(inpu, chroma=chroma)
else:
pref = inpu
# Default Auto-Prefilter - Luma expansion TV → PC (up to 16% more values for motion estimation)
pref = DitherLumaRebuild(pref, s0=Str, c=Amp, chroma=chroma)
# Subpixel 3
# Motion vectors search
super_args = dict(hpad=blksize, vpad=blksize, pel=pel)
analyse_args = dict(blksize=blksize, search=search, chroma=chroma, truemotion=truemotion, _global=MVglobal, overlap=overlap, dct=DCT, searchparam=searchparam)
if RefineMotion:
recalculate_args = dict(thsad=halfthSAD, blksize=halfblksize, search=search, chroma=chroma, truemotion=truemotion, overlap=halfoverlap, dct=DCT, searchparam=searchparam)
if ppp:
cshift = 0.25 if pel == 2 else 0.375
pclip = nnrs.nnedi3_resample(pref, w * pel, h * pel, cshift, cshift, nns=4)
pclip2 = nnrs.nnedi3_resample(inpu, w * pel, h * pel, cshift, cshift, nns=4)
super_search = S(pref, chroma=chroma, rfilter=4, pelclip=pclip, **super_args)
super_render = S(inpu, chroma=chroma, rfilter=1, pelclip=pclip2, levels=1, **super_args)
else:
super_search = S(pref, chroma=chroma, rfilter=4, sharp=1, **super_args)
super_render = S(inpu, chroma=chroma, rfilter=1, sharp=subpixel, levels=1, **super_args)
if tr < 4:
if interlaced:
if tr > 2:
bv6 = A(super_search, isb=True, delta=6, **analyse_args)
fv6 = A(super_search, isb=False, delta=6, **analyse_args)
if RefineMotion:
bv6 = R(super_search, bv6, **recalculate_args)
fv6 = R(super_search, fv6, **recalculate_args)
if tr > 1:
bv4 = A(super_search, isb=True, delta=4, **analyse_args)
fv4 = A(super_search, isb=False, delta=4, **analyse_args)
if RefineMotion:
bv4 = R(super_search, bv4, **recalculate_args)
fv4 = R(super_search, fv4, **recalculate_args)
else:
if tr > 2:
bv3 = A(super_search, isb=True, delta=3, **analyse_args)
fv3 = A(super_search, isb=False, delta=3, **analyse_args)
if RefineMotion:
bv3 = R(super_search, bv3, **recalculate_args)
fv3 = R(super_search, fv3, **recalculate_args)
bv1 = A(super_search, isb=True, delta=1, **analyse_args)
fv1 = A(super_search, isb=False, delta=1, **analyse_args)
if RefineMotion:
bv1 = R(super_search, bv1, **recalculate_args)
fv1 = R(super_search, fv1, **recalculate_args)
if interlaced or tr > 1:
bv2 = A(super_search, isb=True, delta=2, **analyse_args)
fv2 = A(super_search, isb=False, delta=2, **analyse_args)
if RefineMotion:
bv2 = R(super_search, bv2, **recalculate_args)
fv2 = R(super_search, fv2, **recalculate_args)
else:
vec = Analyze(super_search, tr=tr, d=interlaced, **analyse_args)
if RefineMotion:
vec = mvmulti.Recalculate(super_search, vec, tr=tr, **recalculate_args)
# Finally, MDegrain
if isFLOAT:
degrain_args = dict(thsad=[thSAD, thSADC, thSADC], plane=plane, limit=[limit, limitc, limitc], thscd1=thSCD1, thscd2=thSCD2)
else:
degrain_args = dict(thsad=thSAD, thsadc=thSADC, plane=plane, limit=limit, limitc=limitc, thscd1=thSCD1, thscd2=thSCD2)
if tr < 4:
if interlaced:
if tr == 3:
output = D3(mfilter, super_render, bv2, fv2, bv4, fv4, bv6, fv6, **degrain_args)
elif tr == 2:
output = D2(mfilter, super_render, bv2, fv2, bv4, fv4, **degrain_args)
else:
output = D1(mfilter, super_render, bv2, fv2, **degrain_args)
else:
if tr == 3:
output = D3(mfilter, super_render, bv1, fv1, bv2, fv2, bv3, fv3, **degrain_args)
elif tr == 2:
output = D2(mfilter, super_render, bv1, fv1, bv2, fv2, **degrain_args)
else:
output = D1(mfilter, super_render, bv1, fv1, **degrain_args)
else:
output = mvmulti.DegrainN(mfilter, super_render, vec, tr=tr, **degrain_args)
# Contrasharp (only sharpens luma)
if contrasharp:
if if1:
if interlaced:
CClip = core.std.SeparateFields(CClip, TFF)
else:
CClip = inpu
# Output
if contrasharp:
if interlaced:
if ifC:
return haf.Weave(ContraSharpening(output, CClip, planes=planes), TFF)
else:
return haf.Weave(LSFmod(output, contrasharp, source=CClip, kernel=-1, ssx=1, Lmode=0, soothe=False), TFF)
elif ifC:
return ContraSharpening(output, CClip, planes=planes)
else:
return LSFmod(output, contrasharp, source=CClip, kernel=-1, ssx=1, Lmode=0, soothe=False)
elif interlaced:
return haf.Weave(output, TFF)
else:
return output
def TemporalDegrain2(clip, degrainTR=2, degrainPlane=4, meAlg=5, meAlgPar=None, meSubpel=None, meBlksz=None, meTM=False,
limitSigma=None, limitBlksz=None, fftThreads=None, postFFT=0, postTR=1, postSigma=1, knlDevId=0, ppSAD1=10, ppSAD2=5,
ppSCD1=4, thSCD2=100, DCT=0, SubPelInterp=2, SrchClipPP=3, GlobalMotion=True, ChromaMotion=True, rec=False, extraSharp=False):
"""
Temporal Degrain Updated by ErazorTT
Based on function by Sagekilla, idea + original script created by Didee
Works as a simple temporal degraining function that'll remove
MOST or even ALL grain and noise from video sources,
including dancing grain, like the grain found on 300.
Also note, the parameters don't need to be tweaked much.
Required plugins:
FFT3DFilter: https://github.com/myrsloik/VapourSynth-FFT3DFilter
MVtools(sf): https://github.com/dubhater/vapoursynth-mvtools (https://github.com/IFeelBloated/vapoursynth-mvtools-sf)
Optional plugins:
dfttest: https://github.com/HomeOfVapourSynthEvolution/VapourSynth-DFTTest
KNLMeansCL: https://github.com/Khanattila/KNLMeansCL
recommendation:
1. start with default settings
    2a. if there is too much denoising for your taste, use degrainTR=1
    2b. if more denoising is needed, try postFFT=1 with postSigma=1, then tune postSigma (obvious blocking and banding of the sky are indications of a value which is at least a factor 2 too high)
    3. do not increase degrainTR above 1/8 of the fps (at 24fps up to 3)
    4. if there are any issues with banding, switch to postFFT=3
    Use only the following knobs (all other settings should already be where they need to be):
    - degrainTR, temporal radius of degrain, useful range: min=1, default=2, max=fps/8. Higher values clean the video more, but also increase the probability of wrongly identified motion, which leads to washed-out regions
    - postFFT, if you want to remove absolutely all remaining noise the suggestion is to use 3 (dfttest) for its quality; 1/2 (fft3dfilter) is much faster but can introduce banding; 4 is KNLMeansCL.
    - postSigma, increase it to remove all the remaining noise you want removed, but do not increase it too much, since unnecessarily high values have a severe negative impact on banding and/or sharpness
    - degrainPlane, if you just want to denoise the chroma use 3 (helps with compressibility, with the clip being almost identical to the original)
"""
if not isinstance(clip, vs.VideoNode) or clip.format.color_family not in [vs.GRAY, vs.YUV]:
raise TypeError("TemporalDegrain2: This is not a GRAY or YUV clip!")
w = clip.width
h = clip.height
WH = max(w, h)
bd = clip.format.bits_per_sample
isFLOAT = clip.format.sample_type == vs.FLOAT
isGRAY = clip.format.color_family == vs.GRAY
i = 0.00392 if isFLOAT else 1 << (bd - 8)
mid = 0.5 if isFLOAT else 1 << (bd - 1)
S = core.mvsf.Super if isFLOAT else core.mv.Super
A = core.mvsf.Analyse if isFLOAT else core.mv.Analyse
C = core.mvsf.Compensate if isFLOAT else core.mv.Compensate
R = core.mvsf.Recalculate if isFLOAT else core.mv.Recalculate
D1 = core.mvsf.Degrain1 if isFLOAT else core.mv.Degrain1
D2 = core.mvsf.Degrain2 if isFLOAT else core.mv.Degrain2
D3 = core.mvsf.Degrain3 if isFLOAT else core.mv.Degrain3
RG = core.rgsf.RemoveGrain if isFLOAT else core.rgvs.RemoveGrain
rad = 3 if extraSharp else None
mat = [1, 2, 1, 2, 4, 2, 1, 2, 1]
ChromaNoise = (degrainPlane > 0)
if meSubpel is None:
if WH < 960:
meSubpel = 4
elif WH < 2080:
meSubpel = 2
else:
meSubpel = 1
if meBlksz is None:
if WH < 1280:
meBlksz = 8
elif WH < 2080:
meBlksz = 16
else:
            meBlksz = 32
    hpad = meBlksz
    vpad = meBlksz
if limitSigma is None:
if WH < 960:
limitSigma = 8
elif WH < 1280:
limitSigma = 12
elif WH < 2080:
limitSigma = 16
else:
limitSigma = 32
if limitBlksz is None:
if WH < 960:
limitBlksz = 16
elif WH < 1280:
limitBlksz = 24
elif WH < 2080:
limitBlksz = 32
else:
limitBlksz = 64
if postFFT <= 0:
postTR = 0
if isGRAY:
ChromaMotion = False
ChromaNoise = False
degrainPlane = 0
if degrainPlane == 0:
fPlane = [0]
elif degrainPlane == 1:
fPlane = [1]
elif degrainPlane == 2:
fPlane = [2]
elif degrainPlane == 3:
fPlane = [1, 2]
else:
fPlane = [0, 1, 2]
if postFFT == 3:
postTR = min(postTR, 7)
if postFFT in [1, 2]:
postTR = min(postTR, 2)
postTD = postTR * 2 + 1
maxTR = max(degrainTR, postTR)
    Overlap = meBlksz // 2
Lambda = (1000 if meTM else 100) * (meBlksz ** 2) // 64
LSAD = 1200 if meTM else 400
PNew = 50 if meTM else 25
PLevel = 1 if meTM else 0
thSAD1 = int(ppSAD1 * 64)
thSAD2 = int(ppSAD2 * 64)
thSCD1 = int(ppSCD1 * 64)
CMplanes = [0, 1, 2] if ChromaMotion else [0]
if maxTR > 3 and not isFLOAT:
raise ValueError("TemporalDegrain2: maxTR > 3 requires input of float sample type")
if SrchClipPP == 1:
spatialBlur = core.resize.Bilinear(clip, m4(w/2), m4(h/2)).std.Convolution(matrix=mat, planes=CMplanes).resize.Bilinear(w, h)
elif SrchClipPP > 1:
spatialBlur = core.tcanny.TCanny(clip, sigma=2, mode=-1, planes=CMplanes)
spatialBlur = core.std.Merge(spatialBlur, clip, [0.1] if ChromaMotion or isGRAY else [0.1, 0])
else:
spatialBlur = clip
if SrchClipPP < 3:
srchClip = spatialBlur
else:
expr = 'x {a} + y < x {b} + x {a} - y > x {b} - x y + 2 / ? ?'.format(a=7*i, b=2*i)
srchClip = core.std.Expr([spatialBlur, clip], [expr] if ChromaMotion or isGRAY else [expr, ''])
analyse_args = dict(blksize=meBlksz, overlap=Overlap, search=meAlg, searchparam=meAlgPar, pelsearch=meSubpel, truemotion=meTM, _lambda=Lambda, lsad=LSAD, pnew=PNew, plevel=PLevel, _global=GlobalMotion, dct=DCT, chroma=ChromaMotion)
    recalculate_args = dict(blksize=Overlap, overlap=Overlap // 2, search=meAlg, searchparam=meAlgPar, truemotion=meTM, _lambda=Lambda // 4, pnew=PNew, dct=DCT, chroma=ChromaMotion)
srchSuper = S(DitherLumaRebuild(srchClip, s0=1, chroma=ChromaMotion), pel=meSubpel, sharp=1, rfilter=4, hpad=hpad, vpad=vpad, chroma=ChromaMotion)
if (maxTR > 0) and (degrainTR < 4 or postTR < 4):
bVec1 = A(srchSuper, isb=True, delta=1, **analyse_args)
fVec1 = A(srchSuper, isb=False, delta=1, **analyse_args)
if rec:
bVec1 = R(srchSuper, bVec1, **recalculate_args)
fVec1 = R(srchSuper, fVec1, **recalculate_args)
if maxTR > 1:
bVec2 = A(srchSuper, isb=True, delta=2, **analyse_args)
fVec2 = A(srchSuper, isb=False, delta=2, **analyse_args)
if rec:
bVec2 = R(srchSuper, bVec2, **recalculate_args)
fVec2 = R(srchSuper, fVec2, **recalculate_args)
if maxTR > 2:
bVec3 = A(srchSuper, isb=True, delta=3, **analyse_args)
fVec3 = A(srchSuper, isb=False, delta=3, **analyse_args)
if rec:
bVec3 = R(srchSuper, bVec3, **recalculate_args)
fVec3 = R(srchSuper, fVec3, **recalculate_args)
if degrainTR > 3:
vmulti1 = mvmulti.Analyze(srchSuper, tr=degrainTR, **analyse_args)
if rec:
vmulti1 = mvmulti.Recalculate(srchSuper, vmulti1, tr=degrainTR, **recalculate_args)
if postTR > 3:
vmulti2 = mvmulti.Analyze(srchSuper, tr=postTR, **analyse_args)
if rec:
vmulti2 = mvmulti.Recalculate(srchSuper, vmulti2, tr=postTR, **recalculate_args)
#---------------------------------------
# Degrain
# "spat" is a prefiltered clip which is used to limit the effect of the 1st MV-denoise stage.
if degrainTR > 0:
limitSigma *= i
s2 = limitSigma * 0.625
s3 = limitSigma * 0.375
s4 = limitSigma * 0.250
spat = core.fft3dfilter.FFT3DFilter(clip, planes=fPlane, sigma=limitSigma, sigma2=s2, sigma3=s3, sigma4=s4, bt=3, bw=limitBlksz, bh=limitBlksz, ncpu=fftThreads)
spatD = core.std.MakeDiff(clip, spat)
# First MV-denoising stage. Usually here's some temporal-medianfiltering going on.
# For simplicity, we just use MDegrain.
if degrainTR > 0:
supero = S(clip, pel=meSubpel, sharp=SubPelInterp, levels=1, rfilter=1, hpad=hpad, vpad=vpad, chroma=ChromaNoise)
if degrainTR < 2:
NR1 = D1(clip, supero, bVec1, fVec1, plane=degrainPlane, thsad=thSAD1, thscd1=thSCD1, thscd2=thSCD2)
elif degrainTR < 3:
NR1 = D2(clip, supero, bVec1, fVec1, bVec2, fVec2, plane=degrainPlane, thsad=thSAD1, thscd1=thSCD1, thscd2=thSCD2)
elif degrainTR < 4:
NR1 = D3(clip, supero, bVec1, fVec1, bVec2, fVec2, bVec3, fVec3, plane=degrainPlane, thsad=thSAD1, thscd1=thSCD1, thscd2=thSCD2)
else:
NR1 = mvmulti.DegrainN(clip, supero, vmulti1, tr=degrainTR, plane=degrainPlane, thsad=thSAD1, thscd1=thSCD1, thscd2=thSCD2)
# Limit NR1 to not do more than what "spat" would do.
if degrainTR > 0:
NR1D = core.std.MakeDiff(clip, NR1)
expr = 'x abs y abs < x y ?' if isFLOAT else 'x {} - abs y {} - abs < x y ?'.format(mid, mid)
DD = core.std.Expr([spatD, NR1D], [expr])
NR1x = core.std.MakeDiff(clip, DD, [0])
# Second MV-denoising stage. We use MDegrain.
if degrainTR > 0:
NR1x_super = S(NR1x, pel=meSubpel, sharp=SubPelInterp, levels=1, rfilter=1, hpad=hpad, vpad=vpad, chroma=ChromaNoise)
if degrainTR < 2:
NR2 = D1(NR1x, NR1x_super, bVec1, fVec1, plane=degrainPlane, thsad=thSAD2, thscd1=thSCD1, thscd2=thSCD2)
elif degrainTR < 3:
NR2 = D2(NR1x, NR1x_super, bVec1, fVec1, bVec2, fVec2, plane=degrainPlane, thsad=thSAD2, thscd1=thSCD1, thscd2=thSCD2)
elif degrainTR < 4:
NR2 = D3(NR1x, NR1x_super, bVec1, fVec1, bVec2, fVec2, bVec3, fVec3, plane=degrainPlane, thsad=thSAD2, thscd1=thSCD1, thscd2=thSCD2)
else:
NR2 = mvmulti.DegrainN(NR1x, NR1x_super, vmulti1, tr=degrainTR, plane=degrainPlane, thsad=thSAD2, thscd1=thSCD1, thscd2=thSCD2)
else:
NR2 = clip
NR2 = RG(NR2, mode=1) # Filter to remove last bits of dancing pixels, YMMV.
#---------------------------------------
# post FFT
if postTR > 0:
fullSuper = S(NR2, pel=meSubpel, sharp=SubPelInterp, levels=1, rfilter=1, hpad=hpad, vpad=vpad, chroma=ChromaNoise)
if postTR > 0:
if postTR == 1:
noiseWindow = core.std.Interleave([C(NR2, fullSuper, fVec1, thscd1=thSCD1, thscd2=thSCD2), NR2,
C(NR2, fullSuper, bVec1, thscd1=thSCD1, thscd2=thSCD2)])
elif postTR == 2:
noiseWindow = core.std.Interleave([C(NR2, fullSuper, fVec2, thscd1=thSCD1, thscd2=thSCD2),
C(NR2, fullSuper, fVec1, thscd1=thSCD1, thscd2=thSCD2), NR2,
C(NR2, fullSuper, bVec1, thscd1=thSCD1, thscd2=thSCD2),
C(NR2, fullSuper, bVec2, thscd1=thSCD1, thscd2=thSCD2)])
elif postTR == 3:
noiseWindow = core.std.Interleave([C(NR2, fullSuper, fVec3, thscd1=thSCD1, thscd2=thSCD2),
C(NR2, fullSuper, fVec2, thscd1=thSCD1, thscd2=thSCD2),
C(NR2, fullSuper, fVec1, thscd1=thSCD1, thscd2=thSCD2), NR2,
C(NR2, fullSuper, bVec1, thscd1=thSCD1, thscd2=thSCD2),
C(NR2, fullSuper, bVec2, thscd1=thSCD1, thscd2=thSCD2),
C(NR2, fullSuper, bVec3, thscd1=thSCD1, thscd2=thSCD2)])
else:
noiseWindow = mvmulti.Compensate(NR2, fullSuper, vmulti2, thscd1=thSCD1, thscd2=thSCD2, tr=postTR)
else:
noiseWindow = NR2
if postFFT == 3:
dnWindow = core.dfttest.DFTTest(noiseWindow, sigma=postSigma*4, tbsize=postTD, planes=fPlane)
elif postFFT == 4:
        dnWindow = haf.KNLMeansCL(noiseWindow, d=postTR, a=2, h=postSigma/2, device_id=knlDevId) if ChromaNoise else noiseWindow.knlm.KNLMeansCL(d=postTR, a=2, h=postSigma/2, device_id=knlDevId)
elif postFFT > 0:
dnWindow = core.fft3dfilter.FFT3DFilter(noiseWindow, sigma=postSigma*i, planes=fPlane, bt=postTD, ncpu=fftThreads)
else:
dnWindow = noiseWindow
if postTR > 0:
dnWindow = dnWindow[postTR::postTD]
return ContraSharpening(dnWindow, clip, rad)
def mClean(clip, thSAD=400, chroma=True, sharp=10, rn=14, deband=0, depth=0, strength=20, outbits=None, icalc=True, rgmode=18):
"""
From: https://forum.doom9.org/showthread.php?t=174804 by burfadel
mClean spatio/temporal denoiser
+++ Description +++
Typical spatial filters work by removing large variations in the image on a small scale, reducing noise but also making the image less
    sharp or temporally stable. mClean removes noise whilst retaining as much detail as possible, as well as providing optional image enhancement.
mClean works primarily in the temporal domain, although there is some spatial limiting.
Chroma is processed a little differently to luma for optimal results.
Chroma processing can be disabled with chroma = False.
+++ Artifacts +++
Spatial picture artifacts may remain as removing them is a fine balance between removing the unwanted artifact whilst not removing detail.
    Additional dering/dehalo/deblock filters may be required, but should ONLY be used if required, due to the detail loss/artifact removal balance.
+++ Sharpening +++
Applies a modified unsharp mask to edges and major detected detail. Range of normal sharpening is 0-20. There are 4 additional settings,
    21-24, that provide 'overboost' sharpening. Overboost sharpening is typically only suitable for high definition, high quality sources.
Actual sharpening calculation is scaled based on resolution.
+++ ReNoise +++
ReNoise adds back some of the removed luma noise. Re-adding original noise would be counterproductive, therefore ReNoise modifies this noise
    both spatially and temporally. The result of this modification is that the noise becomes much nicer and its impact on compressibility is greatly
    reduced. It is not applied to areas where sharpening occurs, as that would be counterproductive. Settings range from 0 to 20.
    The strength of renoise is affected by the amount of original noise removed and how this noise varies between frames.
    Its main purpose is to reduce the 'flatness' that occurs with any form of effective denoising.
+++ Deband +++
This will perceptibly improve the quality of the image by reducing banding effect and adding a small amount of temporally stabilised grain
to both luma and chroma. The settings are not adjustable as the default settings are suitable for most cases without having a large effect
on compressibility. 0 = disabled, 1 = deband only, 2 = deband and veed
+++ Depth +++
This applies a modified warp sharpening on the image that may be useful for certain things, and can improve the perception of image depth.
    Settings range from 0 to 5. This function will distort the image; for animation a setting of 1 or 2 can be beneficial to improve lines.
+++ Strength +++
    The strength of the denoising effect can be adjusted using this parameter. It ranges from a 20 percent denoising effect at strength 0 up to
    100 percent at strength 20. This function works by blending a scaled percentage of the original image with the processed image.
+++ Outbits +++
Specifies the bits per component (bpc) for the output for processing by additional filters. It will also be the bpc that mClean will process.
If you output at a higher bpc keep in mind that there may be limitations to what subsequent filters and the encoder may support.
"""
# New parameter icalc, set to True to enable pure integer processing for faster speed. (Ignored if input is of float sample type)
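    # Illustrative usage (a sketch only; "src" is a hypothetical YUV clip):
    #   out = mClean(src)                              # default spatio/temporal denoise
    #   out = mClean(src, thSAD=600, sharp=12, rn=10)  # stronger denoise, some sharpening and renoise
    #   out = mClean(src, chroma=False, outbits=16)    # luma-only processing, 16-bit output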
defH = max(clip.height, clip.width // 4 * 3) # Resolution calculation for auto blksize settings
sharp = min(max(sharp, 0), 24) # Sharp multiplier
rn = min(max(rn, 0), 20) # Luma ReNoise strength
deband = min(max(deband, 0), 5) # Apply deband/veed
depth = min(max(depth, 0), 5) # Depth enhancement
strength = min(max(strength, 0), 20) # Strength of denoising
bd = clip.format.bits_per_sample
isFLOAT = clip.format.sample_type == vs.FLOAT
icalc = False if isFLOAT else icalc
S = core.mv.Super if icalc else core.mvsf.Super
A = core.mv.Analyse if icalc else core.mvsf.Analyse
R = core.mv.Recalculate if icalc else core.mvsf.Recalculate
if not isinstance(clip, vs.VideoNode) or clip.format.color_family != vs.YUV:
raise TypeError("mClean: This is not a YUV clip!")
if outbits is None: # Output bits, default input depth
outbits = bd
if deband or depth:
outbits = min(outbits, 16)
RE = core.rgsf.Repair if outbits == 32 else core.rgvs.Repair
RG = core.rgsf.RemoveGrain if outbits == 32 else core.rgvs.RemoveGrain
sc = 8 if defH > 2880 else 4 if defH > 1440 else 2 if defH > 720 else 1
i = 0.00392 if outbits == 32 else 1 << (outbits - 8)
peak = 1.0 if outbits == 32 else (1 << outbits) - 1
bs = 16 if defH / sc > 360 else 8
ov = 6 if bs > 12 else 2
pel = 1 if defH > 720 else 2
    truemotion = defH <= 720
lampa = 777 * (bs ** 2) // 64
depth2 = -depth*3
depth = depth*2
if sharp > 20:
sharp += 30
elif defH <= 2500:
sharp = 15 + defH * sharp * 0.0007
else:
sharp = 50
# Denoise preparation
c = core.vcmod.Median(clip, plane=[0, 1, 1]) if chroma else clip
# Temporal luma noise filter
if not (isFLOAT or icalc):
c = c.fmtc.bitdepth(flt=1)
cy = core.std.ShufflePlanes(c, [0], vs.GRAY)
super1 = S(c if chroma else cy, hpad=bs, vpad=bs, pel=pel, rfilter=4, sharp=1)
super2 = S(c if chroma else cy, hpad=bs, vpad=bs, pel=pel, rfilter=1, levels=1)
analyse_args = dict(blksize=bs, overlap=ov, search=5, truemotion=truemotion)
recalculate_args = dict(blksize=bs, overlap=ov, search=5, truemotion=truemotion, thsad=180, _lambda=lampa)
# Analysis
bvec4 = R(super1, A(super1, isb=True, delta=4, **analyse_args), **recalculate_args) if not icalc else None
bvec3 = R(super1, A(super1, isb=True, delta=3, **analyse_args), **recalculate_args)
bvec2 = R(super1, A(super1, isb=True, delta=2, badsad=1100, lsad=1120, **analyse_args), **recalculate_args)
bvec1 = R(super1, A(super1, isb=True, delta=1, badsad=1500, lsad=980, badrange=27, **analyse_args), **recalculate_args)
fvec1 = R(super1, A(super1, isb=False, delta=1, badsad=1500, lsad=980, badrange=27, **analyse_args), **recalculate_args)
fvec2 = R(super1, A(super1, isb=False, delta=2, badsad=1100, lsad=1120, **analyse_args), **recalculate_args)
fvec3 = R(super1, A(super1, isb=False, delta=3, **analyse_args), **recalculate_args)
fvec4 = R(super1, A(super1, isb=False, delta=4, **analyse_args), **recalculate_args) if not icalc else None
# Applying cleaning
if not icalc:
clean = core.mvsf.Degrain4(c if chroma else cy, super2, bvec1, fvec1, bvec2, fvec2, bvec3, fvec3, bvec4, fvec4, thsad=thSAD)
else:
clean = core.mv.Degrain3(c if chroma else cy, super2, bvec1, fvec1, bvec2, fvec2, bvec3, fvec3, thsad=thSAD)
if c.format.bits_per_sample != outbits:
c = c.fmtc.bitdepth(bits=outbits, dmode=1)
cy = cy.fmtc.bitdepth(bits=outbits, dmode=1)
clean = clean.fmtc.bitdepth(bits=outbits, dmode=1)
uv = core.std.MergeDiff(clean, core.tmedian.TemporalMedian(core.std.MakeDiff(c, clean, [1, 2]), 1, [1, 2]), [1, 2]) if chroma else c
clean = core.std.ShufflePlanes(clean, [0], vs.GRAY) if clean.format.num_planes != 1 else clean
# Post clean, pre-process deband
filt = core.std.ShufflePlanes([clean, uv], [0, 1, 2], vs.YUV)
if deband:
filt = filt.f3kdb.Deband(range=16, preset="high" if chroma else "luma", grainy=defH/15, grainc=defH/16 if chroma else 0, output_depth=outbits)
clean = core.std.ShufflePlanes(filt, [0], vs.GRAY)
filt = core.vcmod.Veed(filt) if deband == 2 else filt
# Spatial luma denoising
clean2 = RG(clean, rgmode)
# Unsharp filter for spatial detail enhancement
if sharp:
if sharp <= 50:
clsharp = core.std.MakeDiff(clean, muf.Blur(clean2, amountH=0.08+0.03*sharp))
else:
clsharp = core.std.MakeDiff(clean, clean2.tcanny.TCanny(sigma=(sharp-46)/4, mode=-1))
clsharp = core.std.MergeDiff(clean2, RE(clsharp.tmedian.TemporalMedian(), clsharp, 12))
# If selected, combining ReNoise
noise_diff = core.std.MakeDiff(clean2, cy)
if rn:
expr = "x {a} < 0 x {b} > {p} 0 x {c} - {p} {a} {d} - / * - ? ?".format(a=32*i, b=45*i, c=35*i, d=65*i, p=peak)
clean1 = core.std.Merge(clean2, core.std.MergeDiff(clean2, Tweak(noise_diff.tmedian.TemporalMedian(), cont=1.008+0.00016*rn)), 0.3+rn*0.035)
clean2 = core.std.MaskedMerge(clean2, clean1, core.std.Expr([core.std.Expr([clean, clean.std.Invert()], 'x y min')], [expr]))
# Combining spatial detail enhancement with spatial noise reduction using prepared mask
noise_diff = noise_diff.std.Binarize().std.Invert()
clean2 = core.std.MaskedMerge(clean2, clsharp if sharp else clean, core.std.Expr([noise_diff, clean.std.Sobel()], 'x y max'))
# Combining result of luma and chroma cleaning
output = core.std.ShufflePlanes([clean2, filt], [0, 1, 2], vs.YUV)
output = core.std.Merge(c, output, 0.2+0.04*strength) if strength < 20 else output
return core.std.MergeDiff(output, core.std.MakeDiff(output.warp.AWarpSharp2(128, 3, 1, depth2, 1), output.warp.AWarpSharp2(128, 2, 1, depth, 1))) if depth else output
def STPressoHD(clip, limit=4, bias=20, tlimit=4, tbias=40, conv=[1]*25, thSAD=400, tr=2, back=2, rec=False, chroma=True, analyse_args=None, recalculate_args=None):
"""
Original STPresso by Didée: https://forum.doom9.org/showthread.php?t=163819
Dampen the grain just a little, to keep the original look
Spatial part uses a 5×5 BoxFilter / Temporal part uses MDegrain
Args:
limit (int) - The spatial part won't change a pixel more than this.
bias (int) - The percentage of the spatial filter that will apply.
tlimit (int) - The temporal part won't change a pixel more than this.
tbias (int) - The percentage of the temporal filter that will apply.
conv (int[]) - Convolution kernel used in the spatial part.
thSAD (int) - Soft threshold of block sum absolute differences.
A low value can result in staggered denoising,
a high value can result in ghosting and artifacts.
tr (int) - Temporal radius of the denoising (0-24); larger values are stronger. 0 means no temporal processing.
back (int) - After all changes have been calculated, reduce all pixel changes by this value (shift "back" towards original value)
rec (bool) - Recalculate the motion vectors to obtain more precision.
chroma (bool) - Whether to process chroma.
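Examples (illustrative settings, not canonical recommendations):
STPressoHD(clip)                      # mild spatio-temporal grain stabilizing with the defaults
STPressoHD(clip, limit=2, tlimit=2)   # tighter per-pixel change limits, closer to the source look
STPressoHD(clip, tr=0)                # spatial-only mode, no motion analysis
STPressoHD(clip, tr=6, rec=True)      # wider temporal radius (float input only) plus vector refinement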
"""
if not isinstance(clip, vs.VideoNode) or clip.format.color_family not in [vs.GRAY, vs.YUV]:
raise TypeError('STPressoHD: This is not a GRAY or YUV clip!')
isFLOAT = clip.format.sample_type == vs.FLOAT
isGRAY = clip.format.color_family == vs.GRAY
S = core.mvsf.Super if isFLOAT else core.mv.Super
A = core.mvsf.Analyse if isFLOAT else core.mv.Analyse
R = core.mvsf.Recalculate if isFLOAT else core.mv.Recalculate
D1 = core.mvsf.Degrain1 if isFLOAT else core.mv.Degrain1
D2 = core.mvsf.Degrain2 if isFLOAT else core.mv.Degrain2
D3 = core.mvsf.Degrain3 if isFLOAT else core.mv.Degrain3
bia = min(max(bias/100, 0), 1)
tbia = min(max(tbias/100, 0), 1)
bs = 32 if clip.width > 2400 else 16 if clip.width > 960 else 8
pel = 1 if clip.width > 960 else 2
truemotion = False if clip.width > 960 else True
chroma = False if isGRAY else chroma
plane = 4 if chroma else 0
planes = [0, 1, 2] if chroma else 0
if analyse_args is None:
analyse_args = dict(blksize=bs, overlap=bs//2, search=5, chroma=chroma, truemotion=truemotion)
if recalculate_args is None:
recalculate_args = dict(blksize=bs//2, overlap=bs//4, search=5, chroma=chroma, truemotion=truemotion)
if tr > 3 and not isFLOAT:
raise TypeError("STPressoHD: DegrainN is only available in float")
if tr > 0:
super_b = S(DitherLumaRebuild(clip, 1), hpad=bs, vpad=bs, pel=pel, sharp=1, rfilter=4)
super_rend = S(clip, hpad=bs, vpad=bs, pel=pel, levels=1, rfilter=1)
if tr < 4:
bv1 = A(super_b, isb=True, delta=1, **analyse_args)
fv1 = A(super_b, isb=False, delta=1, **analyse_args)
if tr > 1:
bv2 = A(super_b, isb=True, delta=2, **analyse_args)
fv2 = A(super_b, isb=False, delta=2, **analyse_args)
if tr > 2:
bv3 = A(super_b, isb=True, delta=3, **analyse_args)
fv3 = A(super_b, isb=False, delta=3, **analyse_args)
else:
vec = mvmulti.Analyze(super_b, tr=tr, **analyse_args)
if rec:
if tr < 4:
bv1 = R(super_b, bv1, **recalculate_args)
fv1 = R(super_b, fv1, **recalculate_args)
if tr > 1:
bv2 = R(super_b, bv2, **recalculate_args)
fv2 = R(super_b, fv2, **recalculate_args)
if tr > 2:
bv3 = R(super_b, bv3, **recalculate_args)
fv3 = R(super_b, fv3, **recalculate_args)
else:
vec = mvmulti.Recalculate(super_b, vec, tr=tr, **recalculate_args)
bd = clip.format.bits_per_sample
peak = 1.0 if isFLOAT else (1 << bd) - 1
i = 0.00392 if isFLOAT else 1 << (bd - 8)
lim = scale(limit, peak)
tlim = scale(tlimit, peak)
back = scale(back, peak)
LIM = lim/bia - i if limit > 0 else scale(1/bia, peak)
TLIM = tlim/tbia - i if tlimit > 0 else scale(1/tbia, peak)
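# Limiter expressions: apply only bias/tbias percent of the spatial/temporal change and clamp each pixel change to limit/tlimit;
# when limit/tlimit <= 0, only a one-step (1 at 8-bit scale) nudge toward the filtered value is allowed once the difference exceeds LIM/TLIM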
sexpr = 'x y - abs {i} < x x {L1} + y < x {L2} + x {L1} - y > x {L2} - x {j} * y {B} * + ? ? ?'.format(i=i, L1=LIM, L2=lim, j=1-bia, B=bia) if limit > 0 else 'x y - abs {} < x x x y > {} -{} ? - ?'.format(LIM, i, i)
texpr = 'x y - abs {i} < x x {T1} + y < x {T2} + x {T1} - y > x {T2} - x {j} * y {T} * + ? ? ?'.format(i=i, T1=TLIM, T2=tlim, j=1-tbia, T=tbia) if tlimit > 0 else 'x y - abs {} < x x x y > {} -{} ? - ?'.format(TLIM, i, i)
if isinstance(conv, list):
spat = core.std.Convolution(clip, matrix=conv, planes=planes)
elif callable(conv):
spat = conv(clip)
else:
raise TypeError("STPressoHD: conv must be a list or a function!")
down = core.std.Expr([clip, spat], [sexpr if i in planes else '' for i in range(clip.format.num_planes)])
if tr > 0:
if tr == 1:
temp = D1(spat, super_rend, bv1, fv1, thSAD, plane=plane)
elif tr == 2:
temp = D2(spat, super_rend, bv1, fv1, bv2, fv2, thSAD, plane=plane)
elif tr == 3:
temp = D3(spat, super_rend, bv1, fv1, bv2, fv2, bv3, fv3, thSAD, plane=plane)
else:
temp = mvmulti.DegrainN(spat, super_rend, vec, tr=tr, thsad=thSAD, plane=plane)
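# diff = spatially-limited clip plus the temporal filter's change relative to the spatial reference; texpr then limits that temporal change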
diff = core.std.Expr([down, temp, spat], ['x y + z -' if i in planes else '' for i in range(clip.format.num_planes)])
down = core.std.Expr([down, diff], [texpr if i in planes else '' for i in range(clip.format.num_planes)])
if back > 0:
bexpr = 'x {BK} + y < x {BK} + x {BK} - y > x {BK} - y ? ?'.format(BK=back)
return core.std.Expr([down, clip], [bexpr if i in planes else '' for i in range(clip.format.num_planes)])
else:
return down
def MLDegrain(clip, scale1=1.5, scale2=2, thSAD=400, tr=3, rec=False, chroma=True, soft=[0]*3):
"""
Multi-Level MDegrain
Multi level in the sense of using multiple scalings.
The observation was that, after downscaling the source to a smallish resolution, a vanilla MDegrain can produce a very stable result.
Hence, the obvious approach is to first make a small-but-stable denoised clip, and then work its way back up to the original resolution.
From: https://forum.doom9.org/showthread.php?p=1512413 by Didée
Args:
scale1 (float) - Scaling factor between original and medium scale
scale2 (float) - Scaling factor between medium and small scale
tr (int) - Temporal radius of the denoising (1-24); larger values are stronger.
thSAD (int) - Soft threshold of block sum absolute differences.
A low value can result in staggered denoising,
a high value can result in ghosting and artifacts.
rec (bool) - Recalculate the motion vectors to obtain more precision.
chroma (bool) - Whether to process chroma.
soft (float[]) - [small, medium, original], each ranging from 0 to 1; 0 means disabled, 1 means 100% strength.
Do slight sharpening where the motion match is good, slight blurring where the motion match is bad.
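Examples (illustrative settings, not from the original thread):
MLDegrain(clip)                              # defaults: tr=3, two downscaled levels
MLDegrain(clip, thSAD=250, tr=2, rec=True)   # lighter denoising with refined motion vectors
MLDegrain(clip, soft=[1, 0.5, 0])            # full softening/sharpening on the small scale, half on the medium scale, none at the original scale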
"""
isFLOAT = clip.format.sample_type == vs.FLOAT
if not isinstance(clip, vs.VideoNode) or clip.format.color_family not in [vs.GRAY, vs.YUV]:
raise TypeError('MLDegrain: This is not a GRAY or YUV clip!')
if tr > 3 and not isFLOAT:
raise TypeError("MLDegrain: DegrainN is only available in float")
w = clip.width
h = clip.height
w1 = m4(w / scale1)
h1 = m4(h / scale1)
w2 = m4(w1 / scale2)
h2 = m4(h1 / scale2)
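# Build a two-level pyramid: medium and small downscales plus the residual (detail) clips between neighbouring levels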
sm1 = clip.resize.Bicubic(w1, h1) # medium scale
sm2 = sm1.resize.Bicubic(w2, h2) # small scale
D12 = core.std.MakeDiff(sm2.resize.Bicubic(w1, h1), sm1) # residual of (small)<>(medium)
D10 = core.std.MakeDiff(sm1.resize.Bicubic(w, h), clip) # residual of (medium)<>(original)
lev2 = MLD_helper(sm2, sm2, tr, thSAD, rec, chroma, soft[0]) # Filter on small scale
up1 = lev2.resize.Bicubic(w1, h1)
up2 = up1.resize.Bicubic(w, h)
M1 = MLD_helper(D12, up1, tr, thSAD, rec, chroma, soft[1]) # Filter on medium scale
lev1 = core.std.MakeDiff(up1, M1)
up3 = lev1.resize.Bicubic(w, h)
M2 = MLD_helper(D10, up2, tr, thSAD, rec, chroma, soft[2]) # Filter on original scale
return core.std.MakeDiff(up3, M2)
def MLD_helper(clip, srch, tr, thSAD, rec, chroma, soft):
""" Helper function used in Multi-Level MDegrain"""
if not isinstance(clip, vs.VideoNode) or clip.format.color_family not in [vs.GRAY, vs.YUV]:
raise TypeError('MLD_helper: This is not a GRAY or YUV clip!')
isFLOAT = clip.format.sample_type == vs.FLOAT
isGRAY = clip.format.color_family == vs.GRAY
S = core.mvsf.Super if isFLOAT else core.mv.Super
A = core.mvsf.Analyse if isFLOAT else core.mv.Analyse
R = core.mvsf.Recalculate if isFLOAT else core.mv.Recalculate
D1 = core.mvsf.Degrain1 if isFLOAT else core.mv.Degrain1
D2 = core.mvsf.Degrain2 if isFLOAT else core.mv.Degrain2
D3 = core.mvsf.Degrain3 if isFLOAT else core.mv.Degrain3
bs = 32 if clip.width > 2400 else 16 if clip.width > 960 else 8
pel = 1 if clip.width > 960 else 2
truemotion = False if clip.width > 960 else True
chroma = False if isGRAY else chroma
planes = [0, 1, 2] if chroma else [0]
plane = 4 if chroma else 0
analyse_args = dict(blksize=bs, overlap=bs//2, search=5, chroma=chroma, truemotion=truemotion)
recalculate_args = dict(blksize=bs//2, overlap=bs//4, search=5, chroma=chroma, truemotion=truemotion)
sup1 = S(DitherLumaRebuild(srch, 1), hpad=bs, vpad=bs, pel=pel, sharp=1, rfilter=4)
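# soft > 0: the render super-clip is built from a mildly sharpened source (x + (x - blur), blur kernel chosen by resolution),
# so well-matched blocks get slight sharpening while poorly matched ones drift toward the blurred look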
if soft > 0:
if clip.width > 1280:
RG = core.std.Convolution(clip, matrix=[1, 1, 1, 1, 1, 1, 1, 1, 1], planes=planes)
elif clip.width > 640:
RG = core.std.Convolution(clip, matrix=[1, 2, 1, 2, 4, 2, 1, 2, 1], planes=planes)
else:
RG = MinBlur(clip, 1, planes)
RG = core.std.Merge(clip, RG, [soft] if chroma or isGRAY else [soft, 0]) if soft < 1 else RG
sup2 = S(core.std.Expr([clip, RG], ['x dup y - +'] if chroma or isGRAY else ['x dup y - +', '']), hpad=bs, vpad=bs, pel=pel, levels=1, rfilter=1)
else:
RG = clip
sup2 = S(clip, hpad=bs, vpad=bs, pel=pel, levels=1, rfilter=1)
if tr < 4:
bv1 = A(sup1, isb=True, delta=1, **analyse_args)
fv1 = A(sup1, isb=False, delta=1, **analyse_args)
if tr > 1:
bv2 = A(sup1, isb=True, delta=2, **analyse_args)
fv2 = A(sup1, isb=False, delta=2, **analyse_args)
if tr > 2:
bv3 = A(sup1, isb=True, delta=3, **analyse_args)
fv3 = A(sup1, isb=False, delta=3, **analyse_args)
else:
vec = mvmulti.Analyze(sup1, tr=tr, **analyse_args)
if rec:
if tr < 4:
bv1 = R(sup1, bv1, **recalculate_args)
fv1 = R(sup1, fv1, **recalculate_args)
if tr > 1:
bv2 = R(sup1, bv2, **recalculate_args)
fv2 = R(sup1, fv2, **recalculate_args)
if tr > 2:
bv3 = R(sup1, bv3, **recalculate_args)
fv3 = R(sup1, fv3, **recalculate_args)
else:
vec = mvmulti.Recalculate(sup1, vec, tr=tr, **recalculate_args)
if tr < 4:
if tr == 1:
return D1(RG, sup2, bv1, fv1, thsad=thSAD, plane=plane)
elif tr == 2:
return D2(RG, sup2, bv1, fv1, bv2, fv2, thsad=thSAD, plane=plane)
else:
return D3(RG, sup2, bv1, fv1, bv2, fv2, bv3, fv3, thsad=thSAD, plane=plane)
else:
return mvmulti.DegrainN(RG, sup2, vec, tr=tr, thsad=thSAD, plane=plane)
def NonlinUSM(clip, z=6.0, power=1.6, sstr=1, rad=9.0, ldmp=0.01, hdmp=0.01):
"""
From: https://forum.doom9.org/showthread.php?p=1555234 by Didée.
Non-linear Unsharp Masking, uses a wide-range Gaussian instead of a small-range kernel.
Args:
z (float) - zero point of sharpening.
power (float) - exponent for non-linear sharpening.
sstr (float) - strength of sharpening.
rad (float) - radius for gauss.
ldmp (float) - "low damp", damping for very small differences.
hdmp (float) - "high damp", this damping term has a larger effect than ldmp
when the sharp-difference is larger than 1, vice versa.
Examples:
NonlinUSM(power=4) # enhance: for low bitrate sources
NonlinUSM(z=3, power=4, sstr=1, rad=6) # enhance less
NonlinUSM(z=3, sstr=0.5, rad=9, power=1) # enhance less
NonlinUSM(z=6, sstr=1.5, rad=0.6).Sharpen(0.3) # sharpen: for hi-q sources
NonlinUSM(z=3, sstr=2.5, rad=0.6) # sharpen: less noise
NonlinUSM(z=6, power=1, sstr=1, rad=6) # unsharp
NonlinUSM(z=6, power=1, rad=2, sstr=0.7) # "smoothen" for noisy sources
NonlinUSM(z=6, power=1, rad=18, sstr=0.5) # smear: soft glow
NonlinUSM(z=6, power=4, sstr=1, rad=36) # local contrast
NonlinUSM(z=6, power=1, sstr=1, rad=36) # local contrast
NonlinUSM(z=16, power=4, sstr=18, rad=6) # B+W psychedelic
NonlinUSM(z=16, power=2, sstr=2, rad=36) # solarized
NonlinUSM(z=16, power=4, sstr=3, rad=6) # sepia/artistic
"""
ldmp = max(ldmp, 0)
hdmp = max(hdmp, 0)
bd = clip.format.bits_per_sample
isFLOAT = clip.format.sample_type == vs.FLOAT
i = 0.00392 if isFLOAT else 1 << (bd - 8)
xy = 'x y - {} /'.format(i) if bd != 8 else 'x y -'
color = clip.format.color_family
if not isinstance(clip, vs.VideoNode):
raise TypeError("NonlinUSM: This is not a clip!")
if color == vs.COMPAT:
raise TypeError("NonlinUSM: COMPAT color family is not supported!")
tmp = core.std.ShufflePlanes(clip, [0], vs.GRAY) if color in [vs.YUV, vs.YCOCG] else clip
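# Wide-range Gaussian approximated by downscaling by a factor of rad and scaling back up with a soft bicubic;
# the Expr below applies the non-linear, ldmp/hdmp-damped sharpening of that wide-range difference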
gauss = core.resize.Bicubic(tmp, m4(clip.width/rad), m4(clip.height/rad)).resize.Bicubic(clip.width, clip.height, filter_param_a=1, filter_param_b=0)
expr = 'x y = x dup {} dup dup dup abs {} / {} pow swap3 dup * dup {} + / swap2 abs {} + / * * {} * + ?'
tmp = core.std.Expr([tmp, gauss], [expr.format(xy, z, 1/power, ldmp, hdmp, sstr*z*i)])
return core.std.ShufflePlanes([tmp, clip], [0, 1, 2], color) if color in [vs.YUV, vs.YCOCG] else tmp
def DetailSharpen(clip, z=4, sstr=1.5, power=4, ldmp=1, mode=1, med=False):
"""
From: https://forum.doom9.org/showthread.php?t=163598
Didée: Wanna some sharpening that causes no haloing, without any edge masking?
Args:
z (float) - zero point.
sstr (float) - strength of non-linear sharpening.
power (float) - exponent of non-linear sharpening.
ldmp (float) - "low damp", to not over-enhance very small differences.
mode (int) - 0: gaussian kernel 1: box kernel
med (bool) - When True, median is used to achieve stronger sharpening.
Examples:
DetailSharpen() # Original DetailSharpen by Didée.
DetailSharpen(power=1.5, mode=0, med=True) # Mini-SeeSaw...just without See, and without Saw.
"""
ldmp = max(ldmp, 0)
bd = clip.format.bits_per_sample
isFLOAT = clip.format.sample_type == vs.FLOAT
i = 0.00392 if isFLOAT else 1 << (bd - 8)
xy = 'x y - {} /'.format(i) if bd != 8 else 'x y -'
color = clip.format.color_family
if not isinstance(clip, vs.VideoNode):
raise TypeError("DetailSharpen: This is not a clip!")
if color == vs.COMPAT:
raise TypeError("DetailSharpen: COMPAT color family is not supported!")
tmp = core.std.ShufflePlanes(clip, [0], vs.GRAY) if color in [vs.YUV, vs.YCOCG] else clip
if mode == 1:
blur = core.std.Convolution(tmp, matrix=[1, 1, 1, 1, 1, 1, 1, 1, 1])
else:
blur = core.std.Convolution(tmp, matrix=[1, 2, 1, 2, 4, 2, 1, 2, 1])
if med:
blur = blur.std.Median()
expr = 'x y = x dup {} dup dup abs {} / {} pow swap2 abs {} + / * {} * + ?'
tmp = core.std.Expr([tmp, blur], [expr.format(xy, z, 1/power, ldmp, sstr*z*i)])
return core.std.ShufflePlanes([tmp, clip], [0, 1, 2], color) if color in [vs.YUV, vs.YCOCG] else tmp
def Hysteria(clip, sstr=1.0, usemask=True, lthr=7, hthr=20, lcap=192, maxchg=255, minchg=0, showmask=False):
"""
Hysteria, a line darkening script by Scintilla
Args:
sstr (float) - This is a multiplicative factor for the amounts by which the pixels are darkened.
Ordinarily, each pixel is darkened by the difference between its luma value and the average
luma value of its brighter spatial neighbours. So if you want more darkening, increase this value.
usemask (bool) - Whether or not to apply the mask. If False, the entire image will have its edges darkened
instead of just the edges detected in the mask. Could be useful on some sources
(specifically, it will often make dark lines look thicker), but you will probably
want to stick to a lower value of strength if you choose to go that route.
lthr (float) - This is the threshold used for the noisy mask. Increase this value if your mask is picking up
too much noise around the edges, or decrease it if the mask is not being grown thick enough.
hthr (float) - This is the threshold used for the clean mask. Increase this value if your mask is picking up
too many weaker edges, or decrease it if the mask is not picking up enough.
lcap (float) - Luma cap, an idea swiped from FLD/VMToon. Any pixels brighter than this value will not be darkened at all,
no matter what the rest of the parameters are. This is useful if you have lighter edges that you do not want darkened.
0 will result in no darkening at all, while 255 will turn off the cap.
maxchg (float) - No pixel will be darkened by more than this amount, no matter how high you set the strength parameter.
This can be useful if you want to darken your weaker lines more without going overboard on the stronger ones.
0 will result in no darkening at all, while 255 will turn off the limiting.
minchg (float) - Another idea swiped from FLD/VMToon (though in those functions it was called "threshold").
Any pixels that would have been darkened by le