
SAE: WARNING, RETRAIN IS REQUIRED!

Fixed model sizes from the previous update.
Worked around a bug in the ML framework (Keras) that forced the model to be trained on random noise.

Converter: added blur on the same keys as sharpness; the old sharpen amount [0..100] becomes a single signed blur/sharpen amount [-100..100].
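
In the interactive converter the 'y'/'h' keys that previously stepped sharpen_amount now step the signed blursharpen_amount: positive values sharpen, negative values blur. A minimal standalone sketch of that dispatch (an illustration of the idea, not the repo's exact code; see blursharpen_func in the diff below):

    import cv2

    def apply_blursharpen(img, amount, kernel_size=3):
        # amount in [-100..100]: > 0 sharpens (unsharp mask), < 0 blends toward a gaussian blur
        if kernel_size % 2 == 0:
            kernel_size += 1  # cv2.GaussianBlur needs an odd kernel size
        if amount == 0:
            return img
        blur = cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
        if amount > 0:
            # unsharp mask: img + 0.5*amount*(img - blur)
            return cv2.addWeighted(img, 1.0 + 0.5 * amount, blur, -0.5 * amount, 0)
        k = min(-amount / 50.0, 1.0)  # clamped linear blend toward the blur (the repo's curve differs slightly)
        return cv2.addWeighted(img, 1.0 - k, blur, k, 0)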

Added new model 'TrueFace'. This is a GAN model ported from https://github.com/NVlabs/FUNIT
The model produces near-zero morphing and a highly detailed face.
It has a higher failure rate than other models.
Keep the src and dst facesets in the same lighting conditions.
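
To support models that need the extracted SRC faceset at conversion time, the convert command gains an optional --training-data-src-dir argument (see the main.py hunk below). A hypothetical invocation, assuming the usual DeepFaceLab workspace layout and the existing --model flag:

    python main.py convert \
        --training-data-src-dir workspace/data_src/aligned \
        --input-dir workspace/data_dst \
        --output-dir workspace/data_dst/merged \
        --aligned-dir workspace/data_dst/aligned \
        --model-dir workspace/model \
        --model TrueFace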
iperov committed Sep 19, 2019
1 parent 201b762 commit dc11ec32be8be79b4e4abf5d9e27ffa609e2d0d5
@@ -302,8 +302,8 @@ def ConvertMaskedFace (predictor_func, predictor_input_shape, cfg, frame_info, i
             k_size *= 2
             out_face_bgr = imagelib.LinearMotionBlur (out_face_bgr, k_size , frame_info.motion_deg)

-        if cfg.sharpen_mode != 0 and cfg.sharpen_amount != 0:
-            out_face_bgr = cfg.sharpen_func ( out_face_bgr, cfg.sharpen_mode, 3, cfg.sharpen_amount)
+        if cfg.blursharpen_amount != 0:
+            out_face_bgr = cfg.blursharpen_func ( out_face_bgr, cfg.sharpen_mode, 3, cfg.blursharpen_amount)

         new_out = cv2.warpAffine( out_face_bgr, face_mat, img_size, img_bgr.copy(), cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT )
         out_img = np.clip( img_bgr*(1-img_face_mask_aaa) + (new_out*img_face_mask_aaa) , 0, 1.0 )
@@ -18,7 +18,7 @@ def __init__(self, type=0):
         self.type = type

         self.superres_func = None
-        self.sharpen_func = None
+        self.blursharpen_func = None
         self.fanseg_input_size = None
         self.fanseg_extract_func = None
         self.ebs_ct_func = None
@@ -29,7 +29,7 @@ def __init__(self, type=0):
         #default changeable params
         self.super_resolution_mode = 0
         self.sharpen_mode = 0
-        self.sharpen_amount = 0
+        self.blursharpen_amount = 0

     def copy(self):
         return copy.copy(self)
@@ -43,7 +43,7 @@ def ask_settings(self):
         self.sharpen_mode = io.input_int (s, 0, valid_list=self.sharpen_dict.keys(), help_message="Enhance details by applying sharpen filter.")

         if self.sharpen_mode != 0:
-            self.sharpen_amount = np.clip ( io.input_int ("Choose sharpen amount [0..100] (skip:%d) : " % 10, 10), 0, 100 )
+            self.blursharpen_amount = np.clip ( io.input_int ("Choose blur/sharpen amount [-100..100] (skip:0) : ", 0), -100, 100 )

         s = """Choose super resolution mode: \n"""
         for key in self.super_res_dict.keys():
@@ -55,8 +55,8 @@ def toggle_sharpen_mode(self):
         a = list( self.sharpen_dict.keys() )
         self.sharpen_mode = a[ (a.index(self.sharpen_mode)+1) % len(a) ]

-    def add_sharpen_amount(self, diff):
-        self.sharpen_amount = np.clip ( self.sharpen_amount+diff, 0, 100)
+    def add_blursharpen_amount(self, diff):
+        self.blursharpen_amount = np.clip ( self.blursharpen_amount+diff, -100, 100)

     def toggle_super_resolution_mode(self):
         a = list( self.super_res_dict.keys() )
@@ -68,7 +68,7 @@ def __eq__(self, other):

         if isinstance(other, ConverterConfig):
             return self.sharpen_mode == other.sharpen_mode and \
-                   (self.sharpen_mode == 0 or ((self.sharpen_mode == other.sharpen_mode) and (self.sharpen_amount == other.sharpen_amount) )) and \
+                   self.blursharpen_amount == other.blursharpen_amount and \
                    self.super_resolution_mode == other.super_resolution_mode

         return False
@@ -77,8 +77,7 @@ def __eq__(self, other):
     def to_string(self, filename):
         r = ""
         r += f"sharpen_mode : {self.sharpen_dict[self.sharpen_mode]}\n"
-        if self.sharpen_mode != 0:
-            r += f"sharpen_amount : {self.sharpen_amount}\n"
+        r += f"blursharpen_amount : {self.blursharpen_amount}\n"
         r += f"super_resolution_mode : {self.super_res_dict[self.super_resolution_mode]}\n"
         return r

@@ -183,6 +183,55 @@
     [0.205322  , 31.408738  , -21.903670  ],
     [-7.198266 , 30.844876  , -20.328022  ] ], dtype=np.float32)

+def convert_98_to_68(lmrks):
+    #jaw
+    result = [ lmrks[0] ]
+    for i in range(2,16,2):
+        result += [ ( lmrks[i] + (lmrks[i-1]+lmrks[i+1])/2 ) / 2 ]
+    result += [ lmrks[16] ]
+    for i in range(18,32,2):
+        result += [ ( lmrks[i] + (lmrks[i-1]+lmrks[i+1])/2 ) / 2 ]
+    result += [ lmrks[32] ]
+
+    #eyebrows averaging
+    result += [ lmrks[33],
+                (lmrks[34]+lmrks[41])/2,
+                (lmrks[35]+lmrks[40])/2,
+                (lmrks[36]+lmrks[39])/2,
+                (lmrks[37]+lmrks[38])/2,
+              ]
+
+    result += [ (lmrks[42]+lmrks[50])/2,
+                (lmrks[43]+lmrks[49])/2,
+                (lmrks[44]+lmrks[48])/2,
+                (lmrks[45]+lmrks[47])/2,
+                lmrks[46]
+              ]
+
+    #nose
+    result += list ( lmrks[51:60] )
+
+    #left eye (from our view)
+    result += [ lmrks[60],
+                lmrks[61],
+                lmrks[63],
+                lmrks[64],
+                lmrks[65],
+                lmrks[67] ]
+
+    #right eye
+    result += [ lmrks[68],
+                lmrks[69],
+                lmrks[71],
+                lmrks[72],
+                lmrks[73],
+                lmrks[75] ]
+
+    #mouth
+    result += list ( lmrks[76:96] )
+
+    return np.concatenate (result).reshape ( (68,2) )
+
 def transform_points(points, mat, invert=False):
     if invert:
         mat = cv2.invertAffineTransform (mat)
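
The convert_98_to_68 helper added above maps a 98-point landmark layout (the WFLW-style annotation, presumably what the new detector emits) onto the standard 68-point iBUG layout: jaw points are averaged with their neighbors, each eyebrow's two contour rows collapse into one, and the extra eyelid points are dropped. A quick shape check (illustrative only, with dummy data):

    import numpy as np

    lmrks_98 = np.random.rand(98, 2).astype(np.float32)  # dummy 98-point landmarks
    lmrks_68 = convert_98_to_68(lmrks_98)
    assert lmrks_68.shape == (68, 2)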
@@ -310,8 +359,8 @@ def alpha_to_color (img_alpha, color):
     result[:,:] = color

     return result * img_alpha


 def get_cmask (image_shape, lmrks, eyebrows_expand_mod=1.0):
     h,w,c = image_shape
@@ -361,7 +410,7 @@ def get_dists(name, thickness=0):
             s,e = d[name]
             result = dists[...,s:e]
             if thickness != 0:
-                result = np.abs(result)-thickness
+                result = np.abs(result)-thickness
             return np.min (result, axis=-1)

         return get_dists
@@ -371,7 +420,7 @@ def get_dists(name, thickness=0):
     l_brow = lmrks[22:27]
     r_brow = lmrks[17:22]
     mouth = lmrks[48:60]

     up_nose = np.concatenate( (lmrks[27:31], lmrks[33:34]) )
     down_nose = lmrks[31:36]
     nose = np.concatenate ( (up_nose, down_nose) )
@@ -400,7 +449,7 @@ def get_dists(name, thickness=0):

     mouth_fall_dist = w // 32
     mouth_thickness = max( w // 64, 1 )

     eyes_mask = gdf('eyes',eyes_thickness)
     eyes_mask = 1-np.clip( eyes_mask/ eyes_fall_dist, 0, 1)
     #eyes_mask = np.clip ( 1- ( np.sqrt( np.maximum(eyes_mask,0) ) / eyes_fall_dist ), 0, 1)
@@ -409,15 +458,15 @@ def get_dists(name, thickness=0):
     brows_mask = gdf('brows', brows_thickness)
     brows_mask = 1-np.clip( brows_mask / brows_fall_dist, 0, 1)
     #brows_mask = np.clip ( 1- ( np.sqrt( np.maximum(brows_mask,0) ) / brows_fall_dist ), 0, 1)

     mouth_mask = gdf('mouth', mouth_thickness)
     mouth_mask = 1-np.clip( mouth_mask / mouth_fall_dist, 0, 1)
     #mouth_mask = np.clip ( 1- ( np.sqrt( np.maximum(mouth_mask,0) ) / mouth_fall_dist ), 0, 1)

     def blend(a,b,k):
         x = np.clip ( 0.5+0.5*(b-a)/k, 0.0, 1.0 )
         return (a-b)*x+b - k*x*(1.0-x)

     #nose_mask = (a-b)*x+b - k*x*(1.0-x)
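
The blend helper above is the polynomial smooth minimum (the "smin" popularized by Inigo Quilez for SDF work): away from the crossover it returns min(a, b), and within a band of width k it rounds the crease, which is what lets the up_nose and down_nose distance fields join without a seam. A small check of that behavior (illustration, not repo code):

    import numpy as np

    def blend(a, b, k):
        x = np.clip(0.5 + 0.5 * (b - a) / k, 0.0, 1.0)
        return (a - b) * x + b - k * x * (1.0 - x)

    a = np.linspace(-2.0, 2.0, 9)
    b = np.zeros_like(a)
    print(np.round(blend(a, b, 0.5), 3))  # matches min(a, b) except near a == b
    print(np.minimum(a, b))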

@@ -426,7 +475,7 @@ def blend(a,b,k):

     nose_mask = blend ( gdf('up_nose', nose_thickness), gdf('down_nose', nose_thickness), nose_thickness*3 )
     nose_mask = 1-np.clip( nose_mask / nose_fall_dist, 0, 1)

     up_nose_mask = gdf('up_nose', nose_thickness)
     up_nose_mask = 1-np.clip( up_nose_mask / nose_fall_dist, 0, 1)
     #up_nose_mask = np.clip ( 1- ( np.cbrt( np.maximum(up_nose_mask,0) ) / nose_fall_dist ), 0, 1)
@@ -441,17 +490,17 @@ def blend(a,b,k):
     #nose_mask = down_nose_mask

     #nose_mask = np.zeros_like(nose_mask)

     eyes_mask = eyes_mask * (1-mouth_mask)
     nose_mask = nose_mask * (1-eyes_mask)

     hull_mask = hull[...,0].copy()
     hull_mask = hull_mask * (1-eyes_mask) * (1-brows_mask) * (1-nose_mask) * (1-mouth_mask)

     #eyes_mask = eyes_mask * (1-nose_mask)

     mouth_mask= mouth_mask * (1-nose_mask)

     brows_mask = brows_mask * (1-nose_mask)* (1-eyes_mask )

     hull_mask = alpha_to_color(hull_mask, (0,1,0) )
@@ -613,5 +662,5 @@ def estimate_pitch_yaw_roll(aligned_256px_landmarks):
     pitch, yaw, roll = mathlib.rotationMatrixToEulerAngles( cv2.Rodrigues(rotation_vector)[0] )
     pitch = np.clip ( pitch/1.30, -1.0, 1.0 )
     yaw = np.clip ( yaw / 1.11, -1.0, 1.0 )
-    roll = np.clip ( roll/3.15, -1.0, 1.0 )
+    roll = np.clip ( roll/3.15, -1.0, 1.0 ) #todo radians
     return -pitch, yaw, roll
@@ -154,7 +154,7 @@ def progress_bar_close(self):
             self.pg_bar = None
         else: print("progress_bar not set.")

-    def progress_bar_generator(self, data, desc, leave=True, initial=0):
+    def progress_bar_generator(self, data, desc=None, leave=True, initial=0):
         self.pg_bar = tqdm( data, desc=desc, leave=leave, ascii=True, initial=initial )
         for x in self.pg_bar:
             yield x
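
With desc now defaulting to None, call sites can wrap an iterable without inventing a label. A minimal usage sketch (assuming DeepFaceLab's usual interact singleton import):

    from interact import interact as io

    image_paths = ["0001.png", "0002.png", "0003.png"]  # hypothetical inputs

    for p in io.progress_bar_generator(image_paths, "Converting"):  # desc still accepted
        pass
    for p in io.progress_bar_generator(image_paths):  # and can now be omitted
        pass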
main.py
@@ -49,7 +49,21 @@ def process_extract(arguments):
     p.add_argument('--cpu-only', action="store_true", dest="cpu_only", default=False, help="Extract on CPU. Forces to use MT extractor.")
     p.set_defaults (func=process_extract)

+    def process_dev_extract_vggface2_dataset(arguments):
+        os_utils.set_process_lowest_prio()
+        from mainscripts import dev_misc
+        dev_misc.extract_vggface2_dataset( arguments.input_dir,
+                                           device_args={'cpu_only'  : arguments.cpu_only,
+                                                        'multi_gpu' : arguments.multi_gpu,
+                                                       }
+                                         )
+
+    p = subparsers.add_parser( "dev_extract_vggface2_dataset", help="")
+    p.add_argument('--input-dir', required=True, action=fixPathAction, dest="input_dir", help="Input directory. A directory containing the files you wish to process.")
+    p.add_argument('--multi-gpu', action="store_true", dest="multi_gpu", default=False, help="Enables multi GPU.")
+    p.add_argument('--cpu-only', action="store_true", dest="cpu_only", default=False, help="Extract on CPU.")
+    p.set_defaults (func=process_dev_extract_vggface2_dataset)

     def process_dev_extract_umd_csv(arguments):
         os_utils.set_process_lowest_prio()
         from mainscripts import Extractor
@@ -152,7 +166,8 @@ def process_train(arguments):

     def process_convert(arguments):
         os_utils.set_process_lowest_prio()
-        args = {'input_dir' : arguments.input_dir,
+        args = {'training_data_src_dir' : arguments.training_data_src_dir,
+                'input_dir' : arguments.input_dir,
                 'output_dir' : arguments.output_dir,
                 'aligned_dir' : arguments.aligned_dir,
                 'model_dir' : arguments.model_dir,
@@ -165,6 +180,7 @@ def process_convert(arguments):
         Converter.main (args, device_args)

     p = subparsers.add_parser( "convert", help="Converter")
+    p.add_argument('--training-data-src-dir', action=fixPathAction, dest="training_data_src_dir", help="(optional, may be required by some models) Dir of extracted SRC faceset.")
     p.add_argument('--input-dir', required=True, action=fixPathAction, dest="input_dir", help="Input directory. A directory containing the files you wish to process.")
     p.add_argument('--output-dir', required=True, action=fixPathAction, dest="output_dir", help="Output directory. This is where the converted files will be stored.")
     p.add_argument('--aligned-dir', action=fixPathAction, dest="aligned_dir", help="Aligned directory. This is where the extracted dst faces are stored.")
@@ -87,22 +87,26 @@ def on_initialize(self, client_dict):
         #therefore forcing active_DeviceConfig to CPU only
         nnlib.active_DeviceConfig = nnlib.DeviceConfig (cpu_only=True)

-        def sharpen_func (img, sharpen_mode=0, kernel_size=3, amount=150):
+        def blursharpen_func (img, sharpen_mode=0, kernel_size=3, amount=100):
             if kernel_size % 2 == 0:
                 kernel_size += 1

-            if sharpen_mode == 1: #box
-                kernel = np.zeros( (kernel_size, kernel_size), dtype=np.float32)
-                kernel[ kernel_size//2, kernel_size//2] = 1.0
-                box_filter = np.ones( (kernel_size, kernel_size), dtype=np.float32) / (kernel_size**2)
-                kernel = kernel + (kernel - box_filter) * amount
-                return cv2.filter2D(img, -1, kernel)
-            elif sharpen_mode == 2: #gaussian
-                blur = cv2.GaussianBlur(img, (kernel_size, kernel_size) , 0)
-                img = cv2.addWeighted(img, 1.0 + (0.5 * amount), blur, -(0.5 * amount), 0)
-                return img
+            if amount > 0:
+                if sharpen_mode == 1: #box
+                    kernel = np.zeros( (kernel_size, kernel_size), dtype=np.float32)
+                    kernel[ kernel_size//2, kernel_size//2] = 1.0
+                    box_filter = np.ones( (kernel_size, kernel_size), dtype=np.float32) / (kernel_size**2)
+                    kernel = kernel + (kernel - box_filter) * amount
+                    return cv2.filter2D(img, -1, kernel)
+                elif sharpen_mode == 2: #gaussian
+                    blur = cv2.GaussianBlur(img, (kernel_size, kernel_size) , 0)
+                    img = cv2.addWeighted(img, 1.0 + (0.5 * amount), blur, -(0.5 * amount), 0)
+                    return img
+            elif amount < 0:
+                blur = cv2.GaussianBlur(img, (kernel_size, kernel_size) , 0)
+                img = cv2.addWeighted(img, 1.0 + amount / 50.0, blur, -amount / 50.0, 0)
+                return img
             return img
-        self.sharpen_func = sharpen_func
+        self.blursharpen_func = blursharpen_func

         self.fanseg_by_face_type = {}
         self.fanseg_input_size = 256
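
In the box mode above, the kernel arithmetic identity + (identity - box_mean) * amount folds an unsharp mask into a single cv2.filter2D pass: the result equals img + amount * (img - box_blur(img)). A numeric sanity check of that equivalence (illustration only):

    import cv2
    import numpy as np

    img = np.random.rand(32, 32, 3).astype(np.float32)
    ks, amount = 3, 1.5

    kernel = np.zeros((ks, ks), np.float32)
    kernel[ks // 2, ks // 2] = 1.0                   # identity kernel
    box = np.ones((ks, ks), np.float32) / (ks * ks)  # box-mean kernel
    sharpened = cv2.filter2D(img, -1, kernel + (kernel - box) * amount)

    blurred = cv2.blur(img, (ks, ks))                # the same box blur, done explicitly
    expected = img + amount * (img - blurred)
    print(np.allclose(sharpened, expected, atol=1e-5))  # True: one-pass unsharp mask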
@@ -128,7 +132,7 @@ def ebs_ct(*args, **kwargs):
     #override
     def process_data(self, pf): #pf=ProcessingFrame
         cfg = pf.cfg.copy()
-        cfg.sharpen_func = self.sharpen_func
+        cfg.blursharpen_func = self.blursharpen_func
         cfg.superres_func = self.superres_func
         cfg.ebs_ct_func = self.ebs_ct_func

@@ -221,11 +225,13 @@ def superres_func(mode, *args, **kwargs):

         session_data = None
         if self.is_interactive and self.converter_session_filepath.exists():
-            try:
-                with open( str(self.converter_session_filepath), "rb") as f:
-                    session_data = pickle.loads(f.read())
-            except Exception as e:
-                pass
+            if io.input_bool ("Use saved session? (y/n skip:y) : ", True):
+                try:
+                    with open( str(self.converter_session_filepath), "rb") as f:
+                        session_data = pickle.loads(f.read())
+                except Exception as e:
+                    pass

         self.frames = frames
         self.frames_idxs = [ *range(len(self.frames)) ]
@@ -430,9 +436,9 @@ def on_tick(self):
             elif chr_key == 'g':
                 cfg.add_color_degrade_power(-1 if not shift_pressed else -5)
             elif chr_key == 'y':
-                cfg.add_sharpen_amount(1 if not shift_pressed else 5)
+                cfg.add_blursharpen_amount(1 if not shift_pressed else 5)
             elif chr_key == 'h':
-                cfg.add_sharpen_amount(-1 if not shift_pressed else -5)
+                cfg.add_blursharpen_amount(-1 if not shift_pressed else -5)
             elif chr_key == 'u':
                 cfg.add_output_face_scale(1 if not shift_pressed else 5)
             elif chr_key == 'j':
@@ -453,9 +459,9 @@ def on_tick(self):

         else:
             if chr_key == 'y':
-                cfg.add_sharpen_amount(1 if not shift_pressed else 5)
+                cfg.add_blursharpen_amount(1 if not shift_pressed else 5)
             elif chr_key == 'h':
-                cfg.add_sharpen_amount(-1 if not shift_pressed else -5)
+                cfg.add_blursharpen_amount(-1 if not shift_pressed else -5)
             elif chr_key == 's':
                 cfg.toggle_add_source_image()
             elif chr_key == 'v':
@@ -576,6 +582,8 @@ def get_result(self):
 def main (args, device_args):
     io.log_info ("Running converter.\r\n")

+    training_data_src_dir = args.get('training_data_src_dir', None)
+    training_data_src_path = Path(training_data_src_dir) if training_data_src_dir is not None else None
     aligned_dir = args.get('aligned_dir', None)
     avaperator_aligned_dir = args.get('avaperator_aligned_dir', None)

@@ -598,7 +606,7 @@ def main (args, device_args):
     is_interactive = io.input_bool ("Use interactive converter? (y/n skip:y) : ", True) if not io.is_colab() else False

     import models
-    model = models.import_model( args['model_name'] )(model_path, device_args=device_args)
+    model = models.import_model( args['model_name'])(model_path, device_args=device_args, training_data_src_path=training_data_src_path)
     converter_session_filepath = model.get_strpath_storage_for_file('converter_session.dat')
     predictor_func, predictor_input_shape, cfg = model.get_ConverterConfig()
