Skip to content
Permalink
Browse files

fixed mask editor

added FacesetEnhancer
4.2.other) data_src util faceset enhance best GPU.bat
4.2.other) data_src util faceset enhance multi GPU.bat

FacesetEnhancer greatly increases the level of detail in your source face set,
similar to the Gigapixel enhancer, but in fully automatic mode.
In the OpenCL build it runs on the CPU only.

Please consider a donation.
  • Loading branch information
iperov committed Dec 26, 2019
1 parent 3be223a commit d46fb5cfd3d1278c20821745bf814653212ea67a
Showing with 476 additions and 6 deletions.
  1. BIN facelib/FaceEnhancer.h5
  2. +154 −0 facelib/FaceEnhancer.py
  3. +2 −1 facelib/__init__.py
  4. +17 −3 main.py
  5. +163 −0 mainscripts/FacesetEnhancer.py
  6. +140 −2 nnlib/nnlib.py
Binary file not shown.
@@ -0,0 +1,154 @@
import operator
from pathlib import Path

import cv2
import numpy as np



class FaceEnhancer(object):
    """
    x4 face enhancer.

    Builds a U-Net-like Keras model that upscales 192x192 BGR face patches
    by 4x (outputs 768x768), conditioned on two scalar inputs, and loads
    pretrained weights from FaceEnhancer.h5 located next to this file.
    Large images are processed as overlapping 192x192 patches blended with
    a pyramid weight mask.
    """
    def __init__(self):
        # nnlib.import_all() returns source that injects keras symbols
        # (Input, Conv2D, LeakyReLU, BilinearInterpolation, ...) into globals().
        from nnlib import nnlib
        exec( nnlib.import_all(), locals(), globals() )

        model_path = Path(__file__).parent / "FaceEnhancer.h5"
        if not model_path.exists():
            # NOTE(review): returning silently leaves self.model undefined,
            # so enhance() will raise AttributeError later — consider raising
            # FileNotFoundError here instead.
            return

        bgr_inp = Input ( (192,192,3) )
        # Two scalar conditioning inputs; their exact semantics are not
        # documented — enhance() feeds the shipped defaults 0.2 and 1.0.
        t_param_inp = Input ( (1,) )
        t_param1_inp = Input ( (1,) )
        x = Conv2D (64, 3, strides=1, padding='same' )(bgr_inp)

        # Inject the conditioning scalars as per-channel biases.
        a = Dense (64, use_bias=False) ( t_param_inp )
        a = Reshape( (1,1,64) )(a)
        b = Dense (64, use_bias=False ) ( t_param1_inp )
        b = Reshape( (1,1,64) )(b)
        x = Add()([x,a,b])

        x = LeakyReLU(0.1)(x)

        # Encoder: five downsampling stages, skip tensors e0..e4 kept for
        # the decoder concatenations.
        x = LeakyReLU(0.1)(Conv2D (64, 3, strides=1, padding='same' )(x))
        x = e0 = LeakyReLU(0.1)(Conv2D (64, 3, strides=1, padding='same')(x))

        x = AveragePooling2D()(x)
        x = LeakyReLU(0.1)(Conv2D (112, 3, strides=1, padding='same')(x))
        x = e1 = LeakyReLU(0.1)(Conv2D (112, 3, strides=1, padding='same')(x))

        x = AveragePooling2D()(x)
        x = LeakyReLU(0.1)(Conv2D (192, 3, strides=1, padding='same')(x))
        x = e2 = LeakyReLU(0.1)(Conv2D (192, 3, strides=1, padding='same')(x))

        x = AveragePooling2D()(x)
        x = LeakyReLU(0.1)(Conv2D (336, 3, strides=1, padding='same')(x))
        x = e3 = LeakyReLU(0.1)(Conv2D (336, 3, strides=1, padding='same')(x))

        x = AveragePooling2D()(x)
        x = LeakyReLU(0.1)(Conv2D (512, 3, strides=1, padding='same')(x))
        x = e4 = LeakyReLU(0.1)(Conv2D (512, 3, strides=1, padding='same')(x))

        x = AveragePooling2D()(x)
        x = LeakyReLU(0.1)(Conv2D (512, 3, strides=1, padding='same')(x))
        x = LeakyReLU(0.1)(Conv2D (512, 3, strides=1, padding='same')(x))
        x = LeakyReLU(0.1)(Conv2D (512, 3, strides=1, padding='same')(x))
        x = LeakyReLU(0.1)(Conv2D (512, 3, strides=1, padding='same')(x))

        # Decoder: upsample and fuse with the matching encoder skip.
        x = Concatenate()([ BilinearInterpolation()(x), e4 ])

        x = LeakyReLU(0.1)(Conv2D (512, 3, strides=1, padding='same')(x))
        x = LeakyReLU(0.1)(Conv2D (512, 3, strides=1, padding='same')(x))

        x = Concatenate()([ BilinearInterpolation()(x), e3 ])
        x = LeakyReLU(0.1)(Conv2D (512, 3, strides=1, padding='same')(x))
        x = LeakyReLU(0.1)(Conv2D (512, 3, strides=1, padding='same')(x))

        x = Concatenate()([ BilinearInterpolation()(x), e2 ])
        x = LeakyReLU(0.1)(Conv2D (288, 3, strides=1, padding='same')(x))
        x = LeakyReLU(0.1)(Conv2D (288, 3, strides=1, padding='same')(x))

        x = Concatenate()([ BilinearInterpolation()(x), e1 ])
        x = LeakyReLU(0.1)(Conv2D (160, 3, strides=1, padding='same')(x))
        x = LeakyReLU(0.1)(Conv2D (160, 3, strides=1, padding='same')(x))

        x = Concatenate()([ BilinearInterpolation()(x), e0 ])
        x = LeakyReLU(0.1)(Conv2D (96, 3, strides=1, padding='same')(x))
        x = d0 = LeakyReLU(0.1)(Conv2D (96, 3, strides=1, padding='same')(x))

        x = LeakyReLU(0.1)(Conv2D (48, 3, strides=1, padding='same')(x))

        # 1x output: residual added to the input image (tanh range).
        x = Conv2D (3, 3, strides=1, padding='same', activation='tanh')(x)
        out1x = Add()([bgr_inp, x])

        # 2x output: refine d0, upsample, residual over upsampled out1x.
        x = d0
        x = LeakyReLU(0.1)(Conv2D (96, 3, strides=1, padding='same')(x))
        x = LeakyReLU(0.1)(Conv2D (96, 3, strides=1, padding='same')(x))
        x = d2x = BilinearInterpolation()(x)

        x = LeakyReLU(0.1)(Conv2D (48, 3, strides=1, padding='same')(x))
        x = Conv2D (3, 3, strides=1, padding='same', activation='tanh')(x)

        out2x = Add()([BilinearInterpolation()(out1x), x])

        # 4x output: same residual refinement scheme one level higher.
        x = d2x
        x = LeakyReLU(0.1)(Conv2D (72, 3, strides=1, padding='same')(x))
        x = LeakyReLU(0.1)(Conv2D (72, 3, strides=1, padding='same')(x))
        x = d4x = BilinearInterpolation()(x)

        x = LeakyReLU(0.1)(Conv2D (36, 3, strides=1, padding='same')(x))
        x = Conv2D (3, 3, strides=1, padding='same', activation='tanh')(x)
        out4x = Add()([BilinearInterpolation()(out2x), x ])

        # Only the 4x head is exposed for inference.
        self.model = keras.models.Model ( [bgr_inp,t_param_inp,t_param1_inp], [out4x] )
        self.model.load_weights (str(model_path))


    def enhance (self, inp_img, is_tanh=False, preserve_size=True):
        """
        Enhance a BGR float image (h,w,3).

        inp_img       : image in [0..1] (or [-1..1] when is_tanh=True);
                        both h and w must be >= 192 (one patch).
        is_tanh       : input/output are already in tanh range [-1..1].
        preserve_size : downscale the 4x result back to (w,h).

        Returns the enhanced image in the same value range as the input.
        """
        if not is_tanh:
            inp_img = np.clip( inp_img * 2 -1, -1, 1 )

        # Shipped defaults for the two conditioning scalars.
        param = np.array([0.2])
        param1 = np.array([1.0])
        up_res = 4
        patch_size = 192
        patch_size_half = patch_size // 2

        h,w,c = inp_img.shape

        i_max = w-patch_size+1
        j_max = h-patch_size+1

        final_img = np.zeros ( (h*up_res,w*up_res,c), dtype=np.float32 )
        final_img_div = np.zeros ( (h*up_res,w*up_res,1), dtype=np.float32 )

        # Pyramid blending mask: 1.0 at the patch center, 0.0 at the border,
        # so overlapping patches cross-fade smoothly.
        x = np.concatenate ( [ np.linspace (0,1,patch_size_half*up_res), np.linspace (1,0,patch_size_half*up_res) ] )
        x,y = np.meshgrid(x,x)
        patch_mask = (x*y)[...,None]

        # Slide a 192x192 window with half-patch stride, accumulating the
        # weighted 4x patches and the weights for later normalization.
        j=0
        while j < j_max:
            i = 0
            while i < i_max:
                patch_img = inp_img[j:j+patch_size, i:i+patch_size,:]
                x = self.model.predict( [ patch_img[None,...], param, param1 ] )[0]
                final_img    [j*up_res:(j+patch_size)*up_res, i*up_res:(i+patch_size)*up_res,:] += x*patch_mask
                final_img_div[j*up_res:(j+patch_size)*up_res, i*up_res:(i+patch_size)*up_res,:] += patch_mask
                if i == i_max-1:
                    break
                i = min( i+patch_size_half, i_max-1)
            if j == j_max-1:
                break
            j = min( j+patch_size_half, j_max-1)

        # Avoid division by zero where the mask never contributed (borders).
        final_img_div[final_img_div==0] = 1.0
        final_img /= final_img_div

        if preserve_size:
            # BUGFIX: the third positional argument of cv2.resize is `dst`,
            # not `interpolation` — pass it as a keyword so LANCZOS4 is
            # actually used instead of being silently ignored.
            final_img = cv2.resize (final_img, (w,h), interpolation=cv2.INTER_LANCZOS4)

        if not is_tanh:
            final_img = np.clip( final_img/2+0.5, 0, 1 )

        return final_img
@@ -3,4 +3,5 @@
from .MTCExtractor import MTCExtractor
from .S3FDExtractor import S3FDExtractor
from .FANExtractor import FANExtractor
from .PoseEstimator import PoseEstimator
from .PoseEstimator import PoseEstimator
from .FaceEnhancer import FaceEnhancer
20 main.py
@@ -286,6 +286,21 @@ def process_labelingtool_edit_mask(arguments):

p.set_defaults(func=process_labelingtool_edit_mask)

facesettool_parser = subparsers.add_parser( "facesettool", help="Faceset tools.").add_subparsers()

def process_faceset_enhancer(arguments):
    # Run at the lowest process priority so the machine stays responsive.
    os_utils.set_process_lowest_prio()
    # Imported lazily: FacesetEnhancer pulls in heavy ML dependencies.
    from mainscripts import FacesetEnhancer
    input_path = Path (arguments.input_dir)
    FacesetEnhancer.process_folder (input_path,
                                    multi_gpu=arguments.multi_gpu,
                                    cpu_only=arguments.cpu_only)

p = facesettool_parser.add_parser ("enhance", help="Enhance details in DFL faceset.")
p.add_argument('--input-dir', required=True, action=fixPathAction, dest="input_dir", help="Input directory of aligned faces.")
p.add_argument('--multi-gpu', action="store_true", dest="multi_gpu", default=False, help="Enables multi GPU.")
p.add_argument('--cpu-only', action="store_true", dest="cpu_only", default=False, help="Process on CPU.")

p.set_defaults(func=process_faceset_enhancer)

"""
def process_relight_faceset(arguments):
os_utils.set_process_lowest_prio()
from mainscripts import FacesetRelighter
@@ -295,9 +310,7 @@ def process_delete_relighted(arguments):
os_utils.set_process_lowest_prio()
from mainscripts import FacesetRelighter
FacesetRelighter.delete_relighted (arguments.input_dir)

facesettool_parser = subparsers.add_parser( "facesettool", help="Faceset tools.").add_subparsers()

p = facesettool_parser.add_parser ("relight", help="Synthesize new faces from existing ones by relighting them. With the relighted faces neural network will better reproduce face shadows.")
p.add_argument('--input-dir', required=True, action=fixPathAction, dest="input_dir", help="Input directory of aligned faces.")
p.add_argument('--lighten', action="store_true", dest="lighten", default=None, help="Lighten the faces.")
@@ -307,6 +320,7 @@ def process_delete_relighted(arguments):
p = facesettool_parser.add_parser ("delete_relighted", help="Delete relighted faces.")
p.add_argument('--input-dir', required=True, action=fixPathAction, dest="input_dir", help="Input directory of aligned faces.")
p.set_defaults(func=process_delete_relighted)
"""

def bad_args(arguments):
parser.print_help()
@@ -0,0 +1,163 @@
import multiprocessing
import shutil

from DFLIMG import *
from interact import interact as io
from joblib import Subprocessor
from nnlib import nnlib
from utils import Path_utils
from utils.cv2_utils import *


class FacesetEnhancerSubprocessor(Subprocessor):
    """
    Distributes faceset enhancement over worker processes (one per device).

    The host feeds image paths to the workers; each worker enhances one
    image with FaceEnhancer and writes the result into output_dirpath.
    get_result() returns a list of (source_filepath, output_filepath)
    pairs for successfully processed images.
    """

    #override
    def __init__(self, image_paths, output_dirpath, multi_gpu=False, cpu_only=False):
        self.image_paths = image_paths
        self.output_dirpath = output_dirpath
        self.result = []
        self.devices = FacesetEnhancerSubprocessor.get_devices_for_config(multi_gpu, cpu_only)

        super().__init__('FacesetEnhancer', FacesetEnhancerSubprocessor.Cli, 600)

    #override
    def on_clients_initialized(self):
        io.progress_bar (None, len (self.image_paths))

    #override
    def on_clients_finalized(self):
        io.progress_bar_close()

    #override
    def process_info_generator(self):
        # One client per device; each gets its own device description.
        base_dict = {'output_dirpath':self.output_dirpath}

        for (device_idx, device_type, device_name, device_total_vram_gb) in self.devices:
            client_dict = base_dict.copy()
            client_dict['device_idx'] = device_idx
            client_dict['device_name'] = device_name
            client_dict['device_type'] = device_type
            yield client_dict['device_name'], {}, client_dict

    #override
    def get_data(self, host_dict):
        # Returns the next path to process, or None when the queue is empty.
        if len (self.image_paths) > 0:
            return self.image_paths.pop(0)

    #override
    def on_data_return (self, host_dict, data):
        # A worker gave the item back (e.g. it died) — requeue it.
        self.image_paths.insert(0, data)

    #override
    def on_result (self, host_dict, data, result):
        io.progress_bar_inc(1)
        # result is (status, src_filepath, output_filepath); status 1 == ok.
        if result[0] == 1:
            self.result +=[ (result[1], result[2]) ]

    #override
    def get_result(self):
        return self.result

    @staticmethod
    def get_devices_for_config (multi_gpu, cpu_only):
        """
        Return a list of (idx, type, name, vram_gb) device tuples to run on.
        Falls back to CPU workers when no usable GPU is available.
        """
        backend = nnlib.device.backend
        if 'cpu' in backend:
            cpu_only = True

        # plaidML (OpenCL) build runs the enhancer on CPU only.
        if not cpu_only and backend == "plaidML":
            cpu_only = True

        if not cpu_only:
            devices = []
            if multi_gpu:
                devices = nnlib.device.getValidDevicesWithAtLeastTotalMemoryGB(2)

            if len(devices) == 0:
                idx = nnlib.device.getBestValidDeviceIdx()
                if idx != -1:
                    devices = [idx]

            if len(devices) == 0:
                # BUGFIX: previously this set cpu_only and then still returned
                # the empty GPU list, leaving the subprocessor with no workers.
                # Fall through to the CPU branch instead.
                cpu_only = True
            else:
                result = []
                for idx in devices:
                    dev_name = nnlib.device.getDeviceName(idx)
                    dev_vram = nnlib.device.getDeviceVRAMTotalGb(idx)

                    result += [ (idx, 'GPU', dev_name, dev_vram) ]

                return result

        if cpu_only:
            # Cap at 8 workers / half the logical cores to limit memory use.
            return [ (i, 'CPU', 'CPU%d' % (i), 0 ) for i in range( min(8, multiprocessing.cpu_count() // 2) ) ]

    class Cli(Subprocessor.Cli):

        #override
        def on_initialize(self, client_dict):
            device_idx = client_dict['device_idx']
            cpu_only = client_dict['device_type'] == 'CPU'
            self.output_dirpath = client_dict['output_dirpath']

            device_config = nnlib.DeviceConfig ( cpu_only=cpu_only, force_gpu_idx=device_idx, allow_growth=True)
            nnlib.import_all (device_config)

            device_vram = device_config.gpu_vram_gb[0]

            intro_str = 'Running on %s.' % (client_dict['device_name'])
            if not cpu_only and device_vram <= 2:
                intro_str += " Recommended to close all programs using this device."

            self.log_info (intro_str)

            # Imported here so the model loads inside the worker process.
            from facelib import FaceEnhancer
            self.fe = FaceEnhancer()

        #override
        def process_data(self, filepath):
            """
            Enhance one image; returns (1, src, dst) on success,
            (0, src, None) on failure or non-DFL input.
            """
            try:
                dflimg = DFLIMG.load (filepath)
                if dflimg is None:
                    self.log_err ("%s is not a dfl image file" % (filepath.name) )
                else:
                    img = cv2_imread(filepath).astype(np.float32) / 255.0

                    img = self.fe.enhance(img)

                    img = np.clip (img*255, 0, 255).astype(np.uint8)

                    output_filepath = self.output_dirpath / filepath.name

                    cv2_imwrite ( str(output_filepath), img, [int(cv2.IMWRITE_JPEG_QUALITY), 100] )
                    # Re-embed the DFL metadata into the enhanced copy.
                    dflimg.embed_and_set ( str(output_filepath) )
                    return (1, filepath, output_filepath)
            except:
                # BUGFIX: traceback is not imported at module level; importing
                # it locally prevents a NameError from masking the real error.
                import traceback
                self.log_err (f"Exception occured while processing file {filepath}. Error: {traceback.format_exc()}")

            return (0, filepath, None)

def process_folder ( dirpath, multi_gpu=False, cpu_only=False ):
    """
    Enhance every image in *dirpath* in place.

    Enhanced copies are first written to a sibling '<name>_enhanced'
    directory, then copied back over the originals, and the temporary
    directory is removed.
    """
    # Stage results in a sibling "<name>_enhanced" directory.
    output_dirpath = dirpath.parent / (dirpath.name + '_enhanced')
    output_dirpath.mkdir (exist_ok=True, parents=True)

    src_label = '/'.join (dirpath.parts[-2:])
    dst_label = '/'.join (output_dirpath.parts[-2:])
    io.log_info (f"Enhancing faceset in {src_label}.")
    io.log_info ( f"Processing to {dst_label}.")

    # Remove any leftovers from a previous (possibly interrupted) run.
    for stale_path in Path_utils.get_image_paths (output_dirpath):
        Path (stale_path).unlink()

    paths_to_process = [ Path(p) for p in Path_utils.get_image_paths (dirpath) ]
    processed = FacesetEnhancerSubprocessor (paths_to_process, output_dirpath, multi_gpu=multi_gpu, cpu_only=cpu_only).run()

    io.log_info (f"Copying processed files to {src_label}.")

    for src_filepath, enhanced_filepath in processed:
        shutil.copy (enhanced_filepath, src_filepath)

    io.log_info (f"Removing {dst_label}.")
    shutil.rmtree (output_dirpath)

0 comments on commit d46fb5c

Please sign in to comment.
You can’t perform that action at this time.