
Fixed "Failed to get convolution algorithm" error on some systems.

Fixed "DLL load failed" error on some systems.

Expanded the eyebrow line of face masks. This does not affect the mask of the FAN-x converter mode.
iperov committed Aug 11, 2019
1 parent 582c974 commit b72d5a3f9aef4637ca18ae09a98aedcf694eb850
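
Note: the hunks below cover the converter, face-type, and mask changes; the session-level change behind the "Failed to get convolution algorithm" fix is not part of this excerpt. That error is usually cuDNN failing to initialize because TensorFlow pre-allocated all GPU memory, and the standard TF 1.x remedy looks like the following sketch (an illustration of the usual fix, not necessarily the exact code in this commit):

    import tensorflow as tf

    # TF 1.x: grow GPU memory on demand instead of pre-allocating it all,
    # which commonly clears "Failed to get convolution algorithm" errors.
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)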
@@ -20,8 +20,9 @@ def __init__(self, predictor_func,
         self.predictor_input_size = predictor_input_size

         #dummy predict and sleep, tensorflow caching kernels. If remove it, conversion speed will be x2 slower
-        predictor_func ( np.zeros ( (predictor_input_size,predictor_input_size,3), dtype=np.float32 ),
-                         np.zeros ( (predictor_input_size,predictor_input_size,1), dtype=np.float32 ) )
+        predictor_func ( np.zeros ( (predictor_input_size,predictor_input_size,3), dtype=np.float32 ),
+                         np.zeros ( (predictor_input_size,predictor_input_size,3), dtype=np.float32 ),
+                         np.zeros ( (predictor_input_size,predictor_input_size,3), dtype=np.float32 ) )
         time.sleep(2)

         predictor_func_host, predictor_func = SubprocessFunctionCaller.make_pair(predictor_func)
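
The dummy predict now passes three 3-channel images because the AVATAR predictor takes three BGR frames (see the converter hunk below) instead of a face plus a 1-channel mask. The warm-up itself is a generic pattern: run the model once on zeros so TensorFlow compiles and caches its kernels before the timed conversion loop starts. A sketch of the idea (helper name hypothetical):

    import numpy as np

    def warm_up_predictor(predictor_func, size, n_inputs=3):
        # Hypothetical helper: one dummy prediction forces kernel
        # compilation/caching so the first real frame is not ~2x slower.
        dummy = np.zeros((size, size, 3), dtype=np.float32)
        predictor_func(*([dummy] * n_inputs))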
@@ -33,38 +34,28 @@ def on_host_tick(self):
         self.predictor_func_host.obj.process_messages()

     #override
-    def cli_convert_face (self, img_bgr, img_face_landmarks, debug, avaperator_face_bgr=None, **kwargs):
+    def cli_convert_face (self, f0, f0_lmrk, f1, f1_lmrk, f2, f2_lmrk, debug, **kwargs):
         if debug:
-            debugs = [img_bgr.copy()]
-
-        img_size = img_bgr.shape[1], img_bgr.shape[0]
-
-        img_face_mask_a = LandmarksProcessor.get_image_hull_mask (img_bgr.shape, img_face_landmarks)
-        img_face_mask_aaa = np.repeat(img_face_mask_a, 3, -1)
-
-        output_size = self.predictor_input_size
-        face_mat = LandmarksProcessor.get_transform_mat (img_face_landmarks, output_size, face_type=FaceType.FULL)
-
-        dst_face_mask_a_0 = cv2.warpAffine( img_face_mask_a, face_mat, (output_size, output_size), flags=cv2.INTER_CUBIC )
-
-        predictor_input_dst_face_mask_a_0 = cv2.resize (dst_face_mask_a_0, (self.predictor_input_size,self.predictor_input_size), cv2.INTER_CUBIC )
-        prd_inp_dst_face_mask_a = predictor_input_dst_face_mask_a_0[...,np.newaxis]
-
-        prd_inp_avaperator_face_bgr = cv2.resize (avaperator_face_bgr, (self.predictor_input_size,self.predictor_input_size), cv2.INTER_CUBIC )
-
-        prd_face_bgr = self.predictor_func ( prd_inp_avaperator_face_bgr, prd_inp_dst_face_mask_a )
-
-        out_img = img_bgr.copy()
-        out_img = cv2.warpAffine( prd_face_bgr, face_mat, img_size, out_img, cv2.WARP_INVERSE_MAP | cv2.INTER_LANCZOS4, cv2.BORDER_TRANSPARENT )
-        out_img = np.clip(out_img, 0.0, 1.0)
+            debugs = []
+
+        inp_size = self.predictor_input_size
+
+        f0_mat = LandmarksProcessor.get_transform_mat (f0_lmrk, inp_size, face_type=FaceType.FULL_NO_ALIGN)
+        f1_mat = LandmarksProcessor.get_transform_mat (f1_lmrk, inp_size, face_type=FaceType.FULL_NO_ALIGN)
+        f2_mat = LandmarksProcessor.get_transform_mat (f2_lmrk, inp_size, face_type=FaceType.FULL_NO_ALIGN)
+
+        inp_f0 = cv2.warpAffine( f0, f0_mat, (inp_size, inp_size), flags=cv2.INTER_CUBIC )
+        inp_f1 = cv2.warpAffine( f1, f1_mat, (inp_size, inp_size), flags=cv2.INTER_CUBIC )
+        inp_f2 = cv2.warpAffine( f2, f2_mat, (inp_size, inp_size), flags=cv2.INTER_CUBIC )
+
+        prd_f = self.predictor_func ( inp_f0, inp_f1, inp_f2 )
+
+        out_img = np.clip(prd_f, 0.0, 1.0)
+
+        out_img = np.concatenate ( [cv2.resize ( inp_f1, (prd_f.shape[1], prd_f.shape[0]) ),
+                                    out_img], axis=1 )

         if debug:
             debugs += [out_img.copy()]

-        out_img = np.clip( img_bgr*(1-img_face_mask_aaa) + (out_img*img_face_mask_aaa) , 0, 1.0 )
-
-        if debug:
-            debugs += [out_img.copy()]
-
         return debugs if debug else out_img
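
The rewritten cli_convert_face no longer masks a predicted face back into the destination frame: it warps three consecutive frames into rotation-free FULL_NO_ALIGN crops, runs the predictor on the triple, and returns the middle input concatenated side by side with the prediction. A rough sketch of how a caller might drive it over a frame sequence (driver names hypothetical; frames are float32 BGR in [0,1] with 68-point landmark arrays):

    # Hypothetical driver: slide a 3-frame window over the sequence.
    for i in range(1, len(frames) - 1):
        result = converter.cli_convert_face(frames[i-1], landmarks[i-1],
                                            frames[i],   landmarks[i],
                                            frames[i+1], landmarks[i+1],
                                            debug=False)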
@@ -4,10 +4,10 @@ class FaceType(IntEnum):
     HALF = 0,
     FULL = 1,
     HEAD = 2,
-    AVATAR = 3, #centered nose only
-    MARK_ONLY = 4, #no align at all, just embedded faceinfo
-    QTY = 5
+    FULL_NO_ALIGN = 5,
+    MARK_ONLY = 10, #no align at all, just embedded faceinfo

     @staticmethod
     def fromString (s):
         r = from_string_dict.get (s.lower())
@@ -17,17 +17,17 @@ def fromString (s):

 @staticmethod
 def toString (face_type):
-    return to_string_list[face_type]
+    return to_string_dict[face_type]

 from_string_dict = {'half_face': FaceType.HALF,
                     'full_face': FaceType.FULL,
                     'head' : FaceType.HEAD,
-                    'avatar' : FaceType.AVATAR,
                     'mark_only' : FaceType.MARK_ONLY,
+                    'full_face_no_align' : FaceType.FULL_NO_ALIGN,
                    }

-to_string_list = [ 'half_face',
-                   'full_face',
-                   'head',
-                   'avatar',
-                   'mark_only'
-                 ]
+to_string_dict = { FaceType.HALF : 'half_face',
+                   FaceType.FULL : 'full_face',
+                   FaceType.HEAD : 'head',
+                   FaceType.MARK_ONLY : 'mark_only',
+                   FaceType.FULL_NO_ALIGN : 'full_face_no_align'
+                 }
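
toString switches from a list to a dict because the enum values are no longer contiguous: with FULL_NO_ALIGN = 5 and MARK_ONLY = 10, to_string_list[FaceType.MARK_ONLY] would index far past the end of a five-element list. A quick round-trip with the new mappings:

    ft = FaceType.fromString('full_face_no_align')
    assert ft == FaceType.FULL_NO_ALIGN
    assert FaceType.toString(ft) == 'full_face_no_align'
    assert FaceType.toString(FaceType.MARK_ONLY) == 'mark_only'  # value 10 would break list indexing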
@@ -109,10 +109,19 @@
     [0.205322 , 31.408738 , -21.903670 ],
     [-7.198266 , 30.844876 , -20.328022 ] ], dtype=np.float32)

+def transform_points(points, mat, invert=False):
+    if invert:
+        mat = cv2.invertAffineTransform (mat)
+    points = np.expand_dims(points, axis=1)
+    points = cv2.transform(points, mat, points.shape)
+    points = np.squeeze(points)
+    return points
+
 def get_transform_mat (image_landmarks, output_size, face_type, scale=1.0):
     if not isinstance(image_landmarks, np.ndarray):
         image_landmarks = np.array (image_landmarks)

+    """
     if face_type == FaceType.AVATAR:
         centroid = np.mean (image_landmarks, axis=0)
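
transform_points moves above get_transform_mat because the new FULL_NO_ALIGN branch calls it. It pushes an Nx2 point array through a 2x3 affine matrix via cv2.transform, optionally inverting the matrix first, so points can be mapped between image space and crop space in either direction. A round-trip check with a pure translation:

    import numpy as np

    pts = np.float32([[10, 20], [30, 40]])
    mat = np.float32([[1, 0, 5], [0, 1, -3]])       # translate by (+5, -3)
    fwd = transform_points(pts, mat)                # [[15, 17], [35, 37]]
    back = transform_points(fwd, mat, invert=True)  # recovers the originals
    assert np.allclose(pts, back)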
@@ -128,76 +137,79 @@ def get_transform_mat (image_landmarks, output_size, face_type, scale=1.0):
         mat = mat * scale * (output_size / 3)
         mat[:,2] += output_size / 2
     else:
         if face_type == FaceType.HALF:
             padding = 0
         elif face_type == FaceType.FULL:
             padding = (output_size / 64) * 12
         elif face_type == FaceType.HEAD:
             padding = (output_size / 64) * 24
         else:
             raise ValueError ('wrong face_type: ', face_type)

         mat = umeyama(image_landmarks[17:], landmarks_2D, True)[0:2]
         mat = mat * (output_size - 2 * padding)
         mat[:,2] += padding
         mat *= (1 / scale)
         mat[:,2] += -output_size*( ( (1 / scale) - 1.0 ) / 2 )
+    """
+    remove_align = False
+    if face_type == FaceType.FULL_NO_ALIGN:
+        face_type = FaceType.FULL
+        remove_align = True
+
+    if face_type == FaceType.HALF:
+        padding = 0
+    elif face_type == FaceType.FULL:
+        padding = (output_size / 64) * 12
+    elif face_type == FaceType.HEAD:
+        padding = (output_size / 64) * 24
+    else:
+        raise ValueError ('wrong face_type: ', face_type)
+
+    mat = umeyama(image_landmarks[17:], landmarks_2D, True)[0:2]
+    mat = mat * (output_size - 2 * padding)
+    mat[:,2] += padding
+    mat *= (1 / scale)
+    mat[:,2] += -output_size*( ( (1 / scale) - 1.0 ) / 2 )
+
+    if remove_align:
+        bbox = transform_points ( [ (0,0), (0,output_size-1), (output_size-1, output_size-1), (output_size-1,0) ], mat, True)
+        area = mathlib.polygon_area(bbox[:,0], bbox[:,1] )
+        side = math.sqrt(area) / 2
+        center = transform_points ( [(output_size/2,output_size/2)], mat, True)
+
+        pts1 = np.float32([ center+[-side,-side], center+[side,-side], center+[-side,side] ])
+        pts2 = np.float32([[0,0],[output_size-1,0],[0,output_size-1]])
+        mat = cv2.getAffineTransform(pts1,pts2)

     return mat
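
How FULL_NO_ALIGN works: the normal aligned FULL transform is computed first, then its rotation is discarded. The crop square's corners are mapped back into image space (giving a rotated quadrilateral), an axis-aligned square of equal area is centered on the mapped crop center, and cv2.getAffineTransform rebuilds the matrix from three corners of that square. The crop keeps the face's position and scale but not its roll angle. A numeric illustration of the equal-area step:

    import math

    # If the back-projected corners enclose 40000 px^2, the replacement
    # axis-aligned crop spans the center +/- sqrt(40000)/2 = 100 px per axis.
    area = 40000.0
    side = math.sqrt(area) / 2   # 100.0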

-def transform_points(points, mat, invert=False):
-    if invert:
-        mat = cv2.invertAffineTransform (mat)
-    points = np.expand_dims(points, axis=1)
-    points = cv2.transform(points, mat, points.shape)
-    points = np.squeeze(points)
-    return points


 def get_image_hull_mask (image_shape, image_landmarks, ie_polys=None):
     if len(image_landmarks) != 68:
         raise Exception('get_image_hull_mask works only with 68 landmarks')
-    int_lmrks = np.array(image_landmarks, dtype=np.int)
+    int_lmrks = np.array(image_landmarks.copy(), dtype=np.int)

     hull_mask = np.zeros(image_shape[0:2]+(1,),dtype=np.float32)

-    cv2.fillConvexPoly( hull_mask, cv2.convexHull(
-        np.concatenate ( (int_lmrks[0:9],
-                          int_lmrks[17:18]))) , (1,) )
-
-    cv2.fillConvexPoly( hull_mask, cv2.convexHull(
-        np.concatenate ( (int_lmrks[8:17],
-                          int_lmrks[26:27]))) , (1,) )
-
-    cv2.fillConvexPoly( hull_mask, cv2.convexHull(
-        np.concatenate ( (int_lmrks[17:20],
-                          int_lmrks[8:9]))) , (1,) )
-
-    cv2.fillConvexPoly( hull_mask, cv2.convexHull(
-        np.concatenate ( (int_lmrks[24:27],
-                          int_lmrks[8:9]))) , (1,) )
-
-    cv2.fillConvexPoly( hull_mask, cv2.convexHull(
-        np.concatenate ( (int_lmrks[19:25],
-                          int_lmrks[8:9],
-                          ))) , (1,) )
-
-    cv2.fillConvexPoly( hull_mask, cv2.convexHull(
-        np.concatenate ( (int_lmrks[17:22],
-                          int_lmrks[27:28],
-                          int_lmrks[31:36],
-                          int_lmrks[8:9]
-                          ))) , (1,) )
-
-    cv2.fillConvexPoly( hull_mask, cv2.convexHull(
-        np.concatenate ( (int_lmrks[22:27],
-                          int_lmrks[27:28],
-                          int_lmrks[31:36],
-                          int_lmrks[8:9]
-                          ))) , (1,) )
-
-    #nose
-    cv2.fillConvexPoly( hull_mask, cv2.convexHull(int_lmrks[27:36]), (1,) )
+    # mid points between the sides of the face and the eyes
+    ml_pnt = (int_lmrks[36] + int_lmrks[0]) // 2
+    mr_pnt = (int_lmrks[16] + int_lmrks[45]) // 2
+
+    # mid points between the mid points and eye
+    ql_pnt = (int_lmrks[36] + ml_pnt) // 2
+    qr_pnt = (int_lmrks[45] + mr_pnt) // 2
+
+    # top-of-eye arrays (the lower bound for the eyebrow expansion)
+    bot_l = np.array((ql_pnt, int_lmrks[36], int_lmrks[37], int_lmrks[38], int_lmrks[39]))
+    bot_r = np.array((int_lmrks[42], int_lmrks[43], int_lmrks[44], int_lmrks[45], qr_pnt))
+
+    # eyebrow arrays
+    top_l = int_lmrks[17:22]
+    top_r = int_lmrks[22:27]
+
+    # adjust eyebrow arrays: push each brow away from the eye by half the brow-to-eye distance
+    int_lmrks[17:22] = top_l + ((top_l - bot_l) // 2)
+    int_lmrks[22:27] = top_r + ((top_r - bot_r) // 2)
+
+    r_jaw = (int_lmrks[0:9], int_lmrks[17:18])
+    l_jaw = (int_lmrks[8:17], int_lmrks[26:27])
+    r_cheek = (int_lmrks[17:20], int_lmrks[8:9])
+    l_cheek = (int_lmrks[24:27], int_lmrks[8:9])
+    nose_ridge = (int_lmrks[19:25], int_lmrks[8:9],)
+    r_eye = (int_lmrks[17:22], int_lmrks[27:28], int_lmrks[31:36], int_lmrks[8:9])
+    l_eye = (int_lmrks[22:27], int_lmrks[27:28], int_lmrks[31:36], int_lmrks[8:9])
+    nose = (int_lmrks[27:31], int_lmrks[31:36])
+    parts = [r_jaw, l_jaw, r_cheek, l_cheek, nose_ridge, r_eye, l_eye, nose]
+
+    for item in parts:
+        merged = np.concatenate(item)
+        cv2.fillConvexPoly(hull_mask, cv2.convexHull(merged), 1)

     if ie_polys is not None:
         ie_polys.overlay_mask(hull_mask)
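
The eyebrow expansion replaces each brow landmark b with b + (b - e)//2, where e is the matching top-of-eye point, so the hull reaches about 50% further up the forehead. Since image y grows downward, a quick check:

    import numpy as np

    top = np.array([[100, 80]])      # brow landmark (x, y)
    bot = np.array([[100, 110]])     # matching top-of-eye point below it
    print(top + ((top - bot) // 2))  # [[100, 65]] -> brow pushed 15 px up the forehead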
@@ -309,7 +321,7 @@ def draw_landmarks (image, image_landmarks, color=(0,255,0), transparent_mask=False, ie_polys=None):
         mask = get_image_hull_mask (image.shape, image_landmarks, ie_polys)
         image[...] = ( image * (1-mask) + image * mask / 2 )[...]

-def draw_rect_landmarks (image, rect, image_landmarks, face_size, face_type, transparent_mask=False, ie_polys=None, landmarks_color=(0,255,0) ):
+def draw_rect_landmarks (image, rect, image_landmarks, face_size, face_type, transparent_mask=False, ie_polys=None, landmarks_color=(0,255,0)):
     draw_landmarks(image, image_landmarks, color=landmarks_color, transparent_mask=transparent_mask, ie_polys=ie_polys)
     imagelib.draw_rect (image, rect, (255,0,0), 2 )

@@ -40,7 +40,7 @@ def process_extract(arguments):
     p.add_argument('--input-dir', required=True, action=fixPathAction, dest="input_dir", help="Input directory. A directory containing the files you wish to process.")
     p.add_argument('--output-dir', required=True, action=fixPathAction, dest="output_dir", help="Output directory. This is where the extracted files will be stored.")
     p.add_argument('--debug-dir', action=fixPathAction, dest="debug_dir", help="Writes debug images to this directory.")
-    p.add_argument('--face-type', dest="face_type", choices=['half_face', 'full_face', 'head', 'avatar', 'mark_only'], default='full_face', help="Default 'full_face'. Don't change this option; currently all models use 'full_face'.")
+    p.add_argument('--face-type', dest="face_type", choices=['half_face', 'full_face', 'head', 'full_face_no_align', 'mark_only'], default='full_face', help="Default 'full_face'. Don't change this option; currently all models use 'full_face'.")
     p.add_argument('--detector', dest="detector", choices=['dlib','mt','s3fd','manual'], default='dlib', help="Type of detector. Default 'dlib'. 'mt' (MTCNNv1) - faster, better, almost no jitter; perfect for gathering thousands of faces for the src-set. It is also good for the dst-set, but can generate false faces in frames where the main face is not recognized! In that case, for the dst-set use either 'dlib' with '--manual-fix' or '--detector manual'. The manual detector is suitable only for the dst-set.")
     p.add_argument('--multi-gpu', action="store_true", dest="multi_gpu", default=False, help="Enables multi GPU.")
     p.add_argument('--manual-fix', action="store_true", dest="manual_fix", default=False, help="Enables manual extraction of only those frames where faces were not recognized.")
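
With 'full_face_no_align' wired into --face-type, an extraction run producing the unaligned crops would look something like this (paths illustrative, and assuming these arguments are registered under an 'extract' subcommand, as process_extract suggests):

    python main.py extract --input-dir workspace/data_src \
                           --output-dir workspace/data_src/aligned \
                           --detector s3fd \
                           --face-type full_face_no_align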
@@ -151,7 +151,6 @@ def process_convert(arguments):
     args = {'input_dir' : arguments.input_dir,
             'output_dir' : arguments.output_dir,
             'aligned_dir' : arguments.aligned_dir,
-            'avaperator_aligned_dir' : arguments.avaperator_aligned_dir,
             'model_dir' : arguments.model_dir,
             'model_name' : arguments.model_name,
             'debug' : arguments.debug,
@@ -166,7 +165,6 @@ def process_convert(arguments):
     p.add_argument('--input-dir', required=True, action=fixPathAction, dest="input_dir", help="Input directory. A directory containing the files you wish to process.")
     p.add_argument('--output-dir', required=True, action=fixPathAction, dest="output_dir", help="Output directory. This is where the converted files will be stored.")
     p.add_argument('--aligned-dir', action=fixPathAction, dest="aligned_dir", help="Aligned directory. This is where the extracted dst faces are stored.")
-    p.add_argument('--avaperator-aligned-dir', action=fixPathAction, dest="avaperator_aligned_dir", help="Only for AVATAR model. Directory of aligned avatar operator faces.")
     p.add_argument('--model-dir', required=True, action=fixPathAction, dest="model_dir", help="Model dir.")
     p.add_argument('--model', required=True, dest="model_name", choices=Path_utils.get_all_dir_names_startswith ( Path(__file__).parent / 'models' , 'Model_'), help="Type of model")
     p.add_argument('--debug', action="store_true", dest="debug", default=False, help="Debug converter.")
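
With the avatar-operator directory gone, a conversion run needs only the standard directories. An illustrative invocation (paths hypothetical; 'AVATAR' assumed to be the registered model name, as the removed help text implies, and assuming a 'convert' subcommand, as process_convert suggests):

    python main.py convert --input-dir workspace/data_dst \
                           --output-dir workspace/data_dst/merged \
                           --aligned-dir workspace/data_dst/aligned \
                           --model-dir workspace/model \
                           --model AVATAR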
