
fixed GPU detection and indexes: got rid of nvml, now querying the cuda lib directly for gpu info, so device indexes match tensorflow's.

removed TrueFace model.
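As an illustration of the approach (a minimal sketch, not this repo's actual device-detection code): the CUDA driver library can be queried directly via ctypes using documented libcuda entry points. NVML enumerates GPUs by PCI bus id while TensorFlow follows CUDA's enumeration order, so the two indexings can disagree on multi-GPU machines; reading device info straight from libcuda keeps them aligned. The helper name below is made up.

# Hypothetical sketch: NVML-free GPU enumeration via the CUDA driver API.
# All entry points (cuInit, cuDeviceGetCount, ...) are documented libcuda
# functions; this is not DeepFaceLab's actual device code.
import ctypes

def enumerate_cuda_devices():
    lib = ctypes.CDLL("libcuda.so")          # "nvcuda.dll" on Windows
    if lib.cuInit(0) != 0:                   # 0 == CUDA_SUCCESS
        return []

    count = ctypes.c_int(0)
    lib.cuDeviceGetCount(ctypes.byref(count))

    devices = []
    for idx in range(count.value):           # CUDA order, which TF's /gpu:N follows
        dev = ctypes.c_int(0)
        lib.cuDeviceGet(ctypes.byref(dev), idx)

        name = ctypes.create_string_buffer(200)
        lib.cuDeviceGetName(name, 200, dev)

        total_mem = ctypes.c_size_t(0)
        lib.cuDeviceTotalMem_v2(ctypes.byref(total_mem), dev)

        cc_major, cc_minor = ctypes.c_int(0), ctypes.c_int(0)
        lib.cuDeviceComputeCapability(ctypes.byref(cc_major),
                                      ctypes.byref(cc_minor), dev)

        devices.append({"index": idx,
                        "name": name.value.decode(),
                        "total_mem": total_mem.value,
                        "cc": (cc_major.value, cc_minor.value)})
    return devices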

added SAEv2 model. Differences from SAE:
+ default e_ch_dims is now 21
+ the new encoder produces a more stable face with less scale jitter
  before: https://i.imgur.com/4jUcol8.gifv
  after:  https://i.imgur.com/lyiax49.gifv - the scale of the face changes less within the frame
+ the decoder now has only 1 residual block instead of 2; the result is the same quality with a smaller decoder
+ added a mid-full face type, which covers 30% more area than half face (see the padding sketch after the get_transform_mat hunk below)
+ added the option "Enable 'true face' training".
  Enable it only after 50k iterations, when the face is sharp enough.
  The resulting face will look more like src.
  The most src-like face with 'true face' training is achieved with the DF architecture.
iperov committed Oct 5, 2019
1 parent 353bcdf commit d781af3d1f585c4a0bec175b5f64ba55446a22d8
@@ -71,17 +71,17 @@ def ConvertMaskedFace (predictor_func, predictor_input_shape, cfg, frame_info, i

     if cfg.face_type == FaceType.FULL:
         FAN_dst_face_mask_a_0 = cv2.resize (dst_face_fanseg_mask, (output_size,output_size), cv2.INTER_CUBIC)
-    elif cfg.face_type == FaceType.HALF:
-        half_face_fanseg_mat = LandmarksProcessor.get_transform_mat (img_face_landmarks, cfg.fanseg_input_size, face_type=FaceType.HALF)
+    else:
+        face_fanseg_mat = LandmarksProcessor.get_transform_mat (img_face_landmarks, cfg.fanseg_input_size, face_type=cfg.face_type)
 
         fanseg_rect_corner_pts = np.array ( [ [0,0], [cfg.fanseg_input_size-1,0], [0,cfg.fanseg_input_size-1] ], dtype=np.float32 )
-        a = LandmarksProcessor.transform_points (fanseg_rect_corner_pts, half_face_fanseg_mat, invert=True )
+        a = LandmarksProcessor.transform_points (fanseg_rect_corner_pts, face_fanseg_mat, invert=True )
         b = LandmarksProcessor.transform_points (a, full_face_fanseg_mat )
         m = cv2.getAffineTransform(b, fanseg_rect_corner_pts)
         FAN_dst_face_mask_a_0 = cv2.warpAffine(dst_face_fanseg_mask, m, (cfg.fanseg_input_size,)*2, flags=cv2.INTER_CUBIC )
         FAN_dst_face_mask_a_0 = cv2.resize (FAN_dst_face_mask_a_0, (output_size,output_size), cv2.INTER_CUBIC)
-    else:
-        raise ValueError ("cfg.face_type unsupported")
+    #else:
+    #    raise ValueError ("cfg.face_type unsupported")
 
     if cfg.mask_mode == 3: #FAN-prd
         prd_face_mask_a_0 = FAN_prd_face_mask_a_0
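Side note on the corner-point trick in this hunk: a 2x3 affine map is determined by the images of three non-collinear points, so sending the fanseg crop corners out through one landmark transform and back in through another pins down a single warp between the two crops. A standalone sketch of the same composition, assuming nothing from this repo (the two matrices below are made-up stand-ins for get_transform_mat results):

# Self-contained sketch of the three-corner affine composition above.
# face_mat / full_mat are placeholder values, not output from this repo.
import cv2
import numpy as np

fanseg_size = 256
face_mat = np.array([[0.5, 0.0, 10.0], [0.0, 0.5, 20.0]], np.float32)  # image -> face-type crop
full_mat = np.array([[0.4, 0.0, 30.0], [0.0, 0.4,  5.0]], np.float32)  # image -> full-face crop

def transform_points(points, mat, invert=False):
    if invert:
        mat = cv2.invertAffineTransform(mat)
    return cv2.transform(points[None, ...], mat)[0]

# Three non-collinear corners pin down an affine map exactly.
corners = np.array([[0, 0], [fanseg_size - 1, 0], [0, fanseg_size - 1]], np.float32)
a = transform_points(corners, face_mat, invert=True)  # crop corners -> image coords
b = transform_points(a, full_mat)                     # image coords -> full-face crop
m = cv2.getAffineTransform(b, corners)                # full-face crop -> face-type crop

mask = np.zeros((fanseg_size, fanseg_size), np.float32)
remapped = cv2.warpAffine(mask, m, (fanseg_size, fanseg_size), flags=cv2.INTER_CUBIC)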
@@ -117,8 +117,8 @@ def __init__(self, face_type=FaceType.FULL,
         super().__init__(type=ConverterConfig.TYPE_MASKED)
 
         self.face_type = face_type
-        if self.face_type not in [FaceType.FULL, FaceType.HALF]:
-            raise ValueError("ConverterConfigMasked supports only full or half face masks.")
+        if self.face_type not in [FaceType.HALF, FaceType.MID_FULL, FaceType.FULL ]:
+            raise ValueError("ConverterConfigMasked does not support this type of face.")
 
         self.default_mode = default_mode
         self.clip_hborder_mask_per = clip_hborder_mask_per
@@ -2,11 +2,12 @@

 class FaceType(IntEnum):
     #enumerating in order "next contains prev"
-    HALF = 0,
-    FULL = 1,
-    FULL_NO_ALIGN = 3,
-    HEAD = 4,
-    HEAD_NO_ALIGN = 5,
+    HALF = 0
+    MID_FULL = 1
+    FULL = 2
+    FULL_NO_ALIGN = 3
+    HEAD = 4
+    HEAD_NO_ALIGN = 5
 
     MARK_ONLY = 10, #no align at all, just embedded faceinfo

@@ -22,13 +23,15 @@ def toString (face_type):
         return to_string_dict[face_type]
 
     from_string_dict = {'half_face': FaceType.HALF,
+                        'midfull_face': FaceType.MID_FULL,
                         'full_face': FaceType.FULL,
                         'head' : FaceType.HEAD,
                         'mark_only' : FaceType.MARK_ONLY,
                         'full_face_no_align' : FaceType.FULL_NO_ALIGN,
                         'head_no_align' : FaceType.HEAD_NO_ALIGN,
                        }
     to_string_dict = { FaceType.HALF : 'half_face',
+                       FaceType.MID_FULL : 'midfull_face',
                        FaceType.FULL : 'full_face',
                        FaceType.HEAD : 'head',
                        FaceType.MARK_ONLY :'mark_only',
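The enum comment "enumerating in order 'next contains prev'" is why MID_FULL takes value 1 and FULL shifts to 2, rather than MID_FULL grabbing a free number: code can then compare face types numerically to ask whether one crop covers at least another. A tiny sketch of that invariant (assumed usage, not code from this repo):

# Sketch: with "next contains prev" numbering, an ordering comparison
# stands in for "covers at least this much of the face". Assumed usage.
from enum import IntEnum

class FaceType(IntEnum):
    HALF = 0
    MID_FULL = 1
    FULL = 2
    FULL_NO_ALIGN = 3
    HEAD = 4
    HEAD_NO_ALIGN = 5

def covers_at_least(face_type, required):
    return face_type >= required

assert covers_at_least(FaceType.MID_FULL, FaceType.HALF)      # mid-full contains half
assert not covers_at_least(FaceType.MID_FULL, FaceType.FULL)  # but not full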
@@ -271,6 +271,8 @@ def get_transform_mat (image_landmarks, output_size, face_type, scale=1.0):

     if face_type == FaceType.HALF:
         padding = 0
+    elif face_type == FaceType.MID_FULL:
+        padding = int(output_size * 0.06)
     elif face_type == FaceType.FULL:
         padding = (output_size / 64) * 12
     elif face_type == FaceType.HEAD:
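Plugging a typical output_size of 256 into the formulas in this hunk gives the concrete margins per face type (pure arithmetic, shown for illustration):

# Padding per face type at output_size = 256, per the branch above.
output_size = 256
padding = {
    "HALF":     0,
    "MID_FULL": int(output_size * 0.06),  # 15
    "FULL":     (output_size / 64) * 12,  # 48.0
}

So mid-full sits between half (no margin) and full (48 px of margin), roughly in line with the "covers 30% more area than half face" figure in the commit message.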
@@ -435,9 +437,6 @@ def get_dists(name, thickness=0):
                              )
                    )
 
-
-    #import code
-    #code.interact(local=dict(globals(), **locals()))
     eyes_fall_dist = w // 32
     eyes_thickness = max( w // 64, 1 )




BIN +6.94 KB (100%) models/archived_models.zip
