Skip to content
Permalink
Browse files

removing trailing spaces

  • Loading branch information...
iperov committed Mar 19, 2019
1 parent fa4e579 commit a3df04999cfff13de8ae1369c37a337ed8eecdfd
Showing with 2,104 additions and 2,097 deletions.
  1. +8 −8 converters/Converter.py
  2. +7 −7 converters/ConverterImage.py
  3. +10 −10 facelib/DLIBExtractor.py
  4. +25 −25 facelib/FANSegmentator.py
  5. +6 −7 facelib/FaceType.py
  6. +26 −26 facelib/LandmarksExtractor.py
  7. +72 −72 facelib/LandmarksProcessor.py
  8. +29 −29 facelib/MTCExtractor.py
  9. +14 −14 facelib/S3FDExtractor.py
  10. +5 −5 imagelib/estimate_sharpness.py
  11. +1 −1 interact/__init__.py
  12. +44 −44 interact/interact.py
  13. +7 −7 joblib/SubprocessFunctionCaller.py
  14. +50 −50 joblib/SubprocessorBase.py
  15. +1 −1 joblib/__init__.py
  16. +68 −68 main.py
  17. +79 −79 mainscripts/Converter.py
  18. +143 −143 mainscripts/Extractor.py
  19. +84 −84 mainscripts/LabelingTool_unfinished.py
  20. +160 −160 mainscripts/Sorter.py
  21. +59 −59 mainscripts/Trainer.py
  22. +30 −30 mainscripts/Util.py
  23. +46 −47 mainscripts/VideoEd.py
  24. +6 −6 mathlib/__init__.py
  25. +1 −1 mathlib/umeyama.py
  26. +131 −131 models/ModelBase.py
  27. +52 −52 models/Model_DF/Model.py
  28. +1 −1 models/Model_DF/__init__.py
  29. +23 −23 models/Model_FANSegmentator/Model.py
  30. +1 −1 models/Model_FANSegmentator/__init__.py
  31. +55 −55 models/Model_H128/Model.py
  32. +1 −1 models/Model_H128/__init__.py
  33. +57 −57 models/Model_H64/Model.py
  34. +1 −1 models/Model_H64/__init__.py
  35. +49 −49 models/Model_LIAEF128/Model.py
  36. +1 −1 models/Model_LIAEF128/__init__.py
  37. +247 −233 models/Model_SAE/Model.py
  38. +1 −1 models/Model_SAE/__init__.py
  39. +1 −1 models/__init__.py
  40. +2 −2 nnlib/CAInitializer.py
  41. +1 −1 nnlib/__init__.py
  42. +55 −56 nnlib/device.py
  43. +0 −1 nnlib/nnlib.py
  44. +75 −75 nnlib/pynvml.py
  45. +16 −16 samples/Sample.py
  46. +6 −7 samples/SampleGeneratorBase.py
  47. +32 −32 samples/SampleGeneratorFace.py
  48. +19 −19 samples/SampleGeneratorImageTemporal.py
  49. +46 −46 samples/SampleLoader.py
  50. +48 −48 samples/SampleProcessor.py
  51. +1 −1 samples/__init__.py
  52. +52 −52 utils/DFLJPG.py
  53. +27 −27 utils/DFLPNG.py
  54. +15 −15 utils/Path_utils.py
  55. +2 −2 utils/cv2_utils.py
  56. +76 −77 utils/image_utils.py
  57. +12 −12 utils/iter_utils.py
  58. +4 −4 utils/os_utils.py
  59. +3 −3 utils/random_utils.py
  60. +9 −9 utils/std_utils.py
  61. +1 −2 utils/struct_utils.py
@@ -7,34 +7,34 @@ class Converter(object):
class Converter(object):
    """Base class for frame converters.

    Subclasses override convert_face / convert_image / dummy_predict.
    The `type` constant tells the conversion pipeline which entry point
    to call for a given converter instance.
    """

    TYPE_FACE = 0                   #calls convert_face
    TYPE_IMAGE = 1                  #calls convert_image without landmarks
    TYPE_IMAGE_WITH_LANDMARKS = 2   #calls convert_image with landmarks

    #overridable
    def __init__(self, predictor_func, type):
        # NOTE: the parameter name 'type' shadows the builtin, but it is kept
        # as-is because callers pass it positionally/by this name.
        self.predictor_func = predictor_func
        self.type = type

    #overridable
    def convert_face (self, img_bgr, img_face_landmarks, debug):
        #return float32 image
        #if debug , return tuple ( images of any size and channels, ...)
        # NOTE(review): 'image' is undefined here — this base implementation is
        # a documentation stub and raises NameError if called un-overridden.
        return image

    #overridable
    def convert_image (self, img_bgr, img_landmarks, debug):
        #img_landmarks not None, if input image is png with embedded data
        #return float32 image
        #if debug , return tuple ( images of any size and channels, ...)
        return image

    #overridable
    def dummy_predict(self):
        #do dummy predict here
        pass

    def copy(self):
        # shallow copy: predictor_func and any other attributes are shared
        return copy.copy(self)

    def copy_and_set_predictor(self, predictor_func):
        """Return a shallow copy of this converter with a replaced predictor."""
        result = self.copy()
        result.predictor_func = predictor_func
        return result
@@ -7,7 +7,7 @@
from utils import image_utils

'''
predictor_func:
 input: [predictor_input_size, predictor_input_size, BGR]
 output: [predictor_input_size, predictor_input_size, BGR]
'''
@@ -16,18 +16,18 @@ class ConverterImage(Converter):

#override
def __init__(self, predictor_func,
             predictor_input_size=0,
             output_size=0):
    """Whole-image converter (no landmarks required).

    predictor_func        -- callable mapping a (S, S, BGR) float32 image to one
    predictor_input_size  -- side length S the predictor expects
    output_size           -- side length of the converted output image
    """
    super().__init__(predictor_func, Converter.TYPE_IMAGE)

    self.predictor_input_size = predictor_input_size
    self.output_size = output_size

#override
def dummy_predict(self):
    """Warm up the predictor by running it once on an all-zero input image."""
    blank = np.zeros((self.predictor_input_size, self.predictor_input_size, 3),
                     dtype=np.float32)
    self.predictor_func(blank)

#override
def convert_image (self, img_bgr, img_landmarks, debug):
# (width, height) of the input frame — note numpy/cv2 shape order is (h, w, ch)
img_size = img_bgr.shape[1], img_bgr.shape[0]
# NOTE(review): the rest of this method lies beyond this excerpt (diff hunk ends here).
@@ -4,36 +4,36 @@

from pathlib import Path

class DLIBExtractor(object):
    """Face detector backed by dlib's CNN (MMOD) face detection model.

    Used as a context manager: the model is loaded in __enter__ and released
    in __exit__.
    """

    def __init__(self, dlib):
        #1850 works on 2GB 850M notebook card, works faster than 3100, produces good result
        #3100 eats ~1.687GB VRAM on 2GB 730 desktop card, but >4Gb on 6GB card,
        #but 3100 doesnt work on 2GB 850M notebook card, I cant understand this behaviour
        self.scale_to = 1850
        self.dlib = dlib

    def __enter__(self):
        # load the bundled MMOD model, then run one dummy detection to warm up
        # the detector (pre-allocates VRAM at the working resolution)
        self.dlib_cnn_face_detector = self.dlib.cnn_face_detection_model_v1( str(Path(__file__).parent / "mmod_human_face_detector.dat") )
        self.dlib_cnn_face_detector ( np.zeros ( (self.scale_to, self.scale_to, 3), dtype=np.uint8), 0 )
        return self

    def __exit__(self, exc_type=None, exc_value=None, traceback=None):
        del self.dlib_cnn_face_detector
        return False #pass exception between __enter__ and __exit__ to outter level

    def extract_from_bgr (self, input_image):
        """Detect faces; returns a list of (left, top, right, bottom) tuples
        in the coordinate system of the original (unscaled) image."""
        # dlib expects RGB while OpenCV delivers BGR: reverse the channel axis
        input_image = input_image[:,:,::-1].copy()
        (h, w, ch) = input_image.shape

        # scale so the longest side equals self.scale_to before detection
        input_scale = self.scale_to / (w if w > h else h)
        input_image = cv2.resize (input_image, ( int(w*input_scale), int(h*input_scale) ), interpolation=cv2.INTER_LINEAR)
        detected_faces = self.dlib_cnn_face_detector(input_image, 0)

        result = []
        for d_rect in detected_faces:
            # the CNN detector yields mmod_rectangle wrappers; unwrap to a plain rect
            if type(d_rect) == self.dlib.mmod_rectangle:
                d_rect = d_rect.rect
            left, top, right, bottom = d_rect.left(), d_rect.top(), d_rect.right(), d_rect.bottom()
            # map coordinates back to the original image scale
            result.append ( (int(left/input_scale), int(top/input_scale), int(right/input_scale), int(bottom/input_scale)) )

        # NOTE(review): the excerpt cuts off here; returning the accumulated
        # rects is the only coherent completion of this method — confirm
        # against the full file.
        return result

@@ -8,16 +8,16 @@
class FANSegmentator(object):
# Face-segmentation model wrapper (named after FAN; exact architecture built in BuildModel).
def __init__ (self, resolution, face_type_str, load_weights=True, weights_file_root=None):
# pulls backend names (Model, layers, initializers, ...) into scope — same
# exec(nnlib.import_all()) pattern used by the builder methods below
exec( nnlib.import_all(), locals(), globals() )

self.model = FANSegmentator.BuildModel(resolution, ngf=32)

# weights are stored next to this file unless an explicit root is given
if weights_file_root:
weights_file_root = Path(weights_file_root)
else:
weights_file_root = Path(__file__).parent

# filename encodes resolution and face type, e.g. 'FANSeg_256_full_face.h5'
self.weights_path = weights_file_root / ('FANSeg_%d_%s.h5' % (resolution, face_type_str) )

if load_weights:
self.model.load_weights (str(self.weights_path))
else:
# NOTE(review): the else-branch body lies beyond this excerpt (diff hunk is cut here).
@@ -31,19 +31,19 @@ def __init__ (self, resolution, face_type_str, load_weights=True, weights_file_r

def __enter__(self):
    """Context-manager entry; no additional setup is required."""
    return self

def __exit__(self, exc_type=None, exc_value=None, traceback=None):
    """Context-manager exit; returning False propagates any in-flight exception."""
    return False #pass exception between __enter__ and __exit__ to outter level

def save_weights(self):
    """Persist the model's weights to self.weights_path."""
    path_str = str(self.weights_path)
    self.model.save_weights(path_str)

def train_on_batch(self, inp, outp):
    """Run a single optimization step on one batch; returns the model's result."""
    return self.model.train_on_batch(inp, outp)

def extract_from_bgr (self, input_image):
    """Predict the segmentation mask and map the model's [-1, 1] output to [0, 1]."""
    raw = self.model.predict(input_image)
    scaled = (raw + 1) / 2.0
    return np.clip(scaled, 0, 1.0)

@staticmethod
def BuildModel ( resolution, ngf=64):
exec( nnlib.import_all(), locals(), globals() )
@@ -53,7 +53,7 @@ def BuildModel ( resolution, ngf=64):
x = FANSegmentator.DecFlow(ngf=ngf)(x)
model = Model(inp,x)
return model

@staticmethod
def EncFlow(ngf=64, num_downs=4):
exec( nnlib.import_all(), locals(), globals() )
@@ -65,43 +65,43 @@ def XNormalization(x):
def downscale (dim):
def func(x):
return LeakyReLU(0.1)(XNormalization(Conv2D(dim, kernel_size=5, strides=2, padding='same', kernel_initializer=RandomNormal(0, 0.02))(x)))
return func
def func(input):
return func

def func(input):
x = input

result = []
for i in range(num_downs):
x = downscale ( min(ngf*(2**i), ngf*8) )(x)
result += [x]
result += [x]

return result
return func

@staticmethod
def DecFlow(output_nc=1, ngf=64, activation='tanh'):
    """Build the decoder: upscales the encoder feature pyramid back to a mask.

    output_nc  -- number of output channels (1 = single mask)
    ngf        -- base filter count, mirrored from the encoder
    activation -- final layer activation ('tanh' pairs with the [-1,1] -> [0,1]
                  remapping done in extract_from_bgr)
    Returns a function mapping the encoder's list of feature maps to the output tensor.
    """
    exec (nnlib.import_all(), locals(), globals())

    use_bias = True
    def XNormalization(x):
        return InstanceNormalization (axis=3, gamma_initializer=RandomNormal(1., 0.02))(x)

    # local Conv2D wrapper pinning use_bias and the RandomNormal initializer
    def Conv2D (filters, kernel_size, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1), activation=None, use_bias=use_bias, kernel_initializer=RandomNormal(0, 0.02), bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None):
        return keras.layers.Conv2D( filters=filters, kernel_size=kernel_size, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate, activation=activation, use_bias=use_bias, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer, kernel_regularizer=kernel_regularizer, bias_regularizer=bias_regularizer, activity_regularizer=activity_regularizer, kernel_constraint=kernel_constraint, bias_constraint=bias_constraint )

    def upscale (dim):
        # conv -> norm -> LeakyReLU -> pixel-shuffle x2
        def func(x):
            return SubpixelUpscaler()( LeakyReLU(0.1)(XNormalization(Conv2D(dim, kernel_size=3, strides=1, padding='same', kernel_initializer=RandomNormal(0, 0.02))(x))))
        return func

    def func(input):
        # `input` is the encoder's list of feature maps, deepest last
        input_len = len(input)

        x = input[input_len-1]
        for i in range(input_len-1, -1, -1):
            x = upscale( min(ngf* (2**i) *4, ngf*8 *4 ) )(x)
            if i != 0:
                # U-Net style skip connection from the matching encoder level
                x = Concatenate(axis=3)([ input[i-1] , x])

        return Conv2D(output_nc, 3, 1, 'same', activation=activation)(x)
    return func
@@ -3,7 +3,7 @@
class FaceType(IntEnum):
    """Supported face crop/alignment types."""
    # NOTE: the trailing commas make each raw value a 1-tuple; Enum unpacks the
    # tuple as arguments to the mixed-in int(), so members still equal 0..5.
    HALF = 0,
    FULL = 1,
    HEAD = 2,
    AVATAR = 3, #centered nose only
    MARK_ONLY = 4, #no align at all, just embedded faceinfo
    QTY = 5
@@ -13,12 +13,12 @@ def fromString (s):
r = from_string_dict.get (s.lower())
if r is None:
raise Exception ('FaceType.fromString value error')
return r
@staticmethod
return r

@staticmethod
def toString (face_type):
    """Map a FaceType value to its canonical string name via to_string_list."""
    return to_string_list[face_type]

from_string_dict = {'half_face': FaceType.HALF,
'full_face': FaceType.FULL,
'head' : FaceType.HEAD,
@@ -29,6 +29,5 @@ def toString (face_type):
'full_face',
'head',
'avatar',
'mark_only'
'mark_only'
]

Oops, something went wrong.

0 comments on commit a3df049

Please sign in to comment.
You can’t perform that action at this time.