refactorings

iperov committed Dec 22, 2019
1 parent e0e8970 commit 754d6c385c88a077fc3a70e1b8588a3422d5f5a7
@@ -5,7 +5,6 @@
 import numpy as np
 
 from facelib import FaceType
-from imagelib import IEPolys
 from utils.struct_utils import *
 from interact import interact as io
 
@@ -306,7 +305,7 @@ def setDFLDictData (self, dict_data=None):

     def get_face_type(self): return self.dfl_dict['face_type']
     def get_landmarks(self): return np.array ( self.dfl_dict['landmarks'] )
-    def get_ie_polys(self): return IEPolys.load(self.dfl_dict.get('ie_polys',None))
+    def get_ie_polys(self): return self.dfl_dict.get('ie_polys',None)
     def get_source_filename(self): return self.dfl_dict['source_filename']
     def get_source_rect(self): return self.dfl_dict['source_rect']
     def get_source_landmarks(self): return np.array ( self.dfl_dict['source_landmarks'] )
@@ -7,7 +7,6 @@
 import numpy as np
 
 from facelib import FaceType
-from imagelib import IEPolys
 
 PNG_HEADER = b"\x89PNG\r\n\x1a\n"
 
@@ -413,7 +412,7 @@ def setDFLDictData (self, dict_data=None):

     def get_face_type(self): return self.dfl_dict['face_type']
     def get_landmarks(self): return np.array ( self.dfl_dict['landmarks'] )
-    def get_ie_polys(self): return IEPolys.load(self.dfl_dict.get('ie_polys',None))
+    def get_ie_polys(self): return self.dfl_dict.get('ie_polys',None)
     def get_source_filename(self): return self.dfl_dict['source_filename']
     def get_source_rect(self): return self.dfl_dict['source_rect']
     def get_source_landmarks(self): return np.array ( self.dfl_dict['source_landmarks'] )
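Note on the two DFLJPG/DFLPNG hunks above: get_ie_polys() no longer builds an IEPolys instance but returns the raw value stored in the embedded dict (a serialized list of (type, points) entries, or None), which is why the imagelib.IEPolys import disappears from both modules. Callers that need the object now wrap the raw value with IEPolys.load() and, when writing polygons back, pass the dumped list to embed_and_set(), exactly as the later hunks do. A minimal caller-side sketch, not part of the commit, assuming the package layout shown in the imports, the DFLJPG.load() reader used elsewhere in this codebase, and a hypothetical file name:

    from DFLIMG import DFLJPG
    from imagelib import IEPolys

    dflimg = DFLJPG.load("aligned_face.jpg")          # hypothetical aligned face image

    raw = dflimg.get_ie_polys()                       # raw serialized data (list or None)
    ie_polys = IEPolys.load(raw)                      # build the editable object on the caller side

    # ... edit include/exclude polygons on ie_polys ...

    dflimg.embed_and_set("aligned_face.jpg",
                         ie_polys=ie_polys.dump())    # store the plain list back into the image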
@@ -97,7 +97,7 @@ def dump(self):
     @staticmethod
     def load(ie_polys=None):
         obj = IEPolys()
-        if ie_polys is not None:
+        if ie_polys is not None and isinstance(ie_polys, list):
             for (type, points) in ie_polys:
                 obj.add(type)
                 obj.n_list().set_points(points)
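The added isinstance check makes IEPolys.load() tolerant of whatever the caller pulls out of an image: since get_ie_polys() now hands back raw dict data, load() may see None, a proper serialized list, or malformed legacy data, and anything that is not a list simply yields an empty IEPolys instead of crashing the loop. A small sketch of the behaviour this guard implies (the point coordinates are made up):

    from imagelib import IEPolys

    empty       = IEPolys.load(None)                                   # nothing stored -> empty object
    polys       = IEPolys.load([(1, [[10, 10], [60, 10], [35, 50]])])  # serialized (type, points) list -> rebuilt object
    still_empty = IEPolys.load("not-a-list")                           # non-list data is ignored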
@@ -403,7 +403,7 @@ def jobs_count():
                 continue
             else:
                 lmrks = dflimg.get_landmarks()
-                ie_polys = dflimg.get_ie_polys()
+                ie_polys = IEPolys.load(dflimg.get_ie_polys())
                 fanseg_mask = dflimg.get_fanseg_mask()
 
                 if filepath.name in cached_images:
@@ -521,7 +521,7 @@ def get_status_lines_func():
                 do_save_move_count -= 1
 
                 ed.mask_finish()
-                dflimg.embed_and_set (str(filepath), ie_polys=ed.get_ie_polys(), eyebrows_expand_mod=eyebrows_expand_mod )
+                dflimg.embed_and_set (str(filepath), ie_polys=ed.get_ie_polys().dump(), eyebrows_expand_mod=eyebrows_expand_mod )
 
                 done_paths += [ confirmed_path / filepath.name ]
                 done_images_types[filepath.name] = 2
@@ -532,7 +532,7 @@ def get_status_lines_func():
                 do_save_count -= 1
 
                 ed.mask_finish()
-                dflimg.embed_and_set (str(filepath), ie_polys=ed.get_ie_polys(), eyebrows_expand_mod=eyebrows_expand_mod )
+                dflimg.embed_and_set (str(filepath), ie_polys=ed.get_ie_polys().dump(), eyebrows_expand_mod=eyebrows_expand_mod )
 
                 done_paths += [ filepath ]
                 done_images_types[filepath.name] = 2
@@ -1,12 +1,15 @@
-import cv2
-import pickle
+import pickle
 from pathlib import Path
 
+import cv2
+
+from DFLIMG import *
 from facelib import LandmarksProcessor
+from imagelib import IEPolys
 from interact import interact as io
 from utils import Path_utils
 from utils.cv2_utils import *
-from DFLIMG import *
+
 
 def save_faceset_metadata_folder(input_path):
     input_path = Path(input_path)
@@ -167,7 +170,7 @@ def add_landmarks_debug_images(input_path):

         if img is not None:
             face_landmarks = dflimg.get_landmarks()
-            LandmarksProcessor.draw_landmarks(img, face_landmarks, transparent_mask=True, ie_polys=dflimg.get_ie_polys() )
+            LandmarksProcessor.draw_landmarks(img, face_landmarks, transparent_mask=True, ie_polys=IEPolys.load(dflimg.get_ie_polys()) )
 
             output_file = '{}{}'.format( str(Path(str(input_path)) / filepath.stem), '_debug.jpg')
             cv2_imwrite(output_file, img, [int(cv2.IMWRITE_JPEG_QUALITY), 50] )
@@ -5,7 +5,7 @@
 import cv2
 import numpy as np
 
-from DFLIMG import DFLIMG
+from DFLIMG import *
 from facelib import FaceType, LandmarksProcessor
 from interact import interact as io
 from joblib import Subprocessor
@@ -475,7 +475,7 @@ def dev_test(input_dir):

     dir_names = Path_utils.get_all_dir_names(input_path)
 
-    for dir_name in dir_names:
+    for dir_name in io.progress_bar_generator(dir_names, desc="Processing"):
 
         img_paths = Path_utils.get_image_paths (input_path / dir_name)
         for filename in img_paths:
@@ -485,7 +485,9 @@ def dev_test(input_dir):
             if dflimg is None:
                 raise ValueError
 
-            import code
-            code.interact(local=dict(globals(), **locals()))
+            dflimg.embed_and_set(filename, person_name=dir_name)
+
+            #import code
+            #code.interact(local=dict(globals(), **locals()))
 
 
@@ -1,13 +1,12 @@
 import pickle
 import shutil
 import struct
 from pathlib import Path
 
-from interact import interact as io
-from utils import Path_utils
-
 
 import samplelib.SampleHost
+from interact import interact as io
 from samplelib import Sample
+from utils import Path_utils
 
 packed_faceset_filename = 'faceset.pak'

@@ -24,15 +23,30 @@ def pack(samples_path):

         of = open(samples_dat_path, "wb")
 
-        image_paths = Path_utils.get_image_paths(samples_path)
+        as_person_faceset = False
+        dir_names = Path_utils.get_all_dir_names(samples_path)
+        if len(dir_names) != 0:
+            as_person_faceset = io.input_bool(f"{len(dir_names)} subdirectories found, process as person faceset? (y/n) skip:y : ", True)
+
+        if as_person_faceset:
+            image_paths = []
+
+            for dir_name in dir_names:
+                image_paths += Path_utils.get_image_paths(samples_path / dir_name)
+        else:
+            image_paths = Path_utils.get_image_paths(samples_path)
 
 
         samples = samplelib.SampleHost.load_face_samples(image_paths)
         samples_len = len(samples)
 
         samples_configs = []
         for sample in samples:
-            sample.filename = str(Path(sample.filename).relative_to(samples_path))
+            sample_filepath = Path(sample.filename)
+            sample.filename = sample_filepath.name
+
+            if as_person_faceset:
+                sample.person_name = sample_filepath.parent.name
             samples_configs.append ( sample.get_config() )
         samples_bytes = pickle.dumps(samples_configs, 4)
 
@@ -48,7 +62,12 @@ def pack(samples_path):

         for sample in io.progress_bar_generator(samples, "Packing"):
             try:
-                with open( samples_path / sample.filename, "rb") as f:
+                if sample.person_name is not None:
+                    sample_path = samples_path / sample.person_name / sample.filename
+                else:
+                    sample_path = samples_path / sample.filename
+
+                with open(sample_path, "rb") as f:
                     b = f.read()
 
                 offsets.append ( of.tell() - data_start_offset )
@@ -67,6 +86,13 @@ def pack(samples_path):
         for filename in io.progress_bar_generator(image_paths,"Deleting"):
             Path(filename).unlink()
 
+        if as_person_faceset:
+            for dir_name in dir_names:
+                dir_path = samples_path / dir_name
+                try:
+                    shutil.rmtree(dir_path)
+                except:
+                    io.log_info (f"unable to remove: {dir_path} ")
 
     @staticmethod
     def unpack(samples_path):
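Taken together, the three pack() hunks above add support for a "person faceset" layout in which every subdirectory holds one person's images: paths are collected per subdirectory, each sample keeps only its bare file name, the parent directory name is recorded as person_name, and the emptied subdirectories are deleted after packing. The unpack() hunk below mirrors this by recreating one directory per person_name. A sketch of the mapping, with hypothetical directory and file names:

    from pathlib import Path

    # Layout consumed by pack() when "process as person faceset" is confirmed:
    #   aligned/
    #       person_A/ 00001.jpg 00002.jpg ...
    #       person_B/ 00001.jpg ...
    samples_path    = Path("aligned")                 # hypothetical faceset root
    sample_filepath = Path("person_A/00001.jpg")      # one image, relative to samples_path

    filename    = sample_filepath.name                # "00001.jpg"  -> stored as sample.filename
    person_name = sample_filepath.parent.name         # "person_A"   -> stored as sample.person_name

    # unpack() reassembles the same path from the two stored fields:
    target_filepath = samples_path / person_name / filename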
@@ -78,7 +104,16 @@ def unpack(samples_path):
         samples = PackedFaceset.load(samples_path)
 
         for sample in io.progress_bar_generator(samples, "Unpacking"):
-            with open(samples_path / sample.filename, "wb") as f:
+            person_name = sample.person_name
+            if person_name is not None:
+                person_path = samples_path / person_name
+                person_path.mkdir(parents=True, exist_ok=True)
+
+                target_filepath = person_path / sample.filename
+            else:
+                target_filepath = samples_path / sample.filename
+
+            with open(target_filepath, "wb") as f:
                 f.write( sample.read_raw_file() )
 
         samples_dat_path.unlink()
@@ -110,4 +145,3 @@ def load(samples_path):
             sample.set_filename_offset_size( str(samples_dat_path), data_start_offset+start_offset, end_offset-start_offset )
 
         return samples
-
@@ -7,6 +7,7 @@
 from utils.cv2_utils import *
 from DFLIMG import *
 from facelib import LandmarksProcessor
+from imagelib import IEPolys
 
 class SampleType(IntEnum):
     IMAGE = 0 #raw image
@@ -50,11 +51,13 @@ def __init__(self, sample_type=None,
         self.face_type = face_type
         self.shape = shape
         self.landmarks = np.array(landmarks) if landmarks is not None else None
-        self.ie_polys = ie_polys
+        self.ie_polys = IEPolys.load(ie_polys)
         self.eyebrows_expand_mod = eyebrows_expand_mod
         self.source_filename = source_filename
         self.person_name = person_name
         self.pitch_yaw_roll = pitch_yaw_roll
 
+        self._filename_offset_size = None
+
     def get_pitch_yaw_roll(self):
         if self.pitch_yaw_roll is None:
@@ -84,7 +87,7 @@ def get_config(self):
                 'face_type': self.face_type,
                 'shape': self.shape,
                 'landmarks': self.landmarks.tolist(),
-                'ie_polys': self.ie_polys,
+                'ie_polys': self.ie_polys.dump(),
                 'eyebrows_expand_mod': self.eyebrows_expand_mod,
                 'source_filename': self.source_filename,
                 'person_name': self.person_name
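With the two Sample hunks above, deserialization now lives in the sample itself: whatever raw ie_polys value arrives from the image or from a packed config is turned into an IEPolys object in __init__, and get_config() serializes it back with dump(). Assuming dump() emits the same list-of-(type, points) structure that load() consumes, a pack/unpack round trip keeps the polygons intact; a minimal sketch:

    from imagelib import IEPolys

    raw = [(1, [[10, 10], [60, 10], [35, 50]])]   # hypothetical serialized polys, e.g. from dflimg.get_ie_polys()

    obj = IEPolys.load(raw)        # what Sample.__init__ now does with its ie_polys argument
    round_trip = obj.dump()        # what Sample.get_config() now writes out

    # round_trip describes the same polygons as raw, so a faceset that is packed and
    # loaded again rebuilds equivalent IEPolys objects for every sample.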
@@ -31,7 +31,7 @@ def __init__ (self, samples_path, debug=False, batch_size=1,
         self.add_sample_idx = add_sample_idx
 
         samples_host = SampleHost.mp_host (SampleType.FACE, self.samples_path)
-        self.samples_len = len(samples_host)
+        self.samples_len = len(samples_host.get_list())
 
         if self.samples_len == 0:
             raise ValueError('No training data provided.')
@@ -40,7 +40,7 @@ def __init__ (self, samples_path, debug=False, batch_size=1,

         if random_ct_samples_path is not None:
             ct_samples_host = SampleHost.mp_host (SampleType.FACE, random_ct_samples_path)
-            ct_index_host = mp_utils.IndexHost( len(ct_samples_host) )
+            ct_index_host = mp_utils.IndexHost( len(ct_samples_host.get_list()) )
         else:
             ct_samples_host = None
             ct_index_host = None
@@ -76,7 +76,8 @@ def batch_func(self, param ):
             ct_indexes = ct_index_host.get(bs) if ct_samples is not None else None
 
             for n_batch in range(bs):
-                sample = samples[ indexes[n_batch] ]
+                sample_idx = indexes[n_batch]
+                sample = samples[ sample_idx ]
                 ct_sample = ct_samples[ ct_indexes[n_batch] ] if ct_samples is not None else None
 
                 try:
@@ -94,9 +95,5 @@ def batch_func(self, param ):
                     batches[i].append ( x[i] )
 
                 if self.add_sample_idx:
-                    batches[i_sample_idx].append (idx)
+                    batches[i_sample_idx].append (sample_idx)
             yield [ np.array(batch) for batch in batches]
-
-    @staticmethod
-    def get_person_id_max_count(samples_path):
-        return SampleHost.get_person_id_max_count(samples_path)
