Skip to content
This repository has been archived by the owner on Nov 9, 2023. It is now read-only.

Improved multi-GPU support on Windows for the merge command. #5662

Open
wants to merge 2 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
2 changes: 2 additions & 0 deletions core/leras/device.py
Original file line number Diff line number Diff line change
Expand Up @@ -68,6 +68,8 @@ def get_device_by_index(self, idx):
return None

def get_devices_from_index_list(self, idx_list):
if not isinstance(idx_list, list):
idx_list = [int(idx.strip()) for idx in idx_list.split(',')]
result = []
for device in self.devices:
if device.index in idx_list:
Expand Down
44 changes: 22 additions & 22 deletions main.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
if __name__ == "__main__":

# Fix for linux
import multiprocessing
multiprocessing.set_start_method("spawn")
Expand Down Expand Up @@ -68,26 +69,26 @@ def process_sort(arguments):
from mainscripts import Sorter
Sorter.main (input_path=Path(arguments.input_dir), sort_by_method=arguments.sort_by_method)

p = subparsers.add_parser( "sort", help="Sort faces in a directory.")
p = subparsers.add_parser("sort", help="Sort faces in a directory.")
p.add_argument('--input-dir', required=True, action=fixPathAction, dest="input_dir", help="Input directory. A directory containing the files you wish to process.")
p.add_argument('--by', dest="sort_by_method", default=None, choices=("blur", "motion-blur", "face-yaw", "face-pitch", "face-source-rect-size", "hist", "hist-dissim", "brightness", "hue", "black", "origname", "oneface", "final-by-blur", "final-by-size", "absdiff"), help="Method of sorting. 'origname' sort by original filename to recover original sequence." )
p.set_defaults (func=process_sort)
p.set_defaults(func=process_sort)

def process_util(arguments):
osex.set_process_lowest_prio()
from mainscripts import Util

if arguments.add_landmarks_debug_images:
Util.add_landmarks_debug_images (input_path=arguments.input_dir)
Util.add_landmarks_debug_images(input_path=arguments.input_dir)

if arguments.recover_original_aligned_filename:
Util.recover_original_aligned_filename (input_path=arguments.input_dir)
Util.recover_original_aligned_filename(input_path=arguments.input_dir)

if arguments.save_faceset_metadata:
Util.save_faceset_metadata_folder (input_path=arguments.input_dir)
Util.save_faceset_metadata_folder(input_path=arguments.input_dir)

if arguments.restore_faceset_metadata:
Util.restore_faceset_metadata_folder (input_path=arguments.input_dir)
Util.restore_faceset_metadata_folder(input_path=arguments.input_dir)

if arguments.pack_faceset:
io.log_info ("Performing faceset packing...\r\n")
Expand All @@ -97,11 +98,11 @@ def process_util(arguments):
if arguments.unpack_faceset:
io.log_info ("Performing faceset unpacking...\r\n")
from samplelib import PackedFaceset
PackedFaceset.unpack( Path(arguments.input_dir) )
PackedFaceset.unpack(Path(arguments.input_dir))

if arguments.export_faceset_mask:
io.log_info ("Exporting faceset mask..\r\n")
Util.export_faceset_mask( Path(arguments.input_dir) )
io.log_info("Exporting faceset mask..\r\n")
Util.export_faceset_mask(Path(arguments.input_dir))

p = subparsers.add_parser( "util", help="Utilities.")
p.add_argument('--input-dir', required=True, action=fixPathAction, dest="input_dir", help="Input directory. A directory containing the files you wish to process.")
Expand All @@ -113,7 +114,7 @@ def process_util(arguments):
p.add_argument('--unpack-faceset', action="store_true", dest="unpack_faceset", default=False, help="")
p.add_argument('--export-faceset-mask', action="store_true", dest="export_faceset_mask", default=False, help="")

p.set_defaults (func=process_util)
p.set_defaults(func=process_util)

def process_train(arguments):
osex.set_process_lowest_prio()
Expand All @@ -127,41 +128,40 @@ def process_train(arguments):
'pretrained_model_path' : Path(arguments.pretrained_model_dir) if arguments.pretrained_model_dir is not None else None,
'no_preview' : arguments.no_preview,
'force_model_name' : arguments.force_model_name,
'force_gpu_idxs' : [ int(x) for x in arguments.force_gpu_idxs.split(',') ] if arguments.force_gpu_idxs is not None else None,
'force_gpu_idxs' : [int(x) for x in arguments.force_gpu_idxs.split(',')] if arguments.force_gpu_idxs is not None else None,
'cpu_only' : arguments.cpu_only,
'silent_start' : arguments.silent_start,
'execute_programs' : [ [int(x[0]), x[1] ] for x in arguments.execute_program ],
'execute_programs' : [[int(x[0]), x[1] ] for x in arguments.execute_program],
'debug' : arguments.debug,
}
from mainscripts import Trainer
Trainer.main(**kwargs)

p = subparsers.add_parser( "train", help="Trainer")
p = subparsers.add_parser("train", help="Trainer")
p.add_argument('--training-data-src-dir', required=True, action=fixPathAction, dest="training_data_src_dir", help="Dir of extracted SRC faceset.")
p.add_argument('--training-data-dst-dir', required=True, action=fixPathAction, dest="training_data_dst_dir", help="Dir of extracted DST faceset.")
p.add_argument('--pretraining-data-dir', action=fixPathAction, dest="pretraining_data_dir", default=None, help="Optional dir of extracted faceset that will be used in pretraining mode.")
p.add_argument('--pretrained-model-dir', action=fixPathAction, dest="pretrained_model_dir", default=None, help="Optional dir of pretrain model files. (Currently only for Quick96).")
p.add_argument('--model-dir', required=True, action=fixPathAction, dest="model_dir", help="Saved models dir.")
p.add_argument('--model', required=True, dest="model_name", choices=pathex.get_all_dir_names_startswith ( Path(__file__).parent / 'models' , 'Model_'), help="Model class name.")
p.add_argument('--model', required=True, dest="model_name", choices=pathex.get_all_dir_names_startswith(Path(__file__).parent / 'models', 'Model_'), help="Model class name.")
p.add_argument('--debug', action="store_true", dest="debug", default=False, help="Debug samples.")
p.add_argument('--no-preview', action="store_true", dest="no_preview", default=False, help="Disable preview window.")
p.add_argument('--force-model-name', dest="force_model_name", default=None, help="Forcing to choose model name from model/ folder.")
p.add_argument('--cpu-only', action="store_true", dest="cpu_only", default=False, help="Train on CPU.")
p.add_argument('--force-gpu-idxs', dest="force_gpu_idxs", default=None, help="Force to choose GPU indexes separated by comma.")
p.add_argument('--silent-start', action="store_true", dest="silent_start", default=False, help="Silent start. Automatically chooses Best GPU and last used model.")

p.add_argument('--execute-program', dest="execute_program", default=[], action='append', nargs='+')
p.set_defaults (func=process_train)
p.set_defaults(func=process_train)

def process_exportdfm(arguments):
osex.set_process_lowest_prio()
from mainscripts import ExportDFM
ExportDFM.main(model_class_name = arguments.model_name, saved_models_path = Path(arguments.model_dir))
ExportDFM.main(model_class_name=arguments.model_name, saved_models_path=Path(arguments.model_dir))

p = subparsers.add_parser( "exportdfm", help="Export model to use in DeepFaceLive.")
p.add_argument('--model-dir', required=True, action=fixPathAction, dest="model_dir", help="Saved models dir.")
p.add_argument('--model', required=True, dest="model_name", choices=pathex.get_all_dir_names_startswith ( Path(__file__).parent / 'models' , 'Model_'), help="Model class name.")
p.set_defaults (func=process_exportdfm)
p.add_argument('--model', required=True, dest="model_name", choices=pathex.get_all_dir_names_startswith(Path(__file__).parent / 'models' , 'Model_'), help="Model class name.")
p.set_defaults(func=process_exportdfm)

def process_merge(arguments):
osex.set_process_lowest_prio()
Expand All @@ -188,13 +188,13 @@ def process_merge(arguments):
p.add_argument('--force-gpu-idxs', dest="force_gpu_idxs", default=None, help="Force to choose GPU indexes separated by comma.")
p.set_defaults(func=process_merge)

videoed_parser = subparsers.add_parser( "videoed", help="Video processing.").add_subparsers()
videoed_parser = subparsers.add_parser("videoed", help="Video processing.").add_subparsers()

def process_videoed_extract_video(arguments):
osex.set_process_lowest_prio()
from mainscripts import VideoEd
VideoEd.extract_video (arguments.input_file, arguments.output_dir, arguments.output_ext, arguments.fps)
p = videoed_parser.add_parser( "extract-video", help="Extract images from video file.")
VideoEd.extract_video(arguments.input_file, arguments.output_dir, arguments.output_ext, arguments.fps)
p = videoed_parser.add_parser("extract-video", help="Extract images from video file.")
p.add_argument('--input-file', required=True, action=fixPathAction, dest="input_file", help="Input file to be processed. Specify .*-extension to find first file.")
p.add_argument('--output-dir', required=True, action=fixPathAction, dest="output_dir", help="Output directory. This is where the extracted images will be stored.")
p.add_argument('--output-ext', dest="output_ext", default=None, help="Image format (extension) of output files.")
Expand Down
21 changes: 11 additions & 10 deletions requirements-cuda.txt
Original file line number Diff line number Diff line change
@@ -1,12 +1,13 @@
tqdm
numpy==1.19.3
numexpr
h5py==2.10.0
opencv-python==4.1.0.25
ffmpeg-python==0.1.17
scikit-image==0.14.2
scipy==1.4.1
colorama
tensorflow-gpu==2.4.0
cython==0.29.25
ffmpeg-python==0.1.17
h5py==2.10.0
numpy==1.20.1
numexpr
opencv-python==4.1.2.30
pyqt5
tf2onnx==1.9.3
scikit-image==0.18.3
scipy==1.10.1
tensorflow-gpu==2.4.1
tf2onnx==1.9.3
tqdm