Skip to content

Commit

Permalink
Introduce execution device id
Browse files Browse the repository at this point in the history
  • Loading branch information
henryruhs committed May 18, 2024
1 parent 176da11 commit 47cc665
Show file tree
Hide file tree
Showing 16 changed files with 35 additions and 21 deletions.
1 change: 1 addition & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,7 @@ misc:
--log-level {error,warn,info,debug} adjust the message severity displayed in the terminal
execution:
--execution-device-id EXECUTION_DEVICE_ID specify the device used for processing
--execution-providers EXECUTION_PROVIDERS [EXECUTION_PROVIDERS ...] accelerate the model inference using different providers (choices: cpu, ...)
--execution-thread-count [1-128] specify the amount of parallel threads while processing
--execution-queue-count [1-32] specify the amount of frames each thread is processing
Expand Down
1 change: 1 addition & 0 deletions facefusion.ini
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@ headless =
log_level =

[execution]
execution_device_id =
execution_providers =
execution_thread_count =
execution_queue_count =
Expand Down
2 changes: 1 addition & 1 deletion facefusion/content_analyser.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,7 @@ def get_content_analyser() -> Any:
sleep(0.5)
if CONTENT_ANALYSER is None:
model_path = MODELS.get('open_nsfw').get('path')
CONTENT_ANALYSER = onnxruntime.InferenceSession(model_path, providers = apply_execution_provider_options(facefusion.globals.execution_providers))
CONTENT_ANALYSER = onnxruntime.InferenceSession(model_path, providers = apply_execution_provider_options(facefusion.globals.execution_device_id, facefusion.globals.execution_providers))
return CONTENT_ANALYSER


Expand Down
2 changes: 2 additions & 0 deletions facefusion/core.py
Original file line number Diff line number Diff line change
Expand Up @@ -51,6 +51,7 @@ def cli() -> None:
# execution
execution_providers = encode_execution_providers(onnxruntime.get_available_providers())
group_execution = program.add_argument_group('execution')
group_execution.add_argument('--execution-device-id', help = wording.get('help.execution_device_id'), type = int, default = config.get_int_value('execution.execution_device_id', '0'))
group_execution.add_argument('--execution-providers', help = wording.get('help.execution_providers').format(choices = ', '.join(execution_providers)), default = config.get_str_list('execution.execution_providers', 'cpu'), choices = execution_providers, nargs = '+', metavar = 'EXECUTION_PROVIDERS')
group_execution.add_argument('--execution-thread-count', help = wording.get('help.execution_thread_count'), type = int, default = config.get_int_value('execution.execution_thread_count', '4'), choices = facefusion.choices.execution_thread_count_range, metavar = create_metavar(facefusion.choices.execution_thread_count_range))
group_execution.add_argument('--execution-queue-count', help = wording.get('help.execution_queue_count'), type = int, default = config.get_int_value('execution.execution_queue_count', '1'), choices = facefusion.choices.execution_queue_count_range, metavar = create_metavar(facefusion.choices.execution_queue_count_range))
Expand Down Expand Up @@ -141,6 +142,7 @@ def apply_args(program : ArgumentParser) -> None:
facefusion.globals.headless = args.headless
facefusion.globals.log_level = args.log_level
# execution
facefusion.globals.execution_device_id = args.execution_device_id
facefusion.globals.execution_providers = decode_execution_providers(args.execution_providers)
facefusion.globals.execution_thread_count = args.execution_thread_count
facefusion.globals.execution_queue_count = args.execution_queue_count
Expand Down
9 changes: 8 additions & 1 deletion facefusion/execution.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
import xml.etree.ElementTree as ElementTree
import onnxruntime

import facefusion.globals
from facefusion.typing import ExecutionDevice, ValueAndUnit


Expand All @@ -18,15 +19,21 @@ def decode_execution_providers(execution_providers : List[str]) -> List[str]:
return [ execution_provider for execution_provider, encoded_execution_provider in zip(available_execution_providers, encoded_execution_providers) if any(execution_provider in encoded_execution_provider for execution_provider in execution_providers) ]


def apply_execution_provider_options(execution_providers : List[str]) -> List[Any]:
def apply_execution_provider_options(execution_device_id : int, execution_providers : List[str]) -> List[Any]:
	"""
	Attach per-provider session options for onnxruntime.

	:param execution_device_id: index of the device each provider should bind to
	:param execution_providers: provider names as understood by onnxruntime
	:return: list where each entry is either a bare provider name or a (name, options) tuple
	"""
	# providers that only need a device binding, without extra tuning options
	device_bound_providers = [ 'DmlExecutionProvider', 'OpenVINOExecutionProvider', 'ROCMExecutionProvider' ]
	execution_providers_with_options : List[Any] = []

	for execution_provider in execution_providers:
		if execution_provider == 'CUDAExecutionProvider':
			# exhaustive cudnn search trades startup time for faster convolutions
			cudnn_conv_algo_search = 'EXHAUSTIVE' if use_exhaustive() else 'DEFAULT'
			execution_providers_with_options.append((execution_provider,
			{
				'device_id': execution_device_id,
				'cudnn_conv_algo_search': cudnn_conv_algo_search
			}))
		elif execution_provider in device_bound_providers:
			execution_providers_with_options.append((execution_provider,
			{
				'device_id': execution_device_id
			}))
		else:
			# unknown providers pass through untouched
			execution_providers_with_options.append(execution_provider)
	return execution_providers_with_options
Expand Down
20 changes: 10 additions & 10 deletions facefusion/face_analyser.py
Original file line number Diff line number Diff line change
Expand Up @@ -88,24 +88,24 @@ def get_face_analyser() -> Any:
sleep(0.5)
if FACE_ANALYSER is None:
if facefusion.globals.face_detector_model in [ 'many', 'retinaface' ]:
face_detectors['retinaface'] = onnxruntime.InferenceSession(MODELS.get('face_detector_retinaface').get('path'), providers = apply_execution_provider_options(facefusion.globals.execution_providers))
face_detectors['retinaface'] = onnxruntime.InferenceSession(MODELS.get('face_detector_retinaface').get('path'), providers = apply_execution_provider_options(facefusion.globals.execution_device_id, facefusion.globals.execution_providers))
if facefusion.globals.face_detector_model in [ 'many', 'scrfd' ]:
face_detectors['scrfd'] = onnxruntime.InferenceSession(MODELS.get('face_detector_scrfd').get('path'), providers = apply_execution_provider_options(facefusion.globals.execution_providers))
face_detectors['scrfd'] = onnxruntime.InferenceSession(MODELS.get('face_detector_scrfd').get('path'), providers = apply_execution_provider_options(facefusion.globals.execution_device_id, facefusion.globals.execution_providers))
if facefusion.globals.face_detector_model in [ 'many', 'yoloface' ]:
face_detectors['yoloface'] = onnxruntime.InferenceSession(MODELS.get('face_detector_yoloface').get('path'), providers = apply_execution_provider_options(facefusion.globals.execution_providers))
face_detectors['yoloface'] = onnxruntime.InferenceSession(MODELS.get('face_detector_yoloface').get('path'), providers = apply_execution_provider_options(facefusion.globals.execution_device_id, facefusion.globals.execution_providers))
if facefusion.globals.face_detector_model in [ 'yunet' ]:
face_detectors['yunet'] = cv2.FaceDetectorYN.create(MODELS.get('face_detector_yunet').get('path'), '', (0, 0))
if facefusion.globals.face_recognizer_model == 'arcface_blendswap':
face_recognizer = onnxruntime.InferenceSession(MODELS.get('face_recognizer_arcface_blendswap').get('path'), providers = apply_execution_provider_options(facefusion.globals.execution_providers))
face_recognizer = onnxruntime.InferenceSession(MODELS.get('face_recognizer_arcface_blendswap').get('path'), providers = apply_execution_provider_options(facefusion.globals.execution_device_id, facefusion.globals.execution_providers))
if facefusion.globals.face_recognizer_model == 'arcface_inswapper':
face_recognizer = onnxruntime.InferenceSession(MODELS.get('face_recognizer_arcface_inswapper').get('path'), providers = apply_execution_provider_options(facefusion.globals.execution_providers))
face_recognizer = onnxruntime.InferenceSession(MODELS.get('face_recognizer_arcface_inswapper').get('path'), providers = apply_execution_provider_options(facefusion.globals.execution_device_id, facefusion.globals.execution_providers))
if facefusion.globals.face_recognizer_model == 'arcface_simswap':
face_recognizer = onnxruntime.InferenceSession(MODELS.get('face_recognizer_arcface_simswap').get('path'), providers = apply_execution_provider_options(facefusion.globals.execution_providers))
face_recognizer = onnxruntime.InferenceSession(MODELS.get('face_recognizer_arcface_simswap').get('path'), providers = apply_execution_provider_options(facefusion.globals.execution_device_id, facefusion.globals.execution_providers))
if facefusion.globals.face_recognizer_model == 'arcface_uniface':
face_recognizer = onnxruntime.InferenceSession(MODELS.get('face_recognizer_arcface_uniface').get('path'), providers = apply_execution_provider_options(facefusion.globals.execution_providers))
face_landmarkers['68'] = onnxruntime.InferenceSession(MODELS.get('face_landmarker_68').get('path'), providers = apply_execution_provider_options(facefusion.globals.execution_providers))
face_landmarkers['68_5'] = onnxruntime.InferenceSession(MODELS.get('face_landmarker_68_5').get('path'), providers = apply_execution_provider_options(facefusion.globals.execution_providers))
gender_age = onnxruntime.InferenceSession(MODELS.get('gender_age').get('path'), providers = apply_execution_provider_options(facefusion.globals.execution_providers))
face_recognizer = onnxruntime.InferenceSession(MODELS.get('face_recognizer_arcface_uniface').get('path'), providers = apply_execution_provider_options(facefusion.globals.execution_device_id, facefusion.globals.execution_providers))
face_landmarkers['68'] = onnxruntime.InferenceSession(MODELS.get('face_landmarker_68').get('path'), providers = apply_execution_provider_options(facefusion.globals.execution_device_id, facefusion.globals.execution_providers))
face_landmarkers['68_5'] = onnxruntime.InferenceSession(MODELS.get('face_landmarker_68_5').get('path'), providers = apply_execution_provider_options(facefusion.globals.execution_device_id, facefusion.globals.execution_providers))
gender_age = onnxruntime.InferenceSession(MODELS.get('gender_age').get('path'), providers = apply_execution_provider_options(facefusion.globals.execution_device_id, facefusion.globals.execution_providers))
FACE_ANALYSER =\
{
'face_detectors': face_detectors,
Expand Down
4 changes: 2 additions & 2 deletions facefusion/face_masker.py
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,7 @@ def get_face_occluder() -> Any:
sleep(0.5)
if FACE_OCCLUDER is None:
model_path = MODELS.get('face_occluder').get('path')
FACE_OCCLUDER = onnxruntime.InferenceSession(model_path, providers = apply_execution_provider_options(facefusion.globals.execution_providers))
FACE_OCCLUDER = onnxruntime.InferenceSession(model_path, providers = apply_execution_provider_options(facefusion.globals.execution_device_id, facefusion.globals.execution_providers))
return FACE_OCCLUDER


Expand All @@ -64,7 +64,7 @@ def get_face_parser() -> Any:
sleep(0.5)
if FACE_PARSER is None:
model_path = MODELS.get('face_parser').get('path')
FACE_PARSER = onnxruntime.InferenceSession(model_path, providers = apply_execution_provider_options(facefusion.globals.execution_providers))
FACE_PARSER = onnxruntime.InferenceSession(model_path, providers = apply_execution_provider_options(facefusion.globals.execution_device_id, facefusion.globals.execution_providers))
return FACE_PARSER


Expand Down
1 change: 1 addition & 0 deletions facefusion/globals.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@
headless : Optional[bool] = None
log_level : Optional[LogLevel] = None
# execution
execution_device_id : Optional[int] = None
execution_providers : List[str] = []
execution_thread_count : Optional[int] = None
execution_queue_count : Optional[int] = None
Expand Down
2 changes: 1 addition & 1 deletion facefusion/processors/frame/modules/face_enhancer.py
Original file line number Diff line number Diff line change
Expand Up @@ -104,7 +104,7 @@ def get_frame_processor() -> Any:
sleep(0.5)
if FRAME_PROCESSOR is None:
model_path = get_options('model').get('path')
FRAME_PROCESSOR = onnxruntime.InferenceSession(model_path, providers = apply_execution_provider_options(facefusion.globals.execution_providers))
FRAME_PROCESSOR = onnxruntime.InferenceSession(model_path, providers = apply_execution_provider_options(facefusion.globals.execution_device_id, facefusion.globals.execution_providers))
return FRAME_PROCESSOR


Expand Down
2 changes: 1 addition & 1 deletion facefusion/processors/frame/modules/face_swapper.py
Original file line number Diff line number Diff line change
Expand Up @@ -103,7 +103,7 @@ def get_frame_processor() -> Any:
sleep(0.5)
if FRAME_PROCESSOR is None:
model_path = get_options('model').get('path')
FRAME_PROCESSOR = onnxruntime.InferenceSession(model_path, providers = apply_execution_provider_options(facefusion.globals.execution_providers))
FRAME_PROCESSOR = onnxruntime.InferenceSession(model_path, providers = apply_execution_provider_options(facefusion.globals.execution_device_id, facefusion.globals.execution_providers))
return FRAME_PROCESSOR


Expand Down
2 changes: 1 addition & 1 deletion facefusion/processors/frame/modules/frame_colorizer.py
Original file line number Diff line number Diff line change
Expand Up @@ -68,7 +68,7 @@ def get_frame_processor() -> Any:
sleep(0.5)
if FRAME_PROCESSOR is None:
model_path = get_options('model').get('path')
FRAME_PROCESSOR = onnxruntime.InferenceSession(model_path, providers = apply_execution_provider_options(facefusion.globals.execution_providers))
FRAME_PROCESSOR = onnxruntime.InferenceSession(model_path, providers = apply_execution_provider_options(facefusion.globals.execution_device_id, facefusion.globals.execution_providers))
return FRAME_PROCESSOR


Expand Down
2 changes: 1 addition & 1 deletion facefusion/processors/frame/modules/frame_enhancer.py
Original file line number Diff line number Diff line change
Expand Up @@ -108,7 +108,7 @@ def get_frame_processor() -> Any:
sleep(0.5)
if FRAME_PROCESSOR is None:
model_path = get_options('model').get('path')
FRAME_PROCESSOR = onnxruntime.InferenceSession(model_path, providers = apply_execution_provider_options(facefusion.globals.execution_providers))
FRAME_PROCESSOR = onnxruntime.InferenceSession(model_path, providers = apply_execution_provider_options(facefusion.globals.execution_device_id, facefusion.globals.execution_providers))
return FRAME_PROCESSOR


Expand Down
2 changes: 1 addition & 1 deletion facefusion/processors/frame/modules/lip_syncer.py
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,7 @@ def get_frame_processor() -> Any:
sleep(0.5)
if FRAME_PROCESSOR is None:
model_path = get_options('model').get('path')
FRAME_PROCESSOR = onnxruntime.InferenceSession(model_path, providers = apply_execution_provider_options(facefusion.globals.execution_providers))
FRAME_PROCESSOR = onnxruntime.InferenceSession(model_path, providers = apply_execution_provider_options(facefusion.globals.execution_device_id, facefusion.globals.execution_providers))
return FRAME_PROCESSOR


Expand Down
2 changes: 1 addition & 1 deletion facefusion/voice_extractor.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@ def get_voice_extractor() -> Any:
sleep(0.5)
if VOICE_EXTRACTOR is None:
model_path = MODELS.get('voice_extractor').get('path')
VOICE_EXTRACTOR = onnxruntime.InferenceSession(model_path, providers = apply_execution_provider_options(facefusion.globals.execution_providers))
VOICE_EXTRACTOR = onnxruntime.InferenceSession(model_path, providers = apply_execution_provider_options(facefusion.globals.execution_device_id, facefusion.globals.execution_providers))
return VOICE_EXTRACTOR


Expand Down
1 change: 1 addition & 0 deletions facefusion/wording.py
Original file line number Diff line number Diff line change
Expand Up @@ -65,6 +65,7 @@
'headless': 'run the program without a user interface',
'log_level': 'adjust the message severity displayed in the terminal',
# execution
'execution_device_id': 'specify the device used for processing',
'execution_providers': 'accelerate the model inference using different providers (choices: {choices}, ...)',
'execution_thread_count': 'specify the amount of parallel threads while processing',
'execution_queue_count': 'specify the amount of frames each thread is processing',
Expand Down
3 changes: 2 additions & 1 deletion tests/test_execution.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,8 @@ def test_multiple_execution_providers() -> None:
'CPUExecutionProvider',
('CUDAExecutionProvider',
{
'device_id': 1,
'cudnn_conv_algo_search': 'DEFAULT'
})
]
assert apply_execution_provider_options([ 'CPUExecutionProvider', 'CUDAExecutionProvider' ]) == execution_provider_with_options
assert apply_execution_provider_options(1, [ 'CPUExecutionProvider', 'CUDAExecutionProvider' ]) == execution_provider_with_options

0 comments on commit 47cc665

Please sign in to comment.