2.4.1 (#442)
* Update official url for cuda 12-2 wheels

* Fix preview for audio to image

* Prevent download loop when remote is unreachable

* Prevent download loop when remote is unreachable

* changes (#444)

* Tidy up monkey patch

* Use cpu core count for concurrency count

* Dynamic concurrency_count for ideal Gradio performance

* Conditional download face analyser models

* Fix testing via pre_check()

* Introduce checking to process manager for blocking the UI

* Introduce checking to process manager for blocking the UI

* Introduce checking to process manager for blocking the UI

* Introduce checking to process manager for blocking the UI

* Move the blocking while model download to the correct position

* Remove unused imports

---------

Co-authored-by: Harisreedhar <46858047+harisreedhar@users.noreply.github.com>
henryruhs and harisreedhar committed Mar 20, 2024
1 parent 1e2031e commit 6e67d7b
Showing 21 changed files with 133 additions and 52 deletions.
facefusion/content_analyser.py: 7 changes (6 additions, 1 deletion)
@@ -1,13 +1,14 @@
from typing import Any, Dict
from functools import lru_cache
from time import sleep
import threading
import cv2
import numpy
import onnxruntime
from tqdm import tqdm

import facefusion.globals
-from facefusion import wording
+from facefusion import process_manager, wording
from facefusion.typing import VisionFrame, ModelValue, Fps
from facefusion.execution import apply_execution_provider_options
from facefusion.vision import get_video_frame, count_video_frame_total, read_image, detect_video_fps
@@ -33,6 +34,8 @@ def get_content_analyser() -> Any:
global CONTENT_ANALYSER

with THREAD_LOCK:
while process_manager.is_checking():
sleep(0.5)
if CONTENT_ANALYSER is None:
model_path = MODELS.get('open_nsfw').get('path')
CONTENT_ANALYSER = onnxruntime.InferenceSession(model_path, providers = apply_execution_provider_options(facefusion.globals.execution_providers))
@@ -49,7 +52,9 @@ def pre_check() -> bool:
if not facefusion.globals.skip_download:
download_directory_path = resolve_relative_path('../.assets/models')
model_url = MODELS.get('open_nsfw').get('url')
process_manager.check()
conditional_download(download_directory_path, [ model_url ])
process_manager.end()
return True


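This pre_check()/getter pairing is the pattern the rest of the commit repeats: pre_check() flips the process manager into the new 'checking' state around conditional_download(), and the lazy getter polls is_checking() before building its ONNX session, so inference cannot start against a half-downloaded model and the UI stays blocked in the meantime. A minimal, self-contained sketch of that hand-off, with a sleep standing in for the real download and a string standing in for the InferenceSession:

import threading
from time import sleep

PROCESS_STATE = 'pending'
THREAD_LOCK = threading.Lock()
CONTENT_ANALYSER = None


def is_checking() -> bool:
	return PROCESS_STATE == 'checking'


def pre_check() -> None:
	# mirrors process_manager.check() / process_manager.end() around the download
	global PROCESS_STATE
	PROCESS_STATE = 'checking'
	sleep(1)  # stands in for conditional_download(download_directory_path, [ model_url ])
	PROCESS_STATE = 'pending'


def get_content_analyser() -> str:
	global CONTENT_ANALYSER

	with THREAD_LOCK:
		while is_checking():
			sleep(0.5)  # wait until the checking phase has finished
		if CONTENT_ANALYSER is None:
			CONTENT_ANALYSER = 'onnxruntime.InferenceSession(model_path, ...) would be created here'
	return CONTENT_ANALYSER


if __name__ == '__main__':
	threading.Thread(target = pre_check).start()
	print(get_content_analyser())

Polling with a short sleep keeps the getters trivial; a threading.Event would work as well but would touch more call sites.
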
facefusion/download.py: 20 changes (10 additions, 10 deletions)
@@ -22,19 +22,19 @@ def conditional_download(download_directory_path : str, urls : List[str]) -> None:
executor.submit(get_download_size, url)
for url in urls:
download_file_path = os.path.join(download_directory_path, os.path.basename(url))
-initial = os.path.getsize(download_file_path) if is_file(download_file_path) else 0
-total = get_download_size(url)
-if initial < total:
-with tqdm(total = total, initial = initial, desc = wording.get('downloading'), unit = 'B', unit_scale = True, unit_divisor = 1024, ascii = ' =', disable = facefusion.globals.log_level in [ 'warn', 'error' ]) as progress:
+initial_size = os.path.getsize(download_file_path) if is_file(download_file_path) else 0
+download_size = get_download_size(url)
+if initial_size < download_size:
+with tqdm(total = download_size, initial = initial_size, desc = wording.get('downloading'), unit = 'B', unit_scale = True, unit_divisor = 1024, ascii = ' =', disable = facefusion.globals.log_level in [ 'warn', 'error' ]) as progress:
subprocess.Popen([ 'curl', '--create-dirs', '--silent', '--insecure', '--location', '--continue-at', '-', '--output', download_file_path, url ])
-current = initial
-while current < total:
+current_size = initial_size
+while current_size < download_size:
if is_file(download_file_path):
-current = os.path.getsize(download_file_path)
-progress.update(current - progress.n)
-if not is_download_done(url, download_file_path):
+current_size = os.path.getsize(download_file_path)
+progress.update(current_size - progress.n)
+if download_size and not is_download_done(url, download_file_path):
os.remove(download_file_path)
-conditional_download(download_directory_path, [ url] )
+conditional_download(download_directory_path, [ url ])


@lru_cache(maxsize = None)
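
Besides the clearer initial_size/download_size/current_size names, the functional fix is the added `download_size and` guard: when the remote is unreachable no size can be determined, and without the guard the incomplete file was deleted and conditional_download() recursed into itself indefinitely (the download loop named in the commit message). A hedged sketch of the guard in isolation, with probe_size() standing in for get_download_size():

import os


def probe_size(url : str) -> int:
	# stand-in for get_download_size(); assume it returns 0 when the remote cannot be reached
	return 0


def finalize_download(download_file_path : str, url : str) -> bool:
	download_size = probe_size(url)
	local_size = os.path.getsize(download_file_path) if os.path.isfile(download_file_path) else 0
	if download_size and local_size != download_size:
		# only delete and retry when the remote actually reported a size
		os.remove(download_file_path)
		return False
	return True
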
facefusion/face_analyser.py: 33 changes (24 additions, 9 deletions)
@@ -1,10 +1,12 @@
from typing import Any, Optional, List, Tuple
from time import sleep
import threading
import cv2
import numpy
import onnxruntime

import facefusion.globals
from facefusion import process_manager
from facefusion.common_helper import get_first
from facefusion.face_helper import warp_face_by_face_landmark_5, warp_face_by_translation, create_static_anchors, distance_to_face_landmark_5, distance_to_bounding_box, convert_face_landmark_68_to_5, apply_nms, categorize_age, categorize_gender
from facefusion.face_store import get_static_faces, set_static_faces
@@ -77,6 +79,8 @@ def get_face_analyser() -> Any:

face_detectors = {}
with THREAD_LOCK:
while process_manager.is_checking():
sleep(0.5)
if FACE_ANALYSER is None:
if facefusion.globals.face_detector_model in [ 'many', 'retinaface' ]:
face_detector = onnxruntime.InferenceSession(MODELS.get('face_detector_retinaface').get('path'), providers = apply_execution_provider_options(facefusion.globals.execution_providers))
@@ -121,18 +125,29 @@ def pre_check() -> bool:
download_directory_path = resolve_relative_path('../.assets/models')
model_urls =\
[
-MODELS.get('face_detector_retinaface').get('url'),
-MODELS.get('face_detector_scrfd').get('url'),
-MODELS.get('face_detector_yoloface').get('url'),
-MODELS.get('face_detector_yunet').get('url'),
-MODELS.get('face_recognizer_arcface_blendswap').get('url'),
-MODELS.get('face_recognizer_arcface_inswapper').get('url'),
-MODELS.get('face_recognizer_arcface_simswap').get('url'),
-MODELS.get('face_recognizer_arcface_uniface').get('url'),
MODELS.get('face_landmarker').get('url'),
-MODELS.get('gender_age').get('url'),
+MODELS.get('gender_age').get('url')
]

if facefusion.globals.face_detector_model in [ 'many', 'retinaface' ]:
model_urls.append(MODELS.get('face_detector_retinaface').get('url'))
if facefusion.globals.face_detector_model in [ 'many', 'scrfd' ]:
model_urls.append(MODELS.get('face_detector_scrfd').get('url'))
if facefusion.globals.face_detector_model in [ 'many', 'yoloface' ]:
model_urls.append(MODELS.get('face_detector_yoloface').get('url'))
if facefusion.globals.face_detector_model in [ 'yunet' ]:
model_urls.append(MODELS.get('face_detector_yunet').get('url'))
if facefusion.globals.face_recognizer_model == 'arcface_blendswap':
model_urls.append(MODELS.get('face_recognizer_arcface_blendswap').get('url'))
if facefusion.globals.face_recognizer_model == 'arcface_inswapper':
model_urls.append(MODELS.get('face_recognizer_arcface_inswapper').get('url'))
if facefusion.globals.face_recognizer_model == 'arcface_simswap':
model_urls.append(MODELS.get('face_recognizer_arcface_simswap').get('url'))
if facefusion.globals.face_recognizer_model == 'arcface_uniface':
model_urls.append(MODELS.get('face_recognizer_arcface_uniface').get('url'))
process_manager.check()
conditional_download(download_directory_path, model_urls)
process_manager.end()
return True


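pre_check() now queues only the weights the current configuration can use: the landmarker and gender/age models always, the detector matching face_detector_model (with 'many' pulling retinaface, scrfd and yoloface), and the single recognizer matching face_recognizer_model. The same selection, reduced to a pure function for illustration; the MODELS dictionary below is a stand-in keyed like the one in the diff:

from typing import Dict, List


def collect_model_urls(models : Dict[str, str], face_detector_model : str, face_recognizer_model : str) -> List[str]:
	model_urls = [ models['face_landmarker'], models['gender_age'] ]
	if face_detector_model in [ 'many', 'retinaface' ]:
		model_urls.append(models['face_detector_retinaface'])
	if face_detector_model in [ 'many', 'scrfd' ]:
		model_urls.append(models['face_detector_scrfd'])
	if face_detector_model in [ 'many', 'yoloface' ]:
		model_urls.append(models['face_detector_yoloface'])
	if face_detector_model in [ 'yunet' ]:
		model_urls.append(models['face_detector_yunet'])
	recognizer_key = 'face_recognizer_' + face_recognizer_model
	if recognizer_key in models:
		model_urls.append(models[recognizer_key])
	return model_urls


MODEL_NAMES = [ 'face_landmarker', 'gender_age', 'face_detector_retinaface', 'face_detector_scrfd', 'face_detector_yoloface', 'face_detector_yunet', 'face_recognizer_arcface_blendswap', 'face_recognizer_arcface_inswapper', 'face_recognizer_arcface_simswap', 'face_recognizer_arcface_uniface' ]
MODELS = { name : 'https://example.com/' + name + '.onnx' for name in MODEL_NAMES }
print(collect_model_urls(MODELS, 'many', 'arcface_inswapper'))  # 6 urls instead of all 10
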
facefusion/face_masker.py: 6 changes (6 additions, 0 deletions)
@@ -1,12 +1,14 @@
from typing import Any, Dict, List
from cv2.typing import Size
from functools import lru_cache
from time import sleep
import threading
import cv2
import numpy
import onnxruntime

import facefusion.globals
from facefusion import process_manager
from facefusion.typing import FaceLandmark68, VisionFrame, Mask, Padding, FaceMaskRegion, ModelSet
from facefusion.execution import apply_execution_provider_options
from facefusion.filesystem import resolve_relative_path
@@ -57,6 +59,8 @@ def get_face_parser() -> Any:
global FACE_PARSER

with THREAD_LOCK:
while process_manager.is_checking():
sleep(0.5)
if FACE_PARSER is None:
model_path = MODELS.get('face_parser').get('path')
FACE_PARSER = onnxruntime.InferenceSession(model_path, providers = apply_execution_provider_options(facefusion.globals.execution_providers))
@@ -83,7 +87,9 @@ def pre_check() -> bool:
MODELS.get('face_occluder').get('url'),
MODELS.get('face_parser').get('url'),
]
process_manager.check()
conditional_download(download_directory_path, model_urls)
process_manager.end()
return True


facefusion/installer.py: 3 changes (1 addition, 2 deletions)
@@ -15,7 +15,6 @@
ONNXRUNTIMES['default'] = ('onnxruntime', '1.17.1')
else:
ONNXRUNTIMES['default'] = ('onnxruntime', '1.16.3')
-if platform.system().lower() == 'linux' or platform.system().lower() == 'windows':
ONNXRUNTIMES['cuda-12.2'] = ('onnxruntime-gpu', '1.17.1')
ONNXRUNTIMES['cuda-11.8'] = ('onnxruntime-gpu', '1.16.3')
ONNXRUNTIMES['openvino'] = ('onnxruntime-openvino', '1.16.0')
@@ -71,6 +70,6 @@ def run(program : ArgumentParser) -> None:
else:
subprocess.call([ 'pip', 'uninstall', 'onnxruntime', onnxruntime_name, '-y', '-q' ])
if onnxruntime == 'cuda-12.2':
-subprocess.call([ 'pip', 'install', onnxruntime_name + '==' + onnxruntime_version, '--extra-index-url', 'https://pkgs.dev.azure.com/onnxruntime/onnxruntime/_packaging/onnxruntime-cuda-12/pypi/simple', '--force-reinstall' ])
+subprocess.call([ 'pip', 'install', onnxruntime_name + '==' + onnxruntime_version, '--extra-index-url', 'https://aiinfra.pkgs.visualstudio.com/PublicPackages/_packaging/onnxruntime-cuda-12/pypi/simple', '--force-reinstall' ])
else:
subprocess.call([ 'pip', 'install', onnxruntime_name + '==' + onnxruntime_version, '--force-reinstall' ])
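
The CUDA 12.2 onnxruntime-gpu wheels now resolve against the public aiinfra.pkgs.visualstudio.com feed instead of the previous pkgs.dev.azure.com URL. For reference, the cuda-12.2 branch above boils down to the following pip invocation; this is a sketch of the equivalent manual install, with the 1.17.1 pin taken from the ONNXRUNTIMES table earlier in the file:

import subprocess

onnxruntime_name, onnxruntime_version = ('onnxruntime-gpu', '1.17.1')
subprocess.call(
[
	'pip', 'install', onnxruntime_name + '==' + onnxruntime_version,
	'--extra-index-url', 'https://aiinfra.pkgs.visualstudio.com/PublicPackages/_packaging/onnxruntime-cuda-12/pypi/simple',
	'--force-reinstall'
])
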
facefusion/metadata.py: 2 changes (1 addition, 1 deletion)
@@ -2,7 +2,7 @@
{
'name': 'FaceFusion',
'description': 'Next generation face swapper and enhancer',
-'version': '2.4.0',
+'version': '2.4.1',
'license': 'MIT',
'author': 'Henry Ruhs',
'url': 'https://facefusion.io'
facefusion/process_manager.py: 8 changes (8 additions, 0 deletions)
@@ -15,6 +15,10 @@ def set_process_state(process_state : ProcessState) -> None:
PROCESS_STATE = process_state


def is_checking() -> bool:
return get_process_state() == 'checking'


def is_processing() -> bool:
return get_process_state() == 'processing'

@@ -27,6 +31,10 @@ def is_pending() -> bool:
return get_process_state() == 'pending'


def check() -> None:
set_process_state('checking')


def start() -> None:
set_process_state('processing')

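'checking' also joins the ProcessState literal in facefusion/typing.py further down, and gets its own predicate and transition here. A compact standalone model of the resulting state machine; end() resetting to 'pending' is an assumption based on how the pre_check() call sites above expect the waiting getters to be released, since that part of the module is unchanged and not shown:

from typing import Literal

ProcessState = Literal['checking', 'processing', 'stopping', 'pending']

PROCESS_STATE : ProcessState = 'pending'


def get_process_state() -> ProcessState:
	return PROCESS_STATE


def set_process_state(process_state : ProcessState) -> None:
	global PROCESS_STATE
	PROCESS_STATE = process_state


def is_checking() -> bool:
	return get_process_state() == 'checking'


def check() -> None:
	set_process_state('checking')


def start() -> None:
	set_process_state('processing')


def end() -> None:
	# assumed: releases anyone polling is_checking() by returning to 'pending'
	set_process_state('pending')
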
facefusion/processors/frame/modules/face_enhancer.py: 5 changes (5 additions, 0 deletions)
@@ -1,5 +1,6 @@
from typing import Any, List, Literal, Optional
from argparse import ArgumentParser
from time import sleep
import cv2
import threading
import numpy
@@ -87,6 +88,8 @@ def get_frame_processor() -> Any:
global FRAME_PROCESSOR

with THREAD_LOCK:
while process_manager.is_checking():
sleep(0.5)
if FRAME_PROCESSOR is None:
model_path = get_options('model').get('path')
FRAME_PROCESSOR = onnxruntime.InferenceSession(model_path, providers = apply_execution_provider_options(facefusion.globals.execution_providers))
@@ -131,7 +134,9 @@ def pre_check() -> bool:
if not facefusion.globals.skip_download:
download_directory_path = resolve_relative_path('../.assets/models')
model_url = get_options('model').get('url')
process_manager.check()
conditional_download(download_directory_path, [ model_url ])
process_manager.end()
return True


facefusion/processors/frame/modules/face_swapper.py: 7 changes (7 additions, 0 deletions)
@@ -1,5 +1,6 @@
from typing import Any, List, Literal, Optional
from argparse import ArgumentParser
from time import sleep
import threading
import numpy
import onnx
@@ -99,6 +100,8 @@ def get_frame_processor() -> Any:
global FRAME_PROCESSOR

with THREAD_LOCK:
while process_manager.is_checking():
sleep(0.5)
if FRAME_PROCESSOR is None:
model_path = get_options('model').get('path')
FRAME_PROCESSOR = onnxruntime.InferenceSession(model_path, providers = apply_execution_provider_options(facefusion.globals.execution_providers))
@@ -115,6 +118,8 @@ def get_model_matrix() -> Any:
global MODEL_MATRIX

with THREAD_LOCK:
while process_manager.is_checking():
sleep(0.5)
if MODEL_MATRIX is None:
model_path = get_options('model').get('path')
model = onnx.load(model_path)
@@ -171,7 +176,9 @@ def pre_check() -> bool:
if not facefusion.globals.skip_download:
download_directory_path = resolve_relative_path('../.assets/models')
model_url = get_options('model').get('url')
process_manager.check()
conditional_download(download_directory_path, [ model_url ])
process_manager.end()
return True


facefusion/processors/frame/modules/frame_enhancer.py: 5 changes (5 additions, 0 deletions)
@@ -1,5 +1,6 @@
from typing import Any, List, Literal, Optional
from argparse import ArgumentParser
from time import sleep
import threading
import cv2
import numpy
@@ -69,6 +70,8 @@ def get_frame_processor() -> Any:
global FRAME_PROCESSOR

with THREAD_LOCK:
while process_manager.is_checking():
sleep(0.5)
if FRAME_PROCESSOR is None:
model_path = get_options('model').get('path')
FRAME_PROCESSOR = onnxruntime.InferenceSession(model_path, providers = apply_execution_provider_options(facefusion.globals.execution_providers))
@@ -113,7 +116,9 @@ def pre_check() -> bool:
if not facefusion.globals.skip_download:
download_directory_path = resolve_relative_path('../.assets/models')
model_url = get_options('model').get('url')
process_manager.check()
conditional_download(download_directory_path, [ model_url ])
process_manager.end()
return True


facefusion/processors/frame/modules/lip_syncer.py: 5 changes (5 additions, 0 deletions)
@@ -1,5 +1,6 @@
from typing import Any, List, Literal, Optional
from argparse import ArgumentParser
from time import sleep
import threading
import cv2
import numpy
@@ -45,6 +46,8 @@ def get_frame_processor() -> Any:
global FRAME_PROCESSOR

with THREAD_LOCK:
while process_manager.is_checking():
sleep(0.5)
if FRAME_PROCESSOR is None:
model_path = get_options('model').get('path')
FRAME_PROCESSOR = onnxruntime.InferenceSession(model_path, providers = apply_execution_provider_options(facefusion.globals.execution_providers))
@@ -87,7 +90,9 @@ def pre_check() -> bool:
if not facefusion.globals.skip_download:
download_directory_path = resolve_relative_path('../.assets/models')
model_url = get_options('model').get('url')
process_manager.check()
conditional_download(download_directory_path, [ model_url ])
process_manager.end()
return True


facefusion/typing.py: 2 changes (1 addition, 1 deletion)
@@ -49,7 +49,7 @@
Padding = Tuple[int, int, int, int]
Resolution = Tuple[int, int]

-ProcessState = Literal['processing', 'stopping', 'pending']
+ProcessState = Literal['checking', 'processing', 'stopping', 'pending']
QueuePayload = TypedDict('QueuePayload',
{
'frame_number' : int,
facefusion/uis/components/face_analyser.py: 18 changes (10 additions, 8 deletions)
@@ -1,10 +1,10 @@
-from typing import Optional, Dict, Any
+from typing import Optional, Dict, Any, Tuple

import gradio

import facefusion.globals
import facefusion.choices
-from facefusion import wording
+from facefusion import face_analyser, wording
from facefusion.typing import FaceAnalyserOrder, FaceAnalyserAge, FaceAnalyserGender, FaceDetectorModel
from facefusion.uis.core import register_ui_component

@@ -83,7 +83,7 @@ def listen() -> None:
FACE_ANALYSER_ORDER_DROPDOWN.change(update_face_analyser_order, inputs = FACE_ANALYSER_ORDER_DROPDOWN)
FACE_ANALYSER_AGE_DROPDOWN.change(update_face_analyser_age, inputs = FACE_ANALYSER_AGE_DROPDOWN)
FACE_ANALYSER_GENDER_DROPDOWN.change(update_face_analyser_gender, inputs = FACE_ANALYSER_GENDER_DROPDOWN)
-FACE_DETECTOR_MODEL_DROPDOWN.change(update_face_detector_model, inputs = FACE_DETECTOR_MODEL_DROPDOWN, outputs = FACE_DETECTOR_SIZE_DROPDOWN)
+FACE_DETECTOR_MODEL_DROPDOWN.change(update_face_detector_model, inputs = FACE_DETECTOR_MODEL_DROPDOWN, outputs = [ FACE_DETECTOR_MODEL_DROPDOWN, FACE_DETECTOR_SIZE_DROPDOWN ])
FACE_DETECTOR_SIZE_DROPDOWN.change(update_face_detector_size, inputs = FACE_DETECTOR_SIZE_DROPDOWN)
FACE_DETECTOR_SCORE_SLIDER.release(update_face_detector_score, inputs = FACE_DETECTOR_SCORE_SLIDER)
FACE_LANDMARKER_SCORE_SLIDER.release(update_face_landmarker_score, inputs = FACE_LANDMARKER_SCORE_SLIDER)
@@ -101,12 +101,14 @@ def update_face_analyser_gender(face_analyser_gender : FaceAnalyserGender) -> None:
facefusion.globals.face_analyser_gender = face_analyser_gender if face_analyser_gender != 'none' else None


-def update_face_detector_model(face_detector_model : FaceDetectorModel) -> gradio.Dropdown:
+def update_face_detector_model(face_detector_model : FaceDetectorModel) -> Tuple[gradio.Dropdown, gradio.Dropdown]:
facefusion.globals.face_detector_model = face_detector_model
-facefusion.globals.face_detector_size = '640x640'
-if facefusion.globals.face_detector_size in facefusion.choices.face_detector_set[face_detector_model]:
-return gradio.Dropdown(value = facefusion.globals.face_detector_size, choices = facefusion.choices.face_detector_set[face_detector_model])
-return gradio.Dropdown(value = facefusion.globals.face_detector_size, choices = [ facefusion.globals.face_detector_size ])
+update_face_detector_size('640x640')
+if face_analyser.pre_check():
+if facefusion.globals.face_detector_size in facefusion.choices.face_detector_set[face_detector_model]:
+return gradio.Dropdown(value = facefusion.globals.face_detector_model), gradio.Dropdown(value = facefusion.globals.face_detector_size, choices = facefusion.choices.face_detector_set[face_detector_model])
+return gradio.Dropdown(value = facefusion.globals.face_detector_model), gradio.Dropdown(value = facefusion.globals.face_detector_size, choices = [ facefusion.globals.face_detector_size ])
+return gradio.Dropdown(), gradio.Dropdown()


def update_face_detector_size(face_detector_size : str) -> None:
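
update_face_detector_model() now returns a pair of dropdown updates and calls face_analyser.pre_check() first, so switching the detector triggers the conditional download of its weights before the size choices are refreshed; if the check fails, both dropdowns come back unchanged. A reduced sketch of that callback contract with the facefusion internals stubbed out (the face_detector_set values here are illustrative, not the project's full list):

from typing import Tuple
import gradio

face_detector_set = { 'yoloface': [ '640x640' ], 'retinaface': [ '320x320', '640x640' ] }
face_detector_size = '640x640'


def pre_check() -> bool:
	# stand-in for face_analyser.pre_check(), which downloads the selected detector model
	return True


def update_face_detector_model(face_detector_model : str) -> Tuple[gradio.Dropdown, gradio.Dropdown]:
	if pre_check():
		if face_detector_size in face_detector_set[face_detector_model]:
			return gradio.Dropdown(value = face_detector_model), gradio.Dropdown(value = face_detector_size, choices = face_detector_set[face_detector_model])
		return gradio.Dropdown(value = face_detector_model), gradio.Dropdown(value = face_detector_size, choices = [ face_detector_size ])
	return gradio.Dropdown(), gradio.Dropdown()
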
facefusion/uis/components/frame_processors_options.py: 4 changes (2 additions, 2 deletions)
@@ -2,7 +2,7 @@
import gradio

import facefusion.globals
-from facefusion import wording
+from facefusion import face_analyser, wording
from facefusion.processors.frame.core import load_frame_processor_module
from facefusion.processors.frame import globals as frame_processors_globals, choices as frame_processors_choices
from facefusion.processors.frame.typings import FaceDebuggerItem, FaceEnhancerModel, FaceSwapperModel, FrameEnhancerModel, LipSyncerModel
@@ -134,7 +134,7 @@ def update_face_swapper_model(face_swapper_model : FaceSwapperModel) -> gradio.Dropdown:
face_swapper_module = load_frame_processor_module('face_swapper')
face_swapper_module.clear_frame_processor()
face_swapper_module.set_options('model', face_swapper_module.MODELS[face_swapper_model])
-if face_swapper_module.pre_check():
+if face_analyser.pre_check() and face_swapper_module.pre_check():
return gradio.Dropdown(value = frame_processors_globals.face_swapper_model)
return gradio.Dropdown()

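Changing the face swapper model now runs face_analyser.pre_check() ahead of the swapper module's own pre_check(), so any analyser weights the new selection needs (for example a different recognizer) are fetched in the same step; the `and` short-circuits, so a failed analyser check skips the swapper download and the dropdown is left untouched. A tiny sketch of that ordering with both checks stubbed:

def face_analyser_pre_check() -> bool:
	print('download analyser models if needed')
	return True


def face_swapper_pre_check() -> bool:
	print('download swapper model if needed')
	return True


if face_analyser_pre_check() and face_swapper_pre_check():
	print('selection accepted')
else:
	print('selection left unchanged')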