Skip to content

Commit

Permalink
Simplify conditional_thread_semaphore
Browse files Browse the repository at this point in the history
  • Loading branch information
henryruhs committed Jun 5, 2024
1 parent 5a34e0c commit 2c7fd6f
Show file tree
Hide file tree
Showing 9 changed files with 17 additions and 13 deletions.
2 changes: 1 addition & 1 deletion facefusion/content_analyser.py
Original file line number Diff line number Diff line change
Expand Up @@ -72,7 +72,7 @@ def analyse_frame(vision_frame : VisionFrame) -> bool:
content_analyser = get_content_analyser()
vision_frame = prepare_frame(vision_frame)

with conditional_thread_semaphore(facefusion.globals.execution_providers):
with conditional_thread_semaphore():
probability = content_analyser.run(None,
{
content_analyser.get_inputs()[0].name: vision_frame
Expand Down
8 changes: 4 additions & 4 deletions facefusion/face_analyser.py
Original file line number Diff line number Diff line change
Expand Up @@ -379,7 +379,7 @@ def calc_embedding(temp_vision_frame : VisionFrame, face_landmark_5 : FaceLandma
crop_vision_frame = crop_vision_frame[:, :, ::-1].transpose(2, 0, 1).astype(numpy.float32)
crop_vision_frame = numpy.expand_dims(crop_vision_frame, axis = 0)

with conditional_thread_semaphore(facefusion.globals.execution_providers):
with conditional_thread_semaphore():
embedding = face_recognizer.run(None,
{
face_recognizer.get_inputs()[0].name: crop_vision_frame
Expand All @@ -400,7 +400,7 @@ def detect_face_landmark_68(temp_vision_frame : VisionFrame, bounding_box : Boun
crop_vision_frame = cv2.cvtColor(crop_vision_frame, cv2.COLOR_Lab2RGB)
crop_vision_frame = crop_vision_frame.transpose(2, 0, 1).astype(numpy.float32) / 255.0

with conditional_thread_semaphore(facefusion.globals.execution_providers):
with conditional_thread_semaphore():
face_landmark_68, face_heatmap = face_landmarker.run(None,
{
face_landmarker.get_inputs()[0].name: [ crop_vision_frame ]
Expand All @@ -419,7 +419,7 @@ def expand_face_landmark_68_from_5(face_landmark_5 : FaceLandmark5) -> FaceLandm
affine_matrix = estimate_matrix_by_face_landmark_5(face_landmark_5, 'ffhq_512', (1, 1))
face_landmark_5 = cv2.transform(face_landmark_5.reshape(1, -1, 2), affine_matrix).reshape(-1, 2)

with conditional_thread_semaphore(facefusion.globals.execution_providers):
with conditional_thread_semaphore():
face_landmark_68_5 = face_landmarker.run(None,
{
face_landmarker.get_inputs()[0].name: [ face_landmark_5 ]
Expand All @@ -437,7 +437,7 @@ def detect_gender_age(temp_vision_frame : VisionFrame, bounding_box : BoundingBo
crop_vision_frame = crop_vision_frame[:, :, ::-1].transpose(2, 0, 1).astype(numpy.float32)
crop_vision_frame = numpy.expand_dims(crop_vision_frame, axis = 0)

with conditional_thread_semaphore(facefusion.globals.execution_providers):
with conditional_thread_semaphore():
prediction = gender_age.run(None,
{
gender_age.get_inputs()[0].name: crop_vision_frame
Expand Down
4 changes: 2 additions & 2 deletions facefusion/face_masker.py
Original file line number Diff line number Diff line change
Expand Up @@ -120,7 +120,7 @@ def create_occlusion_mask(crop_vision_frame : VisionFrame) -> Mask:
prepare_vision_frame = numpy.expand_dims(prepare_vision_frame, axis = 0).astype(numpy.float32) / 255
prepare_vision_frame = prepare_vision_frame.transpose(0, 1, 2, 3)

with conditional_thread_semaphore(facefusion.globals.execution_providers):
with conditional_thread_semaphore():
occlusion_mask : Mask = face_occluder.run(None,
{
face_occluder.get_inputs()[0].name: prepare_vision_frame
Expand All @@ -137,7 +137,7 @@ def create_region_mask(crop_vision_frame : VisionFrame, face_mask_regions : List
prepare_vision_frame = numpy.expand_dims(prepare_vision_frame, axis = 0).astype(numpy.float32)[:, :, ::-1] / 127.5 - 1
prepare_vision_frame = prepare_vision_frame.transpose(0, 3, 1, 2)

with conditional_thread_semaphore(facefusion.globals.execution_providers):
with conditional_thread_semaphore():
region_mask : Mask = face_parser.run(None,
{
face_parser.get_inputs()[0].name: prepare_vision_frame
Expand Down
2 changes: 1 addition & 1 deletion facefusion/processors/frame/modules/face_swapper.py
Original file line number Diff line number Diff line change
Expand Up @@ -275,7 +275,7 @@ def apply_swap(source_face : Face, crop_vision_frame : VisionFrame) -> VisionFra
if frame_processor_input.name == 'target':
frame_processor_inputs[frame_processor_input.name] = crop_vision_frame

with conditional_thread_semaphore(facefusion.globals.execution_providers):
with conditional_thread_semaphore():
crop_vision_frame = frame_processor.run(None, frame_processor_inputs)[0][0]
return crop_vision_frame

Expand Down
1 change: 1 addition & 0 deletions facefusion/processors/frame/modules/frame_colorizer.py
Original file line number Diff line number Diff line change
Expand Up @@ -155,6 +155,7 @@ def post_process() -> None:
def colorize_frame(temp_vision_frame : VisionFrame) -> VisionFrame:
frame_processor = get_frame_processor()
prepare_vision_frame = prepare_temp_frame(temp_vision_frame)

with thread_semaphore():
color_vision_frame = frame_processor.run(None,
{
Expand Down
2 changes: 1 addition & 1 deletion facefusion/processors/frame/modules/frame_enhancer.py
Original file line number Diff line number Diff line change
Expand Up @@ -198,7 +198,7 @@ def enhance_frame(temp_vision_frame : VisionFrame) -> VisionFrame:
tile_vision_frames, pad_width, pad_height = create_tile_frames(temp_vision_frame, size)

for index, tile_vision_frame in enumerate(tile_vision_frames):
with conditional_thread_semaphore(facefusion.globals.execution_providers):
with conditional_thread_semaphore():
tile_vision_frame = frame_processor.run(None,
{
frame_processor.get_inputs()[0].name : prepare_tile_frame(tile_vision_frame)
Expand Down
2 changes: 1 addition & 1 deletion facefusion/processors/frame/modules/lip_syncer.py
Original file line number Diff line number Diff line change
Expand Up @@ -157,7 +157,7 @@ def sync_lip(target_face : Face, temp_audio_frame : AudioFrame, temp_vision_fram
close_vision_frame, close_matrix = warp_face_by_bounding_box(crop_vision_frame, bounding_box, (96, 96))
close_vision_frame = prepare_crop_frame(close_vision_frame)

with conditional_thread_semaphore(facefusion.globals.execution_providers):
with conditional_thread_semaphore():
close_vision_frame = frame_processor.run(None,
{
'source': temp_audio_frame,
Expand Down
8 changes: 5 additions & 3 deletions facefusion/thread_helper.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,9 @@
from typing import List, Union, ContextManager
from typing import Union, ContextManager
import threading
from contextlib import nullcontext

from facefusion.execution import has_execution_provider

THREAD_LOCK : threading.Lock = threading.Lock()
THREAD_SEMAPHORE : threading.Semaphore = threading.Semaphore()
NULL_CONTEXT : ContextManager[None] = nullcontext()
Expand All @@ -15,7 +17,7 @@ def thread_semaphore() -> threading.Semaphore:
return THREAD_SEMAPHORE


def conditional_thread_semaphore(execution_providers : List[str]) -> Union[threading.Semaphore, ContextManager[None]]:
if 'DmlExecutionProvider' in execution_providers:
def conditional_thread_semaphore() -> Union[threading.Semaphore, ContextManager[None]]:
	# DirectML (onnxruntime-directml) is not safe under concurrent session runs,
	# so inference must be serialized through the shared semaphore; every other
	# execution provider gets a no-op context manager instead.
	use_semaphore = has_execution_provider('DmlExecutionProvider')
	return THREAD_SEMAPHORE if use_semaphore else NULL_CONTEXT
1 change: 1 addition & 0 deletions facefusion/voice_extractor.py
Original file line number Diff line number Diff line change
Expand Up @@ -71,6 +71,7 @@ def extract_voice(temp_audio_chunk : AudioChunk) -> AudioChunk:
trim_size = 3840
temp_audio_chunk, pad_size = prepare_audio_chunk(temp_audio_chunk.T, chunk_size, trim_size)
temp_audio_chunk = decompose_audio_chunk(temp_audio_chunk, trim_size)

with thread_semaphore():
temp_audio_chunk = voice_extractor.run(None,
{
Expand Down

0 comments on commit 2c7fd6f

Please sign in to comment.