From 7efdf31113d1eb1132ca7beaa114904671de2b36 Mon Sep 17 00:00:00 2001 From: Downupanddownup Date: Tue, 23 Apr 2024 23:31:02 +0800 Subject: [PATCH 01/72] =?UTF-8?q?=E6=B7=BB=E5=8A=A0=E5=8F=82=E8=80=83?= =?UTF-8?q?=E9=9F=B3=E9=A2=91=E7=AD=9B=E5=8A=9F=E8=83=BD=E9=80=89=E7=95=8C?= =?UTF-8?q?=E9=9D=A2?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- Ref_Audio_Selector/__init__.py | 0 .../ref_audio_selector_webui.py | 300 ++++++++++++++++++ 2 files changed, 300 insertions(+) create mode 100644 Ref_Audio_Selector/__init__.py create mode 100644 Ref_Audio_Selector/ref_audio_selector_webui.py diff --git a/Ref_Audio_Selector/__init__.py b/Ref_Audio_Selector/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/Ref_Audio_Selector/ref_audio_selector_webui.py b/Ref_Audio_Selector/ref_audio_selector_webui.py new file mode 100644 index 00000000..05d6d7c1 --- /dev/null +++ b/Ref_Audio_Selector/ref_audio_selector_webui.py @@ -0,0 +1,300 @@ +import gradio as gr +from tools.i18n.i18n import I18nAuto + +i18n = I18nAuto() + + +# 校验基础信息 +def check_base_info(text_work_space_dir, text_character): + if text_work_space_dir is None or text_work_space_dir == '': + raise Exception(i18n("工作目录不能为空")) + if text_character is None or text_character == '': + raise Exception(i18n("角色名称不能为空")) + + +# 从list文件,提取参考音频 +def convert_from_list(text_work_space_dir, text_character, text_list_input): + text_convert_from_list_info = "转换成功:生成目录XXX" + text_sample_dir = "D://tt" + try: + check_base_info(text_work_space_dir, text_character) + if text_list_input is None or text_list_input == '': + raise Exception(i18n("list文件路径不能为空")) + pass + except Exception as e: + text_convert_from_list_info = f"发生异常:{e}" + text_sample_dir = '' + return [text_convert_from_list_info, text_sample_dir] + + +# 基于一个基准音频,从参考音频目录中进行分段抽样 +def sample(text_work_space_dir, text_character, text_sample_dir, text_base_voice_path, + text_subsection_num, text_sample_num, checkbox_similarity_output): + text_sample_info = "抽样成功:生成目录XXX" + ref_audio_dir = "D://tt" + try: + check_base_info(text_work_space_dir, text_character) + if text_sample_dir is None or text_sample_dir == '': + raise Exception(i18n("参考音频抽样目录不能为空,请先完成上一步操作")) + if text_base_voice_path is None or text_base_voice_path == '': + raise Exception(i18n("基准音频路径不能为空")) + if text_subsection_num is None or text_subsection_num == '': + raise Exception(i18n("分段数不能为空")) + if text_sample_num is None or text_sample_num == '': + raise Exception(i18n("每段随机抽样个数不能为空")) + pass + except Exception as e: + text_sample_info = f"发生异常:{e}" + ref_audio_dir = '' + text_model_inference_voice_dir = ref_audio_dir + text_sync_ref_audio_dir = ref_audio_dir + text_sync_ref_audio_dir2 = ref_audio_dir + return [text_sample_info, text_model_inference_voice_dir, text_sync_ref_audio_dir, + text_sync_ref_audio_dir2] + + +# 根据参考音频和测试文本,执行批量推理 +def model_inference(text_work_space_dir, text_character, text_model_inference_voice_dir, text_url, + text_text, text_ref_path, text_ref_text, text_emotion, + text_test_content): + text_model_inference_info = "推理成功:生成目录XXX" + text_asr_audio_dir = "D://tt" + try: + check_base_info(text_work_space_dir, text_character) + if text_model_inference_voice_dir is None or text_model_inference_voice_dir == '': + raise Exception(i18n("待推理的参考音频所在目录不能为空,请先完成上一步操作")) + if text_url is None or text_url == '': + raise Exception(i18n("推理服务请求地址不能为空")) + if text_text is None or text_text == '': + raise Exception(i18n("文本参数名不能为空")) + if text_test_content is None or 
text_test_content == '': + raise Exception(i18n("待推理文本路径不能为空")) + if (text_ref_path is None or text_ref_path == '') and (text_ref_text is None or text_ref_text == '') and (text_emotion is None or text_emotion == ''): + raise Exception(i18n("参考音频路径/文本和角色情绪二选一填写,不能全部为空")) + pass + except Exception as e: + text_model_inference_info = f"发生异常:{e}" + text_asr_audio_dir = '' + return [text_model_inference_info, text_asr_audio_dir] + + +# 对推理生成音频执行asr +def asr(text_work_space_dir, text_character, text_asr_audio_dir, dropdown_asr_model, + dropdown_asr_size, dropdown_asr_lang): + text_asr_info = "asr成功:生成目录XXX" + text_text_similarity_analysis_path = "D://tt" + try: + check_base_info(text_work_space_dir, text_character) + if text_asr_audio_dir is None or text_asr_audio_dir == '': + raise Exception(i18n("待asr的音频所在目录不能为空,请先完成上一步操作")) + if dropdown_asr_model is None or dropdown_asr_model == '': + raise Exception(i18n("asr模型不能为空")) + if dropdown_asr_size is None or dropdown_asr_size == '': + raise Exception(i18n("asr模型大小不能为空")) + if dropdown_asr_lang is None or dropdown_asr_lang == '': + raise Exception(i18n("asr语言不能为空")) + pass + except Exception as e: + text_asr_info = f"发生异常:{e}" + text_text_similarity_analysis_path = '' + return [text_asr_info, text_text_similarity_analysis_path] + + +# 对asr生成的文件,与原本的文本内容,进行相似度分析 +def text_similarity_analysis(text_work_space_dir, text_character, + text_text_similarity_analysis_path): + text_text_similarity_analysis_info = "相似度分析成功:生成目录XXX" + try: + check_base_info(text_work_space_dir, text_character) + if text_text_similarity_analysis_path is None or text_text_similarity_analysis_path == '': + raise Exception(i18n("asr生成的文件路径不能为空,请先完成上一步操作")) + pass + except Exception as e: + text_text_similarity_analysis_info = f"发生异常:{e}" + return text_text_similarity_analysis_info + + +# 根据一个参考音频,对指定目录下的音频进行相似度分析,并输出到另一个目录 +def similarity_audio_output(text_work_space_dir, text_character, text_base_audio_path, + text_compare_audio_dir): + text_similarity_audio_output_info = "相似度分析成功:生成目录XXX" + try: + check_base_info(text_work_space_dir, text_character) + if text_base_audio_path is None or text_base_audio_path == '': + raise Exception(i18n("基准音频路径不能为空")) + if text_compare_audio_dir is None or text_compare_audio_dir == '': + raise Exception(i18n("待分析的音频所在目录不能为空")) + pass + except Exception as e: + text_similarity_audio_output_info = f"发生异常:{e}" + return text_similarity_audio_output_info + + +# 根据参考音频目录的删除情况,将其同步到推理生成的音频目录中,即参考音频目录下,删除了几个参考音频,就在推理目录下,将这些参考音频生成的音频文件移除 +def sync_ref_audio(text_work_space_dir, text_character, text_sync_ref_audio_dir, + text_sync_inference_audio_dir): + text_sync_ref_audio_info = "同步参考音频成功:生成目录XXX" + try: + check_base_info(text_work_space_dir, text_character) + if text_sync_ref_audio_dir is None or text_sync_ref_audio_dir == '': + raise Exception(i18n("参考音频目录不能为空")) + if text_sync_inference_audio_dir is None or text_sync_inference_audio_dir == '': + raise Exception(i18n("推理生成的音频目录不能为空")) + pass + except Exception as e: + text_sync_ref_audio_info = f"发生异常:{e}" + return text_sync_ref_audio_info + + +# 根据模板和参考音频目录,生成参考音频配置内容 +def create_config(text_work_space_dir, text_character, text_template, text_sync_ref_audio_dir2): + text_create_config_info = "配置生成成功:生成目录XXX" + try: + check_base_info(text_work_space_dir, text_character) + if text_template is None or text_template == '': + raise Exception(i18n("参考音频抽样目录不能为空")) + if text_sync_ref_audio_dir2 is None or text_sync_ref_audio_dir2 == '': + raise Exception(i18n("参考音频目录不能为空")) + pass + except Exception as e: + 
text_create_config_info = f"发生异常:{e}" + return text_create_config_info + + +# 基于请求路径和参数,合成完整的请求路径 +def whole_url(text_url, text_text, text_ref_path, text_ref_text, text_emotion): + text_whole_url = f'{text_url}?{text_text}=文本内容&{text_ref_path}=参考音频路径&{text_ref_text}=参考文本&{text_emotion}=情绪类型' + return [text_whole_url] + + +with gr.Blocks() as app: + gr.Markdown(value=i18n("基本介绍:这是一个从训练素材中,批量提取参考音频,并进行效果评估与配置生成的工具")) + with gr.Row(): + text_work_space_dir = gr.Text(label=i18n("工作目录,后续操作所生成文件都会保存在此目录下"), value="") + text_character = gr.Text(label=i18n("请输入角色名称"), value="") + with gr.Accordion(label=i18n("第一步:基于训练素材,生成待选参考音频列表"), open=False): + gr.Markdown(value=i18n("1.1:选择list文件,并提取3-10秒的素材作为参考候选")) + text_list_input = gr.Text(label=i18n("请输入list文件路径"), value="") + with gr.Row(): + button_convert_from_list = gr.Button(i18n("开始生成待参考列表"), variant="primary") + text_convert_from_list_info = gr.Text(label=i18n("参考列表生成结果"), value="", interactive=False) + gr.Markdown(value=i18n("1.2:选择基准音频,执行相似度匹配,并分段随机抽样")) + text_sample_dir = gr.Text(label=i18n("参考音频抽样目录"), value="", interactive=False) + button_convert_from_list.click(convert_from_list, [text_work_space_dir, text_character, text_list_input], + [text_convert_from_list_info, text_sample_dir]) + with gr.Row(): + text_base_voice_path = gr.Text(label=i18n("请输入基准音频路径"), value="") + text_subsection_num = gr.Text(label=i18n("请输入分段数"), value="10") + text_sample_num = gr.Text(label=i18n("请输入每段随机抽样个数"), value="4") + checkbox_similarity_output = gr.Checkbox(label=i18n("是否将相似度匹配结果输出到临时目录?"), show_label=True) + with gr.Row(): + button_sample = gr.Button(i18n("开始分段随机抽样"), variant="primary") + text_sample_info = gr.Text(label=i18n("分段随机抽样结果"), value="", interactive=False) + with gr.Accordion(label=i18n("第二步:基于参考音频和测试文本,执行批量推理"), open=False): + gr.Markdown(value=i18n("2.1:配置推理服务参数信息,参考音频路径/文本和角色情绪二选一,如果是角色情绪,需要先执行第四步," + "将参考音频打包配置到推理服务下,在推理前,请确认完整请求地址是否与正常使用时的一致,包括角色名称,尤其是文本分隔符是否正确")) + text_model_inference_voice_dir = gr.Text(label=i18n("待推理的参考音频所在目录"), value="", interactive=False) + text_url = gr.Text(label=i18n("请输入推理服务请求地址与参数"), value="") + with gr.Row(): + text_text = gr.Text(label=i18n("请输入文本参数名"), value="text") + text_ref_path = gr.Text(label=i18n("请输入参考音频路径参数名"), value="text") + text_ref_text = gr.Text(label=i18n("请输入参考音频文本参数名"), value="text") + text_emotion = gr.Text(label=i18n("请输入角色情绪参数名"), value="text") + text_whole_url = gr.Text(label=i18n("完整地址"), value="5555555555555555", interactive=False) + text_url.input(whole_url, [text_url, text_text, text_ref_path, text_ref_text, text_emotion], + [text_whole_url]) + text_text.input(whole_url, [text_url, text_text, text_ref_path, text_ref_text, text_emotion], + [text_whole_url]) + text_ref_path.input(whole_url, [text_url, text_text, text_ref_path, text_ref_text, text_emotion], + [text_whole_url]) + text_ref_text.input(whole_url, [text_url, text_text, text_ref_path, text_ref_text, text_emotion], + [text_whole_url]) + text_emotion.input(whole_url, [text_url, text_text, text_ref_path, text_ref_text, text_emotion], + [text_whole_url]) + gr.Markdown(value=i18n("2.2:配置待推理文本,一句一行,不要太多,10条即可")) + text_test_content = gr.Text(label=i18n("请输入待推理文本路径"), value="text") + gr.Markdown(value=i18n("2.3:启动推理服务,如果还没启动的话")) + gr.Markdown(value=i18n("2.4:开始批量推理,这个过程比较耗时,可以去干点别的")) + with gr.Row(): + button_model_inference = gr.Button(i18n("开启批量推理"), variant="primary") + text_model_inference_info = gr.Text(label=i18n("批量推理结果"), value="", interactive=False) + with gr.Accordion(label=i18n("第三步:进行参考音频效果校验与筛选"), open=False): + 
gr.Markdown(value=i18n("3.1:启动asr,获取推理音频文本")) + text_asr_audio_dir = gr.Text(label=i18n("待asr的音频所在目录"), value="", interactive=False) + button_model_inference.click(model_inference, + [text_work_space_dir, text_character, text_model_inference_voice_dir, text_url, + text_text, text_ref_path, text_ref_text, text_emotion, + text_test_content], [text_model_inference_info, text_asr_audio_dir]) + with gr.Row(): + dropdown_asr_model = gr.Dropdown( + label=i18n("ASR 模型"), + choices=[], + interactive=True, + value="达摩 ASR (中文)" + ) + dropdown_asr_size = gr.Dropdown( + label=i18n("ASR 模型尺寸"), + choices=["large"], + interactive=True, + value="large" + ) + dropdown_asr_lang = gr.Dropdown( + label=i18n("ASR 语言设置"), + choices=["zh"], + interactive=True, + value="zh" + ) + with gr.Row(): + button_asr = gr.Button(i18n("启动asr"), variant="primary") + text_asr_info = gr.Text(label=i18n("asr结果"), value="", interactive=False) + gr.Markdown(value=i18n("3.2:启动文本相似度分析")) + text_text_similarity_analysis_path = gr.Text(label=i18n("待分析的文件路径"), value="", interactive=False) + button_asr.click(asr, [text_work_space_dir, text_character, text_asr_audio_dir, dropdown_asr_model, + dropdown_asr_size, dropdown_asr_lang], + [text_asr_info, text_text_similarity_analysis_path]) + with gr.Row(): + button_text_similarity_analysis = gr.Button(i18n("启动文本相似度分析"), variant="primary") + text_text_similarity_analysis_info = gr.Text(label=i18n("文本相似度分析结果"), value="", interactive=False) + button_text_similarity_analysis.click(text_similarity_analysis, [text_work_space_dir, text_character, + text_text_similarity_analysis_path], + [text_text_similarity_analysis_info]) + gr.Markdown(value=i18n("3.3:根据相似度分析结果,重点检查最后几条是否存在复读等问题")) + gr.Markdown(value=i18n("3.4:对结果按音频相似度排序,筛选低音质音频")) + with gr.Row(): + text_base_audio_path = gr.Text(label=i18n("请输入基准音频"), value="text") + text_compare_audio_dir = gr.Text(label=i18n("请输入待比较的音频文件目录"), value="text") + with gr.Row(): + button_similarity_audio_output = gr.Button(i18n("输出相似度-参考音频到临时目录"), variant="primary") + text_similarity_audio_output_info = gr.Text(label=i18n("输出结果"), value="", interactive=False) + button_similarity_audio_output.click(similarity_audio_output, + [text_work_space_dir, text_character, text_base_audio_path, + text_compare_audio_dir], [text_similarity_audio_output_info]) + with gr.Row(): + text_sync_ref_audio_dir = gr.Text(label=i18n("参考音频路径"), value="", interactive=False) + text_sync_inference_audio_dir = gr.Text(label=i18n("被同步的推理音频路径"), value="", interactive=False) + with gr.Row(): + button_sync_ref_audio = gr.Button(i18n("将参考音频的删除情况,同步到推理音频目录"), variant="primary") + text_sync_ref_info = gr.Text(label=i18n("同步结果"), value="", interactive=False) + button_sync_ref_audio.click(sync_ref_audio, [text_work_space_dir, text_character, text_sync_ref_audio_dir, + text_sync_inference_audio_dir], [text_sync_ref_info]) + with gr.Accordion("第四步:生成参考音频配置文本", open=False): + gr.Markdown(value=i18n("4.1:编辑模板")) + text_template_path = gr.Text(label=i18n("模板文件路径"), value="", interactive=False) + text_template = gr.Text(label=i18n("模板内容"), value="text", lines=10) + gr.Markdown(value=i18n("4.2:生成配置")) + text_sync_ref_audio_dir2 = gr.Text(label=i18n("参考音频路径"), value="", interactive=False) + with gr.Row(): + button_create_config = gr.Button(i18n("生成配置"), variant="primary") + text_create_config_info = gr.Text(label=i18n("生成结果"), value="", interactive=False) + button_create_config.click(create_config, + [text_work_space_dir, text_character, text_template, text_sync_ref_audio_dir2], + [text_create_config_info]) 
+ button_sample.click(sample, [text_work_space_dir, text_character, text_sample_dir, text_base_voice_path, + text_subsection_num, text_sample_num, checkbox_similarity_output], + [text_sample_info, text_model_inference_voice_dir, text_sync_ref_audio_dir, + text_sync_ref_audio_dir2]) + +app.launch( + server_port=9423, + quiet=True, +) From 29b8370c45ac25086c943870bd4656a43266a1a5 Mon Sep 17 00:00:00 2001 From: Downupanddownup Date: Tue, 23 Apr 2024 23:56:49 +0800 Subject: [PATCH 02/72] =?UTF-8?q?=E6=B7=BB=E5=8A=A0=E6=A0=B9=E6=8D=AElist?= =?UTF-8?q?=EF=BC=8C=E8=BD=AC=E6=8D=A2=E5=8F=82=E8=80=83=E9=9F=B3=E9=A2=91?= =?UTF-8?q?=E7=9A=84=E6=96=B9=E6=B3=95?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../ref_audio_selector_webui.py | 13 ++++-- Ref_Audio_Selector/tool/__init__.py | 0 Ref_Audio_Selector/tool/ref_audio_opt.py | 46 +++++++++++++++++++ 3 files changed, 55 insertions(+), 4 deletions(-) create mode 100644 Ref_Audio_Selector/tool/__init__.py create mode 100644 Ref_Audio_Selector/tool/ref_audio_opt.py diff --git a/Ref_Audio_Selector/ref_audio_selector_webui.py b/Ref_Audio_Selector/ref_audio_selector_webui.py index 05d6d7c1..6946fee9 100644 --- a/Ref_Audio_Selector/ref_audio_selector_webui.py +++ b/Ref_Audio_Selector/ref_audio_selector_webui.py @@ -1,4 +1,7 @@ +import os.path + import gradio as gr +import Ref_Audio_Selector.tool.ref_audio_opt as ref_audio_opt from tools.i18n.i18n import I18nAuto i18n = I18nAuto() @@ -14,13 +17,14 @@ def check_base_info(text_work_space_dir, text_character): # 从list文件,提取参考音频 def convert_from_list(text_work_space_dir, text_character, text_list_input): - text_convert_from_list_info = "转换成功:生成目录XXX" - text_sample_dir = "D://tt" + ref_audio_all = os.path.join(text_work_space_dir, 'ref_audio_all') + text_convert_from_list_info = f"转换成功:生成目录${ref_audio_all}" + text_sample_dir = ref_audio_all try: check_base_info(text_work_space_dir, text_character) if text_list_input is None or text_list_input == '': raise Exception(i18n("list文件路径不能为空")) - pass + ref_audio_opt.convert_from_list(text_list_input, ref_audio_all) except Exception as e: text_convert_from_list_info = f"发生异常:{e}" text_sample_dir = '' @@ -69,7 +73,8 @@ def model_inference(text_work_space_dir, text_character, text_model_inference_vo raise Exception(i18n("文本参数名不能为空")) if text_test_content is None or text_test_content == '': raise Exception(i18n("待推理文本路径不能为空")) - if (text_ref_path is None or text_ref_path == '') and (text_ref_text is None or text_ref_text == '') and (text_emotion is None or text_emotion == ''): + if (text_ref_path is None or text_ref_path == '') and (text_ref_text is None or text_ref_text == '') and ( + text_emotion is None or text_emotion == ''): raise Exception(i18n("参考音频路径/文本和角色情绪二选一填写,不能全部为空")) pass except Exception as e: diff --git a/Ref_Audio_Selector/tool/__init__.py b/Ref_Audio_Selector/tool/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/Ref_Audio_Selector/tool/ref_audio_opt.py b/Ref_Audio_Selector/tool/ref_audio_opt.py new file mode 100644 index 00000000..fb7bd7e2 --- /dev/null +++ b/Ref_Audio_Selector/tool/ref_audio_opt.py @@ -0,0 +1,46 @@ +import os +import shutil + + +def convert_from_list(list_file, output_dir): + # 创建输出目录,如果它不存在的话 + if not os.path.exists(output_dir): + os.makedirs(output_dir) + + # 解析.list文件,并操作文件 + with open(list_file, 'r', encoding='utf-8') as file: + lines = file.readlines() + + for line in lines: + parts = line.strip().split('|') + if len(parts) != 4: + print(f"Line format incorrect: 
{line}") + continue + + audio_path, _, _, transcription = parts + + # 构建新的文件名和路径 + new_filename = transcription + '.wav' + # new_filename = new_filename.replace(' ', '_') # 移除空格 + # new_filename = ''.join(e for e in new_filename if e.isalnum() or e in ['_', '.']) # 移除非法字符 + new_path = os.path.join(output_dir, new_filename) + + # 如果目标文件已存在,不要覆盖 + if os.path.exists(new_path): + print(f"File already exists: {new_path}") + continue + + try: + # 检查音频文件是否存在 + if not os.path.exists(audio_path): + print(f"Audio file does not exist: {audio_path}") + continue + + # 复制音频文件到output目录并重命名 + shutil.copy2(audio_path, new_path) + print(f"File copied and renamed to: {new_path}") + except Exception as e: + print(f"An error occurred while processing: {audio_path}") + print(e) + + print("Processing complete.") From e69e449599f5d850cd87637c5b20c3293447fe03 Mon Sep 17 00:00:00 2001 From: Downupanddownup Date: Wed, 24 Apr 2024 16:54:51 +0800 Subject: [PATCH 03/72] =?UTF-8?q?=E5=8A=9F=E8=83=BD=E8=A1=A5=E5=85=A8?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- Ref_Audio_Selector/common/__init__.py | 0 Ref_Audio_Selector/common/common.py | 67 +++++++ .../ref_audio_selector_webui.py | 57 ++++-- Ref_Audio_Selector/tool/audio_inference.py | 104 ++++++++++ Ref_Audio_Selector/tool/audio_similarity.py | 182 ++++++++++++++++++ Ref_Audio_Selector/tool/ref_audio_opt.py | 46 ----- tools/speaker_verification/__init__.py | 0 .../speaker_verification/audio_similarity.py | 64 ++++++ 8 files changed, 458 insertions(+), 62 deletions(-) create mode 100644 Ref_Audio_Selector/common/__init__.py create mode 100644 Ref_Audio_Selector/common/common.py create mode 100644 Ref_Audio_Selector/tool/audio_inference.py create mode 100644 Ref_Audio_Selector/tool/audio_similarity.py delete mode 100644 Ref_Audio_Selector/tool/ref_audio_opt.py create mode 100644 tools/speaker_verification/__init__.py create mode 100644 tools/speaker_verification/audio_similarity.py diff --git a/Ref_Audio_Selector/common/__init__.py b/Ref_Audio_Selector/common/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/Ref_Audio_Selector/common/common.py b/Ref_Audio_Selector/common/common.py new file mode 100644 index 00000000..f7de06da --- /dev/null +++ b/Ref_Audio_Selector/common/common.py @@ -0,0 +1,67 @@ +from tools import my_utils +import glob +import os + +class RefAudioListManager: + def __init__(self, root_dir): + self.audio_dict = {'default': []} + absolute_root = os.path.abspath(root_dir) + + for subdir, dirs, files in os.walk(absolute_root): + relative_path = os.path.relpath(subdir, absolute_root) + + if relative_path == '.': + category = 'default' + else: + category = relative_path.replace(os.sep, '') + + for file in files: + if file.endswith('.wav'): + # 将相对路径转换为绝对路径 + audio_abs_path = os.path.join(subdir, file) + self.audio_dict[category].append(audio_abs_path) + + def get_audio_list(self): + return self.audio_dict + + def get_flattened_audio_list(self): + all_audio_files = [] + for category_audios in self.audio_dict.values(): + all_audio_files.extend(category_audios) + return all_audio_files + + def get_ref_audio_list(self): + audio_info_list = [] + for category, audio_paths in self.audio_dict.items(): + for audio_path in audio_paths: + filename_without_extension = os.path.splitext(os.path.basename(audio_path))[0] + audio_info = { + 'emotion': f"{category}-{filename_without_extension}", + 'ref_path': audio_path, + 'ref_text': filename_without_extension, + } + audio_info_list.append(audio_info) 
+ return audio_info_list + +def batch_clean_paths(paths): + """ + 批量处理路径列表,对每个路径调用 clean_path() 函数。 + + 参数: + paths (list[str]): 包含待处理路径的列表。 + + 返回: + list[str]: 经过 clean_path() 处理后的路径列表。 + """ + cleaned_paths = [] + for path in paths: + cleaned_paths.append(my_utils.clean_path(path)) + return cleaned_paths + + +def read_text_file_to_list(file_path): + # 按照UTF-8编码打开文件(确保能够正确读取中文) + with open(file_path, mode='r', encoding='utf-8') as file: + # 读取所有行并存储到一个列表中 + lines = file.read().splitlines() + return lines \ No newline at end of file diff --git a/Ref_Audio_Selector/ref_audio_selector_webui.py b/Ref_Audio_Selector/ref_audio_selector_webui.py index 6946fee9..74f2f2a6 100644 --- a/Ref_Audio_Selector/ref_audio_selector_webui.py +++ b/Ref_Audio_Selector/ref_audio_selector_webui.py @@ -1,7 +1,9 @@ import os.path import gradio as gr -import Ref_Audio_Selector.tool.ref_audio_opt as ref_audio_opt +import Ref_Audio_Selector.tool.audio_similarity as audio_similarity +import Ref_Audio_Selector.tool.audio_inference as audio_inference +import Ref_Audio_Selector.common.common as common from tools.i18n.i18n import I18nAuto i18n = I18nAuto() @@ -17,14 +19,14 @@ def check_base_info(text_work_space_dir, text_character): # 从list文件,提取参考音频 def convert_from_list(text_work_space_dir, text_character, text_list_input): - ref_audio_all = os.path.join(text_work_space_dir, 'ref_audio_all') - text_convert_from_list_info = f"转换成功:生成目录${ref_audio_all}" + ref_audio_all = os.path.join(text_work_space_dir, 'refer_audio_all') + text_convert_from_list_info = f"转换成功:生成目录{ref_audio_all}" text_sample_dir = ref_audio_all try: check_base_info(text_work_space_dir, text_character) if text_list_input is None or text_list_input == '': raise Exception(i18n("list文件路径不能为空")) - ref_audio_opt.convert_from_list(text_list_input, ref_audio_all) + audio_similarity.convert_from_list(text_list_input, ref_audio_all) except Exception as e: text_convert_from_list_info = f"发生异常:{e}" text_sample_dir = '' @@ -34,8 +36,8 @@ def convert_from_list(text_work_space_dir, text_character, text_list_input): # 基于一个基准音频,从参考音频目录中进行分段抽样 def sample(text_work_space_dir, text_character, text_sample_dir, text_base_voice_path, text_subsection_num, text_sample_num, checkbox_similarity_output): - text_sample_info = "抽样成功:生成目录XXX" - ref_audio_dir = "D://tt" + ref_audio_dir = os.path.join(text_work_space_dir, 'refer_audio') + text_sample_info = f"抽样成功:生成目录{ref_audio_dir}" try: check_base_info(text_work_space_dir, text_character) if text_sample_dir is None or text_sample_dir == '': @@ -46,7 +48,14 @@ def sample(text_work_space_dir, text_character, text_sample_dir, text_base_voice raise Exception(i18n("分段数不能为空")) if text_sample_num is None or text_sample_num == '': raise Exception(i18n("每段随机抽样个数不能为空")) - pass + + similarity_list = audio_similarity.start_similarity_analysis(text_work_space_dir, text_sample_dir, text_base_voice_path, checkbox_similarity_output) + + if similarity_list is None: + raise Exception(i18n("相似度分析失败")) + + audio_similarity.sample(ref_audio_dir, similarity_list, text_subsection_num, text_sample_num) + except Exception as e: text_sample_info = f"发生异常:{e}" ref_audio_dir = '' @@ -61,8 +70,9 @@ def sample(text_work_space_dir, text_character, text_sample_dir, text_base_voice def model_inference(text_work_space_dir, text_character, text_model_inference_voice_dir, text_url, text_text, text_ref_path, text_ref_text, text_emotion, text_test_content): - text_model_inference_info = "推理成功:生成目录XXX" - text_asr_audio_dir = "D://tt" + inference_dir = 
os.path.join(text_work_space_dir, 'inference_audio') + text_asr_audio_dir = os.path.join(inference_dir, 'text') + text_model_inference_info = f"推理成功:生成目录{inference_dir}" try: check_base_info(text_work_space_dir, text_character) if text_model_inference_voice_dir is None or text_model_inference_voice_dir == '': @@ -76,7 +86,15 @@ def model_inference(text_work_space_dir, text_character, text_model_inference_vo if (text_ref_path is None or text_ref_path == '') and (text_ref_text is None or text_ref_text == '') and ( text_emotion is None or text_emotion == ''): raise Exception(i18n("参考音频路径/文本和角色情绪二选一填写,不能全部为空")) - pass + url_composer = audio_inference.URLComposer(text_url, text_emotion, text_text, text_ref_path, text_ref_text) + url_composer.is_valid() + text_list = common.read_text_file_to_list(text_test_content) + if text_list is None or len(text_list) == 0: + raise Exception(i18n("待推理文本内容不能为空")) + ref_audio_manager = common.RefAudioListManager(text_model_inference_voice_dir) + if len(ref_audio_manager.get_audio_list()) == 0: + raise Exception(i18n("待推理的参考音频不能为空")) + audio_inference.generate_audio_files(url_composer, text_list, ref_audio_manager.get_ref_audio_list(), inference_dir) except Exception as e: text_model_inference_info = f"发生异常:{e}" text_asr_audio_dir = '' @@ -86,8 +104,9 @@ def model_inference(text_work_space_dir, text_character, text_model_inference_vo # 对推理生成音频执行asr def asr(text_work_space_dir, text_character, text_asr_audio_dir, dropdown_asr_model, dropdown_asr_size, dropdown_asr_lang): - text_asr_info = "asr成功:生成目录XXX" - text_text_similarity_analysis_path = "D://tt" + asr_file = os.path.join(text_work_space_dir, 'asr.list') + text_text_similarity_analysis_path = asr_file + text_asr_info = f"asr成功:生成文件asr.list" try: check_base_info(text_work_space_dir, text_character) if text_asr_audio_dir is None or text_asr_audio_dir == '': @@ -108,7 +127,8 @@ def asr(text_work_space_dir, text_character, text_asr_audio_dir, dropdown_asr_mo # 对asr生成的文件,与原本的文本内容,进行相似度分析 def text_similarity_analysis(text_work_space_dir, text_character, text_text_similarity_analysis_path): - text_text_similarity_analysis_info = "相似度分析成功:生成目录XXX" + similarity_file = os.path.join(text_work_space_dir, 'similarity.txt') + text_text_similarity_analysis_info = f"相似度分析成功:生成文件{similarity_file}" try: check_base_info(text_work_space_dir, text_character) if text_text_similarity_analysis_path is None or text_text_similarity_analysis_path == '': @@ -153,7 +173,8 @@ def sync_ref_audio(text_work_space_dir, text_character, text_sync_ref_audio_dir, # 根据模板和参考音频目录,生成参考音频配置内容 def create_config(text_work_space_dir, text_character, text_template, text_sync_ref_audio_dir2): - text_create_config_info = "配置生成成功:生成目录XXX" + config_file = os.path.join(text_work_space_dir, 'refer_audio.json') + text_create_config_info = f"配置生成成功:生成文件{config_file}" try: check_base_info(text_work_space_dir, text_character) if text_template is None or text_template == '': @@ -168,8 +189,12 @@ def create_config(text_work_space_dir, text_character, text_template, text_sync_ # 基于请求路径和参数,合成完整的请求路径 def whole_url(text_url, text_text, text_ref_path, text_ref_text, text_emotion): - text_whole_url = f'{text_url}?{text_text}=文本内容&{text_ref_path}=参考音频路径&{text_ref_text}=参考文本&{text_emotion}=情绪类型' - return [text_whole_url] + url_composer = audio_inference.URLComposer(text_url, text_emotion, text_text, text_ref_path, text_ref_text) + if url_composer.is_emotion(): + text_whole_url = url_composer.build_url_with_emotion('测试内容','情绪类型') + else: + text_whole_url = 
url_composer.build_url_with_ref('测试内容','参考路径','参考文本') + return text_whole_url with gr.Blocks() as app: diff --git a/Ref_Audio_Selector/tool/audio_inference.py b/Ref_Audio_Selector/tool/audio_inference.py new file mode 100644 index 00000000..2c291220 --- /dev/null +++ b/Ref_Audio_Selector/tool/audio_inference.py @@ -0,0 +1,104 @@ +import os +import requests +import urllib.parse + +class URLComposer: + def __init__(self, base_url, emotion_param_name, text_param_name, ref_path_param_name, ref_text_param_name): + self.base_url = base_url + self.emotion_param_name = emotion_param_name + self.text_param_name = text_param_name + self.ref_path_param_name = ref_path_param_name + self.ref_text_param_name = ref_text_param_name + + + def is_valid(self): + if self.base_url is None or self.base_url == '': + raise ValueError("请输入url") + + if self.text_param_name is None or self.text_param_name == '': + raise ValueError("请输入text参数名") + + if self.emotion_param_name is None and self.ref_path_param_name is None and self.ref_text_param_name is None: + raise ValueError("请输入至少一个参考or情绪的参数") + + def is_emotion(self): + return self.emotion_param_name is not None and self.emotion_param_name != '' + + def build_url_with_emotion(self, text_value, emotion_value): + if not self.emotion_param_name: + raise ValueError("Emotion parameter name is not set.") + params = { + self.text_param_name: urllib.parse.quote(text_value), + self.emotion_param_name: urllib.parse.quote(emotion_value), + } + return self._append_params_to_url(params) + + def build_url_with_ref(self, text_value, ref_path_value, ref_text_value): + if self.emotion_param_name: + raise ValueError("Cannot use reference parameters when emotion parameter is set.") + params = { + self.text_param_name: urllib.parse.quote(text_value), + self.ref_path_param_name: urllib.parse.quote(ref_path_value), + self.ref_text_param_name: urllib.parse.quote(ref_text_value), + } + return self._append_params_to_url(params) + + def _append_params_to_url(self, params: dict): + url_with_params = self.base_url + if params: + query_params = '&'.join([f"{k}={v}" for k, v in params.items()]) + url_with_params += '?' + query_params if '?' 
not in self.base_url else '&' + query_params
+        return url_with_params
+
+
+def generate_audio_files(url_composer, text_list, emotion_list, output_dir_path):
+    # 确保输出目录及 text/emotion 两个分类子目录存在
+    os.makedirs(output_dir_path, exist_ok=True)
+    text_subdir = os.path.join(output_dir_path, 'text')
+    os.makedirs(text_subdir, exist_ok=True)
+    emotion_subdir = os.path.join(output_dir_path, 'emotion')
+    os.makedirs(emotion_subdir, exist_ok=True)
+
+    # 每条测试文本都要用每个参考音频各推理一次(笛卡尔积),
+    # 这样两种目录结构下才都有完整的文件
+    for text in text_list:
+        for emotion in emotion_list:
+            if url_composer.is_emotion():
+                real_url = url_composer.build_url_with_emotion(text, emotion['emotion'])
+            else:
+                real_url = url_composer.build_url_with_ref(text, emotion['ref_path'], emotion['ref_text'])
+
+            # 调用推理服务,拿到合成音频的字节流
+            audio_bytes = inference_audio_from_api(real_url)
+
+            emotion_name = emotion['emotion']
+
+            # 同一份音频按两种结构各存一份:
+            # text/<文本>/<情绪>.wav 和 emotion/<情绪>/<文本>.wav
+            text_file_path = os.path.join(text_subdir, text, emotion_name + '.wav')
+            emotion_file_path = os.path.join(emotion_subdir, emotion_name, text + '.wav')
+
+            # 确保嵌套路径的中间目录存在
+            os.makedirs(os.path.dirname(text_file_path), exist_ok=True)
+            os.makedirs(os.path.dirname(emotion_file_path), exist_ok=True)
+
+            # 将音频字节流分别写入两个文件
+            with open(text_file_path, 'wb') as f:
+                f.write(audio_bytes)
+            with open(emotion_file_path, 'wb') as f:
+                f.write(audio_bytes)
+
+
+def inference_audio_from_api(url):
+    # 发起GET请求
+    response = requests.get(url, stream=True)
+
+    # 检查响应状态码是否正常(例如200表示成功)
+    if response.status_code == 200:
+        # 返回音频数据的字节流
+        return response.content
+    else:
+        raise Exception(f"Failed to fetch audio from API. Server responded with status code {response.status_code}.")
\ No newline at end of file
diff --git a/Ref_Audio_Selector/tool/audio_similarity.py b/Ref_Audio_Selector/tool/audio_similarity.py
new file mode 100644
index 00000000..9eb7172b
--- /dev/null
+++ b/Ref_Audio_Selector/tool/audio_similarity.py
@@ -0,0 +1,182 @@
+import os
+import random
+import shutil
+from config import python_exec
+from subprocess import Popen
+
+# 相似度分析子进程句柄,模块加载时先置空,避免首次调用时变量未定义
+p_similarity = None
+
+
+def convert_from_list(list_file, output_dir):
+    # 创建输出目录,如果它不存在的话
+    if not os.path.exists(output_dir):
+        os.makedirs(output_dir)
+
+    # 解析.list文件,并操作文件
+    with open(list_file, 'r', encoding='utf-8') as file:
+        lines = file.readlines()
+
+    for line in lines:
+        parts = line.strip().split('|')
+        if len(parts) != 4:
+            print(f"Line format incorrect: {line}")
+            continue
+
+        audio_path, _, _, transcription = parts
+
+        # 构建新的文件名和路径
+        new_filename = transcription + '.wav'
+        # new_filename = new_filename.replace(' ', '_')  # 移除空格
+        # new_filename = ''.join(e for e in new_filename if e.isalnum() or e in ['_', '.'])  # 移除非法字符
+        new_path = os.path.join(output_dir, new_filename)
+
+        # 如果目标文件已存在,不要覆盖
+        if os.path.exists(new_path):
+            print(f"File already exists: {new_path}")
+            continue
+
+        try:
+            # 检查音频文件是否存在
+            if not os.path.exists(audio_path):
+                print(f"Audio file does not exist: {audio_path}")
+                continue
+
+            # 复制音频文件到output目录并重命名
+            shutil.copy2(audio_path, new_path)
+            print(f"File copied and renamed to: {new_path}")
+        except Exception as e:
+            print(f"An error occurred while processing: {audio_path}")
+            print(e)
+
+    print("Processing complete.")
+
+
+def sample(output_audio_dir, similarity_list, subsection_num, sample_num):
+    # 这两个参数来自页面文本框,统一转成整数再参与计算
+    subsection_num = int(subsection_num)
+    sample_num = int(sample_num)
+
+    # 按照相似度分值降序排序相似度列表
+    similarity_list.sort(key=lambda x: x['score'], reverse=True)
+
+    # 计算每段的起始索引
+    step = len(similarity_list) // subsection_num
+    if len(similarity_list) % subsection_num != 0:
+        step += 1
+
+    # 分段并随机采样
+    for i in range(subsection_num):
+        start = i * step
+        end = (i + 1) * step
+        end = min(end, len(similarity_list))  # 防止最后一段越界
+
+        num = min(sample_num, end - start)
+
+        # 随机采样:先取出该段的副本再洗牌
+        # (直接对切片调用 random.shuffle 只会打乱临时副本,不会生效)
+        subsection = similarity_list[start:end]
+        random.shuffle(subsection)
+        sampled_subsection = subsection[:num]
+
+        # 创建并进入子目录
+        subdir_name = f'subsection_{i+1}'
+        subdir_path = os.path.join(output_audio_dir, subdir_name)
+        os.makedirs(subdir_path, exist_ok=True)
+
+        # 复制采样结果的音频到子目录
+        for item in sampled_subsection:
+            src_path = item['wav_path']
+            dst_path = os.path.join(subdir_path, os.path.basename(src_path))
+            shutil.copyfile(src_path, dst_path)
+
+    print("Sampling completed.")
+
+
+def start_similarity_analysis(work_space_dir, sample_dir, base_voice_path, need_similarity_output):
+    similarity_list = None
+
+    similarity_dir = os.path.join(work_space_dir, 'similarity')
+    os.makedirs(similarity_dir, exist_ok=True)
+
+    base_voice_file_name = get_filename_without_extension(base_voice_path)
+    similarity_file = os.path.join(similarity_dir, f'{base_voice_file_name}.txt')
+
+    global p_similarity
+    if(p_similarity==None):
+        cmd = f'"{python_exec}" tools/speaker_verification/audio_similarity.py '
+        cmd += f' -r "{base_voice_path}"'
+        cmd += f' -c "{sample_dir}"'
+        cmd += f' -o {similarity_file}'
+
+        print(cmd)
+        p_similarity = Popen(cmd, shell=True)
+        p_similarity.wait()
+
+        # 不论是否额外输出副本,都先解析相似度文件,供后续抽样使用
+        similarity_list = parse_similarity_file(similarity_file)
+
+        similarity_file_dir = None
+        if need_similarity_output:
+            # 将带相似度前缀的副本输出到以基准音频命名的子目录
+            similarity_file_dir = os.path.join(similarity_dir, base_voice_file_name)
+            copy_and_move(similarity_file_dir, similarity_list)
+
+        p_similarity=None
+        return similarity_list
+    else:
+        
return similarity_list + + +def parse_similarity_file(file_path): + """ + 解析指定文本文件,将其中的内容以元组形式存入列表。 + + 参数: + file_path (str): 待解析的文本文件路径。 + + 返回: + list[tuple[float, str]]: 存储浮点数和路径的元组列表。 + """ + result_list = [] + + with open(file_path, 'r') as file: + for line in file: + # 去除行尾换行符并按'|'分割 + score, filepath = line.strip().split('|') + + # 将浮点数字符串转换为浮点数类型 + score = float(score) + + # 将得分和路径作为元组添加到结果列表 + result_list.append({ + 'score': score, + 'wav_path': filepath + }) + + return result_list + + +def copy_and_move(output_audio_directory, similarity_scores): + + # 确保新目录存在 + if not os.path.exists(output_audio_directory): + os.makedirs(output_audio_directory) + + # 遍历并复制文件 + for item in similarity_scores: + # 构造新的文件名 + base_name = os.path.basename(item['wav_path'])[:-4] # 去掉.wav扩展名 + new_name = f"{item['score']}-{base_name}.wav" + + # 新文件的完整路径 + new_path = os.path.join(output_audio_directory, new_name) + + # 复制文件到新目录 + shutil.copyfile(item['wav_path'], new_path) + + print("已完成复制和重命名操作。") + + +def get_filename_without_extension(file_path): + """ + Given a file path string, returns the file name without its extension. + + Parameters: + file_path (str): The full path to the file. + + Returns: + str: The file name without its extension. + """ + base_name = os.path.basename(file_path) # Get the base name (file name with extension) + file_name, file_extension = os.path.splitext(base_name) # Split the base name into file name and extension + return file_name # Return the file name without extension + + diff --git a/Ref_Audio_Selector/tool/ref_audio_opt.py b/Ref_Audio_Selector/tool/ref_audio_opt.py deleted file mode 100644 index fb7bd7e2..00000000 --- a/Ref_Audio_Selector/tool/ref_audio_opt.py +++ /dev/null @@ -1,46 +0,0 @@ -import os -import shutil - - -def convert_from_list(list_file, output_dir): - # 创建输出目录,如果它不存在的话 - if not os.path.exists(output_dir): - os.makedirs(output_dir) - - # 解析.list文件,并操作文件 - with open(list_file, 'r', encoding='utf-8') as file: - lines = file.readlines() - - for line in lines: - parts = line.strip().split('|') - if len(parts) != 4: - print(f"Line format incorrect: {line}") - continue - - audio_path, _, _, transcription = parts - - # 构建新的文件名和路径 - new_filename = transcription + '.wav' - # new_filename = new_filename.replace(' ', '_') # 移除空格 - # new_filename = ''.join(e for e in new_filename if e.isalnum() or e in ['_', '.']) # 移除非法字符 - new_path = os.path.join(output_dir, new_filename) - - # 如果目标文件已存在,不要覆盖 - if os.path.exists(new_path): - print(f"File already exists: {new_path}") - continue - - try: - # 检查音频文件是否存在 - if not os.path.exists(audio_path): - print(f"Audio file does not exist: {audio_path}") - continue - - # 复制音频文件到output目录并重命名 - shutil.copy2(audio_path, new_path) - print(f"File copied and renamed to: {new_path}") - except Exception as e: - print(f"An error occurred while processing: {audio_path}") - print(e) - - print("Processing complete.") diff --git a/tools/speaker_verification/__init__.py b/tools/speaker_verification/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tools/speaker_verification/audio_similarity.py b/tools/speaker_verification/audio_similarity.py new file mode 100644 index 00000000..c1f415e8 --- /dev/null +++ b/tools/speaker_verification/audio_similarity.py @@ -0,0 +1,64 @@ +import argparse +import os + +from modelscope.pipelines import pipeline +sv_pipeline = pipeline( + task='speaker-verification', + model='/tools/speaker_verification/models/speech_campplus_sv_zh-cn_16k-common', + model_revision='v1.0.0' +) + + +def 
compare_audio_and_generate_report(reference_audio_path, comparison_dir_path, output_file_path):
+    # Step 1: 获取比较音频目录下所有音频文件的路径
+    comparison_audio_paths = [os.path.join(comparison_dir_path, f) for f in os.listdir(comparison_dir_path) if f.endswith('.wav')]
+
+    # Step 2: 用参考音频依次比较音频目录下的每个音频,获取相似度分数及对应路径
+    similarity_scores = []
+    for audio_path in comparison_audio_paths:
+        score = sv_pipeline([reference_audio_path, audio_path])['score']
+        similarity_scores.append({
+            'score': score,
+            'path': audio_path
+        })
+
+    # Step 3: 根据相似度分数降序排列
+    similarity_scores.sort(key=lambda x: x['score'], reverse=True)
+
+    # Step 4: 将排序后的结果写入输出结果文件(支持中文;'w' 模式会自动创建不存在的文件)
+    formatted_scores = [f'{item["score"]}|{item["path"]}' for item in similarity_scores]
+    with open(output_file_path, 'w', encoding='utf-8') as f:
+        # 使用'\n'将每个字符串分开,使其写入不同行
+        content = '\n'.join(formatted_scores)
+        f.write(content)
+
+
+def parse_arguments():
+    parser = argparse.ArgumentParser(description="Audio processing script arguments")
+
+    # Reference audio path
+    parser.add_argument("-r", "--reference_audio", type=str, required=True,
+                        help="Path to the reference WAV file.")
+
+    # Comparison directory path
+    parser.add_argument("-c", "--comparison_dir", type=str, required=True,
+                        help="Path to the directory containing comparison WAV files.")
+
+    # Output file path
+    parser.add_argument("-o", "--output_file", type=str, required=True,
+                        help="Path to the output file where results will be written.")
+
+    return parser.parse_args()
+
+
+if __name__ == '__main__':
+    cmd = parse_arguments()
+    # 关键字参数名必须与函数定义一致,否则调用会抛 TypeError
+    compare_audio_and_generate_report(
+        reference_audio_path = cmd.reference_audio,
+        comparison_dir_path = cmd.comparison_dir,
+        output_file_path = cmd.output_file,
+    )
\ No newline at end of file

From 8c9627bb30e704c835b4049b46084dd1d9542864 Mon Sep 17 00:00:00 2001
From: Downupanddownup
Date: Wed, 24 Apr 2024 16:58:02 +0800
Subject: [PATCH 04/72] =?UTF-8?q?=E5=8A=9F=E8=83=BD=E8=A1=A5=E5=85=A8?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../{audio_similarity.py => voice_similarity.py}  | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 rename tools/speaker_verification/{audio_similarity.py => voice_similarity.py} (100%)

diff --git a/tools/speaker_verification/audio_similarity.py b/tools/speaker_verification/voice_similarity.py
similarity index 100%
rename from tools/speaker_verification/audio_similarity.py
rename to tools/speaker_verification/voice_similarity.py

From a1fc00a9d8df826d16eb0ed00ea214cbe2585181 Mon Sep 17 00:00:00 2001
From: Downupanddownup
Date: Wed, 24 Apr 2024 18:53:00 +0800
Subject: [PATCH 05/72] =?UTF-8?q?=E8=B0=83=E6=95=B4=E7=9B=AE=E5=BD=95?=
 =?UTF-8?q?=E7=BB=93=E6=9E=84?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../ref_audio_selector_webui.py               |  41 +++++--
 .../tool/asr}/__init__.py                     |   0
 .../tool/asr/funasr_asr_multi_level_dir.py    | 111 ++++++++++++++++++
 Ref_Audio_Selector/tool/audio_asr.py          |  34 ++++++
 Ref_Audio_Selector/tool/audio_config.py       |  26 ++++
 Ref_Audio_Selector/tool/audio_similarity.py   |   6 +-
 .../tool/speaker_verification/__init__.py     |   0
 .../speaker_verification/voice_similarity.py  |   0
 .../tool/text_comparison/__init__.py          |   0
 .../tool/text_comparison/text_comparison.py   |  53 +++++++
 10 files changed, 255 insertions(+), 16 deletions(-)
 rename {tools/speaker_verification => Ref_Audio_Selector/tool/asr}/__init__.py (100%)
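The verification script above reduces to a single ModelScope pipeline call per audio pair. A minimal sketch of that call, assuming modelscope is installed and a CAM++ speaker-verification checkpoint is available (the model id and wav file names below are assumptions for illustration; the script itself loads the model from a local models directory):

    from modelscope.pipelines import pipeline

    # Hypothetical model id; adjust to wherever the CAM++ model actually lives.
    sv = pipeline(task='speaker-verification',
                  model='damo/speech_campplus_sv_zh-cn_16k-common')

    # The pipeline takes a pair of wav paths and returns a dict containing
    # a 'score'; higher scores mean the two voices are more likely the same speaker.
    result = sv(['reference.wav', 'candidate.wav'])
    print(result['score'])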
create mode 100644 Ref_Audio_Selector/tool/asr/funasr_asr_multi_level_dir.py create mode 100644 Ref_Audio_Selector/tool/audio_asr.py create mode 100644 Ref_Audio_Selector/tool/audio_config.py create mode 100644 Ref_Audio_Selector/tool/speaker_verification/__init__.py rename {tools => Ref_Audio_Selector/tool}/speaker_verification/voice_similarity.py (100%) create mode 100644 Ref_Audio_Selector/tool/text_comparison/__init__.py create mode 100644 Ref_Audio_Selector/tool/text_comparison/text_comparison.py diff --git a/Ref_Audio_Selector/ref_audio_selector_webui.py b/Ref_Audio_Selector/ref_audio_selector_webui.py index 74f2f2a6..c0ba953b 100644 --- a/Ref_Audio_Selector/ref_audio_selector_webui.py +++ b/Ref_Audio_Selector/ref_audio_selector_webui.py @@ -3,6 +3,8 @@ import gradio as gr import Ref_Audio_Selector.tool.audio_similarity as audio_similarity import Ref_Audio_Selector.tool.audio_inference as audio_inference +import Ref_Audio_Selector.tool.audio_asr as audio_asr +import Ref_Audio_Selector.tool.audio_config as audio_config import Ref_Audio_Selector.common.common as common from tools.i18n.i18n import I18nAuto @@ -49,13 +51,14 @@ def sample(text_work_space_dir, text_character, text_sample_dir, text_base_voice if text_sample_num is None or text_sample_num == '': raise Exception(i18n("每段随机抽样个数不能为空")) - similarity_list = audio_similarity.start_similarity_analysis(text_work_space_dir, text_sample_dir, text_base_voice_path, checkbox_similarity_output) - + similarity_list = audio_similarity.start_similarity_analysis(text_work_space_dir, text_sample_dir, + text_base_voice_path, checkbox_similarity_output) + if similarity_list is None: raise Exception(i18n("相似度分析失败")) - + audio_similarity.sample(ref_audio_dir, similarity_list, text_subsection_num, text_sample_num) - + except Exception as e: text_sample_info = f"发生异常:{e}" ref_audio_dir = '' @@ -94,7 +97,8 @@ def model_inference(text_work_space_dir, text_character, text_model_inference_vo ref_audio_manager = common.RefAudioListManager(text_model_inference_voice_dir) if len(ref_audio_manager.get_audio_list()) == 0: raise Exception(i18n("待推理的参考音频不能为空")) - audio_inference.generate_audio_files(url_composer, text_list, ref_audio_manager.get_ref_audio_list(), inference_dir) + audio_inference.generate_audio_files(url_composer, text_list, ref_audio_manager.get_ref_audio_list(), + inference_dir) except Exception as e: text_model_inference_info = f"发生异常:{e}" text_asr_audio_dir = '' @@ -104,9 +108,9 @@ def model_inference(text_work_space_dir, text_character, text_model_inference_vo # 对推理生成音频执行asr def asr(text_work_space_dir, text_character, text_asr_audio_dir, dropdown_asr_model, dropdown_asr_size, dropdown_asr_lang): - asr_file = os.path.join(text_work_space_dir, 'asr.list') - text_text_similarity_analysis_path = asr_file - text_asr_info = f"asr成功:生成文件asr.list" + asr_file = None + text_text_similarity_analysis_path = None + text_asr_info = None try: check_base_info(text_work_space_dir, text_character) if text_asr_audio_dir is None or text_asr_audio_dir == '': @@ -117,7 +121,10 @@ def asr(text_work_space_dir, text_character, text_asr_audio_dir, dropdown_asr_mo raise Exception(i18n("asr模型大小不能为空")) if dropdown_asr_lang is None or dropdown_asr_lang == '': raise Exception(i18n("asr语言不能为空")) - pass + asr_file = audio_asr.open_asr(text_asr_audio_dir, text_work_space_dir, dropdown_asr_model, dropdown_asr_size, + dropdown_asr_lang) + text_text_similarity_analysis_path = asr_file + text_asr_info = f"asr成功:生成文件{asr_file}" except Exception as e: text_asr_info = 
f"发生异常:{e}" text_text_similarity_analysis_path = '' @@ -149,7 +156,14 @@ def similarity_audio_output(text_work_space_dir, text_character, text_base_audio raise Exception(i18n("基准音频路径不能为空")) if text_compare_audio_dir is None or text_compare_audio_dir == '': raise Exception(i18n("待分析的音频所在目录不能为空")) - pass + similarity_list, similarity_file, similarity_file_dir = audio_similarity.start_similarity_analysis( + text_work_space_dir, text_compare_audio_dir, text_base_audio_path, True) + + if similarity_list is None: + raise Exception(i18n("相似度分析失败")) + + text_similarity_audio_output_info = f'相似度分析成功:生成目录{similarity_file_dir},文件{similarity_file}' + except Exception as e: text_similarity_audio_output_info = f"发生异常:{e}" return text_similarity_audio_output_info @@ -181,7 +195,8 @@ def create_config(text_work_space_dir, text_character, text_template, text_sync_ raise Exception(i18n("参考音频抽样目录不能为空")) if text_sync_ref_audio_dir2 is None or text_sync_ref_audio_dir2 == '': raise Exception(i18n("参考音频目录不能为空")) - pass + ref_audio_manager = common.RefAudioListManager(text_sync_ref_audio_dir2) + audio_config.generate_audio_config(text_template, ref_audio_manager.get_ref_audio_list(), config_file) except Exception as e: text_create_config_info = f"发生异常:{e}" return text_create_config_info @@ -191,9 +206,9 @@ def create_config(text_work_space_dir, text_character, text_template, text_sync_ def whole_url(text_url, text_text, text_ref_path, text_ref_text, text_emotion): url_composer = audio_inference.URLComposer(text_url, text_emotion, text_text, text_ref_path, text_ref_text) if url_composer.is_emotion(): - text_whole_url = url_composer.build_url_with_emotion('测试内容','情绪类型') + text_whole_url = url_composer.build_url_with_emotion('测试内容', '情绪类型') else: - text_whole_url = url_composer.build_url_with_ref('测试内容','参考路径','参考文本') + text_whole_url = url_composer.build_url_with_ref('测试内容', '参考路径', '参考文本') return text_whole_url diff --git a/tools/speaker_verification/__init__.py b/Ref_Audio_Selector/tool/asr/__init__.py similarity index 100% rename from tools/speaker_verification/__init__.py rename to Ref_Audio_Selector/tool/asr/__init__.py diff --git a/Ref_Audio_Selector/tool/asr/funasr_asr_multi_level_dir.py b/Ref_Audio_Selector/tool/asr/funasr_asr_multi_level_dir.py new file mode 100644 index 00000000..ab94b4a3 --- /dev/null +++ b/Ref_Audio_Selector/tool/asr/funasr_asr_multi_level_dir.py @@ -0,0 +1,111 @@ +# -*- coding:utf-8 -*- + +import argparse +import os +import traceback +from tqdm import tqdm + +from funasr import AutoModel + +path_asr = 'tools/asr/models/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch' +path_vad = 'tools/asr/models/speech_fsmn_vad_zh-cn-16k-common-pytorch' +path_punc = 'tools/asr/models/punc_ct-transformer_zh-cn-common-vocab272727-pytorch' +path_asr = path_asr if os.path.exists(path_asr) else "iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch" +path_vad = path_vad if os.path.exists(path_vad) else "iic/speech_fsmn_vad_zh-cn-16k-common-pytorch" +path_punc = path_punc if os.path.exists(path_punc) else "iic/punc_ct-transformer_zh-cn-common-vocab272727-pytorch" + +model = AutoModel( + model = path_asr, + model_revision = "v2.0.4", + vad_model = path_vad, + vad_model_revision = "v2.0.4", + punc_model = path_punc, + punc_model_revision = "v2.0.4", +) + +def only_asr(input_file): + try: + text = model.generate(input=input_file)[0]["text"] + except: + text = '' + print(traceback.format_exc()) + return text + +def execute_asr(input_folder, output_folder, model_size, 
language):
+    input_file_names = os.listdir(input_folder)
+    input_file_names.sort()
+
+    output = []
+    output_file_name = os.path.basename(input_folder)
+
+    for name in tqdm(input_file_names):
+        try:
+            text = model.generate(input="%s/%s"%(input_folder, name))[0]["text"]
+            output.append(f"{input_folder}/{name}|{output_file_name}|{language.upper()}|{text}")
+        except:
+            print(traceback.format_exc())
+
+    output_folder = output_folder or "output/asr_opt"
+    os.makedirs(output_folder, exist_ok=True)
+    output_file_path = os.path.abspath(f'{output_folder}/{output_file_name}.list')
+
+    with open(output_file_path, "w", encoding="utf-8") as f:
+        f.write("\n".join(output))
+    print(f"ASR 任务完成->标注文件路径: {output_file_path}\n")
+    return output_file_path
+
+
+def execute_asr_multi_level_dir(input_folder, output_folder, model_size, language):
+    output = []
+    output_file_name = os.path.basename(input_folder)
+    # 递归遍历输入目录及所有子目录
+    for root, dirs, files in os.walk(input_folder):
+        for name in sorted(files):
+            # 只处理wav文件(假设是wav文件)
+            if name.endswith(".wav"):
+                try:
+                    # 构造完整的输入音频文件路径
+                    input_file_path = os.path.join(root, name)
+                    input_file_path = os.path.normpath(input_file_path)  # 先标准化可能存在混合斜杠的情况
+                    text = model.generate(input=input_file_path)[0]["text"]
+
+                    output.append(f"{input_file_path}|{output_file_name}|{language.upper()}|{text}")
+
+                except:
+                    print(traceback.format_exc())
+
+    # 创建或打开指定的输出目录
+    output_folder = output_folder or "output/asr_opt"
+    output_dir_abs = os.path.abspath(output_folder)
+    os.makedirs(output_dir_abs, exist_ok=True)
+
+    # 构造输出文件路径
+    output_file_path = os.path.join(output_dir_abs, f'{output_file_name}.list')
+
+    # 将输出写入文件
+    with open(output_file_path, "w", encoding="utf-8") as f:
+        f.write("\n".join(output))
+    print(f"ASR 任务完成->标注文件路径: {output_file_path}\n")
+
+    return output_file_path
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument("-i", "--input_folder", type=str, required=True,
+                        help="Path to the folder containing WAV files.")
+    parser.add_argument("-o", "--output_folder", type=str, required=True,
+                        help="Output folder to store transcriptions.")
+    parser.add_argument("-s", "--model_size", type=str, default='large',
+                        help="Model Size of FunASR is Large")
+    parser.add_argument("-l", "--language", type=str, default='zh', choices=['zh'],
+                        help="Language of the audio files.")
+    parser.add_argument("-p", "--precision", type=str, default='float16', choices=['float16','float32'],
+                        help="fp16 or fp32")  # 还没接入
+
+    cmd = parser.parse_args()
+    execute_asr_multi_level_dir(
+        input_folder = cmd.input_folder,
+        output_folder = cmd.output_folder,
+        model_size = cmd.model_size,
+        language = cmd.language,
+    )
diff --git a/Ref_Audio_Selector/tool/audio_asr.py b/Ref_Audio_Selector/tool/audio_asr.py
new file mode 100644
index 00000000..126d3f9a
--- /dev/null
+++ b/Ref_Audio_Selector/tool/audio_asr.py
@@ -0,0 +1,34 @@
+import os
+from config import python_exec,is_half
+from tools import my_utils
+from tools.asr.config import asr_dict
+from subprocess import Popen
+
+# ASR 子进程句柄,模块加载时先置空,避免首次调用时变量未定义
+p_asr = None
+
+
+def open_asr(asr_inp_dir, asr_opt_dir, asr_model, asr_model_size, asr_lang):
+    global p_asr
+    if(p_asr==None):
+        asr_inp_dir=my_utils.clean_path(asr_inp_dir)
+        asr_py_path = asr_dict[asr_model]["path"]
+        # 多级目录版脚本位于本模块的 asr 子目录下,而非 tools/asr
+        if asr_py_path == 'funasr_asr.py':
+            asr_py_path = 'funasr_asr_multi_level_dir.py'
+        if asr_py_path == 'fasterwhisper.py':
+            asr_py_path = 'fasterwhisper_asr_multi_level_dir.py'
+        cmd = f'"{python_exec}" Ref_Audio_Selector/tool/asr/{asr_py_path}'
+        cmd += f' -i "{asr_inp_dir}"'
+        cmd += f' -o "{asr_opt_dir}"'
+        cmd += f' -s {asr_model_size}'
+        cmd += f' -l {asr_lang}'
+        cmd += " -p %s"%("float16"if is_half==True else "float32")
+
+        print(cmd)
+        p_asr = Popen(cmd, shell=True)
+        p_asr.wait()
+        p_asr=None
+
+        output_dir_abs = os.path.abspath(asr_opt_dir)
+        output_file_name = os.path.basename(asr_inp_dir)
+        # 构造输出文件路径
+        output_file_path = os.path.join(output_dir_abs, f'{output_file_name}.list')
+        return output_file_path
+
+    else:
+        return None
\ No newline at end of file
diff --git a/Ref_Audio_Selector/tool/audio_config.py b/Ref_Audio_Selector/tool/audio_config.py
new file mode 100644
index 00000000..7ea9a9b3
--- /dev/null
+++ b/Ref_Audio_Selector/tool/audio_config.py
@@ -0,0 +1,26 @@
+import os
+
+
+def generate_audio_config(template_str, audio_list, output_file_path):
+    # 定义一个空字符串来存储最终要写入文件的内容
+    file_content = ""
+
+    # 遍历参考音频列表
+    for audio_info in audio_list:
+        emotion = audio_info['emotion']
+        ref_path = audio_info['ref_path']
+        ref_text = audio_info['ref_text']
+
+        # 使用字符串模板替换变量
+        formatted_line = template_str.replace('${emotion}', emotion).replace('${ref_path}', ref_path).replace(
+            '${ref_text}', ref_text)
+
+        # 将格式化后的行添加到内容中,使用逗号和换行符分隔
+        file_content += formatted_line + ",\n"
+
+    # 删除最后一个逗号和换行符,确保格式整洁
+    file_content = file_content[:-2]
+
+    # 将内容写入输出文件
+    with open(output_file_path, 'w', encoding='utf-8') as output_file:
+        output_file.write(file_content)
diff --git a/Ref_Audio_Selector/tool/audio_similarity.py b/Ref_Audio_Selector/tool/audio_similarity.py
index 9eb7172b..30ae5d1f 100644
--- a/Ref_Audio_Selector/tool/audio_similarity.py
+++ b/Ref_Audio_Selector/tool/audio_similarity.py
@@ -95,7 +95,7 @@ def start_similarity_analysis(work_space_dir, sample_dir, base_voice_path, need_
 
     global p_similarity
     if(p_similarity==None):
-        cmd = f'"{python_exec}" tools/speaker_verification/audio_similarity.py '
+        cmd = f'"{python_exec}" Ref_Audio_Selector/tool/speaker_verification/voice_similarity.py '
         cmd += f' -r "{base_voice_path}"'
         cmd += f' -c "{sample_dir}"'
         cmd += f' -o {similarity_file}'
@@ -110,9 +110,9 @@ def start_similarity_analysis(work_space_dir, sample_dir, base_voice_path, need_
             copy_and_move(similarity_file_dir, similarity_list)
 
         p_similarity=None
-        return similarity_list
+        return similarity_list, similarity_file, similarity_file_dir
     else:
-        return similarity_list
+        return similarity_list, None, None
diff --git a/Ref_Audio_Selector/tool/speaker_verification/__init__.py b/Ref_Audio_Selector/tool/speaker_verification/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tools/speaker_verification/voice_similarity.py b/Ref_Audio_Selector/tool/speaker_verification/voice_similarity.py
similarity index 100%
rename from tools/speaker_verification/voice_similarity.py
rename to Ref_Audio_Selector/tool/speaker_verification/voice_similarity.py
diff --git a/Ref_Audio_Selector/tool/text_comparison/__init__.py b/Ref_Audio_Selector/tool/text_comparison/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/Ref_Audio_Selector/tool/text_comparison/text_comparison.py b/Ref_Audio_Selector/tool/text_comparison/text_comparison.py
new file mode 100644
index 00000000..dab00b62
--- /dev/null
+++ b/Ref_Audio_Selector/tool/text_comparison/text_comparison.py
@@ -0,0 +1,53 @@
+import os
+import torch
+from transformers import AutoTokenizer, AutoModel
+from scipy.spatial.distance import cosine
+
+bert_path = os.environ.get(
+    "bert_path", "GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large"
+)
+
+
+tokenizer = AutoTokenizer.from_pretrained(bert_path)
+model 
= AutoModel.from_pretrained(bert_path) + + +def calculate_similarity(text1, text2, max_length=512): + # 预处理文本,设置最大长度 + inputs1 = tokenizer(text1, padding=True, truncation=True, max_length=max_length, return_tensors='pt') + inputs2 = tokenizer(text2, padding=True, truncation=True, max_length=max_length, return_tensors='pt') + + # 获取句子向量(这里是取CLS token的向量并展平为一维) + with torch.no_grad(): + encoded_text1 = model(**inputs1)[0][:, 0, :].flatten() + encoded_text2 = model(**inputs2)[0][:, 0, :].flatten() + + # 确保转换为numpy数组并且是一维的 + similarity = 1 - cosine(encoded_text1.cpu().numpy().flatten(), encoded_text2.cpu().numpy().flatten()) + + return similarity + +# 对0.8-1区间的值进行放大 +def adjusted_similarity(similarity_score2, boundary=0.8): + + if similarity_score2 < boundary: + return 0 + + # 倍数 + multiple = 1/(1 - boundary) + + adjusted_score = (similarity_score2 - boundary)*multiple + + return adjusted_score + + +def calculate_result(t1, t2): + # 计算并打印相似度 + similarity_score2 = calculate_similarity(t1, t2) + + # 调整相似度 + adjusted_similarity_score2 = adjusted_similarity(similarity_score2) + + return similarity_score2, adjusted_similarity_score2 + + From 4cbbe2a2587ea9606cbc4894354113af40e8eb20 Mon Sep 17 00:00:00 2001 From: Downupanddownup Date: Wed, 24 Apr 2024 18:57:36 +0800 Subject: [PATCH 06/72] =?UTF-8?q?=E8=B0=83=E6=95=B4=E7=9B=AE=E5=BD=95?= =?UTF-8?q?=E7=BB=93=E6=9E=84?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../ref_audio_selector_webui.py | 56 +++++++++---------- Ref_Audio_Selector/tool/audio_asr.py | 16 +++--- Ref_Audio_Selector/tool/audio_inference.py | 23 ++++---- Ref_Audio_Selector/tool/audio_similarity.py | 16 ++---- .../tool/text_comparison/text_comparison.py | 1 + 5 files changed, 52 insertions(+), 60 deletions(-) diff --git a/Ref_Audio_Selector/ref_audio_selector_webui.py b/Ref_Audio_Selector/ref_audio_selector_webui.py index c0ba953b..b2422132 100644 --- a/Ref_Audio_Selector/ref_audio_selector_webui.py +++ b/Ref_Audio_Selector/ref_audio_selector_webui.py @@ -12,20 +12,18 @@ # 校验基础信息 -def check_base_info(text_work_space_dir, text_character): +def check_base_info(text_work_space_dir): if text_work_space_dir is None or text_work_space_dir == '': raise Exception(i18n("工作目录不能为空")) - if text_character is None or text_character == '': - raise Exception(i18n("角色名称不能为空")) # 从list文件,提取参考音频 -def convert_from_list(text_work_space_dir, text_character, text_list_input): +def convert_from_list(text_work_space_dir, text_list_input): ref_audio_all = os.path.join(text_work_space_dir, 'refer_audio_all') text_convert_from_list_info = f"转换成功:生成目录{ref_audio_all}" text_sample_dir = ref_audio_all try: - check_base_info(text_work_space_dir, text_character) + check_base_info(text_work_space_dir) if text_list_input is None or text_list_input == '': raise Exception(i18n("list文件路径不能为空")) audio_similarity.convert_from_list(text_list_input, ref_audio_all) @@ -36,12 +34,12 @@ def convert_from_list(text_work_space_dir, text_character, text_list_input): # 基于一个基准音频,从参考音频目录中进行分段抽样 -def sample(text_work_space_dir, text_character, text_sample_dir, text_base_voice_path, +def sample(text_work_space_dir, text_sample_dir, text_base_voice_path, text_subsection_num, text_sample_num, checkbox_similarity_output): ref_audio_dir = os.path.join(text_work_space_dir, 'refer_audio') text_sample_info = f"抽样成功:生成目录{ref_audio_dir}" try: - check_base_info(text_work_space_dir, text_character) + check_base_info(text_work_space_dir) if text_sample_dir is None or text_sample_dir == '': raise 
Exception(i18n("参考音频抽样目录不能为空,请先完成上一步操作")) if text_base_voice_path is None or text_base_voice_path == '': @@ -70,14 +68,14 @@ def sample(text_work_space_dir, text_character, text_sample_dir, text_base_voice # 根据参考音频和测试文本,执行批量推理 -def model_inference(text_work_space_dir, text_character, text_model_inference_voice_dir, text_url, +def model_inference(text_work_space_dir, text_model_inference_voice_dir, text_url, text_text, text_ref_path, text_ref_text, text_emotion, text_test_content): inference_dir = os.path.join(text_work_space_dir, 'inference_audio') text_asr_audio_dir = os.path.join(inference_dir, 'text') text_model_inference_info = f"推理成功:生成目录{inference_dir}" try: - check_base_info(text_work_space_dir, text_character) + check_base_info(text_work_space_dir) if text_model_inference_voice_dir is None or text_model_inference_voice_dir == '': raise Exception(i18n("待推理的参考音频所在目录不能为空,请先完成上一步操作")) if text_url is None or text_url == '': @@ -106,13 +104,13 @@ def model_inference(text_work_space_dir, text_character, text_model_inference_vo # 对推理生成音频执行asr -def asr(text_work_space_dir, text_character, text_asr_audio_dir, dropdown_asr_model, +def asr(text_work_space_dir, text_asr_audio_dir, dropdown_asr_model, dropdown_asr_size, dropdown_asr_lang): asr_file = None text_text_similarity_analysis_path = None text_asr_info = None try: - check_base_info(text_work_space_dir, text_character) + check_base_info(text_work_space_dir) if text_asr_audio_dir is None or text_asr_audio_dir == '': raise Exception(i18n("待asr的音频所在目录不能为空,请先完成上一步操作")) if dropdown_asr_model is None or dropdown_asr_model == '': @@ -132,12 +130,12 @@ def asr(text_work_space_dir, text_character, text_asr_audio_dir, dropdown_asr_mo # 对asr生成的文件,与原本的文本内容,进行相似度分析 -def text_similarity_analysis(text_work_space_dir, text_character, +def text_similarity_analysis(text_work_space_dir, text_text_similarity_analysis_path): similarity_file = os.path.join(text_work_space_dir, 'similarity.txt') text_text_similarity_analysis_info = f"相似度分析成功:生成文件{similarity_file}" try: - check_base_info(text_work_space_dir, text_character) + check_base_info(text_work_space_dir) if text_text_similarity_analysis_path is None or text_text_similarity_analysis_path == '': raise Exception(i18n("asr生成的文件路径不能为空,请先完成上一步操作")) pass @@ -147,11 +145,11 @@ def text_similarity_analysis(text_work_space_dir, text_character, # 根据一个参考音频,对指定目录下的音频进行相似度分析,并输出到另一个目录 -def similarity_audio_output(text_work_space_dir, text_character, text_base_audio_path, +def similarity_audio_output(text_work_space_dir, text_base_audio_path, text_compare_audio_dir): text_similarity_audio_output_info = "相似度分析成功:生成目录XXX" try: - check_base_info(text_work_space_dir, text_character) + check_base_info(text_work_space_dir) if text_base_audio_path is None or text_base_audio_path == '': raise Exception(i18n("基准音频路径不能为空")) if text_compare_audio_dir is None or text_compare_audio_dir == '': @@ -170,11 +168,11 @@ def similarity_audio_output(text_work_space_dir, text_character, text_base_audio # 根据参考音频目录的删除情况,将其同步到推理生成的音频目录中,即参考音频目录下,删除了几个参考音频,就在推理目录下,将这些参考音频生成的音频文件移除 -def sync_ref_audio(text_work_space_dir, text_character, text_sync_ref_audio_dir, +def sync_ref_audio(text_work_space_dir, text_sync_ref_audio_dir, text_sync_inference_audio_dir): text_sync_ref_audio_info = "同步参考音频成功:生成目录XXX" try: - check_base_info(text_work_space_dir, text_character) + check_base_info(text_work_space_dir) if text_sync_ref_audio_dir is None or text_sync_ref_audio_dir == '': raise Exception(i18n("参考音频目录不能为空")) if text_sync_inference_audio_dir is None 
or text_sync_inference_audio_dir == '': @@ -186,11 +184,11 @@ def sync_ref_audio(text_work_space_dir, text_character, text_sync_ref_audio_dir, # 根据模板和参考音频目录,生成参考音频配置内容 -def create_config(text_work_space_dir, text_character, text_template, text_sync_ref_audio_dir2): +def create_config(text_work_space_dir, text_template, text_sync_ref_audio_dir2): config_file = os.path.join(text_work_space_dir, 'refer_audio.json') text_create_config_info = f"配置生成成功:生成文件{config_file}" try: - check_base_info(text_work_space_dir, text_character) + check_base_info(text_work_space_dir) if text_template is None or text_template == '': raise Exception(i18n("参考音频抽样目录不能为空")) if text_sync_ref_audio_dir2 is None or text_sync_ref_audio_dir2 == '': @@ -214,9 +212,7 @@ def whole_url(text_url, text_text, text_ref_path, text_ref_text, text_emotion): with gr.Blocks() as app: gr.Markdown(value=i18n("基本介绍:这是一个从训练素材中,批量提取参考音频,并进行效果评估与配置生成的工具")) - with gr.Row(): - text_work_space_dir = gr.Text(label=i18n("工作目录,后续操作所生成文件都会保存在此目录下"), value="") - text_character = gr.Text(label=i18n("请输入角色名称"), value="") + text_work_space_dir = gr.Text(label=i18n("工作目录,后续操作所生成文件都会保存在此目录下"), value="") with gr.Accordion(label=i18n("第一步:基于训练素材,生成待选参考音频列表"), open=False): gr.Markdown(value=i18n("1.1:选择list文件,并提取3-10秒的素材作为参考候选")) text_list_input = gr.Text(label=i18n("请输入list文件路径"), value="") @@ -225,7 +221,7 @@ def whole_url(text_url, text_text, text_ref_path, text_ref_text, text_emotion): text_convert_from_list_info = gr.Text(label=i18n("参考列表生成结果"), value="", interactive=False) gr.Markdown(value=i18n("1.2:选择基准音频,执行相似度匹配,并分段随机抽样")) text_sample_dir = gr.Text(label=i18n("参考音频抽样目录"), value="", interactive=False) - button_convert_from_list.click(convert_from_list, [text_work_space_dir, text_character, text_list_input], + button_convert_from_list.click(convert_from_list, [text_work_space_dir, text_list_input], [text_convert_from_list_info, text_sample_dir]) with gr.Row(): text_base_voice_path = gr.Text(label=i18n("请输入基准音频路径"), value="") @@ -267,7 +263,7 @@ def whole_url(text_url, text_text, text_ref_path, text_ref_text, text_emotion): gr.Markdown(value=i18n("3.1:启动asr,获取推理音频文本")) text_asr_audio_dir = gr.Text(label=i18n("待asr的音频所在目录"), value="", interactive=False) button_model_inference.click(model_inference, - [text_work_space_dir, text_character, text_model_inference_voice_dir, text_url, + [text_work_space_dir, text_model_inference_voice_dir, text_url, text_text, text_ref_path, text_ref_text, text_emotion, text_test_content], [text_model_inference_info, text_asr_audio_dir]) with gr.Row(): @@ -294,13 +290,13 @@ def whole_url(text_url, text_text, text_ref_path, text_ref_text, text_emotion): text_asr_info = gr.Text(label=i18n("asr结果"), value="", interactive=False) gr.Markdown(value=i18n("3.2:启动文本相似度分析")) text_text_similarity_analysis_path = gr.Text(label=i18n("待分析的文件路径"), value="", interactive=False) - button_asr.click(asr, [text_work_space_dir, text_character, text_asr_audio_dir, dropdown_asr_model, + button_asr.click(asr, [text_work_space_dir, text_asr_audio_dir, dropdown_asr_model, dropdown_asr_size, dropdown_asr_lang], [text_asr_info, text_text_similarity_analysis_path]) with gr.Row(): button_text_similarity_analysis = gr.Button(i18n("启动文本相似度分析"), variant="primary") text_text_similarity_analysis_info = gr.Text(label=i18n("文本相似度分析结果"), value="", interactive=False) - button_text_similarity_analysis.click(text_similarity_analysis, [text_work_space_dir, text_character, + button_text_similarity_analysis.click(text_similarity_analysis, [text_work_space_dir, 
text_text_similarity_analysis_path], [text_text_similarity_analysis_info]) gr.Markdown(value=i18n("3.3:根据相似度分析结果,重点检查最后几条是否存在复读等问题")) @@ -312,7 +308,7 @@ def whole_url(text_url, text_text, text_ref_path, text_ref_text, text_emotion): button_similarity_audio_output = gr.Button(i18n("输出相似度-参考音频到临时目录"), variant="primary") text_similarity_audio_output_info = gr.Text(label=i18n("输出结果"), value="", interactive=False) button_similarity_audio_output.click(similarity_audio_output, - [text_work_space_dir, text_character, text_base_audio_path, + [text_work_space_dir, text_base_audio_path, text_compare_audio_dir], [text_similarity_audio_output_info]) with gr.Row(): text_sync_ref_audio_dir = gr.Text(label=i18n("参考音频路径"), value="", interactive=False) @@ -320,7 +316,7 @@ def whole_url(text_url, text_text, text_ref_path, text_ref_text, text_emotion): with gr.Row(): button_sync_ref_audio = gr.Button(i18n("将参考音频的删除情况,同步到推理音频目录"), variant="primary") text_sync_ref_info = gr.Text(label=i18n("同步结果"), value="", interactive=False) - button_sync_ref_audio.click(sync_ref_audio, [text_work_space_dir, text_character, text_sync_ref_audio_dir, + button_sync_ref_audio.click(sync_ref_audio, [text_work_space_dir, text_sync_ref_audio_dir, text_sync_inference_audio_dir], [text_sync_ref_info]) with gr.Accordion("第四步:生成参考音频配置文本", open=False): gr.Markdown(value=i18n("4.1:编辑模板")) @@ -332,9 +328,9 @@ def whole_url(text_url, text_text, text_ref_path, text_ref_text, text_emotion): button_create_config = gr.Button(i18n("生成配置"), variant="primary") text_create_config_info = gr.Text(label=i18n("生成结果"), value="", interactive=False) button_create_config.click(create_config, - [text_work_space_dir, text_character, text_template, text_sync_ref_audio_dir2], + [text_work_space_dir, text_template, text_sync_ref_audio_dir2], [text_create_config_info]) - button_sample.click(sample, [text_work_space_dir, text_character, text_sample_dir, text_base_voice_path, + button_sample.click(sample, [text_work_space_dir, text_sample_dir, text_base_voice_path, text_subsection_num, text_sample_num, checkbox_similarity_output], [text_sample_info, text_model_inference_voice_dir, text_sync_ref_audio_dir, text_sync_ref_audio_dir2]) diff --git a/Ref_Audio_Selector/tool/audio_asr.py b/Ref_Audio_Selector/tool/audio_asr.py index 126d3f9a..f637e2c9 100644 --- a/Ref_Audio_Selector/tool/audio_asr.py +++ b/Ref_Audio_Selector/tool/audio_asr.py @@ -1,12 +1,14 @@ import os -from config import python_exec,is_half +from config import python_exec, is_half from tools import my_utils from tools.asr.config import asr_dict from subprocess import Popen + + def open_asr(asr_inp_dir, asr_opt_dir, asr_model, asr_model_size, asr_lang): global p_asr - if(p_asr==None): - asr_inp_dir=my_utils.clean_path(asr_inp_dir) + if (p_asr == None): + asr_inp_dir = my_utils.clean_path(asr_inp_dir) asr_py_path = asr_dict[asr_model]["path"] if asr_py_path == 'funasr_asr.py': asr_py_path = 'funasr_asr_multi_level_dir.py' @@ -17,18 +19,18 @@ def open_asr(asr_inp_dir, asr_opt_dir, asr_model, asr_model_size, asr_lang): cmd += f' -o "{asr_opt_dir}"' cmd += f' -s {asr_model_size}' cmd += f' -l {asr_lang}' - cmd += " -p %s"%("float16"if is_half==True else "float32") + cmd += " -p %s" % ("float16" if is_half == True else "float32") print(cmd) p_asr = Popen(cmd, shell=True) p_asr.wait() - p_asr=None + p_asr = None output_dir_abs = os.path.abspath(asr_opt_dir) output_file_name = os.path.basename(asr_inp_dir) # 构造输出文件路径 output_file_path = os.path.join(output_dir_abs, f'{output_file_name}.list') return 
output_file_path - + else: - return None \ No newline at end of file + return None diff --git a/Ref_Audio_Selector/tool/audio_inference.py b/Ref_Audio_Selector/tool/audio_inference.py index 2c291220..67aabf3b 100644 --- a/Ref_Audio_Selector/tool/audio_inference.py +++ b/Ref_Audio_Selector/tool/audio_inference.py @@ -2,6 +2,7 @@ import requests import urllib.parse + class URLComposer: def __init__(self, base_url, emotion_param_name, text_param_name, ref_path_param_name, ref_text_param_name): self.base_url = base_url @@ -9,18 +10,17 @@ def __init__(self, base_url, emotion_param_name, text_param_name, ref_path_param self.text_param_name = text_param_name self.ref_path_param_name = ref_path_param_name self.ref_text_param_name = ref_text_param_name - - + def is_valid(self): if self.base_url is None or self.base_url == '': raise ValueError("请输入url") - + if self.text_param_name is None or self.text_param_name == '': raise ValueError("请输入text参数名") - + if self.emotion_param_name is None and self.ref_path_param_name is None and self.ref_text_param_name is None: raise ValueError("请输入至少一个参考or情绪的参数") - + def is_emotion(self): return self.emotion_param_name is not None and self.emotion_param_name != '' @@ -49,10 +49,9 @@ def _append_params_to_url(self, params: dict): query_params = '&'.join([f"{k}={v}" for k, v in params.items()]) url_with_params += '?' + query_params if '?' not in self.base_url else '&' + query_params return url_with_params - - -def generate_audio_files(url_composer, text_list, emotion_list, output_dir_path): + +def generate_audio_files(url_composer, text_list, emotion_list, output_dir_path): # Ensure the output directory exists output_dir = Path(output_dir_path) output_dir.mkdir(parents=True, exist_ok=True) @@ -65,12 +64,12 @@ def generate_audio_files(url_composer, text_list, emotion_list, output_dir_path) for text, emotion in zip(text_list, emotion_list): # Generate audio byte stream using the create_audio function - + if url_composer.is_emotion(): real_url = url_composer.build_url_with_emotion(text, emotion['emotion']) else: real_url = url_composer.build_url_with_ref(text, emotion['ref_path'], emotion['ref_text']) - + audio_bytes = inference_audio_from_api(real_url) emotion_name = emotion['emotion'] @@ -88,11 +87,9 @@ def generate_audio_files(url_composer, text_list, emotion_list, output_dir_path) f.write(audio_bytes) with open(emotion_file_path, 'wb') as f: f.write(audio_bytes) - def inference_audio_from_api(url): - # 发起GET请求 response = requests.get(url, stream=True) @@ -101,4 +98,4 @@ def inference_audio_from_api(url): # 返回音频数据的字节流 return response.content else: - raise Exception(f"Failed to fetch audio from API. Server responded with status code {response.status_code}.") \ No newline at end of file + raise Exception(f"Failed to fetch audio from API. 
Server responded with status code {response.status_code}.") diff --git a/Ref_Audio_Selector/tool/audio_similarity.py b/Ref_Audio_Selector/tool/audio_similarity.py index 30ae5d1f..602f8fc3 100644 --- a/Ref_Audio_Selector/tool/audio_similarity.py +++ b/Ref_Audio_Selector/tool/audio_similarity.py @@ -3,6 +3,7 @@ from config import python_exec from subprocess import Popen + def convert_from_list(list_file, output_dir): # 创建输出目录,如果它不存在的话 if not os.path.exists(output_dir): @@ -61,15 +62,15 @@ def sample(output_audio_dir, similarity_list, subsection_num, sample_num): start = i * step end = (i + 1) * step end = min(end, len(similarity_list)) # 防止最后一段越界 - + num = min(sample_num, len(similarity_list[start:end])) # 随机采样 random.shuffle(similarity_list[start:end]) - sampled_subsection = similarity_list[start:start+num] + sampled_subsection = similarity_list[start:start + num] # 创建并进入子目录 - subdir_name = f'subsection_{i+1}' + subdir_name = f'subsection_{i + 1}' subdir_path = os.path.join(output_audio_dir, subdir_name) os.makedirs(subdir_path, exist_ok=True) @@ -82,9 +83,7 @@ def sample(output_audio_dir, similarity_list, subsection_num, sample_num): print("Sampling completed.") - def start_similarity_analysis(work_space_dir, sample_dir, base_voice_path, need_similarity_output): - similarity_list = None similarity_dir = os.path.join(work_space_dir, 'similarity') @@ -94,7 +93,7 @@ def start_similarity_analysis(work_space_dir, sample_dir, base_voice_path, need_ similarity_file = os.path.join(similarity_dir, f'{base_voice_file_name}.txt') global p_similarity - if(p_similarity==None): + if (p_similarity == None): cmd = f'"{python_exec}" tools/speaker_verification/voice_similarity.py ' cmd += f' -r "{base_voice_path}"' cmd += f' -c "{sample_dir}"' @@ -109,7 +108,7 @@ def start_similarity_analysis(work_space_dir, sample_dir, base_voice_path, need_ similarity_file_dir = os.path.dirname(similarity_dir, base_voice_file_name) ref_audio_opt.copy_and_move(similarity_file_dir, similarity_list) - p_similarity=None + p_similarity = None return similarity_list, similarity_file, similarity_file_dir else: return similarity_list, None, None @@ -145,7 +144,6 @@ def parse_similarity_file(file_path): def copy_and_move(output_audio_directory, similarity_scores): - # 确保新目录存在 if not os.path.exists(output_audio_directory): os.makedirs(output_audio_directory) @@ -178,5 +176,3 @@ def get_filename_without_extension(file_path): base_name = os.path.basename(file_path) # Get the base name (file name with extension) file_name, file_extension = os.path.splitext(base_name) # Split the base name into file name and extension return file_name # Return the file name without extension - - diff --git a/Ref_Audio_Selector/tool/text_comparison/text_comparison.py b/Ref_Audio_Selector/tool/text_comparison/text_comparison.py index dab00b62..420a9ed8 100644 --- a/Ref_Audio_Selector/tool/text_comparison/text_comparison.py +++ b/Ref_Audio_Selector/tool/text_comparison/text_comparison.py @@ -1,3 +1,4 @@ +import os import torch from transformers import AutoTokenizer, AutoModel from scipy.spatial.distance import cosine From 2c8f6bd4c9e6c90d660d37c4cf3b556d50064ad4 Mon Sep 17 00:00:00 2001 From: Downupanddownup Date: Thu, 25 Apr 2024 00:22:58 +0800 Subject: [PATCH 07/72] =?UTF-8?q?=E9=85=8D=E7=BD=AE=E6=96=87=E4=BB=B6?= =?UTF-8?q?=E7=94=9F=E6=88=90=E3=80=81=E9=9F=B3=E9=A2=91=E6=8A=BD=E6=A0=B7?= =?UTF-8?q?=E3=80=81=E9=9F=B3=E9=A2=91=E6=8E=A8=E7=90=86=E6=B5=8B=E8=AF=95?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 
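
[Editor's note: the sketch below is illustrative commentary, not part of the original commit message.]

This patch wires reference-audio config generation to the new template file
Ref_Audio_Selector/tool/config_template/ref_audio_template.txt, filling the
${emotion}/${ref_path}/${ref_text} placeholders by plain string replacement
(ref_path is first converted to a path relative to the work space directory,
with backslashes normalized on Windows). A minimal sketch of the intended
substitution, using hypothetical values for the emotion name, audio path, and
prompt text:

    # Hypothetical values; generate_audio_config() in tool/audio_config.py
    # performs the same chained replace() for every reference audio entry.
    template_str = '''"${emotion}": {
        "ref_wav_path": "${ref_path}",
        "prompt_text": "${ref_text}",
        "prompt_language": "中文"
    }'''

    entry = (template_str
             .replace('${emotion}', 'emotion_1')
             .replace('${ref_path}', 'refer_audio/emotion_1/sample.wav')
             .replace('${ref_text}', '你知道这不可能!'))

    # Entries for all reference audios are then joined with ",\n" and written
    # to refer_audio.json under the work space directory.
    print(entry)
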
--- Ref_Audio_Selector/common/common.py | 30 ++++++- .../ref_audio_selector_webui.py | 78 +++++++++++++++---- Ref_Audio_Selector/tool/audio_config.py | 10 ++- Ref_Audio_Selector/tool/audio_inference.py | 59 +++++++++----- Ref_Audio_Selector/tool/audio_similarity.py | 55 ++----------- .../config_template/ref_audio_template.txt | 5 ++ .../speaker_verification/voice_similarity.py | 8 +- .../tool/test_content/test_content.txt | 12 +++ 8 files changed, 169 insertions(+), 88 deletions(-) create mode 100644 Ref_Audio_Selector/tool/config_template/ref_audio_template.txt create mode 100644 Ref_Audio_Selector/tool/test_content/test_content.txt diff --git a/Ref_Audio_Selector/common/common.py b/Ref_Audio_Selector/common/common.py index f7de06da..b1197f5e 100644 --- a/Ref_Audio_Selector/common/common.py +++ b/Ref_Audio_Selector/common/common.py @@ -19,6 +19,8 @@ def __init__(self, root_dir): if file.endswith('.wav'): # 将相对路径转换为绝对路径 audio_abs_path = os.path.join(subdir, file) + if category not in self.audio_dict: + self.audio_dict[category] = [] self.audio_dict[category].append(audio_abs_path) def get_audio_list(self): @@ -64,4 +66,30 @@ def read_text_file_to_list(file_path): with open(file_path, mode='r', encoding='utf-8') as file: # 读取所有行并存储到一个列表中 lines = file.read().splitlines() - return lines \ No newline at end of file + return lines + + +def get_filename_without_extension(file_path): + """ + Given a file path string, returns the file name without its extension. + + Parameters: + file_path (str): The full path to the file. + + Returns: + str: The file name without its extension. + """ + base_name = os.path.basename(file_path) # Get the base name (file name with extension) + file_name, file_extension = os.path.splitext(base_name) # Split the base name into file name and extension + return file_name # Return the file name without extension + + +def read_file(file_path): + # 使用with语句打开并读取文件 + with open(file_path, 'r', encoding='utf-8') as file: # 'r' 表示以读取模式打开文件 + # 一次性读取文件所有内容 + file_content = file.read() + + # 文件在with语句结束时会自动关闭 + # 现在file_content变量中存储了文件的所有文本内容 + return file_content \ No newline at end of file diff --git a/Ref_Audio_Selector/ref_audio_selector_webui.py b/Ref_Audio_Selector/ref_audio_selector_webui.py index b2422132..8a7abfa5 100644 --- a/Ref_Audio_Selector/ref_audio_selector_webui.py +++ b/Ref_Audio_Selector/ref_audio_selector_webui.py @@ -1,4 +1,5 @@ import os.path +import traceback import gradio as gr import Ref_Audio_Selector.tool.audio_similarity as audio_similarity @@ -7,9 +8,13 @@ import Ref_Audio_Selector.tool.audio_config as audio_config import Ref_Audio_Selector.common.common as common from tools.i18n.i18n import I18nAuto +from config import python_exec +from subprocess import Popen i18n = I18nAuto() +p_similarity = None + # 校验基础信息 def check_base_info(text_work_space_dir): @@ -28,11 +33,44 @@ def convert_from_list(text_work_space_dir, text_list_input): raise Exception(i18n("list文件路径不能为空")) audio_similarity.convert_from_list(text_list_input, ref_audio_all) except Exception as e: + traceback.print_exc() text_convert_from_list_info = f"发生异常:{e}" text_sample_dir = '' return [text_convert_from_list_info, text_sample_dir] +def start_similarity_analysis(work_space_dir, sample_dir, base_voice_path, need_similarity_output): + similarity_list = None + similarity_file_dir = None + + similarity_dir = os.path.join(work_space_dir, 'similarity') + os.makedirs(similarity_dir, exist_ok=True) + + base_voice_file_name = common.get_filename_without_extension(base_voice_path) + similarity_file 
= os.path.join(similarity_dir, f'{base_voice_file_name}.txt') + + global p_similarity + if p_similarity is None: + cmd = f'"{python_exec}" Ref_Audio_Selector/tool/speaker_verification/voice_similarity.py ' + cmd += f' -r "{base_voice_path}"' + cmd += f' -c "{sample_dir}"' + cmd += f' -o {similarity_file}' + + print(cmd) + p_similarity = Popen(cmd, shell=True) + p_similarity.wait() + + if need_similarity_output: + similarity_list = audio_similarity.parse_similarity_file(similarity_file) + similarity_file_dir = os.path.join(similarity_dir, base_voice_file_name) + audio_similarity.copy_and_move(similarity_file_dir, similarity_list) + + p_similarity = None + return similarity_list, similarity_file, similarity_file_dir + else: + return similarity_list, None, None + + # 基于一个基准音频,从参考音频目录中进行分段抽样 def sample(text_work_space_dir, text_sample_dir, text_base_voice_path, text_subsection_num, text_sample_num, checkbox_similarity_output): @@ -49,15 +87,16 @@ def sample(text_work_space_dir, text_sample_dir, text_base_voice_path, if text_sample_num is None or text_sample_num == '': raise Exception(i18n("每段随机抽样个数不能为空")) - similarity_list = audio_similarity.start_similarity_analysis(text_work_space_dir, text_sample_dir, - text_base_voice_path, checkbox_similarity_output) + similarity_list, _, _ = start_similarity_analysis(text_work_space_dir, text_sample_dir, + text_base_voice_path, checkbox_similarity_output) if similarity_list is None: raise Exception(i18n("相似度分析失败")) - audio_similarity.sample(ref_audio_dir, similarity_list, text_subsection_num, text_sample_num) + audio_similarity.sample(ref_audio_dir, similarity_list, int(text_subsection_num), int(text_sample_num)) except Exception as e: + traceback.print_exc() text_sample_info = f"发生异常:{e}" ref_audio_dir = '' text_model_inference_voice_dir = ref_audio_dir @@ -98,6 +137,7 @@ def model_inference(text_work_space_dir, text_model_inference_voice_dir, text_ur audio_inference.generate_audio_files(url_composer, text_list, ref_audio_manager.get_ref_audio_list(), inference_dir) except Exception as e: + traceback.print_exc() text_model_inference_info = f"发生异常:{e}" text_asr_audio_dir = '' return [text_model_inference_info, text_asr_audio_dir] @@ -124,6 +164,7 @@ def asr(text_work_space_dir, text_asr_audio_dir, dropdown_asr_model, text_text_similarity_analysis_path = asr_file text_asr_info = f"asr成功:生成文件{asr_file}" except Exception as e: + traceback.print_exc() text_asr_info = f"发生异常:{e}" text_text_similarity_analysis_path = '' return [text_asr_info, text_text_similarity_analysis_path] @@ -140,6 +181,7 @@ def text_similarity_analysis(text_work_space_dir, raise Exception(i18n("asr生成的文件路径不能为空,请先完成上一步操作")) pass except Exception as e: + traceback.print_exc() text_text_similarity_analysis_info = f"发生异常:{e}" return text_text_similarity_analysis_info @@ -154,7 +196,7 @@ def similarity_audio_output(text_work_space_dir, text_base_audio_path, raise Exception(i18n("基准音频路径不能为空")) if text_compare_audio_dir is None or text_compare_audio_dir == '': raise Exception(i18n("待分析的音频所在目录不能为空")) - similarity_list, similarity_file, similarity_file_dir = audio_similarity.start_similarity_analysis( + similarity_list, similarity_file, similarity_file_dir = start_similarity_analysis( text_work_space_dir, text_compare_audio_dir, text_base_audio_path, True) if similarity_list is None: @@ -163,6 +205,7 @@ def similarity_audio_output(text_work_space_dir, text_base_audio_path, text_similarity_audio_output_info = f'相似度分析成功:生成目录{similarity_file_dir},文件{similarity_file}' except Exception as e: + 
traceback.print_exc() text_similarity_audio_output_info = f"发生异常:{e}" return text_similarity_audio_output_info @@ -179,6 +222,7 @@ def sync_ref_audio(text_work_space_dir, text_sync_ref_audio_dir, raise Exception(i18n("推理生成的音频目录不能为空")) pass except Exception as e: + traceback.print_exc() text_sync_ref_audio_info = f"发生异常:{e}" return text_sync_ref_audio_info @@ -194,8 +238,9 @@ def create_config(text_work_space_dir, text_template, text_sync_ref_audio_dir2): if text_sync_ref_audio_dir2 is None or text_sync_ref_audio_dir2 == '': raise Exception(i18n("参考音频目录不能为空")) ref_audio_manager = common.RefAudioListManager(text_sync_ref_audio_dir2) - audio_config.generate_audio_config(text_template, ref_audio_manager.get_ref_audio_list(), config_file) + audio_config.generate_audio_config(text_work_space_dir, text_template, ref_audio_manager.get_ref_audio_list(), config_file) except Exception as e: + traceback.print_exc() text_create_config_info = f"发生异常:{e}" return text_create_config_info @@ -234,14 +279,14 @@ def whole_url(text_url, text_text, text_ref_path, text_ref_text, text_emotion): with gr.Accordion(label=i18n("第二步:基于参考音频和测试文本,执行批量推理"), open=False): gr.Markdown(value=i18n("2.1:配置推理服务参数信息,参考音频路径/文本和角色情绪二选一,如果是角色情绪,需要先执行第四步," "将参考音频打包配置到推理服务下,在推理前,请确认完整请求地址是否与正常使用时的一致,包括角色名称,尤其是文本分隔符是否正确")) - text_model_inference_voice_dir = gr.Text(label=i18n("待推理的参考音频所在目录"), value="", interactive=False) + text_model_inference_voice_dir = gr.Text(label=i18n("待推理的参考音频所在目录"), value="", interactive=True) text_url = gr.Text(label=i18n("请输入推理服务请求地址与参数"), value="") with gr.Row(): text_text = gr.Text(label=i18n("请输入文本参数名"), value="text") - text_ref_path = gr.Text(label=i18n("请输入参考音频路径参数名"), value="text") - text_ref_text = gr.Text(label=i18n("请输入参考音频文本参数名"), value="text") - text_emotion = gr.Text(label=i18n("请输入角色情绪参数名"), value="text") - text_whole_url = gr.Text(label=i18n("完整地址"), value="5555555555555555", interactive=False) + text_ref_path = gr.Text(label=i18n("请输入参考音频路径参数名"), value="") + text_ref_text = gr.Text(label=i18n("请输入参考音频文本参数名"), value="") + text_emotion = gr.Text(label=i18n("请输入角色情绪参数名"), value="emotion") + text_whole_url = gr.Text(label=i18n("完整地址"), value="", interactive=False) text_url.input(whole_url, [text_url, text_text, text_ref_path, text_ref_text, text_emotion], [text_whole_url]) text_text.input(whole_url, [text_url, text_text, text_ref_path, text_ref_text, text_emotion], @@ -253,7 +298,8 @@ def whole_url(text_url, text_text, text_ref_path, text_ref_text, text_emotion): text_emotion.input(whole_url, [text_url, text_text, text_ref_path, text_ref_text, text_emotion], [text_whole_url]) gr.Markdown(value=i18n("2.2:配置待推理文本,一句一行,不要太多,10条即可")) - text_test_content = gr.Text(label=i18n("请输入待推理文本路径"), value="text") + default_test_content_path = 'Ref_Audio_Selector/tool/test_content/test_content.txt' + text_test_content = gr.Text(label=i18n("请输入待推理文本路径"), value=default_test_content_path) gr.Markdown(value=i18n("2.3:启动推理服务,如果还没启动的话")) gr.Markdown(value=i18n("2.4:开始批量推理,这个过程比较耗时,可以去干点别的")) with gr.Row(): @@ -311,7 +357,7 @@ def whole_url(text_url, text_text, text_ref_path, text_ref_text, text_emotion): [text_work_space_dir, text_base_audio_path, text_compare_audio_dir], [text_similarity_audio_output_info]) with gr.Row(): - text_sync_ref_audio_dir = gr.Text(label=i18n("参考音频路径"), value="", interactive=False) + text_sync_ref_audio_dir = gr.Text(label=i18n("参考音频路径"), value="", interactive=True) text_sync_inference_audio_dir = gr.Text(label=i18n("被同步的推理音频路径"), value="", interactive=False) with gr.Row(): 
button_sync_ref_audio = gr.Button(i18n("将参考音频的删除情况,同步到推理音频目录"), variant="primary") @@ -320,10 +366,12 @@ def whole_url(text_url, text_text, text_ref_path, text_ref_text, text_emotion): text_sync_inference_audio_dir], [text_sync_ref_info]) with gr.Accordion("第四步:生成参考音频配置文本", open=False): gr.Markdown(value=i18n("4.1:编辑模板")) - text_template_path = gr.Text(label=i18n("模板文件路径"), value="", interactive=False) - text_template = gr.Text(label=i18n("模板内容"), value="text", lines=10) + default_template_path = 'Ref_Audio_Selector/tool/config_template/ref_audio_template.txt' + default_template_content = common.read_file(default_template_path) + text_template_path = gr.Text(label=i18n("模板文件路径"), value=default_template_path, interactive=False) + text_template = gr.Text(label=i18n("模板内容"), value=default_template_content, lines=10) gr.Markdown(value=i18n("4.2:生成配置")) - text_sync_ref_audio_dir2 = gr.Text(label=i18n("参考音频路径"), value="", interactive=False) + text_sync_ref_audio_dir2 = gr.Text(label=i18n("参考音频路径"), value="", interactive=True) with gr.Row(): button_create_config = gr.Button(i18n("生成配置"), variant="primary") text_create_config_info = gr.Text(label=i18n("生成结果"), value="", interactive=False) diff --git a/Ref_Audio_Selector/tool/audio_config.py b/Ref_Audio_Selector/tool/audio_config.py index 7ea9a9b3..b6194343 100644 --- a/Ref_Audio_Selector/tool/audio_config.py +++ b/Ref_Audio_Selector/tool/audio_config.py @@ -1,7 +1,9 @@ import os +import platform +from tools import my_utils -def generate_audio_config(template_str, audio_list, output_file_path): +def generate_audio_config(work_space_dir, template_str, audio_list, output_file_path): # 定义一个空字符串来存储最终要写入文件的内容 file_content = "" @@ -11,8 +13,12 @@ def generate_audio_config(template_str, audio_list, output_file_path): ref_path = audio_info['ref_path'] ref_text = audio_info['ref_text'] + relative_path = os.path.relpath(ref_path, work_space_dir) + if platform.system() == 'Windows': + relative_path = relative_path.replace('\\', '/') + # 使用字符串模板替换变量 - formatted_line = template_str.replace('${emotion}', emotion).replace('${ref_path}', ref_path).replace( + formatted_line = template_str.replace('${emotion}', emotion).replace('${ref_path}', relative_path).replace( '${ref_text}', ref_text) # 将格式化后的行添加到内容中,使用逗号和换行符分隔 diff --git a/Ref_Audio_Selector/tool/audio_inference.py b/Ref_Audio_Selector/tool/audio_inference.py index 67aabf3b..9a5fff5f 100644 --- a/Ref_Audio_Selector/tool/audio_inference.py +++ b/Ref_Audio_Selector/tool/audio_inference.py @@ -1,11 +1,12 @@ import os import requests -import urllib.parse +from pathlib import Path +from urllib.parse import urlparse, parse_qs, urlencode, urlunparse, quote class URLComposer: def __init__(self, base_url, emotion_param_name, text_param_name, ref_path_param_name, ref_text_param_name): - self.base_url = base_url + self.base_url = safe_encode_query_params(base_url) self.emotion_param_name = emotion_param_name self.text_param_name = text_param_name self.ref_path_param_name = ref_path_param_name @@ -28,8 +29,8 @@ def build_url_with_emotion(self, text_value, emotion_value): if not self.emotion_param_name: raise ValueError("Emotion parameter name is not set.") params = { - self.text_param_name: urllib.parse.quote(text_value), - self.emotion_param_name: urllib.parse.quote(emotion_value), + self.text_param_name: quote(text_value), + self.emotion_param_name: quote(emotion_value), } return self._append_params_to_url(params) @@ -37,9 +38,9 @@ def build_url_with_ref(self, text_value, ref_path_value, ref_text_value): if 
self.emotion_param_name: raise ValueError("Cannot use reference parameters when emotion parameter is set.") params = { - self.text_param_name: urllib.parse.quote(text_value), - self.ref_path_param_name: urllib.parse.quote(ref_path_value), - self.ref_text_param_name: urllib.parse.quote(ref_text_value), + self.text_param_name: quote(text_value), + self.ref_path_param_name: quote(ref_path_value), + self.ref_text_param_name: quote(ref_text_value), } return self._append_params_to_url(params) @@ -51,16 +52,36 @@ def _append_params_to_url(self, params: dict): return url_with_params +def safe_encode_query_params(original_url): + + # 分析URL以获取查询字符串部分 + parsed_url = urlparse(original_url) + query_params = parse_qs(parsed_url.query) + + # 将查询参数转换为编码过的字典(键值对会被转码) + encoded_params = {k: quote(v[0]) for k, v in query_params.items()} + + # 重新编码查询字符串 + new_query_string = urlencode(encoded_params, doseq=False) + + # 重建完整的URL + new_parsed_url = parsed_url._replace(query=new_query_string) + encoded_url = urlunparse(new_parsed_url) + + print(encoded_url) + return encoded_url + + def generate_audio_files(url_composer, text_list, emotion_list, output_dir_path): # Ensure the output directory exists - output_dir = Path(output_dir_path) - output_dir.mkdir(parents=True, exist_ok=True) + output_dir = os.path.abspath(output_dir_path) + os.makedirs(output_dir, exist_ok=True) # Create subdirectories for text and emotion categories text_subdir = os.path.join(output_dir, 'text') - text_subdir.mkdir(exist_ok=True) + os.makedirs(text_subdir, exist_ok=True) emotion_subdir = os.path.join(output_dir, 'emotion') - emotion_subdir.mkdir(exist_ok=True) + os.makedirs(emotion_subdir, exist_ok=True) for text, emotion in zip(text_list, emotion_list): # Generate audio byte stream using the create_audio function @@ -74,18 +95,18 @@ def generate_audio_files(url_composer, text_list, emotion_list, output_dir_path) emotion_name = emotion['emotion'] - # Save audio files in both directories with the desired structure - text_file_path = os.path.join(text_subdir, text, emotion_name, '.wav') - emotion_file_path = os.path.join(emotion_subdir, emotion_name, text, '.wav') + text_subdir_text = os.path.join(text_subdir, text) + os.makedirs(text_subdir_text, exist_ok=True) + text_subdir_text_file_path = os.path.join(text_subdir_text, emotion_name + '.wav') - # Ensure intermediate directories for nested file paths exist - text_file_path.parent.mkdir(parents=True, exist_ok=True) - emotion_file_path.parent.mkdir(parents=True, exist_ok=True) + emotion_subdir_emotion = os.path.join(emotion_subdir, emotion_name) + os.makedirs(emotion_subdir_emotion, exist_ok=True) + emotion_subdir_emotion_file_path = os.path.join(emotion_subdir_emotion, text + '.wav') # Write audio bytes to the respective files - with open(text_file_path, 'wb') as f: + with open(text_subdir_text_file_path, 'wb') as f: f.write(audio_bytes) - with open(emotion_file_path, 'wb') as f: + with open(emotion_subdir_emotion_file_path, 'wb') as f: f.write(audio_bytes) diff --git a/Ref_Audio_Selector/tool/audio_similarity.py b/Ref_Audio_Selector/tool/audio_similarity.py index 602f8fc3..962f85d7 100644 --- a/Ref_Audio_Selector/tool/audio_similarity.py +++ b/Ref_Audio_Selector/tool/audio_similarity.py @@ -1,7 +1,7 @@ import os import shutil -from config import python_exec -from subprocess import Popen +import random + def convert_from_list(list_file, output_dir): @@ -70,7 +70,7 @@ def sample(output_audio_dir, similarity_list, subsection_num, sample_num): sampled_subsection = similarity_list[start:start 
+ num] # 创建并进入子目录 - subdir_name = f'subsection_{i + 1}' + subdir_name = f'emotion_{i + 1}' subdir_path = os.path.join(output_audio_dir, subdir_name) os.makedirs(subdir_path, exist_ok=True) @@ -83,37 +83,6 @@ def sample(output_audio_dir, similarity_list, subsection_num, sample_num): print("Sampling completed.") -def start_similarity_analysis(work_space_dir, sample_dir, base_voice_path, need_similarity_output): - similarity_list = None - - similarity_dir = os.path.join(work_space_dir, 'similarity') - os.makedirs(similarity_dir, exist_ok=True) - - base_voice_file_name = ref_audio_opt.get_filename_without_extension(base_voice_path) - similarity_file = os.path.join(similarity_dir, f'{base_voice_file_name}.txt') - - global p_similarity - if (p_similarity == None): - cmd = f'"{python_exec}" tools/speaker_verification/voice_similarity.py ' - cmd += f' -r "{base_voice_path}"' - cmd += f' -c "{sample_dir}"' - cmd += f' -o {similarity_file}' - - print(cmd) - p_similarity = Popen(cmd, shell=True) - p_similarity.wait() - - if need_similarity_output: - similarity_list = ref_audio_opt.parse_similarity_file(similarity_file) - similarity_file_dir = os.path.dirname(similarity_dir, base_voice_file_name) - ref_audio_opt.copy_and_move(similarity_file_dir, similarity_list) - - p_similarity = None - return similarity_list, similarity_file, similarity_file_dir - else: - return similarity_list, None, None - - def parse_similarity_file(file_path): """ 解析指定文本文件,将其中的内容以元组形式存入列表。 @@ -126,7 +95,7 @@ def parse_similarity_file(file_path): """ result_list = [] - with open(file_path, 'r') as file: + with open(file_path, 'r', encoding='utf-8') as file: for line in file: # 去除行尾换行符并按'|'分割 score, filepath = line.strip().split('|') @@ -163,16 +132,6 @@ def copy_and_move(output_audio_directory, similarity_scores): print("已完成复制和重命名操作。") -def get_filename_without_extension(file_path): - """ - Given a file path string, returns the file name without its extension. - - Parameters: - file_path (str): The full path to the file. - - Returns: - str: The file name without its extension. 
- """ - base_name = os.path.basename(file_path) # Get the base name (file name with extension) - file_name, file_extension = os.path.splitext(base_name) # Split the base name into file name and extension - return file_name # Return the file name without extension +if __name__ == '__main__': + similarity_list = parse_similarity_file("D:/tt/similarity/啊,除了伊甸和樱,竟然还有其他人会提起我?.txt") + sample('D:/tt/similarity/output', similarity_list, 10, 4) \ No newline at end of file diff --git a/Ref_Audio_Selector/tool/config_template/ref_audio_template.txt b/Ref_Audio_Selector/tool/config_template/ref_audio_template.txt new file mode 100644 index 00000000..97142b11 --- /dev/null +++ b/Ref_Audio_Selector/tool/config_template/ref_audio_template.txt @@ -0,0 +1,5 @@ +"${emotion}": { + "ref_wav_path": "${ref_path}", + "prompt_text": "${ref_text}", + "prompt_language": "中文" +} \ No newline at end of file diff --git a/Ref_Audio_Selector/tool/speaker_verification/voice_similarity.py b/Ref_Audio_Selector/tool/speaker_verification/voice_similarity.py index c1f415e8..4b8ded76 100644 --- a/Ref_Audio_Selector/tool/speaker_verification/voice_similarity.py +++ b/Ref_Audio_Selector/tool/speaker_verification/voice_similarity.py @@ -4,7 +4,7 @@ from modelscope.pipelines import pipeline sv_pipeline = pipeline( task='speaker-verification', - model='/tools/speaker_verification/models/speech_campplus_sv_zh-cn_16k-common', + model='Ref_Audio_Selector/tool/speaker_verification/models/speech_campplus_sv_zh-cn_16k-common', model_revision='v1.0.0' ) @@ -21,6 +21,7 @@ def compare_audio_and_generate_report(reference_audio_path, comparison_dir_path, 'score': score, 'path': audio_path }) + print(f'similarity score: {score}, path: {audio_path}') # Step 3: 根据相似度分数降序排列 similarity_scores.sort(key=lambda x: x['score'], reverse=True) @@ -57,8 +58,9 @@ def parse_arguments(): if __name__ == '__main__': cmd = parse_arguments() + print(cmd) compare_audio_and_generate_report( reference_audio_path = cmd.reference_audio, - comparison_dir = cmd.comparison_dir, - output_file = cmd.output_file, + comparison_dir_path = cmd.comparison_dir, + output_file_path = cmd.output_file, ) \ No newline at end of file diff --git a/Ref_Audio_Selector/tool/test_content/test_content.txt b/Ref_Audio_Selector/tool/test_content/test_content.txt new file mode 100644 index 00000000..507383c6 --- /dev/null +++ b/Ref_Audio_Selector/tool/test_content/test_content.txt @@ -0,0 +1,12 @@ +你知道这不可能! +如果有人故意破坏呢? +也不可能!同时改变三颗卫星和一个地面观测站的数据?那这破坏也有些超自然了。 +汪淼点点头,比起宇宙闪烁来,他宁愿接受这个超自然。但沙瑞山立刻抽走了他怀中这唯一的一根救命稻草。 +要想最终证实这一切,其实很简单。宇宙背景辐射这样幅度的波动,已经大到我们能用肉眼觉察的程度。 +你胡说什么?现在是你在违反常识了:背景辐射的波长是7厘米,比可见光大了七八个数量级,怎么能看到? +用特制眼镜。 +特制眼镜? 
+是我们为首都天文馆做的一个科普小玩意儿。现在的技术,已经能将彭齐阿斯和威尔逊在四十多年前用于发现特制背景辐射的二十英尺的喇叭形天线做成眼镜大小, +并且在这个眼镜中设置一个转换系统,将接收到的背景辐射的波长压缩七个数量级,将7厘米波转换成红光。 +这样,观众在夜里戴上这种眼镜,就能亲眼看到宇宙的特制背景辐射,现在,也能看到宇宙闪烁。 +这东西现在哪儿?能告诉我吗 \ No newline at end of file From 4daa9ad53c311279a92590c74c4605ed04f29fda Mon Sep 17 00:00:00 2001 From: Downupanddownup Date: Thu, 25 Apr 2024 11:54:13 +0800 Subject: [PATCH 08/72] =?UTF-8?q?=E6=B7=BB=E5=8A=A0=E6=96=87=E6=9C=AC?= =?UTF-8?q?=E7=9B=B8=E4=BC=BC=E5=BA=A6=E6=AF=94=E8=BE=83=E5=8A=9F=E8=83=BD?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- Ref_Audio_Selector/common/common.py | 12 +- .../config_template/ref_audio_template.txt | 0 .../test_content/test_content.txt | 0 .../ref_audio_selector_webui.py | 66 ++++++++- .../tool/asr/funasr_asr_multi_level_dir.py | 45 +++--- Ref_Audio_Selector/tool/audio_asr.py | 32 ----- Ref_Audio_Selector/tool/audio_inference.py | 15 +- .../tool/text_comparison/asr_text_process.py | 136 ++++++++++++++++++ .../tool/text_comparison/text_comparison.py | 13 +- Ref_Audio_Selector/tool/text_similarity.py | 0 10 files changed, 248 insertions(+), 71 deletions(-) rename Ref_Audio_Selector/{tool => file}/config_template/ref_audio_template.txt (100%) rename Ref_Audio_Selector/{tool => file}/test_content/test_content.txt (100%) create mode 100644 Ref_Audio_Selector/tool/text_comparison/asr_text_process.py create mode 100644 Ref_Audio_Selector/tool/text_similarity.py diff --git a/Ref_Audio_Selector/common/common.py b/Ref_Audio_Selector/common/common.py index b1197f5e..998ae6ab 100644 --- a/Ref_Audio_Selector/common/common.py +++ b/Ref_Audio_Selector/common/common.py @@ -92,4 +92,14 @@ def read_file(file_path): # 文件在with语句结束时会自动关闭 # 现在file_content变量中存储了文件的所有文本内容 - return file_content \ No newline at end of file + return file_content + + +def write_text_to_file(text, output_file_path): + try: + with open(output_file_path, 'w', encoding='utf-8') as file: + file.write(text) + except IOError as e: + print(f"Error occurred while writing to the file: {e}") + else: + print(f"Text successfully written to file: {output_file_path}") \ No newline at end of file diff --git a/Ref_Audio_Selector/tool/config_template/ref_audio_template.txt b/Ref_Audio_Selector/file/config_template/ref_audio_template.txt similarity index 100% rename from Ref_Audio_Selector/tool/config_template/ref_audio_template.txt rename to Ref_Audio_Selector/file/config_template/ref_audio_template.txt diff --git a/Ref_Audio_Selector/tool/test_content/test_content.txt b/Ref_Audio_Selector/file/test_content/test_content.txt similarity index 100% rename from Ref_Audio_Selector/tool/test_content/test_content.txt rename to Ref_Audio_Selector/file/test_content/test_content.txt diff --git a/Ref_Audio_Selector/ref_audio_selector_webui.py b/Ref_Audio_Selector/ref_audio_selector_webui.py index 8a7abfa5..ef0ac0a9 100644 --- a/Ref_Audio_Selector/ref_audio_selector_webui.py +++ b/Ref_Audio_Selector/ref_audio_selector_webui.py @@ -1,19 +1,23 @@ import os.path +import os import traceback import gradio as gr import Ref_Audio_Selector.tool.audio_similarity as audio_similarity import Ref_Audio_Selector.tool.audio_inference as audio_inference -import Ref_Audio_Selector.tool.audio_asr as audio_asr import Ref_Audio_Selector.tool.audio_config as audio_config import Ref_Audio_Selector.common.common as common from tools.i18n.i18n import I18nAuto -from config import python_exec +from config import python_exec, is_half +from tools import my_utils +from tools.asr.config import asr_dict from subprocess import 
Popen i18n = I18nAuto() p_similarity = None +p_asr = None +p_text_similarity = None # 校验基础信息 @@ -159,7 +163,7 @@ def asr(text_work_space_dir, text_asr_audio_dir, dropdown_asr_model, raise Exception(i18n("asr模型大小不能为空")) if dropdown_asr_lang is None or dropdown_asr_lang == '': raise Exception(i18n("asr语言不能为空")) - asr_file = audio_asr.open_asr(text_asr_audio_dir, text_work_space_dir, dropdown_asr_model, dropdown_asr_size, + asr_file = open_asr(text_asr_audio_dir, text_work_space_dir, dropdown_asr_model, dropdown_asr_size, dropdown_asr_lang) text_text_similarity_analysis_path = asr_file text_asr_info = f"asr成功:生成文件{asr_file}" @@ -170,22 +174,72 @@ def asr(text_work_space_dir, text_asr_audio_dir, dropdown_asr_model, return [text_asr_info, text_text_similarity_analysis_path] +def open_asr(asr_inp_dir, asr_opt_dir, asr_model, asr_model_size, asr_lang): + global p_asr + if p_asr is None: + asr_inp_dir = my_utils.clean_path(asr_inp_dir) + asr_py_path = asr_dict[asr_model]["path"] + if asr_py_path == 'funasr_asr.py': + asr_py_path = 'funasr_asr_multi_level_dir.py' + if asr_py_path == 'fasterwhisper.py': + asr_py_path = 'fasterwhisper_asr_multi_level_dir.py' + cmd = f'"{python_exec}" Ref_Audio_Selector/tool/asr/{asr_py_path} ' + cmd += f' -i "{asr_inp_dir}"' + cmd += f' -o "{asr_opt_dir}"' + cmd += f' -s {asr_model_size}' + cmd += f' -l {asr_lang}' + cmd += " -p %s" % ("float16" if is_half == True else "float32") + + print(cmd) + p_asr = Popen(cmd, shell=True) + p_asr.wait() + p_asr = None + + output_dir_abs = os.path.abspath(asr_opt_dir) + output_file_name = os.path.basename(asr_inp_dir) + # 构造输出文件路径 + output_file_path = os.path.join(output_dir_abs, f'{output_file_name}_asr.list') + return output_file_path + + else: + return None + + # 对asr生成的文件,与原本的文本内容,进行相似度分析 def text_similarity_analysis(text_work_space_dir, text_text_similarity_analysis_path): - similarity_file = os.path.join(text_work_space_dir, 'similarity.txt') - text_text_similarity_analysis_info = f"相似度分析成功:生成文件{similarity_file}" + similarity_dir = os.path.join(text_work_space_dir, 'text_similarity') + text_text_similarity_analysis_info = f"相似度分析成功:生成目录{similarity_dir}" try: check_base_info(text_work_space_dir) if text_text_similarity_analysis_path is None or text_text_similarity_analysis_path == '': raise Exception(i18n("asr生成的文件路径不能为空,请先完成上一步操作")) - pass + open_text_similarity_analysis(text_text_similarity_analysis_path, similarity_dir) except Exception as e: traceback.print_exc() text_text_similarity_analysis_info = f"发生异常:{e}" return text_text_similarity_analysis_info +def open_text_similarity_analysis(asr_file_path, output_dir, similarity_enlarge_boundary=0.8): + global p_text_similarity + if p_text_similarity is None: + cmd = f'"{python_exec}" Ref_Audio_Selector/tool/text_comparison/asr_text_process.py ' + cmd += f' -a "{asr_file_path}"' + cmd += f' -o "{output_dir}"' + cmd += f' -b {similarity_enlarge_boundary}' + + print(cmd) + p_text_similarity = Popen(cmd, shell=True) + p_text_similarity.wait() + p_text_similarity = None + + return output_dir + + else: + return None + + # 根据一个参考音频,对指定目录下的音频进行相似度分析,并输出到另一个目录 def similarity_audio_output(text_work_space_dir, text_base_audio_path, text_compare_audio_dir): diff --git a/Ref_Audio_Selector/tool/asr/funasr_asr_multi_level_dir.py b/Ref_Audio_Selector/tool/asr/funasr_asr_multi_level_dir.py index ab94b4a3..a67822d1 100644 --- a/Ref_Audio_Selector/tool/asr/funasr_asr_multi_level_dir.py +++ b/Ref_Audio_Selector/tool/asr/funasr_asr_multi_level_dir.py @@ -7,22 +7,24 @@ from funasr import 
AutoModel -path_asr = 'tools/asr/models/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch' -path_vad = 'tools/asr/models/speech_fsmn_vad_zh-cn-16k-common-pytorch' +path_asr = 'tools/asr/models/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch' +path_vad = 'tools/asr/models/speech_fsmn_vad_zh-cn-16k-common-pytorch' path_punc = 'tools/asr/models/punc_ct-transformer_zh-cn-common-vocab272727-pytorch' -path_asr = path_asr if os.path.exists(path_asr) else "iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch" -path_vad = path_vad if os.path.exists(path_vad) else "iic/speech_fsmn_vad_zh-cn-16k-common-pytorch" +path_asr = path_asr if os.path.exists( + path_asr) else "iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch" +path_vad = path_vad if os.path.exists(path_vad) else "iic/speech_fsmn_vad_zh-cn-16k-common-pytorch" path_punc = path_punc if os.path.exists(path_punc) else "iic/punc_ct-transformer_zh-cn-common-vocab272727-pytorch" model = AutoModel( - model = path_asr, - model_revision = "v2.0.4", - vad_model = path_vad, - vad_model_revision = "v2.0.4", - punc_model = path_punc, - punc_model_revision = "v2.0.4", + model=path_asr, + model_revision="v2.0.4", + vad_model=path_vad, + vad_model_revision="v2.0.4", + punc_model=path_punc, + punc_model_revision="v2.0.4", ) + def only_asr(input_file): try: text = model.generate(input=input_file)[0]["text"] @@ -31,6 +33,7 @@ def only_asr(input_file): print(traceback.format_exc()) return text + def execute_asr(input_folder, output_folder, model_size, language): input_file_names = os.listdir(input_folder) input_file_names.sort() @@ -40,7 +43,7 @@ def execute_asr(input_folder, output_folder, model_size, language): for name in tqdm(input_file_names): try: - text = model.generate(input="%s/%s"%(input_folder, name))[0]["text"] + text = model.generate(input="%s/%s" % (input_folder, name))[0]["text"] output.append(f"{input_folder}/{name}|{output_file_name}|{language.upper()}|{text}") except: print(traceback.format_exc()) @@ -64,12 +67,13 @@ def execute_asr_multi_level_dir(input_folder, output_folder, model_size, languag # 只处理wav文件(假设是wav文件) if name.endswith(".wav"): try: + original_text = os.path.basename(root) # 构造完整的输入音频文件路径 input_file_path = os.path.join(root, name) input_file_path = os.path.normpath(input_file_path) # 先标准化可能存在混合斜杠的情况 - text = model.generate(input=input_file_path)[0]["text"] + asr_text = model.generate(input=input_file_path)[0]["text"] - output.append(f"{input_file_path}|{output_file_name}|{language.upper()}|{text}") + output.append(f"{input_file_path}|{original_text}|{language.upper()}|{asr_text}") except: print(traceback.format_exc()) @@ -80,7 +84,7 @@ def execute_asr_multi_level_dir(input_folder, output_folder, model_size, languag os.makedirs(output_dir_abs, exist_ok=True) # 构造输出文件路径 - output_file_path = os.path.join(output_dir_abs, f'{output_file_name}.list') + output_file_path = os.path.join(output_dir_abs, f'{output_file_name}_asr.list') # 将输出写入文件 with open(output_file_path, "w", encoding="utf-8") as f: @@ -89,6 +93,7 @@ def execute_asr_multi_level_dir(input_folder, output_folder, model_size, languag return output_file_path + if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument("-i", "--input_folder", type=str, required=True, @@ -99,13 +104,13 @@ def execute_asr_multi_level_dir(input_folder, output_folder, model_size, languag help="Model Size of FunASR is Large") parser.add_argument("-l", "--language", type=str, default='zh', choices=['zh'], 
help="Language of the audio files.") - parser.add_argument("-p", "--precision", type=str, default='float16', choices=['float16','float32'], - help="fp16 or fp32")#还没接入 + parser.add_argument("-p", "--precision", type=str, default='float16', choices=['float16', 'float32'], + help="fp16 or fp32") # 还没接入 cmd = parser.parse_args() execute_asr_multi_level_dir( - input_folder = cmd.input_folder, - output_folder = cmd.output_folder, - model_size = cmd.model_size, - language = cmd.language, + input_folder=cmd.input_folder, + output_folder=cmd.output_folder, + model_size=cmd.model_size, + language=cmd.language, ) diff --git a/Ref_Audio_Selector/tool/audio_asr.py b/Ref_Audio_Selector/tool/audio_asr.py index f637e2c9..fd40910d 100644 --- a/Ref_Audio_Selector/tool/audio_asr.py +++ b/Ref_Audio_Selector/tool/audio_asr.py @@ -1,36 +1,4 @@ -import os -from config import python_exec, is_half -from tools import my_utils -from tools.asr.config import asr_dict -from subprocess import Popen -def open_asr(asr_inp_dir, asr_opt_dir, asr_model, asr_model_size, asr_lang): - global p_asr - if (p_asr == None): - asr_inp_dir = my_utils.clean_path(asr_inp_dir) - asr_py_path = asr_dict[asr_model]["path"] - if asr_py_path == 'funasr_asr.py': - asr_py_path = 'funasr_asr_multi_level_dir.py' - if asr_py_path == 'fasterwhisper.py': - asr_py_path = 'fasterwhisper_asr_multi_level_dir.py' - cmd = f'"{python_exec}" tools/asr/{asr_py_path}' - cmd += f' -i "{asr_inp_dir}"' - cmd += f' -o "{asr_opt_dir}"' - cmd += f' -s {asr_model_size}' - cmd += f' -l {asr_lang}' - cmd += " -p %s" % ("float16" if is_half == True else "float32") - print(cmd) - p_asr = Popen(cmd, shell=True) - p_asr.wait() - p_asr = None - output_dir_abs = os.path.abspath(asr_opt_dir) - output_file_name = os.path.basename(asr_inp_dir) - # 构造输出文件路径 - output_file_path = os.path.join(output_dir_abs, f'{output_file_name}.list') - return output_file_path - - else: - return None diff --git a/Ref_Audio_Selector/tool/audio_inference.py b/Ref_Audio_Selector/tool/audio_inference.py index 9a5fff5f..c2a46344 100644 --- a/Ref_Audio_Selector/tool/audio_inference.py +++ b/Ref_Audio_Selector/tool/audio_inference.py @@ -1,7 +1,9 @@ import os import requests -from pathlib import Path +import itertools from urllib.parse import urlparse, parse_qs, urlencode, urlunparse, quote +from tools.i18n.i18n import I18nAuto +i18n = I18nAuto() class URLComposer: @@ -14,13 +16,13 @@ def __init__(self, base_url, emotion_param_name, text_param_name, ref_path_param def is_valid(self): if self.base_url is None or self.base_url == '': - raise ValueError("请输入url") + raise ValueError(i18n("请输入url")) if self.text_param_name is None or self.text_param_name == '': - raise ValueError("请输入text参数名") + raise ValueError(i18n("请输入text参数名")) if self.emotion_param_name is None and self.ref_path_param_name is None and self.ref_text_param_name is None: - raise ValueError("请输入至少一个参考or情绪的参数") + raise ValueError(i18n("请输入至少一个参考or情绪的参数")) def is_emotion(self): return self.emotion_param_name is not None and self.emotion_param_name != '' @@ -83,7 +85,10 @@ def generate_audio_files(url_composer, text_list, emotion_list, output_dir_path) emotion_subdir = os.path.join(output_dir, 'emotion') os.makedirs(emotion_subdir, exist_ok=True) - for text, emotion in zip(text_list, emotion_list): + # 计算笛卡尔积 + cartesian_product = list(itertools.product(text_list, emotion_list)) + + for text, emotion in cartesian_product: # Generate audio byte stream using the create_audio function if url_composer.is_emotion(): diff --git 
a/Ref_Audio_Selector/tool/text_comparison/asr_text_process.py b/Ref_Audio_Selector/tool/text_comparison/asr_text_process.py
new file mode 100644
index 00000000..7ec2bd9b
--- /dev/null
+++ b/Ref_Audio_Selector/tool/text_comparison/asr_text_process.py
@@ -0,0 +1,136 @@
+import os
+import argparse
+from collections import defaultdict
+from operator import itemgetter
+import Ref_Audio_Selector.tool.text_comparison.text_comparison as text_comparison
+import Ref_Audio_Selector.common.common as common
+
+
+def parse_asr_file(file_path):
+    output = []
+
+    with open(file_path, 'r', encoding='utf-8') as file:
+        for line in file:
+            # 假设每行都是正确的格式,且"|"是固定分隔符
+            input_file_path, original_text, language, asr_text = line.strip().split('|')
+
+            emotion = common.get_filename_without_extension(input_file_path)
+
+            # 将解析出的数据构造成新的字典或元组等结构
+            parsed_data = {
+                'emotion': emotion,
+                'input_file_path': input_file_path,
+                'original_text': original_text,
+                'language': language,
+                'asr_text': asr_text,
+                'similarity_score': 0
+            }
+
+            output.append(parsed_data)
+
+    return output
+
+
+def calculate_similarity_and_append_to_list(input_list, boundary):
+    for item in input_list:
+        _, similarity_score = text_comparison.calculate_result(item['original_text'], item['asr_text'], boundary)  # calculate_result返回(原始相似度, 放大后相似度),这里取放大后的值
+        item['similarity_score'] = similarity_score
+
+    return input_list
+
+
+def calculate_average_similarity_by_emotion(data_list):
+    result_dict = defaultdict(list)
+
+    for item in data_list:
+        emotion = item['emotion']
+        similarity_score = item['similarity_score']
+        result_dict[emotion].append(similarity_score)
+
+    average_scores = [{'emotion': emotion, 'average_similarity_score': sum(scores) / len(scores)}
+                      for emotion, scores in result_dict.items()]
+
+    average_scores.sort(key=lambda x: x['average_similarity_score'], reverse=True)
+
+    return average_scores
+
+
+def group_and_sort_by_field(data, group_by_field):
+    # 创建一个空的结果字典,键是group_by_field指定的字段,值是一个列表
+    result_dict = defaultdict(list)
+
+    # 遍历输入列表
+    for item in data:
+        # 根据指定的group_by_field将当前元素添加到对应键的列表中
+        key_to_group = item[group_by_field]
+        result_dict[key_to_group].append(item)
+
+    # 对每个键对应的列表中的元素按similarity_score降序排序
+    for key in result_dict:
+        result_dict[key].sort(key=itemgetter('similarity_score'), reverse=True)
+
+    # 将结果字典转换为列表,每个元素是一个包含键(emotion或original_text)和排序后数组的元组
+    result_list = [(k, v) for k, v in result_dict.items()]
+
+    return result_list
+
+
+def format_list_to_text(data_list, output_filename):
+    with open(output_filename, 'w', encoding='utf-8') as output_file:
+        for key, items in data_list:
+            # 写入情绪标题
+            output_file.write(key + '\n')
+
+            # 写入每条记录
+            for item in items:
+                formatted_line = f"{item['similarity_score']}|{item['original_text']}|{item['asr_text']}\n"
+                output_file.write(formatted_line)
+
+
+def process(asr_file_path, output_dir, similarity_enlarge_boundary):
+    # 检查输出目录是否存在,如果不存在则创建
+    if not os.path.exists(output_dir):
+        os.makedirs(output_dir)
+
+    records = parse_asr_file(asr_file_path)
+    calculate_similarity_and_append_to_list(records, similarity_enlarge_boundary)
+    average_similarity_list = calculate_average_similarity_by_emotion(records)
+
+    average_similarity_file = os.path.join(output_dir, 'average_similarity.txt')
+    average_similarity_content = \
+        '\n'.join([f"{item['average_similarity_score']}|{item['emotion']}" for item in average_similarity_list])
+    common.write_text_to_file(average_similarity_content, average_similarity_file)
+
+    emotion_detail_list = group_and_sort_by_field(records, 'emotion')
+
+    emotion_detail_file = os.path.join(output_dir,
'emotion_group_detail.txt') + format_list_to_text(emotion_detail_list, emotion_detail_file) + + original_text_detail_list = group_and_sort_by_field(records, 'original_text') + + original_text_detail_file = os.path.join(output_dir, 'text_group_detail.txt') + format_list_to_text(original_text_detail_list, original_text_detail_file) + + print('文本相似度分析完成。') + + +def parse_arguments(): + parser = argparse.ArgumentParser(description="Process ASR files and analyze similarity.") + + parser.add_argument("-a", "--asr_file_path", type=str, required=True, + help="Path to the directory containing ASR files or path to a single ASR file.") + + parser.add_argument("-o", "--output_dir", type=str, required=True, + help="Path to the directory where the analysis results should be saved.") + + parser.add_argument("-b", "--similarity_enlarge_boundary", type=float, required=True, + help="Similarity score boundary value to be used in your calculations.") + + args = parser.parse_args() + return args + + +if __name__ == '__main__': + cmd = parse_arguments() + print(cmd) + process(cmd.asr_file_path, cmd.output_dir, cmd.similarity_enlarge_boundary) diff --git a/Ref_Audio_Selector/tool/text_comparison/text_comparison.py b/Ref_Audio_Selector/tool/text_comparison/text_comparison.py index 420a9ed8..5a33776a 100644 --- a/Ref_Audio_Selector/tool/text_comparison/text_comparison.py +++ b/Ref_Audio_Selector/tool/text_comparison/text_comparison.py @@ -8,7 +8,6 @@ "bert_path", "GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large" ) - tokenizer = AutoTokenizer.from_pretrained(bert_path) model = AutoModel.from_pretrained(bert_path) @@ -28,26 +27,26 @@ def calculate_similarity(text1, text2, max_length=512): return similarity -# 对0.8-1区间的值进行放大 -def adjusted_similarity(similarity_score2, boundary=0.8): +# 对boundary到1区间的值进行放大 +def adjusted_similarity(similarity_score2, boundary=0.8): if similarity_score2 < boundary: return 0 # 倍数 - multiple = 1/(1 - boundary) + multiple = 1 / (1 - boundary) - adjusted_score = (similarity_score2 - boundary)*multiple + adjusted_score = (similarity_score2 - boundary) * multiple return adjusted_score -def calculate_result(t1, t2): +def calculate_result(t1, t2, boundary): # 计算并打印相似度 similarity_score2 = calculate_similarity(t1, t2) # 调整相似度 - adjusted_similarity_score2 = adjusted_similarity(similarity_score2) + adjusted_similarity_score2 = adjusted_similarity(similarity_score2, boundary) return similarity_score2, adjusted_similarity_score2 diff --git a/Ref_Audio_Selector/tool/text_similarity.py b/Ref_Audio_Selector/tool/text_similarity.py new file mode 100644 index 00000000..e69de29b From b6f0bb36ef229e4ad34c20e3b57c65e291e7994a Mon Sep 17 00:00:00 2001 From: Downupanddownup Date: Thu, 25 Apr 2024 13:26:32 +0800 Subject: [PATCH 09/72] =?UTF-8?q?=E6=B7=BB=E5=8A=A0=E5=90=8C=E6=AD=A5?= =?UTF-8?q?=E5=8F=82=E8=80=83=E9=9F=B3=E9=A2=91=E4=BB=A3=E7=A0=81?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- Ref_Audio_Selector/common/common.py | 4 +- .../ref_audio_selector_webui.py | 75 ++++++++++--------- Ref_Audio_Selector/tool/audio_asr.py | 4 - Ref_Audio_Selector/tool/audio_inference.py | 8 +- .../tool/delete_inference_with_ref.py | 58 ++++++++++++++ Ref_Audio_Selector/tool/text_similarity.py | 0 6 files changed, 102 insertions(+), 47 deletions(-) delete mode 100644 Ref_Audio_Selector/tool/audio_asr.py create mode 100644 Ref_Audio_Selector/tool/delete_inference_with_ref.py delete mode 100644 Ref_Audio_Selector/tool/text_similarity.py diff --git 
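The generalized adjusted_similarity above maps scores in [boundary, 1] linearly onto [0, 1] and clamps everything below the boundary to 0. A quick standalone check of the arithmetic with the default boundary of 0.8:

# Same formula as adjusted_similarity above, isolated for a numeric sanity check.
def adjusted(score, boundary=0.8):
    if score < boundary:
        return 0
    return (score - boundary) * (1 / (1 - boundary))

for s in (0.75, 0.8, 0.9, 1.0):
    print(s, '->', round(adjusted(s), 2))  # 0, 0.0, 0.5, 1.0
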
a/Ref_Audio_Selector/common/common.py b/Ref_Audio_Selector/common/common.py index 998ae6ab..3d746747 100644 --- a/Ref_Audio_Selector/common/common.py +++ b/Ref_Audio_Selector/common/common.py @@ -2,6 +2,7 @@ import glob import os + class RefAudioListManager: def __init__(self, root_dir): self.audio_dict = {'default': []} @@ -45,6 +46,7 @@ def get_ref_audio_list(self): audio_info_list.append(audio_info) return audio_info_list + def batch_clean_paths(paths): """ 批量处理路径列表,对每个路径调用 clean_path() 函数。 @@ -102,4 +104,4 @@ def write_text_to_file(text, output_file_path): except IOError as e: print(f"Error occurred while writing to the file: {e}") else: - print(f"Text successfully written to file: {output_file_path}") \ No newline at end of file + print(f"Text successfully written to file: {output_file_path}") diff --git a/Ref_Audio_Selector/ref_audio_selector_webui.py b/Ref_Audio_Selector/ref_audio_selector_webui.py index ef0ac0a9..c96523ab 100644 --- a/Ref_Audio_Selector/ref_audio_selector_webui.py +++ b/Ref_Audio_Selector/ref_audio_selector_webui.py @@ -6,6 +6,7 @@ import Ref_Audio_Selector.tool.audio_similarity as audio_similarity import Ref_Audio_Selector.tool.audio_inference as audio_inference import Ref_Audio_Selector.tool.audio_config as audio_config +import Ref_Audio_Selector.tool.delete_inference_with_ref as delete_inference_with_ref import Ref_Audio_Selector.common.common as common from tools.i18n.i18n import I18nAuto from config import python_exec, is_half @@ -23,7 +24,7 @@ # 校验基础信息 def check_base_info(text_work_space_dir): if text_work_space_dir is None or text_work_space_dir == '': - raise Exception(i18n("工作目录不能为空")) + raise Exception("工作目录不能为空") # 从list文件,提取参考音频 @@ -34,13 +35,13 @@ def convert_from_list(text_work_space_dir, text_list_input): try: check_base_info(text_work_space_dir) if text_list_input is None or text_list_input == '': - raise Exception(i18n("list文件路径不能为空")) + raise Exception("list文件路径不能为空") audio_similarity.convert_from_list(text_list_input, ref_audio_all) except Exception as e: traceback.print_exc() text_convert_from_list_info = f"发生异常:{e}" text_sample_dir = '' - return [text_convert_from_list_info, text_sample_dir] + return i18n(text_convert_from_list_info), text_sample_dir def start_similarity_analysis(work_space_dir, sample_dir, base_voice_path, need_similarity_output): @@ -83,19 +84,19 @@ def sample(text_work_space_dir, text_sample_dir, text_base_voice_path, try: check_base_info(text_work_space_dir) if text_sample_dir is None or text_sample_dir == '': - raise Exception(i18n("参考音频抽样目录不能为空,请先完成上一步操作")) + raise Exception("参考音频抽样目录不能为空,请先完成上一步操作") if text_base_voice_path is None or text_base_voice_path == '': - raise Exception(i18n("基准音频路径不能为空")) + raise Exception("基准音频路径不能为空") if text_subsection_num is None or text_subsection_num == '': - raise Exception(i18n("分段数不能为空")) + raise Exception("分段数不能为空") if text_sample_num is None or text_sample_num == '': - raise Exception(i18n("每段随机抽样个数不能为空")) + raise Exception("每段随机抽样个数不能为空") similarity_list, _, _ = start_similarity_analysis(text_work_space_dir, text_sample_dir, text_base_voice_path, checkbox_similarity_output) if similarity_list is None: - raise Exception(i18n("相似度分析失败")) + raise Exception("相似度分析失败") audio_similarity.sample(ref_audio_dir, similarity_list, int(text_subsection_num), int(text_sample_num)) @@ -106,8 +107,7 @@ def sample(text_work_space_dir, text_sample_dir, text_base_voice_path, text_model_inference_voice_dir = ref_audio_dir text_sync_ref_audio_dir = ref_audio_dir text_sync_ref_audio_dir2 = ref_audio_dir - 
return [text_sample_info, text_model_inference_voice_dir, text_sync_ref_audio_dir, - text_sync_ref_audio_dir2] + return i18n(text_sample_info), text_model_inference_voice_dir, text_sync_ref_audio_dir, text_sync_ref_audio_dir2 # 根据参考音频和测试文本,执行批量推理 @@ -120,31 +120,31 @@ def model_inference(text_work_space_dir, text_model_inference_voice_dir, text_ur try: check_base_info(text_work_space_dir) if text_model_inference_voice_dir is None or text_model_inference_voice_dir == '': - raise Exception(i18n("待推理的参考音频所在目录不能为空,请先完成上一步操作")) + raise Exception("待推理的参考音频所在目录不能为空,请先完成上一步操作") if text_url is None or text_url == '': - raise Exception(i18n("推理服务请求地址不能为空")) + raise Exception("推理服务请求地址不能为空") if text_text is None or text_text == '': - raise Exception(i18n("文本参数名不能为空")) + raise Exception("文本参数名不能为空") if text_test_content is None or text_test_content == '': - raise Exception(i18n("待推理文本路径不能为空")) + raise Exception("待推理文本路径不能为空") if (text_ref_path is None or text_ref_path == '') and (text_ref_text is None or text_ref_text == '') and ( text_emotion is None or text_emotion == ''): - raise Exception(i18n("参考音频路径/文本和角色情绪二选一填写,不能全部为空")) + raise Exception("参考音频路径/文本和角色情绪二选一填写,不能全部为空") url_composer = audio_inference.URLComposer(text_url, text_emotion, text_text, text_ref_path, text_ref_text) url_composer.is_valid() text_list = common.read_text_file_to_list(text_test_content) if text_list is None or len(text_list) == 0: - raise Exception(i18n("待推理文本内容不能为空")) + raise Exception("待推理文本内容不能为空") ref_audio_manager = common.RefAudioListManager(text_model_inference_voice_dir) if len(ref_audio_manager.get_audio_list()) == 0: - raise Exception(i18n("待推理的参考音频不能为空")) + raise Exception("待推理的参考音频不能为空") audio_inference.generate_audio_files(url_composer, text_list, ref_audio_manager.get_ref_audio_list(), inference_dir) except Exception as e: traceback.print_exc() text_model_inference_info = f"发生异常:{e}" text_asr_audio_dir = '' - return [text_model_inference_info, text_asr_audio_dir] + return i18n(text_model_inference_info), text_asr_audio_dir # 对推理生成音频执行asr @@ -156,13 +156,13 @@ def asr(text_work_space_dir, text_asr_audio_dir, dropdown_asr_model, try: check_base_info(text_work_space_dir) if text_asr_audio_dir is None or text_asr_audio_dir == '': - raise Exception(i18n("待asr的音频所在目录不能为空,请先完成上一步操作")) + raise Exception("待asr的音频所在目录不能为空,请先完成上一步操作") if dropdown_asr_model is None or dropdown_asr_model == '': - raise Exception(i18n("asr模型不能为空")) + raise Exception("asr模型不能为空") if dropdown_asr_size is None or dropdown_asr_size == '': - raise Exception(i18n("asr模型大小不能为空")) + raise Exception("asr模型大小不能为空") if dropdown_asr_lang is None or dropdown_asr_lang == '': - raise Exception(i18n("asr语言不能为空")) + raise Exception("asr语言不能为空") asr_file = open_asr(text_asr_audio_dir, text_work_space_dir, dropdown_asr_model, dropdown_asr_size, dropdown_asr_lang) text_text_similarity_analysis_path = asr_file @@ -171,7 +171,7 @@ def asr(text_work_space_dir, text_asr_audio_dir, dropdown_asr_model, traceback.print_exc() text_asr_info = f"发生异常:{e}" text_text_similarity_analysis_path = '' - return [text_asr_info, text_text_similarity_analysis_path] + return i18n(text_asr_info), text_text_similarity_analysis_path def open_asr(asr_inp_dir, asr_opt_dir, asr_model, asr_model_size, asr_lang): @@ -213,12 +213,12 @@ def text_similarity_analysis(text_work_space_dir, try: check_base_info(text_work_space_dir) if text_text_similarity_analysis_path is None or text_text_similarity_analysis_path == '': - raise Exception(i18n("asr生成的文件路径不能为空,请先完成上一步操作")) + raise 
Exception("asr生成的文件路径不能为空,请先完成上一步操作") open_text_similarity_analysis(text_text_similarity_analysis_path, similarity_dir) except Exception as e: traceback.print_exc() text_text_similarity_analysis_info = f"发生异常:{e}" - return text_text_similarity_analysis_info + return i18n(text_text_similarity_analysis_info) def open_text_similarity_analysis(asr_file_path, output_dir, similarity_enlarge_boundary=0.8): @@ -247,38 +247,39 @@ def similarity_audio_output(text_work_space_dir, text_base_audio_path, try: check_base_info(text_work_space_dir) if text_base_audio_path is None or text_base_audio_path == '': - raise Exception(i18n("基准音频路径不能为空")) + raise Exception("基准音频路径不能为空") if text_compare_audio_dir is None or text_compare_audio_dir == '': - raise Exception(i18n("待分析的音频所在目录不能为空")) + raise Exception("待分析的音频所在目录不能为空") similarity_list, similarity_file, similarity_file_dir = start_similarity_analysis( text_work_space_dir, text_compare_audio_dir, text_base_audio_path, True) if similarity_list is None: - raise Exception(i18n("相似度分析失败")) + raise Exception("相似度分析失败") text_similarity_audio_output_info = f'相似度分析成功:生成目录{similarity_file_dir},文件{similarity_file}' except Exception as e: traceback.print_exc() text_similarity_audio_output_info = f"发生异常:{e}" - return text_similarity_audio_output_info + return i18n(text_similarity_audio_output_info) # 根据参考音频目录的删除情况,将其同步到推理生成的音频目录中,即参考音频目录下,删除了几个参考音频,就在推理目录下,将这些参考音频生成的音频文件移除 def sync_ref_audio(text_work_space_dir, text_sync_ref_audio_dir, text_sync_inference_audio_dir): - text_sync_ref_audio_info = "同步参考音频成功:生成目录XXX" + text_sync_ref_audio_info = None try: check_base_info(text_work_space_dir) if text_sync_ref_audio_dir is None or text_sync_ref_audio_dir == '': - raise Exception(i18n("参考音频目录不能为空")) + raise Exception("参考音频目录不能为空") if text_sync_inference_audio_dir is None or text_sync_inference_audio_dir == '': - raise Exception(i18n("推理生成的音频目录不能为空")) - pass + raise Exception("推理生成的音频目录不能为空") + delete_text_wav_num, delete_emotion_dir_num = delete_inference_with_ref.sync_ref_audio(text_sync_ref_audio_dir, text_sync_inference_audio_dir) + text_sync_ref_audio_info = f"推理音频目录{text_sync_inference_audio_dir}下,text目录删除了{delete_text_wav_num}个参考音频,emotion目录下,删除了{delete_emotion_dir_num}个目录" except Exception as e: traceback.print_exc() text_sync_ref_audio_info = f"发生异常:{e}" - return text_sync_ref_audio_info + return i18n(text_sync_ref_audio_info) # 根据模板和参考音频目录,生成参考音频配置内容 @@ -288,15 +289,15 @@ def create_config(text_work_space_dir, text_template, text_sync_ref_audio_dir2): try: check_base_info(text_work_space_dir) if text_template is None or text_template == '': - raise Exception(i18n("参考音频抽样目录不能为空")) + raise Exception("参考音频抽样目录不能为空") if text_sync_ref_audio_dir2 is None or text_sync_ref_audio_dir2 == '': - raise Exception(i18n("参考音频目录不能为空")) + raise Exception("参考音频目录不能为空") ref_audio_manager = common.RefAudioListManager(text_sync_ref_audio_dir2) audio_config.generate_audio_config(text_work_space_dir, text_template, ref_audio_manager.get_ref_audio_list(), config_file) except Exception as e: traceback.print_exc() text_create_config_info = f"发生异常:{e}" - return text_create_config_info + return i18n(text_create_config_info) # 基于请求路径和参数,合成完整的请求路径 diff --git a/Ref_Audio_Selector/tool/audio_asr.py b/Ref_Audio_Selector/tool/audio_asr.py deleted file mode 100644 index fd40910d..00000000 --- a/Ref_Audio_Selector/tool/audio_asr.py +++ /dev/null @@ -1,4 +0,0 @@ - - - - diff --git a/Ref_Audio_Selector/tool/audio_inference.py b/Ref_Audio_Selector/tool/audio_inference.py index c2a46344..01adee76 100644 
--- a/Ref_Audio_Selector/tool/audio_inference.py +++ b/Ref_Audio_Selector/tool/audio_inference.py @@ -2,8 +2,6 @@ import requests import itertools from urllib.parse import urlparse, parse_qs, urlencode, urlunparse, quote -from tools.i18n.i18n import I18nAuto -i18n = I18nAuto() class URLComposer: @@ -16,13 +14,13 @@ def __init__(self, base_url, emotion_param_name, text_param_name, ref_path_param def is_valid(self): if self.base_url is None or self.base_url == '': - raise ValueError(i18n("请输入url")) + raise ValueError("请输入url") if self.text_param_name is None or self.text_param_name == '': - raise ValueError(i18n("请输入text参数名")) + raise ValueError("请输入text参数名") if self.emotion_param_name is None and self.ref_path_param_name is None and self.ref_text_param_name is None: - raise ValueError(i18n("请输入至少一个参考or情绪的参数")) + raise ValueError("请输入至少一个参考or情绪的参数") def is_emotion(self): return self.emotion_param_name is not None and self.emotion_param_name != '' diff --git a/Ref_Audio_Selector/tool/delete_inference_with_ref.py b/Ref_Audio_Selector/tool/delete_inference_with_ref.py new file mode 100644 index 00000000..d653499f --- /dev/null +++ b/Ref_Audio_Selector/tool/delete_inference_with_ref.py @@ -0,0 +1,58 @@ +import os +import shutil +import Ref_Audio_Selector.common.common as common + + +def remove_matching_audio_files_in_text_dir(text_dir, emotions_list): + count = 0 + for root, dirs, files in os.walk(text_dir): + for emotion_dict in emotions_list: + emotion_tag = emotion_dict['emotion'] + wav_file_name = f"{emotion_tag}.wav" + file_path = os.path.join(root, wav_file_name) + if os.path.exists(file_path): + print(f"Deleting file: {file_path}") + try: + os.remove(file_path) + count += 1 + except Exception as e: + print(f"Error deleting file {file_path}: {e}") + return count + + +def delete_emotion_subdirectories(emotion_dir, emotions_list): + """ + 根据给定的情绪数组,删除emotion目录下对应情绪标签的子目录。 + + 参数: + emotions_list (List[Dict]): 每个字典包含'emotion'字段。 + base_dir (str): 子目录所在的基础目录,默认为'emotion')。 + + 返回: + None + """ + count = 0 + for emotion_dict in emotions_list: + emotion_folder = emotion_dict['emotion'] + folder_path = os.path.join(emotion_dir, emotion_folder) + + # 检查emotion子目录是否存在 + if os.path.isdir(folder_path): + print(f"Deleting directory: {folder_path}") + try: + # 使用shutil.rmtree删除整个子目录及其内容 + shutil.rmtree(folder_path) + count += 1 + except Exception as e: + print(f"Error deleting directory {folder_path}: {e}") + return count + + +def sync_ref_audio(ref_audio_dir, inference_audio_dir): + ref_audio_manager = common.RefAudioListManager(ref_audio_dir) + ref_list = ref_audio_manager.get_ref_audio_list() + text_dir = os.path.join(inference_audio_dir, 'text') + emotion_dir = os.path.join(inference_audio_dir, 'emotion') + delete_text_wav_num = remove_matching_audio_files_in_text_dir(text_dir, ref_list) + delete_emotion_dir_num = delete_emotion_subdirectories(emotion_dir, ref_list) + return delete_text_wav_num, delete_emotion_dir_num diff --git a/Ref_Audio_Selector/tool/text_similarity.py b/Ref_Audio_Selector/tool/text_similarity.py deleted file mode 100644 index e69de29b..00000000 From ecbc7d0b1ec578059e885269d043643a3f239a6e Mon Sep 17 00:00:00 2001 From: Downupanddownup Date: Thu, 25 Apr 2024 16:20:11 +0800 Subject: [PATCH 10/72] =?UTF-8?q?=E6=B7=BB=E5=8A=A0=E9=85=8D=E7=BD=AE?= =?UTF-8?q?=E6=96=87=E4=BB=B6=E7=AE=A1=E7=90=86?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- Ref_Audio_Selector/config.ini | 45 +++++++++++++ Ref_Audio_Selector/config/__init__.py | 0 
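A cautious way to exercise the new sync logic is a dry run that only reports what would be deleted. A minimal stdlib sketch mirroring remove_matching_audio_files_in_text_dir (the example directory name is hypothetical):

import os

# Dry-run sketch: list the per-emotion wav files the sync step would remove,
# without touching the filesystem. emotions_list has the same shape as the
# dicts used above: [{'emotion': '开心'}, ...]
def preview_text_dir_deletions(text_dir, emotions_list):
    hits = []
    for root, _, _ in os.walk(text_dir):
        for emotion_dict in emotions_list:
            candidate = os.path.join(root, f"{emotion_dict['emotion']}.wav")
            if os.path.exists(candidate):
                hits.append(candidate)
    return hits

# e.g. preview_text_dir_deletions('work/inference_audio/text', [{'emotion': '开心'}])
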
Ref_Audio_Selector/config/config_manager.py | 67 +++++++++++++++++++ .../ref_audio_selector_webui.py | 34 ++++++---- .../tool/asr/funasr_asr_multi_level_dir.py | 5 +- Ref_Audio_Selector/tool/audio_inference.py | 7 +- .../tool/delete_inference_with_ref.py | 6 +- .../speaker_verification/voice_similarity.py | 16 +++-- .../tool/text_comparison/asr_text_process.py | 10 ++- 9 files changed, 160 insertions(+), 30 deletions(-) create mode 100644 Ref_Audio_Selector/config.ini create mode 100644 Ref_Audio_Selector/config/__init__.py create mode 100644 Ref_Audio_Selector/config/config_manager.py diff --git a/Ref_Audio_Selector/config.ini b/Ref_Audio_Selector/config.ini new file mode 100644 index 00000000..1aef25d3 --- /dev/null +++ b/Ref_Audio_Selector/config.ini @@ -0,0 +1,45 @@ +# config.ini + +[Base] +# 工作目录 +work_dir = +# 角色 +role = +# 参考音频目录 +reference_audio_dir = refer_audio + +[AudioSample] +# list转换待选参考音频目录 +list_to_convert_reference_audio_dir = refer_audio_all +# 音频相似度目录 +audio_similarity_dir = similarity + +[Inference] +# 默认测试文本位置 +default_test_text_path = Ref_Audio_Selector/file/test_content/test_content.txt +# 推理音频目录 +inference_audio_dir = inference_audio +# 推理音频文本聚合目录 +inference_audio_text_aggregation_dir = text +# 推理音频情绪聚合目录 +inference_audio_emotion_aggregation_dir = emotion + +[ResultCheck] +# asr输出文件 +asr_filename = asr +# 文本相似度输出目录 +text_similarity_output_dir = text_similarity +# 文本情绪平均相似度报告文件名 +text_emotion_average_similarity_report_filename = average_similarity +# 文本相似度按情绪聚合明细文件名 +text_similarity_by_emotion_detail_filename = emotion_group_detail +# 文本相似度按文本聚合明细文件名 +text_similarity_by_text_detail_filename = text_group_detail + +[AudioConfig] +# 默认模板文件位置 +default_template_path = Ref_Audio_Selector/file/config_template/ref_audio_template.txt +# 参考音频配置文件名 +reference_audio_config_filename = refer_audio + +[Other] \ No newline at end of file diff --git a/Ref_Audio_Selector/config/__init__.py b/Ref_Audio_Selector/config/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/Ref_Audio_Selector/config/config_manager.py b/Ref_Audio_Selector/config/config_manager.py new file mode 100644 index 00000000..6c87ef9d --- /dev/null +++ b/Ref_Audio_Selector/config/config_manager.py @@ -0,0 +1,67 @@ +import configparser +import re + + +class ConfigManager: + def __init__(self): + self.config_path = 'Ref_Audio_Selector/config.ini' + self.comments = [] + self.config = None + self.read_with_comments() + + def read_with_comments(self): + with open(self.config_path, 'r', encoding='utf-8') as f: + lines = f.readlines() + + self.comments = [] + for i, line in enumerate(lines): + if line.startswith(';') or line.startswith('#'): + self.comments.append((i, line)) + + self.config = configparser.ConfigParser() + self.config.read_string(''.join(lines)) + + def write_with_comments(self): + output_lines = [] + + # 先写入配置项 + config_str = self.config.write() + output_lines.extend(config_str.splitlines(True)) # 保持换行 + + # 然后插入原有注释 + for index, comment in sorted(self.comments, reverse=True): # 从后往前插入,避免行号错乱 + while len(output_lines) < index + 1: + output_lines.append('\n') # 补充空行 + output_lines.insert(index, comment) + + with open(self.config_path, 'w', encoding='utf-8') as f: + f.writelines(output_lines) + + def get_base(self, key): + return self.config.get('Base', key) + + def set_base(self, key, value): + self.config.set('Base', key, value) + self.write_with_comments() + + def get_audio_sample(self, key): + return self.config.get('AudioSample', key) + + def get_inference(self, key): + return 
self.config.get('Inference', key) + + def get_result_check(self, key): + return self.config.get('ResultCheck', key) + + def get_audio_config(self, key): + return self.config.get('AudioConfig', key) + + def get_other(self, key): + return self.config.get('Other', key) + + +_config = ConfigManager() + + +def get_config(): + return _config diff --git a/Ref_Audio_Selector/ref_audio_selector_webui.py b/Ref_Audio_Selector/ref_audio_selector_webui.py index c96523ab..8cb1a571 100644 --- a/Ref_Audio_Selector/ref_audio_selector_webui.py +++ b/Ref_Audio_Selector/ref_audio_selector_webui.py @@ -8,6 +8,7 @@ import Ref_Audio_Selector.tool.audio_config as audio_config import Ref_Audio_Selector.tool.delete_inference_with_ref as delete_inference_with_ref import Ref_Audio_Selector.common.common as common +import Ref_Audio_Selector.config.config_manager as config_manager from tools.i18n.i18n import I18nAuto from config import python_exec, is_half from tools import my_utils @@ -15,6 +16,7 @@ from subprocess import Popen i18n = I18nAuto() +config = config_manager.get_config() p_similarity = None p_asr = None @@ -29,7 +31,8 @@ def check_base_info(text_work_space_dir): # 从list文件,提取参考音频 def convert_from_list(text_work_space_dir, text_list_input): - ref_audio_all = os.path.join(text_work_space_dir, 'refer_audio_all') + ref_audio_all = os.path.join(text_work_space_dir, + config.get_audio_sample('list_to_convert_reference_audio_dir')) text_convert_from_list_info = f"转换成功:生成目录{ref_audio_all}" text_sample_dir = ref_audio_all try: @@ -48,7 +51,7 @@ def start_similarity_analysis(work_space_dir, sample_dir, base_voice_path, need_ similarity_list = None similarity_file_dir = None - similarity_dir = os.path.join(work_space_dir, 'similarity') + similarity_dir = os.path.join(work_space_dir, config.get_audio_sample('audio_similarity_dir')) os.makedirs(similarity_dir, exist_ok=True) base_voice_file_name = common.get_filename_without_extension(base_voice_path) @@ -79,7 +82,7 @@ def start_similarity_analysis(work_space_dir, sample_dir, base_voice_path, need_ # 基于一个基准音频,从参考音频目录中进行分段抽样 def sample(text_work_space_dir, text_sample_dir, text_base_voice_path, text_subsection_num, text_sample_num, checkbox_similarity_output): - ref_audio_dir = os.path.join(text_work_space_dir, 'refer_audio') + ref_audio_dir = os.path.join(text_work_space_dir, config.get_base('reference_audio_dir')) text_sample_info = f"抽样成功:生成目录{ref_audio_dir}" try: check_base_info(text_work_space_dir) @@ -114,8 +117,9 @@ def sample(text_work_space_dir, text_sample_dir, text_base_voice_path, def model_inference(text_work_space_dir, text_model_inference_voice_dir, text_url, text_text, text_ref_path, text_ref_text, text_emotion, text_test_content): - inference_dir = os.path.join(text_work_space_dir, 'inference_audio') - text_asr_audio_dir = os.path.join(inference_dir, 'text') + inference_dir = os.path.join(text_work_space_dir, config.get_inference('inference_audio_dir')) + text_asr_audio_dir = os.path.join(inference_dir, + config.get_inference('inference_audio_text_aggregation_dir')) text_model_inference_info = f"推理成功:生成目录{inference_dir}" try: check_base_info(text_work_space_dir) @@ -164,7 +168,7 @@ def asr(text_work_space_dir, text_asr_audio_dir, dropdown_asr_model, if dropdown_asr_lang is None or dropdown_asr_lang == '': raise Exception("asr语言不能为空") asr_file = open_asr(text_asr_audio_dir, text_work_space_dir, dropdown_asr_model, dropdown_asr_size, - dropdown_asr_lang) + dropdown_asr_lang) text_text_similarity_analysis_path = asr_file text_asr_info = 
f"asr成功:生成文件{asr_file}" except Exception as e: @@ -198,7 +202,7 @@ def open_asr(asr_inp_dir, asr_opt_dir, asr_model, asr_model_size, asr_lang): output_dir_abs = os.path.abspath(asr_opt_dir) output_file_name = os.path.basename(asr_inp_dir) # 构造输出文件路径 - output_file_path = os.path.join(output_dir_abs, f'{output_file_name}_asr.list') + output_file_path = os.path.join(output_dir_abs, f'{config.get_result_check("asr_filename")}.list') return output_file_path else: @@ -208,7 +212,7 @@ def open_asr(asr_inp_dir, asr_opt_dir, asr_model, asr_model_size, asr_lang): # 对asr生成的文件,与原本的文本内容,进行相似度分析 def text_similarity_analysis(text_work_space_dir, text_text_similarity_analysis_path): - similarity_dir = os.path.join(text_work_space_dir, 'text_similarity') + similarity_dir = os.path.join(text_work_space_dir, config.get_result_check('text_similarity_output_dir')) text_text_similarity_analysis_info = f"相似度分析成功:生成目录{similarity_dir}" try: check_base_info(text_work_space_dir) @@ -243,7 +247,7 @@ def open_text_similarity_analysis(asr_file_path, output_dir, similarity_enlarge_ # 根据一个参考音频,对指定目录下的音频进行相似度分析,并输出到另一个目录 def similarity_audio_output(text_work_space_dir, text_base_audio_path, text_compare_audio_dir): - text_similarity_audio_output_info = "相似度分析成功:生成目录XXX" + text_similarity_audio_output_info = None try: check_base_info(text_work_space_dir) if text_base_audio_path is None or text_base_audio_path == '': @@ -274,7 +278,8 @@ def sync_ref_audio(text_work_space_dir, text_sync_ref_audio_dir, raise Exception("参考音频目录不能为空") if text_sync_inference_audio_dir is None or text_sync_inference_audio_dir == '': raise Exception("推理生成的音频目录不能为空") - delete_text_wav_num, delete_emotion_dir_num = delete_inference_with_ref.sync_ref_audio(text_sync_ref_audio_dir, text_sync_inference_audio_dir) + delete_text_wav_num, delete_emotion_dir_num = delete_inference_with_ref.sync_ref_audio(text_sync_ref_audio_dir, + text_sync_inference_audio_dir) text_sync_ref_audio_info = f"推理音频目录{text_sync_inference_audio_dir}下,text目录删除了{delete_text_wav_num}个参考音频,emotion目录下,删除了{delete_emotion_dir_num}个目录" except Exception as e: traceback.print_exc() @@ -284,7 +289,7 @@ def sync_ref_audio(text_work_space_dir, text_sync_ref_audio_dir, # 根据模板和参考音频目录,生成参考音频配置内容 def create_config(text_work_space_dir, text_template, text_sync_ref_audio_dir2): - config_file = os.path.join(text_work_space_dir, 'refer_audio.json') + config_file = os.path.join(text_work_space_dir, f'{config.get_audio_config("reference_audio_config_filename")}.json') text_create_config_info = f"配置生成成功:生成文件{config_file}" try: check_base_info(text_work_space_dir) @@ -293,7 +298,8 @@ def create_config(text_work_space_dir, text_template, text_sync_ref_audio_dir2): if text_sync_ref_audio_dir2 is None or text_sync_ref_audio_dir2 == '': raise Exception("参考音频目录不能为空") ref_audio_manager = common.RefAudioListManager(text_sync_ref_audio_dir2) - audio_config.generate_audio_config(text_work_space_dir, text_template, ref_audio_manager.get_ref_audio_list(), config_file) + audio_config.generate_audio_config(text_work_space_dir, text_template, ref_audio_manager.get_ref_audio_list(), + config_file) except Exception as e: traceback.print_exc() text_create_config_info = f"发生异常:{e}" @@ -353,7 +359,7 @@ def whole_url(text_url, text_text, text_ref_path, text_ref_text, text_emotion): text_emotion.input(whole_url, [text_url, text_text, text_ref_path, text_ref_text, text_emotion], [text_whole_url]) gr.Markdown(value=i18n("2.2:配置待推理文本,一句一行,不要太多,10条即可")) - default_test_content_path = 
'Ref_Audio_Selector/tool/test_content/test_content.txt' + default_test_content_path = config.get_inference('default_test_text_path') text_test_content = gr.Text(label=i18n("请输入待推理文本路径"), value=default_test_content_path) gr.Markdown(value=i18n("2.3:启动推理服务,如果还没启动的话")) gr.Markdown(value=i18n("2.4:开始批量推理,这个过程比较耗时,可以去干点别的")) @@ -421,7 +427,7 @@ def whole_url(text_url, text_text, text_ref_path, text_ref_text, text_emotion): text_sync_inference_audio_dir], [text_sync_ref_info]) with gr.Accordion("第四步:生成参考音频配置文本", open=False): gr.Markdown(value=i18n("4.1:编辑模板")) - default_template_path = 'Ref_Audio_Selector/tool/config_template/ref_audio_template.txt' + default_template_path = config.get_audio_config('default_template_path') default_template_content = common.read_file(default_template_path) text_template_path = gr.Text(label=i18n("模板文件路径"), value=default_template_path, interactive=False) text_template = gr.Text(label=i18n("模板内容"), value=default_template_content, lines=10) diff --git a/Ref_Audio_Selector/tool/asr/funasr_asr_multi_level_dir.py b/Ref_Audio_Selector/tool/asr/funasr_asr_multi_level_dir.py index a67822d1..24753b52 100644 --- a/Ref_Audio_Selector/tool/asr/funasr_asr_multi_level_dir.py +++ b/Ref_Audio_Selector/tool/asr/funasr_asr_multi_level_dir.py @@ -3,9 +3,10 @@ import argparse import os import traceback +import Ref_Audio_Selector.config.config_manager as config_manager from tqdm import tqdm - from funasr import AutoModel +config = config_manager.get_config() path_asr = 'tools/asr/models/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch' path_vad = 'tools/asr/models/speech_fsmn_vad_zh-cn-16k-common-pytorch' @@ -84,7 +85,7 @@ def execute_asr_multi_level_dir(input_folder, output_folder, model_size, languag os.makedirs(output_dir_abs, exist_ok=True) # 构造输出文件路径 - output_file_path = os.path.join(output_dir_abs, f'{output_file_name}_asr.list') + output_file_path = os.path.join(output_dir_abs, f'{config.get_result_check("asr_filename")}.list') # 将输出写入文件 with open(output_file_path, "w", encoding="utf-8") as f: diff --git a/Ref_Audio_Selector/tool/audio_inference.py b/Ref_Audio_Selector/tool/audio_inference.py index 01adee76..f8bec4d8 100644 --- a/Ref_Audio_Selector/tool/audio_inference.py +++ b/Ref_Audio_Selector/tool/audio_inference.py @@ -1,8 +1,11 @@ import os import requests import itertools +import Ref_Audio_Selector.config.config_manager as config_manager from urllib.parse import urlparse, parse_qs, urlencode, urlunparse, quote +config = config_manager.get_config() + class URLComposer: def __init__(self, base_url, emotion_param_name, text_param_name, ref_path_param_name, ref_text_param_name): @@ -78,9 +81,9 @@ def generate_audio_files(url_composer, text_list, emotion_list, output_dir_path) os.makedirs(output_dir, exist_ok=True) # Create subdirectories for text and emotion categories - text_subdir = os.path.join(output_dir, 'text') + text_subdir = os.path.join(output_dir, config.get_inference('inference_audio_text_aggregation_dir')) os.makedirs(text_subdir, exist_ok=True) - emotion_subdir = os.path.join(output_dir, 'emotion') + emotion_subdir = os.path.join(output_dir, config.get_inference('inference_audio_emotion_aggregation_dir')) os.makedirs(emotion_subdir, exist_ok=True) # 计算笛卡尔积 diff --git a/Ref_Audio_Selector/tool/delete_inference_with_ref.py b/Ref_Audio_Selector/tool/delete_inference_with_ref.py index d653499f..3a34c49c 100644 --- a/Ref_Audio_Selector/tool/delete_inference_with_ref.py +++ b/Ref_Audio_Selector/tool/delete_inference_with_ref.py @@ -1,7 +1,9 @@ import 
os import shutil import Ref_Audio_Selector.common.common as common +import Ref_Audio_Selector.config.config_manager as config_manager +config = config_manager.get_config() def remove_matching_audio_files_in_text_dir(text_dir, emotions_list): count = 0 @@ -51,8 +53,8 @@ def delete_emotion_subdirectories(emotion_dir, emotions_list): def sync_ref_audio(ref_audio_dir, inference_audio_dir): ref_audio_manager = common.RefAudioListManager(ref_audio_dir) ref_list = ref_audio_manager.get_ref_audio_list() - text_dir = os.path.join(inference_audio_dir, 'text') - emotion_dir = os.path.join(inference_audio_dir, 'emotion') + text_dir = os.path.join(inference_audio_dir, config.get_inference('inference_audio_text_aggregation_dir')) + emotion_dir = os.path.join(inference_audio_dir, config.get_inference('inference_audio_emotion_aggregation_dir')) delete_text_wav_num = remove_matching_audio_files_in_text_dir(text_dir, ref_list) delete_emotion_dir_num = delete_emotion_subdirectories(emotion_dir, ref_list) return delete_text_wav_num, delete_emotion_dir_num diff --git a/Ref_Audio_Selector/tool/speaker_verification/voice_similarity.py b/Ref_Audio_Selector/tool/speaker_verification/voice_similarity.py index 4b8ded76..8a97d9b8 100644 --- a/Ref_Audio_Selector/tool/speaker_verification/voice_similarity.py +++ b/Ref_Audio_Selector/tool/speaker_verification/voice_similarity.py @@ -2,6 +2,7 @@ import os from modelscope.pipelines import pipeline + sv_pipeline = pipeline( task='speaker-verification', model='Ref_Audio_Selector/tool/speaker_verification/models/speech_campplus_sv_zh-cn_16k-common', @@ -11,7 +12,8 @@ def compare_audio_and_generate_report(reference_audio_path, comparison_dir_path, output_file_path): # Step 1: 获取比较音频目录下所有音频文件的路径 - comparison_audio_paths = [os.path.join(comparison_dir_path, f) for f in os.listdir(comparison_dir_path) if f.endswith('.wav')] + comparison_audio_paths = [os.path.join(comparison_dir_path, f) for f in os.listdir(comparison_dir_path) if + f.endswith('.wav')] # Step 2: 用参考音频依次比较音频目录下的每个音频,获取相似度分数及对应路径 similarity_scores = [] @@ -31,10 +33,10 @@ def compare_audio_and_generate_report(reference_audio_path, comparison_dir_path, open(output_file_path, 'w').close() # Create an empty file # Step 5: 将排序后的结果写入输出结果文件(支持中文) - formatted_scores = [f'{item["score"]}|{item["path"]}' for item in similarity_scores] + formatted_scores = [f'{item["score"]}|{item["path"]}' for item in similarity_scores] with open(output_file_path, 'w', encoding='utf-8') as f: # 使用'\n'将每个字符串分开,使其写入不同行 - content = '\n'.join(formatted_scores ) + content = '\n'.join(formatted_scores) f.write(content) @@ -60,7 +62,7 @@ def parse_arguments(): cmd = parse_arguments() print(cmd) compare_audio_and_generate_report( - reference_audio_path = cmd.reference_audio, - comparison_dir_path = cmd.comparison_dir, - output_file_path = cmd.output_file, - ) \ No newline at end of file + reference_audio_path=cmd.reference_audio, + comparison_dir_path=cmd.comparison_dir, + output_file_path=cmd.output_file, + ) diff --git a/Ref_Audio_Selector/tool/text_comparison/asr_text_process.py b/Ref_Audio_Selector/tool/text_comparison/asr_text_process.py index 7ec2bd9b..bc5791f7 100644 --- a/Ref_Audio_Selector/tool/text_comparison/asr_text_process.py +++ b/Ref_Audio_Selector/tool/text_comparison/asr_text_process.py @@ -3,8 +3,11 @@ from collections import defaultdict from operator import itemgetter import Ref_Audio_Selector.tool.text_comparison.text_comparison as text_comparison +import Ref_Audio_Selector.config.config_manager as config_manager import 
Ref_Audio_Selector.common.common as common +config = config_manager.get_config() + def parse_asr_file(file_path): output = [] @@ -96,19 +99,20 @@ def process(asr_file_path, output_dir, similarity_enlarge_boundary): calculate_similarity_and_append_to_list(records, similarity_enlarge_boundary) average_similarity_list = calculate_average_similarity_by_emotion(records) - average_similarity_file = os.path.join(output_dir, 'average_similarity.txt') + average_similarity_file = os.path.join(output_dir, + f'{config.get_result_check("text_emotion_average_similarity_report_filename")}.txt') average_similarity_content = \ '\n'.join([f"{item['average_similarity_score']}|{item['emotion']}" for item in average_similarity_list]) common.write_text_to_file(average_similarity_content, average_similarity_file) emotion_detail_list = group_and_sort_by_field(records, 'emotion') - emotion_detail_file = os.path.join(output_dir, 'emotion_group_detail.txt') + emotion_detail_file = os.path.join(output_dir, f'{config.get_result_check("emotion_group_detail")}.txt') format_list_to_text(emotion_detail_list, emotion_detail_file) original_text_detail_list = group_and_sort_by_field(records, 'original_text') - original_text_detail_file = os.path.join(output_dir, 'text_group_detail.txt') + original_text_detail_file = os.path.join(output_dir, f'{config.get_result_check("text_group_detail")}.txt') format_list_to_text(original_text_detail_list, original_text_detail_file) print('文本相似度分析完成。') From 441ab548895a4fe91c6c4be6a9496c8ac2cadb62 Mon Sep 17 00:00:00 2001 From: Downupanddownup Date: Thu, 25 Apr 2024 16:39:56 +0800 Subject: [PATCH 11/72] =?UTF-8?q?url=E7=BC=96=E7=A0=81=E8=B0=83=E6=95=B4?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../ref_audio_selector_webui.py | 8 +++--- Ref_Audio_Selector/tool/audio_inference.py | 25 +++++++++---------- 2 files changed, 16 insertions(+), 17 deletions(-) diff --git a/Ref_Audio_Selector/ref_audio_selector_webui.py b/Ref_Audio_Selector/ref_audio_selector_webui.py index 8cb1a571..954361a3 100644 --- a/Ref_Audio_Selector/ref_audio_selector_webui.py +++ b/Ref_Audio_Selector/ref_audio_selector_webui.py @@ -310,9 +310,9 @@ def create_config(text_work_space_dir, text_template, text_sync_ref_audio_dir2): def whole_url(text_url, text_text, text_ref_path, text_ref_text, text_emotion): url_composer = audio_inference.URLComposer(text_url, text_emotion, text_text, text_ref_path, text_ref_text) if url_composer.is_emotion(): - text_whole_url = url_composer.build_url_with_emotion('测试内容', '情绪类型') + text_whole_url = url_composer.build_url_with_emotion('测试内容', '情绪类型', False) else: - text_whole_url = url_composer.build_url_with_ref('测试内容', '参考路径', '参考文本') + text_whole_url = url_composer.build_url_with_ref('测试内容', '参考路径', '参考文本', False) return text_whole_url @@ -409,8 +409,8 @@ def whole_url(text_url, text_text, text_ref_path, text_ref_text, text_emotion): gr.Markdown(value=i18n("3.3:根据相似度分析结果,重点检查最后几条是否存在复读等问题")) gr.Markdown(value=i18n("3.4:对结果按音频相似度排序,筛选低音质音频")) with gr.Row(): - text_base_audio_path = gr.Text(label=i18n("请输入基准音频"), value="text") - text_compare_audio_dir = gr.Text(label=i18n("请输入待比较的音频文件目录"), value="text") + text_base_audio_path = gr.Text(label=i18n("请输入基准音频"), value="") + text_compare_audio_dir = gr.Text(label=i18n("请输入待比较的音频文件目录"), value="") with gr.Row(): button_similarity_audio_output = gr.Button(i18n("输出相似度-参考音频到临时目录"), variant="primary") text_similarity_audio_output_info = gr.Text(label=i18n("输出结果"), value="", interactive=False) diff 
--git a/Ref_Audio_Selector/tool/audio_inference.py b/Ref_Audio_Selector/tool/audio_inference.py index f8bec4d8..0df6b26d 100644 --- a/Ref_Audio_Selector/tool/audio_inference.py +++ b/Ref_Audio_Selector/tool/audio_inference.py @@ -9,7 +9,7 @@ class URLComposer: def __init__(self, base_url, emotion_param_name, text_param_name, ref_path_param_name, ref_text_param_name): - self.base_url = safe_encode_query_params(base_url) + self.base_url = base_url self.emotion_param_name = emotion_param_name self.text_param_name = text_param_name self.ref_path_param_name = ref_path_param_name @@ -28,35 +28,34 @@ def is_valid(self): def is_emotion(self): return self.emotion_param_name is not None and self.emotion_param_name != '' - def build_url_with_emotion(self, text_value, emotion_value): + def build_url_with_emotion(self, text_value, emotion_value, need_url_encode=True): if not self.emotion_param_name: raise ValueError("Emotion parameter name is not set.") params = { - self.text_param_name: quote(text_value), - self.emotion_param_name: quote(emotion_value), + self.text_param_name: text_value, + self.emotion_param_name: emotion_value, } - return self._append_params_to_url(params) + return self._append_params_to_url(params, need_url_encode) - def build_url_with_ref(self, text_value, ref_path_value, ref_text_value): + def build_url_with_ref(self, text_value, ref_path_value, ref_text_value, need_url_encode=True): if self.emotion_param_name: raise ValueError("Cannot use reference parameters when emotion parameter is set.") params = { - self.text_param_name: quote(text_value), - self.ref_path_param_name: quote(ref_path_value), - self.ref_text_param_name: quote(ref_text_value), + self.text_param_name: text_value, + self.ref_path_param_name: ref_path_value, + self.ref_text_param_name: ref_text_value, } - return self._append_params_to_url(params) + return self._append_params_to_url(params, need_url_encode) - def _append_params_to_url(self, params: dict): + def _append_params_to_url(self, params, need_url_encode): url_with_params = self.base_url if params: query_params = '&'.join([f"{k}={v}" for k, v in params.items()]) url_with_params += '?' + query_params if '?' 
not in self.base_url else '&' + query_params - return url_with_params + return url_with_params if not need_url_encode else safe_encode_query_params(url_with_params) def safe_encode_query_params(original_url): - # 分析URL以获取查询字符串部分 parsed_url = urlparse(original_url) query_params = parse_qs(parsed_url.query) From f61a723babbf5580b59cf5652009509639d9c6ca Mon Sep 17 00:00:00 2001 From: Downupanddownup Date: Thu, 25 Apr 2024 16:45:42 +0800 Subject: [PATCH 12/72] =?UTF-8?q?=E6=B7=BB=E5=8A=A03s=E8=87=B310s=E7=9A=84?= =?UTF-8?q?=E9=9F=B3=E9=A2=91=E8=BF=87=E6=BB=A4?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- Ref_Audio_Selector/tool/audio_similarity.py | 31 ++++++++++++++++++--- 1 file changed, 27 insertions(+), 4 deletions(-) diff --git a/Ref_Audio_Selector/tool/audio_similarity.py b/Ref_Audio_Selector/tool/audio_similarity.py index 962f85d7..afb6a52c 100644 --- a/Ref_Audio_Selector/tool/audio_similarity.py +++ b/Ref_Audio_Selector/tool/audio_similarity.py @@ -1,8 +1,27 @@ import os import shutil import random +import librosa +def check_audio_duration(path, min_duration=3, max_duration=10): + try: + # 加载音频文件 + audio, sample_rate = librosa.load(path) + + # 计算音频的时长(单位:秒) + duration = librosa.get_duration(y=audio, sr=sample_rate) + + # 判断时长是否在3s至10s之间 + if min_duration <= duration <= max_duration: + return True + else: + return False + + except Exception as e: + print(f"无法打开或处理音频文件:{e}") + return None + def convert_from_list(list_file, output_dir): # 创建输出目录,如果它不存在的话 @@ -38,9 +57,13 @@ def convert_from_list(list_file, output_dir): print(f"Audio file does not exist: {audio_path}") continue - # 复制音频文件到output目录并重命名 - shutil.copy2(audio_path, new_path) - print(f"File copied and renamed to: {new_path}") + if check_audio_duration(audio_path): + # 复制音频文件到output目录并重命名 + shutil.copy2(audio_path, new_path) + print(f"File copied and renamed to: {new_path}") + else: + print(f"File skipped due to duration: {audio_path}") + except Exception as e: print(f"An error occurred while processing: {audio_path}") print(e) @@ -134,4 +157,4 @@ def copy_and_move(output_audio_directory, similarity_scores): if __name__ == '__main__': similarity_list = parse_similarity_file("D:/tt/similarity/啊,除了伊甸和樱,竟然还有其他人会提起我?.txt") - sample('D:/tt/similarity/output', similarity_list, 10, 4) \ No newline at end of file + sample('D:/tt/similarity/output', similarity_list, 10, 4) From 926dd6b34a7244485b2f17215fc30f2bd7b0a712 Mon Sep 17 00:00:00 2001 From: Downupanddownup Date: Thu, 25 Apr 2024 17:13:30 +0800 Subject: [PATCH 13/72] =?UTF-8?q?=E8=B0=83=E6=95=B4=E9=85=8D=E7=BD=AE?= =?UTF-8?q?=E7=AE=A1=E7=90=86=EF=BC=8C=E5=8E=BB=E9=99=A4=E5=86=99=E5=85=A5?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- Ref_Audio_Selector/config/config_manager.py | 50 +++++++-------------- 1 file changed, 15 insertions(+), 35 deletions(-) diff --git a/Ref_Audio_Selector/config/config_manager.py b/Ref_Audio_Selector/config/config_manager.py index 6c87ef9d..4c2e47c1 100644 --- a/Ref_Audio_Selector/config/config_manager.py +++ b/Ref_Audio_Selector/config/config_manager.py @@ -5,45 +5,12 @@ class ConfigManager: def __init__(self): self.config_path = 'Ref_Audio_Selector/config.ini' - self.comments = [] - self.config = None - self.read_with_comments() - - def read_with_comments(self): - with open(self.config_path, 'r', encoding='utf-8') as f: - lines = f.readlines() - - self.comments = [] - for i, line in enumerate(lines): - if line.startswith(';') or line.startswith('#'): - 
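check_audio_duration above loads each file just to measure it; the same 3s-10s gate can be applied across a directory with the identical librosa calls. A sketch, assuming a flat folder of wav files (the folder path is up to the caller):

import os
import librosa

# Keep only wavs whose duration falls inside the window used by check_audio_duration.
def filter_wavs_by_duration(dir_path, min_duration=3, max_duration=10):
    kept = []
    for name in os.listdir(dir_path):
        if not name.endswith('.wav'):
            continue
        path = os.path.join(dir_path, name)
        audio, sample_rate = librosa.load(path)
        duration = librosa.get_duration(y=audio, sr=sample_rate)
        if min_duration <= duration <= max_duration:
            kept.append(path)
    return kept
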
self.comments.append((i, line)) - - self.config = configparser.ConfigParser() - self.config.read_string(''.join(lines)) - - def write_with_comments(self): - output_lines = [] - - # 先写入配置项 - config_str = self.config.write() - output_lines.extend(config_str.splitlines(True)) # 保持换行 - - # 然后插入原有注释 - for index, comment in sorted(self.comments, reverse=True): # 从后往前插入,避免行号错乱 - while len(output_lines) < index + 1: - output_lines.append('\n') # 补充空行 - output_lines.insert(index, comment) - - with open(self.config_path, 'w', encoding='utf-8') as f: - f.writelines(output_lines) + self.config = configparser.ConfigParser() + self.config.read(self.config_path, encoding='utf-8') def get_base(self, key): return self.config.get('Base', key) - def set_base(self, key, value): - self.config.set('Base', key, value) - self.write_with_comments() - def get_audio_sample(self, key): return self.config.get('AudioSample', key) @@ -59,9 +26,22 @@ def get_audio_config(self, key): def get_other(self, key): return self.config.get('Other', key) + def print(self): + # 打印所有配置 + for section in self.config.sections(): + print('[{}]'.format(section)) + for key in self.config[section]: + print('{} = {}'.format(key, self.config[section][key])) + print() + _config = ConfigManager() def get_config(): return _config + + +if __name__ == '__main__': + print(_config.print()) + print(_config.get_base('reference_audio_dir')) From d20bd37965546eb84437e38665fc4d1e6f08e553 Mon Sep 17 00:00:00 2001 From: Downupanddownup Date: Thu, 25 Apr 2024 17:36:13 +0800 Subject: [PATCH 14/72] =?UTF-8?q?=E8=B0=83=E6=95=B4=E9=85=8D=E7=BD=AE?= =?UTF-8?q?=E5=8F=82=E6=95=B0=EF=BC=8C=E8=BF=9B=E8=A1=8C=E9=9B=86=E4=B8=AD?= =?UTF-8?q?=E7=AE=A1=E7=90=86?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- Ref_Audio_Selector/config/config_manager.py | 1 - Ref_Audio_Selector/config/config_params.py | 42 +++++++++++++++++++ .../ref_audio_selector_webui.py | 25 ++++++----- .../tool/asr/funasr_asr_multi_level_dir.py | 5 +-- Ref_Audio_Selector/tool/audio_inference.py | 8 ++-- .../tool/delete_inference_with_ref.py | 7 ++-- .../tool/text_comparison/asr_text_process.py | 10 ++--- 7 files changed, 66 insertions(+), 32 deletions(-) create mode 100644 Ref_Audio_Selector/config/config_params.py diff --git a/Ref_Audio_Selector/config/config_manager.py b/Ref_Audio_Selector/config/config_manager.py index 4c2e47c1..6a7a1880 100644 --- a/Ref_Audio_Selector/config/config_manager.py +++ b/Ref_Audio_Selector/config/config_manager.py @@ -44,4 +44,3 @@ def get_config(): if __name__ == '__main__': print(_config.print()) - print(_config.get_base('reference_audio_dir')) diff --git a/Ref_Audio_Selector/config/config_params.py b/Ref_Audio_Selector/config/config_params.py new file mode 100644 index 00000000..74c77c6c --- /dev/null +++ b/Ref_Audio_Selector/config/config_params.py @@ -0,0 +1,42 @@ +import Ref_Audio_Selector.config.config_manager as config_manager + +config = config_manager.get_config() + +# [Base] +# 参考音频目录 +reference_audio_dir = config.get_base('reference_audio_dir') + +# [AudioSample] +# list转换待选参考音频目录 +list_to_convert_reference_audio_dir = config.get_audio_sample('list_to_convert_reference_audio_dir') +# 音频相似度目录 +audio_similarity_dir = config.get_audio_sample('audio_similarity_dir') + +# [Inference] +# 默认测试文本位置 +default_test_text_path = config.get_inference('default_test_text_path') +# 推理音频目录 +inference_audio_dir = config.get_inference('inference_audio_dir') +# 推理音频文本聚合目录 +inference_audio_text_aggregation_dir = 
config.get_inference('inference_audio_text_aggregation_dir') +# 推理音频情绪聚合目录 +inference_audio_emotion_aggregation_dir = config.get_inference('inference_audio_emotion_aggregation_dir') + +# [ResultCheck] +# asr输出文件 +asr_filename = config.get_result_check('asr_filename') +# 文本相似度输出目录 +text_similarity_output_dir = config.get_result_check('text_similarity_output_dir') +# 文本情绪平均相似度报告文件名 +text_emotion_average_similarity_report_filename = config.get_result_check('text_emotion_average_similarity_report_filename') +# 文本相似度按情绪聚合明细文件名 +text_similarity_by_emotion_detail_filename = config.get_result_check('text_similarity_by_emotion_detail_filename') +# 文本相似度按文本聚合明细文件名 +text_similarity_by_text_detail_filename = config.get_result_check('text_similarity_by_text_detail_filename') + +# [AudioConfig] +# 默认模板文件位置 +default_template_path = config.get_audio_config('default_template_path') +# 参考音频配置文件名 +reference_audio_config_filename = config.get_audio_config('reference_audio_config_filename') + diff --git a/Ref_Audio_Selector/ref_audio_selector_webui.py b/Ref_Audio_Selector/ref_audio_selector_webui.py index 954361a3..7eeba39d 100644 --- a/Ref_Audio_Selector/ref_audio_selector_webui.py +++ b/Ref_Audio_Selector/ref_audio_selector_webui.py @@ -8,7 +8,7 @@ import Ref_Audio_Selector.tool.audio_config as audio_config import Ref_Audio_Selector.tool.delete_inference_with_ref as delete_inference_with_ref import Ref_Audio_Selector.common.common as common -import Ref_Audio_Selector.config.config_manager as config_manager +import Ref_Audio_Selector.config.config_params as params from tools.i18n.i18n import I18nAuto from config import python_exec, is_half from tools import my_utils @@ -16,7 +16,6 @@ from subprocess import Popen i18n = I18nAuto() -config = config_manager.get_config() p_similarity = None p_asr = None @@ -32,7 +31,7 @@ def check_base_info(text_work_space_dir): # 从list文件,提取参考音频 def convert_from_list(text_work_space_dir, text_list_input): ref_audio_all = os.path.join(text_work_space_dir, - config.get_audio_sample('list_to_convert_reference_audio_dir')) + params.list_to_convert_reference_audio_dir) text_convert_from_list_info = f"转换成功:生成目录{ref_audio_all}" text_sample_dir = ref_audio_all try: @@ -51,7 +50,7 @@ def start_similarity_analysis(work_space_dir, sample_dir, base_voice_path, need_ similarity_list = None similarity_file_dir = None - similarity_dir = os.path.join(work_space_dir, config.get_audio_sample('audio_similarity_dir')) + similarity_dir = os.path.join(work_space_dir, params.audio_similarity_dir) os.makedirs(similarity_dir, exist_ok=True) base_voice_file_name = common.get_filename_without_extension(base_voice_path) @@ -82,7 +81,7 @@ def start_similarity_analysis(work_space_dir, sample_dir, base_voice_path, need_ # 基于一个基准音频,从参考音频目录中进行分段抽样 def sample(text_work_space_dir, text_sample_dir, text_base_voice_path, text_subsection_num, text_sample_num, checkbox_similarity_output): - ref_audio_dir = os.path.join(text_work_space_dir, config.get_base('reference_audio_dir')) + ref_audio_dir = os.path.join(text_work_space_dir, params.reference_audio_dir) text_sample_info = f"抽样成功:生成目录{ref_audio_dir}" try: check_base_info(text_work_space_dir) @@ -117,9 +116,9 @@ def sample(text_work_space_dir, text_sample_dir, text_base_voice_path, def model_inference(text_work_space_dir, text_model_inference_voice_dir, text_url, text_text, text_ref_path, text_ref_text, text_emotion, text_test_content): - inference_dir = os.path.join(text_work_space_dir, config.get_inference('inference_audio_dir')) - text_asr_audio_dir = 
os.path.join(inference_dir, - config.get_inference('inference_audio_text_aggregation_dir')) + inference_dir = os.path.join(text_work_space_dir, params.inference_audio_dir) + text_asr_audio_dir = os.path.join(inference_dir, + params.inference_audio_text_aggregation_dir) text_model_inference_info = f"推理成功:生成目录{inference_dir}" try: check_base_info(text_work_space_dir) @@ -202,7 +201,7 @@ def open_asr(asr_inp_dir, asr_opt_dir, asr_model, asr_model_size, asr_lang): output_dir_abs = os.path.abspath(asr_opt_dir) output_file_name = os.path.basename(asr_inp_dir) # 构造输出文件路径 - output_file_path = os.path.join(output_dir_abs, f'{config.get_result_check("asr_filename")}.list') + output_file_path = os.path.join(output_dir_abs, f'{params.asr_filename}.list') return output_file_path else: @@ -212,7 +211,7 @@ def open_asr(asr_inp_dir, asr_opt_dir, asr_model, asr_model_size, asr_lang): # 对asr生成的文件,与原本的文本内容,进行相似度分析 def text_similarity_analysis(text_work_space_dir, text_text_similarity_analysis_path): - similarity_dir = os.path.join(text_work_space_dir, config.get_result_check('text_similarity_output_dir')) + similarity_dir = os.path.join(text_work_space_dir, params.text_similarity_output_dir) text_text_similarity_analysis_info = f"相似度分析成功:生成目录{similarity_dir}" try: check_base_info(text_work_space_dir) @@ -289,7 +288,7 @@ def sync_ref_audio(text_work_space_dir, text_sync_ref_audio_dir, # 根据模板和参考音频目录,生成参考音频配置内容 def create_config(text_work_space_dir, text_template, text_sync_ref_audio_dir2): - config_file = os.path.join(text_work_space_dir, f'{config.get_audio_config("reference_audio_config_filename")}.json') + config_file = os.path.join(text_work_space_dir, f'{params.reference_audio_config_filename}.json') text_create_config_info = f"配置生成成功:生成文件{config_file}" try: check_base_info(text_work_space_dir) @@ -359,7 +358,7 @@ def whole_url(text_url, text_text, text_ref_path, text_ref_text, text_emotion): text_emotion.input(whole_url, [text_url, text_text, text_ref_path, text_ref_text, text_emotion], [text_whole_url]) gr.Markdown(value=i18n("2.2:配置待推理文本,一句一行,不要太多,10条即可")) - default_test_content_path = config.get_inference('default_test_text_path') + default_test_content_path = params.default_test_text_path text_test_content = gr.Text(label=i18n("请输入待推理文本路径"), value=default_test_content_path) gr.Markdown(value=i18n("2.3:启动推理服务,如果还没启动的话")) gr.Markdown(value=i18n("2.4:开始批量推理,这个过程比较耗时,可以去干点别的")) @@ -427,7 +426,7 @@ def whole_url(text_url, text_text, text_ref_path, text_ref_text, text_emotion): text_sync_inference_audio_dir], [text_sync_ref_info]) with gr.Accordion("第四步:生成参考音频配置文本", open=False): gr.Markdown(value=i18n("4.1:编辑模板")) - default_template_path = config.get_audio_config('default_template_path') + default_template_path = params.default_template_path default_template_content = common.read_file(default_template_path) text_template_path = gr.Text(label=i18n("模板文件路径"), value=default_template_path, interactive=False) text_template = gr.Text(label=i18n("模板内容"), value=default_template_content, lines=10) diff --git a/Ref_Audio_Selector/tool/asr/funasr_asr_multi_level_dir.py b/Ref_Audio_Selector/tool/asr/funasr_asr_multi_level_dir.py index 24753b52..3fbb0212 100644 --- a/Ref_Audio_Selector/tool/asr/funasr_asr_multi_level_dir.py +++ b/Ref_Audio_Selector/tool/asr/funasr_asr_multi_level_dir.py @@ -3,10 +3,9 @@ import argparse import os import traceback -import Ref_Audio_Selector.config.config_manager as config_manager +import Ref_Audio_Selector.config.config_params as params from tqdm import tqdm from funasr import AutoModel 
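The config_params module above turns each config.ini entry into a plain module-level constant, read once at import time, so call sites write params.inference_audio_dir instead of repeating section and key strings. The pattern in isolation (file name and keys below are hypothetical):

# my_params.py - standalone illustration of the read-once params-module pattern
import configparser

_config = configparser.ConfigParser()
_config.read('config.ini', encoding='utf-8')  # parsed a single time, at import

# importers simply do: import my_params; my_params.reference_audio_dir
reference_audio_dir = _config.get('Base', 'reference_audio_dir')
inference_audio_dir = _config.get('Inference', 'inference_audio_dir')
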
-config = config_manager.get_config() path_asr = 'tools/asr/models/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch' path_vad = 'tools/asr/models/speech_fsmn_vad_zh-cn-16k-common-pytorch' @@ -85,7 +84,7 @@ def execute_asr_multi_level_dir(input_folder, output_folder, model_size, languag os.makedirs(output_dir_abs, exist_ok=True) # 构造输出文件路径 - output_file_path = os.path.join(output_dir_abs, f'{config.get_result_check("asr_filename")}.list') + output_file_path = os.path.join(output_dir_abs, f'{params.asr_filename}.list') # 将输出写入文件 with open(output_file_path, "w", encoding="utf-8") as f: diff --git a/Ref_Audio_Selector/tool/audio_inference.py b/Ref_Audio_Selector/tool/audio_inference.py index 0df6b26d..efa4746e 100644 --- a/Ref_Audio_Selector/tool/audio_inference.py +++ b/Ref_Audio_Selector/tool/audio_inference.py @@ -1,11 +1,9 @@ import os import requests import itertools -import Ref_Audio_Selector.config.config_manager as config_manager +import Ref_Audio_Selector.config.config_params as params from urllib.parse import urlparse, parse_qs, urlencode, urlunparse, quote -config = config_manager.get_config() - class URLComposer: def __init__(self, base_url, emotion_param_name, text_param_name, ref_path_param_name, ref_text_param_name): @@ -80,9 +78,9 @@ def generate_audio_files(url_composer, text_list, emotion_list, output_dir_path) os.makedirs(output_dir, exist_ok=True) # Create subdirectories for text and emotion categories - text_subdir = os.path.join(output_dir, config.get_inference('inference_audio_text_aggregation_dir')) + text_subdir = os.path.join(output_dir, params.inference_audio_text_aggregation_dir) os.makedirs(text_subdir, exist_ok=True) - emotion_subdir = os.path.join(output_dir, config.get_inference('inference_audio_emotion_aggregation_dir')) + emotion_subdir = os.path.join(output_dir, params.inference_audio_emotion_aggregation_dir) os.makedirs(emotion_subdir, exist_ok=True) # 计算笛卡尔积 diff --git a/Ref_Audio_Selector/tool/delete_inference_with_ref.py b/Ref_Audio_Selector/tool/delete_inference_with_ref.py index 3a34c49c..74ff9d99 100644 --- a/Ref_Audio_Selector/tool/delete_inference_with_ref.py +++ b/Ref_Audio_Selector/tool/delete_inference_with_ref.py @@ -1,9 +1,8 @@ import os import shutil import Ref_Audio_Selector.common.common as common -import Ref_Audio_Selector.config.config_manager as config_manager +import Ref_Audio_Selector.config.config_params as params -config = config_manager.get_config() def remove_matching_audio_files_in_text_dir(text_dir, emotions_list): count = 0 @@ -53,8 +52,8 @@ def delete_emotion_subdirectories(emotion_dir, emotions_list): def sync_ref_audio(ref_audio_dir, inference_audio_dir): ref_audio_manager = common.RefAudioListManager(ref_audio_dir) ref_list = ref_audio_manager.get_ref_audio_list() - text_dir = os.path.join(inference_audio_dir, config.get_inference('inference_audio_text_aggregation_dir')) - emotion_dir = os.path.join(inference_audio_dir, config.get_inference('inference_audio_emotion_aggregation_dir')) + text_dir = os.path.join(inference_audio_dir, params.inference_audio_text_aggregation_dir) + emotion_dir = os.path.join(inference_audio_dir, params.inference_audio_emotion_aggregation_dir) delete_text_wav_num = remove_matching_audio_files_in_text_dir(text_dir, ref_list) delete_emotion_dir_num = delete_emotion_subdirectories(emotion_dir, ref_list) return delete_text_wav_num, delete_emotion_dir_num diff --git a/Ref_Audio_Selector/tool/text_comparison/asr_text_process.py b/Ref_Audio_Selector/tool/text_comparison/asr_text_process.py 
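For context on the sync_ref_audio hunk above: it assumes the inference output is mirrored under two aggregation trees, one grouped by text and one grouped by emotion, and it propagates deletions from the reference-audio directory into both. A hedged usage sketch follows; the paths are hypothetical, and only the call shape and return values are taken from this diff.

import Ref_Audio_Selector.tool.delete_inference_with_ref as delete_inference_with_ref

# Hypothetical example paths, for illustration only.
ref_audio_dir = 'work/my_role/refer_audio'            # pruned reference audios
inference_audio_dir = 'work/my_role/inference_audio'  # holds the text/ and emotion/ trees

# Mirrors reference-audio deletions into both inference trees and reports
# how many wav files and emotion subdirectories were removed.
wav_count, dir_count = delete_inference_with_ref.sync_ref_audio(ref_audio_dir, inference_audio_dir)
print(f'deleted {wav_count} wav files and {dir_count} emotion directories')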
index bc5791f7..49b27bae 100644 --- a/Ref_Audio_Selector/tool/text_comparison/asr_text_process.py +++ b/Ref_Audio_Selector/tool/text_comparison/asr_text_process.py @@ -3,11 +3,9 @@ from collections import defaultdict from operator import itemgetter import Ref_Audio_Selector.tool.text_comparison.text_comparison as text_comparison -import Ref_Audio_Selector.config.config_manager as config_manager +import Ref_Audio_Selector.config.config_params as params import Ref_Audio_Selector.common.common as common -config = config_manager.get_config() - def parse_asr_file(file_path): output = [] @@ -100,19 +98,19 @@ def process(asr_file_path, output_dir, similarity_enlarge_boundary): average_similarity_list = calculate_average_similarity_by_emotion(records) average_similarity_file = os.path.join(output_dir, - f'{config.get_result_check("text_emotion_average_similarity_report_filename")}.txt') + f'{params.text_emotion_average_similarity_report_filename}.txt') average_similarity_content = \ '\n'.join([f"{item['average_similarity_score']}|{item['emotion']}" for item in average_similarity_list]) common.write_text_to_file(average_similarity_content, average_similarity_file) emotion_detail_list = group_and_sort_by_field(records, 'emotion') - emotion_detail_file = os.path.join(output_dir, f'{config.get_result_check("emotion_group_detail")}.txt') + emotion_detail_file = os.path.join(output_dir, f'{params.text_similarity_by_emotion_detail_filename}.txt') format_list_to_text(emotion_detail_list, emotion_detail_file) original_text_detail_list = group_and_sort_by_field(records, 'original_text') - original_text_detail_file = os.path.join(output_dir, f'{config.get_result_check("text_group_detail")}.txt') + original_text_detail_file = os.path.join(output_dir, f'{params.text_similarity_by_text_detail_filename}.txt') format_list_to_text(original_text_detail_list, original_text_detail_file) print('文本相似度分析完成。') From d855eecc7bd3438523ea32c309669b55b8b66a0e Mon Sep 17 00:00:00 2001 From: Downupanddownup Date: Thu, 25 Apr 2024 18:50:52 +0800 Subject: [PATCH 15/72] =?UTF-8?q?=E6=B7=BB=E5=8A=A0=E7=9B=AE=E5=BD=95?= =?UTF-8?q?=E4=BF=9D=E5=AD=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- Ref_Audio_Selector/config.ini | 4 - Ref_Audio_Selector/config/config_manager.py | 29 +++- Ref_Audio_Selector/file/base_info/role.txt | 0 .../file/base_info/work_dir.txt | 0 .../ref_audio_selector_webui.py | 137 ++++++++++++------ 5 files changed, 121 insertions(+), 49 deletions(-) create mode 100644 Ref_Audio_Selector/file/base_info/role.txt create mode 100644 Ref_Audio_Selector/file/base_info/work_dir.txt diff --git a/Ref_Audio_Selector/config.ini b/Ref_Audio_Selector/config.ini index 1aef25d3..81bcb968 100644 --- a/Ref_Audio_Selector/config.ini +++ b/Ref_Audio_Selector/config.ini @@ -1,10 +1,6 @@ # config.ini [Base] -# 工作目录 -work_dir = -# 角色 -role = # 参考音频目录 reference_audio_dir = refer_audio diff --git a/Ref_Audio_Selector/config/config_manager.py b/Ref_Audio_Selector/config/config_manager.py index 6a7a1880..4183f2a0 100644 --- a/Ref_Audio_Selector/config/config_manager.py +++ b/Ref_Audio_Selector/config/config_manager.py @@ -1,5 +1,27 @@ import configparser -import re +import Ref_Audio_Selector.common.common as common + + +class ParamReadWriteManager: + def __init__(self): + self.work_dir_path = 'Ref_Audio_Selector/file/base_info/work_dir.txt' + self.role_path = 'Ref_Audio_Selector/file/base_info/role.txt' + + def read_work_dir(self): + content = common.read_file(self.work_dir_path) + 
return content.strip() + + def read_role(self): + content = common.read_file(self.role_path) + return content.strip() + + def write_work_dir(self, work_dir_content): + clean_content = work_dir_content.strip() + common.write_text_to_file(clean_content, self.work_dir_path) + + def write_role(self, role_content): + clean_content = role_content.strip() + common.write_text_to_file(clean_content, self.role_path) class ConfigManager: @@ -36,11 +58,16 @@ def print(self): _config = ConfigManager() +_param_read_write_manager = ParamReadWriteManager() def get_config(): return _config +def get_rw_param(): + return _param_read_write_manager + + if __name__ == '__main__': print(_config.print()) diff --git a/Ref_Audio_Selector/file/base_info/role.txt b/Ref_Audio_Selector/file/base_info/role.txt new file mode 100644 index 00000000..e69de29b diff --git a/Ref_Audio_Selector/file/base_info/work_dir.txt b/Ref_Audio_Selector/file/base_info/work_dir.txt new file mode 100644 index 00000000..e69de29b diff --git a/Ref_Audio_Selector/ref_audio_selector_webui.py b/Ref_Audio_Selector/ref_audio_selector_webui.py index 7eeba39d..3cab594a 100644 --- a/Ref_Audio_Selector/ref_audio_selector_webui.py +++ b/Ref_Audio_Selector/ref_audio_selector_webui.py @@ -20,24 +20,37 @@ p_similarity = None p_asr = None p_text_similarity = None +rw_param = params.config_manager.get_rw_param() # 校验基础信息 -def check_base_info(text_work_space_dir): +def check_base_info(text_work_space_dir, text_role): if text_work_space_dir is None or text_work_space_dir == '': raise Exception("工作目录不能为空") + if text_role is None or text_role == '': + raise Exception("角色不能为空") + base_role_dir = os.path.join(text_work_space_dir, text_role) + # 判断目录是否存在 + if not os.path.exists(base_role_dir): + # 如果不存在,则创建目录 + os.makedirs(base_role_dir, exist_ok=True) + return base_role_dir # 从list文件,提取参考音频 -def convert_from_list(text_work_space_dir, text_list_input): - ref_audio_all = os.path.join(text_work_space_dir, - params.list_to_convert_reference_audio_dir) - text_convert_from_list_info = f"转换成功:生成目录{ref_audio_all}" - text_sample_dir = ref_audio_all +def convert_from_list(text_work_space_dir, text_role, text_list_input): + text_convert_from_list_info = None + text_sample_dir = None try: - check_base_info(text_work_space_dir) + base_role_dir = check_base_info(text_work_space_dir, text_role) if text_list_input is None or text_list_input == '': raise Exception("list文件路径不能为空") + + ref_audio_all = os.path.join(base_role_dir, + params.list_to_convert_reference_audio_dir) + text_convert_from_list_info = f"转换成功:生成目录{ref_audio_all}" + text_sample_dir = ref_audio_all + audio_similarity.convert_from_list(text_list_input, ref_audio_all) except Exception as e: traceback.print_exc() @@ -79,12 +92,12 @@ def start_similarity_analysis(work_space_dir, sample_dir, base_voice_path, need_ # 基于一个基准音频,从参考音频目录中进行分段抽样 -def sample(text_work_space_dir, text_sample_dir, text_base_voice_path, +def sample(text_work_space_dir, text_role, text_sample_dir, text_base_voice_path, text_subsection_num, text_sample_num, checkbox_similarity_output): - ref_audio_dir = os.path.join(text_work_space_dir, params.reference_audio_dir) - text_sample_info = f"抽样成功:生成目录{ref_audio_dir}" + ref_audio_dir = None + text_sample_info = None try: - check_base_info(text_work_space_dir) + base_role_dir = check_base_info(text_work_space_dir, text_role) if text_sample_dir is None or text_sample_dir == '': raise Exception("参考音频抽样目录不能为空,请先完成上一步操作") if text_base_voice_path is None or text_base_voice_path == '': @@ -94,7 +107,10 @@ def 
sample(text_work_space_dir, text_sample_dir, text_base_voice_path, if text_sample_num is None or text_sample_num == '': raise Exception("每段随机抽样个数不能为空") - similarity_list, _, _ = start_similarity_analysis(text_work_space_dir, text_sample_dir, + ref_audio_dir = os.path.join(base_role_dir, params.reference_audio_dir) + text_sample_info = f"抽样成功:生成目录{ref_audio_dir}" + + similarity_list, _, _ = start_similarity_analysis(base_role_dir, text_sample_dir, text_base_voice_path, checkbox_similarity_output) if similarity_list is None: @@ -113,15 +129,14 @@ def sample(text_work_space_dir, text_sample_dir, text_base_voice_path, # 根据参考音频和测试文本,执行批量推理 -def model_inference(text_work_space_dir, text_model_inference_voice_dir, text_url, +def model_inference(text_work_space_dir, text_role, text_model_inference_voice_dir, text_url, text_text, text_ref_path, text_ref_text, text_emotion, text_test_content): - inference_dir = os.path.join(text_work_space_dir, params.inference_audio_dir) - text_asr_audio_dir = os.path.join(inference_dir, - params.inference_audio_text_aggregation_dir) - text_model_inference_info = f"推理成功:生成目录{inference_dir}" + inference_dir = None + text_asr_audio_dir = None + text_model_inference_info = None try: - check_base_info(text_work_space_dir) + base_role_dir = check_base_info(text_work_space_dir, text_role) if text_model_inference_voice_dir is None or text_model_inference_voice_dir == '': raise Exception("待推理的参考音频所在目录不能为空,请先完成上一步操作") if text_url is None or text_url == '': @@ -133,6 +148,12 @@ def model_inference(text_work_space_dir, text_model_inference_voice_dir, text_ur if (text_ref_path is None or text_ref_path == '') and (text_ref_text is None or text_ref_text == '') and ( text_emotion is None or text_emotion == ''): raise Exception("参考音频路径/文本和角色情绪二选一填写,不能全部为空") + + inference_dir = os.path.join(base_role_dir, params.inference_audio_dir) + text_asr_audio_dir = os.path.join(inference_dir, + params.inference_audio_text_aggregation_dir) + text_model_inference_info = f"推理成功:生成目录{inference_dir}" + url_composer = audio_inference.URLComposer(text_url, text_emotion, text_text, text_ref_path, text_ref_text) url_composer.is_valid() text_list = common.read_text_file_to_list(text_test_content) @@ -151,13 +172,13 @@ def model_inference(text_work_space_dir, text_model_inference_voice_dir, text_ur # 对推理生成音频执行asr -def asr(text_work_space_dir, text_asr_audio_dir, dropdown_asr_model, +def asr(text_work_space_dir, text_role, text_asr_audio_dir, dropdown_asr_model, dropdown_asr_size, dropdown_asr_lang): asr_file = None text_text_similarity_analysis_path = None text_asr_info = None try: - check_base_info(text_work_space_dir) + base_role_dir = check_base_info(text_work_space_dir, text_role) if text_asr_audio_dir is None or text_asr_audio_dir == '': raise Exception("待asr的音频所在目录不能为空,请先完成上一步操作") if dropdown_asr_model is None or dropdown_asr_model == '': @@ -166,7 +187,7 @@ def asr(text_work_space_dir, text_asr_audio_dir, dropdown_asr_model, raise Exception("asr模型大小不能为空") if dropdown_asr_lang is None or dropdown_asr_lang == '': raise Exception("asr语言不能为空") - asr_file = open_asr(text_asr_audio_dir, text_work_space_dir, dropdown_asr_model, dropdown_asr_size, + asr_file = open_asr(text_asr_audio_dir, base_role_dir, dropdown_asr_model, dropdown_asr_size, dropdown_asr_lang) text_text_similarity_analysis_path = asr_file text_asr_info = f"asr成功:生成文件{asr_file}" @@ -209,14 +230,16 @@ def open_asr(asr_inp_dir, asr_opt_dir, asr_model, asr_model_size, asr_lang): # 对asr生成的文件,与原本的文本内容,进行相似度分析 -def 
text_similarity_analysis(text_work_space_dir, +def text_similarity_analysis(text_work_space_dir, text_role, text_text_similarity_analysis_path): - similarity_dir = os.path.join(text_work_space_dir, params.text_similarity_output_dir) - text_text_similarity_analysis_info = f"相似度分析成功:生成目录{similarity_dir}" + similarity_dir = None + text_text_similarity_analysis_info = None try: - check_base_info(text_work_space_dir) + base_role_dir = check_base_info(text_work_space_dir, text_role) if text_text_similarity_analysis_path is None or text_text_similarity_analysis_path == '': raise Exception("asr生成的文件路径不能为空,请先完成上一步操作") + similarity_dir = os.path.join(base_role_dir, params.text_similarity_output_dir) + text_text_similarity_analysis_info = f"相似度分析成功:生成目录{similarity_dir}" open_text_similarity_analysis(text_text_similarity_analysis_path, similarity_dir) except Exception as e: traceback.print_exc() @@ -244,17 +267,17 @@ def open_text_similarity_analysis(asr_file_path, output_dir, similarity_enlarge_ # 根据一个参考音频,对指定目录下的音频进行相似度分析,并输出到另一个目录 -def similarity_audio_output(text_work_space_dir, text_base_audio_path, +def similarity_audio_output(text_work_space_dir, text_role, text_base_audio_path, text_compare_audio_dir): text_similarity_audio_output_info = None try: - check_base_info(text_work_space_dir) + base_role_dir = check_base_info(text_work_space_dir, text_role) if text_base_audio_path is None or text_base_audio_path == '': raise Exception("基准音频路径不能为空") if text_compare_audio_dir is None or text_compare_audio_dir == '': raise Exception("待分析的音频所在目录不能为空") similarity_list, similarity_file, similarity_file_dir = start_similarity_analysis( - text_work_space_dir, text_compare_audio_dir, text_base_audio_path, True) + base_role_dir, text_compare_audio_dir, text_base_audio_path, True) if similarity_list is None: raise Exception("相似度分析失败") @@ -268,11 +291,11 @@ def similarity_audio_output(text_work_space_dir, text_base_audio_path, # 根据参考音频目录的删除情况,将其同步到推理生成的音频目录中,即参考音频目录下,删除了几个参考音频,就在推理目录下,将这些参考音频生成的音频文件移除 -def sync_ref_audio(text_work_space_dir, text_sync_ref_audio_dir, +def sync_ref_audio(text_work_space_dir, text_role, text_sync_ref_audio_dir, text_sync_inference_audio_dir): text_sync_ref_audio_info = None try: - check_base_info(text_work_space_dir) + check_base_info(text_work_space_dir, text_role) if text_sync_ref_audio_dir is None or text_sync_ref_audio_dir == '': raise Exception("参考音频目录不能为空") if text_sync_inference_audio_dir is None or text_sync_inference_audio_dir == '': @@ -287,17 +310,19 @@ def sync_ref_audio(text_work_space_dir, text_sync_ref_audio_dir, # 根据模板和参考音频目录,生成参考音频配置内容 -def create_config(text_work_space_dir, text_template, text_sync_ref_audio_dir2): - config_file = os.path.join(text_work_space_dir, f'{params.reference_audio_config_filename}.json') - text_create_config_info = f"配置生成成功:生成文件{config_file}" +def create_config(text_work_space_dir, text_role, text_template, text_sync_ref_audio_dir2): + config_file = None + text_create_config_info = None try: - check_base_info(text_work_space_dir) + base_role_dir = check_base_info(text_work_space_dir, text_role) if text_template is None or text_template == '': raise Exception("参考音频抽样目录不能为空") if text_sync_ref_audio_dir2 is None or text_sync_ref_audio_dir2 == '': raise Exception("参考音频目录不能为空") + config_file = os.path.join(base_role_dir, f'{params.reference_audio_config_filename}.json') + text_create_config_info = f"配置生成成功:生成文件{config_file}" ref_audio_manager = common.RefAudioListManager(text_sync_ref_audio_dir2) - 
audio_config.generate_audio_config(text_work_space_dir, text_template, ref_audio_manager.get_ref_audio_list(), + audio_config.generate_audio_config(base_role_dir, text_template, ref_audio_manager.get_ref_audio_list(), config_file) except Exception as e: traceback.print_exc() @@ -315,9 +340,33 @@ def whole_url(text_url, text_text, text_ref_path, text_ref_text, text_emotion): return text_whole_url +def save_work_dir(text_work_space_dir, text_role): + rw_param.write_work_dir(text_work_space_dir) + if text_role is not None and text_role != '': + return text_role + else: + role_dir = '' + for i in range(1, 101): + dir_name = os.path.join(text_work_space_dir, f"role_{i}") + if not os.path.isdir(dir_name): + role_dir = dir_name + break + rw_param.write_role(role_dir) + return role_dir + + +def save_role(text_role): + rw_param.write_role(text_role) + + with gr.Blocks() as app: gr.Markdown(value=i18n("基本介绍:这是一个从训练素材中,批量提取参考音频,并进行效果评估与配置生成的工具")) - text_work_space_dir = gr.Text(label=i18n("工作目录,后续操作所生成文件都会保存在此目录下"), value="") + with gr.Row(): + text_work_space_dir = gr.Text(label=i18n("工作目录,后续操作所生成文件都会保存在此目录下"), + value=rw_param.read_work_dir()) + text_role = gr.Text(label=i18n("角色名称"), value=rw_param.read_role()) + text_work_space_dir.input(save_work_dir, [text_work_space_dir, text_role], [text_role]) + text_role.input(save_role, [text_role], []) with gr.Accordion(label=i18n("第一步:基于训练素材,生成待选参考音频列表"), open=False): gr.Markdown(value=i18n("1.1:选择list文件,并提取3-10秒的素材作为参考候选")) text_list_input = gr.Text(label=i18n("请输入list文件路径"), value="") @@ -326,7 +375,7 @@ def whole_url(text_url, text_text, text_ref_path, text_ref_text, text_emotion): text_convert_from_list_info = gr.Text(label=i18n("参考列表生成结果"), value="", interactive=False) gr.Markdown(value=i18n("1.2:选择基准音频,执行相似度匹配,并分段随机抽样")) text_sample_dir = gr.Text(label=i18n("参考音频抽样目录"), value="", interactive=False) - button_convert_from_list.click(convert_from_list, [text_work_space_dir, text_list_input], + button_convert_from_list.click(convert_from_list, [text_work_space_dir, text_role, text_list_input], [text_convert_from_list_info, text_sample_dir]) with gr.Row(): text_base_voice_path = gr.Text(label=i18n("请输入基准音频路径"), value="") @@ -369,7 +418,7 @@ def whole_url(text_url, text_text, text_ref_path, text_ref_text, text_emotion): gr.Markdown(value=i18n("3.1:启动asr,获取推理音频文本")) text_asr_audio_dir = gr.Text(label=i18n("待asr的音频所在目录"), value="", interactive=False) button_model_inference.click(model_inference, - [text_work_space_dir, text_model_inference_voice_dir, text_url, + [text_work_space_dir, text_role, text_model_inference_voice_dir, text_url, text_text, text_ref_path, text_ref_text, text_emotion, text_test_content], [text_model_inference_info, text_asr_audio_dir]) with gr.Row(): @@ -396,13 +445,13 @@ def whole_url(text_url, text_text, text_ref_path, text_ref_text, text_emotion): text_asr_info = gr.Text(label=i18n("asr结果"), value="", interactive=False) gr.Markdown(value=i18n("3.2:启动文本相似度分析")) text_text_similarity_analysis_path = gr.Text(label=i18n("待分析的文件路径"), value="", interactive=False) - button_asr.click(asr, [text_work_space_dir, text_asr_audio_dir, dropdown_asr_model, + button_asr.click(asr, [text_work_space_dir, text_role, text_asr_audio_dir, dropdown_asr_model, dropdown_asr_size, dropdown_asr_lang], [text_asr_info, text_text_similarity_analysis_path]) with gr.Row(): button_text_similarity_analysis = gr.Button(i18n("启动文本相似度分析"), variant="primary") text_text_similarity_analysis_info = gr.Text(label=i18n("文本相似度分析结果"), value="", interactive=False) - 
button_text_similarity_analysis.click(text_similarity_analysis, [text_work_space_dir, + button_text_similarity_analysis.click(text_similarity_analysis, [text_work_space_dir, text_role, text_text_similarity_analysis_path], [text_text_similarity_analysis_info]) gr.Markdown(value=i18n("3.3:根据相似度分析结果,重点检查最后几条是否存在复读等问题")) @@ -414,7 +463,7 @@ def whole_url(text_url, text_text, text_ref_path, text_ref_text, text_emotion): button_similarity_audio_output = gr.Button(i18n("输出相似度-参考音频到临时目录"), variant="primary") text_similarity_audio_output_info = gr.Text(label=i18n("输出结果"), value="", interactive=False) button_similarity_audio_output.click(similarity_audio_output, - [text_work_space_dir, text_base_audio_path, + [text_work_space_dir, text_role, text_base_audio_path, text_compare_audio_dir], [text_similarity_audio_output_info]) with gr.Row(): text_sync_ref_audio_dir = gr.Text(label=i18n("参考音频路径"), value="", interactive=True) @@ -422,7 +471,7 @@ def whole_url(text_url, text_text, text_ref_path, text_ref_text, text_emotion): with gr.Row(): button_sync_ref_audio = gr.Button(i18n("将参考音频的删除情况,同步到推理音频目录"), variant="primary") text_sync_ref_info = gr.Text(label=i18n("同步结果"), value="", interactive=False) - button_sync_ref_audio.click(sync_ref_audio, [text_work_space_dir, text_sync_ref_audio_dir, + button_sync_ref_audio.click(sync_ref_audio, [text_work_space_dir, text_role, text_sync_ref_audio_dir, text_sync_inference_audio_dir], [text_sync_ref_info]) with gr.Accordion("第四步:生成参考音频配置文本", open=False): gr.Markdown(value=i18n("4.1:编辑模板")) @@ -436,9 +485,9 @@ def whole_url(text_url, text_text, text_ref_path, text_ref_text, text_emotion): button_create_config = gr.Button(i18n("生成配置"), variant="primary") text_create_config_info = gr.Text(label=i18n("生成结果"), value="", interactive=False) button_create_config.click(create_config, - [text_work_space_dir, text_template, text_sync_ref_audio_dir2], + [text_work_space_dir, text_role, text_template, text_sync_ref_audio_dir2], [text_create_config_info]) - button_sample.click(sample, [text_work_space_dir, text_sample_dir, text_base_voice_path, + button_sample.click(sample, [text_work_space_dir, text_role, text_sample_dir, text_base_voice_path, text_subsection_num, text_sample_num, checkbox_similarity_output], [text_sample_info, text_model_inference_voice_dir, text_sync_ref_audio_dir, text_sync_ref_audio_dir2]) From c8be484c0eeca64ed2ae75a25955a659f87f7266 Mon Sep 17 00:00:00 2001 From: Downupanddownup Date: Thu, 25 Apr 2024 19:09:27 +0800 Subject: [PATCH 16/72] =?UTF-8?q?=E6=B7=BB=E5=8A=A0=E8=B7=AF=E5=BE=84?= =?UTF-8?q?=E6=B8=85=E7=90=86?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- Ref_Audio_Selector/common/common.py | 7 ++++++ .../ref_audio_selector_webui.py | 24 +++++++++++++++++++ Ref_Audio_Selector/tool/audio_config.py | 1 - 3 files changed, 31 insertions(+), 1 deletion(-) diff --git a/Ref_Audio_Selector/common/common.py b/Ref_Audio_Selector/common/common.py index 3d746747..b051b222 100644 --- a/Ref_Audio_Selector/common/common.py +++ b/Ref_Audio_Selector/common/common.py @@ -105,3 +105,10 @@ def write_text_to_file(text, output_file_path): print(f"Error occurred while writing to the file: {e}") else: print(f"Text successfully written to file: {output_file_path}") + + +if __name__ == '__main__': + dir = r'C:\Users\Administrator\Desktop/test' + dir2 = r'"C:\Users\Administrator\Desktop\test2"' + dir, dir2 = batch_clean_paths([dir, dir2]) + print(dir, dir2) \ No newline at end of file diff --git 
a/Ref_Audio_Selector/ref_audio_selector_webui.py b/Ref_Audio_Selector/ref_audio_selector_webui.py index 3cab594a..40eb9d88 100644 --- a/Ref_Audio_Selector/ref_audio_selector_webui.py +++ b/Ref_Audio_Selector/ref_audio_selector_webui.py @@ -39,6 +39,8 @@ def check_base_info(text_work_space_dir, text_role): # 从list文件,提取参考音频 def convert_from_list(text_work_space_dir, text_role, text_list_input): + text_work_space_dir, text_list_input = common.batch_clean_paths([text_work_space_dir, text_list_input]) + text_convert_from_list_info = None text_sample_dir = None try: @@ -94,6 +96,9 @@ def start_similarity_analysis(work_space_dir, sample_dir, base_voice_path, need_ # 基于一个基准音频,从参考音频目录中进行分段抽样 def sample(text_work_space_dir, text_role, text_sample_dir, text_base_voice_path, text_subsection_num, text_sample_num, checkbox_similarity_output): + text_work_space_dir, text_sample_dir, text_base_voice_path \ + = common.batch_clean_paths([text_work_space_dir, text_sample_dir, text_base_voice_path]) + ref_audio_dir = None text_sample_info = None try: @@ -132,6 +137,9 @@ def sample(text_work_space_dir, text_role, text_sample_dir, text_base_voice_path def model_inference(text_work_space_dir, text_role, text_model_inference_voice_dir, text_url, text_text, text_ref_path, text_ref_text, text_emotion, text_test_content): + text_work_space_dir, text_model_inference_voice_dir, text_test_content \ + = common.batch_clean_paths([text_work_space_dir, text_model_inference_voice_dir, text_test_content]) + inference_dir = None text_asr_audio_dir = None text_model_inference_info = None @@ -174,6 +182,9 @@ def model_inference(text_work_space_dir, text_role, text_model_inference_voice_d # 对推理生成音频执行asr def asr(text_work_space_dir, text_role, text_asr_audio_dir, dropdown_asr_model, dropdown_asr_size, dropdown_asr_lang): + text_work_space_dir, text_asr_audio_dir \ + = common.batch_clean_paths([text_work_space_dir, text_asr_audio_dir]) + asr_file = None text_text_similarity_analysis_path = None text_asr_info = None @@ -232,6 +243,9 @@ def open_asr(asr_inp_dir, asr_opt_dir, asr_model, asr_model_size, asr_lang): # 对asr生成的文件,与原本的文本内容,进行相似度分析 def text_similarity_analysis(text_work_space_dir, text_role, text_text_similarity_analysis_path): + text_work_space_dir, text_text_similarity_analysis_path \ + = common.batch_clean_paths([text_work_space_dir, text_text_similarity_analysis_path]) + similarity_dir = None text_text_similarity_analysis_info = None try: @@ -269,6 +283,9 @@ def open_text_similarity_analysis(asr_file_path, output_dir, similarity_enlarge_ # 根据一个参考音频,对指定目录下的音频进行相似度分析,并输出到另一个目录 def similarity_audio_output(text_work_space_dir, text_role, text_base_audio_path, text_compare_audio_dir): + text_work_space_dir, text_base_audio_path, text_compare_audio_dir \ + = common.batch_clean_paths([text_work_space_dir, text_base_audio_path, text_compare_audio_dir]) + text_similarity_audio_output_info = None try: base_role_dir = check_base_info(text_work_space_dir, text_role) @@ -293,6 +310,9 @@ def similarity_audio_output(text_work_space_dir, text_role, text_base_audio_path # 根据参考音频目录的删除情况,将其同步到推理生成的音频目录中,即参考音频目录下,删除了几个参考音频,就在推理目录下,将这些参考音频生成的音频文件移除 def sync_ref_audio(text_work_space_dir, text_role, text_sync_ref_audio_dir, text_sync_inference_audio_dir): + text_work_space_dir, text_sync_ref_audio_dir, text_sync_inference_audio_dir \ + = common.batch_clean_paths([text_work_space_dir, text_sync_ref_audio_dir, text_sync_inference_audio_dir]) + text_sync_ref_audio_info = None try: check_base_info(text_work_space_dir, text_role) @@ -311,6 +331,9 
@@ def sync_ref_audio(text_work_space_dir, text_role, text_sync_ref_audio_dir, # 根据模板和参考音频目录,生成参考音频配置内容 def create_config(text_work_space_dir, text_role, text_template, text_sync_ref_audio_dir2): + text_work_space_dir, text_sync_ref_audio_dir2 \ + = common.batch_clean_paths([text_work_space_dir, text_sync_ref_audio_dir2]) + config_file = None text_create_config_info = None try: @@ -341,6 +364,7 @@ def whole_url(text_url, text_text, text_ref_path, text_ref_text, text_emotion): def save_work_dir(text_work_space_dir, text_role): + text_work_space_dir = my_utils.clean_path(text_work_space_dir) rw_param.write_work_dir(text_work_space_dir) if text_role is not None and text_role != '': return text_role diff --git a/Ref_Audio_Selector/tool/audio_config.py b/Ref_Audio_Selector/tool/audio_config.py index b6194343..1783f1de 100644 --- a/Ref_Audio_Selector/tool/audio_config.py +++ b/Ref_Audio_Selector/tool/audio_config.py @@ -1,6 +1,5 @@ import os import platform -from tools import my_utils def generate_audio_config(work_space_dir, template_str, audio_list, output_file_path): From 1da23aa25978a2aa36a6dda66bde50b3d4819eec Mon Sep 17 00:00:00 2001 From: Downupanddownup Date: Thu, 25 Apr 2024 22:54:40 +0800 Subject: [PATCH 17/72] =?UTF-8?q?bug=E4=BF=AE=E5=A4=8D?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../{config => config_param}/__init__.py | 0 .../config_manager.py | 0 .../{config => config_param}/config_params.py | 2 +- .../ref_audio_selector_webui.py | 26 +++++++++---------- .../tool/asr/funasr_asr_multi_level_dir.py | 2 +- Ref_Audio_Selector/tool/audio_inference.py | 2 +- .../tool/delete_inference_with_ref.py | 6 ++--- .../tool/text_comparison/asr_text_process.py | 4 +-- 8 files changed, 21 insertions(+), 21 deletions(-) rename Ref_Audio_Selector/{config => config_param}/__init__.py (100%) rename Ref_Audio_Selector/{config => config_param}/config_manager.py (100%) rename Ref_Audio_Selector/{config => config_param}/config_params.py (96%) diff --git a/Ref_Audio_Selector/config/__init__.py b/Ref_Audio_Selector/config_param/__init__.py similarity index 100% rename from Ref_Audio_Selector/config/__init__.py rename to Ref_Audio_Selector/config_param/__init__.py diff --git a/Ref_Audio_Selector/config/config_manager.py b/Ref_Audio_Selector/config_param/config_manager.py similarity index 100% rename from Ref_Audio_Selector/config/config_manager.py rename to Ref_Audio_Selector/config_param/config_manager.py diff --git a/Ref_Audio_Selector/config/config_params.py b/Ref_Audio_Selector/config_param/config_params.py similarity index 96% rename from Ref_Audio_Selector/config/config_params.py rename to Ref_Audio_Selector/config_param/config_params.py index 74c77c6c..015a5e71 100644 --- a/Ref_Audio_Selector/config/config_params.py +++ b/Ref_Audio_Selector/config_param/config_params.py @@ -1,4 +1,4 @@ -import Ref_Audio_Selector.config.config_manager as config_manager +import Ref_Audio_Selector.config_param.config_manager as config_manager config = config_manager.get_config() diff --git a/Ref_Audio_Selector/ref_audio_selector_webui.py b/Ref_Audio_Selector/ref_audio_selector_webui.py index 40eb9d88..710aa3c5 100644 --- a/Ref_Audio_Selector/ref_audio_selector_webui.py +++ b/Ref_Audio_Selector/ref_audio_selector_webui.py @@ -8,7 +8,7 @@ import Ref_Audio_Selector.tool.audio_config as audio_config import Ref_Audio_Selector.tool.delete_inference_with_ref as delete_inference_with_ref import Ref_Audio_Selector.common.common as common -import 
Ref_Audio_Selector.config.config_params as params +import Ref_Audio_Selector.config_param.config_params as params from tools.i18n.i18n import I18nAuto from config import python_exec, is_half from tools import my_utils @@ -176,7 +176,7 @@ def model_inference(text_work_space_dir, text_role, text_model_inference_voice_d traceback.print_exc() text_model_inference_info = f"发生异常:{e}" text_asr_audio_dir = '' - return i18n(text_model_inference_info), text_asr_audio_dir + return i18n(text_model_inference_info), text_asr_audio_dir, text_asr_audio_dir # 对推理生成音频执行asr @@ -371,9 +371,9 @@ def save_work_dir(text_work_space_dir, text_role): else: role_dir = '' for i in range(1, 101): - dir_name = os.path.join(text_work_space_dir, f"role_{i}") + role_dir = f"role_{i}" + dir_name = os.path.join(text_work_space_dir, role_dir) if not os.path.isdir(dir_name): - role_dir = dir_name break rw_param.write_role(role_dir) return role_dir @@ -398,7 +398,7 @@ def save_role(text_role): button_convert_from_list = gr.Button(i18n("开始生成待参考列表"), variant="primary") text_convert_from_list_info = gr.Text(label=i18n("参考列表生成结果"), value="", interactive=False) gr.Markdown(value=i18n("1.2:选择基准音频,执行相似度匹配,并分段随机抽样")) - text_sample_dir = gr.Text(label=i18n("参考音频抽样目录"), value="", interactive=False) + text_sample_dir = gr.Text(label=i18n("参考音频抽样目录"), value="", interactive=True) button_convert_from_list.click(convert_from_list, [text_work_space_dir, text_role, text_list_input], [text_convert_from_list_info, text_sample_dir]) with gr.Row(): @@ -440,11 +440,7 @@ def save_role(text_role): text_model_inference_info = gr.Text(label=i18n("批量推理结果"), value="", interactive=False) with gr.Accordion(label=i18n("第三步:进行参考音频效果校验与筛选"), open=False): gr.Markdown(value=i18n("3.1:启动asr,获取推理音频文本")) - text_asr_audio_dir = gr.Text(label=i18n("待asr的音频所在目录"), value="", interactive=False) - button_model_inference.click(model_inference, - [text_work_space_dir, text_role, text_model_inference_voice_dir, text_url, - text_text, text_ref_path, text_ref_text, text_emotion, - text_test_content], [text_model_inference_info, text_asr_audio_dir]) + text_asr_audio_dir = gr.Text(label=i18n("待asr的音频所在目录"), value="", interactive=True) with gr.Row(): dropdown_asr_model = gr.Dropdown( label=i18n("ASR 模型"), @@ -468,7 +464,7 @@ def save_role(text_role): button_asr = gr.Button(i18n("启动asr"), variant="primary") text_asr_info = gr.Text(label=i18n("asr结果"), value="", interactive=False) gr.Markdown(value=i18n("3.2:启动文本相似度分析")) - text_text_similarity_analysis_path = gr.Text(label=i18n("待分析的文件路径"), value="", interactive=False) + text_text_similarity_analysis_path = gr.Text(label=i18n("待分析的文件路径"), value="", interactive=True) button_asr.click(asr, [text_work_space_dir, text_role, text_asr_audio_dir, dropdown_asr_model, dropdown_asr_size, dropdown_asr_lang], [text_asr_info, text_text_similarity_analysis_path]) @@ -491,7 +487,7 @@ def save_role(text_role): text_compare_audio_dir], [text_similarity_audio_output_info]) with gr.Row(): text_sync_ref_audio_dir = gr.Text(label=i18n("参考音频路径"), value="", interactive=True) - text_sync_inference_audio_dir = gr.Text(label=i18n("被同步的推理音频路径"), value="", interactive=False) + text_sync_inference_audio_dir = gr.Text(label=i18n("被同步的推理音频路径"), value="", interactive=True) with gr.Row(): button_sync_ref_audio = gr.Button(i18n("将参考音频的删除情况,同步到推理音频目录"), variant="primary") text_sync_ref_info = gr.Text(label=i18n("同步结果"), value="", interactive=False) @@ -501,7 +497,7 @@ def save_role(text_role): gr.Markdown(value=i18n("4.1:编辑模板")) default_template_path = 
params.default_template_path default_template_content = common.read_file(default_template_path) - text_template_path = gr.Text(label=i18n("模板文件路径"), value=default_template_path, interactive=False) + text_template_path = gr.Text(label=i18n("模板文件路径"), value=default_template_path, interactive=True) text_template = gr.Text(label=i18n("模板内容"), value=default_template_content, lines=10) gr.Markdown(value=i18n("4.2:生成配置")) text_sync_ref_audio_dir2 = gr.Text(label=i18n("参考音频路径"), value="", interactive=True) @@ -515,6 +511,10 @@ def save_role(text_role): text_subsection_num, text_sample_num, checkbox_similarity_output], [text_sample_info, text_model_inference_voice_dir, text_sync_ref_audio_dir, text_sync_ref_audio_dir2]) + button_model_inference.click(model_inference, + [text_work_space_dir, text_role, text_model_inference_voice_dir, text_url, + text_text, text_ref_path, text_ref_text, text_emotion, + text_test_content], [text_model_inference_info, text_asr_audio_dir, text_sync_inference_audio_dir]) app.launch( server_port=9423, diff --git a/Ref_Audio_Selector/tool/asr/funasr_asr_multi_level_dir.py b/Ref_Audio_Selector/tool/asr/funasr_asr_multi_level_dir.py index 3fbb0212..d6e04c81 100644 --- a/Ref_Audio_Selector/tool/asr/funasr_asr_multi_level_dir.py +++ b/Ref_Audio_Selector/tool/asr/funasr_asr_multi_level_dir.py @@ -3,7 +3,7 @@ import argparse import os import traceback -import Ref_Audio_Selector.config.config_params as params +import Ref_Audio_Selector.config_param.config_params as params from tqdm import tqdm from funasr import AutoModel diff --git a/Ref_Audio_Selector/tool/audio_inference.py b/Ref_Audio_Selector/tool/audio_inference.py index efa4746e..a93328d9 100644 --- a/Ref_Audio_Selector/tool/audio_inference.py +++ b/Ref_Audio_Selector/tool/audio_inference.py @@ -1,7 +1,7 @@ import os import requests import itertools -import Ref_Audio_Selector.config.config_params as params +import Ref_Audio_Selector.config_param.config_params as params from urllib.parse import urlparse, parse_qs, urlencode, urlunparse, quote diff --git a/Ref_Audio_Selector/tool/delete_inference_with_ref.py b/Ref_Audio_Selector/tool/delete_inference_with_ref.py index 74ff9d99..4d7f1758 100644 --- a/Ref_Audio_Selector/tool/delete_inference_with_ref.py +++ b/Ref_Audio_Selector/tool/delete_inference_with_ref.py @@ -1,7 +1,7 @@ import os import shutil import Ref_Audio_Selector.common.common as common -import Ref_Audio_Selector.config.config_params as params +import Ref_Audio_Selector.config_param.config_params as params def remove_matching_audio_files_in_text_dir(text_dir, emotions_list): @@ -11,7 +11,7 @@ def remove_matching_audio_files_in_text_dir(text_dir, emotions_list): emotion_tag = emotion_dict['emotion'] wav_file_name = f"{emotion_tag}.wav" file_path = os.path.join(root, wav_file_name) - if os.path.exists(file_path): + if not os.path.exists(file_path): print(f"Deleting file: {file_path}") try: os.remove(file_path) @@ -38,7 +38,7 @@ def delete_emotion_subdirectories(emotion_dir, emotions_list): folder_path = os.path.join(emotion_dir, emotion_folder) # 检查emotion子目录是否存在 - if os.path.isdir(folder_path): + if not os.path.isdir(folder_path): print(f"Deleting directory: {folder_path}") try: # 使用shutil.rmtree删除整个子目录及其内容 diff --git a/Ref_Audio_Selector/tool/text_comparison/asr_text_process.py b/Ref_Audio_Selector/tool/text_comparison/asr_text_process.py index 49b27bae..79e2f1ff 100644 --- a/Ref_Audio_Selector/tool/text_comparison/asr_text_process.py +++ b/Ref_Audio_Selector/tool/text_comparison/asr_text_process.py @@ -3,7 +3,7 
@@ from collections import defaultdict from operator import itemgetter import Ref_Audio_Selector.tool.text_comparison.text_comparison as text_comparison -import Ref_Audio_Selector.config.config_params as params +import Ref_Audio_Selector.config_param.config_params as params import Ref_Audio_Selector.common.common as common @@ -34,7 +34,7 @@ def parse_asr_file(file_path): def calculate_similarity_and_append_to_list(input_list, boundary): for item in input_list: - similarity_score = text_comparison.calculate_result(item['original_text'], item['asr_text'], boundary) + _, similarity_score = text_comparison.calculate_result(item['original_text'], item['asr_text'], boundary) item['similarity_score'] = similarity_score return input_list From 2880e3a6f88af2bd4128ec4724c38733aec3121e Mon Sep 17 00:00:00 2001 From: Downupanddownup Date: Fri, 26 Apr 2024 13:25:02 +0800 Subject: [PATCH 18/72] =?UTF-8?q?=E6=B7=BB=E5=8A=A0=E6=80=A7=E8=83=BD?= =?UTF-8?q?=E7=9B=91=E6=8E=A7?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- Ref_Audio_Selector/common/time_util.py | 71 ++++++++++++ Ref_Audio_Selector/config.ini | 4 + .../config_param/config_params.py | 4 + Ref_Audio_Selector/config_param/log_config.py | 39 +++++++ .../ref_audio_selector_webui.py | 103 +++++++++++++----- .../tool/asr/funasr_asr_multi_level_dir.py | 26 +---- Ref_Audio_Selector/tool/audio_inference.py | 2 + Ref_Audio_Selector/tool/audio_similarity.py | 6 +- .../speaker_verification/voice_similarity.py | 2 + .../tool/text_comparison/asr_text_process.py | 2 + 10 files changed, 202 insertions(+), 57 deletions(-) create mode 100644 Ref_Audio_Selector/common/time_util.py create mode 100644 Ref_Audio_Selector/config_param/log_config.py diff --git a/Ref_Audio_Selector/common/time_util.py b/Ref_Audio_Selector/common/time_util.py new file mode 100644 index 00000000..82d63b8c --- /dev/null +++ b/Ref_Audio_Selector/common/time_util.py @@ -0,0 +1,71 @@ +import time +from Ref_Audio_Selector.config_param.log_config import p_logger +import Ref_Audio_Selector.config_param.config_params as params + + +def timeit_decorator(func): + """ + 装饰器,用于计算被装饰函数的执行时间。 + + 参数: + func (function): 要计时的函数。 + + 返回: + function: 包含计时功能的新函数。 + """ + + def wrapper(*args, **kwargs): + if params.time_log_print_type != 'file': + return func(*args, **kwargs) + + start_time = time.perf_counter() # 使用 perf_counter 获取高精度计时起点 + + func_result = func(*args, **kwargs) # 执行原函数 + + end_time = time.perf_counter() # 获取计时终点 + elapsed_time = end_time - start_time # 计算执行耗时 + + # 记录日志内容 + log_message = f"{func.__name__} 执行耗时: {elapsed_time:.6f} 秒" + p_logger.info(log_message) + + return func_result + + return wrapper + + +def time_monitor(func): + """ + 返回结果,追加时间 + """ + + def wrapper(*args, **kwargs): + + start_time = time.perf_counter() # 使用 perf_counter 获取高精度计时起点 + + func_result = func(*args, **kwargs) # 执行原函数 + + end_time = time.perf_counter() # 获取计时终点 + elapsed_time = end_time - start_time # 计算执行耗时 + + return elapsed_time, func_result + + return wrapper + + +# 使用装饰器 +@timeit_decorator +def example_function(n): + time.sleep(n) # 假设这是需要计时的函数,这里模拟耗时操作 + return n * 2 + + +def example_function2(n): + time.sleep(n) # 假设这是需要计时的函数,这里模拟耗时操作 + return n * 2 + + +if __name__ == "__main__": + # 调用经过装饰的函数 + # result = example_function(2) + print(time_monitor(example_function2)(2)) diff --git a/Ref_Audio_Selector/config.ini b/Ref_Audio_Selector/config.ini index 81bcb968..0ba2aa29 100644 --- a/Ref_Audio_Selector/config.ini +++ b/Ref_Audio_Selector/config.ini @@ -1,6 
+1,10 @@ # config.ini [Base] +# 函数时间消耗日志打印类型 file 打印到文件; close 关闭 +time_log_print_type = file +# 函数时间消耗日志保存目录路径 +time_log_print_dir = Ref_Audio_Selector/log/performance # 参考音频目录 reference_audio_dir = refer_audio diff --git a/Ref_Audio_Selector/config_param/config_params.py b/Ref_Audio_Selector/config_param/config_params.py index 015a5e71..050bb1d2 100644 --- a/Ref_Audio_Selector/config_param/config_params.py +++ b/Ref_Audio_Selector/config_param/config_params.py @@ -3,6 +3,10 @@ config = config_manager.get_config() # [Base] +# 函数时间消耗日志打印类型 file 打印到文件; close 关闭 +time_log_print_type = config.get_base('time_log_print_type') +# 函数时间消耗日志保存目录路径 +time_log_print_dir = config.get_base('time_log_print_dir') # 参考音频目录 reference_audio_dir = config.get_base('reference_audio_dir') diff --git a/Ref_Audio_Selector/config_param/log_config.py b/Ref_Audio_Selector/config_param/log_config.py new file mode 100644 index 00000000..249eba5c --- /dev/null +++ b/Ref_Audio_Selector/config_param/log_config.py @@ -0,0 +1,39 @@ +import logging +import datetime +import Ref_Audio_Selector.config_param.config_params as params + + +def setup_logging(): + # 获取当前日期,用于文件名和日志内容 + current_date = datetime.datetime.now().strftime('%Y-%m-%d') + + # 创建一个用于常规日志的处理器 + general_handler = logging.FileHandler('general.log', mode='a', encoding='utf-8') + general_handler.setLevel(logging.INFO) + general_formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') + general_handler.setFormatter(general_formatter) + + # 创建一个专用于性能监控日志的处理器 + performance_handler = logging.FileHandler( + f"{params.time_log_print_dir}/{current_date}.log", mode='a', encoding='utf-8') + performance_handler.setLevel(logging.INFO) + performance_formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') + performance_handler.setFormatter(performance_formatter) + + # 配置一个常规的logger + general_logger = logging.getLogger('general') + general_logger.setLevel(logging.INFO) + general_logger.addHandler(general_handler) + + # 配置一个专门用于性能监控的logger + performance_logger = logging.getLogger('performance') + performance_logger.setLevel(logging.INFO) + performance_logger.addHandler(performance_handler) + + # 配置根logger,以防万一 + logging.basicConfig(level=logging.WARNING, handlers=[general_handler]) + + return general_logger, performance_logger + + +logger, p_logger = setup_logging() \ No newline at end of file diff --git a/Ref_Audio_Selector/ref_audio_selector_webui.py b/Ref_Audio_Selector/ref_audio_selector_webui.py index 710aa3c5..0a127858 100644 --- a/Ref_Audio_Selector/ref_audio_selector_webui.py +++ b/Ref_Audio_Selector/ref_audio_selector_webui.py @@ -9,6 +9,8 @@ import Ref_Audio_Selector.tool.delete_inference_with_ref as delete_inference_with_ref import Ref_Audio_Selector.common.common as common import Ref_Audio_Selector.config_param.config_params as params +import Ref_Audio_Selector.common.time_util as time_util +from Ref_Audio_Selector.config_param.log_config import logger from tools.i18n.i18n import I18nAuto from config import python_exec, is_half from tools import my_utils @@ -16,11 +18,11 @@ from subprocess import Popen i18n = I18nAuto() +rw_param = params.config_manager.get_rw_param() p_similarity = None p_asr = None p_text_similarity = None -rw_param = params.config_manager.get_rw_param() # 校验基础信息 @@ -50,10 +52,13 @@ def convert_from_list(text_work_space_dir, text_role, text_list_input): ref_audio_all = os.path.join(base_role_dir, params.list_to_convert_reference_audio_dir) - text_convert_from_list_info = f"转换成功:生成目录{ref_audio_all}" + + 
time_consuming, _ = time_util.time_monitor(audio_similarity.convert_from_list)(text_list_input, ref_audio_all) + + text_convert_from_list_info = f"耗时:{time_consuming:0.1f}秒;转换成功:生成目录{ref_audio_all}" text_sample_dir = ref_audio_all - audio_similarity.convert_from_list(text_list_input, ref_audio_all) + # audio_similarity.convert_from_list(text_list_input, ref_audio_all) except Exception as e: traceback.print_exc() text_convert_from_list_info = f"发生异常:{e}" @@ -113,10 +118,17 @@ def sample(text_work_space_dir, text_role, text_sample_dir, text_base_voice_path raise Exception("每段随机抽样个数不能为空") ref_audio_dir = os.path.join(base_role_dir, params.reference_audio_dir) - text_sample_info = f"抽样成功:生成目录{ref_audio_dir}" - similarity_list, _, _ = start_similarity_analysis(base_role_dir, text_sample_dir, - text_base_voice_path, checkbox_similarity_output) + time_consuming, similarity_list, _, _ = ( + time_util.time_monitor(start_similarity_analysis)(base_role_dir, + text_sample_dir, + text_base_voice_path, + checkbox_similarity_output)) + + text_sample_info = f"耗时:{time_consuming:0.1f}秒;抽样成功:生成目录{ref_audio_dir}" + + # similarity_list, _, _ = start_similarity_analysis(base_role_dir, text_sample_dir, + # text_base_voice_path, checkbox_similarity_output) if similarity_list is None: raise Exception("相似度分析失败") @@ -136,9 +148,9 @@ def sample(text_work_space_dir, text_role, text_sample_dir, text_base_voice_path # 根据参考音频和测试文本,执行批量推理 def model_inference(text_work_space_dir, text_role, text_model_inference_voice_dir, text_url, text_text, text_ref_path, text_ref_text, text_emotion, - text_test_content): - text_work_space_dir, text_model_inference_voice_dir, text_test_content \ - = common.batch_clean_paths([text_work_space_dir, text_model_inference_voice_dir, text_test_content]) + text_test_content_dir): + text_work_space_dir, text_model_inference_voice_dir, text_test_content_dir \ + = common.batch_clean_paths([text_work_space_dir, text_model_inference_voice_dir, text_test_content_dir]) inference_dir = None text_asr_audio_dir = None @@ -151,7 +163,7 @@ def model_inference(text_work_space_dir, text_role, text_model_inference_voice_d raise Exception("推理服务请求地址不能为空") if text_text is None or text_text == '': raise Exception("文本参数名不能为空") - if text_test_content is None or text_test_content == '': + if text_test_content_dir is None or text_test_content_dir == '': raise Exception("待推理文本路径不能为空") if (text_ref_path is None or text_ref_path == '') and (text_ref_text is None or text_ref_text == '') and ( text_emotion is None or text_emotion == ''): @@ -160,18 +172,24 @@ def model_inference(text_work_space_dir, text_role, text_model_inference_voice_d inference_dir = os.path.join(base_role_dir, params.inference_audio_dir) text_asr_audio_dir = os.path.join(inference_dir, params.inference_audio_text_aggregation_dir) - text_model_inference_info = f"推理成功:生成目录{inference_dir}" url_composer = audio_inference.URLComposer(text_url, text_emotion, text_text, text_ref_path, text_ref_text) url_composer.is_valid() - text_list = common.read_text_file_to_list(text_test_content) + text_list = common.read_text_file_to_list(text_test_content_dir) if text_list is None or len(text_list) == 0: raise Exception("待推理文本内容不能为空") ref_audio_manager = common.RefAudioListManager(text_model_inference_voice_dir) if len(ref_audio_manager.get_audio_list()) == 0: raise Exception("待推理的参考音频不能为空") - audio_inference.generate_audio_files(url_composer, text_list, ref_audio_manager.get_ref_audio_list(), - inference_dir) + + time_consuming, _ = 
time_util.time_monitor(audio_inference.generate_audio_files)(url_composer, text_list, + ref_audio_manager.get_ref_audio_list(), + inference_dir) + + text_model_inference_info = f"耗时:{time_consuming:0.1f}秒;推理成功:生成目录{inference_dir}" + + # audio_inference.generate_audio_files(url_composer, text_list, ref_audio_manager.get_ref_audio_list(), + # inference_dir) except Exception as e: traceback.print_exc() text_model_inference_info = f"发生异常:{e}" @@ -198,10 +216,15 @@ def asr(text_work_space_dir, text_role, text_asr_audio_dir, dropdown_asr_model, raise Exception("asr模型大小不能为空") if dropdown_asr_lang is None or dropdown_asr_lang == '': raise Exception("asr语言不能为空") - asr_file = open_asr(text_asr_audio_dir, base_role_dir, dropdown_asr_model, dropdown_asr_size, - dropdown_asr_lang) + + time_consuming, asr_file = time_util.time_monitor(open_asr)(text_asr_audio_dir, base_role_dir, + dropdown_asr_model, dropdown_asr_size, + dropdown_asr_lang) + + # asr_file = open_asr(text_asr_audio_dir, base_role_dir, dropdown_asr_model, dropdown_asr_size, + # dropdown_asr_lang) text_text_similarity_analysis_path = asr_file - text_asr_info = f"asr成功:生成文件{asr_file}" + text_asr_info = f"耗时:{time_consuming:0.1f}秒;asr成功:生成文件{asr_file}" except Exception as e: traceback.print_exc() text_asr_info = f"发生异常:{e}" @@ -253,8 +276,13 @@ def text_similarity_analysis(text_work_space_dir, text_role, if text_text_similarity_analysis_path is None or text_text_similarity_analysis_path == '': raise Exception("asr生成的文件路径不能为空,请先完成上一步操作") similarity_dir = os.path.join(base_role_dir, params.text_similarity_output_dir) - text_text_similarity_analysis_info = f"相似度分析成功:生成目录{similarity_dir}" - open_text_similarity_analysis(text_text_similarity_analysis_path, similarity_dir) + + time_consuming, _ = time_util.time_monitor(open_text_similarity_analysis)(text_text_similarity_analysis_path, + similarity_dir) + + text_text_similarity_analysis_info = f"耗时:{time_consuming:0.1f}秒;相似度分析成功:生成目录{similarity_dir}" + + # open_text_similarity_analysis(text_text_similarity_analysis_path, similarity_dir) except Exception as e: traceback.print_exc() text_text_similarity_analysis_info = f"发生异常:{e}" @@ -293,13 +321,18 @@ def similarity_audio_output(text_work_space_dir, text_role, text_base_audio_path raise Exception("基准音频路径不能为空") if text_compare_audio_dir is None or text_compare_audio_dir == '': raise Exception("待分析的音频所在目录不能为空") - similarity_list, similarity_file, similarity_file_dir = start_similarity_analysis( - base_role_dir, text_compare_audio_dir, text_base_audio_path, True) + + time_consuming, similarity_list, similarity_file, similarity_file_dir \ + = time_util.time_monitor(start_similarity_analysis)(base_role_dir, + text_compare_audio_dir, text_base_audio_path, True) + + # similarity_list, similarity_file, similarity_file_dir = start_similarity_analysis( + # base_role_dir, text_compare_audio_dir, text_base_audio_path, True) if similarity_list is None: raise Exception("相似度分析失败") - text_similarity_audio_output_info = f'相似度分析成功:生成目录{similarity_file_dir},文件{similarity_file}' + text_similarity_audio_output_info = f'耗时:{time_consuming:0.1f}秒;相似度分析成功:生成目录{similarity_file_dir},文件{similarity_file}' except Exception as e: traceback.print_exc() @@ -320,9 +353,13 @@ def sync_ref_audio(text_work_space_dir, text_role, text_sync_ref_audio_dir, raise Exception("参考音频目录不能为空") if text_sync_inference_audio_dir is None or text_sync_inference_audio_dir == '': raise Exception("推理生成的音频目录不能为空") - delete_text_wav_num, delete_emotion_dir_num = 
delete_inference_with_ref.sync_ref_audio(text_sync_ref_audio_dir, - text_sync_inference_audio_dir) - text_sync_ref_audio_info = f"推理音频目录{text_sync_inference_audio_dir}下,text目录删除了{delete_text_wav_num}个参考音频,emotion目录下,删除了{delete_emotion_dir_num}个目录" + time_consuming, delete_text_wav_num, delete_emotion_dir_num \ + = time_util.time_monitor(delete_inference_with_ref.sync_ref_audio)(text_sync_ref_audio_dir, + text_sync_inference_audio_dir) + # delete_text_wav_num, delete_emotion_dir_num = delete_inference_with_ref.sync_ref_audio( + # text_sync_ref_audio_dir, text_sync_inference_audio_dir) + text_sync_ref_audio_info = (f"耗时:{time_consuming:0.1f}秒;推理音频目录{text_sync_inference_audio_dir}下," + f"text目录删除了{delete_text_wav_num}个参考音频,emotion目录下,删除了{delete_emotion_dir_num}个目录") except Exception as e: traceback.print_exc() text_sync_ref_audio_info = f"发生异常:{e}" @@ -343,10 +380,17 @@ def create_config(text_work_space_dir, text_role, text_template, text_sync_ref_a if text_sync_ref_audio_dir2 is None or text_sync_ref_audio_dir2 == '': raise Exception("参考音频目录不能为空") config_file = os.path.join(base_role_dir, f'{params.reference_audio_config_filename}.json') - text_create_config_info = f"配置生成成功:生成文件{config_file}" ref_audio_manager = common.RefAudioListManager(text_sync_ref_audio_dir2) - audio_config.generate_audio_config(base_role_dir, text_template, ref_audio_manager.get_ref_audio_list(), - config_file) + + time_consuming, _ = time_util.time_monitor(audio_config.generate_audio_config)(base_role_dir, text_template, + ref_audio_manager.get_ref_audio_list(), + config_file) + + # audio_config.generate_audio_config(base_role_dir, text_template, ref_audio_manager.get_ref_audio_list(), + # config_file) + + text_create_config_info = f"耗时:{time_consuming:0.1f}秒;配置生成成功:生成文件{config_file}" + except Exception as e: traceback.print_exc() text_create_config_info = f"发生异常:{e}" @@ -514,7 +558,8 @@ def save_role(text_role): button_model_inference.click(model_inference, [text_work_space_dir, text_role, text_model_inference_voice_dir, text_url, text_text, text_ref_path, text_ref_text, text_emotion, - text_test_content], [text_model_inference_info, text_asr_audio_dir, text_sync_inference_audio_dir]) + text_test_content], + [text_model_inference_info, text_asr_audio_dir, text_sync_inference_audio_dir]) app.launch( server_port=9423, diff --git a/Ref_Audio_Selector/tool/asr/funasr_asr_multi_level_dir.py b/Ref_Audio_Selector/tool/asr/funasr_asr_multi_level_dir.py index d6e04c81..43ec04d9 100644 --- a/Ref_Audio_Selector/tool/asr/funasr_asr_multi_level_dir.py +++ b/Ref_Audio_Selector/tool/asr/funasr_asr_multi_level_dir.py @@ -4,6 +4,7 @@ import os import traceback import Ref_Audio_Selector.config_param.config_params as params +from Ref_Audio_Selector.common.time_util import timeit_decorator from tqdm import tqdm from funasr import AutoModel @@ -34,30 +35,7 @@ def only_asr(input_file): return text -def execute_asr(input_folder, output_folder, model_size, language): - input_file_names = os.listdir(input_folder) - input_file_names.sort() - - output = [] - output_file_name = os.path.basename(input_folder) - - for name in tqdm(input_file_names): - try: - text = model.generate(input="%s/%s" % (input_folder, name))[0]["text"] - output.append(f"{input_folder}/{name}|{output_file_name}|{language.upper()}|{text}") - except: - print(traceback.format_exc()) - - output_folder = output_folder or "output/asr_opt" - os.makedirs(output_folder, exist_ok=True) - output_file_path = os.path.abspath(f'{output_folder}/{output_file_name}.list') - - with 
open(output_file_path, "w", encoding="utf-8") as f: - f.write("\n".join(output)) - print(f"ASR 任务完成->标注文件路径: {output_file_path}\n") - return output_file_path - - +@timeit_decorator def execute_asr_multi_level_dir(input_folder, output_folder, model_size, language): output = [] output_file_name = os.path.basename(input_folder) diff --git a/Ref_Audio_Selector/tool/audio_inference.py b/Ref_Audio_Selector/tool/audio_inference.py index a93328d9..a5cc7e5d 100644 --- a/Ref_Audio_Selector/tool/audio_inference.py +++ b/Ref_Audio_Selector/tool/audio_inference.py @@ -2,6 +2,7 @@ import requests import itertools import Ref_Audio_Selector.config_param.config_params as params +from Ref_Audio_Selector.common.time_util import timeit_decorator from urllib.parse import urlparse, parse_qs, urlencode, urlunparse, quote @@ -72,6 +73,7 @@ def safe_encode_query_params(original_url): return encoded_url +@timeit_decorator def generate_audio_files(url_composer, text_list, emotion_list, output_dir_path): # Ensure the output directory exists output_dir = os.path.abspath(output_dir_path) diff --git a/Ref_Audio_Selector/tool/audio_similarity.py b/Ref_Audio_Selector/tool/audio_similarity.py index afb6a52c..3c6fe1cc 100644 --- a/Ref_Audio_Selector/tool/audio_similarity.py +++ b/Ref_Audio_Selector/tool/audio_similarity.py @@ -6,11 +6,9 @@ def check_audio_duration(path, min_duration=3, max_duration=10): try: - # 加载音频文件 - audio, sample_rate = librosa.load(path) - # 计算音频的时长(单位:秒) - duration = librosa.get_duration(y=audio, sr=sample_rate) + # 直接计算音频文件的时长(单位:秒) + duration = librosa.get_duration(filename=path) # 判断时长是否在3s至10s之间 if min_duration <= duration <= max_duration: diff --git a/Ref_Audio_Selector/tool/speaker_verification/voice_similarity.py b/Ref_Audio_Selector/tool/speaker_verification/voice_similarity.py index 8a97d9b8..e4a5ed93 100644 --- a/Ref_Audio_Selector/tool/speaker_verification/voice_similarity.py +++ b/Ref_Audio_Selector/tool/speaker_verification/voice_similarity.py @@ -1,5 +1,6 @@ import argparse import os +from Ref_Audio_Selector.common.time_util import timeit_decorator from modelscope.pipelines import pipeline @@ -10,6 +11,7 @@ ) +@timeit_decorator def compare_audio_and_generate_report(reference_audio_path, comparison_dir_path, output_file_path): # Step 1: 获取比较音频目录下所有音频文件的路径 comparison_audio_paths = [os.path.join(comparison_dir_path, f) for f in os.listdir(comparison_dir_path) if diff --git a/Ref_Audio_Selector/tool/text_comparison/asr_text_process.py b/Ref_Audio_Selector/tool/text_comparison/asr_text_process.py index 79e2f1ff..486391e1 100644 --- a/Ref_Audio_Selector/tool/text_comparison/asr_text_process.py +++ b/Ref_Audio_Selector/tool/text_comparison/asr_text_process.py @@ -2,6 +2,7 @@ import argparse from collections import defaultdict from operator import itemgetter +from Ref_Audio_Selector.common.time_util import timeit_decorator import Ref_Audio_Selector.tool.text_comparison.text_comparison as text_comparison import Ref_Audio_Selector.config_param.config_params as params import Ref_Audio_Selector.common.common as common @@ -88,6 +89,7 @@ def format_list_to_text(data_list, output_filename): output_file.write(formatted_line) +@timeit_decorator def process(asr_file_path, output_dir, similarity_enlarge_boundary): # 检查输出目录是否存在,如果不存在则创建 if not os.path.exists(output_dir): From 878fef248ad56c827e3b5d320a8ce3dcb703b270 Mon Sep 17 00:00:00 2001 From: Downupanddownup Date: Fri, 26 Apr 2024 14:16:16 +0800 Subject: [PATCH 19/72] =?UTF-8?q?bug=E4=BF=AE=E5=A4=8D?= MIME-Version: 1.0 Content-Type: text/plain; 
charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../ref_audio_selector_webui.py | 2 +- Ref_Audio_Selector/tool/audio_inference.py | 26 +++++-- Ref_Audio_Selector/tool/audio_similarity.py | 2 +- .../tool/text_comparison/asr_text_process.py | 26 ++++++- .../tool/text_comparison/text_comparison.py | 70 +++++++++++++++++++ 5 files changed, 114 insertions(+), 12 deletions(-) diff --git a/Ref_Audio_Selector/ref_audio_selector_webui.py b/Ref_Audio_Selector/ref_audio_selector_webui.py index 0a127858..e3cead2b 100644 --- a/Ref_Audio_Selector/ref_audio_selector_webui.py +++ b/Ref_Audio_Selector/ref_audio_selector_webui.py @@ -289,7 +289,7 @@ def text_similarity_analysis(text_work_space_dir, text_role, return i18n(text_text_similarity_analysis_info) -def open_text_similarity_analysis(asr_file_path, output_dir, similarity_enlarge_boundary=0.8): +def open_text_similarity_analysis(asr_file_path, output_dir, similarity_enlarge_boundary=0.9): global p_text_similarity if p_text_similarity is None: cmd = f'"{python_exec}" Ref_Audio_Selector/tool/text_comparison/asr_text_process.py ' diff --git a/Ref_Audio_Selector/tool/audio_inference.py b/Ref_Audio_Selector/tool/audio_inference.py index a5cc7e5d..39cc2c6f 100644 --- a/Ref_Audio_Selector/tool/audio_inference.py +++ b/Ref_Audio_Selector/tool/audio_inference.py @@ -85,19 +85,15 @@ def generate_audio_files(url_composer, text_list, emotion_list, output_dir_path) emotion_subdir = os.path.join(output_dir, params.inference_audio_emotion_aggregation_dir) os.makedirs(emotion_subdir, exist_ok=True) + all_count = len(text_list) * len(emotion_list) + has_generated_count = 0 + # 计算笛卡尔积 cartesian_product = list(itertools.product(text_list, emotion_list)) for text, emotion in cartesian_product: # Generate audio byte stream using the create_audio function - if url_composer.is_emotion(): - real_url = url_composer.build_url_with_emotion(text, emotion['emotion']) - else: - real_url = url_composer.build_url_with_ref(text, emotion['ref_path'], emotion['ref_text']) - - audio_bytes = inference_audio_from_api(real_url) - emotion_name = emotion['emotion'] text_subdir_text = os.path.join(text_subdir, text) @@ -108,12 +104,28 @@ def generate_audio_files(url_composer, text_list, emotion_list, output_dir_path) os.makedirs(emotion_subdir_emotion, exist_ok=True) emotion_subdir_emotion_file_path = os.path.join(emotion_subdir_emotion, text + '.wav') + # 检查是否已经存在对应的音频文件,如果存在则跳过 + if os.path.exists(text_subdir_text_file_path) and os.path.exists(emotion_subdir_emotion_file_path): + has_generated_count += 1 + print(f"进度: {has_generated_count}/{all_count}") + continue + + if url_composer.is_emotion(): + real_url = url_composer.build_url_with_emotion(text, emotion['emotion']) + else: + real_url = url_composer.build_url_with_ref(text, emotion['ref_path'], emotion['ref_text']) + + audio_bytes = inference_audio_from_api(real_url) + # Write audio bytes to the respective files with open(text_subdir_text_file_path, 'wb') as f: f.write(audio_bytes) with open(emotion_subdir_emotion_file_path, 'wb') as f: f.write(audio_bytes) + has_generated_count += 1 + print(f"进度: {has_generated_count}/{all_count}") + def inference_audio_from_api(url): # 发起GET请求 diff --git a/Ref_Audio_Selector/tool/audio_similarity.py b/Ref_Audio_Selector/tool/audio_similarity.py index 3c6fe1cc..658251f9 100644 --- a/Ref_Audio_Selector/tool/audio_similarity.py +++ b/Ref_Audio_Selector/tool/audio_similarity.py @@ -142,7 +142,7 @@ def copy_and_move(output_audio_directory, similarity_scores): for item in similarity_scores: # 构造新的文件名 
base_name = os.path.basename(item['wav_path'])[:-4] # 去掉.wav扩展名 - new_name = f"{item['score']}-{base_name}.wav" + new_name = f"{item['score']*10000:04.0f}-{base_name}.wav" # 新文件的完整路径 new_path = os.path.join(output_audio_directory, new_name) diff --git a/Ref_Audio_Selector/tool/text_comparison/asr_text_process.py b/Ref_Audio_Selector/tool/text_comparison/asr_text_process.py index 486391e1..585df909 100644 --- a/Ref_Audio_Selector/tool/text_comparison/asr_text_process.py +++ b/Ref_Audio_Selector/tool/text_comparison/asr_text_process.py @@ -33,10 +33,16 @@ def parse_asr_file(file_path): return output +@timeit_decorator def calculate_similarity_and_append_to_list(input_list, boundary): + all_count = len(input_list) + has_been_processed_count = 0 for item in input_list: - _, similarity_score = text_comparison.calculate_result(item['original_text'], item['asr_text'], boundary) + original_score, similarity_score = text_comparison.calculate_result(item['original_text'], item['asr_text'], boundary) item['similarity_score'] = similarity_score + item['original_score'] = original_score + has_been_processed_count += 1 + print(f'进度:{has_been_processed_count}/{all_count}') return input_list @@ -79,13 +85,27 @@ def group_and_sort_by_field(data, group_by_field): def format_list_to_text(data_list, output_filename): with open(output_filename, 'w', encoding='utf-8') as output_file: + output_file.write('放大后的相似度分值|原始分值|ASR文本|原文文本\n') + for key, items in data_list: + # 写入情绪标题 + output_file.write(key + '\n') + + # 写入每条记录 + for item in items: + formatted_line = f"{item['similarity_score']}|{item['original_score']}|{item['asr_text']}|{item['original_text']}\n" + output_file.write(formatted_line) + + +def format_list_to_emotion(data_list, output_filename): + with open(output_filename, 'w', encoding='utf-8') as output_file: + output_file.write('放大后的相似度分值|原始分值|ASR文本|情绪类型\n') for key, items in data_list: # 写入情绪标题 output_file.write(key + '\n') # 写入每条记录 for item in items: - formatted_line = f"{item['similarity_score']}|{item['original_text']}|{item['asr_text']}\n" + formatted_line = f"{item['similarity_score']}|{item['original_score']}|{item['asr_text']}|{item['emotion']}\n" output_file.write(formatted_line) @@ -113,7 +133,7 @@ def process(asr_file_path, output_dir, similarity_enlarge_boundary): original_text_detail_list = group_and_sort_by_field(records, 'original_text') original_text_detail_file = os.path.join(output_dir, f'{params.text_similarity_by_text_detail_filename}.txt') - format_list_to_text(original_text_detail_list, original_text_detail_file) + format_list_to_emotion(original_text_detail_list, original_text_detail_file) print('文本相似度分析完成。') diff --git a/Ref_Audio_Selector/tool/text_comparison/text_comparison.py b/Ref_Audio_Selector/tool/text_comparison/text_comparison.py index 5a33776a..edb0c7d1 100644 --- a/Ref_Audio_Selector/tool/text_comparison/text_comparison.py +++ b/Ref_Audio_Selector/tool/text_comparison/text_comparison.py @@ -51,3 +51,73 @@ def calculate_result(t1, t2, boundary): return similarity_score2, adjusted_similarity_score2 +def print_result(t1, t2, boundary): + print(f't2: {t2}') + # 计算并打印相似度 + similarity_score2 = calculate_similarity(t1, t2) + print(f"两句话的相似度为: {similarity_score2:.4f}") + + # 调整相似度 + adjusted_similarity_score2 = adjusted_similarity(similarity_score2, boundary) + print(f"调整后的相似度为: {adjusted_similarity_score2:.4f}") + + +def test(boundary): + # 原始文本 + text1 = "这是第一个句子" + list = """ + 这是第一个句子 + 这是第二个句子。 + 那么,这是第三个表达。 + 当前呈现的是第四个句子。 + 接下来,我们有第五句话。 + 在此,展示第六条陈述。 + 继续下去,这是第七个短句。 + 
不容忽视的是第八个表述。 + 顺延着序列,这是第九句。 + 此处列举的是第十个说法。 + 进入新的篇章,这是第十一个句子。 + 下一段内容即为第十二个句子。 + 显而易见,这是第十三个叙述。 + 渐进地,我们来到第十四句话。 + 向下滚动,您会看到第十五个表达。 + 此刻,呈现在眼前的是第十六个句子。 + 它们中的一个——第十七个句子在此。 + 如同链条般连接,这是第十八个断言。 + 按照顺序排列,接下来是第十九个话语。 + 逐一列举,这是第二十个陈述句。 + 结构相似,本例给出第二十一个实例句。 + 这是最初的陈述句。 + 首先表达的是这一个句子。 + 第一句内容即为此处所示。 + 这是起始的叙述段落。 + 开篇所展示的第一句话就是这个。 + 明媚的阳光洒满大地 + 窗外飘落粉色樱花瓣 + 笔尖轻触纸面思绪万千 + 深夜的月光如水般静谧 + 穿越丛林的小径蜿蜒曲折 + 浅酌清茶品味人生百态 + 破晓时分雄鸡一唱天下白 + 草原上奔驰的骏马无拘无束 + 秋叶纷飞描绘季节更替画卷 + 寒冬雪夜炉火旁围坐共话家常 + kszdRjYXw + pfsMgTlVHnB + uQaGxIbWz + ZtqNhPmKcOe + jfyrXsStVUo + wDiEgLkZbn + yhNvAfUmqC + TpKjxMrWgs + eBzHUaFJtYd + oQnXcVSiPkL + 00000 + """ + list2 = list.strip().split('\n') + for item in list2: + print_result(text1, item, boundary) + + +if __name__ == '__main__': + test(0.9) From 684e1cfd2f0dd6b8e8e389b36ae2ebd6d13a5a97 Mon Sep 17 00:00:00 2001 From: Downupanddownup Date: Fri, 26 Apr 2024 14:31:54 +0800 Subject: [PATCH 20/72] =?UTF-8?q?=E6=96=87=E6=9C=AC=E7=9B=B8=E4=BC=BC?= =?UTF-8?q?=E5=BA=A6=EF=BC=8C=E6=B7=BB=E5=8A=A0GPU=E5=8A=A0=E9=80=9F?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../tool/text_comparison/text_comparison.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/Ref_Audio_Selector/tool/text_comparison/text_comparison.py b/Ref_Audio_Selector/tool/text_comparison/text_comparison.py index edb0c7d1..156fa53c 100644 --- a/Ref_Audio_Selector/tool/text_comparison/text_comparison.py +++ b/Ref_Audio_Selector/tool/text_comparison/text_comparison.py @@ -8,14 +8,19 @@ "bert_path", "GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large" ) +# Set device to GPU if available, else CPU +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + +print(f'使用计算设备: {device}') + tokenizer = AutoTokenizer.from_pretrained(bert_path) -model = AutoModel.from_pretrained(bert_path) +model = AutoModel.from_pretrained(bert_path).to(device) def calculate_similarity(text1, text2, max_length=512): # 预处理文本,设置最大长度 - inputs1 = tokenizer(text1, padding=True, truncation=True, max_length=max_length, return_tensors='pt') - inputs2 = tokenizer(text2, padding=True, truncation=True, max_length=max_length, return_tensors='pt') + inputs1 = tokenizer(text1, padding=True, truncation=True, max_length=max_length, return_tensors='pt').to(device) + inputs2 = tokenizer(text2, padding=True, truncation=True, max_length=max_length, return_tensors='pt').to(device) # 获取句子向量(这里是取CLS token的向量并展平为一维) with torch.no_grad(): From ca9ffbf98e89ef3c41ab830c28da755b104b8f03 Mon Sep 17 00:00:00 2001 From: Downupanddownup Date: Fri, 26 Apr 2024 15:00:34 +0800 Subject: [PATCH 21/72] =?UTF-8?q?=E9=9F=B3=E9=A2=91=E7=9B=B8=E4=BC=BC?= =?UTF-8?q?=E5=BA=A6=E6=AF=94=E8=BE=83=EF=BC=8C=E6=B7=BB=E5=8A=A0=E5=8F=82?= =?UTF-8?q?=E8=80=83=E9=9F=B3=E9=A2=91=E7=9A=84=E9=A2=84=E9=87=87=E6=A0=B7?= =?UTF-8?q?=E6=AD=A5=E9=AA=A4?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- Ref_Audio_Selector/config.ini | 2 + .../config_param/config_params.py | 2 + .../speaker_verification/voice_similarity.py | 59 ++++++++++++++++++- 3 files changed, 62 insertions(+), 1 deletion(-) diff --git a/Ref_Audio_Selector/config.ini b/Ref_Audio_Selector/config.ini index 0ba2aa29..26b47e95 100644 --- a/Ref_Audio_Selector/config.ini +++ b/Ref_Audio_Selector/config.ini @@ -7,6 +7,8 @@ time_log_print_type = file time_log_print_dir = Ref_Audio_Selector/log/performance # 参考音频目录 reference_audio_dir = refer_audio +# 临时文件目录 +temp_dir = Ref_Audio_Selector/temp [AudioSample] # list转换待选参考音频目录 
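
The pre-resampling step introduced by this patch works as follows: the speaker-verification model is trained on 16 kHz audio, so the reference audio is converted once up front and cached under the new `temp_dir`, rather than being resampled on every comparison. A minimal, self-contained sketch of that check-and-resample pattern, assuming `torchaudio` is available (the function name `resample_to_16k` is illustrative only; the patch's own implementation is the `ensure_16k_wav` function in the `voice_similarity.py` diffs that follow):

```python
import os
import torchaudio
import torchaudio.transforms as T

def resample_to_16k(audio_path, temp_dir="Ref_Audio_Selector/temp", target_sr=16000):
    # Load the audio and inspect its native sample rate
    waveform, sr = torchaudio.load(audio_path)
    if sr == target_sr:
        # Already 16 kHz: reuse the original file as-is
        return audio_path
    # Resample once and cache the result under the temp directory
    resampled = T.Resample(orig_freq=sr, new_freq=target_sr)(waveform)
    os.makedirs(temp_dir, exist_ok=True)
    out_path = os.path.join(temp_dir, os.path.basename(audio_path))
    torchaudio.save(out_path, resampled, target_sr)
    return out_path
```

Returning the original path when the file is already 16 kHz avoids an unnecessary copy, which is why a cached file only appears in `temp_dir` for non-16 kHz sources.
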
diff --git a/Ref_Audio_Selector/config_param/config_params.py b/Ref_Audio_Selector/config_param/config_params.py index 050bb1d2..9c585d48 100644 --- a/Ref_Audio_Selector/config_param/config_params.py +++ b/Ref_Audio_Selector/config_param/config_params.py @@ -9,6 +9,8 @@ time_log_print_dir = config.get_base('time_log_print_dir') # 参考音频目录 reference_audio_dir = config.get_base('reference_audio_dir') +# 临时文件目录 +temp_dir = config.get_base('temp_dir') # [AudioSample] # list转换待选参考音频目录 diff --git a/Ref_Audio_Selector/tool/speaker_verification/voice_similarity.py b/Ref_Audio_Selector/tool/speaker_verification/voice_similarity.py index e4a5ed93..f30ace28 100644 --- a/Ref_Audio_Selector/tool/speaker_verification/voice_similarity.py +++ b/Ref_Audio_Selector/tool/speaker_verification/voice_similarity.py @@ -1,5 +1,9 @@ import argparse import os +import soundfile as sf +import torchaudio +import torchaudio.transforms as T +import Ref_Audio_Selector.config_param.config_params as params from Ref_Audio_Selector.common.time_util import timeit_decorator from modelscope.pipelines import pipeline @@ -17,10 +21,13 @@ def compare_audio_and_generate_report(reference_audio_path, comparison_dir_path, comparison_audio_paths = [os.path.join(comparison_dir_path, f) for f in os.listdir(comparison_dir_path) if f.endswith('.wav')] + # 因为这个模型是基于16k音频数据训练的,为了避免后续比较时,每次都对参考音频进行重采样,所以,提前进行了采样 + reference_audio_16k = ensure_16k_wav(reference_audio_path) + # Step 2: 用参考音频依次比较音频目录下的每个音频,获取相似度分数及对应路径 similarity_scores = [] for audio_path in comparison_audio_paths: - score = sv_pipeline([reference_audio_path, audio_path])['score'] + score = sv_pipeline([reference_audio_16k, audio_path])['score'] similarity_scores.append({ 'score': score, 'path': audio_path @@ -42,6 +49,56 @@ def compare_audio_and_generate_report(reference_audio_path, comparison_dir_path, f.write(content) +def ensure_16k_wav(audio_file_path, target_sample_rate=16000): + # 读取音频文件信息 + sample_rate, audio_data = sf.read(audio_file_path) + + # 检查采样率是否为16kHz + if sample_rate == target_sample_rate: + # 是16kHz采样率,直接返回原始文件路径 + return audio_file_path + + # 设置临时文件名 + temp_file_path = os.path.join(params.temp_dir, os.path.basename(audio_file_path)) + + # 重采样至16kHz并保存到临时文件 + sf.write(temp_file_path, audio_data, samplerate=target_sample_rate, format="WAV") + + return temp_file_path + + +def ensure_16k_wav_2(audio_file_path, target_sample_rate=16000): + """ + 输入一个音频文件地址,判断其采样率并决定是否进行重采样,然后将结果保存到指定的输出文件。 + + 参数: + audio_file_path (str): 音频文件路径。 + output_file_path (str): 保存重采样后音频数据的目标文件路径。 + target_sample_rate (int, optional): 目标采样率,默认为16000Hz。 + """ + # 读取音频文件并获取其采样率 + waveform, sample_rate = torchaudio.load(audio_file_path) + + # 判断是否需要重采样 + if sample_rate == target_sample_rate: + return audio_file_path + else: + + # 创建Resample实例 + resampler = T.Resample(orig_freq=sample_rate, new_freq=target_sample_rate) + + # 应用重采样 + resampled_waveform = resampler(waveform) + + # 设置临时文件名 + temp_file_path = os.path.join(params.temp_dir, os.path.basename(audio_file_path)) + + # 保存重采样后的音频到指定文件 + torchaudio.save(temp_file_path, resampled_waveform, target_sample_rate) + + return temp_file_path + + def parse_arguments(): parser = argparse.ArgumentParser(description="Audio processing script arguments") From e3e47d2c0627e31f6b52e63ee79449eb981c50d3 Mon Sep 17 00:00:00 2001 From: Downupanddownup Date: Fri, 26 Apr 2024 15:08:33 +0800 Subject: [PATCH 22/72] =?UTF-8?q?=E9=9F=B3=E9=A2=91=E7=9B=B8=E4=BC=BC?= =?UTF-8?q?=E5=BA=A6=E6=AF=94=E8=BE=83=EF=BC=8C=E6=B7=BB=E5=8A=A0=E5=8F=82?= 
=?UTF-8?q?=E8=80=83=E9=9F=B3=E9=A2=91=E7=9A=84=E9=A2=84=E9=87=87=E6=A0=B7?= =?UTF-8?q?=E6=AD=A5=E9=AA=A4?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../speaker_verification/voice_similarity.py | 25 +++++-------------- 1 file changed, 6 insertions(+), 19 deletions(-) diff --git a/Ref_Audio_Selector/tool/speaker_verification/voice_similarity.py b/Ref_Audio_Selector/tool/speaker_verification/voice_similarity.py index f30ace28..1252c835 100644 --- a/Ref_Audio_Selector/tool/speaker_verification/voice_similarity.py +++ b/Ref_Audio_Selector/tool/speaker_verification/voice_similarity.py @@ -50,24 +50,6 @@ def compare_audio_and_generate_report(reference_audio_path, comparison_dir_path, def ensure_16k_wav(audio_file_path, target_sample_rate=16000): - # 读取音频文件信息 - sample_rate, audio_data = sf.read(audio_file_path) - - # 检查采样率是否为16kHz - if sample_rate == target_sample_rate: - # 是16kHz采样率,直接返回原始文件路径 - return audio_file_path - - # 设置临时文件名 - temp_file_path = os.path.join(params.temp_dir, os.path.basename(audio_file_path)) - - # 重采样至16kHz并保存到临时文件 - sf.write(temp_file_path, audio_data, samplerate=target_sample_rate, format="WAV") - - return temp_file_path - - -def ensure_16k_wav_2(audio_file_path, target_sample_rate=16000): """ 输入一个音频文件地址,判断其采样率并决定是否进行重采样,然后将结果保存到指定的输出文件。 @@ -119,9 +101,14 @@ def parse_arguments(): if __name__ == '__main__': cmd = parse_arguments() - print(cmd) compare_audio_and_generate_report( reference_audio_path=cmd.reference_audio, comparison_dir_path=cmd.comparison_dir, output_file_path=cmd.output_file, ) + + # compare_audio_and_generate_report( + # reference_audio_path="D:/tt/渡鸦/refer_audio_all/也对,你的身份和我们不同吗?.wav", + # comparison_dir_path='D:/tt/渡鸦/refer_audio_all', + # output_file_path='D:/tt/渡鸦/test.txt', + # ) From a291629438f5d8362158f5b17fefce27495249a0 Mon Sep 17 00:00:00 2001 From: Downupanddownup Date: Fri, 26 Apr 2024 15:37:58 +0800 Subject: [PATCH 23/72] =?UTF-8?q?=E9=9F=B3=E9=A2=91=E7=9B=B8=E4=BC=BC?= =?UTF-8?q?=E5=BA=A6=E6=AF=94=E8=BE=83=EF=BC=8C=E6=B7=BB=E5=8A=A0=E5=8F=82?= =?UTF-8?q?=E8=80=83=E9=9F=B3=E9=A2=91=E7=9A=84=E9=A2=84=E9=87=87=E6=A0=B7?= =?UTF-8?q?=E6=AD=A5=E9=AA=A4?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../speaker_verification/voice_similarity.py | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/Ref_Audio_Selector/tool/speaker_verification/voice_similarity.py b/Ref_Audio_Selector/tool/speaker_verification/voice_similarity.py index 1252c835..568c5e2d 100644 --- a/Ref_Audio_Selector/tool/speaker_verification/voice_similarity.py +++ b/Ref_Audio_Selector/tool/speaker_verification/voice_similarity.py @@ -1,8 +1,8 @@ import argparse import os -import soundfile as sf import torchaudio import torchaudio.transforms as T +import platform import Ref_Audio_Selector.config_param.config_params as params from Ref_Audio_Selector.common.time_util import timeit_decorator @@ -21,8 +21,15 @@ def compare_audio_and_generate_report(reference_audio_path, comparison_dir_path, comparison_audio_paths = [os.path.join(comparison_dir_path, f) for f in os.listdir(comparison_dir_path) if f.endswith('.wav')] - # 因为这个模型是基于16k音频数据训练的,为了避免后续比较时,每次都对参考音频进行重采样,所以,提前进行了采样 - reference_audio_16k = ensure_16k_wav(reference_audio_path) + if platform.system() == 'Windows': + # 因为这个模型是基于16k音频数据训练的,为了避免后续比较时,每次都对参考音频进行重采样,所以,提前进行了采样 + # windows不支持torchaudio.sox_effects.apply_effects_tensor,所以改写了依赖文件中的重采样方法 + # 
改用torchaudio.transforms.Resample进行重采样,如果在非windows环境下,没有更改依赖包的采样方法的话, + # 使用这段代码进行预采样会出现因为采样方法不同,而导致的模型相似度计算不准确的问题 + # 当然如果在windows下,使用了其他的采样方法,也会出现不准确的问题 + reference_audio_16k = ensure_16k_wav(reference_audio_path) + else: + reference_audio_16k = reference_audio_path # Step 2: 用参考音频依次比较音频目录下的每个音频,获取相似度分数及对应路径 similarity_scores = [] @@ -72,6 +79,9 @@ def ensure_16k_wav(audio_file_path, target_sample_rate=16000): # 应用重采样 resampled_waveform = resampler(waveform) + # 创建临时文件夹 + os.makedirs(params.temp_dir, exist_ok=True) + # 设置临时文件名 temp_file_path = os.path.join(params.temp_dir, os.path.basename(audio_file_path)) From 64cc2fd9d1effee00d796506af6affa237f35e4d Mon Sep 17 00:00:00 2001 From: Downupanddownup Date: Fri, 26 Apr 2024 16:18:40 +0800 Subject: [PATCH 24/72] =?UTF-8?q?=E5=B0=86=E6=89=93=E5=8D=B0=E4=BF=A1?= =?UTF-8?q?=E6=81=AF=EF=BC=8C=E6=94=B9=E7=94=B1=E6=97=A5=E5=BF=97=E8=BE=93?= =?UTF-8?q?=E5=87=BA?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- Ref_Audio_Selector/common/common.py | 6 +-- Ref_Audio_Selector/config.ini | 14 ++++-- .../config_param/config_manager.py | 3 ++ .../config_param/config_params.py | 14 ++++-- Ref_Audio_Selector/config_param/log_config.py | 45 +++++++++++++------ .../ref_audio_selector_webui.py | 22 ++++----- .../tool/asr/funasr_asr_multi_level_dir.py | 7 +-- Ref_Audio_Selector/tool/audio_inference.py | 7 +-- Ref_Audio_Selector/tool/audio_similarity.py | 23 +++++----- .../tool/delete_inference_with_ref.py | 9 ++-- .../speaker_verification/voice_similarity.py | 6 ++- .../tool/text_comparison/asr_text_process.py | 5 ++- .../tool/text_comparison/text_comparison.py | 4 +- 13 files changed, 104 insertions(+), 61 deletions(-) diff --git a/Ref_Audio_Selector/common/common.py b/Ref_Audio_Selector/common/common.py index b051b222..0a94a6d9 100644 --- a/Ref_Audio_Selector/common/common.py +++ b/Ref_Audio_Selector/common/common.py @@ -1,5 +1,5 @@ from tools import my_utils -import glob +from Ref_Audio_Selector.config_param.log_config import logger import os @@ -102,9 +102,9 @@ def write_text_to_file(text, output_file_path): with open(output_file_path, 'w', encoding='utf-8') as file: file.write(text) except IOError as e: - print(f"Error occurred while writing to the file: {e}") + logger.info(f"Error occurred while writing to the file: {e}") else: - print(f"Text successfully written to file: {output_file_path}") + logger.info(f"Text successfully written to file: {output_file_path}") if __name__ == '__main__': diff --git a/Ref_Audio_Selector/config.ini b/Ref_Audio_Selector/config.ini index 26b47e95..7507853b 100644 --- a/Ref_Audio_Selector/config.ini +++ b/Ref_Audio_Selector/config.ini @@ -1,15 +1,21 @@ # config.ini [Base] -# 函数时间消耗日志打印类型 file 打印到文件; close 关闭 -time_log_print_type = file -# 函数时间消耗日志保存目录路径 -time_log_print_dir = Ref_Audio_Selector/log/performance # 参考音频目录 reference_audio_dir = refer_audio # 临时文件目录 temp_dir = Ref_Audio_Selector/temp +[Log] +# 日志保存目录路径 +log_dir = Ref_Audio_Selector/log/general +# 日志级别 CRITICAL、FATAL、ERROR、WARNING、WARN、INFO、DEBUG、NOTSET、 +log_level = INFO +# 函数时间消耗日志打印类型 file 打印到文件; close 关闭 +time_log_print_type = file +# 函数时间消耗日志保存目录路径 +time_log_print_dir = Ref_Audio_Selector/log/performance + [AudioSample] # list转换待选参考音频目录 list_to_convert_reference_audio_dir = refer_audio_all diff --git a/Ref_Audio_Selector/config_param/config_manager.py b/Ref_Audio_Selector/config_param/config_manager.py index 4183f2a0..ea7af4a6 100644 --- a/Ref_Audio_Selector/config_param/config_manager.py +++ 
b/Ref_Audio_Selector/config_param/config_manager.py @@ -33,6 +33,9 @@ def __init__(self): def get_base(self, key): return self.config.get('Base', key) + def get_log(self, key): + return self.config.get('Log', key) + def get_audio_sample(self, key): return self.config.get('AudioSample', key) diff --git a/Ref_Audio_Selector/config_param/config_params.py b/Ref_Audio_Selector/config_param/config_params.py index 9c585d48..de12d722 100644 --- a/Ref_Audio_Selector/config_param/config_params.py +++ b/Ref_Audio_Selector/config_param/config_params.py @@ -3,15 +3,21 @@ config = config_manager.get_config() # [Base] -# 函数时间消耗日志打印类型 file 打印到文件; close 关闭 -time_log_print_type = config.get_base('time_log_print_type') -# 函数时间消耗日志保存目录路径 -time_log_print_dir = config.get_base('time_log_print_dir') # 参考音频目录 reference_audio_dir = config.get_base('reference_audio_dir') # 临时文件目录 temp_dir = config.get_base('temp_dir') +# [Log] +# 日志保存目录路径 +log_dir = config.get_log('log_dir') +# 日志级别 CRITICAL、FATAL、ERROR、WARNING、WARN、INFO、DEBUG、NOTSET、 +log_level = config.get_log('log_level') +# 函数时间消耗日志打印类型 file 打印到文件; close 关闭 +time_log_print_type = config.get_log('time_log_print_type') +# 函数时间消耗日志保存目录路径 +time_log_print_dir = config.get_log('time_log_print_dir') + # [AudioSample] # list转换待选参考音频目录 list_to_convert_reference_audio_dir = config.get_audio_sample('list_to_convert_reference_audio_dir') diff --git a/Ref_Audio_Selector/config_param/log_config.py b/Ref_Audio_Selector/config_param/log_config.py index 249eba5c..b10e5c96 100644 --- a/Ref_Audio_Selector/config_param/log_config.py +++ b/Ref_Audio_Selector/config_param/log_config.py @@ -3,37 +3,56 @@ import Ref_Audio_Selector.config_param.config_params as params -def setup_logging(): +def create_general_logger(): # 获取当前日期,用于文件名和日志内容 current_date = datetime.datetime.now().strftime('%Y-%m-%d') + # 创建一个用于控制台输出的处理器,并设置日志级别 + console_handler = logging.StreamHandler() + # console_handler.setLevel(logging.INFO) + # 可以设置控制台输出的格式 + console_formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') + console_handler.setFormatter(console_formatter) + # 创建一个用于常规日志的处理器 - general_handler = logging.FileHandler('general.log', mode='a', encoding='utf-8') - general_handler.setLevel(logging.INFO) + general_handler = logging.FileHandler(f"{params.log_dir}/{current_date}.log", mode='a', encoding='utf-8') + # general_handler.setLevel(logging.INFO) general_formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') general_handler.setFormatter(general_formatter) + # 配置一个常规的logger + general_logger = logging.getLogger('general') + level = logging.getLevelName(params.log_level) + general_logger.setLevel(level) + general_logger.addHandler(console_handler) + general_logger.addHandler(general_handler) + + # 配置根logger,以防万一 + logging.basicConfig(level=logging.WARNING, handlers=[general_handler]) + + return general_logger + + +def create_performance_logger(): + # 获取当前日期,用于文件名和日志内容 + current_date = datetime.datetime.now().strftime('%Y-%m-%d') # 创建一个专用于性能监控日志的处理器 performance_handler = logging.FileHandler( f"{params.time_log_print_dir}/{current_date}.log", mode='a', encoding='utf-8') - performance_handler.setLevel(logging.INFO) + # performance_handler.setLevel(logging.INFO) performance_formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') performance_handler.setFormatter(performance_formatter) - # 配置一个常规的logger - general_logger = logging.getLogger('general') - general_logger.setLevel(logging.INFO) - general_logger.addHandler(general_handler) - # 
配置一个专门用于性能监控的logger performance_logger = logging.getLogger('performance') performance_logger.setLevel(logging.INFO) performance_logger.addHandler(performance_handler) - # 配置根logger,以防万一 - logging.basicConfig(level=logging.WARNING, handlers=[general_handler]) + return performance_logger - return general_logger, performance_logger + +def setup_logging(): + return create_general_logger(), create_performance_logger() -logger, p_logger = setup_logging() \ No newline at end of file +logger, p_logger = setup_logging() diff --git a/Ref_Audio_Selector/ref_audio_selector_webui.py b/Ref_Audio_Selector/ref_audio_selector_webui.py index e3cead2b..4569dd27 100644 --- a/Ref_Audio_Selector/ref_audio_selector_webui.py +++ b/Ref_Audio_Selector/ref_audio_selector_webui.py @@ -60,7 +60,7 @@ def convert_from_list(text_work_space_dir, text_role, text_list_input): # audio_similarity.convert_from_list(text_list_input, ref_audio_all) except Exception as e: - traceback.print_exc() + logger.error("发生异常: \n%s", traceback.format_exc()) text_convert_from_list_info = f"发生异常:{e}" text_sample_dir = '' return i18n(text_convert_from_list_info), text_sample_dir @@ -83,7 +83,7 @@ def start_similarity_analysis(work_space_dir, sample_dir, base_voice_path, need_ cmd += f' -c "{sample_dir}"' cmd += f' -o {similarity_file}' - print(cmd) + logger.info(cmd) p_similarity = Popen(cmd, shell=True) p_similarity.wait() @@ -136,7 +136,7 @@ def sample(text_work_space_dir, text_role, text_sample_dir, text_base_voice_path audio_similarity.sample(ref_audio_dir, similarity_list, int(text_subsection_num), int(text_sample_num)) except Exception as e: - traceback.print_exc() + logger.error("发生异常: \n%s", traceback.format_exc()) text_sample_info = f"发生异常:{e}" ref_audio_dir = '' text_model_inference_voice_dir = ref_audio_dir @@ -191,7 +191,7 @@ def model_inference(text_work_space_dir, text_role, text_model_inference_voice_d # audio_inference.generate_audio_files(url_composer, text_list, ref_audio_manager.get_ref_audio_list(), # inference_dir) except Exception as e: - traceback.print_exc() + logger.error("发生异常: \n%s", traceback.format_exc()) text_model_inference_info = f"发生异常:{e}" text_asr_audio_dir = '' return i18n(text_model_inference_info), text_asr_audio_dir, text_asr_audio_dir @@ -226,7 +226,7 @@ def asr(text_work_space_dir, text_role, text_asr_audio_dir, dropdown_asr_model, text_text_similarity_analysis_path = asr_file text_asr_info = f"耗时:{time_consuming:0.1f}秒;asr成功:生成文件{asr_file}" except Exception as e: - traceback.print_exc() + logger.error("发生异常: \n%s", traceback.format_exc()) text_asr_info = f"发生异常:{e}" text_text_similarity_analysis_path = '' return i18n(text_asr_info), text_text_similarity_analysis_path @@ -248,7 +248,7 @@ def open_asr(asr_inp_dir, asr_opt_dir, asr_model, asr_model_size, asr_lang): cmd += f' -l {asr_lang}' cmd += " -p %s" % ("float16" if is_half == True else "float32") - print(cmd) + logger.info(cmd) p_asr = Popen(cmd, shell=True) p_asr.wait() p_asr = None @@ -284,7 +284,7 @@ def text_similarity_analysis(text_work_space_dir, text_role, # open_text_similarity_analysis(text_text_similarity_analysis_path, similarity_dir) except Exception as e: - traceback.print_exc() + logger.error("发生异常: \n%s", traceback.format_exc()) text_text_similarity_analysis_info = f"发生异常:{e}" return i18n(text_text_similarity_analysis_info) @@ -297,7 +297,7 @@ def open_text_similarity_analysis(asr_file_path, output_dir, similarity_enlarge_ cmd += f' -o "{output_dir}"' cmd += f' -b {similarity_enlarge_boundary}' - print(cmd) + logger.info(cmd) 
p_text_similarity = Popen(cmd, shell=True) p_text_similarity.wait() p_text_similarity = None @@ -335,7 +335,7 @@ def similarity_audio_output(text_work_space_dir, text_role, text_base_audio_path text_similarity_audio_output_info = f'耗时:{time_consuming:0.1f}秒;相似度分析成功:生成目录{similarity_file_dir},文件{similarity_file}' except Exception as e: - traceback.print_exc() + logger.error("发生异常: \n%s", traceback.format_exc()) text_similarity_audio_output_info = f"发生异常:{e}" return i18n(text_similarity_audio_output_info) @@ -361,7 +361,7 @@ def sync_ref_audio(text_work_space_dir, text_role, text_sync_ref_audio_dir, text_sync_ref_audio_info = (f"耗时:{time_consuming:0.1f}秒;推理音频目录{text_sync_inference_audio_dir}下," f"text目录删除了{delete_text_wav_num}个参考音频,emotion目录下,删除了{delete_emotion_dir_num}个目录") except Exception as e: - traceback.print_exc() + logger.error("发生异常: \n%s", traceback.format_exc()) text_sync_ref_audio_info = f"发生异常:{e}" return i18n(text_sync_ref_audio_info) @@ -392,7 +392,7 @@ def create_config(text_work_space_dir, text_role, text_template, text_sync_ref_a text_create_config_info = f"耗时:{time_consuming:0.1f}秒;配置生成成功:生成文件{config_file}" except Exception as e: - traceback.print_exc() + logger.error("发生异常: \n%s", traceback.format_exc()) text_create_config_info = f"发生异常:{e}" return i18n(text_create_config_info) diff --git a/Ref_Audio_Selector/tool/asr/funasr_asr_multi_level_dir.py b/Ref_Audio_Selector/tool/asr/funasr_asr_multi_level_dir.py index 43ec04d9..22fbfc1a 100644 --- a/Ref_Audio_Selector/tool/asr/funasr_asr_multi_level_dir.py +++ b/Ref_Audio_Selector/tool/asr/funasr_asr_multi_level_dir.py @@ -4,6 +4,7 @@ import os import traceback import Ref_Audio_Selector.config_param.config_params as params +from Ref_Audio_Selector.config_param.log_config import logger from Ref_Audio_Selector.common.time_util import timeit_decorator from tqdm import tqdm from funasr import AutoModel @@ -31,7 +32,7 @@ def only_asr(input_file): text = model.generate(input=input_file)[0]["text"] except: text = '' - print(traceback.format_exc()) + logger.error(traceback.format_exc()) return text @@ -54,7 +55,7 @@ def execute_asr_multi_level_dir(input_folder, output_folder, model_size, languag output.append(f"{input_file_path}|{original_text}|{language.upper()}|{asr_text}") except: - print(traceback.format_exc()) + logger.error(traceback.format_exc()) # 创建或打开指定的输出目录 output_folder = output_folder or "output/asr_opt" @@ -67,7 +68,7 @@ def execute_asr_multi_level_dir(input_folder, output_folder, model_size, languag # 将输出写入文件 with open(output_file_path, "w", encoding="utf-8") as f: f.write("\n".join(output)) - print(f"ASR 任务完成->标注文件路径: {output_file_path}\n") + logger.info(f"ASR 任务完成->标注文件路径: {output_file_path}\n") return output_file_path diff --git a/Ref_Audio_Selector/tool/audio_inference.py b/Ref_Audio_Selector/tool/audio_inference.py index 39cc2c6f..7f6e3cc8 100644 --- a/Ref_Audio_Selector/tool/audio_inference.py +++ b/Ref_Audio_Selector/tool/audio_inference.py @@ -4,6 +4,7 @@ import Ref_Audio_Selector.config_param.config_params as params from Ref_Audio_Selector.common.time_util import timeit_decorator from urllib.parse import urlparse, parse_qs, urlencode, urlunparse, quote +from Ref_Audio_Selector.config_param.log_config import logger class URLComposer: @@ -69,7 +70,7 @@ def safe_encode_query_params(original_url): new_parsed_url = parsed_url._replace(query=new_query_string) encoded_url = urlunparse(new_parsed_url) - print(encoded_url) + logger.info(encoded_url) return encoded_url @@ -107,7 +108,7 @@ def 
generate_audio_files(url_composer, text_list, emotion_list, output_dir_path) # 检查是否已经存在对应的音频文件,如果存在则跳过 if os.path.exists(text_subdir_text_file_path) and os.path.exists(emotion_subdir_emotion_file_path): has_generated_count += 1 - print(f"进度: {has_generated_count}/{all_count}") + logger.info(f"进度: {has_generated_count}/{all_count}") continue if url_composer.is_emotion(): @@ -124,7 +125,7 @@ def generate_audio_files(url_composer, text_list, emotion_list, output_dir_path) f.write(audio_bytes) has_generated_count += 1 - print(f"进度: {has_generated_count}/{all_count}") + logger.info(f"进度: {has_generated_count}/{all_count}") def inference_audio_from_api(url): diff --git a/Ref_Audio_Selector/tool/audio_similarity.py b/Ref_Audio_Selector/tool/audio_similarity.py index 658251f9..8517c64b 100644 --- a/Ref_Audio_Selector/tool/audio_similarity.py +++ b/Ref_Audio_Selector/tool/audio_similarity.py @@ -2,6 +2,7 @@ import shutil import random import librosa +from Ref_Audio_Selector.config_param.log_config import logger def check_audio_duration(path, min_duration=3, max_duration=10): @@ -17,7 +18,7 @@ def check_audio_duration(path, min_duration=3, max_duration=10): return False except Exception as e: - print(f"无法打开或处理音频文件:{e}") + logger.error(f"无法打开或处理音频文件:{e}") return None @@ -33,7 +34,7 @@ def convert_from_list(list_file, output_dir): for line in lines: parts = line.strip().split('|') if len(parts) != 4: - print(f"Line format incorrect: {line}") + logger.error(f"Line format incorrect: {line}") continue audio_path, _, _, transcription = parts @@ -46,27 +47,27 @@ def convert_from_list(list_file, output_dir): # 如果目标文件已存在,不要覆盖 if os.path.exists(new_path): - print(f"File already exists: {new_path}") + logger.info(f"File already exists: {new_path}") continue try: # 检查音频文件是否存在 if not os.path.exists(audio_path): - print(f"Audio file does not exist: {audio_path}") + logger.info(f"Audio file does not exist: {audio_path}") continue if check_audio_duration(audio_path): # 复制音频文件到output目录并重命名 shutil.copy2(audio_path, new_path) - print(f"File copied and renamed to: {new_path}") + logger.info(f"File copied and renamed to: {new_path}") else: - print(f"File skipped due to duration: {audio_path}") + logger.info(f"File skipped due to duration: {audio_path}") except Exception as e: - print(f"An error occurred while processing: {audio_path}") - print(e) + logger.error(f"An error occurred while processing: {audio_path}") + logger.error(e) - print("Processing complete.") + logger.info("Processing complete.") def sample(output_audio_dir, similarity_list, subsection_num, sample_num): @@ -101,7 +102,7 @@ def sample(output_audio_dir, similarity_list, subsection_num, sample_num): dst_path = os.path.join(subdir_path, os.path.basename(src_path)) shutil.copyfile(src_path, dst_path) - print("Sampling completed.") + logger.info("Sampling completed.") def parse_similarity_file(file_path): @@ -150,7 +151,7 @@ def copy_and_move(output_audio_directory, similarity_scores): # 复制文件到新目录 shutil.copyfile(item['wav_path'], new_path) - print("已完成复制和重命名操作。") + logger.info("已完成复制和重命名操作。") if __name__ == '__main__': diff --git a/Ref_Audio_Selector/tool/delete_inference_with_ref.py b/Ref_Audio_Selector/tool/delete_inference_with_ref.py index 4d7f1758..5231dde2 100644 --- a/Ref_Audio_Selector/tool/delete_inference_with_ref.py +++ b/Ref_Audio_Selector/tool/delete_inference_with_ref.py @@ -2,6 +2,7 @@ import shutil import Ref_Audio_Selector.common.common as common import Ref_Audio_Selector.config_param.config_params as params +from 
Ref_Audio_Selector.config_param.log_config import logger def remove_matching_audio_files_in_text_dir(text_dir, emotions_list): @@ -12,12 +13,12 @@ def remove_matching_audio_files_in_text_dir(text_dir, emotions_list): wav_file_name = f"{emotion_tag}.wav" file_path = os.path.join(root, wav_file_name) if not os.path.exists(file_path): - print(f"Deleting file: {file_path}") + logger.info(f"Deleting file: {file_path}") try: os.remove(file_path) count += 1 except Exception as e: - print(f"Error deleting file {file_path}: {e}") + logger.error(f"Error deleting file {file_path}: {e}") return count @@ -39,13 +40,13 @@ def delete_emotion_subdirectories(emotion_dir, emotions_list): # 检查emotion子目录是否存在 if not os.path.isdir(folder_path): - print(f"Deleting directory: {folder_path}") + logger.info(f"Deleting directory: {folder_path}") try: # 使用shutil.rmtree删除整个子目录及其内容 shutil.rmtree(folder_path) count += 1 except Exception as e: - print(f"Error deleting directory {folder_path}: {e}") + logger.error(f"Error deleting directory {folder_path}: {e}") return count diff --git a/Ref_Audio_Selector/tool/speaker_verification/voice_similarity.py b/Ref_Audio_Selector/tool/speaker_verification/voice_similarity.py index 568c5e2d..df9d7ee7 100644 --- a/Ref_Audio_Selector/tool/speaker_verification/voice_similarity.py +++ b/Ref_Audio_Selector/tool/speaker_verification/voice_similarity.py @@ -5,6 +5,7 @@ import platform import Ref_Audio_Selector.config_param.config_params as params from Ref_Audio_Selector.common.time_util import timeit_decorator +from Ref_Audio_Selector.config_param.log_config import logger from modelscope.pipelines import pipeline @@ -32,6 +33,8 @@ def compare_audio_and_generate_report(reference_audio_path, comparison_dir_path, reference_audio_16k = reference_audio_path # Step 2: 用参考音频依次比较音频目录下的每个音频,获取相似度分数及对应路径 + all_count = len(comparison_audio_paths) + has_processed_count = 0 similarity_scores = [] for audio_path in comparison_audio_paths: score = sv_pipeline([reference_audio_16k, audio_path])['score'] @@ -39,7 +42,8 @@ def compare_audio_and_generate_report(reference_audio_path, comparison_dir_path, 'score': score, 'path': audio_path }) - print(f'similarity score: {score}, path: {audio_path}') + has_processed_count += 1 + logger.info(f'进度:{has_processed_count}/{all_count}') # Step 3: 根据相似度分数降序排列 similarity_scores.sort(key=lambda x: x['score'], reverse=True) diff --git a/Ref_Audio_Selector/tool/text_comparison/asr_text_process.py b/Ref_Audio_Selector/tool/text_comparison/asr_text_process.py index 585df909..96ce2187 100644 --- a/Ref_Audio_Selector/tool/text_comparison/asr_text_process.py +++ b/Ref_Audio_Selector/tool/text_comparison/asr_text_process.py @@ -6,6 +6,7 @@ import Ref_Audio_Selector.tool.text_comparison.text_comparison as text_comparison import Ref_Audio_Selector.config_param.config_params as params import Ref_Audio_Selector.common.common as common +from Ref_Audio_Selector.config_param.log_config import logger def parse_asr_file(file_path): @@ -42,7 +43,7 @@ def calculate_similarity_and_append_to_list(input_list, boundary): item['similarity_score'] = similarity_score item['original_score'] = original_score has_been_processed_count += 1 - print(f'进度:{has_been_processed_count}/{all_count}') + logger.info(f'进度:{has_been_processed_count}/{all_count}') return input_list @@ -135,7 +136,7 @@ def process(asr_file_path, output_dir, similarity_enlarge_boundary): original_text_detail_file = os.path.join(output_dir, f'{params.text_similarity_by_text_detail_filename}.txt') 
format_list_to_emotion(original_text_detail_list, original_text_detail_file) - print('文本相似度分析完成。') + logger.info('文本相似度分析完成。') def parse_arguments(): diff --git a/Ref_Audio_Selector/tool/text_comparison/text_comparison.py b/Ref_Audio_Selector/tool/text_comparison/text_comparison.py index 156fa53c..2c4a5302 100644 --- a/Ref_Audio_Selector/tool/text_comparison/text_comparison.py +++ b/Ref_Audio_Selector/tool/text_comparison/text_comparison.py @@ -2,7 +2,7 @@ import torch from transformers import AutoTokenizer, AutoModel from scipy.spatial.distance import cosine -import math +from Ref_Audio_Selector.config_param.log_config import logger bert_path = os.environ.get( "bert_path", "GPT_SoVITS/pretrained_models/chinese-roberta-wwm-ext-large" @@ -11,7 +11,7 @@ # Set device to GPU if available, else CPU device = torch.device("cuda" if torch.cuda.is_available() else "cpu") -print(f'使用计算设备: {device}') +logger.info(f'使用计算设备: {device}') tokenizer = AutoTokenizer.from_pretrained(bert_path) model = AutoModel.from_pretrained(bert_path).to(device) From 9fe20c14d6ece5bdf08a7e5562af5a449e67a6e9 Mon Sep 17 00:00:00 2001 From: Downupanddownup Date: Fri, 26 Apr 2024 16:27:21 +0800 Subject: [PATCH 25/72] =?UTF-8?q?=E6=B7=BB=E5=8A=A0=E9=9F=B3=E9=A2=91?= =?UTF-8?q?=E9=A2=84=E9=87=87=E6=A0=B7=E5=BC=80=E5=85=B3?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- Ref_Audio_Selector/config.ini | 2 ++ Ref_Audio_Selector/config_param/config_params.py | 2 ++ .../tool/speaker_verification/voice_similarity.py | 5 ++++- 3 files changed, 8 insertions(+), 1 deletion(-) diff --git a/Ref_Audio_Selector/config.ini b/Ref_Audio_Selector/config.ini index 7507853b..43b5ef65 100644 --- a/Ref_Audio_Selector/config.ini +++ b/Ref_Audio_Selector/config.ini @@ -21,6 +21,8 @@ time_log_print_dir = Ref_Audio_Selector/log/performance list_to_convert_reference_audio_dir = refer_audio_all # 音频相似度目录 audio_similarity_dir = similarity +# 是否开启基准音频预采样 true false +enable_pre_sample = true [Inference] # 默认测试文本位置 diff --git a/Ref_Audio_Selector/config_param/config_params.py b/Ref_Audio_Selector/config_param/config_params.py index de12d722..cd75c73b 100644 --- a/Ref_Audio_Selector/config_param/config_params.py +++ b/Ref_Audio_Selector/config_param/config_params.py @@ -23,6 +23,8 @@ list_to_convert_reference_audio_dir = config.get_audio_sample('list_to_convert_reference_audio_dir') # 音频相似度目录 audio_similarity_dir = config.get_audio_sample('audio_similarity_dir') +# 是否开启基准音频预采样 true false +enable_pre_sample = config.get_audio_sample('enable_pre_sample') # [Inference] # 默认测试文本位置 diff --git a/Ref_Audio_Selector/tool/speaker_verification/voice_similarity.py b/Ref_Audio_Selector/tool/speaker_verification/voice_similarity.py index df9d7ee7..bad0d3de 100644 --- a/Ref_Audio_Selector/tool/speaker_verification/voice_similarity.py +++ b/Ref_Audio_Selector/tool/speaker_verification/voice_similarity.py @@ -28,7 +28,10 @@ def compare_audio_and_generate_report(reference_audio_path, comparison_dir_path, # 改用torchaudio.transforms.Resample进行重采样,如果在非windows环境下,没有更改依赖包的采样方法的话, # 使用这段代码进行预采样会出现因为采样方法不同,而导致的模型相似度计算不准确的问题 # 当然如果在windows下,使用了其他的采样方法,也会出现不准确的问题 - reference_audio_16k = ensure_16k_wav(reference_audio_path) + if params.enable_pre_sample == 'true': + reference_audio_16k = ensure_16k_wav(reference_audio_path) + else: + reference_audio_16k = reference_audio_path else: reference_audio_16k = reference_audio_path From 1d434e1a0a5c4d35733974ce6991837a76fe0af9 Mon Sep 17 00:00:00 2001 From: Downupanddownup Date: Fri, 26 Apr 2024 
17:01:03 +0800 Subject: [PATCH 26/72] =?UTF-8?q?=E6=B7=BB=E5=8A=A0=E5=88=9D=E5=A7=8B?= =?UTF-8?q?=E5=90=AF=E5=8A=A8=E6=97=B6=E7=9A=84=E9=BB=98=E8=AE=A4=E5=80=BC?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- Ref_Audio_Selector/common/common.py | 12 ++++++++ .../ref_audio_selector_webui.py | 29 +++++++++++++------ 2 files changed, 32 insertions(+), 9 deletions(-) diff --git a/Ref_Audio_Selector/common/common.py b/Ref_Audio_Selector/common/common.py index 0a94a6d9..24231ea3 100644 --- a/Ref_Audio_Selector/common/common.py +++ b/Ref_Audio_Selector/common/common.py @@ -107,6 +107,18 @@ def write_text_to_file(text, output_file_path): logger.info(f"Text successfully written to file: {output_file_path}") +def check_path_existence_and_return(path): + """ + 检查给定路径(文件或目录)是否存在。如果存在,返回该路径;否则,返回空字符串。 + :param path: 待检查的文件或目录路径(字符串) + :return: 如果路径存在,返回原路径;否则,返回空字符串 + """ + if os.path.exists(path): + return path + else: + return "" + + if __name__ == '__main__': dir = r'C:\Users\Administrator\Desktop/test' dir2 = r'"C:\Users\Administrator\Desktop\test2"' diff --git a/Ref_Audio_Selector/ref_audio_selector_webui.py b/Ref_Audio_Selector/ref_audio_selector_webui.py index 4569dd27..586e6547 100644 --- a/Ref_Audio_Selector/ref_audio_selector_webui.py +++ b/Ref_Audio_Selector/ref_audio_selector_webui.py @@ -427,12 +427,16 @@ def save_role(text_role): rw_param.write_role(text_role) +default_work_space_dir = rw_param.read_work_dir() +default_role = rw_param.read_role() +default_base_dir = os.path.join(default_work_space_dir, default_role) + with gr.Blocks() as app: gr.Markdown(value=i18n("基本介绍:这是一个从训练素材中,批量提取参考音频,并进行效果评估与配置生成的工具")) with gr.Row(): text_work_space_dir = gr.Text(label=i18n("工作目录,后续操作所生成文件都会保存在此目录下"), - value=rw_param.read_work_dir()) - text_role = gr.Text(label=i18n("角色名称"), value=rw_param.read_role()) + value=default_work_space_dir) + text_role = gr.Text(label=i18n("角色名称"), value=default_role) text_work_space_dir.input(save_work_dir, [text_work_space_dir, text_role], [text_role]) text_role.input(save_role, [text_role], []) with gr.Accordion(label=i18n("第一步:基于训练素材,生成待选参考音频列表"), open=False): @@ -442,7 +446,8 @@ def save_role(text_role): button_convert_from_list = gr.Button(i18n("开始生成待参考列表"), variant="primary") text_convert_from_list_info = gr.Text(label=i18n("参考列表生成结果"), value="", interactive=False) gr.Markdown(value=i18n("1.2:选择基准音频,执行相似度匹配,并分段随机抽样")) - text_sample_dir = gr.Text(label=i18n("参考音频抽样目录"), value="", interactive=True) + default_sample_dir = common.check_path_existence_and_return(os.path.join(default_base_dir, params.list_to_convert_reference_audio_dir)) + text_sample_dir = gr.Text(label=i18n("参考音频抽样目录"), value=default_sample_dir, interactive=True) button_convert_from_list.click(convert_from_list, [text_work_space_dir, text_role, text_list_input], [text_convert_from_list_info, text_sample_dir]) with gr.Row(): @@ -456,7 +461,8 @@ def save_role(text_role): with gr.Accordion(label=i18n("第二步:基于参考音频和测试文本,执行批量推理"), open=False): gr.Markdown(value=i18n("2.1:配置推理服务参数信息,参考音频路径/文本和角色情绪二选一,如果是角色情绪,需要先执行第四步," "将参考音频打包配置到推理服务下,在推理前,请确认完整请求地址是否与正常使用时的一致,包括角色名称,尤其是文本分隔符是否正确")) - text_model_inference_voice_dir = gr.Text(label=i18n("待推理的参考音频所在目录"), value="", interactive=True) + default_model_inference_voice_dir = common.check_path_existence_and_return(os.path.join(default_base_dir, params.reference_audio_dir)) + text_model_inference_voice_dir = gr.Text(label=i18n("待推理的参考音频所在目录"), value=default_model_inference_voice_dir, interactive=True) text_url = 
gr.Text(label=i18n("请输入推理服务请求地址与参数"), value="") with gr.Row(): text_text = gr.Text(label=i18n("请输入文本参数名"), value="text") @@ -484,7 +490,8 @@ def save_role(text_role): text_model_inference_info = gr.Text(label=i18n("批量推理结果"), value="", interactive=False) with gr.Accordion(label=i18n("第三步:进行参考音频效果校验与筛选"), open=False): gr.Markdown(value=i18n("3.1:启动asr,获取推理音频文本")) - text_asr_audio_dir = gr.Text(label=i18n("待asr的音频所在目录"), value="", interactive=True) + default_asr_audio_dir = common.check_path_existence_and_return(os.path.join(default_base_dir, params.inference_audio_dir, params.inference_audio_text_aggregation_dir)) + text_asr_audio_dir = gr.Text(label=i18n("待asr的音频所在目录"), value=default_asr_audio_dir, interactive=True) with gr.Row(): dropdown_asr_model = gr.Dropdown( label=i18n("ASR 模型"), @@ -508,7 +515,8 @@ def save_role(text_role): button_asr = gr.Button(i18n("启动asr"), variant="primary") text_asr_info = gr.Text(label=i18n("asr结果"), value="", interactive=False) gr.Markdown(value=i18n("3.2:启动文本相似度分析")) - text_text_similarity_analysis_path = gr.Text(label=i18n("待分析的文件路径"), value="", interactive=True) + default_text_similarity_analysis_path = common.check_path_existence_and_return(os.path.join(default_base_dir, params.asr_filename)) + text_text_similarity_analysis_path = gr.Text(label=i18n("待分析的文件路径"), value=default_text_similarity_analysis_path, interactive=True) button_asr.click(asr, [text_work_space_dir, text_role, text_asr_audio_dir, dropdown_asr_model, dropdown_asr_size, dropdown_asr_lang], [text_asr_info, text_text_similarity_analysis_path]) @@ -530,8 +538,10 @@ def save_role(text_role): [text_work_space_dir, text_role, text_base_audio_path, text_compare_audio_dir], [text_similarity_audio_output_info]) with gr.Row(): - text_sync_ref_audio_dir = gr.Text(label=i18n("参考音频路径"), value="", interactive=True) - text_sync_inference_audio_dir = gr.Text(label=i18n("被同步的推理音频路径"), value="", interactive=True) + default_sync_ref_audio_dir = common.check_path_existence_and_return(os.path.join(default_base_dir, params.reference_audio_dir)) + text_sync_ref_audio_dir = gr.Text(label=i18n("参考音频路径"), value=default_sync_ref_audio_dir, interactive=True) + default_sync_inference_audio_dir = common.check_path_existence_and_return(os.path.join(default_base_dir, params.inference_audio_dir)) + text_sync_inference_audio_dir = gr.Text(label=i18n("被同步的推理音频路径"), value=default_sync_inference_audio_dir, interactive=True) with gr.Row(): button_sync_ref_audio = gr.Button(i18n("将参考音频的删除情况,同步到推理音频目录"), variant="primary") text_sync_ref_info = gr.Text(label=i18n("同步结果"), value="", interactive=False) @@ -544,7 +554,8 @@ def save_role(text_role): text_template_path = gr.Text(label=i18n("模板文件路径"), value=default_template_path, interactive=True) text_template = gr.Text(label=i18n("模板内容"), value=default_template_content, lines=10) gr.Markdown(value=i18n("4.2:生成配置")) - text_sync_ref_audio_dir2 = gr.Text(label=i18n("参考音频路径"), value="", interactive=True) + default_sync_ref_audio_dir2 = common.check_path_existence_and_return(os.path.join(default_base_dir, params.reference_audio_dir)) + text_sync_ref_audio_dir2 = gr.Text(label=i18n("参考音频路径"), value=default_sync_ref_audio_dir2, interactive=True) with gr.Row(): button_create_config = gr.Button(i18n("生成配置"), variant="primary") text_create_config_info = gr.Text(label=i18n("生成结果"), value="", interactive=False) From d8d551d4d247e7a07fddbefca86f7e4c657cedc5 Mon Sep 17 00:00:00 2001 From: Downupanddownup Date: Fri, 26 Apr 2024 17:10:23 +0800 Subject: [PATCH 27/72] =?UTF-8?q?bug=E4=BF=AE=E5=A4=8D?= 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- Ref_Audio_Selector/common/common.py | 8 ++++---- Ref_Audio_Selector/ref_audio_selector_webui.py | 7 +++++-- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/Ref_Audio_Selector/common/common.py b/Ref_Audio_Selector/common/common.py index 24231ea3..fe0de9d5 100644 --- a/Ref_Audio_Selector/common/common.py +++ b/Ref_Audio_Selector/common/common.py @@ -1,5 +1,5 @@ from tools import my_utils -from Ref_Audio_Selector.config_param.log_config import logger +import Ref_Audio_Selector.config_param.log_config as log_config import os @@ -102,9 +102,9 @@ def write_text_to_file(text, output_file_path): with open(output_file_path, 'w', encoding='utf-8') as file: file.write(text) except IOError as e: - logger.info(f"Error occurred while writing to the file: {e}") + log_config.logger.info(f"Error occurred while writing to the file: {e}") else: - logger.info(f"Text successfully written to file: {output_file_path}") + log_config.logger.info(f"Text successfully written to file: {output_file_path}") def check_path_existence_and_return(path): @@ -123,4 +123,4 @@ def check_path_existence_and_return(path): dir = r'C:\Users\Administrator\Desktop/test' dir2 = r'"C:\Users\Administrator\Desktop\test2"' dir, dir2 = batch_clean_paths([dir, dir2]) - print(dir, dir2) \ No newline at end of file + print(dir, dir2) diff --git a/Ref_Audio_Selector/ref_audio_selector_webui.py b/Ref_Audio_Selector/ref_audio_selector_webui.py index 586e6547..d2ce35ce 100644 --- a/Ref_Audio_Selector/ref_audio_selector_webui.py +++ b/Ref_Audio_Selector/ref_audio_selector_webui.py @@ -3,6 +3,9 @@ import traceback import gradio as gr + +from Ref_Audio_Selector.config_param.log_config import logger + import Ref_Audio_Selector.tool.audio_similarity as audio_similarity import Ref_Audio_Selector.tool.audio_inference as audio_inference import Ref_Audio_Selector.tool.audio_config as audio_config @@ -10,7 +13,7 @@ import Ref_Audio_Selector.common.common as common import Ref_Audio_Selector.config_param.config_params as params import Ref_Audio_Selector.common.time_util as time_util -from Ref_Audio_Selector.config_param.log_config import logger + from tools.i18n.i18n import I18nAuto from config import python_exec, is_half from tools import my_utils @@ -515,7 +518,7 @@ def save_role(text_role): button_asr = gr.Button(i18n("启动asr"), variant="primary") text_asr_info = gr.Text(label=i18n("asr结果"), value="", interactive=False) gr.Markdown(value=i18n("3.2:启动文本相似度分析")) - default_text_similarity_analysis_path = common.check_path_existence_and_return(os.path.join(default_base_dir, params.asr_filename)) + default_text_similarity_analysis_path = common.check_path_existence_and_return(os.path.join(default_base_dir, params.asr_filename + '.list')) text_text_similarity_analysis_path = gr.Text(label=i18n("待分析的文件路径"), value=default_text_similarity_analysis_path, interactive=True) button_asr.click(asr, [text_work_space_dir, text_role, text_asr_audio_dir, dropdown_asr_model, dropdown_asr_size, dropdown_asr_lang], From d1e92edc7c22913c36fe35f170092c3425e9e4ad Mon Sep 17 00:00:00 2001 From: Downupanddownup Date: Fri, 26 Apr 2024 17:46:40 +0800 Subject: [PATCH 28/72] =?UTF-8?q?=E6=B7=BB=E5=8A=A0=E4=B8=80=E4=BA=9B?= =?UTF-8?q?=E5=8F=82=E6=95=B0=E7=9A=84=E8=AF=BB=E5=8F=96=E5=92=8C=E4=BF=9D?= =?UTF-8?q?=E5=AD=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../config_param/config_manager.py | 40 +++++---- 
.../ref_audio_selector_webui.py | 83 ++++++++++++++----- 2 files changed, 83 insertions(+), 40 deletions(-) diff --git a/Ref_Audio_Selector/config_param/config_manager.py b/Ref_Audio_Selector/config_param/config_manager.py index ea7af4a6..53a2c77c 100644 --- a/Ref_Audio_Selector/config_param/config_manager.py +++ b/Ref_Audio_Selector/config_param/config_manager.py @@ -1,27 +1,31 @@ import configparser +import os import Ref_Audio_Selector.common.common as common class ParamReadWriteManager: def __init__(self): - self.work_dir_path = 'Ref_Audio_Selector/file/base_info/work_dir.txt' - self.role_path = 'Ref_Audio_Selector/file/base_info/role.txt' - - def read_work_dir(self): - content = common.read_file(self.work_dir_path) - return content.strip() - - def read_role(self): - content = common.read_file(self.role_path) - return content.strip() - - def write_work_dir(self, work_dir_content): - clean_content = work_dir_content.strip() - common.write_text_to_file(clean_content, self.work_dir_path) - - def write_role(self, role_content): - clean_content = role_content.strip() - common.write_text_to_file(clean_content, self.role_path) + self.base_dir = 'Ref_Audio_Selector/file/base_info' + self.work_dir = 'work_dir' + self.role = 'role' + self.generate_audio_url = 'generate_audio_url' + self.text_param = 'text_param' + self.ref_path_param = 'ref_path_param' + self.ref_text_param = 'ref_text_param' + self.emotion_param = 'emotion_param' + + def read(self, key): + file_path = os.path.join(self.base_dir, key + '.txt') + if os.path.exists(file_path): + content = common.read_file(file_path) + return content.strip() + else: + return '' + + def write(self, key, content): + file_path = os.path.join(self.base_dir, key + '.txt') + clean_content = content.strip() + common.write_text_to_file(clean_content, file_path) class ConfigManager: diff --git a/Ref_Audio_Selector/ref_audio_selector_webui.py b/Ref_Audio_Selector/ref_audio_selector_webui.py index d2ce35ce..fcbdff05 100644 --- a/Ref_Audio_Selector/ref_audio_selector_webui.py +++ b/Ref_Audio_Selector/ref_audio_selector_webui.py @@ -410,9 +410,29 @@ def whole_url(text_url, text_text, text_ref_path, text_ref_text, text_emotion): return text_whole_url +def save_generate_audio_url(generate_audio_url): + rw_param.write(rw_param.generate_audio_url, generate_audio_url) + + +def save_text_param(text_text): + rw_param.write(rw_param.text_param, text_text) + + +def save_ref_path_param(text_ref_path): + rw_param.write(rw_param.ref_path_param, text_ref_path) + + +def save_ref_text_param(text_ref_text): + rw_param.write(rw_param.ref_text_param, text_ref_text) + + +def save_emotion_param(text_emotion): + rw_param.write(rw_param.emotion_param, text_emotion) + + def save_work_dir(text_work_space_dir, text_role): text_work_space_dir = my_utils.clean_path(text_work_space_dir) - rw_param.write_work_dir(text_work_space_dir) + rw_param.write(rw_param.work_dir, text_work_space_dir) if text_role is not None and text_role != '': return text_role else: @@ -422,16 +442,16 @@ def save_work_dir(text_work_space_dir, text_role): dir_name = os.path.join(text_work_space_dir, role_dir) if not os.path.isdir(dir_name): break - rw_param.write_role(role_dir) + rw_param.write(rw_param.role, role_dir) return role_dir def save_role(text_role): - rw_param.write_role(text_role) + rw_param.write(rw_param.role, text_role) -default_work_space_dir = rw_param.read_work_dir() -default_role = rw_param.read_role() +default_work_space_dir = rw_param.read(rw_param.work_dir) +default_role = 
rw_param.read(rw_param.role) default_base_dir = os.path.join(default_work_space_dir, default_role) with gr.Blocks() as app: @@ -449,7 +469,8 @@ def save_role(text_role): button_convert_from_list = gr.Button(i18n("开始生成待参考列表"), variant="primary") text_convert_from_list_info = gr.Text(label=i18n("参考列表生成结果"), value="", interactive=False) gr.Markdown(value=i18n("1.2:选择基准音频,执行相似度匹配,并分段随机抽样")) - default_sample_dir = common.check_path_existence_and_return(os.path.join(default_base_dir, params.list_to_convert_reference_audio_dir)) + default_sample_dir = common.check_path_existence_and_return( + os.path.join(default_base_dir, params.list_to_convert_reference_audio_dir)) text_sample_dir = gr.Text(label=i18n("参考音频抽样目录"), value=default_sample_dir, interactive=True) button_convert_from_list.click(convert_from_list, [text_work_space_dir, text_role, text_list_input], [text_convert_from_list_info, text_sample_dir]) @@ -464,25 +485,34 @@ def save_role(text_role): with gr.Accordion(label=i18n("第二步:基于参考音频和测试文本,执行批量推理"), open=False): gr.Markdown(value=i18n("2.1:配置推理服务参数信息,参考音频路径/文本和角色情绪二选一,如果是角色情绪,需要先执行第四步," "将参考音频打包配置到推理服务下,在推理前,请确认完整请求地址是否与正常使用时的一致,包括角色名称,尤其是文本分隔符是否正确")) - default_model_inference_voice_dir = common.check_path_existence_and_return(os.path.join(default_base_dir, params.reference_audio_dir)) - text_model_inference_voice_dir = gr.Text(label=i18n("待推理的参考音频所在目录"), value=default_model_inference_voice_dir, interactive=True) - text_url = gr.Text(label=i18n("请输入推理服务请求地址与参数"), value="") + default_model_inference_voice_dir = common.check_path_existence_and_return( + os.path.join(default_base_dir, params.reference_audio_dir)) + text_model_inference_voice_dir = gr.Text(label=i18n("待推理的参考音频所在目录"), + value=default_model_inference_voice_dir, interactive=True) + text_url = gr.Text(label=i18n("请输入推理服务请求地址与参数"), value=rw_param.read(rw_param.generate_audio_url)) with gr.Row(): - text_text = gr.Text(label=i18n("请输入文本参数名"), value="text") - text_ref_path = gr.Text(label=i18n("请输入参考音频路径参数名"), value="") - text_ref_text = gr.Text(label=i18n("请输入参考音频文本参数名"), value="") - text_emotion = gr.Text(label=i18n("请输入角色情绪参数名"), value="emotion") + text_text = gr.Text(label=i18n("请输入文本参数名"), value=rw_param.read(rw_param.text_param)) + text_ref_path = gr.Text(label=i18n("请输入参考音频路径参数名"), + value=rw_param.read(rw_param.ref_path_param)) + text_ref_text = gr.Text(label=i18n("请输入参考音频文本参数名"), + value=rw_param.read(rw_param.ref_text_param)) + text_emotion = gr.Text(label=i18n("请输入角色情绪参数名"), value=rw_param.read(rw_param.emotion_param)) text_whole_url = gr.Text(label=i18n("完整地址"), value="", interactive=False) text_url.input(whole_url, [text_url, text_text, text_ref_path, text_ref_text, text_emotion], [text_whole_url]) + text_url.blur(save_generate_audio_url, [text_url], []) text_text.input(whole_url, [text_url, text_text, text_ref_path, text_ref_text, text_emotion], [text_whole_url]) + text_text.blur(save_text_param, [text_text], []) text_ref_path.input(whole_url, [text_url, text_text, text_ref_path, text_ref_text, text_emotion], [text_whole_url]) + text_ref_path.blur(save_ref_path_param, [text_ref_path], []) text_ref_text.input(whole_url, [text_url, text_text, text_ref_path, text_ref_text, text_emotion], [text_whole_url]) + text_ref_text.blur(save_ref_text_param, [text_ref_text], []) text_emotion.input(whole_url, [text_url, text_text, text_ref_path, text_ref_text, text_emotion], [text_whole_url]) + text_emotion.blur(save_emotion_param, [text_emotion], []) gr.Markdown(value=i18n("2.2:配置待推理文本,一句一行,不要太多,10条即可")) default_test_content_path = 
params.default_test_text_path text_test_content = gr.Text(label=i18n("请输入待推理文本路径"), value=default_test_content_path) @@ -493,7 +523,8 @@ def save_role(text_role): text_model_inference_info = gr.Text(label=i18n("批量推理结果"), value="", interactive=False) with gr.Accordion(label=i18n("第三步:进行参考音频效果校验与筛选"), open=False): gr.Markdown(value=i18n("3.1:启动asr,获取推理音频文本")) - default_asr_audio_dir = common.check_path_existence_and_return(os.path.join(default_base_dir, params.inference_audio_dir, params.inference_audio_text_aggregation_dir)) + default_asr_audio_dir = common.check_path_existence_and_return( + os.path.join(default_base_dir, params.inference_audio_dir, params.inference_audio_text_aggregation_dir)) text_asr_audio_dir = gr.Text(label=i18n("待asr的音频所在目录"), value=default_asr_audio_dir, interactive=True) with gr.Row(): dropdown_asr_model = gr.Dropdown( @@ -518,8 +549,10 @@ def save_role(text_role): button_asr = gr.Button(i18n("启动asr"), variant="primary") text_asr_info = gr.Text(label=i18n("asr结果"), value="", interactive=False) gr.Markdown(value=i18n("3.2:启动文本相似度分析")) - default_text_similarity_analysis_path = common.check_path_existence_and_return(os.path.join(default_base_dir, params.asr_filename + '.list')) - text_text_similarity_analysis_path = gr.Text(label=i18n("待分析的文件路径"), value=default_text_similarity_analysis_path, interactive=True) + default_text_similarity_analysis_path = common.check_path_existence_and_return( + os.path.join(default_base_dir, params.asr_filename + '.list')) + text_text_similarity_analysis_path = gr.Text(label=i18n("待分析的文件路径"), + value=default_text_similarity_analysis_path, interactive=True) button_asr.click(asr, [text_work_space_dir, text_role, text_asr_audio_dir, dropdown_asr_model, dropdown_asr_size, dropdown_asr_lang], [text_asr_info, text_text_similarity_analysis_path]) @@ -541,10 +574,14 @@ def save_role(text_role): [text_work_space_dir, text_role, text_base_audio_path, text_compare_audio_dir], [text_similarity_audio_output_info]) with gr.Row(): - default_sync_ref_audio_dir = common.check_path_existence_and_return(os.path.join(default_base_dir, params.reference_audio_dir)) - text_sync_ref_audio_dir = gr.Text(label=i18n("参考音频路径"), value=default_sync_ref_audio_dir, interactive=True) - default_sync_inference_audio_dir = common.check_path_existence_and_return(os.path.join(default_base_dir, params.inference_audio_dir)) - text_sync_inference_audio_dir = gr.Text(label=i18n("被同步的推理音频路径"), value=default_sync_inference_audio_dir, interactive=True) + default_sync_ref_audio_dir = common.check_path_existence_and_return( + os.path.join(default_base_dir, params.reference_audio_dir)) + text_sync_ref_audio_dir = gr.Text(label=i18n("参考音频路径"), value=default_sync_ref_audio_dir, + interactive=True) + default_sync_inference_audio_dir = common.check_path_existence_and_return( + os.path.join(default_base_dir, params.inference_audio_dir)) + text_sync_inference_audio_dir = gr.Text(label=i18n("被同步的推理音频路径"), + value=default_sync_inference_audio_dir, interactive=True) with gr.Row(): button_sync_ref_audio = gr.Button(i18n("将参考音频的删除情况,同步到推理音频目录"), variant="primary") text_sync_ref_info = gr.Text(label=i18n("同步结果"), value="", interactive=False) @@ -557,8 +594,10 @@ def save_role(text_role): text_template_path = gr.Text(label=i18n("模板文件路径"), value=default_template_path, interactive=True) text_template = gr.Text(label=i18n("模板内容"), value=default_template_content, lines=10) gr.Markdown(value=i18n("4.2:生成配置")) - default_sync_ref_audio_dir2 = 
common.check_path_existence_and_return(os.path.join(default_base_dir, params.reference_audio_dir)) - text_sync_ref_audio_dir2 = gr.Text(label=i18n("参考音频路径"), value=default_sync_ref_audio_dir2, interactive=True) + default_sync_ref_audio_dir2 = common.check_path_existence_and_return( + os.path.join(default_base_dir, params.reference_audio_dir)) + text_sync_ref_audio_dir2 = gr.Text(label=i18n("参考音频路径"), value=default_sync_ref_audio_dir2, + interactive=True) with gr.Row(): button_create_config = gr.Button(i18n("生成配置"), variant="primary") text_create_config_info = gr.Text(label=i18n("生成结果"), value="", interactive=False) From 2a23f95f61113bd1b1fd6e115844b06b039f6621 Mon Sep 17 00:00:00 2001 From: Downupanddownup Date: Fri, 26 Apr 2024 22:55:09 +0800 Subject: [PATCH 29/72] =?UTF-8?q?bug=E4=BF=AE=E5=A4=8D?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- Ref_Audio_Selector/common/common.py | 5 +- .../ref_audio_selector_webui.py | 27 ++++----- .../tool/delete_inference_with_ref.py | 60 +++++++++---------- .../speaker_verification/voice_similarity.py | 4 +- 4 files changed, 44 insertions(+), 52 deletions(-) diff --git a/Ref_Audio_Selector/common/common.py b/Ref_Audio_Selector/common/common.py index fe0de9d5..9742a446 100644 --- a/Ref_Audio_Selector/common/common.py +++ b/Ref_Audio_Selector/common/common.py @@ -1,5 +1,4 @@ from tools import my_utils -import Ref_Audio_Selector.config_param.log_config as log_config import os @@ -102,9 +101,9 @@ def write_text_to_file(text, output_file_path): with open(output_file_path, 'w', encoding='utf-8') as file: file.write(text) except IOError as e: - log_config.logger.info(f"Error occurred while writing to the file: {e}") + print(f"Error occurred while writing to the file: {e}") else: - log_config.logger.info(f"Text successfully written to file: {output_file_path}") + print(f"Text successfully written to file: {output_file_path}") def check_path_existence_and_return(path): diff --git a/Ref_Audio_Selector/ref_audio_selector_webui.py b/Ref_Audio_Selector/ref_audio_selector_webui.py index fcbdff05..f8bc8eb4 100644 --- a/Ref_Audio_Selector/ref_audio_selector_webui.py +++ b/Ref_Audio_Selector/ref_audio_selector_webui.py @@ -90,8 +90,9 @@ def start_similarity_analysis(work_space_dir, sample_dir, base_voice_path, need_ p_similarity = Popen(cmd, shell=True) p_similarity.wait() + similarity_list = audio_similarity.parse_similarity_file(similarity_file) + if need_similarity_output: - similarity_list = audio_similarity.parse_similarity_file(similarity_file) similarity_file_dir = os.path.join(similarity_dir, base_voice_file_name) audio_similarity.copy_and_move(similarity_file_dir, similarity_list) @@ -122,11 +123,9 @@ def sample(text_work_space_dir, text_role, text_sample_dir, text_base_voice_path ref_audio_dir = os.path.join(base_role_dir, params.reference_audio_dir) - time_consuming, similarity_list, _, _ = ( - time_util.time_monitor(start_similarity_analysis)(base_role_dir, - text_sample_dir, - text_base_voice_path, - checkbox_similarity_output)) + time_consuming, (similarity_list, _, _) \ + = time_util.time_monitor(start_similarity_analysis)(base_role_dir, text_sample_dir, text_base_voice_path, + checkbox_similarity_output) text_sample_info = f"耗时:{time_consuming:0.1f}秒;抽样成功:生成目录{ref_audio_dir}" @@ -197,7 +196,7 @@ def model_inference(text_work_space_dir, text_role, text_model_inference_voice_d logger.error("发生异常: \n%s", traceback.format_exc()) text_model_inference_info = f"发生异常:{e}" text_asr_audio_dir = '' - return 
i18n(text_model_inference_info), text_asr_audio_dir, text_asr_audio_dir + return i18n(text_model_inference_info), text_asr_audio_dir, inference_dir # 对推理生成音频执行asr @@ -325,7 +324,7 @@ def similarity_audio_output(text_work_space_dir, text_role, text_base_audio_path if text_compare_audio_dir is None or text_compare_audio_dir == '': raise Exception("待分析的音频所在目录不能为空") - time_consuming, similarity_list, similarity_file, similarity_file_dir \ + time_consuming, (similarity_list, similarity_file, similarity_file_dir) \ = time_util.time_monitor(start_similarity_analysis)(base_role_dir, text_compare_audio_dir, text_base_audio_path, True) @@ -356,13 +355,13 @@ def sync_ref_audio(text_work_space_dir, text_role, text_sync_ref_audio_dir, raise Exception("参考音频目录不能为空") if text_sync_inference_audio_dir is None or text_sync_inference_audio_dir == '': raise Exception("推理生成的音频目录不能为空") - time_consuming, delete_text_wav_num, delete_emotion_dir_num \ + time_consuming, (delete_text_wav_num, delete_emotion_dir_num) \ = time_util.time_monitor(delete_inference_with_ref.sync_ref_audio)(text_sync_ref_audio_dir, text_sync_inference_audio_dir) # delete_text_wav_num, delete_emotion_dir_num = delete_inference_with_ref.sync_ref_audio( # text_sync_ref_audio_dir, text_sync_inference_audio_dir) text_sync_ref_audio_info = (f"耗时:{time_consuming:0.1f}秒;推理音频目录{text_sync_inference_audio_dir}下," - f"text目录删除了{delete_text_wav_num}个参考音频,emotion目录下,删除了{delete_emotion_dir_num}个目录") + f"text目录删除了{delete_text_wav_num}个推理音频,emotion目录下,删除了{delete_emotion_dir_num}个目录") except Exception as e: logger.error("发生异常: \n%s", traceback.format_exc()) text_sync_ref_audio_info = f"发生异常:{e}" @@ -462,7 +461,7 @@ def save_role(text_role): text_role = gr.Text(label=i18n("角色名称"), value=default_role) text_work_space_dir.input(save_work_dir, [text_work_space_dir, text_role], [text_role]) text_role.input(save_role, [text_role], []) - with gr.Accordion(label=i18n("第一步:基于训练素材,生成待选参考音频列表"), open=False): + with gr.Tab(label=i18n("第一步:基于训练素材,生成待选参考音频列表"), open=False): gr.Markdown(value=i18n("1.1:选择list文件,并提取3-10秒的素材作为参考候选")) text_list_input = gr.Text(label=i18n("请输入list文件路径"), value="") with gr.Row(): @@ -482,7 +481,7 @@ def save_role(text_role): with gr.Row(): button_sample = gr.Button(i18n("开始分段随机抽样"), variant="primary") text_sample_info = gr.Text(label=i18n("分段随机抽样结果"), value="", interactive=False) - with gr.Accordion(label=i18n("第二步:基于参考音频和测试文本,执行批量推理"), open=False): + with gr.Tab(label=i18n("第二步:基于参考音频和测试文本,执行批量推理"), open=False): gr.Markdown(value=i18n("2.1:配置推理服务参数信息,参考音频路径/文本和角色情绪二选一,如果是角色情绪,需要先执行第四步," "将参考音频打包配置到推理服务下,在推理前,请确认完整请求地址是否与正常使用时的一致,包括角色名称,尤其是文本分隔符是否正确")) default_model_inference_voice_dir = common.check_path_existence_and_return( @@ -521,7 +520,7 @@ def save_role(text_role): with gr.Row(): button_model_inference = gr.Button(i18n("开启批量推理"), variant="primary") text_model_inference_info = gr.Text(label=i18n("批量推理结果"), value="", interactive=False) - with gr.Accordion(label=i18n("第三步:进行参考音频效果校验与筛选"), open=False): + with gr.Tab(label=i18n("第三步:进行参考音频效果校验与筛选"), open=False): gr.Markdown(value=i18n("3.1:启动asr,获取推理音频文本")) default_asr_audio_dir = common.check_path_existence_and_return( os.path.join(default_base_dir, params.inference_audio_dir, params.inference_audio_text_aggregation_dir)) @@ -587,7 +586,7 @@ def save_role(text_role): text_sync_ref_info = gr.Text(label=i18n("同步结果"), value="", interactive=False) button_sync_ref_audio.click(sync_ref_audio, [text_work_space_dir, text_role, text_sync_ref_audio_dir, text_sync_inference_audio_dir], 
[text_sync_ref_info]) - with gr.Accordion("第四步:生成参考音频配置文本", open=False): + with gr.Tab("第四步:生成参考音频配置文本", open=False): gr.Markdown(value=i18n("4.1:编辑模板")) default_template_path = params.default_template_path default_template_content = common.read_file(default_template_path) diff --git a/Ref_Audio_Selector/tool/delete_inference_with_ref.py b/Ref_Audio_Selector/tool/delete_inference_with_ref.py index 5231dde2..d00ef522 100644 --- a/Ref_Audio_Selector/tool/delete_inference_with_ref.py +++ b/Ref_Audio_Selector/tool/delete_inference_with_ref.py @@ -7,46 +7,40 @@ def remove_matching_audio_files_in_text_dir(text_dir, emotions_list): count = 0 + emotions = [item['emotion'] for item in emotions_list] for root, dirs, files in os.walk(text_dir): - for emotion_dict in emotions_list: - emotion_tag = emotion_dict['emotion'] - wav_file_name = f"{emotion_tag}.wav" - file_path = os.path.join(root, wav_file_name) - if not os.path.exists(file_path): - logger.info(f"Deleting file: {file_path}") - try: - os.remove(file_path) - count += 1 - except Exception as e: - logger.error(f"Error deleting file {file_path}: {e}") + for file in files: + if file.endswith(".wav"): + emotion_tag = os.path.basename(file)[:-4] + if emotion_tag not in emotions: + file_path = os.path.join(root, file) + logger.info(f"Deleting file: {file_path}") + try: + os.remove(file_path) + count += 1 + except Exception as e: + logger.error(f"Error deleting file {file_path}: {e}") + return count def delete_emotion_subdirectories(emotion_dir, emotions_list): - """ - 根据给定的情绪数组,删除emotion目录下对应情绪标签的子目录。 + count = 0 - 参数: - emotions_list (List[Dict]): 每个字典包含'emotion'字段。 - base_dir (str): 子目录所在的基础目录,默认为'emotion')。 + emotions = [item['emotion'] for item in emotions_list] + + for entry in os.listdir(emotion_dir): + entry_path = os.path.join(emotion_dir, entry) + if os.path.isdir(entry_path): + if entry not in emotions: + logger.info(f"Deleting directory: {entry_path}") + try: + # 使用shutil.rmtree删除整个子目录及其内容 + shutil.rmtree(entry_path) + count += 1 + except Exception as e: + logger.error(f"Error deleting directory {entry_path}: {e}") - 返回: - None - """ - count = 0 - for emotion_dict in emotions_list: - emotion_folder = emotion_dict['emotion'] - folder_path = os.path.join(emotion_dir, emotion_folder) - - # 检查emotion子目录是否存在 - if not os.path.isdir(folder_path): - logger.info(f"Deleting directory: {folder_path}") - try: - # 使用shutil.rmtree删除整个子目录及其内容 - shutil.rmtree(folder_path) - count += 1 - except Exception as e: - logger.error(f"Error deleting directory {folder_path}: {e}") return count diff --git a/Ref_Audio_Selector/tool/speaker_verification/voice_similarity.py b/Ref_Audio_Selector/tool/speaker_verification/voice_similarity.py index bad0d3de..3c7ec718 100644 --- a/Ref_Audio_Selector/tool/speaker_verification/voice_similarity.py +++ b/Ref_Audio_Selector/tool/speaker_verification/voice_similarity.py @@ -4,8 +4,8 @@ import torchaudio.transforms as T import platform import Ref_Audio_Selector.config_param.config_params as params +import Ref_Audio_Selector.config_param.log_config as log_config from Ref_Audio_Selector.common.time_util import timeit_decorator -from Ref_Audio_Selector.config_param.log_config import logger from modelscope.pipelines import pipeline @@ -46,7 +46,7 @@ def compare_audio_and_generate_report(reference_audio_path, comparison_dir_path, 'path': audio_path }) has_processed_count += 1 - logger.info(f'进度:{has_processed_count}/{all_count}') + log_config.logger.info(f'进度:{has_processed_count}/{all_count}') # Step 3: 根据相似度分数降序排列 
similarity_scores.sort(key=lambda x: x['score'], reverse=True) From c36d0a93fe9760efd390a916650019d09171f4a1 Mon Sep 17 00:00:00 2001 From: Downupanddownup Date: Sat, 27 Apr 2024 01:27:57 +0800 Subject: [PATCH 30/72] =?UTF-8?q?api=E6=8E=A8=E7=90=86=EF=BC=8C=E6=B7=BB?= =?UTF-8?q?=E5=8A=A0=E5=A4=9A=E8=BF=9B=E7=A8=8B=E8=AF=B7=E6=B1=82?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- Ref_Audio_Selector/common/time_util.py | 3 +- Ref_Audio_Selector/config_param/log_config.py | 1 + .../ref_audio_selector_webui.py | 342 +++++++++--------- Ref_Audio_Selector/tool/audio_inference.py | 37 +- 4 files changed, 210 insertions(+), 173 deletions(-) diff --git a/Ref_Audio_Selector/common/time_util.py b/Ref_Audio_Selector/common/time_util.py index 82d63b8c..b58ce3ea 100644 --- a/Ref_Audio_Selector/common/time_util.py +++ b/Ref_Audio_Selector/common/time_util.py @@ -1,4 +1,5 @@ import time +import os from Ref_Audio_Selector.config_param.log_config import p_logger import Ref_Audio_Selector.config_param.config_params as params @@ -26,7 +27,7 @@ def wrapper(*args, **kwargs): elapsed_time = end_time - start_time # 计算执行耗时 # 记录日志内容 - log_message = f"{func.__name__} 执行耗时: {elapsed_time:.6f} 秒" + log_message = f"进程ID: {os.getpid()}, {func.__name__} 执行耗时: {elapsed_time:.6f} 秒" p_logger.info(log_message) return func_result diff --git a/Ref_Audio_Selector/config_param/log_config.py b/Ref_Audio_Selector/config_param/log_config.py index b10e5c96..1cb1fd84 100644 --- a/Ref_Audio_Selector/config_param/log_config.py +++ b/Ref_Audio_Selector/config_param/log_config.py @@ -13,6 +13,7 @@ def create_general_logger(): # 可以设置控制台输出的格式 console_formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') console_handler.setFormatter(console_formatter) + console_handler.encoding = 'utf-8' # 设置字符编码为utf-8 # 创建一个用于常规日志的处理器 general_handler = logging.FileHandler(f"{params.log_dir}/{current_date}.log", mode='a', encoding='utf-8') diff --git a/Ref_Audio_Selector/ref_audio_selector_webui.py b/Ref_Audio_Selector/ref_audio_selector_webui.py index f8bc8eb4..0589c751 100644 --- a/Ref_Audio_Selector/ref_audio_selector_webui.py +++ b/Ref_Audio_Selector/ref_audio_selector_webui.py @@ -184,9 +184,10 @@ def model_inference(text_work_space_dir, text_role, text_model_inference_voice_d if len(ref_audio_manager.get_audio_list()) == 0: raise Exception("待推理的参考音频不能为空") - time_consuming, _ = time_util.time_monitor(audio_inference.generate_audio_files)(url_composer, text_list, - ref_audio_manager.get_ref_audio_list(), - inference_dir) + time_consuming, _ = time_util.time_monitor(audio_inference.generate_audio_files_parallel)(url_composer, + text_list, + ref_audio_manager.get_ref_audio_list(), + inference_dir, 3) text_model_inference_info = f"耗时:{time_consuming:0.1f}秒;推理成功:生成目录{inference_dir}" @@ -449,171 +450,176 @@ def save_role(text_role): rw_param.write(rw_param.role, text_role) -default_work_space_dir = rw_param.read(rw_param.work_dir) -default_role = rw_param.read(rw_param.role) -default_base_dir = os.path.join(default_work_space_dir, default_role) - -with gr.Blocks() as app: - gr.Markdown(value=i18n("基本介绍:这是一个从训练素材中,批量提取参考音频,并进行效果评估与配置生成的工具")) - with gr.Row(): - text_work_space_dir = gr.Text(label=i18n("工作目录,后续操作所生成文件都会保存在此目录下"), - value=default_work_space_dir) - text_role = gr.Text(label=i18n("角色名称"), value=default_role) - text_work_space_dir.input(save_work_dir, [text_work_space_dir, text_role], [text_role]) - text_role.input(save_role, [text_role], []) - with 
gr.Tab(label=i18n("第一步:基于训练素材,生成待选参考音频列表"), open=False): - gr.Markdown(value=i18n("1.1:选择list文件,并提取3-10秒的素材作为参考候选")) - text_list_input = gr.Text(label=i18n("请输入list文件路径"), value="") - with gr.Row(): - button_convert_from_list = gr.Button(i18n("开始生成待参考列表"), variant="primary") - text_convert_from_list_info = gr.Text(label=i18n("参考列表生成结果"), value="", interactive=False) - gr.Markdown(value=i18n("1.2:选择基准音频,执行相似度匹配,并分段随机抽样")) - default_sample_dir = common.check_path_existence_and_return( - os.path.join(default_base_dir, params.list_to_convert_reference_audio_dir)) - text_sample_dir = gr.Text(label=i18n("参考音频抽样目录"), value=default_sample_dir, interactive=True) - button_convert_from_list.click(convert_from_list, [text_work_space_dir, text_role, text_list_input], - [text_convert_from_list_info, text_sample_dir]) - with gr.Row(): - text_base_voice_path = gr.Text(label=i18n("请输入基准音频路径"), value="") - text_subsection_num = gr.Text(label=i18n("请输入分段数"), value="10") - text_sample_num = gr.Text(label=i18n("请输入每段随机抽样个数"), value="4") - checkbox_similarity_output = gr.Checkbox(label=i18n("是否将相似度匹配结果输出到临时目录?"), show_label=True) - with gr.Row(): - button_sample = gr.Button(i18n("开始分段随机抽样"), variant="primary") - text_sample_info = gr.Text(label=i18n("分段随机抽样结果"), value="", interactive=False) - with gr.Tab(label=i18n("第二步:基于参考音频和测试文本,执行批量推理"), open=False): - gr.Markdown(value=i18n("2.1:配置推理服务参数信息,参考音频路径/文本和角色情绪二选一,如果是角色情绪,需要先执行第四步," - "将参考音频打包配置到推理服务下,在推理前,请确认完整请求地址是否与正常使用时的一致,包括角色名称,尤其是文本分隔符是否正确")) - default_model_inference_voice_dir = common.check_path_existence_and_return( - os.path.join(default_base_dir, params.reference_audio_dir)) - text_model_inference_voice_dir = gr.Text(label=i18n("待推理的参考音频所在目录"), - value=default_model_inference_voice_dir, interactive=True) - text_url = gr.Text(label=i18n("请输入推理服务请求地址与参数"), value=rw_param.read(rw_param.generate_audio_url)) +if __name__ == '__main__': + default_work_space_dir = rw_param.read(rw_param.work_dir) + default_role = rw_param.read(rw_param.role) + default_base_dir = os.path.join(default_work_space_dir, default_role) + + with gr.Blocks() as app: + gr.Markdown(value=i18n("基本介绍:这是一个从训练素材中,批量提取参考音频,并进行效果评估与配置生成的工具")) with gr.Row(): - text_text = gr.Text(label=i18n("请输入文本参数名"), value=rw_param.read(rw_param.text_param)) - text_ref_path = gr.Text(label=i18n("请输入参考音频路径参数名"), - value=rw_param.read(rw_param.ref_path_param)) - text_ref_text = gr.Text(label=i18n("请输入参考音频文本参数名"), - value=rw_param.read(rw_param.ref_text_param)) - text_emotion = gr.Text(label=i18n("请输入角色情绪参数名"), value=rw_param.read(rw_param.emotion_param)) - text_whole_url = gr.Text(label=i18n("完整地址"), value="", interactive=False) - text_url.input(whole_url, [text_url, text_text, text_ref_path, text_ref_text, text_emotion], - [text_whole_url]) - text_url.blur(save_generate_audio_url, [text_url], []) - text_text.input(whole_url, [text_url, text_text, text_ref_path, text_ref_text, text_emotion], - [text_whole_url]) - text_text.blur(save_text_param, [text_text], []) - text_ref_path.input(whole_url, [text_url, text_text, text_ref_path, text_ref_text, text_emotion], - [text_whole_url]) - text_ref_path.blur(save_ref_path_param, [text_ref_path], []) - text_ref_text.input(whole_url, [text_url, text_text, text_ref_path, text_ref_text, text_emotion], - [text_whole_url]) - text_ref_text.blur(save_ref_text_param, [text_ref_text], []) - text_emotion.input(whole_url, [text_url, text_text, text_ref_path, text_ref_text, text_emotion], + text_work_space_dir = gr.Text(label=i18n("工作目录,后续操作所生成文件都会保存在此目录下"), + 
value=default_work_space_dir) + text_role = gr.Text(label=i18n("角色名称"), value=default_role) + text_work_space_dir.input(save_work_dir, [text_work_space_dir, text_role], [text_role]) + text_role.input(save_role, [text_role], []) + with gr.Tab(label=i18n("第一步:基于训练素材,生成待选参考音频列表"), open=False): + gr.Markdown(value=i18n("1.1:选择list文件,并提取3-10秒的素材作为参考候选")) + text_list_input = gr.Text(label=i18n("请输入list文件路径"), value="") + with gr.Row(): + button_convert_from_list = gr.Button(i18n("开始生成待参考列表"), variant="primary") + text_convert_from_list_info = gr.Text(label=i18n("参考列表生成结果"), value="", interactive=False) + gr.Markdown(value=i18n("1.2:选择基准音频,执行相似度匹配,并分段随机抽样")) + default_sample_dir = common.check_path_existence_and_return( + os.path.join(default_base_dir, params.list_to_convert_reference_audio_dir)) + text_sample_dir = gr.Text(label=i18n("参考音频抽样目录"), value=default_sample_dir, interactive=True) + button_convert_from_list.click(convert_from_list, [text_work_space_dir, text_role, text_list_input], + [text_convert_from_list_info, text_sample_dir]) + with gr.Row(): + text_base_voice_path = gr.Text(label=i18n("请输入基准音频路径"), value="") + text_subsection_num = gr.Text(label=i18n("请输入分段数"), value="10") + text_sample_num = gr.Text(label=i18n("请输入每段随机抽样个数"), value="4") + checkbox_similarity_output = gr.Checkbox(label=i18n("是否将相似度匹配结果输出到临时目录?"), + show_label=True) + with gr.Row(): + button_sample = gr.Button(i18n("开始分段随机抽样"), variant="primary") + text_sample_info = gr.Text(label=i18n("分段随机抽样结果"), value="", interactive=False) + with gr.Tab(label=i18n("第二步:基于参考音频和测试文本,执行批量推理"), open=False): + gr.Markdown(value=i18n("2.1:配置推理服务参数信息,参考音频路径/文本和角色情绪二选一,如果是角色情绪,需要先执行第四步," + "将参考音频打包配置到推理服务下,在推理前,请确认完整请求地址是否与正常使用时的一致,包括角色名称,尤其是文本分隔符是否正确")) + default_model_inference_voice_dir = common.check_path_existence_and_return( + os.path.join(default_base_dir, params.reference_audio_dir)) + text_model_inference_voice_dir = gr.Text(label=i18n("待推理的参考音频所在目录"), + value=default_model_inference_voice_dir, interactive=True) + text_url = gr.Text(label=i18n("请输入推理服务请求地址与参数"), + value=rw_param.read(rw_param.generate_audio_url)) + with gr.Row(): + text_text = gr.Text(label=i18n("请输入文本参数名"), value=rw_param.read(rw_param.text_param)) + text_ref_path = gr.Text(label=i18n("请输入参考音频路径参数名"), + value=rw_param.read(rw_param.ref_path_param)) + text_ref_text = gr.Text(label=i18n("请输入参考音频文本参数名"), + value=rw_param.read(rw_param.ref_text_param)) + text_emotion = gr.Text(label=i18n("请输入角色情绪参数名"), value=rw_param.read(rw_param.emotion_param)) + text_whole_url = gr.Text(label=i18n("完整地址"), value="", interactive=False) + text_url.input(whole_url, [text_url, text_text, text_ref_path, text_ref_text, text_emotion], [text_whole_url]) - text_emotion.blur(save_emotion_param, [text_emotion], []) - gr.Markdown(value=i18n("2.2:配置待推理文本,一句一行,不要太多,10条即可")) - default_test_content_path = params.default_test_text_path - text_test_content = gr.Text(label=i18n("请输入待推理文本路径"), value=default_test_content_path) - gr.Markdown(value=i18n("2.3:启动推理服务,如果还没启动的话")) - gr.Markdown(value=i18n("2.4:开始批量推理,这个过程比较耗时,可以去干点别的")) - with gr.Row(): - button_model_inference = gr.Button(i18n("开启批量推理"), variant="primary") - text_model_inference_info = gr.Text(label=i18n("批量推理结果"), value="", interactive=False) - with gr.Tab(label=i18n("第三步:进行参考音频效果校验与筛选"), open=False): - gr.Markdown(value=i18n("3.1:启动asr,获取推理音频文本")) - default_asr_audio_dir = common.check_path_existence_and_return( - os.path.join(default_base_dir, params.inference_audio_dir, params.inference_audio_text_aggregation_dir)) - 
text_asr_audio_dir = gr.Text(label=i18n("待asr的音频所在目录"), value=default_asr_audio_dir, interactive=True) - with gr.Row(): - dropdown_asr_model = gr.Dropdown( - label=i18n("ASR 模型"), - choices=[], - interactive=True, - value="达摩 ASR (中文)" - ) - dropdown_asr_size = gr.Dropdown( - label=i18n("ASR 模型尺寸"), - choices=["large"], - interactive=True, - value="large" - ) - dropdown_asr_lang = gr.Dropdown( - label=i18n("ASR 语言设置"), - choices=["zh"], - interactive=True, - value="zh" - ) - with gr.Row(): - button_asr = gr.Button(i18n("启动asr"), variant="primary") - text_asr_info = gr.Text(label=i18n("asr结果"), value="", interactive=False) - gr.Markdown(value=i18n("3.2:启动文本相似度分析")) - default_text_similarity_analysis_path = common.check_path_existence_and_return( - os.path.join(default_base_dir, params.asr_filename + '.list')) - text_text_similarity_analysis_path = gr.Text(label=i18n("待分析的文件路径"), - value=default_text_similarity_analysis_path, interactive=True) - button_asr.click(asr, [text_work_space_dir, text_role, text_asr_audio_dir, dropdown_asr_model, - dropdown_asr_size, dropdown_asr_lang], - [text_asr_info, text_text_similarity_analysis_path]) - with gr.Row(): - button_text_similarity_analysis = gr.Button(i18n("启动文本相似度分析"), variant="primary") - text_text_similarity_analysis_info = gr.Text(label=i18n("文本相似度分析结果"), value="", interactive=False) - button_text_similarity_analysis.click(text_similarity_analysis, [text_work_space_dir, text_role, - text_text_similarity_analysis_path], - [text_text_similarity_analysis_info]) - gr.Markdown(value=i18n("3.3:根据相似度分析结果,重点检查最后几条是否存在复读等问题")) - gr.Markdown(value=i18n("3.4:对结果按音频相似度排序,筛选低音质音频")) - with gr.Row(): - text_base_audio_path = gr.Text(label=i18n("请输入基准音频"), value="") - text_compare_audio_dir = gr.Text(label=i18n("请输入待比较的音频文件目录"), value="") - with gr.Row(): - button_similarity_audio_output = gr.Button(i18n("输出相似度-参考音频到临时目录"), variant="primary") - text_similarity_audio_output_info = gr.Text(label=i18n("输出结果"), value="", interactive=False) - button_similarity_audio_output.click(similarity_audio_output, - [text_work_space_dir, text_role, text_base_audio_path, - text_compare_audio_dir], [text_similarity_audio_output_info]) - with gr.Row(): - default_sync_ref_audio_dir = common.check_path_existence_and_return( + text_url.blur(save_generate_audio_url, [text_url], []) + text_text.input(whole_url, [text_url, text_text, text_ref_path, text_ref_text, text_emotion], + [text_whole_url]) + text_text.blur(save_text_param, [text_text], []) + text_ref_path.input(whole_url, [text_url, text_text, text_ref_path, text_ref_text, text_emotion], + [text_whole_url]) + text_ref_path.blur(save_ref_path_param, [text_ref_path], []) + text_ref_text.input(whole_url, [text_url, text_text, text_ref_path, text_ref_text, text_emotion], + [text_whole_url]) + text_ref_text.blur(save_ref_text_param, [text_ref_text], []) + text_emotion.input(whole_url, [text_url, text_text, text_ref_path, text_ref_text, text_emotion], + [text_whole_url]) + text_emotion.blur(save_emotion_param, [text_emotion], []) + gr.Markdown(value=i18n("2.2:配置待推理文本,一句一行,不要太多,10条即可")) + default_test_content_path = params.default_test_text_path + text_test_content = gr.Text(label=i18n("请输入待推理文本路径"), value=default_test_content_path) + gr.Markdown(value=i18n("2.3:启动推理服务,如果还没启动的话")) + gr.Markdown(value=i18n("2.4:开始批量推理,这个过程比较耗时,可以去干点别的")) + with gr.Row(): + button_model_inference = gr.Button(i18n("开启批量推理"), variant="primary") + text_model_inference_info = gr.Text(label=i18n("批量推理结果"), value="", interactive=False) + with 
gr.Tab(label=i18n("第三步:进行参考音频效果校验与筛选"), open=False): + gr.Markdown(value=i18n("3.1:启动asr,获取推理音频文本")) + default_asr_audio_dir = common.check_path_existence_and_return( + os.path.join(default_base_dir, params.inference_audio_dir, params.inference_audio_text_aggregation_dir)) + text_asr_audio_dir = gr.Text(label=i18n("待asr的音频所在目录"), value=default_asr_audio_dir, + interactive=True) + with gr.Row(): + dropdown_asr_model = gr.Dropdown( + label=i18n("ASR 模型"), + choices=[], + interactive=True, + value="达摩 ASR (中文)" + ) + dropdown_asr_size = gr.Dropdown( + label=i18n("ASR 模型尺寸"), + choices=["large"], + interactive=True, + value="large" + ) + dropdown_asr_lang = gr.Dropdown( + label=i18n("ASR 语言设置"), + choices=["zh"], + interactive=True, + value="zh" + ) + with gr.Row(): + button_asr = gr.Button(i18n("启动asr"), variant="primary") + text_asr_info = gr.Text(label=i18n("asr结果"), value="", interactive=False) + gr.Markdown(value=i18n("3.2:启动文本相似度分析")) + default_text_similarity_analysis_path = common.check_path_existence_and_return( + os.path.join(default_base_dir, params.asr_filename + '.list')) + text_text_similarity_analysis_path = gr.Text(label=i18n("待分析的文件路径"), + value=default_text_similarity_analysis_path, interactive=True) + button_asr.click(asr, [text_work_space_dir, text_role, text_asr_audio_dir, dropdown_asr_model, + dropdown_asr_size, dropdown_asr_lang], + [text_asr_info, text_text_similarity_analysis_path]) + with gr.Row(): + button_text_similarity_analysis = gr.Button(i18n("启动文本相似度分析"), variant="primary") + text_text_similarity_analysis_info = gr.Text(label=i18n("文本相似度分析结果"), value="", + interactive=False) + button_text_similarity_analysis.click(text_similarity_analysis, [text_work_space_dir, text_role, + text_text_similarity_analysis_path], + [text_text_similarity_analysis_info]) + gr.Markdown(value=i18n("3.3:根据相似度分析结果,重点检查最后几条是否存在复读等问题")) + gr.Markdown(value=i18n("3.4:对结果按音频相似度排序,筛选低音质音频")) + with gr.Row(): + text_base_audio_path = gr.Text(label=i18n("请输入基准音频"), value="") + text_compare_audio_dir = gr.Text(label=i18n("请输入待比较的音频文件目录"), value="") + with gr.Row(): + button_similarity_audio_output = gr.Button(i18n("输出相似度-参考音频到临时目录"), variant="primary") + text_similarity_audio_output_info = gr.Text(label=i18n("输出结果"), value="", interactive=False) + button_similarity_audio_output.click(similarity_audio_output, + [text_work_space_dir, text_role, text_base_audio_path, + text_compare_audio_dir], [text_similarity_audio_output_info]) + with gr.Row(): + default_sync_ref_audio_dir = common.check_path_existence_and_return( + os.path.join(default_base_dir, params.reference_audio_dir)) + text_sync_ref_audio_dir = gr.Text(label=i18n("参考音频路径"), value=default_sync_ref_audio_dir, + interactive=True) + default_sync_inference_audio_dir = common.check_path_existence_and_return( + os.path.join(default_base_dir, params.inference_audio_dir)) + text_sync_inference_audio_dir = gr.Text(label=i18n("被同步的推理音频路径"), + value=default_sync_inference_audio_dir, interactive=True) + with gr.Row(): + button_sync_ref_audio = gr.Button(i18n("将参考音频的删除情况,同步到推理音频目录"), variant="primary") + text_sync_ref_info = gr.Text(label=i18n("同步结果"), value="", interactive=False) + button_sync_ref_audio.click(sync_ref_audio, [text_work_space_dir, text_role, text_sync_ref_audio_dir, + text_sync_inference_audio_dir], [text_sync_ref_info]) + with gr.Tab("第四步:生成参考音频配置文本", open=False): + gr.Markdown(value=i18n("4.1:编辑模板")) + default_template_path = params.default_template_path + default_template_content = common.read_file(default_template_path) + 
text_template_path = gr.Text(label=i18n("模板文件路径"), value=default_template_path, interactive=True) + text_template = gr.Text(label=i18n("模板内容"), value=default_template_content, lines=10) + gr.Markdown(value=i18n("4.2:生成配置")) + default_sync_ref_audio_dir2 = common.check_path_existence_and_return( os.path.join(default_base_dir, params.reference_audio_dir)) - text_sync_ref_audio_dir = gr.Text(label=i18n("参考音频路径"), value=default_sync_ref_audio_dir, - interactive=True) - default_sync_inference_audio_dir = common.check_path_existence_and_return( - os.path.join(default_base_dir, params.inference_audio_dir)) - text_sync_inference_audio_dir = gr.Text(label=i18n("被同步的推理音频路径"), - value=default_sync_inference_audio_dir, interactive=True) - with gr.Row(): - button_sync_ref_audio = gr.Button(i18n("将参考音频的删除情况,同步到推理音频目录"), variant="primary") - text_sync_ref_info = gr.Text(label=i18n("同步结果"), value="", interactive=False) - button_sync_ref_audio.click(sync_ref_audio, [text_work_space_dir, text_role, text_sync_ref_audio_dir, - text_sync_inference_audio_dir], [text_sync_ref_info]) - with gr.Tab("第四步:生成参考音频配置文本", open=False): - gr.Markdown(value=i18n("4.1:编辑模板")) - default_template_path = params.default_template_path - default_template_content = common.read_file(default_template_path) - text_template_path = gr.Text(label=i18n("模板文件路径"), value=default_template_path, interactive=True) - text_template = gr.Text(label=i18n("模板内容"), value=default_template_content, lines=10) - gr.Markdown(value=i18n("4.2:生成配置")) - default_sync_ref_audio_dir2 = common.check_path_existence_and_return( - os.path.join(default_base_dir, params.reference_audio_dir)) - text_sync_ref_audio_dir2 = gr.Text(label=i18n("参考音频路径"), value=default_sync_ref_audio_dir2, - interactive=True) - with gr.Row(): - button_create_config = gr.Button(i18n("生成配置"), variant="primary") - text_create_config_info = gr.Text(label=i18n("生成结果"), value="", interactive=False) - button_create_config.click(create_config, - [text_work_space_dir, text_role, text_template, text_sync_ref_audio_dir2], - [text_create_config_info]) - button_sample.click(sample, [text_work_space_dir, text_role, text_sample_dir, text_base_voice_path, - text_subsection_num, text_sample_num, checkbox_similarity_output], - [text_sample_info, text_model_inference_voice_dir, text_sync_ref_audio_dir, - text_sync_ref_audio_dir2]) - button_model_inference.click(model_inference, - [text_work_space_dir, text_role, text_model_inference_voice_dir, text_url, - text_text, text_ref_path, text_ref_text, text_emotion, - text_test_content], - [text_model_inference_info, text_asr_audio_dir, text_sync_inference_audio_dir]) - -app.launch( - server_port=9423, - quiet=True, -) + text_sync_ref_audio_dir2 = gr.Text(label=i18n("参考音频路径"), value=default_sync_ref_audio_dir2, + interactive=True) + with gr.Row(): + button_create_config = gr.Button(i18n("生成配置"), variant="primary") + text_create_config_info = gr.Text(label=i18n("生成结果"), value="", interactive=False) + button_create_config.click(create_config, + [text_work_space_dir, text_role, text_template, text_sync_ref_audio_dir2], + [text_create_config_info]) + button_sample.click(sample, [text_work_space_dir, text_role, text_sample_dir, text_base_voice_path, + text_subsection_num, text_sample_num, checkbox_similarity_output], + [text_sample_info, text_model_inference_voice_dir, text_sync_ref_audio_dir, + text_sync_ref_audio_dir2]) + button_model_inference.click(model_inference, + [text_work_space_dir, text_role, text_model_inference_voice_dir, text_url, + text_text, 
text_ref_path, text_ref_text, text_emotion, + text_test_content], + [text_model_inference_info, text_asr_audio_dir, text_sync_inference_audio_dir]) + + app.launch( + server_port=9423, + quiet=True, + ) diff --git a/Ref_Audio_Selector/tool/audio_inference.py b/Ref_Audio_Selector/tool/audio_inference.py index 7f6e3cc8..b146e282 100644 --- a/Ref_Audio_Selector/tool/audio_inference.py +++ b/Ref_Audio_Selector/tool/audio_inference.py @@ -1,12 +1,20 @@ +import time import os import requests import itertools +import multiprocessing +from multiprocessing import Pool +from concurrent.futures import ProcessPoolExecutor +import numpy as np import Ref_Audio_Selector.config_param.config_params as params from Ref_Audio_Selector.common.time_util import timeit_decorator from urllib.parse import urlparse, parse_qs, urlencode, urlunparse, quote -from Ref_Audio_Selector.config_param.log_config import logger +from Ref_Audio_Selector.config_param.log_config import logger, p_logger +# 假设手动指定端口范围为9400-9500 +available_ports = list(range(9400, 9500)) + class URLComposer: def __init__(self, base_url, emotion_param_name, text_param_name, ref_path_param_name, ref_text_param_name): self.base_url = base_url @@ -74,8 +82,24 @@ def safe_encode_query_params(original_url): return encoded_url -@timeit_decorator -def generate_audio_files(url_composer, text_list, emotion_list, output_dir_path): +def generate_audio_files_parallel(url_composer, text_list, emotion_list, output_dir_path, num_processes=None): + if num_processes is None: + num_processes = multiprocessing.cpu_count() + + num_processes = min(num_processes, len(available_ports)) # 限制进程数不超过可用端口数 + + # 将emotion_list均匀分成num_processes个子集 + emotion_groups = np.array_split(emotion_list, num_processes) + + with ProcessPoolExecutor(max_workers=num_processes) as executor: + futures = [executor.submit(generate_audio_files_for_emotion_group, url_composer, text_list, group, output_dir_path) + for group in emotion_groups] + for future in futures: + future.result() # 等待所有进程完成 + + +def generate_audio_files_for_emotion_group(url_composer, text_list, emotion_list, output_dir_path): + start_time = time.perf_counter() # 使用 perf_counter 获取高精度计时起点 # Ensure the output directory exists output_dir = os.path.abspath(output_dir_path) os.makedirs(output_dir, exist_ok=True) @@ -108,7 +132,7 @@ def generate_audio_files(url_composer, text_list, emotion_list, output_dir_path) # 检查是否已经存在对应的音频文件,如果存在则跳过 if os.path.exists(text_subdir_text_file_path) and os.path.exists(emotion_subdir_emotion_file_path): has_generated_count += 1 - logger.info(f"进度: {has_generated_count}/{all_count}") + logger.info(f"进程ID: {os.getpid()}, 进度: {has_generated_count}/{all_count}") continue if url_composer.is_emotion(): @@ -126,6 +150,11 @@ def generate_audio_files(url_composer, text_list, emotion_list, output_dir_path) has_generated_count += 1 logger.info(f"进度: {has_generated_count}/{all_count}") + end_time = time.perf_counter() # 获取计时终点 + elapsed_time = end_time - start_time # 计算执行耗时 + # 记录日志内容 + log_message = f"进程ID: {os.getpid()}, generate_audio_files_for_emotion_group 执行耗时: {elapsed_time:.6f} 秒" + p_logger.info(log_message) def inference_audio_from_api(url): From 1a7cf580e01db76e228f754a51ae7b4113a63fe3 Mon Sep 17 00:00:00 2001 From: Downupanddownup Date: Sat, 27 Apr 2024 11:24:57 +0800 Subject: [PATCH 31/72] =?UTF-8?q?=E5=88=9B=E5=BB=BA=E6=97=A5=E5=BF=97?= =?UTF-8?q?=E7=9B=AE=E5=BD=95?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- 
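Reviewer note: logging.FileHandler opens its target file as soon as the handler is constructed (delay defaults to False), so it raises FileNotFoundError on a fresh checkout where the log directory does not exist yet; the makedirs guards added below therefore run before each handler is created. A minimal sketch of the pattern, with an illustrative helper name that is not part of this project:

    import logging
    import os

    def make_file_logger(log_dir, name='general'):
        # The directory must exist before FileHandler is constructed,
        # because the handler opens its file eagerly.
        os.makedirs(log_dir, exist_ok=True)  # idempotent; safe on every start
        logger = logging.getLogger(name)
        handler = logging.FileHandler(os.path.join(log_dir, f'{name}.log'),
                                      mode='a', encoding='utf-8')
        handler.setFormatter(
            logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
        logger.addHandler(handler)
        return logger
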
Ref_Audio_Selector/config_param/log_config.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/Ref_Audio_Selector/config_param/log_config.py b/Ref_Audio_Selector/config_param/log_config.py index 1cb1fd84..fda5a3c6 100644 --- a/Ref_Audio_Selector/config_param/log_config.py +++ b/Ref_Audio_Selector/config_param/log_config.py @@ -1,4 +1,5 @@ import logging +import os import datetime import Ref_Audio_Selector.config_param.config_params as params @@ -15,6 +16,8 @@ def create_general_logger(): console_handler.setFormatter(console_formatter) console_handler.encoding = 'utf-8' # 设置字符编码为utf-8 + os.makedirs(params.log_dir, exist_ok=True) + # 创建一个用于常规日志的处理器 general_handler = logging.FileHandler(f"{params.log_dir}/{current_date}.log", mode='a', encoding='utf-8') # general_handler.setLevel(logging.INFO) @@ -37,6 +40,9 @@ def create_general_logger(): def create_performance_logger(): # 获取当前日期,用于文件名和日志内容 current_date = datetime.datetime.now().strftime('%Y-%m-%d') + + os.makedirs(params.time_log_print_dir, exist_ok=True) + # 创建一个专用于性能监控日志的处理器 performance_handler = logging.FileHandler( f"{params.time_log_print_dir}/{current_date}.log", mode='a', encoding='utf-8') From 25b65cdfd00400ab9bd3477830cd66c88491e82d Mon Sep 17 00:00:00 2001 From: Downupanddownup Date: Sat, 27 Apr 2024 22:09:03 +0800 Subject: [PATCH 32/72] =?UTF-8?q?=E8=B0=83=E6=95=B4ui=E5=B8=83=E5=B1=80?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../ref_audio_selector_webui.py | 102 ++++++++++++++---- Ref_Audio_Selector/tool/audio_inference.py | 5 - 2 files changed, 82 insertions(+), 25 deletions(-) diff --git a/Ref_Audio_Selector/ref_audio_selector_webui.py b/Ref_Audio_Selector/ref_audio_selector_webui.py index 0589c751..01670337 100644 --- a/Ref_Audio_Selector/ref_audio_selector_webui.py +++ b/Ref_Audio_Selector/ref_audio_selector_webui.py @@ -104,7 +104,7 @@ def start_similarity_analysis(work_space_dir, sample_dir, base_voice_path, need_ # 基于一个基准音频,从参考音频目录中进行分段抽样 def sample(text_work_space_dir, text_role, text_sample_dir, text_base_voice_path, - text_subsection_num, text_sample_num, checkbox_similarity_output): + slider_subsection_num, slider_sample_num, checkbox_similarity_output): text_work_space_dir, text_sample_dir, text_base_voice_path \ = common.batch_clean_paths([text_work_space_dir, text_sample_dir, text_base_voice_path]) @@ -116,9 +116,9 @@ def sample(text_work_space_dir, text_role, text_sample_dir, text_base_voice_path raise Exception("参考音频抽样目录不能为空,请先完成上一步操作") if text_base_voice_path is None or text_base_voice_path == '': raise Exception("基准音频路径不能为空") - if text_subsection_num is None or text_subsection_num == '': + if slider_subsection_num is None or slider_subsection_num == '': raise Exception("分段数不能为空") - if text_sample_num is None or text_sample_num == '': + if slider_sample_num is None or slider_sample_num == '': raise Exception("每段随机抽样个数不能为空") ref_audio_dir = os.path.join(base_role_dir, params.reference_audio_dir) @@ -135,7 +135,7 @@ def sample(text_work_space_dir, text_role, text_sample_dir, text_base_voice_path if similarity_list is None: raise Exception("相似度分析失败") - audio_similarity.sample(ref_audio_dir, similarity_list, int(text_subsection_num), int(text_sample_num)) + audio_similarity.sample(ref_audio_dir, similarity_list, slider_subsection_num, slider_sample_num) except Exception as e: logger.error("发生异常: \n%s", traceback.format_exc()) @@ -463,7 +463,7 @@ def save_role(text_role): text_role = gr.Text(label=i18n("角色名称"), value=default_role) 
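Reviewer note: the persistence handlers just below are still bound to .input, which fires on every keystroke and rewrites the backing file once per character; the next commit in this series rebinds them to .blur, which fires once when the field loses focus. A hypothetical minimal illustration of the difference:

    import gradio as gr

    def persist(value):
        # One disk write per blur event, instead of one per keystroke.
        with open('setting.txt', 'w', encoding='utf-8') as f:
            f.write(value.strip())

    with gr.Blocks() as demo:
        box = gr.Text(label='setting')
        box.blur(persist, [box], [])    # fires when focus leaves the field
        # box.input(persist, [box], []) # would fire on every keystroke
    # demo.launch()
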
text_work_space_dir.input(save_work_dir, [text_work_space_dir, text_role], [text_role]) text_role.input(save_role, [text_role], []) - with gr.Tab(label=i18n("第一步:基于训练素材,生成待选参考音频列表"), open=False): + with gr.Tab(label=i18n("第一步:基于训练素材,生成待选参考音频列表")): gr.Markdown(value=i18n("1.1:选择list文件,并提取3-10秒的素材作为参考候选")) text_list_input = gr.Text(label=i18n("请输入list文件路径"), value="") with gr.Row(): @@ -477,20 +477,67 @@ def save_role(text_role): [text_convert_from_list_info, text_sample_dir]) with gr.Row(): text_base_voice_path = gr.Text(label=i18n("请输入基准音频路径"), value="") - text_subsection_num = gr.Text(label=i18n("请输入分段数"), value="10") - text_sample_num = gr.Text(label=i18n("请输入每段随机抽样个数"), value="4") + slider_subsection_num = gr.Slider(minimum=1, maximum=10, step=1, label=i18n("请输入分段数"), value=5, + interactive=True) + slider_sample_num = gr.Slider(minimum=1, maximum=10, step=1, label=i18n("请输入每段随机抽样个数"), + value=4, interactive=True) checkbox_similarity_output = gr.Checkbox(label=i18n("是否将相似度匹配结果输出到临时目录?"), show_label=True) with gr.Row(): button_sample = gr.Button(i18n("开始分段随机抽样"), variant="primary") text_sample_info = gr.Text(label=i18n("分段随机抽样结果"), value="", interactive=False) - with gr.Tab(label=i18n("第二步:基于参考音频和测试文本,执行批量推理"), open=False): - gr.Markdown(value=i18n("2.1:配置推理服务参数信息,参考音频路径/文本和角色情绪二选一,如果是角色情绪,需要先执行第四步," - "将参考音频打包配置到推理服务下,在推理前,请确认完整请求地址是否与正常使用时的一致,包括角色名称,尤其是文本分隔符是否正确")) + with gr.Tab(label=i18n("第二步:基于参考音频和测试文本,执行批量推理")): default_model_inference_voice_dir = common.check_path_existence_and_return( os.path.join(default_base_dir, params.reference_audio_dir)) text_model_inference_voice_dir = gr.Text(label=i18n("待推理的参考音频所在目录"), value=default_model_inference_voice_dir, interactive=True) + gr.Markdown(value=i18n("2.1:启动推理服务,并配置模型参数")) + with gr.Accordion(label=i18n("详情")): + with gr.Tab(label=i18n("主项目下api服务")): + gr.Markdown(value=i18n("2.1.1:启动服务")) + with gr.Row(): + gr.Button(i18n("启动api"), variant="primary") + gr.Text(label=i18n("api启动信息"), value="", interactive=False) + gr.Markdown(value=i18n("2.1.2:设置模型参数")) + gr.Text(label=i18n("请输入api服务模型切换接口地址"), value="", interactive=True) + with gr.Row(): + gr.Dropdown(label=i18n("GPT模型列表"), choices=[], value="", interactive=True) + gr.Dropdown(label=i18n("SoVITS模型列表"), choices=[], value="", interactive=True) + gr.Button(i18n("刷新模型路径"), variant="primary") + with gr.Row(): + gr.Text(label=i18n("GPT模型参数名"), value="", interactive=True) + gr.Text(label=i18n("SoVITS模型参数名"), value="", interactive=True) + gr.Markdown(value=i18n("2.1.3:发起设置请求")) + gr.Text(label=i18n("完整的模型参数设置请求地址"), value="", interactive=False) + with gr.Row(): + gr.Button(i18n("发起模型设置请求"), variant="primary") + gr.Text(label=i18n("设置请求结果"), value="", interactive=False) + with gr.Tab(label=i18n("fast项目下api_v2服务")): + gr.Markdown(value=i18n("2.1.1:请到你的项目下,启动服务")) + gr.Markdown(value=i18n("2.1.2:设置GPT模型参数")) + gr.Text(label=i18n("请输入api服务GPT模型切换接口地址"), value="", interactive=True) + with gr.Row(): + gr.Text(label=i18n("GPT模型参数名"), value="", interactive=True) + gr.Dropdown(label=i18n("GPT模型列表"), choices=[], value="", interactive=True) + gr.Button(i18n("刷新模型路径"), variant="primary") + gr.Text(label=i18n("完整的GPT模型参数设置请求地址"), value="", interactive=False) + with gr.Row(): + gr.Button(i18n("发起GPT模型设置请求"), variant="primary") + gr.Text(label=i18n("设置请求结果"), value="", interactive=False) + gr.Markdown(value=i18n("2.1.3:设置SoVITS模型参数")) + gr.Text(label=i18n("请输入api服务SoVITS模型切换接口地址"), value="", interactive=True) + with gr.Row(): + gr.Text(label=i18n("SoVITS模型参数名"), value="", interactive=True) + 
gr.Dropdown(label=i18n("SoVITS模型列表"), choices=[], value="", interactive=True) + gr.Button(i18n("刷新模型路径"), variant="primary") + gr.Text(label=i18n("完整的SoVITS模型参数设置请求地址"), value="", interactive=False) + with gr.Row(): + gr.Button(i18n("发起SoVITS模型设置请求"), variant="primary") + gr.Text(label=i18n("设置请求结果"), value="", interactive=False) + with gr.Tab(label=i18n("第三方推理服务")): + gr.Markdown(value=i18n("启动第三方推理服务,并完成参考音频打包,模型参数设置等操作")) + gr.Markdown(value=i18n("2.2:配置推理服务参数信息,参考音频路径/文本和角色情绪二选一,如果是角色情绪,需要先执行第四步," + "将参考音频打包配置到推理服务下,在推理前,请确认完整请求地址是否与正常使用时的一致,包括角色名称,尤其是文本分隔符是否正确")) text_url = gr.Text(label=i18n("请输入推理服务请求地址与参数"), value=rw_param.read(rw_param.generate_audio_url)) with gr.Row(): @@ -516,15 +563,16 @@ def save_role(text_role): text_emotion.input(whole_url, [text_url, text_text, text_ref_path, text_ref_text, text_emotion], [text_whole_url]) text_emotion.blur(save_emotion_param, [text_emotion], []) - gr.Markdown(value=i18n("2.2:配置待推理文本,一句一行,不要太多,10条即可")) + gr.Markdown(value=i18n("2.3:配置待推理文本,一句一行,不要太多,10条即可")) default_test_content_path = params.default_test_text_path text_test_content = gr.Text(label=i18n("请输入待推理文本路径"), value=default_test_content_path) - gr.Markdown(value=i18n("2.3:启动推理服务,如果还没启动的话")) gr.Markdown(value=i18n("2.4:开始批量推理,这个过程比较耗时,可以去干点别的")) + gr.Slider(minimum=1, maximum=10, step=1, label=i18n("请输入请求并发数,会根据此数创建对应数量的子进程并行发起推理请求"), value=3, + interactive=True) with gr.Row(): button_model_inference = gr.Button(i18n("开启批量推理"), variant="primary") text_model_inference_info = gr.Text(label=i18n("批量推理结果"), value="", interactive=False) - with gr.Tab(label=i18n("第三步:进行参考音频效果校验与筛选"), open=False): + with gr.Tab(label=i18n("第三步:进行参考音频效果校验与筛选")): gr.Markdown(value=i18n("3.1:启动asr,获取推理音频文本")) default_asr_audio_dir = common.check_path_existence_and_return( os.path.join(default_base_dir, params.inference_audio_dir, params.inference_audio_text_aggregation_dir)) @@ -555,8 +603,11 @@ def save_role(text_role): gr.Markdown(value=i18n("3.2:启动文本相似度分析")) default_text_similarity_analysis_path = common.check_path_existence_and_return( os.path.join(default_base_dir, params.asr_filename + '.list')) - text_text_similarity_analysis_path = gr.Text(label=i18n("待分析的文件路径"), - value=default_text_similarity_analysis_path, interactive=True) + with gr.Row(): + text_text_similarity_analysis_path = gr.Text(label=i18n("待分析的文件路径"), + value=default_text_similarity_analysis_path, interactive=True) + gr.Slider(minimum=0, maximum=1, step=0.01, label=i18n("文本相似度放大边界"), value=0.90, + interactive=True) button_asr.click(asr, [text_work_space_dir, text_role, text_asr_audio_dir, dropdown_asr_model, dropdown_asr_size, dropdown_asr_lang], [text_asr_info, text_text_similarity_analysis_path]) @@ -568,7 +619,16 @@ def save_role(text_role): text_text_similarity_analysis_path], [text_text_similarity_analysis_info]) gr.Markdown(value=i18n("3.3:根据相似度分析结果,重点检查最后几条是否存在复读等问题")) - gr.Markdown(value=i18n("3.4:对结果按音频相似度排序,筛选低音质音频")) + with gr.Row(): + gr.Text(label=i18n("文本相似度分析结果文件所在路径"), value="", interactive=True) + gr.Button(i18n("打开文本相似度分析结果文件"), variant="primary") + gr.Slider(minimum=0, maximum=1, step=0.01, label=i18n("音频文本相似度边界值"), value=0.80, + interactive=True) + with gr.Row(): + gr.Button(i18n("删除音频文本相似度边界值以下的参考音频"), variant="primary") + gr.Text(label=i18n("删除结果"), value="", interactive=True) + with gr.Tab(label=i18n("第四步:校验参考音频音质")): + gr.Markdown(value=i18n("4.1:对结果按音频相似度排序,或许有用吧,主要还是耳朵听")) with gr.Row(): text_base_audio_path = gr.Text(label=i18n("请输入基准音频"), value="") text_compare_audio_dir = gr.Text(label=i18n("请输入待比较的音频文件目录"), 
value="") @@ -578,6 +638,8 @@ def save_role(text_role): button_similarity_audio_output.click(similarity_audio_output, [text_work_space_dir, text_role, text_base_audio_path, text_compare_audio_dir], [text_similarity_audio_output_info]) + gr.Markdown(value=i18n("4.2:如果发现存在低音质的推理音频,那么就去参考音频目录下,把原参考音频删了")) + gr.Markdown(value=i18n("4.3:删除参考音频之后,按下面的操作,会将推理音频目录下对应的音频也删掉")) with gr.Row(): default_sync_ref_audio_dir = common.check_path_existence_and_return( os.path.join(default_base_dir, params.reference_audio_dir)) @@ -592,13 +654,13 @@ def save_role(text_role): text_sync_ref_info = gr.Text(label=i18n("同步结果"), value="", interactive=False) button_sync_ref_audio.click(sync_ref_audio, [text_work_space_dir, text_role, text_sync_ref_audio_dir, text_sync_inference_audio_dir], [text_sync_ref_info]) - with gr.Tab("第四步:生成参考音频配置文本", open=False): - gr.Markdown(value=i18n("4.1:编辑模板")) + with gr.Tab("第五步:生成参考音频配置文本"): + gr.Markdown(value=i18n("5.1:编辑模板")) default_template_path = params.default_template_path default_template_content = common.read_file(default_template_path) text_template_path = gr.Text(label=i18n("模板文件路径"), value=default_template_path, interactive=True) text_template = gr.Text(label=i18n("模板内容"), value=default_template_content, lines=10) - gr.Markdown(value=i18n("4.2:生成配置")) + gr.Markdown(value=i18n("5.2:生成配置")) default_sync_ref_audio_dir2 = common.check_path_existence_and_return( os.path.join(default_base_dir, params.reference_audio_dir)) text_sync_ref_audio_dir2 = gr.Text(label=i18n("参考音频路径"), value=default_sync_ref_audio_dir2, @@ -610,7 +672,7 @@ def save_role(text_role): [text_work_space_dir, text_role, text_template, text_sync_ref_audio_dir2], [text_create_config_info]) button_sample.click(sample, [text_work_space_dir, text_role, text_sample_dir, text_base_voice_path, - text_subsection_num, text_sample_num, checkbox_similarity_output], + slider_subsection_num, slider_sample_num, checkbox_similarity_output], [text_sample_info, text_model_inference_voice_dir, text_sync_ref_audio_dir, text_sync_ref_audio_dir2]) button_model_inference.click(model_inference, diff --git a/Ref_Audio_Selector/tool/audio_inference.py b/Ref_Audio_Selector/tool/audio_inference.py index b146e282..f6b4569c 100644 --- a/Ref_Audio_Selector/tool/audio_inference.py +++ b/Ref_Audio_Selector/tool/audio_inference.py @@ -12,9 +12,6 @@ from Ref_Audio_Selector.config_param.log_config import logger, p_logger -# 假设手动指定端口范围为9400-9500 -available_ports = list(range(9400, 9500)) - class URLComposer: def __init__(self, base_url, emotion_param_name, text_param_name, ref_path_param_name, ref_text_param_name): self.base_url = base_url @@ -86,8 +83,6 @@ def generate_audio_files_parallel(url_composer, text_list, emotion_list, output_ if num_processes is None: num_processes = multiprocessing.cpu_count() - num_processes = min(num_processes, len(available_ports)) # 限制进程数不超过可用端口数 - # 将emotion_list均匀分成num_processes个子集 emotion_groups = np.array_split(emotion_list, num_processes) From 9264f7e38e7c3b1babeb228cbca3ee59f645b05d Mon Sep 17 00:00:00 2001 From: Downupanddownup Date: Sun, 28 Apr 2024 14:10:02 +0800 Subject: [PATCH 33/72] =?UTF-8?q?=E6=B7=BB=E5=8A=A0=E4=BA=8B=E4=BB=B6?= =?UTF-8?q?=E7=BB=91=E5=AE=9A=E5=92=8C=E5=AE=9E=E7=8E=B0?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- Ref_Audio_Selector/common/common.py | 31 +++ .../ref_audio_selector_webui.py | 222 ++++++++++++++---- ...e_inference_with_ref.py => audio_check.py} | 0 Ref_Audio_Selector/tool/audio_inference.py | 100 ++++++-- 
Ref_Audio_Selector/tool/model_manager.py | 34 +++ Ref_Audio_Selector/tool/text_check.py | 77 ++++++ .../tool/text_comparison/asr_text_process.py | 4 +- 7 files changed, 401 insertions(+), 67 deletions(-) rename Ref_Audio_Selector/tool/{delete_inference_with_ref.py => audio_check.py} (100%) create mode 100644 Ref_Audio_Selector/tool/model_manager.py create mode 100644 Ref_Audio_Selector/tool/text_check.py diff --git a/Ref_Audio_Selector/common/common.py b/Ref_Audio_Selector/common/common.py index 9742a446..5957fc3f 100644 --- a/Ref_Audio_Selector/common/common.py +++ b/Ref_Audio_Selector/common/common.py @@ -1,4 +1,7 @@ from tools import my_utils +from config import python_exec, is_half +import subprocess +import sys import os @@ -118,6 +121,34 @@ def check_path_existence_and_return(path): return "" +def open_file(filepath): + if sys.platform.startswith('darwin'): + subprocess.run(['open', filepath]) # macOS + elif os.name == 'nt': # For Windows + os.startfile(filepath) + elif os.name == 'posix': # For Linux, Unix, etc. + subprocess.run(['xdg-open', filepath]) + + +def start_new_service(script_path): + # 对于Windows系统 + if sys.platform.startswith('win'): + cmd = f'start cmd /k {python_exec} {script_path}' + # 对于Mac或者Linux系统 + else: + cmd = f'xterm -e {python_exec} {script_path}' + + proc = subprocess.Popen(cmd, shell=True) + + # 关闭之前启动的子进程 + # proc.terminate() + + # 或者如果需要强制关闭可以使用 + # proc.kill() + + return proc + + if __name__ == '__main__': dir = r'C:\Users\Administrator\Desktop/test' dir2 = r'"C:\Users\Administrator\Desktop\test2"' diff --git a/Ref_Audio_Selector/ref_audio_selector_webui.py b/Ref_Audio_Selector/ref_audio_selector_webui.py index 01670337..41ce14a6 100644 --- a/Ref_Audio_Selector/ref_audio_selector_webui.py +++ b/Ref_Audio_Selector/ref_audio_selector_webui.py @@ -6,10 +6,12 @@ from Ref_Audio_Selector.config_param.log_config import logger +import Ref_Audio_Selector.tool.model_manager as model_manager import Ref_Audio_Selector.tool.audio_similarity as audio_similarity import Ref_Audio_Selector.tool.audio_inference as audio_inference import Ref_Audio_Selector.tool.audio_config as audio_config -import Ref_Audio_Selector.tool.delete_inference_with_ref as delete_inference_with_ref +import Ref_Audio_Selector.tool.audio_check as audio_check +import Ref_Audio_Selector.tool.text_check as text_check import Ref_Audio_Selector.common.common as common import Ref_Audio_Selector.config_param.config_params as params import Ref_Audio_Selector.common.time_util as time_util @@ -148,7 +150,7 @@ def sample(text_work_space_dir, text_role, text_sample_dir, text_base_voice_path # 根据参考音频和测试文本,执行批量推理 -def model_inference(text_work_space_dir, text_role, text_model_inference_voice_dir, text_url, +def model_inference(text_work_space_dir, text_role, slider_request_concurrency_num, text_model_inference_voice_dir, text_url, text_text, text_ref_path, text_ref_text, text_emotion, text_test_content_dir): text_work_space_dir, text_model_inference_voice_dir, text_test_content_dir \ @@ -175,7 +177,7 @@ def model_inference(text_work_space_dir, text_role, text_model_inference_voice_d text_asr_audio_dir = os.path.join(inference_dir, params.inference_audio_text_aggregation_dir) - url_composer = audio_inference.URLComposer(text_url, text_emotion, text_text, text_ref_path, text_ref_text) + url_composer = audio_inference.TTSURLComposer(text_url, text_emotion, text_text, text_ref_path, text_ref_text) url_composer.is_valid() text_list = common.read_text_file_to_list(text_test_content_dir) if text_list is None or 
len(text_list) == 0: @@ -187,7 +189,8 @@ def model_inference(text_work_space_dir, text_role, text_model_inference_voice_d time_consuming, _ = time_util.time_monitor(audio_inference.generate_audio_files_parallel)(url_composer, text_list, ref_audio_manager.get_ref_audio_list(), - inference_dir, 3) + inference_dir, + slider_request_concurrency_num) text_model_inference_info = f"耗时:{time_consuming:0.1f}秒;推理成功:生成目录{inference_dir}" @@ -267,7 +270,7 @@ def open_asr(asr_inp_dir, asr_opt_dir, asr_model, asr_model_size, asr_lang): # 对asr生成的文件,与原本的文本内容,进行相似度分析 -def text_similarity_analysis(text_work_space_dir, text_role, +def text_similarity_analysis(text_work_space_dir, text_role, slider_text_similarity_amplification_boundary, text_text_similarity_analysis_path): text_work_space_dir, text_text_similarity_analysis_path \ = common.batch_clean_paths([text_work_space_dir, text_text_similarity_analysis_path]) @@ -281,7 +284,7 @@ def text_similarity_analysis(text_work_space_dir, text_role, similarity_dir = os.path.join(base_role_dir, params.text_similarity_output_dir) time_consuming, _ = time_util.time_monitor(open_text_similarity_analysis)(text_text_similarity_analysis_path, - similarity_dir) + similarity_dir, slider_text_similarity_amplification_boundary) text_text_similarity_analysis_info = f"耗时:{time_consuming:0.1f}秒;相似度分析成功:生成目录{similarity_dir}" @@ -357,10 +360,9 @@ def sync_ref_audio(text_work_space_dir, text_role, text_sync_ref_audio_dir, if text_sync_inference_audio_dir is None or text_sync_inference_audio_dir == '': raise Exception("推理生成的音频目录不能为空") time_consuming, (delete_text_wav_num, delete_emotion_dir_num) \ - = time_util.time_monitor(delete_inference_with_ref.sync_ref_audio)(text_sync_ref_audio_dir, + = time_util.time_monitor(audio_check.sync_ref_audio)(text_sync_ref_audio_dir, text_sync_inference_audio_dir) - # delete_text_wav_num, delete_emotion_dir_num = delete_inference_with_ref.sync_ref_audio( - # text_sync_ref_audio_dir, text_sync_inference_audio_dir) + text_sync_ref_audio_info = (f"耗时:{time_consuming:0.1f}秒;推理音频目录{text_sync_inference_audio_dir}下," f"text目录删除了{delete_text_wav_num}个推理音频,emotion目录下,删除了{delete_emotion_dir_num}个目录") except Exception as e: @@ -402,7 +404,7 @@ def create_config(text_work_space_dir, text_role, text_template, text_sync_ref_a # 基于请求路径和参数,合成完整的请求路径 def whole_url(text_url, text_text, text_ref_path, text_ref_text, text_emotion): - url_composer = audio_inference.URLComposer(text_url, text_emotion, text_text, text_ref_path, text_ref_text) + url_composer = audio_inference.TTSURLComposer(text_url, text_emotion, text_text, text_ref_path, text_ref_text) if url_composer.is_emotion(): text_whole_url = url_composer.build_url_with_emotion('测试内容', '情绪类型', False) else: @@ -410,6 +412,111 @@ def whole_url(text_url, text_text, text_ref_path, text_ref_text, text_emotion): return text_whole_url +def start_api(): + text_start_api_info = None + try: + proc = common.start_new_service('api.py') + text_start_api_info = "启动完成" + except Exception as e: + logger.error("发生异常: \n%s", traceback.format_exc()) + text_start_api_info = f"发生异常:{e}" + return text_start_api_info + + +def refresh_api_model(): + return ({"choices": model_manager.get_gpt_model_names(), "__type__": "update"}, + {"choices": model_manager.get_sovits_model_names(), "__type__": "update"}) + + +def api_set_model_whole_url(text_api_set_model_base_url, dropdown_api_gpt_models, dropdown_api_sovits_models, text_api_gpt_param, text_api_sovits_param): + url = audio_inference.SetModelURLComposer("all", text_api_set_model_base_url, 
@@ -267,7 +270,7 @@ def open_asr(asr_inp_dir, asr_opt_dir, asr_model, asr_model_size, asr_lang):
 
 
 # 对asr生成的文件,与原本的文本内容,进行相似度分析
-def text_similarity_analysis(text_work_space_dir, text_role,
+def text_similarity_analysis(text_work_space_dir, text_role, slider_text_similarity_amplification_boundary,
                              text_text_similarity_analysis_path):
     text_work_space_dir, text_text_similarity_analysis_path \
         = common.batch_clean_paths([text_work_space_dir, text_text_similarity_analysis_path])
@@ -281,7 +284,7 @@ def text_similarity_analysis(text_work_space_dir, text_role,
         similarity_dir = os.path.join(base_role_dir, params.text_similarity_output_dir)
 
         time_consuming, _ = time_util.time_monitor(open_text_similarity_analysis)(text_text_similarity_analysis_path,
-                                                                                  similarity_dir)
+                                                                                  similarity_dir, slider_text_similarity_amplification_boundary)
 
         text_text_similarity_analysis_info = f"耗时:{time_consuming:0.1f}秒;相似度分析成功:生成目录{similarity_dir}"
 
@@ -357,10 +360,9 @@ def sync_ref_audio(text_work_space_dir, text_role, text_sync_ref_audio_dir,
         if text_sync_inference_audio_dir is None or text_sync_inference_audio_dir == '':
             raise Exception("推理生成的音频目录不能为空")
         time_consuming, (delete_text_wav_num, delete_emotion_dir_num) \
-            = time_util.time_monitor(delete_inference_with_ref.sync_ref_audio)(text_sync_ref_audio_dir,
+            = time_util.time_monitor(audio_check.sync_ref_audio)(text_sync_ref_audio_dir,
                                                                  text_sync_inference_audio_dir)
-        # delete_text_wav_num, delete_emotion_dir_num = delete_inference_with_ref.sync_ref_audio(
-        #     text_sync_ref_audio_dir, text_sync_inference_audio_dir)
+
         text_sync_ref_audio_info = (f"耗时:{time_consuming:0.1f}秒;推理音频目录{text_sync_inference_audio_dir}下,"
                                     f"text目录删除了{delete_text_wav_num}个推理音频,emotion目录下,删除了{delete_emotion_dir_num}个目录")
     except Exception as e:
@@ -402,7 +404,7 @@ def create_config(text_work_space_dir, text_role, text_template, text_sync_ref_a
 
 # 基于请求路径和参数,合成完整的请求路径
 def whole_url(text_url, text_text, text_ref_path, text_ref_text, text_emotion):
-    url_composer = audio_inference.URLComposer(text_url, text_emotion, text_text, text_ref_path, text_ref_text)
+    url_composer = audio_inference.TTSURLComposer(text_url, text_emotion, text_text, text_ref_path, text_ref_text)
     if url_composer.is_emotion():
         text_whole_url = url_composer.build_url_with_emotion('测试内容', '情绪类型', False)
     else:
@@ -410,6 +412,111 @@ def whole_url(text_url, text_text, text_ref_path, text_ref_text, text_emotion):
     return text_whole_url
 
 
+def start_api():
+    text_start_api_info = None
+    try:
+        proc = common.start_new_service('api.py')
+        text_start_api_info = "启动完成"
+    except Exception as e:
+        logger.error("发生异常: \n%s", traceback.format_exc())
+        text_start_api_info = f"发生异常:{e}"
+    return text_start_api_info
+
+
+def refresh_api_model():
+    return ({"choices": model_manager.get_gpt_model_names(), "__type__": "update"},
+            {"choices": model_manager.get_sovits_model_names(), "__type__": "update"})
+
+
+def api_set_model_whole_url(text_api_set_model_base_url, dropdown_api_gpt_models, dropdown_api_sovits_models, text_api_gpt_param, text_api_sovits_param):
+    url = audio_inference.SetModelURLComposer("all", text_api_set_model_base_url, text_api_gpt_param, text_api_sovits_param)
+    return url.build_get_url([dropdown_api_gpt_models, dropdown_api_sovits_models], False)
+
+
+def start_api_set_model(text_api_set_model_base_url, dropdown_api_gpt_models, dropdown_api_sovits_models, text_api_gpt_param, text_api_sovits_param):
+    text_api_start_set_model_request_info = None
+    try:
+        if dropdown_api_gpt_models is None or dropdown_api_gpt_models == '':
+            raise Exception("GPT模型不能为空")
+        if dropdown_api_sovits_models is None or dropdown_api_sovits_models == '':
+            raise Exception("Sovits模型不能为空")
+        url = audio_inference.SetModelURLComposer("all", text_api_set_model_base_url, text_api_gpt_param, text_api_sovits_param)
+        url.is_valid()
+        time_consuming, result = time_util.time_monitor(audio_inference.start_api_set_model)(url, dropdown_api_gpt_models, dropdown_api_sovits_models)
+        text_api_start_set_model_request_info = f"耗时:{time_consuming:0.1f}秒;请求结果:{result}"
+    except Exception as e:
+        logger.error("发生异常: \n%s", traceback.format_exc())
+        text_api_start_set_model_request_info = f"发生异常:{e}"
+    return text_api_start_set_model_request_info
+
+
+def refresh_api_v2_gpt_model():
+    return {"choices": model_manager.get_gpt_model_names(), "__type__": "update"}
+
+
+def api_v2_set_gpt_whole_url(text_api_v2_set_gpt_model_base_url, text_api_v2_gpt_model_param, dropdown_api_v2_gpt_models):
+    url = audio_inference.SetModelURLComposer("gpt", text_api_v2_set_gpt_model_base_url, text_api_v2_gpt_model_param, None)
+    return url.build_get_url([dropdown_api_v2_gpt_models], False)
+
+def start_api_v2_set_gpt_model(text_api_v2_set_gpt_model_base_url, text_api_v2_gpt_model_param, dropdown_api_v2_gpt_models):
+    text_api_v2_start_set_gpt_model_request_info = None
+    try:
+        if dropdown_api_v2_gpt_models is None or dropdown_api_v2_gpt_models == '':
+            raise Exception("GPT模型不能为空")
+        url = audio_inference.SetModelURLComposer("gpt", text_api_v2_set_gpt_model_base_url, text_api_v2_gpt_model_param, None)
+        url.is_valid()
+        time_consuming, result = time_util.time_monitor(audio_inference.start_api_v2_set_gpt_model)(url, dropdown_api_v2_gpt_models)
+        text_api_v2_start_set_gpt_model_request_info = f"耗时:{time_consuming:0.1f}秒;请求结果:{result}"
+    except Exception as e:
+        logger.error("发生异常: \n%s", traceback.format_exc())
+        text_api_v2_start_set_gpt_model_request_info = f"发生异常:{e}"
+    return text_api_v2_start_set_gpt_model_request_info
+
+
+def refresh_api_v2_sovits_model():
+    return {"choices": model_manager.get_sovits_model_names(), "__type__": "update"}
+
+def api_v2_set_sovits_whole_url(text_api_v2_set_sovits_model_base_url, text_api_v2_sovits_model_param, dropdown_api_v2_sovits_models):
+    url = audio_inference.SetModelURLComposer("sovits", text_api_v2_set_sovits_model_base_url, None, text_api_v2_sovits_model_param)
+    return url.build_get_url([dropdown_api_v2_sovits_models], False)
+
+
+def start_api_v2_set_sovits_model(text_api_v2_set_sovits_model_base_url, text_api_v2_sovits_model_param, dropdown_api_v2_sovits_models):
+    text_api_v2_start_set_sovits_model_request_info = None
+    try:
+        if dropdown_api_v2_sovits_models is None or dropdown_api_v2_sovits_models == '':
+            raise Exception("Sovits模型不能为空")
+        url = audio_inference.SetModelURLComposer("sovits", text_api_v2_set_sovits_model_base_url, None, text_api_v2_sovits_model_param)
+        url.is_valid()
+        time_consuming, result = time_util.time_monitor(audio_inference.start_api_v2_set_sovits_model)(url, dropdown_api_v2_sovits_models)
+        text_api_v2_start_set_sovits_model_request_info = f"耗时:{time_consuming:0.1f}秒;请求结果:{result}"
+    except Exception as e:
+        logger.error("发生异常: \n%s", traceback.format_exc())
+        text_api_v2_start_set_sovits_model_request_info = f"发生异常:{e}"
+    return text_api_v2_start_set_sovits_model_request_info
+
+
+def open_file(file_path):
+    common.open_file(my_utils.clean_path(file_path))
+
+
+def delete_ref_audio_below_boundary(ref_audio_path, text_text_similarity_result_path, text_sync_inference_audio_dir, slider_audio_text_similarity_boundary):
+    text_delete_ref_audio_below_boundary_info = None
+    ref_audio_path, text_text_similarity_result_path, text_sync_inference_audio_dir = common.batch_clean_paths([ref_audio_path, text_text_similarity_result_path, text_sync_inference_audio_dir])
+    try:
+        if ref_audio_path is None or ref_audio_path == '':
+            raise Exception("参考音频路径不能为空")
+        if text_text_similarity_result_path is None or text_text_similarity_result_path == '':
+            raise Exception("文本相似度结果路径不能为空")
+        time_consuming, count = time_util.time_monitor(text_check.delete_ref_audio_below_boundary)(ref_audio_path, text_text_similarity_result_path, text_sync_inference_audio_dir, slider_audio_text_similarity_boundary)
+        text_delete_ref_audio_below_boundary_info = f"耗时:{time_consuming:0.1f}秒;删除参考音频数量:{count}"
+    except Exception as e:
+        logger.error("发生异常: \n%s", traceback.format_exc())
+        text_delete_ref_audio_below_boundary_info = f"发生异常:{e}"
+    return text_delete_ref_audio_below_boundary_info
+
+
 def save_generate_audio_url(generate_audio_url):
     rw_param.write(rw_param.generate_audio_url, generate_audio_url)
 
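All of the new helpers above funnel through SetModelURLComposer, which is defined further down in this patch in audio_inference.py. A sketch of what it is expected to produce in "all" mode; the host, port, parameter names and model paths here are illustrative only:

    import Ref_Audio_Selector.tool.audio_inference as audio_inference

    composer = audio_inference.SetModelURLComposer(
        "all", "http://127.0.0.1:9880/set_model", "gpt_model_path", "sovits_model_path")
    composer.is_valid()  # raises unless the base URL and both param names are set

    # GET-style preview, unencoded, as used for the whole-URL textbox:
    url = composer.build_get_url(["GPT_weights/demo.ckpt", "SoVITS_weights/demo.pth"], False)
    # http://127.0.0.1:9880/set_model?gpt_model_path=GPT_weights/demo.ckpt&sovits_model_path=SoVITS_weights/demo.pth

    # POST-style, as used by start_api_set_model: a URL plus a JSON body
    url, body = composer.build_post_url(["GPT_weights/demo.ckpt", "SoVITS_weights/demo.pth"], False)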
@@ -461,8 +568,8 @@ def save_role(text_role):
         text_work_space_dir = gr.Text(label=i18n("工作目录,后续操作所生成文件都会保存在此目录下"),
                                       value=default_work_space_dir)
         text_role = gr.Text(label=i18n("角色名称"), value=default_role)
-        text_work_space_dir.input(save_work_dir, [text_work_space_dir, text_role], [text_role])
-        text_role.input(save_role, [text_role], [])
+        text_work_space_dir.blur(save_work_dir, [text_work_space_dir, text_role], [text_role])
+        text_role.blur(save_role, [text_role], [])
     with gr.Tab(label=i18n("第一步:基于训练素材,生成待选参考音频列表")):
         gr.Markdown(value=i18n("1.1:选择list文件,并提取3-10秒的素材作为参考候选"))
         text_list_input = gr.Text(label=i18n("请输入list文件路径"), value="")
@@ -493,47 +600,62 @@ def save_role(text_role):
                                                      value=default_model_inference_voice_dir, interactive=True)
         gr.Markdown(value=i18n("2.1:启动推理服务,并配置模型参数"))
         with gr.Accordion(label=i18n("详情")):
-            with gr.Tab(label=i18n("主项目下api服务")):
+            with gr.Tab(label=i18n("主项目下api.py服务")):
                 gr.Markdown(value=i18n("2.1.1:启动服务"))
                 with gr.Row():
-                    gr.Button(i18n("启动api"), variant="primary")
-                    gr.Text(label=i18n("api启动信息"), value="", interactive=False)
+                    button_start_api = gr.Button(i18n("启动api"), variant="primary")
+                    text_start_api_info = gr.Text(label=i18n("api启动信息"), value="", interactive=False)
+                    button_start_api.click(start_api, [], [text_start_api_info])
                 gr.Markdown(value=i18n("2.1.2:设置模型参数"))
-                gr.Text(label=i18n("请输入api服务模型切换接口地址"), value="", interactive=True)
+                text_api_set_model_base_url = gr.Text(label=i18n("请输入api服务模型切换接口地址"), value="", interactive=True)
                 with gr.Row():
-                    gr.Dropdown(label=i18n("GPT模型列表"), choices=[], value="", interactive=True)
-                    gr.Dropdown(label=i18n("SoVITS模型列表"), choices=[], value="", interactive=True)
-                    gr.Button(i18n("刷新模型路径"), variant="primary")
+                    dropdown_api_gpt_models = gr.Dropdown(label=i18n("GPT模型列表"), choices=model_manager.get_gpt_model_names(), value="", interactive=True)
+                    dropdown_api_sovits_models = gr.Dropdown(label=i18n("SoVITS模型列表"), choices=model_manager.get_sovits_model_names(), value="", interactive=True)
+                    button_refresh_api_model = gr.Button(i18n("刷新模型路径"), variant="primary")
+                    button_refresh_api_model.click(refresh_api_model, [], [dropdown_api_gpt_models, dropdown_api_sovits_models])
                 with gr.Row():
-                    gr.Text(label=i18n("GPT模型参数名"), value="", interactive=True)
-                    gr.Text(label=i18n("SoVITS模型参数名"), value="", interactive=True)
+                    text_api_gpt_param = gr.Text(label=i18n("GPT模型参数名"), value="", interactive=True)
+                    text_api_sovits_param = gr.Text(label=i18n("SoVITS模型参数名"), value="", interactive=True)
                 gr.Markdown(value=i18n("2.1.3:发起设置请求"))
-                gr.Text(label=i18n("完整的模型参数设置请求地址"), value="", interactive=False)
+                text_api_set_model_whole_url = gr.Text(label=i18n("完整的模型参数设置请求地址"), value="", interactive=False)
+                dropdown_api_gpt_models.change(api_set_model_whole_url, [text_api_set_model_base_url, dropdown_api_gpt_models, dropdown_api_sovits_models, text_api_gpt_param, text_api_sovits_param], [text_api_set_model_whole_url])
+                dropdown_api_sovits_models.change(api_set_model_whole_url, [text_api_set_model_base_url, dropdown_api_gpt_models, dropdown_api_sovits_models, text_api_gpt_param, text_api_sovits_param], [text_api_set_model_whole_url])
+                text_api_gpt_param.input(api_set_model_whole_url, [text_api_set_model_base_url, dropdown_api_gpt_models, dropdown_api_sovits_models, text_api_gpt_param, text_api_sovits_param], [text_api_set_model_whole_url])
+                text_api_sovits_param.input(api_set_model_whole_url, [text_api_set_model_base_url, dropdown_api_gpt_models, dropdown_api_sovits_models, text_api_gpt_param, text_api_sovits_param], [text_api_set_model_whole_url])
                 with gr.Row():
-                    gr.Button(i18n("发起模型设置请求"), variant="primary")
-                    gr.Text(label=i18n("设置请求结果"), value="", interactive=False)
-            with gr.Tab(label=i18n("fast项目下api_v2服务")):
+                    button_api_start_set_model_request = gr.Button(i18n("发起模型设置请求"), variant="primary")
+                    text_api_start_set_model_request_info = gr.Text(label=i18n("设置请求结果"), value="", interactive=False)
+                    button_api_start_set_model_request.click(start_api_set_model, [text_api_set_model_base_url, dropdown_api_gpt_models, dropdown_api_sovits_models, text_api_gpt_param, text_api_sovits_param], [text_api_start_set_model_request_info])
+            with gr.Tab(label=i18n("fast项目下api_v2.py服务")):
                 gr.Markdown(value=i18n("2.1.1:请到你的项目下,启动服务"))
                 gr.Markdown(value=i18n("2.1.2:设置GPT模型参数"))
-                gr.Text(label=i18n("请输入api服务GPT模型切换接口地址"), value="", interactive=True)
+                text_api_v2_set_gpt_model_base_url = gr.Text(label=i18n("请输入api服务GPT模型切换接口地址"), value="", interactive=True)
                 with gr.Row():
-                    gr.Text(label=i18n("GPT模型参数名"), value="", interactive=True)
-                    gr.Dropdown(label=i18n("GPT模型列表"), choices=[], value="", interactive=True)
-                    gr.Button(i18n("刷新模型路径"), variant="primary")
-                gr.Text(label=i18n("完整的GPT模型参数设置请求地址"), value="", interactive=False)
+                    text_api_v2_gpt_model_param = gr.Text(label=i18n("GPT模型参数名"), value="", interactive=True)
+                    dropdown_api_v2_gpt_models = gr.Dropdown(label=i18n("GPT模型列表"), choices=model_manager.get_gpt_model_names(), value="", interactive=True)
+                    button_api_v2_refresh_gpt = gr.Button(i18n("刷新模型路径"), variant="primary")
+                    button_api_v2_refresh_gpt.click(refresh_api_v2_gpt_model, [], [dropdown_api_v2_gpt_models])
+                text_api_v2_set_gpt_model_whole_url = gr.Text(label=i18n("完整的GPT模型参数设置请求地址"), value="", interactive=False)
+                text_api_v2_gpt_model_param.input(api_v2_set_gpt_whole_url, [text_api_v2_set_gpt_model_base_url, text_api_v2_gpt_model_param, dropdown_api_v2_gpt_models], [text_api_v2_set_gpt_model_whole_url])
+                dropdown_api_v2_gpt_models.change(api_v2_set_gpt_whole_url, [text_api_v2_set_gpt_model_base_url, text_api_v2_gpt_model_param, dropdown_api_v2_gpt_models], [text_api_v2_set_gpt_model_whole_url])
                 with gr.Row():
-                    gr.Button(i18n("发起GPT模型设置请求"), variant="primary")
-                    gr.Text(label=i18n("设置请求结果"), value="", interactive=False)
+                    button_api_v2_start_set_gpt_model_request = gr.Button(i18n("发起GPT模型设置请求"), variant="primary")
+                    text_api_v2_start_set_gpt_model_request_info = gr.Text(label=i18n("设置请求结果"), value="", interactive=False)
+                    button_api_v2_start_set_gpt_model_request.click(start_api_v2_set_gpt_model, [text_api_v2_set_gpt_model_base_url, text_api_v2_gpt_model_param, dropdown_api_v2_gpt_models], [text_api_v2_start_set_gpt_model_request_info])
                 gr.Markdown(value=i18n("2.1.3:设置SoVITS模型参数"))
-                gr.Text(label=i18n("请输入api服务SoVITS模型切换接口地址"), value="", interactive=True)
+                text_api_v2_set_sovits_model_base_url = gr.Text(label=i18n("请输入api服务SoVITS模型切换接口地址"), value="", interactive=True)
                 with gr.Row():
-                    gr.Text(label=i18n("SoVITS模型参数名"), value="", interactive=True)
-                    gr.Dropdown(label=i18n("SoVITS模型列表"), choices=[], value="", interactive=True)
-                    gr.Button(i18n("刷新模型路径"), variant="primary")
-                gr.Text(label=i18n("完整的SoVITS模型参数设置请求地址"), value="", interactive=False)
+                    text_api_v2_sovits_model_param = gr.Text(label=i18n("SoVITS模型参数名"), value="", interactive=True)
+                    dropdown_api_v2_sovits_models = gr.Dropdown(label=i18n("SoVITS模型列表"), choices=model_manager.get_sovits_model_names(), value="", interactive=True)
+                    button_api_v2_refresh_sovits = gr.Button(i18n("刷新模型路径"), variant="primary")
+                    button_api_v2_refresh_sovits.click(refresh_api_v2_sovits_model, [], [dropdown_api_v2_sovits_models])
+                text_api_v2_set_sovits_model_whole_url = gr.Text(label=i18n("完整的SoVITS模型参数设置请求地址"), value="", interactive=False)
+                text_api_v2_sovits_model_param.input(api_v2_set_sovits_whole_url, [text_api_v2_set_sovits_model_base_url, text_api_v2_sovits_model_param, dropdown_api_v2_sovits_models], [text_api_v2_set_sovits_model_whole_url])
+                dropdown_api_v2_sovits_models.change(api_v2_set_sovits_whole_url, [text_api_v2_set_sovits_model_base_url, text_api_v2_sovits_model_param, dropdown_api_v2_sovits_models], [text_api_v2_set_sovits_model_whole_url])
                 with gr.Row():
-                    gr.Button(i18n("发起SoVITS模型设置请求"), variant="primary")
-                    gr.Text(label=i18n("设置请求结果"), value="", interactive=False)
+                    button_api_v2_start_set_sovits_model_request = gr.Button(i18n("发起SoVITS模型设置请求"), variant="primary")
+                    text_api_v2_start_set_sovits_model_request_info = gr.Text(label=i18n("设置请求结果"), value="", interactive=False)
+                    button_api_v2_start_set_sovits_model_request.click(start_api_v2_set_sovits_model, [text_api_v2_set_sovits_model_base_url, text_api_v2_sovits_model_param, dropdown_api_v2_sovits_models], [text_api_v2_start_set_sovits_model_request_info])
             with gr.Tab(label=i18n("第三方推理服务")):
                 gr.Markdown(value=i18n("启动第三方推理服务,并完成参考音频打包,模型参数设置等操作"))
         gr.Markdown(value=i18n("2.2:配置推理服务参数信息,参考音频路径/文本和角色情绪二选一,如果是角色情绪,需要先执行第四步,"
@@ -567,7 +689,7 @@ def save_role(text_role):
             default_test_content_path = params.default_test_text_path
             text_test_content = gr.Text(label=i18n("请输入待推理文本路径"), value=default_test_content_path)
             gr.Markdown(value=i18n("2.4:开始批量推理,这个过程比较耗时,可以去干点别的"))
-            gr.Slider(minimum=1, maximum=10, step=1, label=i18n("请输入请求并发数,会根据此数创建对应数量的子进程并行发起推理请求"), value=3,
+            slider_request_concurrency_num = gr.Slider(minimum=1, maximum=10, step=1, label=i18n("请输入请求并发数,会根据此数创建对应数量的子进程并行发起推理请求"), value=3,
                                       interactive=True)
             with gr.Row():
                 button_model_inference = gr.Button(i18n("开启批量推理"), variant="primary")
@@ -606,7 +728,7 @@ def save_role(text_role):
             with gr.Row():
                 text_text_similarity_analysis_path = gr.Text(label=i18n("待分析的文件路径"),
                                                              value=default_text_similarity_analysis_path, interactive=True)
-                gr.Slider(minimum=0, maximum=1, step=0.01, label=i18n("文本相似度放大边界"), value=0.90,
+                slider_text_similarity_amplification_boundary = gr.Slider(minimum=0, maximum=1, step=0.01, label=i18n("文本相似度放大边界"), value=0.90,
                           interactive=True)
             button_asr.click(asr, [text_work_space_dir, text_role, text_asr_audio_dir, dropdown_asr_model,
                                    dropdown_asr_size, dropdown_asr_lang],
@@ -615,18 +737,22 @@ def save_role(text_role):
                 button_text_similarity_analysis = gr.Button(i18n("启动文本相似度分析"), variant="primary")
                 text_text_similarity_analysis_info = gr.Text(label=i18n("文本相似度分析结果"), value="",
                                                              interactive=False)
-                button_text_similarity_analysis.click(text_similarity_analysis, [text_work_space_dir, text_role,
+                button_text_similarity_analysis.click(text_similarity_analysis, [text_work_space_dir, text_role, slider_text_similarity_amplification_boundary,
                                                                                  text_text_similarity_analysis_path],
                                                       [text_text_similarity_analysis_info])
             gr.Markdown(value=i18n("3.3:根据相似度分析结果,重点检查最后几条是否存在复读等问题"))
             with gr.Row():
-                gr.Text(label=i18n("文本相似度分析结果文件所在路径"), value="", interactive=True)
-                gr.Button(i18n("打开文本相似度分析结果文件"), variant="primary")
-                gr.Slider(minimum=0, maximum=1, step=0.01, label=i18n("音频文本相似度边界值"), value=0.80,
+                text_text_similarity_result_path = gr.Text(label=i18n("文本相似度分析结果文件所在路径"), value="", interactive=True)
+                button_open_text_similarity_result = gr.Button(i18n("打开文本相似度分析结果文件"), variant="primary")
+                button_open_text_similarity_result.click(open_file, [text_text_similarity_result_path], [])
+                slider_audio_text_similarity_boundary = gr.Slider(minimum=0, maximum=1, step=0.01, label=i18n("音频文本相似度边界值"), value=0.80,
                           interactive=True)
+            text_sync_inference_audio_dir2 = gr.Text(label=i18n("被同步的推理音频路径"),
+                                                     value="", interactive=True)
             with gr.Row():
-                gr.Button(i18n("删除音频文本相似度边界值以下的参考音频"), variant="primary")
-                gr.Text(label=i18n("删除结果"), value="", interactive=True)
+                button_delete_ref_audio_below_boundary = gr.Button(i18n("删除音频文本相似度边界值以下的参考音频"), variant="primary")
+                text_delete_ref_audio_below_boundary_info = gr.Text(label=i18n("删除结果"), value="", interactive=True)
+                button_delete_ref_audio_below_boundary.click(delete_ref_audio_below_boundary, [text_model_inference_voice_dir, text_text_similarity_result_path, text_sync_inference_audio_dir2, slider_audio_text_similarity_boundary], [text_delete_ref_audio_below_boundary_info])
     with gr.Tab(label=i18n("第四步:校验参考音频音质")):
         gr.Markdown(value=i18n("4.1:对结果按音频相似度排序,或许有用吧,主要还是耳朵听"))
         with gr.Row():
@@ -676,7 +802,7 @@ def save_role(text_role):
                         [text_sample_info, text_model_inference_voice_dir,
                          text_sync_ref_audio_dir, text_sync_ref_audio_dir2])
     button_model_inference.click(model_inference,
-                                 [text_work_space_dir, text_role, text_model_inference_voice_dir, text_url,
+                                 [text_work_space_dir, text_role, slider_request_concurrency_num, text_model_inference_voice_dir, text_url,
                                   text_text, text_ref_path, text_ref_text, text_emotion,
                                   text_test_content],
                                  [text_model_inference_info, text_asr_audio_dir, text_sync_inference_audio_dir])
diff --git a/Ref_Audio_Selector/tool/delete_inference_with_ref.py b/Ref_Audio_Selector/tool/audio_check.py
similarity index 100%
rename from Ref_Audio_Selector/tool/delete_inference_with_ref.py
rename to Ref_Audio_Selector/tool/audio_check.py
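One detail worth noting in the button_model_inference.click change above: Gradio passes the inputs list to the handler positionally, so the list has to stay in lockstep with the signature now that slider_request_concurrency_num is the third parameter of model_inference. A reduced sketch of the wiring pattern, with component names shortened for illustration:

    import gradio as gr

    def handler(work_dir, role, concurrency):
        # arrives in exactly the order of the inputs list below
        return f"{role}: {concurrency} workers under {work_dir}"

    with gr.Blocks() as demo:
        work_dir = gr.Text(label="work dir")
        role = gr.Text(label="role")
        concurrency = gr.Slider(1, 10, step=1, value=3, label="concurrency")
        info = gr.Text(interactive=False)
        btn = gr.Button("run")
        # inputs order == handler parameter order; outputs receive the return value
        btn.click(handler, [work_dir, role, concurrency], [info])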
diff --git a/Ref_Audio_Selector/tool/audio_inference.py b/Ref_Audio_Selector/tool/audio_inference.py
index f6b4569c..2535dec5 100644
--- a/Ref_Audio_Selector/tool/audio_inference.py
+++ b/Ref_Audio_Selector/tool/audio_inference.py
@@ -3,16 +3,55 @@
 import requests
 import itertools
 import multiprocessing
-from multiprocessing import Pool
 from concurrent.futures import ProcessPoolExecutor
 import numpy as np
 import Ref_Audio_Selector.config_param.config_params as params
-from Ref_Audio_Selector.common.time_util import timeit_decorator
 from urllib.parse import urlparse, parse_qs, urlencode, urlunparse, quote
 from Ref_Audio_Selector.config_param.log_config import logger, p_logger
 
 
-class URLComposer:
+class SetModelURLComposer:
+    def __init__(self, type, base_url, gpt_param_name, sovits_param_name):
+        self.type = type
+        self.base_url = base_url
+        self.gpt_param_name = gpt_param_name
+        self.sovits_param_name = sovits_param_name
+
+    def is_valid(self):
+        if self.base_url is None or self.base_url == '':
+            raise Exception("请求地址不能为空")
+        if self.type in ['gpt', 'all']:
+            if self.gpt_param_name is None or self.gpt_param_name == '':
+                raise Exception("GPT参数名不能为空")
+        if self.type in ['sovits', 'all']:
+            if self.sovits_param_name is None or self.sovits_param_name == '':
+                raise Exception("Sovits参数名不能为空")
+
+    def build_get_url(self, value_array, need_url_encode=True):
+        params = {}
+        if self.type == 'gpt':
+            params[self.gpt_param_name] = value_array[0]
+        if self.type == 'sovits':
+            params[self.sovits_param_name] = value_array[0]
+        if self.type == 'all':
+            params[self.gpt_param_name] = value_array[0]
+            params[self.sovits_param_name] = value_array[1]
+        return append_params_to_url(self.base_url, params, need_url_encode)
+
+    def build_post_url(self, value_array, need_url_encode=True):
+        url = append_params_to_url(self.base_url, {}, need_url_encode)
+        params = {}
+        if self.type == 'gpt':
+            params[self.gpt_param_name] = value_array[0]
+        if self.type == 'sovits':
+            params[self.sovits_param_name] = value_array[0]
+        if self.type == 'all':
+            params[self.gpt_param_name] = value_array[0]
+            params[self.sovits_param_name] = value_array[1]
+        return url, params
+
+
+class TTSURLComposer:
     def __init__(self, base_url, emotion_param_name, text_param_name, ref_path_param_name, ref_text_param_name):
         self.base_url = base_url
         self.emotion_param_name = emotion_param_name
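With the class renamed to TTSURLComposer and the guard clauses dropped, URL assembly is delegated to the module-level append_params_to_url shown in the next hunk. Expected behavior for a base URL without an existing query string (values illustrative; non-ASCII values only survive transport if need_url_encode is left at its default True):

    composer = TTSURLComposer(
        "http://127.0.0.1:9880", "", "text", "refer_wav_path", "prompt_text")
    composer.is_emotion()  # False: the emotion parameter name is empty

    url = composer.build_url_with_ref("你好", "refer/demo.wav", "示例文本", False)
    # http://127.0.0.1:9880?text=你好&refer_wav_path=refer/demo.wav&prompt_text=示例文本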
@@ -34,30 +73,26 @@ def is_emotion(self):
         return self.emotion_param_name is not None and self.emotion_param_name != ''
 
     def build_url_with_emotion(self, text_value, emotion_value, need_url_encode=True):
-        if not self.emotion_param_name:
-            raise ValueError("Emotion parameter name is not set.")
         params = {
             self.text_param_name: text_value,
             self.emotion_param_name: emotion_value,
         }
-        return self._append_params_to_url(params, need_url_encode)
+        return append_params_to_url(self.base_url, params, need_url_encode)
 
     def build_url_with_ref(self, text_value, ref_path_value, ref_text_value, need_url_encode=True):
-        if self.emotion_param_name:
-            raise ValueError("Cannot use reference parameters when emotion parameter is set.")
         params = {
             self.text_param_name: text_value,
             self.ref_path_param_name: ref_path_value,
             self.ref_text_param_name: ref_text_value,
         }
-        return self._append_params_to_url(params, need_url_encode)
+        return append_params_to_url(self.base_url, params, need_url_encode)
+
 
-    def _append_params_to_url(self, params, need_url_encode):
-        url_with_params = self.base_url
-        if params:
-            query_params = '&'.join([f"{k}={v}" for k, v in params.items()])
-            url_with_params += '?' + query_params if '?' not in self.base_url else '&' + query_params
-        return url_with_params if not need_url_encode else safe_encode_query_params(url_with_params)
+def append_params_to_url(url_with_params, params, need_url_encode):
+    if params:
+        query_params = '&'.join([f"{k}={v}" for k, v in params.items()])
+        url_with_params += '?' + query_params if '?' not in url_with_params else '&' + query_params
+    return url_with_params if not need_url_encode else safe_encode_query_params(url_with_params)
 
 
 def safe_encode_query_params(original_url):
@@ -87,8 +122,9 @@ def generate_audio_files_parallel(url_composer, text_list, emotion_list, output_
     emotion_groups = np.array_split(emotion_list, num_processes)
 
     with ProcessPoolExecutor(max_workers=num_processes) as executor:
-        futures = [executor.submit(generate_audio_files_for_emotion_group, url_composer, text_list, group, output_dir_path)
-                   for group in emotion_groups]
+        futures = [
+            executor.submit(generate_audio_files_for_emotion_group, url_composer, text_list, group, output_dir_path)
+            for group in emotion_groups]
 
         for future in futures:
             future.result()  # 等待所有进程完成
@@ -162,3 +198,33 @@ def inference_audio_from_api(url):
         return response.content
     else:
         raise Exception(f"Failed to fetch audio from API. Server responded with status code {response.status_code}.")
+
+
+def start_api_set_model(set_model_url_composer, gpt_models, sovits_models):
+    url, post_body = set_model_url_composer.build_post_url(gpt_models, sovits_models)
+    response = requests.post(url, json=post_body)
+    if response.status_code == 200:
+        result = response.text
+        return result
+    else:
+        return f'请求失败,状态码:{response.status_code}'
+
+
+def start_api_v2_set_gpt_model(set_model_url_composer, gpt_models):
+    url = set_model_url_composer.build_get_url([gpt_models])
+    response = requests.get(url)
+    if response.status_code == 200:
+        result = response.text
+        return result
+    else:
+        return f'请求失败,状态码:{response.status_code}'
+
+
+def start_api_v2_set_sovits_model(set_model_url_composer, sovits_models):
+    url = set_model_url_composer.build_get_url([sovits_models])
+    response = requests.get(url)
+    if response.status_code == 200:
+        result = response.text
+        return result
+    else:
+        return f'请求失败,状态码:{response.status_code}'
diff --git a/Ref_Audio_Selector/tool/model_manager.py b/Ref_Audio_Selector/tool/model_manager.py
new file mode 100644
index 00000000..53e352e9
--- /dev/null
+++ b/Ref_Audio_Selector/tool/model_manager.py
@@ -0,0 +1,34 @@
+import os
+import re
+
+pretrained_sovits_name = "GPT_SoVITS/pretrained_models/s2G488k.pth"
+pretrained_gpt_name = "GPT_SoVITS/pretrained_models/s1bert25hz-2kh-longer-epoch=68e-step=50232.ckpt"
+SoVITS_weight_root = "SoVITS_weights"
+GPT_weight_root = "GPT_weights"
+os.makedirs(SoVITS_weight_root, exist_ok=True)
+os.makedirs(GPT_weight_root, exist_ok=True)
+
+
+def custom_sort_key(s):
+    # 使用正则表达式提取字符串中的数字部分和非数字部分
+    parts = re.split('(\d+)', s)
+    # 将数字部分转换为整数,非数字部分保持不变
+    parts = [int(part) if part.isdigit() else part for part in parts]
+    return parts
+
+
+def get_gpt_model_names():
+    gpt_names = [pretrained_gpt_name]
+    for name in os.listdir(SoVITS_weight_root):
+        if name.endswith(".ckpt"): gpt_names.append("%s/%s" % (SoVITS_weight_root, name))
+    sorted(gpt_names, key=custom_sort_key)
+    return gpt_names
+
+
+def get_sovits_model_names():
+    sovits_names = [pretrained_sovits_name]
+    for name in os.listdir(GPT_weight_root):
+        if name.endswith(".pth"): sovits_names.append("%s/%s" % (GPT_weight_root, name))
+    sorted(sovits_names, key=custom_sort_key)
+    return sovits_names
+
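custom_sort_key yields a natural ordering, so "e10" sorts after "e9" rather than between "e1" and "e2". Two things look worth a second pass here: sorted() returns a new list, so the bare sorted(gpt_names, key=custom_sort_key) calls discard their result (list.sort() or reassignment would be needed for the dropdowns to actually show sorted names), and the directory constants appear crossed, with .ckpt files scanned under SoVITS_weight_root and .pth files under GPT_weight_root. A quick check of the key itself:

    import re

    def custom_sort_key(s):
        parts = re.split(r'(\d+)', s)  # the capture group keeps the digit runs
        return [int(p) if p.isdigit() else p for p in parts]

    names = ["role-e9.ckpt", "role-e10.ckpt", "role-e100.ckpt"]
    print(sorted(names))                       # lexicographic: e10, e100, e9
    print(sorted(names, key=custom_sort_key))  # natural: e9, e10, e100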
diff --git a/Ref_Audio_Selector/tool/text_check.py b/Ref_Audio_Selector/tool/text_check.py
new file mode 100644
index 00000000..98c299d4
--- /dev/null
+++ b/Ref_Audio_Selector/tool/text_check.py
@@ -0,0 +1,77 @@
+import os
+import Ref_Audio_Selector.common.common as common
+import Ref_Audio_Selector.tool.audio_check as audio_check
+from Ref_Audio_Selector.config_param.log_config import logger
+
+
+def parse_text_similarity_result_txt(file_path):
+    """
+    解析指定格式的txt文件,每行格式:f"{item['average_similarity_score']}|{item['count']}|{item['emotion']}"
+
+    :param file_path: txt文件的路径
+    :return: 包含解析后数据的字典列表
+    """
+    data_list = []
+    with open(file_path, 'r', encoding='utf-8') as file:
+        for line in file:
+            # 使用'|'作为分隔符分割每行数据
+            parts = line.strip().split('|')
+            if len(parts) == 3:
+                # 将分割后的字符串转换为浮点数、整数和字符串
+                try:
+                    item = {
+                        'average_similarity_score': float(parts[0]),
+                        'count': int(parts[1]),
+                        'emotion': parts[2]
+                    }
+                    data_list.append(item)
+                except ValueError as e:
+                    # 如果转换失败,打印错误信息并跳过该行
+                    logger.error(f"Error parsing line: {line.strip()} - {e}")
+
+    return data_list
+
+
+def remove_low_similarity_files(ref_audio_list, report_list, audio_text_similarity_boundary):
+    """
+    根据条件删除低相似度音频文件并返回删除数量。
+
+    :param ref_audio_list: 包含音频路径和情感属性的列表
+    :param report_list: 包含相似度评分和情感属性的列表
+    :param audio_text_similarity_boundary: 相似度阈值
+    :return: 删除的文件数量
+    """
+    deleted_count = 0
+
+    # 筛选出平均相似度低于阈值的报告
+    low_similarity_reports = [report for report in report_list if
+                              report['average_similarity_score'] < audio_text_similarity_boundary]
+
+    # 遍历低相似度报告,查找并删除对应音频文件
+    for report in low_similarity_reports:
+        emotion = report['emotion']
+        # 查找ref_audio_list中相同情感的音频文件路径
+        matching_refs = [ref for ref in ref_audio_list if ref['emotion'] == emotion]
+        for match in matching_refs:
+            ref_path = match['ref_path']
+            # 检查文件是否存在,然后尝试删除
+            if os.path.exists(ref_path):
+                try:
+                    os.remove(ref_path)
+                    deleted_count += 1
+                    logger.info(f"Deleted file: {ref_path}")
+                except Exception as e:
+                    logger.error(f"Error deleting file {ref_path}: {e}")
+            else:
+                logger.error(f"File not found: {ref_path}")
+
+    return deleted_count
+
+
+def delete_ref_audio_below_boundary(ref_audio_path, text_similarity_result_path, sync_inference_audio_dir,
+                                    audio_text_similarity_boundary):
+    ref_audio_list = common.RefAudioListManager(ref_audio_path)
+    report_list = parse_text_similarity_result_txt(text_similarity_result_path)
+    count = remove_low_similarity_files(ref_audio_list, report_list, audio_text_similarity_boundary)
+    audio_check.sync_ref_audio(ref_audio_path, sync_inference_audio_dir)
+    return count
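parse_text_similarity_result_txt consumes the score|count|emotion report whose writer is updated in the asr_text_process.py diff that follows. A round-trip of one illustrative report line:

    line = "0.8731|12|emotion_3"
    parts = line.strip().split('|')
    item = {
        'average_similarity_score': float(parts[0]),  # 0.8731
        'count': int(parts[1]),                       # 12 audios in this emotion group
        'emotion': parts[2],                          # 'emotion_3'
    }

Note also that delete_ref_audio_below_boundary hands the RefAudioListManager instance itself to remove_low_similarity_files, which then iterates it expecting dicts with 'emotion' and 'ref_path' keys; elsewhere in the series get_ref_audio_list() is called on the manager first, so that call may deserve a second look.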
diff --git a/Ref_Audio_Selector/tool/text_comparison/asr_text_process.py b/Ref_Audio_Selector/tool/text_comparison/asr_text_process.py
index 96ce2187..c4eccd61 100644
--- a/Ref_Audio_Selector/tool/text_comparison/asr_text_process.py
+++ b/Ref_Audio_Selector/tool/text_comparison/asr_text_process.py
@@ -56,7 +56,7 @@ def calculate_average_similarity_by_emotion(data_list):
         similarity_score = item['similarity_score']
         result_dict[emotion].append(similarity_score)
 
-    average_scores = [{'emotion': emotion, 'average_similarity_score': sum(scores) / len(scores)}
+    average_scores = [{'emotion': emotion, 'average_similarity_score': sum(scores) / len(scores), 'count': len(scores)}
                       for emotion, scores in result_dict.items()]
 
     average_scores.sort(key=lambda x: x['average_similarity_score'], reverse=True)
@@ -123,7 +123,7 @@ def process(asr_file_path, output_dir, similarity_enlarge_boundary):
     average_similarity_file = os.path.join(output_dir,
                                            f'{params.text_emotion_average_similarity_report_filename}.txt')
     average_similarity_content = \
-        '\n'.join([f"{item['average_similarity_score']}|{item['emotion']}" for item in average_similarity_list])
+        '\n'.join([f"{item['average_similarity_score']}|{item['count']}|{item['emotion']}" for item in average_similarity_list])
     common.write_text_to_file(average_similarity_content, average_similarity_file)
 
     emotion_detail_list = group_and_sort_by_field(records, 'emotion')

From 6cb3c15448618546dbb1092e09ed2752ffd5b97a Mon Sep 17 00:00:00 2001
From: Downupanddownup
Date: Sun, 28 Apr 2024 15:20:10 +0800
Subject: [PATCH 34/72] =?UTF-8?q?=E6=B7=BB=E5=8A=A0=E9=9D=9E=E4=B8=AD?=
 =?UTF-8?q?=E6=96=87=E8=AF=AD=E8=A8=80=E7=9A=84asr=E6=93=8D=E4=BD=9C?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../file/test_content/test_content.txt        |   3 +-
 .../ref_audio_selector_webui.py               |  15 ++-
 .../asr/fasterwhisper_asr_multi_level_dir.py  | 119 ++++++++++++++++++
 .../tool/asr/funasr_asr_multi_level_dir.py    |   3 +-
 4 files changed, 135 insertions(+), 5 deletions(-)
 create mode 100644 Ref_Audio_Selector/tool/asr/fasterwhisper_asr_multi_level_dir.py

diff --git a/Ref_Audio_Selector/file/test_content/test_content.txt b/Ref_Audio_Selector/file/test_content/test_content.txt
index 507383c6..e023e157 100644
--- a/Ref_Audio_Selector/file/test_content/test_content.txt
+++ b/Ref_Audio_Selector/file/test_content/test_content.txt
@@ -9,4 +9,5 @@
 是我们为首都天文馆做的一个科普小玩意儿。现在的技术,已经能将彭齐阿斯和威尔逊在四十多年前用于发现特制背景辐射的二十英尺的喇叭形天线做成眼镜大小,
 并且在这个眼镜中设置一个转换系统,将接收到的背景辐射的波长压缩七个数量级,将7厘米波转换成红光。
 这样,观众在夜里戴上这种眼镜,就能亲眼看到宇宙的特制背景辐射,现在,也能看到宇宙闪烁。
-这东西现在哪儿?能告诉我吗
\ No newline at end of file
+这东西现在哪儿?能告诉我吗
+希望各位猫猫给视频三连支持一下猫窝,十分感谢支持喵~
\ No newline at end of file
diff --git a/Ref_Audio_Selector/ref_audio_selector_webui.py b/Ref_Audio_Selector/ref_audio_selector_webui.py
index 41ce14a6..c9fdb793 100644
--- a/Ref_Audio_Selector/ref_audio_selector_webui.py
+++ b/Ref_Audio_Selector/ref_audio_selector_webui.py
@@ -515,6 +515,12 @@ def delete_ref_audio_below_boundary(ref_audio_path, text_text_similarity_result_
         text_delete_ref_audio_below_boundary_info = f"发生异常:{e}"
     return text_delete_ref_audio_below_boundary_info
 
+def change_lang_choices(key): #根据选择的模型修改可选的语言
+    # return gr.Dropdown(choices=asr_dict[key]['lang'])
+    return {"__type__": "update", "choices": asr_dict[key]['lang'],"value":asr_dict[key]['lang'][0]}
+def change_size_choices(key): # 根据选择的模型修改可选的模型尺寸
+    # return gr.Dropdown(choices=asr_dict[key]['size'])
+    return {"__type__": "update", "choices": asr_dict[key]['size']}
 
 def save_generate_audio_url(generate_audio_url):
@@ -687,7 +693,10 @@ def save_role(text_role):
             text_emotion.blur(save_emotion_param, [text_emotion], [])
             gr.Markdown(value=i18n("2.3:配置待推理文本,一句一行,不要太多,10条即可"))
             default_test_content_path = params.default_test_text_path
-            text_test_content = gr.Text(label=i18n("请输入待推理文本路径"), value=default_test_content_path)
+            with gr.Row():
+                text_test_content = gr.Text(label=i18n("请输入待推理文本路径"), value=default_test_content_path)
+                button_open_test_content_file = gr.Button(i18n("打开待推理文本文件"), variant="primary")
+                button_open_test_content_file.click(open_file, [text_test_content], [])
             gr.Markdown(value=i18n("2.4:开始批量推理,这个过程比较耗时,可以去干点别的"))
             slider_request_concurrency_num = gr.Slider(minimum=1, maximum=10, step=1, label=i18n("请输入请求并发数,会根据此数创建对应数量的子进程并行发起推理请求"), value=3,
                                                        interactive=True)
@@ -703,7 +712,7 @@ def save_role(text_role):
             with gr.Row():
                 dropdown_asr_model = gr.Dropdown(
                     label=i18n("ASR 模型"),
-                    choices=[],
+                    choices=list(asr_dict.keys()),
                     interactive=True,
                     value="达摩 ASR (中文)"
                 )
@@ -719,6 +728,8 @@ def save_role(text_role):
                     interactive=True,
                     value="zh"
                 )
+                dropdown_asr_model.change(change_lang_choices, [dropdown_asr_model], [dropdown_asr_lang])
+                dropdown_asr_model.change(change_size_choices, [dropdown_asr_model], [dropdown_asr_size])
             with gr.Row():
                 button_asr = gr.Button(i18n("启动asr"), variant="primary")
                 text_asr_info = gr.Text(label=i18n("asr结果"), value="", interactive=False)
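change_lang_choices / change_size_choices return the raw {"__type__": "update", ...} dict rather than the gr.Dropdown(...) spelling left in the comments; both tell Gradio to patch the target component's properties in place. A minimal sketch of the pattern, with an illustrative two-entry stand-in for the asr_dict from tools.asr.config:

    import gradio as gr

    asr_dict = {
        "达摩 ASR (中文)": {"lang": ["zh"], "size": ["large"]},
        "Faster Whisper (多语种)": {"lang": ["auto", "zh", "en", "ja"],
                                    "size": ["tiny", "base", "large-v3"]},
    }

    def change_lang_choices(key):
        # swap the choices and reset the value to the first valid language
        return {"__type__": "update", "choices": asr_dict[key]["lang"],
                "value": asr_dict[key]["lang"][0]}

    with gr.Blocks() as demo:
        model = gr.Dropdown(choices=list(asr_dict.keys()), value="达摩 ASR (中文)")
        lang = gr.Dropdown(choices=["zh"], value="zh")
        model.change(change_lang_choices, [model], [lang])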
diff --git a/Ref_Audio_Selector/tool/asr/fasterwhisper_asr_multi_level_dir.py b/Ref_Audio_Selector/tool/asr/fasterwhisper_asr_multi_level_dir.py
new file mode 100644
index 00000000..301b2c5b
--- /dev/null
+++ b/Ref_Audio_Selector/tool/asr/fasterwhisper_asr_multi_level_dir.py
@@ -0,0 +1,119 @@
+import argparse
+import os
+import traceback
+import Ref_Audio_Selector.config_param.config_params as params
+
+os.environ["HF_ENDPOINT"] = "https://hf-mirror.com"
+os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
+
+import torch
+from faster_whisper import WhisperModel
+from tqdm import tqdm
+
+from tools.asr.config import check_fw_local_models
+from Ref_Audio_Selector.config_param.log_config import logger
+
+language_code_list = [
+    "af", "am", "ar", "as", "az",
+    "ba", "be", "bg", "bn", "bo",
+    "br", "bs", "ca", "cs", "cy",
+    "da", "de", "el", "en", "es",
+    "et", "eu", "fa", "fi", "fo",
+    "fr", "gl", "gu", "ha", "haw",
+    "he", "hi", "hr", "ht", "hu",
+    "hy", "id", "is", "it", "ja",
+    "jw", "ka", "kk", "km", "kn",
+    "ko", "la", "lb", "ln", "lo",
+    "lt", "lv", "mg", "mi", "mk",
+    "ml", "mn", "mr", "ms", "mt",
+    "my", "ne", "nl", "nn", "no",
+    "oc", "pa", "pl", "ps", "pt",
+    "ro", "ru", "sa", "sd", "si",
+    "sk", "sl", "sn", "so", "sq",
+    "sr", "su", "sv", "sw", "ta",
+    "te", "tg", "th", "tk", "tl",
+    "tr", "tt", "uk", "ur", "uz",
+    "vi", "yi", "yo", "zh", "yue",
+    "auto"]
+
+
+def execute_asr_multi_level_dir(input_folder, output_folder, model_size, language, precision):
+    if '-local' in model_size:
+        model_size = model_size[:-6]
+        model_path = f'tools/asr/models/faster-whisper-{model_size}'
+    else:
+        model_path = model_size
+    if language == 'auto':
+        language = None  # 不设置语种由模型自动输出概率最高的语种
+    logger.info("loading faster whisper model:", model_size, model_path)
+    device = 'cuda' if torch.cuda.is_available() else 'cpu'
+    try:
+        model = WhisperModel(model_path, device=device, compute_type=precision)
+    except:
+        return logger.error(traceback.format_exc())
+
+    output = []
+
+    # 递归遍历输入目录及所有子目录
+    for root, dirs, files in os.walk(input_folder):
+        for file_name in sorted(files):
+            # 只处理wav文件(假设是wav文件)
+            if file_name.endswith(".wav"):
+                try:
+                    file_path = os.path.join(input_folder, file_name)
+                    original_text = os.path.basename(root)
+                    segments, info = model.transcribe(
+                        audio=file_path,
+                        beam_size=5,
+                        vad_filter=True,
+                        vad_parameters=dict(min_silence_duration_ms=700),
+                        language=language)
+                    text = ''
+
+                    if info.language == "zh":
+                        logger.info("检测为中文文本, 转 FunASR 处理")
+                        if ("only_asr" not in globals()):
+                            from Ref_Audio_Selector.tool.asr.funasr_asr_multi_level_dir import \
+                                only_asr  # #如果用英文就不需要导入下载模型
+                        text = only_asr(file_path)
+
+                    if text == '':
+                        for segment in segments:
+                            text += segment.text
+                    output.append(f"{file_path}|{original_text}|{info.language.upper()}|{text}")
+                except:
+                    return logger.error(traceback.format_exc())
+
+    output_folder = output_folder
+    os.makedirs(output_folder, exist_ok=True)
+    output_file_path = os.path.abspath(f'{output_folder}/{params.asr_filename}.list')
+
+    with open(output_file_path, "w", encoding="utf-8") as f:
+        f.write("\n".join(output))
+        logger.info(f"ASR 任务完成->标注文件路径: {output_file_path}\n")
+    return output_file_path
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument("-i", "--input_folder", type=str, required=True,
+                        help="Path to the folder containing WAV files.")
+    parser.add_argument("-o", "--output_folder", type=str, required=True,
+                        help="Output folder to store transcriptions.")
+    parser.add_argument("-s", "--model_size", type=str, default='large-v3',
+                        choices=check_fw_local_models(),
+                        help="Model Size of Faster Whisper")
+    parser.add_argument("-l", "--language", type=str, default='ja',
+                        choices=language_code_list,
+                        help="Language of the audio files.")
+    parser.add_argument("-p", "--precision", type=str, default='float16', choices=['float16', 'float32'],
+                        help="fp16 or fp32")
+
+    cmd = parser.parse_args()
+    output_file_path = execute_asr_multi_level_dir(
+        input_folder=cmd.input_folder,
+        output_folder=cmd.output_folder,
+        model_size=cmd.model_size,
+        language=cmd.language,
+        precision=cmd.precision,
+    )
diff --git a/Ref_Audio_Selector/tool/asr/funasr_asr_multi_level_dir.py b/Ref_Audio_Selector/tool/asr/funasr_asr_multi_level_dir.py
index 22fbfc1a..abe45e9e 100644
--- a/Ref_Audio_Selector/tool/asr/funasr_asr_multi_level_dir.py
+++ b/Ref_Audio_Selector/tool/asr/funasr_asr_multi_level_dir.py
@@ -39,7 +39,6 @@ def only_asr(input_file):
 @timeit_decorator
 def execute_asr_multi_level_dir(input_folder, output_folder, model_size, language):
     output = []
-    output_file_name = os.path.basename(input_folder)
     # 递归遍历输入目录及所有子目录
     for root, dirs, files in os.walk(input_folder):
         for name in sorted(files):
@@ -58,7 +57,7 @@ def execute_asr_multi_level_dir(input_folder, output_folder, model_size, languag
             logger.error(traceback.format_exc())
 
     # 创建或打开指定的输出目录
-    output_folder = output_folder or "output/asr_opt"
+    output_folder = output_folder
     output_dir_abs = os.path.abspath(output_folder)
     os.makedirs(output_dir_abs, exist_ok=True)
 
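The new faster-whisper script keeps whisper as the primary recognizer and falls back to FunASR whenever the detected language is Chinese. A hypothetical invocation, assuming the weights already sit under tools/asr/models/ and using illustrative paths:

    from Ref_Audio_Selector.tool.asr.fasterwhisper_asr_multi_level_dir import \
        execute_asr_multi_level_dir

    list_file = execute_asr_multi_level_dir(
        input_folder="work/role/inference_audio/text",
        output_folder="work/role/asr",
        model_size="large-v3",
        language="ja",
        precision="float16",
    )

One caveat: inside the os.walk loop the script joins file_path against input_folder rather than root, so WAV files living in nested emotion subdirectories may fail to resolve; joining against root (as os.walk intends) would avoid that.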
From 27325f4cf974c470df73556a7f12e825022946aa Mon Sep 17 00:00:00 2001
From: Downupanddownup
Date: Sun, 28 Apr 2024 15:49:05 +0800
Subject: [PATCH 35/72] =?UTF-8?q?=E8=B0=83=E6=95=B4=E9=A1=B9=E7=9B=AE?=
 =?UTF-8?q?=E7=BB=93=E6=9E=84=EF=BC=8C=E4=BF=AE=E5=A4=8D=E9=9A=8F=E6=9C=BA?=
 =?UTF-8?q?=E9=87=87=E6=A0=B7bug?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../{tool => common}/model_manager.py             |  0
 Ref_Audio_Selector/ref_audio_selector_webui.py    | 14 +++++++-------
 .../tool/{audio_similarity.py => audio_sample.py} | 13 ++++++++-----
 3 files changed, 15 insertions(+), 12 deletions(-)
 rename Ref_Audio_Selector/{tool => common}/model_manager.py (100%)
 rename Ref_Audio_Selector/tool/{audio_similarity.py => audio_sample.py} (93%)

diff --git a/Ref_Audio_Selector/tool/model_manager.py b/Ref_Audio_Selector/common/model_manager.py
similarity index 100%
rename from Ref_Audio_Selector/tool/model_manager.py
rename to Ref_Audio_Selector/common/model_manager.py
diff --git a/Ref_Audio_Selector/ref_audio_selector_webui.py b/Ref_Audio_Selector/ref_audio_selector_webui.py
index c9fdb793..fb685155 100644
--- a/Ref_Audio_Selector/ref_audio_selector_webui.py
+++ b/Ref_Audio_Selector/ref_audio_selector_webui.py
@@ -6,8 +6,8 @@
 
 from Ref_Audio_Selector.config_param.log_config import logger
 
-import Ref_Audio_Selector.tool.model_manager as model_manager
-import Ref_Audio_Selector.tool.audio_similarity as audio_similarity
+import Ref_Audio_Selector.common.model_manager as model_manager
+import Ref_Audio_Selector.tool.audio_sample as audio_sample
 import Ref_Audio_Selector.tool.audio_inference as audio_inference
 import Ref_Audio_Selector.tool.audio_config as audio_config
 import Ref_Audio_Selector.tool.audio_check as audio_check
@@ -58,12 +58,12 @@ def convert_from_list(text_work_space_dir, text_role, text_list_input):
         ref_audio_all = os.path.join(base_role_dir,
                                      params.list_to_convert_reference_audio_dir)
 
-        time_consuming, _ = time_util.time_monitor(audio_similarity.convert_from_list)(text_list_input, ref_audio_all)
+        time_consuming, _ = time_util.time_monitor(audio_sample.convert_from_list)(text_list_input, ref_audio_all)
 
         text_convert_from_list_info = f"耗时:{time_consuming:0.1f}秒;转换成功:生成目录{ref_audio_all}"
         text_sample_dir = ref_audio_all
 
-        # audio_similarity.convert_from_list(text_list_input, ref_audio_all)
+        # audio_sample.convert_from_list(text_list_input, ref_audio_all)
     except Exception as e:
         logger.error("发生异常: \n%s", traceback.format_exc())
         text_convert_from_list_info = f"发生异常:{e}"
@@ -92,11 +92,11 @@ def start_similarity_analysis(work_space_dir, sample_dir, base_voice_path, need_
         p_similarity = Popen(cmd, shell=True)
         p_similarity.wait()
 
-        similarity_list = audio_similarity.parse_similarity_file(similarity_file)
+        similarity_list = audio_sample.parse_similarity_file(similarity_file)
 
         if need_similarity_output:
             similarity_file_dir = os.path.join(similarity_dir, base_voice_file_name)
-            audio_similarity.copy_and_move(similarity_file_dir, similarity_list)
+            audio_sample.copy_and_move(similarity_file_dir, similarity_list)
 
         p_similarity = None
         return similarity_list, similarity_file, similarity_file_dir
@@ -137,7 +137,7 @@ def sample(text_work_space_dir, text_role, text_sample_dir, text_base_voice_path
         if similarity_list is None:
             raise Exception("相似度分析失败")
 
-        audio_similarity.sample(ref_audio_dir, similarity_list, slider_subsection_num, slider_sample_num)
+        audio_sample.sample(ref_audio_dir, similarity_list, slider_subsection_num, slider_sample_num)
 
     except Exception as e:
         logger.error("发生异常: \n%s", traceback.format_exc())
diff --git a/Ref_Audio_Selector/tool/audio_similarity.py b/Ref_Audio_Selector/tool/audio_sample.py
similarity index 93%
rename from Ref_Audio_Selector/tool/audio_similarity.py
rename to Ref_Audio_Selector/tool/audio_sample.py
index 8517c64b..9655911c 100644
--- a/Ref_Audio_Selector/tool/audio_similarity.py
+++ b/Ref_Audio_Selector/tool/audio_sample.py
@@ -85,11 +85,14 @@ def sample(output_audio_dir, similarity_list, subsection_num, sample_num):
         end = (i + 1) * step
         end = min(end, len(similarity_list))  # 防止最后一段越界
 
-        num = min(sample_num, len(similarity_list[start:end]))
+        # 创建子列表
+        subsection = similarity_list[start:end]
+        # 在子列表上随机打乱
+        random.shuffle(subsection)
 
-        # 随机采样
-        random.shuffle(similarity_list[start:end])
-        sampled_subsection = similarity_list[start:start + num]
+        # 从打乱后的子列表中抽取相应数量的个体
+        num = min(sample_num, len(subsection))
+        sampled_subsection = subsection[:num]
 
         # 创建并进入子目录
         subdir_name = f'emotion_{i + 1}'
@@ -143,7 +146,7 @@ def copy_and_move(output_audio_directory, similarity_scores):
     for item in similarity_scores:
         # 构造新的文件名
         base_name = os.path.basename(item['wav_path'])[:-4]  # 去掉.wav扩展名
-        new_name = f"{item['score']*10000:04.0f}-{base_name}.wav"
+        new_name = f"{item['score'] * 10000:04.0f}-{base_name}.wav"
 
         # 新文件的完整路径
         new_path = os.path.join(output_audio_directory, new_name)
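The audio_sample.py hunk above is the advertised sampling fix: slicing creates a new list, so random.shuffle(similarity_list[start:end]) shuffled a throwaway copy and the subsequent slice always returned the head of the untouched segment. Shuffling a named sub-list and drawing from it behaves as intended:

    import random

    similarity_list = list(range(100))  # stand-in for scored audio entries
    start, end, sample_num = 0, 25, 4

    # before: the shuffle acts on an unnamed copy and is lost immediately
    random.shuffle(similarity_list[start:end])
    biased = similarity_list[start:start + sample_num]   # always [0, 1, 2, 3]

    # after (this patch): shuffle a named subsection, then slice from it
    subsection = similarity_list[start:end]
    random.shuffle(subsection)
    sampled = subsection[:min(sample_num, len(subsection))]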
From 13567362d929888114eec1663dbe60ad63cc4e28 Mon Sep 17 00:00:00 2001
From: Downupanddownup
Date: Sun, 28 Apr 2024 16:44:45 +0800
Subject: [PATCH 36/72] =?UTF-8?q?=E6=8F=90=E5=8F=96=E4=B8=80=E9=83=A8?=
 =?UTF-8?q?=E5=88=86=E5=85=AC=E5=85=B1=E7=BB=84=E4=BB=B6?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../ref_audio_selector_webui.py               | 333 +++++++++++-------
 1 file changed, 204 insertions(+), 129 deletions(-)

diff --git a/Ref_Audio_Selector/ref_audio_selector_webui.py b/Ref_Audio_Selector/ref_audio_selector_webui.py
index fb685155..8673a272 100644
--- a/Ref_Audio_Selector/ref_audio_selector_webui.py
+++ b/Ref_Audio_Selector/ref_audio_selector_webui.py
@@ -63,7 +63,6 @@ def convert_from_list(text_work_space_dir, text_role, text_list_input):
 
         text_convert_from_list_info = f"耗时:{time_consuming:0.1f}秒;转换成功:生成目录{ref_audio_all}"
         text_sample_dir = ref_audio_all
-
     except Exception as e:
         logger.error("发生异常: \n%s", traceback.format_exc())
         text_convert_from_list_info = f"发生异常:{e}"
@@ -131,9 +130,6 @@ def sample(text_work_space_dir, text_role, text_sample_dir, text_base_voice_path
 
         text_sample_info = f"耗时:{time_consuming:0.1f}秒;抽样成功:生成目录{ref_audio_dir}"
 
-        # similarity_list, _, _ = start_similarity_analysis(base_role_dir, text_sample_dir,
-        #                                                   text_base_voice_path, checkbox_similarity_output)
-
         if similarity_list is None:
             raise Exception("相似度分析失败")
 
@@ -143,25 +139,24 @@ def sample(text_work_space_dir, text_role, text_sample_dir, text_base_voice_path
         logger.error("发生异常: \n%s", traceback.format_exc())
         text_sample_info = f"发生异常:{e}"
        ref_audio_dir = ''
-    text_model_inference_voice_dir = ref_audio_dir
-    text_sync_ref_audio_dir = ref_audio_dir
-    text_sync_ref_audio_dir2 = ref_audio_dir
-    return i18n(text_sample_info), text_model_inference_voice_dir, text_sync_ref_audio_dir, text_sync_ref_audio_dir2
+    text_refer_audio_file_dir = ref_audio_dir
+    return i18n(text_sample_info), text_refer_audio_file_dir
 
 
 # 根据参考音频和测试文本,执行批量推理
-def model_inference(text_work_space_dir, text_role, slider_request_concurrency_num, text_model_inference_voice_dir, text_url,
+def model_inference(text_work_space_dir, text_role, slider_request_concurrency_num, text_refer_audio_file_dir,
+                    text_url,
                     text_text, text_ref_path, text_ref_text, text_emotion,
                     text_test_content_dir):
-    text_work_space_dir, text_model_inference_voice_dir, text_test_content_dir \
-        = common.batch_clean_paths([text_work_space_dir, text_model_inference_voice_dir, text_test_content_dir])
+    text_work_space_dir, text_refer_audio_file_dir, text_test_content_dir \
+        = common.batch_clean_paths([text_work_space_dir, text_refer_audio_file_dir, text_test_content_dir])
     inference_dir = None
     text_asr_audio_dir = None
     text_model_inference_info = None
     try:
         base_role_dir = check_base_info(text_work_space_dir, text_role)
-        if text_model_inference_voice_dir is None or text_model_inference_voice_dir == '':
+        if text_refer_audio_file_dir is None or text_refer_audio_file_dir == '':
             raise Exception("待推理的参考音频所在目录不能为空,请先完成上一步操作")
         if text_url is None or text_url == '':
             raise Exception("推理服务请求地址不能为空")
@@ -182,7 +177,7 @@ def model_inference(text_work_space_dir, text_role, slider_request_concurrency_n
         text_list = common.read_text_file_to_list(text_test_content_dir)
         if text_list is None or len(text_list) == 0:
             raise Exception("待推理文本内容不能为空")
-        ref_audio_manager = common.RefAudioListManager(text_model_inference_voice_dir)
+        ref_audio_manager = common.RefAudioListManager(text_refer_audio_file_dir)
         if len(ref_audio_manager.get_audio_list()) == 0:
             raise Exception("待推理的参考音频不能为空")
 
@@ -194,8 +189,6 @@ def model_inference(text_work_space_dir, text_role, slider_request_concurrency_n
 
         text_model_inference_info = f"耗时:{time_consuming:0.1f}秒;推理成功:生成目录{inference_dir}"
 
-        # audio_inference.generate_audio_files(url_composer, text_list, ref_audio_manager.get_ref_audio_list(),
-        #                                      inference_dir)
     except Exception as e:
         logger.error("发生异常: \n%s", traceback.format_exc())
         text_model_inference_info = f"发生异常:{e}"
@@ -227,8 +220,6 @@ def asr(text_work_space_dir, text_role, text_asr_audio_dir, dropdown_asr_model,
                                                            dropdown_asr_model, dropdown_asr_size,
                                                            dropdown_asr_lang)
 
-        # asr_file = open_asr(text_asr_audio_dir, base_role_dir, dropdown_asr_model, dropdown_asr_size,
-        #                     dropdown_asr_lang)
         text_text_similarity_analysis_path = asr_file
         text_asr_info = f"耗时:{time_consuming:0.1f}秒;asr成功:生成文件{asr_file}"
     except Exception as e:
@@ -270,7 +261,7 @@ def open_asr(asr_inp_dir, asr_opt_dir, asr_model, asr_model_size, asr_lang):
 
 
 # 对asr生成的文件,与原本的文本内容,进行相似度分析
-def text_similarity_analysis(text_work_space_dir, text_role, slider_text_similarity_amplification_boundary, 
+def text_similarity_analysis(text_work_space_dir, text_role, slider_text_similarity_amplification_boundary,
                              text_text_similarity_analysis_path):
     text_work_space_dir, text_text_similarity_analysis_path \
         = common.batch_clean_paths([text_work_space_dir, text_text_similarity_analysis_path])
@@ -284,11 +275,11 @@ def text_similarity_analysis(text_work_space_dir, text_role, slider_text_similar
         similarity_dir = os.path.join(base_role_dir, params.text_similarity_output_dir)
 
         time_consuming, _ = time_util.time_monitor(open_text_similarity_analysis)(text_text_similarity_analysis_path,
-                                                                                  similarity_dir, slider_text_similarity_amplification_boundary)
+                                                                                  similarity_dir,
+                                                                                  slider_text_similarity_amplification_boundary)
 
         text_text_similarity_analysis_info = f"耗时:{time_consuming:0.1f}秒;相似度分析成功:生成目录{similarity_dir}"
 
-        # open_text_similarity_analysis(text_text_similarity_analysis_path, similarity_dir)
     except Exception as e:
         logger.error("发生异常: \n%s", traceback.format_exc())
         text_text_similarity_analysis_info = f"发生异常:{e}"
@@ -332,9 +323,6 @@ def similarity_audio_output(text_work_space_dir, text_role, text_base_audio_path
             = time_util.time_monitor(start_similarity_analysis)(base_role_dir, text_compare_audio_dir,
                                                                 text_base_audio_path, True)
 
-        # similarity_list, similarity_file, similarity_file_dir = start_similarity_analysis(
-        #     base_role_dir, text_compare_audio_dir, text_base_audio_path, True)
-
         if similarity_list is None:
             raise Exception("相似度分析失败")
 
@@ -347,23 +335,23 @@ def similarity_audio_output(text_work_space_dir, text_role, text_base_audio_path
 
 
 # 根据参考音频目录的删除情况,将其同步到推理生成的音频目录中,即参考音频目录下,删除了几个参考音频,就在推理目录下,将这些参考音频生成的音频文件移除
-def sync_ref_audio(text_work_space_dir, text_role, text_sync_ref_audio_dir,
-                   text_sync_inference_audio_dir):
-    text_work_space_dir, text_sync_ref_audio_dir, text_sync_inference_audio_dir \
-        = common.batch_clean_paths([text_work_space_dir, text_sync_ref_audio_dir, text_sync_inference_audio_dir])
+def sync_ref_audio(text_work_space_dir, text_role, text_refer_audio_file_dir,
+                   text_inference_audio_file_dir):
+    text_work_space_dir, text_refer_audio_file_dir, text_inference_audio_file_dir \
+        = common.batch_clean_paths([text_work_space_dir, text_refer_audio_file_dir, text_inference_audio_file_dir])
     text_sync_ref_audio_info = None
     try:
         check_base_info(text_work_space_dir, text_role)
-        if text_sync_ref_audio_dir is None or text_sync_ref_audio_dir == '':
+        if text_refer_audio_file_dir is None or text_refer_audio_file_dir == '':
             raise Exception("参考音频目录不能为空")
-        if text_sync_inference_audio_dir is None or text_sync_inference_audio_dir == '':
+        if text_inference_audio_file_dir is None or text_inference_audio_file_dir == '':
             raise Exception("推理生成的音频目录不能为空")
         time_consuming, (delete_text_wav_num, delete_emotion_dir_num) \
-            = time_util.time_monitor(audio_check.sync_ref_audio)(text_sync_ref_audio_dir,
-                                                                 text_sync_inference_audio_dir)
+            = time_util.time_monitor(audio_check.sync_ref_audio)(text_refer_audio_file_dir,
                                                                  text_inference_audio_file_dir)
 
-        text_sync_ref_audio_info = (f"耗时:{time_consuming:0.1f}秒;推理音频目录{text_sync_inference_audio_dir}下,"
+        text_sync_ref_audio_info = (f"耗时:{time_consuming:0.1f}秒;推理音频目录{text_inference_audio_file_dir}下,"
                                     f"text目录删除了{delete_text_wav_num}个推理音频,emotion目录下,删除了{delete_emotion_dir_num}个目录")
     except Exception as e:
         logger.error("发生异常: \n%s", traceback.format_exc())
@@ -372,9 +360,9 @@ def sync_ref_audio(text_work_space_dir, text_role, text_sync_ref_audio_dir,
 
 
 # 根据模板和参考音频目录,生成参考音频配置内容
-def create_config(text_work_space_dir, text_role, text_template, text_sync_ref_audio_dir2):
-    text_work_space_dir, text_sync_ref_audio_dir2 \
-        = common.batch_clean_paths([text_work_space_dir, text_sync_ref_audio_dir2])
+def create_config(text_work_space_dir, text_role, text_template, text_refer_audio_file_dir):
+    text_work_space_dir, text_refer_audio_file_dir \
+        = common.batch_clean_paths([text_work_space_dir, text_refer_audio_file_dir])
     config_file = None
     text_create_config_info = None
@@ -382,18 +370,15 @@ def create_config(text_work_space_dir, text_role, text_template, text_sync_ref_a
         base_role_dir = check_base_info(text_work_space_dir, text_role)
         if text_template is None or text_template == '':
             raise Exception("参考音频抽样目录不能为空")
-        if text_sync_ref_audio_dir2 is None or text_sync_ref_audio_dir2 == '':
+        if text_refer_audio_file_dir is None or text_refer_audio_file_dir == '':
             raise Exception("参考音频目录不能为空")
         config_file = os.path.join(base_role_dir, f'{params.reference_audio_config_filename}.json')
-        ref_audio_manager = common.RefAudioListManager(text_sync_ref_audio_dir2)
+        ref_audio_manager = common.RefAudioListManager(text_refer_audio_file_dir)
         time_consuming, _ = time_util.time_monitor(audio_config.generate_audio_config)(base_role_dir, text_template,
                                                                                        ref_audio_manager.get_ref_audio_list(),
                                                                                        config_file)
 
-        # audio_config.generate_audio_config(base_role_dir, text_template, ref_audio_manager.get_ref_audio_list(),
-        #                                    config_file)
-
         text_create_config_info = f"耗时:{time_consuming:0.1f}秒;配置生成成功:生成文件{config_file}"
 
     except Exception as e:
@@ -428,21 +413,27 @@ def refresh_api_model():
             {"choices": model_manager.get_sovits_model_names(), "__type__": "update"})
 
 
-def api_set_model_whole_url(text_api_set_model_base_url, dropdown_api_gpt_models, dropdown_api_sovits_models, text_api_gpt_param, text_api_sovits_param):
-    url = audio_inference.SetModelURLComposer("all", text_api_set_model_base_url, text_api_gpt_param, text_api_sovits_param)
+def api_set_model_whole_url(text_api_set_model_base_url, dropdown_api_gpt_models, dropdown_api_sovits_models,
+                            text_api_gpt_param, text_api_sovits_param):
+    url = audio_inference.SetModelURLComposer("all", text_api_set_model_base_url, text_api_gpt_param,
+                                              text_api_sovits_param)
     return url.build_get_url([dropdown_api_gpt_models, dropdown_api_sovits_models], False)
 
 
-def start_api_set_model(text_api_set_model_base_url, dropdown_api_gpt_models, dropdown_api_sovits_models, text_api_gpt_param, text_api_sovits_param):
+def start_api_set_model(text_api_set_model_base_url, dropdown_api_gpt_models, dropdown_api_sovits_models,
+                        text_api_gpt_param, text_api_sovits_param):
     text_api_start_set_model_request_info = None
     try:
         if dropdown_api_gpt_models is None or dropdown_api_gpt_models == '':
             raise Exception("GPT模型不能为空")
         if dropdown_api_sovits_models is None or dropdown_api_sovits_models == '':
             raise Exception("Sovits模型不能为空")
-        url = audio_inference.SetModelURLComposer("all", text_api_set_model_base_url, text_api_gpt_param, text_api_sovits_param)
+        url = audio_inference.SetModelURLComposer("all", text_api_set_model_base_url, text_api_gpt_param,
+                                                  text_api_sovits_param)
         url.is_valid()
-        time_consuming, result = time_util.time_monitor(audio_inference.start_api_set_model)(url, dropdown_api_gpt_models, dropdown_api_sovits_models)
+        time_consuming, result = time_util.time_monitor(audio_inference.start_api_set_model)(url,
+                                                                                             dropdown_api_gpt_models,
+                                                                                             dropdown_api_sovits_models)
         text_api_start_set_model_request_info = f"耗时:{time_consuming:0.1f}秒;请求结果:{result}"
     except Exception as e:
         logger.error("发生异常: \n%s", traceback.format_exc())
@@ -454,18 +445,24 @@ def refresh_api_v2_gpt_model():
     return {"choices": model_manager.get_gpt_model_names(), "__type__": "update"}
 
 
-def api_v2_set_gpt_whole_url(text_api_v2_set_gpt_model_base_url, text_api_v2_gpt_model_param, dropdown_api_v2_gpt_models):
-    url = audio_inference.SetModelURLComposer("gpt", text_api_v2_set_gpt_model_base_url, text_api_v2_gpt_model_param, None)
+def api_v2_set_gpt_whole_url(text_api_v2_set_gpt_model_base_url, text_api_v2_gpt_model_param,
+                             dropdown_api_v2_gpt_models):
+    url = audio_inference.SetModelURLComposer("gpt", text_api_v2_set_gpt_model_base_url, text_api_v2_gpt_model_param,
+                                              None)
     return url.build_get_url([dropdown_api_v2_gpt_models], False)
 
-def start_api_v2_set_gpt_model(text_api_v2_set_gpt_model_base_url, text_api_v2_gpt_model_param, dropdown_api_v2_gpt_models):
+
+def start_api_v2_set_gpt_model(text_api_v2_set_gpt_model_base_url, text_api_v2_gpt_model_param,
+                               dropdown_api_v2_gpt_models):
     text_api_v2_start_set_gpt_model_request_info = None
     try:
         if dropdown_api_v2_gpt_models is None or dropdown_api_v2_gpt_models == '':
             raise Exception("GPT模型不能为空")
-        url = audio_inference.SetModelURLComposer("gpt", text_api_v2_set_gpt_model_base_url, text_api_v2_gpt_model_param, None)
+        url = audio_inference.SetModelURLComposer("gpt", text_api_v2_set_gpt_model_base_url,
+                                                  text_api_v2_gpt_model_param, None)
         url.is_valid()
-        time_consuming, result = time_util.time_monitor(audio_inference.start_api_v2_set_gpt_model)(url, dropdown_api_v2_gpt_models)
+        time_consuming, result = time_util.time_monitor(audio_inference.start_api_v2_set_gpt_model)(url,
                                                                                                     dropdown_api_v2_gpt_models)
         text_api_v2_start_set_gpt_model_request_info = f"耗时:{time_consuming:0.1f}秒;请求结果:{result}"
     except Exception as e:
         logger.error("发生异常: \n%s", traceback.format_exc())
@@ -476,19 +473,25 @@ def start_api_v2_set_gpt_model(text_api_v2_set_gpt_model_base_url, text_api_v2_g
 def refresh_api_v2_sovits_model():
     return {"choices": model_manager.get_sovits_model_names(), "__type__": "update"}
 
-def api_v2_set_sovits_whole_url(text_api_v2_set_sovits_model_base_url, text_api_v2_sovits_model_param, dropdown_api_v2_sovits_models):
-    url = audio_inference.SetModelURLComposer("sovits", text_api_v2_set_sovits_model_base_url, None, text_api_v2_sovits_model_param)
+
+def api_v2_set_sovits_whole_url(text_api_v2_set_sovits_model_base_url, text_api_v2_sovits_model_param,
+                                dropdown_api_v2_sovits_models):
+    url = audio_inference.SetModelURLComposer("sovits", text_api_v2_set_sovits_model_base_url, None,
+                                              text_api_v2_sovits_model_param)
     return url.build_get_url([dropdown_api_v2_sovits_models], False)
 
 
-def start_api_v2_set_sovits_model(text_api_v2_set_sovits_model_base_url, text_api_v2_sovits_model_param, dropdown_api_v2_sovits_models):
+def start_api_v2_set_sovits_model(text_api_v2_set_sovits_model_base_url, text_api_v2_sovits_model_param,
+                                  dropdown_api_v2_sovits_models):
     text_api_v2_start_set_sovits_model_request_info = None
     try:
         if dropdown_api_v2_sovits_models is None or dropdown_api_v2_sovits_models == '':
             raise Exception("Sovits模型不能为空")
-        url = audio_inference.SetModelURLComposer("sovits", text_api_v2_set_sovits_model_base_url, None, text_api_v2_sovits_model_param)
+        url = audio_inference.SetModelURLComposer("sovits", text_api_v2_set_sovits_model_base_url, None,
+                                                  text_api_v2_sovits_model_param)
         url.is_valid()
-        time_consuming, result = time_util.time_monitor(audio_inference.start_api_v2_set_sovits_model)(url, dropdown_api_v2_sovits_models)
+        time_consuming, result = time_util.time_monitor(audio_inference.start_api_v2_set_sovits_model)(url,
                                                                                                        dropdown_api_v2_sovits_models)
         text_api_v2_start_set_sovits_model_request_info = f"耗时:{time_consuming:0.1f}秒;请求结果:{result}"
     except Exception as e:
         logger.error("发生异常: \n%s", traceback.format_exc())
@@ -500,25 +503,33 @@ def open_file(file_path):
     common.open_file(my_utils.clean_path(file_path))
 
 
-def delete_ref_audio_below_boundary(ref_audio_path, text_text_similarity_result_path, text_sync_inference_audio_dir, slider_audio_text_similarity_boundary):
+def delete_ref_audio_below_boundary(ref_audio_path, text_text_similarity_result_path, text_inference_audio_file_dir,
+                                    slider_audio_text_similarity_boundary):
     text_delete_ref_audio_below_boundary_info = None
-    ref_audio_path, text_text_similarity_result_path, text_sync_inference_audio_dir = common.batch_clean_paths([ref_audio_path, text_text_similarity_result_path, text_sync_inference_audio_dir])
+    ref_audio_path, text_text_similarity_result_path, text_inference_audio_file_dir = common.batch_clean_paths(
+        [ref_audio_path, text_text_similarity_result_path, text_inference_audio_file_dir])
     try:
         if ref_audio_path is None or ref_audio_path == '':
             raise Exception("参考音频路径不能为空")
         if text_text_similarity_result_path is None or text_text_similarity_result_path == '':
             raise Exception("文本相似度结果路径不能为空")
-        time_consuming, count = time_util.time_monitor(text_check.delete_ref_audio_below_boundary)(ref_audio_path, text_text_similarity_result_path, text_sync_inference_audio_dir, slider_audio_text_similarity_boundary)
+        time_consuming, count = time_util.time_monitor(text_check.delete_ref_audio_below_boundary)(ref_audio_path,
                                                                                                    text_text_similarity_result_path,
                                                                                                    text_inference_audio_file_dir,
                                                                                                    slider_audio_text_similarity_boundary)
         text_delete_ref_audio_below_boundary_info = f"耗时:{time_consuming:0.1f}秒;删除参考音频数量:{count}"
     except Exception as e:
         logger.error("发生异常: \n%s", traceback.format_exc())
         text_delete_ref_audio_below_boundary_info = f"发生异常:{e}"
     return text_delete_ref_audio_below_boundary_info
 
-def change_lang_choices(key): #根据选择的模型修改可选的语言
+
+def change_lang_choices(key):  # 根据选择的模型修改可选的语言
     # return gr.Dropdown(choices=asr_dict[key]['lang'])
-    return {"__type__": "update", "choices": asr_dict[key]['lang'],"value":asr_dict[key]['lang'][0]}
-def change_size_choices(key): # 根据选择的模型修改可选的模型尺寸
+    return {"__type__": "update", "choices": asr_dict[key]['lang'], "value": asr_dict[key]['lang'][0]}
+
+
+def change_size_choices(key):  # 根据选择的模型修改可选的模型尺寸
     # return gr.Dropdown(choices=asr_dict[key]['size'])
     return {"__type__": "update", "choices": asr_dict[key]['size']}
 
@@ -570,12 +581,17 @@ def save_role(text_role):
 
 with gr.Blocks() as app:
     gr.Markdown(value=i18n("基本介绍:这是一个从训练素材中,批量提取参考音频,并进行效果评估与配置生成的工具"))
-    with gr.Row():
-        text_work_space_dir = gr.Text(label=i18n("工作目录,后续操作所生成文件都会保存在此目录下"),
-                                      value=default_work_space_dir)
-        text_role = gr.Text(label=i18n("角色名称"), value=default_role)
-        text_work_space_dir.blur(save_work_dir, [text_work_space_dir, text_role], [text_role])
-        text_role.blur(save_role, [text_role], [])
+    with gr.Accordion(label=i18n("基本信息")):
+        with gr.Row():
+            text_work_space_dir = gr.Text(label=i18n("工作目录,后续操作所生成文件都会保存在此目录下"),
+                                          value=default_work_space_dir)
+            text_role = gr.Text(label=i18n("角色名称"), value=default_role)
+            button_switch_role_and_refresh = gr.Button(i18n("切换并刷新"), variant="primary")
+            text_work_space_dir.blur(save_work_dir, [text_work_space_dir, text_role], [text_role])
+            text_role.blur(save_role, [text_role], [])
+        with gr.Row():
+            text_refer_audio_file_dir = gr.Text(label=i18n("参考音频所在目录"), value="")
+            text_inference_audio_file_dir = gr.Text(label=i18n("推理音频所在目录"), value="")
     with gr.Tab(label=i18n("第一步:基于训练素材,生成待选参考音频列表")):
         gr.Markdown(value=i18n("1.1:选择list文件,并提取3-10秒的素材作为参考候选"))
         text_list_input = gr.Text(label=i18n("请输入list文件路径"), value="")
@@ -602,8 +618,6 @@ def save_role(text_role):
     with gr.Tab(label=i18n("第二步:基于参考音频和测试文本,执行批量推理")):
         default_model_inference_voice_dir = common.check_path_existence_and_return(
             os.path.join(default_base_dir, params.reference_audio_dir))
-        text_model_inference_voice_dir = gr.Text(label=i18n("待推理的参考音频所在目录"),
-                                                 value=default_model_inference_voice_dir, interactive=True)
         gr.Markdown(value=i18n("2.1:启动推理服务,并配置模型参数"))
         with gr.Accordion(label=i18n("详情")):
             with gr.Tab(label=i18n("主项目下api.py服务")):
@@ -613,55 +627,115 @@ def save_role(text_role):
                     text_start_api_info = gr.Text(label=i18n("api启动信息"), value="", interactive=False)
                     button_start_api.click(start_api, [], [text_start_api_info])
                 gr.Markdown(value=i18n("2.1.2:设置模型参数"))
-                text_api_set_model_base_url = gr.Text(label=i18n("请输入api服务模型切换接口地址"), value="", interactive=True)
+                text_api_set_model_base_url = gr.Text(label=i18n("请输入api服务模型切换接口地址"), value="",
+                                                      interactive=True)
                 with gr.Row():
-                    dropdown_api_gpt_models = gr.Dropdown(label=i18n("GPT模型列表"), choices=model_manager.get_gpt_model_names(), value="", interactive=True)
-                    dropdown_api_sovits_models = gr.Dropdown(label=i18n("SoVITS模型列表"), choices=model_manager.get_sovits_model_names(), value="", interactive=True)
+                    dropdown_api_gpt_models = gr.Dropdown(label=i18n("GPT模型列表"),
+                                                          choices=model_manager.get_gpt_model_names(), value="",
+                                                          interactive=True)
+                    dropdown_api_sovits_models = gr.Dropdown(label=i18n("SoVITS模型列表"),
+                                                             choices=model_manager.get_sovits_model_names(),
+                                                             value="", interactive=True)
                     button_refresh_api_model = gr.Button(i18n("刷新模型路径"), variant="primary")
-                    button_refresh_api_model.click(refresh_api_model, [], [dropdown_api_gpt_models, dropdown_api_sovits_models])
+                    button_refresh_api_model.click(refresh_api_model, [],
                                                    [dropdown_api_gpt_models, dropdown_api_sovits_models])
                 with gr.Row():
                     text_api_gpt_param = gr.Text(label=i18n("GPT模型参数名"), value="", interactive=True)
                     text_api_sovits_param = gr.Text(label=i18n("SoVITS模型参数名"), value="", interactive=True)
                 gr.Markdown(value=i18n("2.1.3:发起设置请求"))
-                text_api_set_model_whole_url = gr.Text(label=i18n("完整的模型参数设置请求地址"), value="", interactive=False)
-                dropdown_api_gpt_models.change(api_set_model_whole_url, [text_api_set_model_base_url, dropdown_api_gpt_models, dropdown_api_sovits_models, text_api_gpt_param, text_api_sovits_param], [text_api_set_model_whole_url])
-                dropdown_api_sovits_models.change(api_set_model_whole_url, [text_api_set_model_base_url, dropdown_api_gpt_models, dropdown_api_sovits_models, text_api_gpt_param, text_api_sovits_param], [text_api_set_model_whole_url])
-                text_api_gpt_param.input(api_set_model_whole_url, [text_api_set_model_base_url, dropdown_api_gpt_models, dropdown_api_sovits_models, text_api_gpt_param, text_api_sovits_param], [text_api_set_model_whole_url])
-                text_api_sovits_param.input(api_set_model_whole_url, [text_api_set_model_base_url, dropdown_api_gpt_models, dropdown_api_sovits_models, text_api_gpt_param, text_api_sovits_param], [text_api_set_model_whole_url])
+                text_api_set_model_whole_url = gr.Text(label=i18n("完整的模型参数设置请求地址"), value="",
+                                                       interactive=False)
+                dropdown_api_gpt_models.change(api_set_model_whole_url,
+                                               [text_api_set_model_base_url, dropdown_api_gpt_models,
+                                                dropdown_api_sovits_models, text_api_gpt_param,
+                                                text_api_sovits_param], [text_api_set_model_whole_url])
+                dropdown_api_sovits_models.change(api_set_model_whole_url,
+                                                  [text_api_set_model_base_url, dropdown_api_gpt_models,
+                                                   dropdown_api_sovits_models, text_api_gpt_param,
+                                                   text_api_sovits_param], [text_api_set_model_whole_url])
+                text_api_gpt_param.input(api_set_model_whole_url,
+                                         [text_api_set_model_base_url, dropdown_api_gpt_models,
+                                          dropdown_api_sovits_models, text_api_gpt_param, text_api_sovits_param],
+                                         [text_api_set_model_whole_url])
+                text_api_sovits_param.input(api_set_model_whole_url,
+                                            [text_api_set_model_base_url, dropdown_api_gpt_models,
+                                             dropdown_api_sovits_models, text_api_gpt_param, text_api_sovits_param],
+                                            [text_api_set_model_whole_url])
                 with gr.Row():
                     button_api_start_set_model_request = gr.Button(i18n("发起模型设置请求"), variant="primary")
-                    text_api_start_set_model_request_info = gr.Text(label=i18n("设置请求结果"), value="", interactive=False)
-                    button_api_start_set_model_request.click(start_api_set_model, [text_api_set_model_base_url, dropdown_api_gpt_models, dropdown_api_sovits_models, text_api_gpt_param, text_api_sovits_param], [text_api_start_set_model_request_info])
+                    text_api_start_set_model_request_info = gr.Text(label=i18n("设置请求结果"), value="",
+                                                                    interactive=False)
+                    button_api_start_set_model_request.click(start_api_set_model,
+                                                             [text_api_set_model_base_url, dropdown_api_gpt_models,
+                                                              dropdown_api_sovits_models, text_api_gpt_param,
+                                                              text_api_sovits_param],
+                                                             [text_api_start_set_model_request_info])
             with gr.Tab(label=i18n("fast项目下api_v2.py服务")):
                 gr.Markdown(value=i18n("2.1.1:请到你的项目下,启动服务"))
                 gr.Markdown(value=i18n("2.1.2:设置GPT模型参数"))
-                text_api_v2_set_gpt_model_base_url = gr.Text(label=i18n("请输入api服务GPT模型切换接口地址"), value="", interactive=True)
+                text_api_v2_set_gpt_model_base_url = gr.Text(label=i18n("请输入api服务GPT模型切换接口地址"),
+                                                             value="", interactive=True)
                 with gr.Row():
                     text_api_v2_gpt_model_param = gr.Text(label=i18n("GPT模型参数名"), value="", interactive=True)
-                    dropdown_api_v2_gpt_models = gr.Dropdown(label=i18n("GPT模型列表"), choices=model_manager.get_gpt_model_names(), value="", interactive=True)
+                    dropdown_api_v2_gpt_models = gr.Dropdown(label=i18n("GPT模型列表"),
+                                                             choices=model_manager.get_gpt_model_names(), value="",
+                                                             interactive=True)
                     button_api_v2_refresh_gpt = gr.Button(i18n("刷新模型路径"), variant="primary")
                     button_api_v2_refresh_gpt.click(refresh_api_v2_gpt_model, [], [dropdown_api_v2_gpt_models])
-                text_api_v2_set_gpt_model_whole_url = gr.Text(label=i18n("完整的GPT模型参数设置请求地址"), value="", interactive=False)
-                text_api_v2_gpt_model_param.input(api_v2_set_gpt_whole_url,
[text_api_v2_set_gpt_model_base_url, text_api_v2_gpt_model_param, dropdown_api_v2_gpt_models], [text_api_v2_set_gpt_model_whole_url]) - dropdown_api_v2_gpt_models.change(api_v2_set_gpt_whole_url, [text_api_v2_set_gpt_model_base_url, text_api_v2_gpt_model_param, dropdown_api_v2_gpt_models], [text_api_v2_set_gpt_model_whole_url]) + text_api_v2_set_gpt_model_whole_url = gr.Text(label=i18n("完整的GPT模型参数设置请求地址"), value="", + interactive=False) + text_api_v2_gpt_model_param.input(api_v2_set_gpt_whole_url, + [text_api_v2_set_gpt_model_base_url, text_api_v2_gpt_model_param, + dropdown_api_v2_gpt_models], + [text_api_v2_set_gpt_model_whole_url]) + dropdown_api_v2_gpt_models.change(api_v2_set_gpt_whole_url, + [text_api_v2_set_gpt_model_base_url, text_api_v2_gpt_model_param, + dropdown_api_v2_gpt_models], + [text_api_v2_set_gpt_model_whole_url]) with gr.Row(): - button_api_v2_start_set_gpt_model_request = gr.Button(i18n("发起GPT模型设置请求"), variant="primary") - text_api_v2_start_set_gpt_model_request_info = gr.Text(label=i18n("设置请求结果"), value="", interactive=False) - button_api_v2_start_set_gpt_model_request.click(start_api_v2_set_gpt_model, [text_api_v2_set_gpt_model_base_url, text_api_v2_gpt_model_param, dropdown_api_v2_gpt_models], [text_api_v2_start_set_gpt_model_request_info]) + button_api_v2_start_set_gpt_model_request = gr.Button(i18n("发起GPT模型设置请求"), + variant="primary") + text_api_v2_start_set_gpt_model_request_info = gr.Text(label=i18n("设置请求结果"), value="", + interactive=False) + button_api_v2_start_set_gpt_model_request.click(start_api_v2_set_gpt_model, + [text_api_v2_set_gpt_model_base_url, + text_api_v2_gpt_model_param, + dropdown_api_v2_gpt_models], + [text_api_v2_start_set_gpt_model_request_info]) gr.Markdown(value=i18n("2.1.3:设置SoVITS模型参数")) - text_api_v2_set_sovits_model_base_url = gr.Text(label=i18n("请输入api服务SoVITS模型切换接口地址"), value="", interactive=True) + text_api_v2_set_sovits_model_base_url = gr.Text(label=i18n("请输入api服务SoVITS模型切换接口地址"), + value="", interactive=True) with gr.Row(): - text_api_v2_sovits_model_param = gr.Text(label=i18n("SoVITS模型参数名"), value="", interactive=True) - dropdown_api_v2_sovits_models = gr.Dropdown(label=i18n("SoVITS模型列表"), choices=model_manager.get_sovits_model_names(), value="", interactive=True) + text_api_v2_sovits_model_param = gr.Text(label=i18n("SoVITS模型参数名"), value="", + interactive=True) + dropdown_api_v2_sovits_models = gr.Dropdown(label=i18n("SoVITS模型列表"), + choices=model_manager.get_sovits_model_names(), + value="", interactive=True) button_api_v2_refresh_sovits = gr.Button(i18n("刷新模型路径"), variant="primary") - button_api_v2_refresh_sovits.click(refresh_api_v2_sovits_model, [], [dropdown_api_v2_sovits_models]) - text_api_v2_set_sovits_model_whole_url = gr.Text(label=i18n("完整的SoVITS模型参数设置请求地址"), value="", interactive=False) - text_api_v2_sovits_model_param.input(api_v2_set_sovits_whole_url, [text_api_v2_set_sovits_model_base_url, text_api_v2_sovits_model_param, dropdown_api_v2_sovits_models], [text_api_v2_set_sovits_model_whole_url]) - dropdown_api_v2_sovits_models.change(api_v2_set_sovits_whole_url, [text_api_v2_set_sovits_model_base_url, text_api_v2_sovits_model_param, dropdown_api_v2_sovits_models], [text_api_v2_set_sovits_model_whole_url]) + button_api_v2_refresh_sovits.click(refresh_api_v2_sovits_model, [], + [dropdown_api_v2_sovits_models]) + text_api_v2_set_sovits_model_whole_url = gr.Text(label=i18n("完整的SoVITS模型参数设置请求地址"), + value="", interactive=False) + text_api_v2_sovits_model_param.input(api_v2_set_sovits_whole_url, + 
[text_api_v2_set_sovits_model_base_url, + text_api_v2_sovits_model_param, + dropdown_api_v2_sovits_models], + [text_api_v2_set_sovits_model_whole_url]) + dropdown_api_v2_sovits_models.change(api_v2_set_sovits_whole_url, + [text_api_v2_set_sovits_model_base_url, + text_api_v2_sovits_model_param, + dropdown_api_v2_sovits_models], + [text_api_v2_set_sovits_model_whole_url]) with gr.Row(): - button_api_v2_start_set_sovits_model_request = gr.Button(i18n("发起SoVITS模型设置请求"), variant="primary") - text_api_v2_start_set_sovits_model_request_info = gr.Text(label=i18n("设置请求结果"), value="", interactive=False) - button_api_v2_start_set_sovits_model_request.click(start_api_v2_set_sovits_model, [text_api_v2_set_sovits_model_base_url, text_api_v2_sovits_model_param, dropdown_api_v2_sovits_models], [text_api_v2_start_set_sovits_model_request_info]) + button_api_v2_start_set_sovits_model_request = gr.Button(i18n("发起SoVITS模型设置请求"), + variant="primary") + text_api_v2_start_set_sovits_model_request_info = gr.Text(label=i18n("设置请求结果"), value="", + interactive=False) + button_api_v2_start_set_sovits_model_request.click(start_api_v2_set_sovits_model, + [text_api_v2_set_sovits_model_base_url, + text_api_v2_sovits_model_param, + dropdown_api_v2_sovits_models], [ + text_api_v2_start_set_sovits_model_request_info]) with gr.Tab(label=i18n("第三方推理服务")): gr.Markdown(value=i18n("启动第三方推理服务,并完成参考音频打包,模型参数设置等操作")) gr.Markdown(value=i18n("2.2:配置推理服务参数信息,参考音频路径/文本和角色情绪二选一,如果是角色情绪,需要先执行第四步," @@ -698,8 +772,9 @@ def save_role(text_role): button_open_test_content_file = gr.Button(i18n("打开待推理文本文件"), variant="primary") button_open_test_content_file.click(open_file, [text_test_content], []) gr.Markdown(value=i18n("2.4:开始批量推理,这个过程比较耗时,可以去干点别的")) - slider_request_concurrency_num = gr.Slider(minimum=1, maximum=10, step=1, label=i18n("请输入请求并发数,会根据此数创建对应数量的子进程并行发起推理请求"), value=3, - interactive=True) + slider_request_concurrency_num = gr.Slider(minimum=1, maximum=10, step=1, label=i18n( + "请输入请求并发数,会根据此数创建对应数量的子进程并行发起推理请求"), value=3, + interactive=True) with gr.Row(): button_model_inference = gr.Button(i18n("开启批量推理"), variant="primary") text_model_inference_info = gr.Text(label=i18n("批量推理结果"), value="", interactive=False) @@ -738,9 +813,11 @@ def save_role(text_role): os.path.join(default_base_dir, params.asr_filename + '.list')) with gr.Row(): text_text_similarity_analysis_path = gr.Text(label=i18n("待分析的文件路径"), - value=default_text_similarity_analysis_path, interactive=True) - slider_text_similarity_amplification_boundary = gr.Slider(minimum=0, maximum=1, step=0.01, label=i18n("文本相似度放大边界"), value=0.90, - interactive=True) + value=default_text_similarity_analysis_path, + interactive=True) + slider_text_similarity_amplification_boundary = gr.Slider(minimum=0, maximum=1, step=0.01, + label=i18n("文本相似度放大边界"), value=0.90, + interactive=True) button_asr.click(asr, [text_work_space_dir, text_role, text_asr_audio_dir, dropdown_asr_model, dropdown_asr_size, dropdown_asr_lang], [text_asr_info, text_text_similarity_analysis_path]) @@ -748,22 +825,31 @@ def save_role(text_role): button_text_similarity_analysis = gr.Button(i18n("启动文本相似度分析"), variant="primary") text_text_similarity_analysis_info = gr.Text(label=i18n("文本相似度分析结果"), value="", interactive=False) - button_text_similarity_analysis.click(text_similarity_analysis, [text_work_space_dir, text_role, slider_text_similarity_amplification_boundary, + button_text_similarity_analysis.click(text_similarity_analysis, [text_work_space_dir, text_role, + slider_text_similarity_amplification_boundary, 
text_text_similarity_analysis_path], [text_text_similarity_analysis_info]) gr.Markdown(value=i18n("3.3:根据相似度分析结果,重点检查最后几条是否存在复读等问题")) with gr.Row(): - text_text_similarity_result_path = gr.Text(label=i18n("文本相似度分析结果文件所在路径"), value="", interactive=True) + text_text_similarity_result_path = gr.Text(label=i18n("文本相似度分析结果文件所在路径"), value="", + interactive=True) button_open_text_similarity_result = gr.Button(i18n("打开文本相似度分析结果文件"), variant="primary") + button_open_inference_dir = gr.Button(i18n("打开推理音频所在目录"), variant="primary") button_open_text_similarity_result.click(open_file, [text_text_similarity_result_path], []) - slider_audio_text_similarity_boundary = gr.Slider(minimum=0, maximum=1, step=0.01, label=i18n("音频文本相似度边界值"), value=0.80, - interactive=True) - text_sync_inference_audio_dir2 = gr.Text(label=i18n("被同步的推理音频路径"), - value="", interactive=True) + button_open_inference_dir.click(open_file, [text_inference_audio_file_dir], []) + slider_audio_text_similarity_boundary = gr.Slider(minimum=0, maximum=1, step=0.01, + label=i18n("音频文本相似度边界值"), value=0.80, + interactive=True) with gr.Row(): - button_delete_ref_audio_below_boundary = gr.Button(i18n("删除音频文本相似度边界值以下的参考音频"), variant="primary") + button_delete_ref_audio_below_boundary = gr.Button(i18n("删除音频文本相似度边界值以下的参考音频"), + variant="primary") text_delete_ref_audio_below_boundary_info = gr.Text(label=i18n("删除结果"), value="", interactive=True) - button_delete_ref_audio_below_boundary.click(delete_ref_audio_below_boundary, [text_model_inference_voice_dir, text_text_similarity_result_path, text_sync_inference_audio_dir2, slider_audio_text_similarity_boundary], [text_delete_ref_audio_below_boundary_info]) + button_delete_ref_audio_below_boundary.click(delete_ref_audio_below_boundary, + [text_refer_audio_file_dir, + text_text_similarity_result_path, + text_inference_audio_file_dir, + slider_audio_text_similarity_boundary], + [text_delete_ref_audio_below_boundary_info]) with gr.Tab(label=i18n("第四步:校验参考音频音质")): gr.Markdown(value=i18n("4.1:对结果按音频相似度排序,或许有用吧,主要还是耳朵听")) with gr.Row(): @@ -777,20 +863,11 @@ def save_role(text_role): text_compare_audio_dir], [text_similarity_audio_output_info]) gr.Markdown(value=i18n("4.2:如果发现存在低音质的推理音频,那么就去参考音频目录下,把原参考音频删了")) gr.Markdown(value=i18n("4.3:删除参考音频之后,按下面的操作,会将推理音频目录下对应的音频也删掉")) - with gr.Row(): - default_sync_ref_audio_dir = common.check_path_existence_and_return( - os.path.join(default_base_dir, params.reference_audio_dir)) - text_sync_ref_audio_dir = gr.Text(label=i18n("参考音频路径"), value=default_sync_ref_audio_dir, - interactive=True) - default_sync_inference_audio_dir = common.check_path_existence_and_return( - os.path.join(default_base_dir, params.inference_audio_dir)) - text_sync_inference_audio_dir = gr.Text(label=i18n("被同步的推理音频路径"), - value=default_sync_inference_audio_dir, interactive=True) with gr.Row(): button_sync_ref_audio = gr.Button(i18n("将参考音频的删除情况,同步到推理音频目录"), variant="primary") text_sync_ref_info = gr.Text(label=i18n("同步结果"), value="", interactive=False) - button_sync_ref_audio.click(sync_ref_audio, [text_work_space_dir, text_role, text_sync_ref_audio_dir, - text_sync_inference_audio_dir], [text_sync_ref_info]) + button_sync_ref_audio.click(sync_ref_audio, [text_work_space_dir, text_role, text_refer_audio_file_dir, + text_inference_audio_file_dir], [text_sync_ref_info]) with gr.Tab("第五步:生成参考音频配置文本"): gr.Markdown(value=i18n("5.1:编辑模板")) default_template_path = params.default_template_path @@ -800,23 +877,21 @@ def save_role(text_role): gr.Markdown(value=i18n("5.2:生成配置")) default_sync_ref_audio_dir2 = 
common.check_path_existence_and_return( os.path.join(default_base_dir, params.reference_audio_dir)) - text_sync_ref_audio_dir2 = gr.Text(label=i18n("参考音频路径"), value=default_sync_ref_audio_dir2, - interactive=True) with gr.Row(): button_create_config = gr.Button(i18n("生成配置"), variant="primary") text_create_config_info = gr.Text(label=i18n("生成结果"), value="", interactive=False) button_create_config.click(create_config, - [text_work_space_dir, text_role, text_template, text_sync_ref_audio_dir2], + [text_work_space_dir, text_role, text_template, text_refer_audio_file_dir], [text_create_config_info]) button_sample.click(sample, [text_work_space_dir, text_role, text_sample_dir, text_base_voice_path, slider_subsection_num, slider_sample_num, checkbox_similarity_output], - [text_sample_info, text_model_inference_voice_dir, text_sync_ref_audio_dir, - text_sync_ref_audio_dir2]) + [text_sample_info, text_refer_audio_file_dir]) button_model_inference.click(model_inference, - [text_work_space_dir, text_role, slider_request_concurrency_num, text_model_inference_voice_dir, text_url, + [text_work_space_dir, text_role, slider_request_concurrency_num, + text_refer_audio_file_dir, text_url, text_text, text_ref_path, text_ref_text, text_emotion, text_test_content], - [text_model_inference_info, text_asr_audio_dir, text_sync_inference_audio_dir]) + [text_model_inference_info, text_asr_audio_dir, text_inference_audio_file_dir]) app.launch( server_port=9423, From af0bd9f41430218a615d272e4c8006e432dfab68 Mon Sep 17 00:00:00 2001 From: Downupanddownup Date: Sun, 28 Apr 2024 18:47:44 +0800 Subject: [PATCH 37/72] =?UTF-8?q?=E6=B7=BB=E5=8A=A0ui=E5=88=9D=E5=A7=8B?= =?UTF-8?q?=E5=8C=96=E5=80=BC?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../config_param/config_manager.py | 26 ++- .../ref_audio_selector_webui.py | 81 ++++----- Ref_Audio_Selector/ui_init/__init__.py | 0 Ref_Audio_Selector/ui_init/init_ui_param.py | 170 ++++++++++++++++++ 4 files changed, 231 insertions(+), 46 deletions(-) create mode 100644 Ref_Audio_Selector/ui_init/__init__.py create mode 100644 Ref_Audio_Selector/ui_init/init_ui_param.py diff --git a/Ref_Audio_Selector/config_param/config_manager.py b/Ref_Audio_Selector/config_param/config_manager.py index 53a2c77c..d540ae39 100644 --- a/Ref_Audio_Selector/config_param/config_manager.py +++ b/Ref_Audio_Selector/config_param/config_manager.py @@ -6,14 +6,38 @@ class ParamReadWriteManager: def __init__(self): self.base_dir = 'Ref_Audio_Selector/file/base_info' + os.makedirs(self.base_dir, exist_ok=True) + # 基础信息 self.work_dir = 'work_dir' self.role = 'role' - self.generate_audio_url = 'generate_audio_url' + # 第一步 + self.subsection_num = 'subsection_num' + self.sample_num = 'sample_num' + # 第二步 + self.api_set_model_base_url = 'api_set_model_base_url' + self.api_gpt_param = 'api_gpt_param' + self.api_sovits_param = 'api_sovits_param' + + self.api_v2_set_gpt_model_base_url = 'api_v2_set_gpt_model_base_url' + self.api_v2_gpt_model_param = 'api_v2_gpt_model_param' + self.api_v2_set_sovits_model_base_url = 'api_v2_set_sovits_model_base_url' + self.api_v2_sovits_model_param = 'api_v2_sovits_model_param' + + self.text_url = 'text_url' self.text_param = 'text_param' self.ref_path_param = 'ref_path_param' self.ref_text_param = 'ref_text_param' self.emotion_param = 'emotion_param' + self.test_content_path = 'test_content_path' + self.request_concurrency_num = 'request_concurrency_num' + + # 第三步 + self.text_similarity_amplification_boundary = 
'text_similarity_amplification_boundary' + # 第四步 + # 第五步 + self.text_template = 'text_template' + def read(self, key): file_path = os.path.join(self.base_dir, key + '.txt') if os.path.exists(file_path): diff --git a/Ref_Audio_Selector/ref_audio_selector_webui.py b/Ref_Audio_Selector/ref_audio_selector_webui.py index 8673a272..3544fefe 100644 --- a/Ref_Audio_Selector/ref_audio_selector_webui.py +++ b/Ref_Audio_Selector/ref_audio_selector_webui.py @@ -15,6 +15,7 @@ import Ref_Audio_Selector.common.common as common import Ref_Audio_Selector.config_param.config_params as params import Ref_Audio_Selector.common.time_util as time_util +import Ref_Audio_Selector.ui_init.init_ui_param as init from tools.i18n.i18n import I18nAuto from config import python_exec, is_half @@ -534,8 +535,8 @@ def change_size_choices(key): # 根据选择的模型修改可选的模型尺 return {"__type__": "update", "choices": asr_dict[key]['size']} -def save_generate_audio_url(generate_audio_url): - rw_param.write(rw_param.generate_audio_url, generate_audio_url) +def save_generate_audio_url(text_url): + rw_param.write(rw_param.text_url, text_url) def save_text_param(text_text): @@ -574,24 +575,23 @@ def save_role(text_role): rw_param.write(rw_param.role, text_role) -if __name__ == '__main__': - default_work_space_dir = rw_param.read(rw_param.work_dir) - default_role = rw_param.read(rw_param.role) - default_base_dir = os.path.join(default_work_space_dir, default_role) +def init_ui(): + + init.init_all() with gr.Blocks() as app: gr.Markdown(value=i18n("基本介绍:这是一个从训练素材中,批量提取参考音频,并进行效果评估与配置生成的工具")) with gr.Accordion(label=i18n("基本信息")): with gr.Row(): text_work_space_dir = gr.Text(label=i18n("工作目录,后续操作所生成文件都会保存在此目录下"), - value=default_work_space_dir) - text_role = gr.Text(label=i18n("角色名称"), value=default_role) + value=init.text_work_space_dir_default) + text_role = gr.Text(label=i18n("角色名称"), value=init.text_role_default) button_switch_role_and_refresh = gr.Button(i18n("切换并刷新"), variant="primary") text_work_space_dir.blur(save_work_dir, [text_work_space_dir, text_role], [text_role]) text_role.blur(save_role, [text_role], []) with gr.Row(): - text_refer_audio_file_dir = gr.Text(label=i18n("参考音频所在目录"), value="") - text_inference_audio_file_dir = gr.Text(label=i18n("推理音频所在目录"), value="") + text_refer_audio_file_dir = gr.Text(label=i18n("参考音频所在目录"), value=init.text_refer_audio_file_dir_default) + text_inference_audio_file_dir = gr.Text(label=i18n("推理音频所在目录"), value=init.text_inference_audio_file_dir_default) with gr.Tab(label=i18n("第一步:基于训练素材,生成待选参考音频列表")): gr.Markdown(value=i18n("1.1:选择list文件,并提取3-10秒的素材作为参考候选")) text_list_input = gr.Text(label=i18n("请输入list文件路径"), value="") @@ -599,25 +599,21 @@ def save_role(text_role): button_convert_from_list = gr.Button(i18n("开始生成待参考列表"), variant="primary") text_convert_from_list_info = gr.Text(label=i18n("参考列表生成结果"), value="", interactive=False) gr.Markdown(value=i18n("1.2:选择基准音频,执行相似度匹配,并分段随机抽样")) - default_sample_dir = common.check_path_existence_and_return( - os.path.join(default_base_dir, params.list_to_convert_reference_audio_dir)) - text_sample_dir = gr.Text(label=i18n("参考音频抽样目录"), value=default_sample_dir, interactive=True) + text_sample_dir = gr.Text(label=i18n("参考音频抽样目录"), value=init.text_sample_dir_default, interactive=True) button_convert_from_list.click(convert_from_list, [text_work_space_dir, text_role, text_list_input], [text_convert_from_list_info, text_sample_dir]) with gr.Row(): text_base_voice_path = gr.Text(label=i18n("请输入基准音频路径"), value="") - slider_subsection_num = gr.Slider(minimum=1, 
maximum=10, step=1, label=i18n("请输入分段数"), value=5, + slider_subsection_num = gr.Slider(minimum=1, maximum=10, step=1, label=i18n("请输入分段数"), value=init.slider_subsection_num_default, interactive=True) slider_sample_num = gr.Slider(minimum=1, maximum=10, step=1, label=i18n("请输入每段随机抽样个数"), - value=4, interactive=True) + value=init.slider_sample_num_default, interactive=True) checkbox_similarity_output = gr.Checkbox(label=i18n("是否将相似度匹配结果输出到临时目录?"), show_label=True) with gr.Row(): button_sample = gr.Button(i18n("开始分段随机抽样"), variant="primary") text_sample_info = gr.Text(label=i18n("分段随机抽样结果"), value="", interactive=False) with gr.Tab(label=i18n("第二步:基于参考音频和测试文本,执行批量推理")): - default_model_inference_voice_dir = common.check_path_existence_and_return( - os.path.join(default_base_dir, params.reference_audio_dir)) gr.Markdown(value=i18n("2.1:启动推理服务,并配置模型参数")) with gr.Accordion(label=i18n("详情")): with gr.Tab(label=i18n("主项目下api.py服务")): @@ -627,7 +623,7 @@ def save_role(text_role): text_start_api_info = gr.Text(label=i18n("api启动信息"), value="", interactive=False) button_start_api.click(start_api, [], [text_start_api_info]) gr.Markdown(value=i18n("2.1.2:设置模型参数")) - text_api_set_model_base_url = gr.Text(label=i18n("请输入api服务模型切换接口地址"), value="", + text_api_set_model_base_url = gr.Text(label=i18n("请输入api服务模型切换接口地址"), value=init.text_api_set_model_base_url_default, interactive=True) with gr.Row(): dropdown_api_gpt_models = gr.Dropdown(label=i18n("GPT模型列表"), @@ -640,8 +636,8 @@ def save_role(text_role): button_refresh_api_model.click(refresh_api_model, [], [dropdown_api_gpt_models, dropdown_api_sovits_models]) with gr.Row(): - text_api_gpt_param = gr.Text(label=i18n("GPT模型参数名"), value="", interactive=True) - text_api_sovits_param = gr.Text(label=i18n("SoVITS模型参数名"), value="", interactive=True) + text_api_gpt_param = gr.Text(label=i18n("GPT模型参数名"), value=init.text_api_gpt_param_default, interactive=True) + text_api_sovits_param = gr.Text(label=i18n("SoVITS模型参数名"), value=init.text_api_sovits_param_default, interactive=True) gr.Markdown(value=i18n("2.1.3:发起设置请求")) text_api_set_model_whole_url = gr.Text(label=i18n("完整的模型参数设置请求地址"), value="", interactive=False) @@ -674,9 +670,9 @@ def save_role(text_role): gr.Markdown(value=i18n("2.1.1:请到你的项目下,启动服务")) gr.Markdown(value=i18n("2.1.2:设置GPT模型参数")) text_api_v2_set_gpt_model_base_url = gr.Text(label=i18n("请输入api服务GPT模型切换接口地址"), - value="", interactive=True) + value=init.text_api_v2_set_gpt_model_base_url_default, interactive=True) with gr.Row(): - text_api_v2_gpt_model_param = gr.Text(label=i18n("GPT模型参数名"), value="", interactive=True) + text_api_v2_gpt_model_param = gr.Text(label=i18n("GPT模型参数名"), value=init.text_api_v2_gpt_model_param_default, interactive=True) dropdown_api_v2_gpt_models = gr.Dropdown(label=i18n("GPT模型列表"), choices=model_manager.get_gpt_model_names(), value="", interactive=True) @@ -704,9 +700,9 @@ def save_role(text_role): [text_api_v2_start_set_gpt_model_request_info]) gr.Markdown(value=i18n("2.1.3:设置SoVITS模型参数")) text_api_v2_set_sovits_model_base_url = gr.Text(label=i18n("请输入api服务SoVITS模型切换接口地址"), - value="", interactive=True) + value=init.text_api_v2_set_sovits_model_base_url_default, interactive=True) with gr.Row(): - text_api_v2_sovits_model_param = gr.Text(label=i18n("SoVITS模型参数名"), value="", + text_api_v2_sovits_model_param = gr.Text(label=i18n("SoVITS模型参数名"), value=init.text_api_v2_sovits_model_param_default, interactive=True) dropdown_api_v2_sovits_models = gr.Dropdown(label=i18n("SoVITS模型列表"), 
choices=model_manager.get_sovits_model_names(), @@ -741,14 +737,14 @@ def save_role(text_role): gr.Markdown(value=i18n("2.2:配置推理服务参数信息,参考音频路径/文本和角色情绪二选一,如果是角色情绪,需要先执行第四步," "将参考音频打包配置到推理服务下,在推理前,请确认完整请求地址是否与正常使用时的一致,包括角色名称,尤其是文本分隔符是否正确")) text_url = gr.Text(label=i18n("请输入推理服务请求地址与参数"), - value=rw_param.read(rw_param.generate_audio_url)) + value=init.text_url_default) with gr.Row(): - text_text = gr.Text(label=i18n("请输入文本参数名"), value=rw_param.read(rw_param.text_param)) + text_text = gr.Text(label=i18n("请输入文本参数名"), value=init.text_text_default) text_ref_path = gr.Text(label=i18n("请输入参考音频路径参数名"), - value=rw_param.read(rw_param.ref_path_param)) + value=init.text_ref_path_default) text_ref_text = gr.Text(label=i18n("请输入参考音频文本参数名"), - value=rw_param.read(rw_param.ref_text_param)) - text_emotion = gr.Text(label=i18n("请输入角色情绪参数名"), value=rw_param.read(rw_param.emotion_param)) + value=init.text_ref_text_default) + text_emotion = gr.Text(label=i18n("请输入角色情绪参数名"), value=init.text_emotion_default) text_whole_url = gr.Text(label=i18n("完整地址"), value="", interactive=False) text_url.input(whole_url, [text_url, text_text, text_ref_path, text_ref_text, text_emotion], [text_whole_url]) @@ -766,9 +762,8 @@ def save_role(text_role): [text_whole_url]) text_emotion.blur(save_emotion_param, [text_emotion], []) gr.Markdown(value=i18n("2.3:配置待推理文本,一句一行,不要太多,10条即可")) - default_test_content_path = params.default_test_text_path with gr.Row(): - text_test_content = gr.Text(label=i18n("请输入待推理文本路径"), value=default_test_content_path) + text_test_content = gr.Text(label=i18n("请输入待推理文本路径"), value=init.text_test_content_default) button_open_test_content_file = gr.Button(i18n("打开待推理文本文件"), variant="primary") button_open_test_content_file.click(open_file, [text_test_content], []) gr.Markdown(value=i18n("2.4:开始批量推理,这个过程比较耗时,可以去干点别的")) @@ -780,9 +775,7 @@ def save_role(text_role): text_model_inference_info = gr.Text(label=i18n("批量推理结果"), value="", interactive=False) with gr.Tab(label=i18n("第三步:进行参考音频效果校验与筛选")): gr.Markdown(value=i18n("3.1:启动asr,获取推理音频文本")) - default_asr_audio_dir = common.check_path_existence_and_return( - os.path.join(default_base_dir, params.inference_audio_dir, params.inference_audio_text_aggregation_dir)) - text_asr_audio_dir = gr.Text(label=i18n("待asr的音频所在目录"), value=default_asr_audio_dir, + text_asr_audio_dir = gr.Text(label=i18n("待asr的音频所在目录"), value=init.text_asr_audio_dir_default, interactive=True) with gr.Row(): dropdown_asr_model = gr.Dropdown( @@ -809,14 +802,13 @@ def save_role(text_role): button_asr = gr.Button(i18n("启动asr"), variant="primary") text_asr_info = gr.Text(label=i18n("asr结果"), value="", interactive=False) gr.Markdown(value=i18n("3.2:启动文本相似度分析")) - default_text_similarity_analysis_path = common.check_path_existence_and_return( - os.path.join(default_base_dir, params.asr_filename + '.list')) with gr.Row(): text_text_similarity_analysis_path = gr.Text(label=i18n("待分析的文件路径"), - value=default_text_similarity_analysis_path, + value=init.text_text_similarity_analysis_path_default, interactive=True) slider_text_similarity_amplification_boundary = gr.Slider(minimum=0, maximum=1, step=0.01, - label=i18n("文本相似度放大边界"), value=0.90, + label=i18n("文本相似度放大边界"), + value=init.slider_text_similarity_amplification_boundary_default, interactive=True) button_asr.click(asr, [text_work_space_dir, text_role, text_asr_audio_dir, dropdown_asr_model, dropdown_asr_size, dropdown_asr_lang], @@ -831,7 +823,7 @@ def save_role(text_role): [text_text_similarity_analysis_info]) 
gr.Markdown(value=i18n("3.3:根据相似度分析结果,重点检查最后几条是否存在复读等问题")) with gr.Row(): - text_text_similarity_result_path = gr.Text(label=i18n("文本相似度分析结果文件所在路径"), value="", + text_text_similarity_result_path = gr.Text(label=i18n("文本相似度分析结果文件所在路径"), value=init.text_text_similarity_result_path_default, interactive=True) button_open_text_similarity_result = gr.Button(i18n("打开文本相似度分析结果文件"), variant="primary") button_open_inference_dir = gr.Button(i18n("打开推理音频所在目录"), variant="primary") @@ -870,13 +862,8 @@ def save_role(text_role): text_inference_audio_file_dir], [text_sync_ref_info]) with gr.Tab("第五步:生成参考音频配置文本"): gr.Markdown(value=i18n("5.1:编辑模板")) - default_template_path = params.default_template_path - default_template_content = common.read_file(default_template_path) - text_template_path = gr.Text(label=i18n("模板文件路径"), value=default_template_path, interactive=True) - text_template = gr.Text(label=i18n("模板内容"), value=default_template_content, lines=10) + text_template = gr.Text(label=i18n("模板内容"), value=init.text_template_default, lines=10) gr.Markdown(value=i18n("5.2:生成配置")) - default_sync_ref_audio_dir2 = common.check_path_existence_and_return( - os.path.join(default_base_dir, params.reference_audio_dir)) with gr.Row(): button_create_config = gr.Button(i18n("生成配置"), variant="primary") text_create_config_info = gr.Text(label=i18n("生成结果"), value="", interactive=False) @@ -897,3 +884,7 @@ def save_role(text_role): server_port=9423, quiet=True, ) + + +if __name__ == "__main__": + init_ui() diff --git a/Ref_Audio_Selector/ui_init/__init__.py b/Ref_Audio_Selector/ui_init/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/Ref_Audio_Selector/ui_init/init_ui_param.py b/Ref_Audio_Selector/ui_init/init_ui_param.py new file mode 100644 index 00000000..4251800d --- /dev/null +++ b/Ref_Audio_Selector/ui_init/init_ui_param.py @@ -0,0 +1,170 @@ +import os +import Ref_Audio_Selector.config_param.config_params as params +import Ref_Audio_Selector.common.common as common + +rw_param = params.config_manager.get_rw_param() +# -------------------基本信息--------------------------- + +# 角色所在工作目录 +base_dir_default = None +# 工作目录 +text_work_space_dir_default = None +# 角色名称 +text_role_default = None +# 参考音频所在目录 +text_refer_audio_file_dir_default = None +# 推理音频所在目录 +text_inference_audio_file_dir_default = None + +# -------------------第一步------------------------------ + +# 参考音频抽样目录 +text_sample_dir_default = None +# 分段数 +slider_subsection_num_default = None +# 每段随机抽样个数 +slider_sample_num_default = None + +# -------------------第二步------------------------------ + +# api服务模型切换接口地址 +text_api_set_model_base_url_default = None +# GPT模型参数名 +text_api_gpt_param_default = None +# SoVITS模型参数名 +text_api_sovits_param_default = None +# api服务GPT模型切换接口地址 +text_api_v2_set_gpt_model_base_url_default = None +# GPT模型参数名 +text_api_v2_gpt_model_param_default = None +# api服务SoVITS模型切换接口地址 +text_api_v2_set_sovits_model_base_url_default = None +# SoVITS模型参数名 +text_api_v2_sovits_model_param_default = None +# 推理服务请求地址与参数 +text_url_default = None +# 文本参数名 +text_text_default = None +# 参考音频路径参数名 +text_ref_path_default = None +# 参考音频文本参数名 +text_ref_text_default = None +# 角色情绪参数名 +text_emotion_default = None +# 待推理文本路径 +text_test_content_default = None +# 请求并发数 +slider_request_concurrency_num_default = 3 + +# -------------------第三步------------------------------ + +# 待asr的音频所在目录 +text_asr_audio_dir_default = None +# 待分析的文件路径 +text_text_similarity_analysis_path_default = None +# 文本相似度放大边界 +slider_text_similarity_amplification_boundary_default = 
0.90 +# 文本相似度分析结果文件所在路径 +text_text_similarity_result_path_default = None + +# -------------------第四步------------------------------ +# -------------------第五步------------------------------ +# 模板内容 +text_template_default = None + + +def empty_default(vale, default_value): + if vale is None or vale == "": + return default_value + else: + return vale + + +def init_base(): + global text_work_space_dir_default, text_role_default, base_dir_default, text_refer_audio_file_dir_default, text_inference_audio_file_dir_default + + text_work_space_dir_default = rw_param.read(rw_param.work_dir) + text_role_default = rw_param.read(rw_param.role) + base_dir_default = os.path.join(text_work_space_dir_default, text_role_default) + + text_refer_audio_file_dir_default = common.check_path_existence_and_return( + os.path.join(base_dir_default, params.reference_audio_dir)) + + text_inference_audio_file_dir_default = common.check_path_existence_and_return( + os.path.join(base_dir_default, params.inference_audio_dir)) + + +def init_first(): + global text_sample_dir_default, slider_subsection_num_default, slider_sample_num_default + + text_sample_dir_default = common.check_path_existence_and_return( + os.path.join(base_dir_default, params.list_to_convert_reference_audio_dir)) + + slider_subsection_num_default = empty_default(rw_param.read(rw_param.subsection_num), 5) + + slider_sample_num_default = empty_default(rw_param.read(rw_param.sample_num), 4) + + +def init_second(): + global text_api_set_model_base_url_default, text_api_gpt_param_default, text_api_sovits_param_default, text_api_v2_set_gpt_model_base_url_default, text_api_v2_gpt_model_param_default + global text_api_v2_set_sovits_model_base_url_default, text_api_v2_sovits_model_param_default, text_url_default, text_text_default, text_ref_path_default + global text_ref_text_default, text_emotion_default, text_test_content_default, slider_request_concurrency_num_default + + text_api_set_model_base_url_default = empty_default(rw_param.read(rw_param.api_set_model_base_url), + 'http://localhost:9880/set_model') + text_api_gpt_param_default = empty_default(rw_param.read(rw_param.api_gpt_param), 'gpt_model_path') + text_api_sovits_param_default = empty_default(rw_param.read(rw_param.api_sovits_param), 'sovits_model_path') + + text_api_v2_set_gpt_model_base_url_default = empty_default(rw_param.read(rw_param.api_v2_set_gpt_model_base_url), + 'http://localhost:9880/set_gpt_weights') + text_api_v2_gpt_model_param_default = empty_default(rw_param.read(rw_param.api_v2_gpt_model_param), 'weights_path') + + text_api_v2_set_sovits_model_base_url_default = empty_default( + rw_param.read(rw_param.api_v2_set_sovits_model_base_url), 'http://localhost:9880/set_sovits_weights') + text_api_v2_sovits_model_param_default = empty_default(rw_param.read(rw_param.api_v2_sovits_model_param), 'weights_path') + + text_url_default = empty_default(rw_param.read(rw_param.text_url), + 'http://localhost:9880?prompt_language=中文&text_language=中文&cut_punc=') + text_text_default = empty_default(rw_param.read(rw_param.text_param), 'text') + + text_ref_path_default = empty_default(rw_param.read(rw_param.ref_path_param), 'refer_wav_path') + text_ref_text_default = empty_default(rw_param.read(rw_param.ref_text_param), 'prompt_text') + text_emotion_default = empty_default(rw_param.read(rw_param.emotion_param), 'emotion') + + text_test_content_default = empty_default(rw_param.read(rw_param.test_content_path), params.default_test_text_path) + + slider_request_concurrency_num_default = 
empty_default(rw_param.read(rw_param.request_concurrency_num), 3) + + +def init_third(): + global text_asr_audio_dir_default, text_text_similarity_analysis_path_default, slider_text_similarity_amplification_boundary_default, text_text_similarity_result_path_default + + text_asr_audio_dir_default = common.check_path_existence_and_return( + os.path.join(base_dir_default, params.inference_audio_dir, params.inference_audio_text_aggregation_dir)) + text_text_similarity_analysis_path_default = common.check_path_existence_and_return( + os.path.join(base_dir_default, params.asr_filename + '.list')) + slider_text_similarity_amplification_boundary_default = empty_default( + rw_param.read(rw_param.text_similarity_amplification_boundary), 0.90) + text_text_similarity_result_path_default = common.check_path_existence_and_return( + os.path.join(base_dir_default, params.text_emotion_average_similarity_report_filename + '.txt')) + + +def init_fourth(): + pass + + +def init_fifth(): + global text_template_default + + default_template_path = params.default_template_path + text_template_default = empty_default(rw_param.read(rw_param.text_template), + common.read_file(default_template_path)) + + +def init_all(): + init_base() + init_first() + init_second() + init_third() + init_fourth() + init_fifth() From e89f986e3fa6346169b2a99fd673e5a2d99cf1cc Mon Sep 17 00:00:00 2001 From: Downupanddownup Date: Sun, 28 Apr 2024 19:07:09 +0800 Subject: [PATCH 38/72] =?UTF-8?q?=E6=B7=BB=E5=8A=A0ui=E5=8F=82=E6=95=B0?= =?UTF-8?q?=E5=86=99=E5=85=A5?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../ref_audio_selector_webui.py | 79 ++++++++++++++----- 1 file changed, 60 insertions(+), 19 deletions(-) diff --git a/Ref_Audio_Selector/ref_audio_selector_webui.py b/Ref_Audio_Selector/ref_audio_selector_webui.py index 3544fefe..5ed15122 100644 --- a/Ref_Audio_Selector/ref_audio_selector_webui.py +++ b/Ref_Audio_Selector/ref_audio_selector_webui.py @@ -571,12 +571,7 @@ def save_work_dir(text_work_space_dir, text_role): return role_dir -def save_role(text_role): - rw_param.write(rw_param.role, text_role) - - def init_ui(): - init.init_all() with gr.Blocks() as app: @@ -588,10 +583,12 @@ def init_ui(): text_role = gr.Text(label=i18n("角色名称"), value=init.text_role_default) button_switch_role_and_refresh = gr.Button(i18n("切换并刷新"), variant="primary") text_work_space_dir.blur(save_work_dir, [text_work_space_dir, text_role], [text_role]) - text_role.blur(save_role, [text_role], []) + text_role.blur(lambda value: rw_param.write(rw_param.role, value), [text_role], []) with gr.Row(): - text_refer_audio_file_dir = gr.Text(label=i18n("参考音频所在目录"), value=init.text_refer_audio_file_dir_default) - text_inference_audio_file_dir = gr.Text(label=i18n("推理音频所在目录"), value=init.text_inference_audio_file_dir_default) + text_refer_audio_file_dir = gr.Text(label=i18n("参考音频所在目录"), + value=init.text_refer_audio_file_dir_default) + text_inference_audio_file_dir = gr.Text(label=i18n("推理音频所在目录"), + value=init.text_inference_audio_file_dir_default) with gr.Tab(label=i18n("第一步:基于训练素材,生成待选参考音频列表")): gr.Markdown(value=i18n("1.1:选择list文件,并提取3-10秒的素材作为参考候选")) text_list_input = gr.Text(label=i18n("请输入list文件路径"), value="") @@ -599,17 +596,23 @@ def init_ui(): button_convert_from_list = gr.Button(i18n("开始生成待参考列表"), variant="primary") text_convert_from_list_info = gr.Text(label=i18n("参考列表生成结果"), value="", interactive=False) gr.Markdown(value=i18n("1.2:选择基准音频,执行相似度匹配,并分段随机抽样")) - text_sample_dir = 
gr.Text(label=i18n("参考音频抽样目录"), value=init.text_sample_dir_default, interactive=True) + text_sample_dir = gr.Text(label=i18n("参考音频抽样目录"), value=init.text_sample_dir_default, + interactive=True) button_convert_from_list.click(convert_from_list, [text_work_space_dir, text_role, text_list_input], [text_convert_from_list_info, text_sample_dir]) with gr.Row(): text_base_voice_path = gr.Text(label=i18n("请输入基准音频路径"), value="") - slider_subsection_num = gr.Slider(minimum=1, maximum=10, step=1, label=i18n("请输入分段数"), value=init.slider_subsection_num_default, + slider_subsection_num = gr.Slider(minimum=1, maximum=10, step=1, label=i18n("请输入分段数"), + value=init.slider_subsection_num_default, interactive=True) slider_sample_num = gr.Slider(minimum=1, maximum=10, step=1, label=i18n("请输入每段随机抽样个数"), value=init.slider_sample_num_default, interactive=True) checkbox_similarity_output = gr.Checkbox(label=i18n("是否将相似度匹配结果输出到临时目录?"), show_label=True) + slider_subsection_num.change(lambda value: rw_param.write(rw_param.subsection_num, value), + [slider_subsection_num], []) + slider_sample_num.change(lambda value: rw_param.write(rw_param.sample_num, value), [slider_sample_num], + []) with gr.Row(): button_sample = gr.Button(i18n("开始分段随机抽样"), variant="primary") text_sample_info = gr.Text(label=i18n("分段随机抽样结果"), value="", interactive=False) @@ -623,8 +626,12 @@ def init_ui(): text_start_api_info = gr.Text(label=i18n("api启动信息"), value="", interactive=False) button_start_api.click(start_api, [], [text_start_api_info]) gr.Markdown(value=i18n("2.1.2:设置模型参数")) - text_api_set_model_base_url = gr.Text(label=i18n("请输入api服务模型切换接口地址"), value=init.text_api_set_model_base_url_default, + text_api_set_model_base_url = gr.Text(label=i18n("请输入api服务模型切换接口地址"), + value=init.text_api_set_model_base_url_default, interactive=True) + text_api_set_model_base_url.blur( + lambda value: rw_param.write(rw_param.api_set_model_base_url, value), + [text_api_set_model_base_url], []) with gr.Row(): dropdown_api_gpt_models = gr.Dropdown(label=i18n("GPT模型列表"), choices=model_manager.get_gpt_model_names(), value="", @@ -636,8 +643,14 @@ def init_ui(): button_refresh_api_model.click(refresh_api_model, [], [dropdown_api_gpt_models, dropdown_api_sovits_models]) with gr.Row(): - text_api_gpt_param = gr.Text(label=i18n("GPT模型参数名"), value=init.text_api_gpt_param_default, interactive=True) - text_api_sovits_param = gr.Text(label=i18n("SoVITS模型参数名"), value=init.text_api_sovits_param_default, interactive=True) + text_api_gpt_param = gr.Text(label=i18n("GPT模型参数名"), value=init.text_api_gpt_param_default, + interactive=True) + text_api_sovits_param = gr.Text(label=i18n("SoVITS模型参数名"), + value=init.text_api_sovits_param_default, interactive=True) + text_api_gpt_param.blur(lambda value: rw_param.write(rw_param.api_gpt_param, value), + [text_api_gpt_param], []) + text_api_sovits_param.blur(lambda value: rw_param.write(rw_param.api_sovits_param, value), + [text_api_sovits_param], []) gr.Markdown(value=i18n("2.1.3:发起设置请求")) text_api_set_model_whole_url = gr.Text(label=i18n("完整的模型参数设置请求地址"), value="", interactive=False) @@ -670,12 +683,21 @@ def init_ui(): gr.Markdown(value=i18n("2.1.1:请到你的项目下,启动服务")) gr.Markdown(value=i18n("2.1.2:设置GPT模型参数")) text_api_v2_set_gpt_model_base_url = gr.Text(label=i18n("请输入api服务GPT模型切换接口地址"), - value=init.text_api_v2_set_gpt_model_base_url_default, interactive=True) + value=init.text_api_v2_set_gpt_model_base_url_default, + interactive=True) + text_api_v2_set_gpt_model_base_url.blur( + lambda value: 
rw_param.write(rw_param.api_v2_set_gpt_model_base_url, value), + [text_api_v2_set_gpt_model_base_url], []) with gr.Row(): - text_api_v2_gpt_model_param = gr.Text(label=i18n("GPT模型参数名"), value=init.text_api_v2_gpt_model_param_default, interactive=True) + text_api_v2_gpt_model_param = gr.Text(label=i18n("GPT模型参数名"), + value=init.text_api_v2_gpt_model_param_default, + interactive=True) dropdown_api_v2_gpt_models = gr.Dropdown(label=i18n("GPT模型列表"), choices=model_manager.get_gpt_model_names(), value="", interactive=True) + text_api_v2_gpt_model_param.blur( + lambda value: rw_param.write(rw_param.api_v2_gpt_model_param, value), + [text_api_v2_gpt_model_param], []) button_api_v2_refresh_gpt = gr.Button(i18n("刷新模型路径"), variant="primary") button_api_v2_refresh_gpt.click(refresh_api_v2_gpt_model, [], [dropdown_api_v2_gpt_models]) text_api_v2_set_gpt_model_whole_url = gr.Text(label=i18n("完整的GPT模型参数设置请求地址"), value="", @@ -700,14 +722,22 @@ def init_ui(): [text_api_v2_start_set_gpt_model_request_info]) gr.Markdown(value=i18n("2.1.3:设置SoVITS模型参数")) text_api_v2_set_sovits_model_base_url = gr.Text(label=i18n("请输入api服务SoVITS模型切换接口地址"), - value=init.text_api_v2_set_sovits_model_base_url_default, interactive=True) + value=init.text_api_v2_set_sovits_model_base_url_default, + interactive=True) + text_api_v2_set_sovits_model_base_url.blur( + lambda value: rw_param.write(rw_param.api_v2_set_sovits_model_base_url, value), + [text_api_v2_set_sovits_model_base_url], []) with gr.Row(): - text_api_v2_sovits_model_param = gr.Text(label=i18n("SoVITS模型参数名"), value=init.text_api_v2_sovits_model_param_default, + text_api_v2_sovits_model_param = gr.Text(label=i18n("SoVITS模型参数名"), + value=init.text_api_v2_sovits_model_param_default, interactive=True) dropdown_api_v2_sovits_models = gr.Dropdown(label=i18n("SoVITS模型列表"), choices=model_manager.get_sovits_model_names(), value="", interactive=True) button_api_v2_refresh_sovits = gr.Button(i18n("刷新模型路径"), variant="primary") + text_api_v2_sovits_model_param.blur( + lambda value: rw_param.write(rw_param.api_v2_sovits_model_param, value), + [text_api_v2_sovits_model_param], []) button_api_v2_refresh_sovits.click(refresh_api_v2_sovits_model, [], [dropdown_api_v2_sovits_models]) text_api_v2_set_sovits_model_whole_url = gr.Text(label=i18n("完整的SoVITS模型参数设置请求地址"), @@ -746,6 +776,12 @@ def init_ui(): value=init.text_ref_text_default) text_emotion = gr.Text(label=i18n("请输入角色情绪参数名"), value=init.text_emotion_default) text_whole_url = gr.Text(label=i18n("完整地址"), value="", interactive=False) + + text_text.blur(lambda value: rw_param.write(rw_param.text_param, value), [text_text], []) + text_ref_path.blur(lambda value: rw_param.write(rw_param.ref_path_param, value), [text_ref_path], []) + text_ref_text.blur(lambda value: rw_param.write(rw_param.ref_text_param, value), [text_ref_text], []) + text_emotion.blur(lambda value: rw_param.write(rw_param.emotion_param, value), [text_emotion], []) + text_url.input(whole_url, [text_url, text_text, text_ref_path, text_ref_text, text_emotion], [text_whole_url]) text_url.blur(save_generate_audio_url, [text_url], []) @@ -766,10 +802,12 @@ def init_ui(): text_test_content = gr.Text(label=i18n("请输入待推理文本路径"), value=init.text_test_content_default) button_open_test_content_file = gr.Button(i18n("打开待推理文本文件"), variant="primary") button_open_test_content_file.click(open_file, [text_test_content], []) + text_test_content.blur(lambda value: rw_param.write(rw_param.test_content_path, value), [text_test_content], []) 
gr.Markdown(value=i18n("2.4:开始批量推理,这个过程比较耗时,可以去干点别的")) slider_request_concurrency_num = gr.Slider(minimum=1, maximum=10, step=1, label=i18n( - "请输入请求并发数,会根据此数创建对应数量的子进程并行发起推理请求"), value=3, + "请输入请求并发数,会根据此数创建对应数量的子进程并行发起推理请求"), value=init.slider_request_concurrency_num_default, interactive=True) + slider_request_concurrency_num.change(lambda value: rw_param.write(rw_param.request_concurrency_num, value), [slider_request_concurrency_num], []) with gr.Row(): button_model_inference = gr.Button(i18n("开启批量推理"), variant="primary") text_model_inference_info = gr.Text(label=i18n("批量推理结果"), value="", interactive=False) @@ -810,6 +848,7 @@ def init_ui(): label=i18n("文本相似度放大边界"), value=init.slider_text_similarity_amplification_boundary_default, interactive=True) + slider_text_similarity_amplification_boundary.change(lambda value: rw_param.write(rw_param.text_similarity_amplification_boundary, value), [slider_text_similarity_amplification_boundary], []) button_asr.click(asr, [text_work_space_dir, text_role, text_asr_audio_dir, dropdown_asr_model, dropdown_asr_size, dropdown_asr_lang], [text_asr_info, text_text_similarity_analysis_path]) @@ -823,7 +862,8 @@ def init_ui(): [text_text_similarity_analysis_info]) gr.Markdown(value=i18n("3.3:根据相似度分析结果,重点检查最后几条是否存在复读等问题")) with gr.Row(): - text_text_similarity_result_path = gr.Text(label=i18n("文本相似度分析结果文件所在路径"), value=init.text_text_similarity_result_path_default, + text_text_similarity_result_path = gr.Text(label=i18n("文本相似度分析结果文件所在路径"), + value=init.text_text_similarity_result_path_default, interactive=True) button_open_text_similarity_result = gr.Button(i18n("打开文本相似度分析结果文件"), variant="primary") button_open_inference_dir = gr.Button(i18n("打开推理音频所在目录"), variant="primary") @@ -863,6 +903,7 @@ def init_ui(): with gr.Tab("第五步:生成参考音频配置文本"): gr.Markdown(value=i18n("5.1:编辑模板")) text_template = gr.Text(label=i18n("模板内容"), value=init.text_template_default, lines=10) + text_template.blur(lambda value: rw_param.write(rw_param.text_template, value), [text_template], []) gr.Markdown(value=i18n("5.2:生成配置")) with gr.Row(): button_create_config = gr.Button(i18n("生成配置"), variant="primary") From d6e255a0715454b695bb99ea0b104956d537e317 Mon Sep 17 00:00:00 2001 From: Downupanddownup Date: Sun, 28 Apr 2024 20:21:15 +0800 Subject: [PATCH 39/72] =?UTF-8?q?=E6=B7=BB=E5=8A=A0windows=E4=B8=8B?= =?UTF-8?q?=E5=90=AF=E5=8A=A8=E6=96=87=E4=BB=B6?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- Ref_Audio_Selector/start_ref_audio_selector_webui.bat | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 Ref_Audio_Selector/start_ref_audio_selector_webui.bat diff --git a/Ref_Audio_Selector/start_ref_audio_selector_webui.bat b/Ref_Audio_Selector/start_ref_audio_selector_webui.bat new file mode 100644 index 00000000..07b66eba --- /dev/null +++ b/Ref_Audio_Selector/start_ref_audio_selector_webui.bat @@ -0,0 +1,8 @@ +CHCP 65001 +@echo off +cd ../ +echo 尝试启动后端程序 +echo 等待一分钟以上没有出现新的内容说明不正常 +runtime\python.exe ./Ref_Audio_Selector/ref_audio_selector_webui.py + +pause \ No newline at end of file From b1ad8b5dcd97665c630014f421ccd1fce8bf415d Mon Sep 17 00:00:00 2001 From: Downupanddownup Date: Mon, 29 Apr 2024 00:20:13 +0800 Subject: [PATCH 40/72] =?UTF-8?q?bug=E4=BF=AE=E5=A4=8D?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- Ref_Audio_Selector/common/model_manager.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Ref_Audio_Selector/common/model_manager.py 
index 53e352e9..ac2e7fb5 100644
--- a/Ref_Audio_Selector/common/model_manager.py
+++ b/Ref_Audio_Selector/common/model_manager.py
@@ -19,16 +19,16 @@ def custom_sort_key(s):

 def get_gpt_model_names():
     gpt_names = [pretrained_gpt_name]
-    for name in os.listdir(SoVITS_weight_root):
-        if name.endswith(".ckpt"): gpt_names.append("%s/%s" % (SoVITS_weight_root, name))
+    for name in os.listdir(GPT_weight_root):
+        if name.endswith(".ckpt"): gpt_names.append("%s/%s" % (GPT_weight_root, name))
     sorted(gpt_names, key=custom_sort_key)
     return gpt_names


 def get_sovits_model_names():
     sovits_names = [pretrained_sovits_name]
-    for name in os.listdir(GPT_weight_root):
-        if name.endswith(".pth"): sovits_names.append("%s/%s" % (GPT_weight_root, name))
+    for name in os.listdir(SoVITS_weight_root):
+        if name.endswith(".pth"): sovits_names.append("%s/%s" % (SoVITS_weight_root, name))
     sorted(sovits_names, key=custom_sort_key)
     return sovits_names

From c9547ab669e6218380f8271ce639a4ed693eb36f Mon Sep 17 00:00:00 2001
From: Downupanddownup
Date: Mon, 29 Apr 2024 00:27:38 +0800
Subject: [PATCH 41/72] Add URL logging
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 Ref_Audio_Selector/tool/audio_inference.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/Ref_Audio_Selector/tool/audio_inference.py b/Ref_Audio_Selector/tool/audio_inference.py
index 2535dec5..93bff6d7 100644
--- a/Ref_Audio_Selector/tool/audio_inference.py
+++ b/Ref_Audio_Selector/tool/audio_inference.py
@@ -189,6 +189,7 @@ def generate_audio_files_for_emotion_group(url_composer, text_list, emotion_list

 def inference_audio_from_api(url):
+    logger.info(f'inference_audio_from_api url: {url}')
     # 发起GET请求
     response = requests.get(url, stream=True)

@@ -202,6 +203,8 @@ def inference_audio_from_api(url):

 def start_api_set_model(set_model_url_composer, gpt_models, sovits_models):
     url, post_body = set_model_url_composer.build_post_url(gpt_models, sovits_models)
+    logger.info(f'start_api_set_model url: {url}')
+    logger.info(f'start_api_set_model post_body: {post_body}')
     response = requests.post(url, json=post_body)
     if response.status_code == 200:
         result = response.text
@@ -212,6 +215,7 @@ def start_api_set_model(set_model_url_composer, gpt_models, sovits_models):

 def start_api_v2_set_gpt_model(set_model_url_composer, gpt_models):
     url = set_model_url_composer.build_get_url([gpt_models])
+    logger.info(f'start_api_v2_set_gpt_model url: {url}')
     response = requests.get(url)
     if response.status_code == 200:
         result = response.text
@@ -222,6 +226,7 @@ def start_api_v2_set_gpt_model(set_model_url_composer, gpt_models):

 def start_api_v2_set_sovits_model(set_model_url_composer, sovits_models):
     url = set_model_url_composer.build_get_url([sovits_models])
+    logger.info(f'start_api_v2_set_sovits_model url: {url}')
     response = requests.get(url)
     if response.status_code == 200:
         result = response.text
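A quick reading note on PATCH 40 above: the swap of GPT_weight_root and SoVITS_weight_root fixes which directory each helper scans, but both helpers still call sorted(...) without using its return value, so the collected names are returned unsorted. In Python, sorted() builds a new list while list.sort() sorts in place. A minimal, self-contained sketch of the difference; the real custom_sort_key lives in model_manager.py, and a stand-in is used here:

    # Hypothetical illustration, not part of the patch series.
    def custom_sort_key(s):
        return s.lower()  # stand-in for the project's sort key

    names = ["B.ckpt", "a.ckpt"]
    sorted(names, key=custom_sort_key)  # result discarded, names unchanged
    assert names == ["B.ckpt", "a.ckpt"]
    names.sort(key=custom_sort_key)     # sorts the list in place
    assert names == ["a.ckpt", "B.ckpt"]

If sorted output is wanted, gpt_names.sort(key=custom_sort_key), and likewise for sovits_names, would be the in-place fix.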
From 01468158d30a409d37062720f79e2a1e065d7258 Mon Sep 17 00:00:00 2001
From: Downupanddownup
Date: Mon, 29 Apr 2024 00:29:52 +0800
Subject: [PATCH 42/72] Add URL logging
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 Ref_Audio_Selector/tool/audio_inference.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/Ref_Audio_Selector/tool/audio_inference.py b/Ref_Audio_Selector/tool/audio_inference.py
index 93bff6d7..530d9c95 100644
--- a/Ref_Audio_Selector/tool/audio_inference.py
+++ b/Ref_Audio_Selector/tool/audio_inference.py
@@ -203,6 +203,7 @@ def inference_audio_from_api(url):

 def start_api_set_model(set_model_url_composer, gpt_models, sovits_models):
     url, post_body = set_model_url_composer.build_post_url(gpt_models, sovits_models)
+    logger.info(f'set_model_url_composer url: {set_model_url_composer}')
     logger.info(f'start_api_set_model url: {url}')
     logger.info(f'start_api_set_model post_body: {post_body}')
     response = requests.post(url, json=post_body)

From 536c226b1aa0e9edff652b09395ce5ac0ee387b3 Mon Sep 17 00:00:00 2001
From: Downupanddownup
Date: Mon, 29 Apr 2024 00:32:01 +0800
Subject: [PATCH 43/72] Add URL logging
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 Ref_Audio_Selector/tool/audio_inference.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Ref_Audio_Selector/tool/audio_inference.py b/Ref_Audio_Selector/tool/audio_inference.py
index 530d9c95..e199e5ab 100644
--- a/Ref_Audio_Selector/tool/audio_inference.py
+++ b/Ref_Audio_Selector/tool/audio_inference.py
@@ -202,7 +202,7 @@ def inference_audio_from_api(url):

 def start_api_set_model(set_model_url_composer, gpt_models, sovits_models):
-    url, post_body = set_model_url_composer.build_post_url(gpt_models, sovits_models)
+    url, post_body = set_model_url_composer.build_post_url([gpt_models, sovits_models], True)
     logger.info(f'set_model_url_composer url: {set_model_url_composer}')
     logger.info(f'start_api_set_model url: {url}')
     logger.info(f'start_api_set_model post_body: {post_body}')
     response = requests.post(url, json=post_body)

From 61db7f05dcfa3df3f1cf90709877b1168bdae489 Mon Sep 17 00:00:00 2001
From: Downupanddownup
Date: Mon, 29 Apr 2024 00:41:27 +0800
Subject: [PATCH 44/72] Testing
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 Ref_Audio_Selector/tool/audio_inference.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/Ref_Audio_Selector/tool/audio_inference.py b/Ref_Audio_Selector/tool/audio_inference.py
index e199e5ab..31c62bab 100644
--- a/Ref_Audio_Selector/tool/audio_inference.py
+++ b/Ref_Audio_Selector/tool/audio_inference.py
@@ -167,9 +167,9 @@ def generate_audio_files_for_emotion_group(url_composer, text_list, emotion_list
             continue

         if url_composer.is_emotion():
-            real_url = url_composer.build_url_with_emotion(text, emotion['emotion'])
+            real_url = url_composer.build_url_with_emotion(text, emotion['emotion'], False)
         else:
-            real_url = url_composer.build_url_with_ref(text, emotion['ref_path'], emotion['ref_text'])
+            real_url = url_composer.build_url_with_ref(text, emotion['ref_path'], emotion['ref_text'], False)

         audio_bytes = inference_audio_from_api(real_url)
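PATCH 43 and PATCH 44 change the SetModelURLComposer call sites to pass the parameter values as a list plus an extra boolean. The class itself is not shown in this excerpt, so its exact signature is an assumption, but the call shapes read like build methods that take the values and a flag controlling whether they are URL-encoded. A rough, hypothetical sketch of such an interface, with all names guessed from the call sites:

    from urllib.parse import quote

    # Hypothetical sketch only; the real SetModelURLComposer is defined in
    # Ref_Audio_Selector/tool/audio_inference.py and may differ.
    def build_get_url(base_url, values, need_url_encode):
        # Append each value as a query component, optionally percent-encoded.
        encoded = [quote(v) if need_url_encode else v for v in values]
        return base_url + '?' + '&'.join(encoded)

    print(build_get_url('http://localhost:9880/set_gpt_weights',
                        ['GPT_weights/demo.ckpt'], False))

Passing False for reference paths, as PATCH 44 does, would keep file paths readable in the request URL; whether that matches the project's intent is inferred, not confirmed.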
generate_audio_files_for_emotion_group(url_composer, text_list, emotion_list f.write(audio_bytes) has_generated_count += 1 - logger.info(f"进度: {has_generated_count}/{all_count}") + logger.info(f"进程ID: {os.getpid()}, 进度: {has_generated_count}/{all_count}") end_time = time.perf_counter() # 获取计时终点 elapsed_time = end_time - start_time # 计算执行耗时 # 记录日志内容 diff --git a/Ref_Audio_Selector/tool/text_check.py b/Ref_Audio_Selector/tool/text_check.py index 98c299d4..6281940c 100644 --- a/Ref_Audio_Selector/tool/text_check.py +++ b/Ref_Audio_Selector/tool/text_check.py @@ -70,7 +70,7 @@ def remove_low_similarity_files(ref_audio_list, report_list, audio_text_similari def delete_ref_audio_below_boundary(ref_audio_path, text_similarity_result_path, sync_inference_audio_dir, audio_text_similarity_boundary): - ref_audio_list = common.RefAudioListManager(ref_audio_path) + ref_audio_list = common.RefAudioListManager(ref_audio_path).get_ref_audio_list() report_list = parse_text_similarity_result_txt(text_similarity_result_path) count = remove_low_similarity_files(ref_audio_list, report_list, audio_text_similarity_boundary) audio_check.sync_ref_audio(ref_audio_path, sync_inference_audio_dir) From 371a2d713891297e92056eb0d956f60aeb3987a0 Mon Sep 17 00:00:00 2001 From: Downupanddownup Date: Mon, 29 Apr 2024 10:13:22 +0800 Subject: [PATCH 46/72] =?UTF-8?q?bug=E8=B0=83=E6=95=B4?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- Ref_Audio_Selector/config.ini | 2 + .../config_param/config_manager.py | 8 +++- .../config_param/config_params.py | 2 + .../ref_audio_selector_webui.py | 48 ++++++++++++------- Ref_Audio_Selector/tool/audio_inference.py | 4 +- .../tool/text_comparison/asr_text_process.py | 2 +- Ref_Audio_Selector/ui_init/init_ui_param.py | 9 +++- 7 files changed, 52 insertions(+), 23 deletions(-) diff --git a/Ref_Audio_Selector/config.ini b/Ref_Audio_Selector/config.ini index 43b5ef65..b4658637 100644 --- a/Ref_Audio_Selector/config.ini +++ b/Ref_Audio_Selector/config.ini @@ -1,6 +1,8 @@ # config.ini [Base] +# 服务端口号 +server_port = 9423 # 参考音频目录 reference_audio_dir = refer_audio # 临时文件目录 diff --git a/Ref_Audio_Selector/config_param/config_manager.py b/Ref_Audio_Selector/config_param/config_manager.py index d540ae39..5aaa1b6b 100644 --- a/Ref_Audio_Selector/config_param/config_manager.py +++ b/Ref_Audio_Selector/config_param/config_manager.py @@ -48,7 +48,13 @@ def read(self, key): def write(self, key, content): file_path = os.path.join(self.base_dir, key + '.txt') - clean_content = content.strip() + + # 确保内容是字符串类型,如果不是,转换为字符串 + if not isinstance(content, str): + clean_content = str(content).strip() # 转换为字符串并移除首尾空白 + else: + clean_content = content.strip() + common.write_text_to_file(clean_content, file_path) diff --git a/Ref_Audio_Selector/config_param/config_params.py b/Ref_Audio_Selector/config_param/config_params.py index cd75c73b..b30924d1 100644 --- a/Ref_Audio_Selector/config_param/config_params.py +++ b/Ref_Audio_Selector/config_param/config_params.py @@ -3,6 +3,8 @@ config = config_manager.get_config() # [Base] +# 服务端口号 +server_port = int(config.get_base('server_port')) # 参考音频目录 reference_audio_dir = config.get_base('reference_audio_dir') # 临时文件目录 diff --git a/Ref_Audio_Selector/ref_audio_selector_webui.py b/Ref_Audio_Selector/ref_audio_selector_webui.py index 5ed15122..aa18b48b 100644 --- a/Ref_Audio_Selector/ref_audio_selector_webui.py +++ b/Ref_Audio_Selector/ref_audio_selector_webui.py @@ -279,12 +279,15 @@ def 
text_similarity_analysis(text_work_space_dir, text_role, slider_text_similar similarity_dir, slider_text_similarity_amplification_boundary) + average_similarity_file = os.path.join(similarity_dir, + f'{params.text_emotion_average_similarity_report_filename}.txt') + text_text_similarity_analysis_info = f"耗时:{time_consuming:0.1f}秒;相似度分析成功:生成目录{similarity_dir}" except Exception as e: logger.error("发生异常: \n%s", traceback.format_exc()) text_text_similarity_analysis_info = f"发生异常:{e}" - return i18n(text_text_similarity_analysis_info) + return i18n(text_text_similarity_analysis_info), average_similarity_file def open_text_similarity_analysis(asr_file_path, output_dir, similarity_enlarge_boundary=0.9): @@ -579,11 +582,12 @@ def init_ui(): with gr.Accordion(label=i18n("基本信息")): with gr.Row(): text_work_space_dir = gr.Text(label=i18n("工作目录,后续操作所生成文件都会保存在此目录下"), - value=init.text_work_space_dir_default) - text_role = gr.Text(label=i18n("角色名称"), value=init.text_role_default) - button_switch_role_and_refresh = gr.Button(i18n("切换并刷新"), variant="primary") + value=init.text_work_space_dir_default, scale=2) + text_role = gr.Text(label=i18n("角色名称"), value=init.text_role_default, scale=2) + button_switch_role_and_refresh = gr.Button(i18n("切换并刷新"), variant="primary", scale=1) text_work_space_dir.blur(save_work_dir, [text_work_space_dir, text_role], [text_role]) text_role.blur(lambda value: rw_param.write(rw_param.role, value), [text_role], []) + gr.Markdown(value=i18n("下方为公共参数,会随着进度自动填充,无需填写")) with gr.Row(): text_refer_audio_file_dir = gr.Text(label=i18n("参考音频所在目录"), value=init.text_refer_audio_file_dir_default) @@ -797,17 +801,21 @@ def init_ui(): text_emotion.input(whole_url, [text_url, text_text, text_ref_path, text_ref_text, text_emotion], [text_whole_url]) text_emotion.blur(save_emotion_param, [text_emotion], []) - gr.Markdown(value=i18n("2.3:配置待推理文本,一句一行,不要太多,10条即可")) + gr.Markdown(value=i18n("2.3:配置待推理文本,一句一行,尽量保证文本多样性,不同情绪、不同类型的都来一点")) with gr.Row(): text_test_content = gr.Text(label=i18n("请输入待推理文本路径"), value=init.text_test_content_default) button_open_test_content_file = gr.Button(i18n("打开待推理文本文件"), variant="primary") button_open_test_content_file.click(open_file, [text_test_content], []) - text_test_content.blur(lambda value: rw_param.write(rw_param.test_content_path, value), [text_test_content], []) + text_test_content.blur(lambda value: rw_param.write(rw_param.test_content_path, value), + [text_test_content], []) gr.Markdown(value=i18n("2.4:开始批量推理,这个过程比较耗时,可以去干点别的")) - slider_request_concurrency_num = gr.Slider(minimum=1, maximum=10, step=1, label=i18n( - "请输入请求并发数,会根据此数创建对应数量的子进程并行发起推理请求"), value=init.slider_request_concurrency_num_default, + slider_request_concurrency_num = gr.Slider(minimum=1, maximum=init.slider_request_concurrency_max_num, + step=1, label=i18n( + "请输入请求并发数,会根据此数创建对应数量的子进程并行发起推理请求"), + value=init.slider_request_concurrency_num_default, interactive=True) - slider_request_concurrency_num.change(lambda value: rw_param.write(rw_param.request_concurrency_num, value), [slider_request_concurrency_num], []) + slider_request_concurrency_num.change(lambda value: rw_param.write(rw_param.request_concurrency_num, value), + [slider_request_concurrency_num], []) with gr.Row(): button_model_inference = gr.Button(i18n("开启批量推理"), variant="primary") text_model_inference_info = gr.Text(label=i18n("批量推理结果"), value="", interactive=False) @@ -848,7 +856,9 @@ def init_ui(): label=i18n("文本相似度放大边界"), value=init.slider_text_similarity_amplification_boundary_default, interactive=True) - 
slider_text_similarity_amplification_boundary.change(lambda value: rw_param.write(rw_param.text_similarity_amplification_boundary, value), [slider_text_similarity_amplification_boundary], []) + slider_text_similarity_amplification_boundary.change( + lambda value: rw_param.write(rw_param.text_similarity_amplification_boundary, value), + [slider_text_similarity_amplification_boundary], []) button_asr.click(asr, [text_work_space_dir, text_role, text_asr_audio_dir, dropdown_asr_model, dropdown_asr_size, dropdown_asr_lang], [text_asr_info, text_text_similarity_analysis_path]) @@ -856,10 +866,6 @@ def init_ui(): button_text_similarity_analysis = gr.Button(i18n("启动文本相似度分析"), variant="primary") text_text_similarity_analysis_info = gr.Text(label=i18n("文本相似度分析结果"), value="", interactive=False) - button_text_similarity_analysis.click(text_similarity_analysis, [text_work_space_dir, text_role, - slider_text_similarity_amplification_boundary, - text_text_similarity_analysis_path], - [text_text_similarity_analysis_info]) gr.Markdown(value=i18n("3.3:根据相似度分析结果,重点检查最后几条是否存在复读等问题")) with gr.Row(): text_text_similarity_result_path = gr.Text(label=i18n("文本相似度分析结果文件所在路径"), @@ -867,10 +873,17 @@ def init_ui(): interactive=True) button_open_text_similarity_result = gr.Button(i18n("打开文本相似度分析结果文件"), variant="primary") button_open_inference_dir = gr.Button(i18n("打开推理音频所在目录"), variant="primary") + + button_text_similarity_analysis.click(text_similarity_analysis, [text_work_space_dir, text_role, + slider_text_similarity_amplification_boundary, + text_text_similarity_analysis_path], + [text_text_similarity_analysis_info, + text_text_similarity_result_path]) + button_open_text_similarity_result.click(open_file, [text_text_similarity_result_path], []) button_open_inference_dir.click(open_file, [text_inference_audio_file_dir], []) - slider_audio_text_similarity_boundary = gr.Slider(minimum=0, maximum=1, step=0.01, - label=i18n("音频文本相似度边界值"), value=0.80, + slider_audio_text_similarity_boundary = gr.Slider(minimum=0, maximum=1, step=0.001, + label=i18n("音频文本相似度边界值"), value=0.800, interactive=True) with gr.Row(): button_delete_ref_audio_below_boundary = gr.Button(i18n("删除音频文本相似度边界值以下的参考音频"), @@ -922,7 +935,8 @@ def init_ui(): [text_model_inference_info, text_asr_audio_dir, text_inference_audio_file_dir]) app.launch( - server_port=9423, + server_port=params.server_port, + inbrowser=True, quiet=True, ) diff --git a/Ref_Audio_Selector/tool/audio_inference.py b/Ref_Audio_Selector/tool/audio_inference.py index 6e9e5e72..8729c5df 100644 --- a/Ref_Audio_Selector/tool/audio_inference.py +++ b/Ref_Audio_Selector/tool/audio_inference.py @@ -114,9 +114,7 @@ def safe_encode_query_params(original_url): return encoded_url -def generate_audio_files_parallel(url_composer, text_list, emotion_list, output_dir_path, num_processes=None): - if num_processes is None: - num_processes = multiprocessing.cpu_count() +def generate_audio_files_parallel(url_composer, text_list, emotion_list, output_dir_path, num_processes=1): # 将emotion_list均匀分成num_processes个子集 emotion_groups = np.array_split(emotion_list, num_processes) diff --git a/Ref_Audio_Selector/tool/text_comparison/asr_text_process.py b/Ref_Audio_Selector/tool/text_comparison/asr_text_process.py index c4eccd61..6511cba7 100644 --- a/Ref_Audio_Selector/tool/text_comparison/asr_text_process.py +++ b/Ref_Audio_Selector/tool/text_comparison/asr_text_process.py @@ -157,5 +157,5 @@ def parse_arguments(): if __name__ == '__main__': cmd = parse_arguments() - print(cmd) + # print(cmd) 
process(cmd.asr_file_path, cmd.output_dir, cmd.similarity_enlarge_boundary) diff --git a/Ref_Audio_Selector/ui_init/init_ui_param.py b/Ref_Audio_Selector/ui_init/init_ui_param.py index 4251800d..b3e72a26 100644 --- a/Ref_Audio_Selector/ui_init/init_ui_param.py +++ b/Ref_Audio_Selector/ui_init/init_ui_param.py @@ -1,4 +1,5 @@ import os +import multiprocessing import Ref_Audio_Selector.config_param.config_params as params import Ref_Audio_Selector.common.common as common @@ -55,6 +56,8 @@ text_test_content_default = None # 请求并发数 slider_request_concurrency_num_default = 3 +# 最大并发数 +slider_request_concurrency_max_num = None # -------------------第三步------------------------------ @@ -108,7 +111,7 @@ def init_first(): def init_second(): global text_api_set_model_base_url_default, text_api_gpt_param_default, text_api_sovits_param_default, text_api_v2_set_gpt_model_base_url_default, text_api_v2_gpt_model_param_default global text_api_v2_set_sovits_model_base_url_default, text_api_v2_sovits_model_param_default, text_url_default, text_text_default, text_ref_path_default - global text_ref_text_default, text_emotion_default, text_test_content_default, slider_request_concurrency_num_default + global text_ref_text_default, text_emotion_default, text_test_content_default, slider_request_concurrency_num_default, slider_request_concurrency_max_num text_api_set_model_base_url_default = empty_default(rw_param.read(rw_param.api_set_model_base_url), 'http://localhost:9880/set_model') @@ -133,8 +136,12 @@ def init_second(): text_test_content_default = empty_default(rw_param.read(rw_param.test_content_path), params.default_test_text_path) + slider_request_concurrency_max_num = multiprocessing.cpu_count() + slider_request_concurrency_num_default = empty_default(rw_param.read(rw_param.request_concurrency_num), 3) + slider_request_concurrency_num_default = min(int(slider_request_concurrency_num_default), slider_request_concurrency_max_num) + def init_third(): global text_asr_audio_dir_default, text_text_similarity_analysis_path_default, slider_text_similarity_amplification_boundary_default, text_text_similarity_result_path_default From 5280d17d2f01b54584bcad76e6c9f7f8276d2bd8 Mon Sep 17 00:00:00 2001 From: Downupanddownup Date: Mon, 29 Apr 2024 10:49:07 +0800 Subject: [PATCH 47/72] =?UTF-8?q?ui=E5=B8=83=E5=B1=80=E8=B0=83=E6=95=B4?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../ref_audio_selector_webui.py | 83 ++++++++++++------- 1 file changed, 55 insertions(+), 28 deletions(-) diff --git a/Ref_Audio_Selector/ref_audio_selector_webui.py b/Ref_Audio_Selector/ref_audio_selector_webui.py index aa18b48b..62491934 100644 --- a/Ref_Audio_Selector/ref_audio_selector_webui.py +++ b/Ref_Audio_Selector/ref_audio_selector_webui.py @@ -309,9 +309,13 @@ def open_text_similarity_analysis(asr_file_path, output_dir, similarity_enlarge_ return None +hide_voice_similarity_dir = '' + + # 根据一个参考音频,对指定目录下的音频进行相似度分析,并输出到另一个目录 def similarity_audio_output(text_work_space_dir, text_role, text_base_audio_path, text_compare_audio_dir): + global hide_voice_similarity_dir text_work_space_dir, text_base_audio_path, text_compare_audio_dir \ = common.batch_clean_paths([text_work_space_dir, text_base_audio_path, text_compare_audio_dir]) @@ -332,6 +336,8 @@ def similarity_audio_output(text_work_space_dir, text_role, text_base_audio_path text_similarity_audio_output_info = f'耗时:{time_consuming:0.1f}秒;相似度分析成功:生成目录{similarity_file_dir},文件{similarity_file}' + hide_voice_similarity_dir = 
os.path.join(text_work_space_dir, params.audio_similarity_dir) + except Exception as e: logger.error("发生异常: \n%s", traceback.format_exc()) text_similarity_audio_output_info = f"发生异常:{e}" @@ -363,6 +369,9 @@ def sync_ref_audio(text_work_space_dir, text_role, text_refer_audio_file_dir, return i18n(text_sync_ref_audio_info) +hide_config_file = '' + + # 根据模板和参考音频目录,生成参考音频配置内容 def create_config(text_work_space_dir, text_role, text_template, text_refer_audio_file_dir): text_work_space_dir, text_refer_audio_file_dir \ @@ -385,6 +394,8 @@ def create_config(text_work_space_dir, text_role, text_template, text_refer_audi text_create_config_info = f"耗时:{time_consuming:0.1f}秒;配置生成成功:生成文件{config_file}" + hide_config_file = config_file + except Exception as e: logger.error("发生异常: \n%s", traceback.format_exc()) text_create_config_info = f"发生异常:{e}" @@ -582,8 +593,8 @@ def init_ui(): with gr.Accordion(label=i18n("基本信息")): with gr.Row(): text_work_space_dir = gr.Text(label=i18n("工作目录,后续操作所生成文件都会保存在此目录下"), - value=init.text_work_space_dir_default, scale=2) - text_role = gr.Text(label=i18n("角色名称"), value=init.text_role_default, scale=2) + value=init.text_work_space_dir_default, scale=4) + text_role = gr.Text(label=i18n("角色名称"), value=init.text_role_default, scale=4) button_switch_role_and_refresh = gr.Button(i18n("切换并刷新"), variant="primary", scale=1) text_work_space_dir.blur(save_work_dir, [text_work_space_dir, text_role], [text_role]) text_role.blur(lambda value: rw_param.write(rw_param.role, value), [text_role], []) @@ -597,11 +608,14 @@ def init_ui(): gr.Markdown(value=i18n("1.1:选择list文件,并提取3-10秒的素材作为参考候选")) text_list_input = gr.Text(label=i18n("请输入list文件路径"), value="") with gr.Row(): - button_convert_from_list = gr.Button(i18n("开始生成待参考列表"), variant="primary") - text_convert_from_list_info = gr.Text(label=i18n("参考列表生成结果"), value="", interactive=False) + button_convert_from_list = gr.Button(i18n("开始生成待参考列表"), variant="primary", scale=4) + text_convert_from_list_info = gr.Text(label=i18n("参考列表生成结果"), value="", interactive=False, + scale=4) + button_convert_from_list_result_dir = gr.Button(i18n("打开目录"), variant="primary", scale=1) gr.Markdown(value=i18n("1.2:选择基准音频,执行相似度匹配,并分段随机抽样")) text_sample_dir = gr.Text(label=i18n("参考音频抽样目录"), value=init.text_sample_dir_default, interactive=True) + button_convert_from_list_result_dir.click(open_file, [text_sample_dir], []) button_convert_from_list.click(convert_from_list, [text_work_space_dir, text_role, text_list_input], [text_convert_from_list_info, text_sample_dir]) with gr.Row(): @@ -618,8 +632,9 @@ def init_ui(): slider_sample_num.change(lambda value: rw_param.write(rw_param.sample_num, value), [slider_sample_num], []) with gr.Row(): - button_sample = gr.Button(i18n("开始分段随机抽样"), variant="primary") - text_sample_info = gr.Text(label=i18n("分段随机抽样结果"), value="", interactive=False) + button_sample = gr.Button(i18n("开始分段随机抽样"), variant="primary", scale=4) + text_sample_info = gr.Text(label=i18n("分段随机抽样结果"), value="", interactive=False, scale=4) + button_sample_result_open = gr.Button(i18n("打开目录"), variant="primary", scale=1) with gr.Tab(label=i18n("第二步:基于参考音频和测试文本,执行批量推理")): gr.Markdown(value=i18n("2.1:启动推理服务,并配置模型参数")) with gr.Accordion(label=i18n("详情")): @@ -639,11 +654,11 @@ def init_ui(): with gr.Row(): dropdown_api_gpt_models = gr.Dropdown(label=i18n("GPT模型列表"), choices=model_manager.get_gpt_model_names(), value="", - interactive=True) + interactive=True, scale=4) dropdown_api_sovits_models = gr.Dropdown(label=i18n("SoVITS模型列表"), 
choices=model_manager.get_sovits_model_names(), - value="", interactive=True) - button_refresh_api_model = gr.Button(i18n("刷新模型路径"), variant="primary") + value="", interactive=True, scale=4) + button_refresh_api_model = gr.Button(i18n("刷新模型路径"), variant="primary", scale=1) button_refresh_api_model.click(refresh_api_model, [], [dropdown_api_gpt_models, dropdown_api_sovits_models]) with gr.Row(): @@ -695,14 +710,14 @@ def init_ui(): with gr.Row(): text_api_v2_gpt_model_param = gr.Text(label=i18n("GPT模型参数名"), value=init.text_api_v2_gpt_model_param_default, - interactive=True) + interactive=True, scale=4) dropdown_api_v2_gpt_models = gr.Dropdown(label=i18n("GPT模型列表"), choices=model_manager.get_gpt_model_names(), value="", - interactive=True) + interactive=True, scale=4) text_api_v2_gpt_model_param.blur( lambda value: rw_param.write(rw_param.api_v2_gpt_model_param, value), [text_api_v2_gpt_model_param], []) - button_api_v2_refresh_gpt = gr.Button(i18n("刷新模型路径"), variant="primary") + button_api_v2_refresh_gpt = gr.Button(i18n("刷新模型路径"), variant="primary", scale=1) button_api_v2_refresh_gpt.click(refresh_api_v2_gpt_model, [], [dropdown_api_v2_gpt_models]) text_api_v2_set_gpt_model_whole_url = gr.Text(label=i18n("完整的GPT模型参数设置请求地址"), value="", interactive=False) @@ -734,11 +749,11 @@ def init_ui(): with gr.Row(): text_api_v2_sovits_model_param = gr.Text(label=i18n("SoVITS模型参数名"), value=init.text_api_v2_sovits_model_param_default, - interactive=True) + interactive=True, scale=4) dropdown_api_v2_sovits_models = gr.Dropdown(label=i18n("SoVITS模型列表"), choices=model_manager.get_sovits_model_names(), - value="", interactive=True) - button_api_v2_refresh_sovits = gr.Button(i18n("刷新模型路径"), variant="primary") + value="", interactive=True, scale=4) + button_api_v2_refresh_sovits = gr.Button(i18n("刷新模型路径"), variant="primary", scale=1) text_api_v2_sovits_model_param.blur( lambda value: rw_param.write(rw_param.api_v2_sovits_model_param, value), [text_api_v2_sovits_model_param], []) @@ -803,8 +818,9 @@ def init_ui(): text_emotion.blur(save_emotion_param, [text_emotion], []) gr.Markdown(value=i18n("2.3:配置待推理文本,一句一行,尽量保证文本多样性,不同情绪、不同类型的都来一点")) with gr.Row(): - text_test_content = gr.Text(label=i18n("请输入待推理文本路径"), value=init.text_test_content_default) - button_open_test_content_file = gr.Button(i18n("打开待推理文本文件"), variant="primary") + text_test_content = gr.Text(label=i18n("请输入待推理文本路径"), value=init.text_test_content_default, + scale=8) + button_open_test_content_file = gr.Button(i18n("打开推理文本"), variant="primary", scale=1) button_open_test_content_file.click(open_file, [text_test_content], []) text_test_content.blur(lambda value: rw_param.write(rw_param.test_content_path, value), [text_test_content], []) @@ -817,8 +833,9 @@ def init_ui(): slider_request_concurrency_num.change(lambda value: rw_param.write(rw_param.request_concurrency_num, value), [slider_request_concurrency_num], []) with gr.Row(): - button_model_inference = gr.Button(i18n("开启批量推理"), variant="primary") - text_model_inference_info = gr.Text(label=i18n("批量推理结果"), value="", interactive=False) + button_model_inference = gr.Button(i18n("开启批量推理"), variant="primary", scale=4) + text_model_inference_info = gr.Text(label=i18n("批量推理结果"), value="", interactive=False, scale=4) + button_model_inference_result_open = gr.Button(i18n("打开目录"), variant="primary", scale=1) with gr.Tab(label=i18n("第三步:进行参考音频效果校验与筛选")): gr.Markdown(value=i18n("3.1:启动asr,获取推理音频文本")) text_asr_audio_dir = gr.Text(label=i18n("待asr的音频所在目录"), value=init.text_asr_audio_dir_default, @@ -845,8 
+862,9 @@ def init_ui(): dropdown_asr_model.change(change_lang_choices, [dropdown_asr_model], [dropdown_asr_lang]) dropdown_asr_model.change(change_size_choices, [dropdown_asr_model], [dropdown_asr_size]) with gr.Row(): - button_asr = gr.Button(i18n("启动asr"), variant="primary") - text_asr_info = gr.Text(label=i18n("asr结果"), value="", interactive=False) + button_asr = gr.Button(i18n("启动asr"), variant="primary", scale=4) + text_asr_info = gr.Text(label=i18n("asr结果"), value="", interactive=False, scale=4) + button_asr_result_open = gr.Button(i18n("打开文件"), variant="primary", scale=1) gr.Markdown(value=i18n("3.2:启动文本相似度分析")) with gr.Row(): text_text_similarity_analysis_path = gr.Text(label=i18n("待分析的文件路径"), @@ -862,6 +880,7 @@ def init_ui(): button_asr.click(asr, [text_work_space_dir, text_role, text_asr_audio_dir, dropdown_asr_model, dropdown_asr_size, dropdown_asr_lang], [text_asr_info, text_text_similarity_analysis_path]) + button_asr_result_open.click(open_file, [text_text_similarity_analysis_path], []) with gr.Row(): button_text_similarity_analysis = gr.Button(i18n("启动文本相似度分析"), variant="primary") text_text_similarity_analysis_info = gr.Text(label=i18n("文本相似度分析结果"), value="", @@ -870,9 +889,9 @@ def init_ui(): with gr.Row(): text_text_similarity_result_path = gr.Text(label=i18n("文本相似度分析结果文件所在路径"), value=init.text_text_similarity_result_path_default, - interactive=True) - button_open_text_similarity_result = gr.Button(i18n("打开文本相似度分析结果文件"), variant="primary") - button_open_inference_dir = gr.Button(i18n("打开推理音频所在目录"), variant="primary") + interactive=True, scale=7) + button_open_text_similarity_result = gr.Button(i18n("打开结果文件"), variant="primary", scale=1) + button_open_inference_dir = gr.Button(i18n("打开推理目录"), variant="primary", scale=1) button_text_similarity_analysis.click(text_similarity_analysis, [text_work_space_dir, text_role, slider_text_similarity_amplification_boundary, @@ -901,11 +920,15 @@ def init_ui(): text_base_audio_path = gr.Text(label=i18n("请输入基准音频"), value="") text_compare_audio_dir = gr.Text(label=i18n("请输入待比较的音频文件目录"), value="") with gr.Row(): - button_similarity_audio_output = gr.Button(i18n("输出相似度-参考音频到临时目录"), variant="primary") - text_similarity_audio_output_info = gr.Text(label=i18n("输出结果"), value="", interactive=False) + button_similarity_audio_output = gr.Button(i18n("输出相似度-参考音频到临时目录"), variant="primary", + scale=4) + text_similarity_audio_output_info = gr.Text(label=i18n("输出结果"), value="", interactive=False, + scale=4) + button_similarity_audio_output_result_open = gr.Button(i18n("打开目录"), variant="primary", scale=1) button_similarity_audio_output.click(similarity_audio_output, [text_work_space_dir, text_role, text_base_audio_path, text_compare_audio_dir], [text_similarity_audio_output_info]) + button_similarity_audio_output_result_open.click(lambda: open_file(hide_voice_similarity_dir), [], []) gr.Markdown(value=i18n("4.2:如果发现存在低音质的推理音频,那么就去参考音频目录下,把原参考音频删了")) gr.Markdown(value=i18n("4.3:删除参考音频之后,按下面的操作,会将推理音频目录下对应的音频也删掉")) with gr.Row(): @@ -919,20 +942,24 @@ def init_ui(): text_template.blur(lambda value: rw_param.write(rw_param.text_template, value), [text_template], []) gr.Markdown(value=i18n("5.2:生成配置")) with gr.Row(): - button_create_config = gr.Button(i18n("生成配置"), variant="primary") - text_create_config_info = gr.Text(label=i18n("生成结果"), value="", interactive=False) + button_create_config = gr.Button(i18n("生成配置"), variant="primary", scale=4) + text_create_config_info = gr.Text(label=i18n("生成结果"), value="", interactive=False, scale=4) + 
button_create_config_result_open = gr.Button(i18n("打开文件"), variant="primary", scale=1) button_create_config.click(create_config, [text_work_space_dir, text_role, text_template, text_refer_audio_file_dir], [text_create_config_info]) + button_create_config_result_open.click(lambda: open_file(hide_config_file), [], []) button_sample.click(sample, [text_work_space_dir, text_role, text_sample_dir, text_base_voice_path, slider_subsection_num, slider_sample_num, checkbox_similarity_output], [text_sample_info, text_refer_audio_file_dir]) + button_sample_result_open.click(open_file, [text_refer_audio_file_dir], []) button_model_inference.click(model_inference, [text_work_space_dir, text_role, slider_request_concurrency_num, text_refer_audio_file_dir, text_url, text_text, text_ref_path, text_ref_text, text_emotion, text_test_content], [text_model_inference_info, text_asr_audio_dir, text_inference_audio_file_dir]) + button_model_inference_result_open.click(open_file, [text_inference_audio_file_dir], []) app.launch( server_port=params.server_port, From c26fa983a49d171e2a0b7461781d06baa8130c2c Mon Sep 17 00:00:00 2001 From: Downupanddownup Date: Mon, 29 Apr 2024 11:23:41 +0800 Subject: [PATCH 48/72] =?UTF-8?q?=E5=8F=82=E8=80=83=E7=B1=BB=E5=9E=8B?= =?UTF-8?q?=EF=BC=8C=E6=B7=BB=E5=8A=A0=E9=80=89=E6=8B=A9?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../config_param/config_manager.py | 1 + .../ref_audio_selector_webui.py | 50 ++++++++++++++----- Ref_Audio_Selector/tool/audio_inference.py | 6 ++- Ref_Audio_Selector/ui_init/init_ui_param.py | 5 +- 4 files changed, 46 insertions(+), 16 deletions(-) diff --git a/Ref_Audio_Selector/config_param/config_manager.py b/Ref_Audio_Selector/config_param/config_manager.py index 5aaa1b6b..63d23352 100644 --- a/Ref_Audio_Selector/config_param/config_manager.py +++ b/Ref_Audio_Selector/config_param/config_manager.py @@ -25,6 +25,7 @@ def __init__(self): self.text_url = 'text_url' self.text_param = 'text_param' + self.refer_type_param = 'refer_type_param' self.ref_path_param = 'ref_path_param' self.ref_text_param = 'ref_text_param' self.emotion_param = 'emotion_param' diff --git a/Ref_Audio_Selector/ref_audio_selector_webui.py b/Ref_Audio_Selector/ref_audio_selector_webui.py index 62491934..bfbb6b77 100644 --- a/Ref_Audio_Selector/ref_audio_selector_webui.py +++ b/Ref_Audio_Selector/ref_audio_selector_webui.py @@ -146,7 +146,7 @@ def sample(text_work_space_dir, text_role, text_sample_dir, text_base_voice_path # 根据参考音频和测试文本,执行批量推理 def model_inference(text_work_space_dir, text_role, slider_request_concurrency_num, text_refer_audio_file_dir, - text_url, + text_url, dropdown_refer_type_param, text_text, text_ref_path, text_ref_text, text_emotion, text_test_content_dir): text_work_space_dir, text_refer_audio_file_dir, text_test_content_dir \ @@ -173,7 +173,8 @@ def model_inference(text_work_space_dir, text_role, slider_request_concurrency_n text_asr_audio_dir = os.path.join(inference_dir, params.inference_audio_text_aggregation_dir) - url_composer = audio_inference.TTSURLComposer(text_url, text_emotion, text_text, text_ref_path, text_ref_text) + url_composer = audio_inference.TTSURLComposer(text_url, dropdown_refer_type_param, text_emotion, text_text, + text_ref_path, text_ref_text) url_composer.is_valid() text_list = common.read_text_file_to_list(text_test_content_dir) if text_list is None or len(text_list) == 0: @@ -403,8 +404,9 @@ def create_config(text_work_space_dir, text_role, text_template, text_refer_audi # 
基于请求路径和参数,合成完整的请求路径 -def whole_url(text_url, text_text, text_ref_path, text_ref_text, text_emotion): - url_composer = audio_inference.TTSURLComposer(text_url, text_emotion, text_text, text_ref_path, text_ref_text) +def whole_url(text_url, dropdown_refer_type_param, text_text, text_ref_path, text_ref_text, text_emotion): + url_composer = audio_inference.TTSURLComposer(text_url, dropdown_refer_type_param, text_emotion, text_text, + text_ref_path, text_ref_text) if url_composer.is_emotion(): text_whole_url = url_composer.build_url_with_emotion('测试内容', '情绪类型', False) else: @@ -585,6 +587,16 @@ def save_work_dir(text_work_space_dir, text_role): return role_dir +def chang_refer_type_param(selected_value): + rw_param.write(rw_param.refer_type_param, selected_value) + if selected_value == "参考音频": + return {"visible": True, "__type__": "update"}, {"visible": True, "__type__": "update"}, {"visible": False, + "__type__": "update"} + else: + return {"visible": False, "__type__": "update"}, {"visible": False, "__type__": "update"}, {"visible": True, + "__type__": "update"} + + def init_ui(): init.init_all() @@ -789,11 +801,16 @@ def init_ui(): value=init.text_url_default) with gr.Row(): text_text = gr.Text(label=i18n("请输入文本参数名"), value=init.text_text_default) + dropdown_refer_type_param = gr.Dropdown(label=i18n("类型"), choices=["参考音频", "角色情绪"], + value=init.dropdown_refer_type_param_default, interactive=True) text_ref_path = gr.Text(label=i18n("请输入参考音频路径参数名"), - value=init.text_ref_path_default) + value=init.text_ref_path_default, visible=True) text_ref_text = gr.Text(label=i18n("请输入参考音频文本参数名"), - value=init.text_ref_text_default) - text_emotion = gr.Text(label=i18n("请输入角色情绪参数名"), value=init.text_emotion_default) + value=init.text_ref_text_default, visible=True) + text_emotion = gr.Text(label=i18n("请输入角色情绪参数名"), value=init.text_emotion_default, + visible=False) + dropdown_refer_type_param.change(chang_refer_type_param, [dropdown_refer_type_param], + [text_ref_path, text_ref_text, text_emotion]) text_whole_url = gr.Text(label=i18n("完整地址"), value="", interactive=False) text_text.blur(lambda value: rw_param.write(rw_param.text_param, value), [text_text], []) @@ -801,19 +818,26 @@ def init_ui(): text_ref_text.blur(lambda value: rw_param.write(rw_param.ref_text_param, value), [text_ref_text], []) text_emotion.blur(lambda value: rw_param.write(rw_param.emotion_param, value), [text_emotion], []) - text_url.input(whole_url, [text_url, text_text, text_ref_path, text_ref_text, text_emotion], + text_url.input(whole_url, + [text_url, dropdown_refer_type_param, text_text, text_ref_path, text_ref_text, text_emotion], [text_whole_url]) text_url.blur(save_generate_audio_url, [text_url], []) - text_text.input(whole_url, [text_url, text_text, text_ref_path, text_ref_text, text_emotion], + text_text.input(whole_url, [text_url, dropdown_refer_type_param, text_text, text_ref_path, text_ref_text, + text_emotion], [text_whole_url]) text_text.blur(save_text_param, [text_text], []) - text_ref_path.input(whole_url, [text_url, text_text, text_ref_path, text_ref_text, text_emotion], + text_ref_path.input(whole_url, + [text_url, dropdown_refer_type_param, text_text, text_ref_path, text_ref_text, + text_emotion], [text_whole_url]) text_ref_path.blur(save_ref_path_param, [text_ref_path], []) - text_ref_text.input(whole_url, [text_url, text_text, text_ref_path, text_ref_text, text_emotion], + text_ref_text.input(whole_url, + [text_url, dropdown_refer_type_param, text_text, text_ref_path, text_ref_text, + text_emotion], 
[text_whole_url]) text_ref_text.blur(save_ref_text_param, [text_ref_text], []) - text_emotion.input(whole_url, [text_url, text_text, text_ref_path, text_ref_text, text_emotion], + text_emotion.input(whole_url, [text_url, dropdown_refer_type_param, text_text, text_ref_path, text_ref_text, + text_emotion], [text_whole_url]) text_emotion.blur(save_emotion_param, [text_emotion], []) gr.Markdown(value=i18n("2.3:配置待推理文本,一句一行,尽量保证文本多样性,不同情绪、不同类型的都来一点")) @@ -955,7 +979,7 @@ def init_ui(): button_sample_result_open.click(open_file, [text_refer_audio_file_dir], []) button_model_inference.click(model_inference, [text_work_space_dir, text_role, slider_request_concurrency_num, - text_refer_audio_file_dir, text_url, + text_refer_audio_file_dir, text_url, dropdown_refer_type_param, text_text, text_ref_path, text_ref_text, text_emotion, text_test_content], [text_model_inference_info, text_asr_audio_dir, text_inference_audio_file_dir]) diff --git a/Ref_Audio_Selector/tool/audio_inference.py b/Ref_Audio_Selector/tool/audio_inference.py index 8729c5df..e1aa8c2f 100644 --- a/Ref_Audio_Selector/tool/audio_inference.py +++ b/Ref_Audio_Selector/tool/audio_inference.py @@ -52,8 +52,10 @@ def build_post_url(self, value_array, need_url_encode=True): class TTSURLComposer: - def __init__(self, base_url, emotion_param_name, text_param_name, ref_path_param_name, ref_text_param_name): + def __init__(self, base_url, refer_type_param, emotion_param_name, text_param_name, ref_path_param_name, ref_text_param_name): self.base_url = base_url + # 角色情绪 or 参考音频 + self.refer_type_param = refer_type_param self.emotion_param_name = emotion_param_name self.text_param_name = text_param_name self.ref_path_param_name = ref_path_param_name @@ -70,7 +72,7 @@ def is_valid(self): raise ValueError("请输入至少一个参考or情绪的参数") def is_emotion(self): - return self.emotion_param_name is not None and self.emotion_param_name != '' + return self.refer_type_param == '角色情绪' def build_url_with_emotion(self, text_value, emotion_value, need_url_encode=True): params = { diff --git a/Ref_Audio_Selector/ui_init/init_ui_param.py b/Ref_Audio_Selector/ui_init/init_ui_param.py index b3e72a26..df7bf17f 100644 --- a/Ref_Audio_Selector/ui_init/init_ui_param.py +++ b/Ref_Audio_Selector/ui_init/init_ui_param.py @@ -46,6 +46,8 @@ text_url_default = None # 文本参数名 text_text_default = None +# 参考参数类型 +dropdown_refer_type_param_default = None # 参考音频路径参数名 text_ref_path_default = None # 参考音频文本参数名 @@ -110,7 +112,7 @@ def init_first(): def init_second(): global text_api_set_model_base_url_default, text_api_gpt_param_default, text_api_sovits_param_default, text_api_v2_set_gpt_model_base_url_default, text_api_v2_gpt_model_param_default - global text_api_v2_set_sovits_model_base_url_default, text_api_v2_sovits_model_param_default, text_url_default, text_text_default, text_ref_path_default + global text_api_v2_set_sovits_model_base_url_default, text_api_v2_sovits_model_param_default, text_url_default, text_text_default, dropdown_refer_type_param_default, text_ref_path_default global text_ref_text_default, text_emotion_default, text_test_content_default, slider_request_concurrency_num_default, slider_request_concurrency_max_num text_api_set_model_base_url_default = empty_default(rw_param.read(rw_param.api_set_model_base_url), @@ -129,6 +131,7 @@ def init_second(): text_url_default = empty_default(rw_param.read(rw_param.text_url), 'http://localhost:9880?prompt_language=中文&text_language=中文&cut_punc=') text_text_default = empty_default(rw_param.read(rw_param.text_param), 'text') + 
dropdown_refer_type_param_default = empty_default(rw_param.read(rw_param.refer_type_param), '参考音频')
text_ref_path_default = empty_default(rw_param.read(rw_param.ref_path_param), 'refer_wav_path')
text_ref_text_default = empty_default(rw_param.read(rw_param.ref_text_param), 'prompt_text')
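PATCH 48's chang_refer_type_param drives the 参考音频/角色情绪 switch by returning raw {"visible": ..., "__type__": "update"} dicts. A self-contained sketch of the same visibility toggle using gr.update(), which builds the equivalent update dict in Gradio 3.x (the exact Gradio version and the component names here are assumptions for illustration):

import gradio as gr

with gr.Blocks() as demo:
    refer_type = gr.Dropdown(choices=["参考音频", "角色情绪"], value="参考音频")
    ref_path = gr.Text(label="refer_wav_path", visible=True)
    ref_text = gr.Text(label="prompt_text", visible=True)
    emotion = gr.Text(label="emotion", visible=False)

    def toggle(selected):
        # Show the reference-audio fields for 参考音频, the emotion field otherwise.
        use_ref = selected == "参考音频"
        return (gr.update(visible=use_ref),
                gr.update(visible=use_ref),
                gr.update(visible=not use_ref))

    refer_type.change(toggle, [refer_type], [ref_path, ref_text, emotion])

# demo.launch()  # uncomment to try the toggle locally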
From 5081168918d5d2af2a4b219c29987049da6a7fef Mon Sep 17 00:00:00 2001
From: Downupanddownup
Date: Mon, 29 Apr 2024 13:04:33 +0800
Subject: [PATCH 49/72] =?UTF-8?q?=E6=B7=BB=E5=8A=A0=E5=88=87=E6=8D=A2?=
 =?UTF-8?q?=E9=87=8D=E7=BD=AE=E6=8C=89=E9=92=AE=E4=BA=8B=E4=BB=B6?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
.../ref_audio_selector_webui.py | 60 +++++++++++++++++++
1 file changed, 60 insertions(+)

diff --git a/Ref_Audio_Selector/ref_audio_selector_webui.py b/Ref_Audio_Selector/ref_audio_selector_webui.py
index bfbb6b77..0e22acea 100644
--- a/Ref_Audio_Selector/ref_audio_selector_webui.py
+++ b/Ref_Audio_Selector/ref_audio_selector_webui.py
@@ -597,6 +597,38 @@ def chang_refer_type_param(selected_value):
"__type__": "update"}
+def switch_role_and_refresh():
+ global hide_voice_similarity_dir, hide_config_file
+ init.init_all()
+ reset_list = []
+ # 基础
+ reset_list.extend([
+ init.text_refer_audio_file_dir_default,
+ init.text_inference_audio_file_dir_default
+ ])
+ # 第一步
+ reset_list.extend([
+ init.text_sample_dir_default,
+ '', # text_list_input
+ '', # text_base_voice_path
+ ])
+ # 第二步
+ # 第三步
+ reset_list.extend([
+ init.text_asr_audio_dir_default,
+ init.text_text_similarity_analysis_path_default
+ ])
+ # 第四步
+ reset_list.extend([
+ '', # text_base_audio_path
+ '', # text_compare_audio_dir
+ ])
+ hide_voice_similarity_dir = ''
+ hide_config_file = ''
+ # 第五步
+ return reset_list
+
+
def init_ui():
init.init_all()
@@ -985,6 +1017,34 @@ def init_ui():
[text_model_inference_info, text_asr_audio_dir, text_inference_audio_file_dir])
button_model_inference_result_open.click(open_file, [text_inference_audio_file_dir], [])
+ # 设置重置刷新事件
+ refresh_list = []
+ # 基础
+ refresh_list.extend([
+ text_refer_audio_file_dir,
+ text_inference_audio_file_dir
+ ])
+ # 第一步
+ refresh_list.extend([
+ text_sample_dir,
+ text_list_input,
+ text_base_voice_path
+ ])
+ # 第二步
+ # 第三步
+ refresh_list.extend([
+ text_asr_audio_dir,
+ text_text_similarity_analysis_path
+ ])
+ # 第四步
+ refresh_list.extend([
+ text_base_audio_path,
+ text_compare_audio_dir
+ ])
+ # 第五步
+
+ button_switch_role_and_refresh.click(switch_role_and_refresh, [], refresh_list)
+
app.launch(
server_port=params.server_port,
inbrowser=True,

From 8182908f7d147383705f7c2199ac126d9e24b0ba Mon Sep 17 00:00:00 2001
From: Downupanddownup
Date: Mon, 29 Apr 2024 14:14:16 +0800
Subject: [PATCH 50/72] =?UTF-8?q?=E6=B7=BB=E5=8A=A0=E8=AF=B4=E8=AF=9D?=
 =?UTF-8?q?=E4=BA=BA=E7=A1=AE=E8=AE=A4=E6=A8=A1=E5=9E=8B=E5=88=87=E6=8D=A2?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
Ref_Audio_Selector/common/model_manager.py | 12 +++++++
.../ref_audio_selector_webui.py | 36 +++++++++++++------
.../speaker_verification/voice_similarity.py | 23 ++++++++----
3 files changed, 55 insertions(+), 16 deletions(-)

diff --git a/Ref_Audio_Selector/common/model_manager.py b/Ref_Audio_Selector/common/model_manager.py
index ac2e7fb5..b0f43fb4 100644
--- a/Ref_Audio_Selector/common/model_manager.py
+++ b/Ref_Audio_Selector/common/model_manager.py
@@ -8,6 +8,18 @@ os.makedirs(SoVITS_weight_root, exist_ok=True)
os.makedirs(GPT_weight_root, exist_ok=True)
+speaker_verification_models = {
+ 'speech_campplus_sv_zh-cn_16k-common': {
+ 'task': 'speaker-verification',
+ 'model': 'Ref_Audio_Selector/tool/speaker_verification/models/speech_campplus_sv_zh-cn_16k-common',
+ 'model_revision': 'v1.0.0'
+ },
+ 'speech_eres2net_sv_zh-cn_16k-common': {
+ 'task': 'speaker-verification',
+ 'model': 'Ref_Audio_Selector/tool/speaker_verification/models/speech_eres2net_sv_zh-cn_16k-common',
+ 'model_revision': 'v1.0.5'
+ }
+}
def custom_sort_key(s):
# 使用正则表达式提取字符串中的数字部分和非数字部分
diff --git a/Ref_Audio_Selector/ref_audio_selector_webui.py b/Ref_Audio_Selector/ref_audio_selector_webui.py
index 0e22acea..73579b6d 100644
--- a/Ref_Audio_Selector/ref_audio_selector_webui.py
+++ b/Ref_Audio_Selector/ref_audio_selector_webui.py
@@ -71,7 +71,7 @@ def convert_from_list(text_work_space_dir, text_role, text_list_input):
return i18n(text_convert_from_list_info), text_sample_dir
-def start_similarity_analysis(work_space_dir, sample_dir, base_voice_path, need_similarity_output):
+def start_similarity_analysis(work_space_dir, sample_dir, speaker_verification, base_voice_path, need_similarity_output):
similarity_list = None
similarity_file_dir = None
@@ -87,6 +87,7 @@ def start_similarity_analysis(work_space_dir, sample_dir, base_voice_path, need_
cmd += f' -r "{base_voice_path}"'
cmd += f' -c "{sample_dir}"'
cmd += f' -o {similarity_file}'
+ cmd += f' -m {speaker_verification}'
logger.info(cmd)
p_similarity = Popen(cmd, shell=True)
@@ -105,7 +106,7 @@ def start_similarity_analysis(work_space_dir, sample_dir, base_voice_path, need_
# 基于一个基准音频,从参考音频目录中进行分段抽样
-def sample(text_work_space_dir, text_role, text_sample_dir, text_base_voice_path,
+def sample(text_work_space_dir, text_role, text_sample_dir, dropdown_speaker_verification, text_base_voice_path,
slider_subsection_num, slider_sample_num, checkbox_similarity_output):
text_work_space_dir, text_sample_dir, text_base_voice_path \
= common.batch_clean_paths([text_work_space_dir, text_sample_dir, text_base_voice_path])
@@ -122,11 +123,13 @@ def sample(text_work_space_dir, text_role, text_sample_dir, text_base_voice_path
raise Exception("分段数不能为空")
if slider_sample_num is None or slider_sample_num == '':
raise Exception("每段随机抽样个数不能为空")
+ if dropdown_speaker_verification is None or dropdown_speaker_verification == '':
+ raise Exception("说话人确认算法不能为空")
ref_audio_dir = os.path.join(base_role_dir, params.reference_audio_dir)
time_consuming, (similarity_list, _, _) \
= time_util.time_monitor(start_similarity_analysis)(base_role_dir, text_sample_dir, dropdown_speaker_verification, text_base_voice_path,
checkbox_similarity_output)
text_sample_info = f"耗时:{time_consuming:0.1f}秒;抽样成功:生成目录{ref_audio_dir}"
@@ -315,7 +318,7 @@ def open_text_similarity_analysis(asr_file_path, output_dir, similarity_enlarge_
# 根据一个参考音频,对指定目录下的音频进行相似度分析,并输出到另一个目录
def similarity_audio_output(text_work_space_dir, text_role, text_base_audio_path,
- text_compare_audio_dir):
+ text_compare_audio_dir, dropdown_speaker_verification):
global hide_voice_similarity_dir
text_work_space_dir, text_base_audio_path, text_compare_audio_dir \
= common.batch_clean_paths([text_work_space_dir, text_base_audio_path, text_compare_audio_dir])
@@ -327,10 +330,12 @@ def similarity_audio_output(text_work_space_dir, text_role, text_base_audio_path
raise Exception("基准音频路径不能为空")
if text_compare_audio_dir is None or text_compare_audio_dir == '':
raise Exception("待分析的音频所在目录不能为空")
+ if dropdown_speaker_verification is
None or dropdown_speaker_verification == '': + raise Exception("说话人验证模型不能为空") time_consuming, (similarity_list, similarity_file, similarity_file_dir) \ - = time_util.time_monitor(start_similarity_analysis)(base_role_dir, - text_compare_audio_dir, text_base_audio_path, True) + = time_util.time_monitor(start_similarity_analysis)(base_role_dir,text_compare_audio_dir, + dropdown_speaker_verification, text_base_audio_path, True) if similarity_list is None: raise Exception("相似度分析失败") @@ -657,8 +662,14 @@ def init_ui(): scale=4) button_convert_from_list_result_dir = gr.Button(i18n("打开目录"), variant="primary", scale=1) gr.Markdown(value=i18n("1.2:选择基准音频,执行相似度匹配,并分段随机抽样")) - text_sample_dir = gr.Text(label=i18n("参考音频抽样目录"), value=init.text_sample_dir_default, - interactive=True) + with gr.Row(): + text_sample_dir = gr.Text(label=i18n("参考音频抽样目录"), value=init.text_sample_dir_default, + interactive=True) + dropdown_speaker_verification_1 = gr.Dropdown(label=i18n("说话人确认算法"), + choices=list( + model_manager.speaker_verification_models.keys()), + value='speech_campplus_sv_zh-cn_16k-common', + interactive=True) button_convert_from_list_result_dir.click(open_file, [text_sample_dir], []) button_convert_from_list.click(convert_from_list, [text_work_space_dir, text_role, text_list_input], [text_convert_from_list_info, text_sample_dir]) @@ -975,6 +986,11 @@ def init_ui(): with gr.Row(): text_base_audio_path = gr.Text(label=i18n("请输入基准音频"), value="") text_compare_audio_dir = gr.Text(label=i18n("请输入待比较的音频文件目录"), value="") + dropdown_speaker_verification_2 = gr.Dropdown(label=i18n("说话人确认算法"), + choices=list( + model_manager.speaker_verification_models.keys()), + value='speech_campplus_sv_zh-cn_16k-common', + interactive=True) with gr.Row(): button_similarity_audio_output = gr.Button(i18n("输出相似度-参考音频到临时目录"), variant="primary", scale=4) @@ -983,7 +999,7 @@ def init_ui(): button_similarity_audio_output_result_open = gr.Button(i18n("打开目录"), variant="primary", scale=1) button_similarity_audio_output.click(similarity_audio_output, [text_work_space_dir, text_role, text_base_audio_path, - text_compare_audio_dir], [text_similarity_audio_output_info]) + text_compare_audio_dir, dropdown_speaker_verification_2], [text_similarity_audio_output_info]) button_similarity_audio_output_result_open.click(lambda: open_file(hide_voice_similarity_dir), [], []) gr.Markdown(value=i18n("4.2:如果发现存在低音质的推理音频,那么就去参考音频目录下,把原参考音频删了")) gr.Markdown(value=i18n("4.3:删除参考音频之后,按下面的操作,会将推理音频目录下对应的音频也删掉")) @@ -1005,7 +1021,7 @@ def init_ui(): [text_work_space_dir, text_role, text_template, text_refer_audio_file_dir], [text_create_config_info]) button_create_config_result_open.click(lambda: open_file(hide_config_file), [], []) - button_sample.click(sample, [text_work_space_dir, text_role, text_sample_dir, text_base_voice_path, + button_sample.click(sample, [text_work_space_dir, text_role, text_sample_dir, dropdown_speaker_verification_1, text_base_voice_path, slider_subsection_num, slider_sample_num, checkbox_similarity_output], [text_sample_info, text_refer_audio_file_dir]) button_sample_result_open.click(open_file, [text_refer_audio_file_dir], []) diff --git a/Ref_Audio_Selector/tool/speaker_verification/voice_similarity.py b/Ref_Audio_Selector/tool/speaker_verification/voice_similarity.py index 3c7ec718..4f11fdf2 100644 --- a/Ref_Audio_Selector/tool/speaker_verification/voice_similarity.py +++ b/Ref_Audio_Selector/tool/speaker_verification/voice_similarity.py @@ -6,18 +6,24 @@ import Ref_Audio_Selector.config_param.config_params as params import 
Ref_Audio_Selector.config_param.log_config as log_config
from Ref_Audio_Selector.common.time_util import timeit_decorator
+from Ref_Audio_Selector.common.model_manager import speaker_verification_models as models
from modelscope.pipelines import pipeline

-sv_pipeline = pipeline(
- task='speaker-verification',
- model='Ref_Audio_Selector/tool/speaker_verification/models/speech_campplus_sv_zh-cn_16k-common',
- model_revision='v1.0.0'
-)
+
+def init_model(model_type='speech_campplus_sv_zh-cn_16k-common'):
+ log_config.logger.info(f'人声识别模型类型:{model_type}')
+ return pipeline(
+ task=models[model_type]['task'],
+ model=models[model_type]['model'],
+ model_revision=models[model_type]['model_revision']
+ )

@timeit_decorator
-def compare_audio_and_generate_report(reference_audio_path, comparison_dir_path, output_file_path):
+def compare_audio_and_generate_report(reference_audio_path, comparison_dir_path, output_file_path, model_type):
+ sv_pipeline = init_model(model_type)
+
# Step 1: 获取比较音频目录下所有音频文件的路径
comparison_audio_paths = [os.path.join(comparison_dir_path, f) for f in os.listdir(comparison_dir_path) if
f.endswith('.wav')]
@@ -113,6 +119,10 @@ def parse_arguments():
parser.add_argument("-o", "--output_file", type=str, required=True,
help="Path to the output file where results will be written.")
+ # Model Type
+ parser.add_argument("-m", "--model_type", type=str, required=True,
+ help="Speaker-verification model type to use.")
+
return parser.parse_args()
@@ -122,6 +132,7 @@ def parse_arguments():
cmd = parse_arguments()
compare_audio_and_generate_report(
reference_audio_path=cmd.reference_audio,
comparison_dir_path=cmd.comparison_dir,
output_file_path=cmd.output_file,
+ model_type=cmd.model_type,
)
# compare_audio_and_generate_report(
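The init_model() factory above picks one of the two entries registered in speaker_verification_models. A hedged sketch of how such a ModelScope pipeline is typically invoked on a (reference, candidate) pair; the list-of-two-paths call and the 'score' output key follow ModelScope's speaker-verification convention and should be treated as assumptions, and the wav paths are placeholders:

from Ref_Audio_Selector.tool.speaker_verification.voice_similarity import init_model

# Load the ERes2Net variant registered in speaker_verification_models.
sv_pipeline = init_model('speech_eres2net_sv_zh-cn_16k-common')

# Compare a base reference recording against one candidate. ModelScope
# speaker-verification pipelines accept a pair of wav paths and return a
# dict that includes a similarity score (higher = more likely the same speaker).
result = sv_pipeline(['refer_audio/base.wav', 'refer_audio/candidate_01.wav'])
print(result)  # e.g. {'score': 0.82, ...} - exact keys are an assumption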
From b8356880dcd512099897c603fe8b6e8d7376d179 Mon Sep 17 00:00:00 2001
From: Downupanddownup
Date: Mon, 29 Apr 2024 14:19:24 +0800
Subject: [PATCH 51/72] =?UTF-8?q?=E4=BC=98=E5=8C=96=E4=BB=A3=E7=A0=81?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
.../ref_audio_selector_webui.py | 30 ++++---------------
1 file changed, 5 insertions(+), 25 deletions(-)

diff --git a/Ref_Audio_Selector/ref_audio_selector_webui.py b/Ref_Audio_Selector/ref_audio_selector_webui.py
index 73579b6d..9c98be04 100644
--- a/Ref_Audio_Selector/ref_audio_selector_webui.py
+++ b/Ref_Audio_Selector/ref_audio_selector_webui.py
@@ -556,26 +556,6 @@ def change_size_choices(key): # 根据选择的模型修改可选的模型尺
return {"__type__": "update", "choices": asr_dict[key]['size']}
-def save_generate_audio_url(text_url):
- rw_param.write(rw_param.text_url, text_url)
-
-def save_text_param(text_text):
- rw_param.write(rw_param.text_param, text_text)
-
-def save_ref_path_param(text_ref_path):
- rw_param.write(rw_param.ref_path_param, text_ref_path)
-
-def save_ref_text_param(text_ref_text):
- rw_param.write(rw_param.ref_text_param, text_ref_text)
-
-def save_emotion_param(text_emotion):
- rw_param.write(rw_param.emotion_param, text_emotion)
-
def save_work_dir(text_work_space_dir, text_role):
text_work_space_dir = my_utils.clean_path(text_work_space_dir)
rw_param.write(rw_param.work_dir, text_work_space_dir)
@@ -864,25 +844,25 @@ def init_ui():
text_url.input(whole_url,
[text_url, dropdown_refer_type_param, text_text, text_ref_path, text_ref_text, text_emotion],
[text_whole_url])
- text_url.blur(save_generate_audio_url, [text_url], [])
+ text_url.blur(lambda value: rw_param.write(rw_param.text_url, value), [text_url], [])
text_text.input(whole_url, [text_url, dropdown_refer_type_param, text_text, text_ref_path, text_ref_text,
text_emotion],
[text_whole_url])
- text_text.blur(save_text_param, [text_text], [])
+ text_text.blur(lambda value: rw_param.write(rw_param.text_param, value), [text_text], [])
text_ref_path.input(whole_url,
[text_url, dropdown_refer_type_param, text_text, text_ref_path, text_ref_text,
text_emotion],
[text_whole_url])
- text_ref_path.blur(save_ref_path_param, [text_ref_path], [])
+ text_ref_path.blur(lambda value: rw_param.write(rw_param.ref_path_param, value), [text_ref_path], [])
text_ref_text.input(whole_url,
[text_url, dropdown_refer_type_param, text_text, text_ref_path, text_ref_text,
text_emotion],
[text_whole_url])
- text_ref_text.blur(save_ref_text_param, [text_ref_text], [])
+ text_ref_text.blur(lambda value: rw_param.write(rw_param.ref_text_param, value), [text_ref_text], [])
text_emotion.input(whole_url, [text_url, dropdown_refer_type_param, text_text, text_ref_path, text_ref_text,
text_emotion],
[text_whole_url])
- text_emotion.blur(save_emotion_param, [text_emotion], [])
+ text_emotion.blur(lambda value: rw_param.write(rw_param.emotion_param, value), [text_emotion], [])

From 1de89feb7b2bbea1bbbc5d03f35d51bb96db9381 Mon Sep 17 00:00:00 2001
From: Downupanddownup
Date: Mon, 29 Apr 2024 14:49:27 +0800
Subject: [PATCH 52/72] =?UTF-8?q?=E4=BC=98=E5=8C=96=E4=BB=A3=E7=A0=81?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
Ref_Audio_Selector/ref_audio_selector_webui.py | 2 +-
Ref_Audio_Selector/ui_init/init_ui_param.py | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/Ref_Audio_Selector/ref_audio_selector_webui.py b/Ref_Audio_Selector/ref_audio_selector_webui.py
index 9c98be04..64835b00 100644
--- a/Ref_Audio_Selector/ref_audio_selector_webui.py
+++ b/Ref_Audio_Selector/ref_audio_selector_webui.py
@@ -818,7 +818,7 @@ def init_ui():
text_api_v2_start_set_sovits_model_request_info])
with gr.Tab(label=i18n("第三方推理服务")):
gr.Markdown(value=i18n("启动第三方推理服务,并完成参考音频打包,模型参数设置等操作"))
- gr.Markdown(value=i18n("2.2:配置推理服务参数信息,参考音频路径/文本和角色情绪二选一,如果是角色情绪,需要先执行第四步,"
+ gr.Markdown(value=i18n("2.2:配置推理服务参数信息,参考音频和角色情绪二选一,如果是角色情绪(第三方推理包),需要先执行第五步,"
"将参考音频打包配置到推理服务下,在推理前,请确认完整请求地址是否与正常使用时的一致,包括角色名称,尤其是文本分隔符是否正确"))
text_url = gr.Text(label=i18n("请输入推理服务请求地址与参数"),
value=init.text_url_default)
diff --git a/Ref_Audio_Selector/ui_init/init_ui_param.py b/Ref_Audio_Selector/ui_init/init_ui_param.py
index df7bf17f..c208e165 100644
--- a/Ref_Audio_Selector/ui_init/init_ui_param.py
+++ b/Ref_Audio_Selector/ui_init/init_ui_param.py
@@ -129,7 +129,7 @@ def init_second():
text_api_v2_sovits_model_param_default = empty_default(rw_param.read(rw_param.api_v2_sovits_model_param),
'weights_path')
text_url_default = empty_default(rw_param.read(rw_param.text_url),
- 'http://localhost:9880?prompt_language=中文&text_language=中文&cut_punc=')
+ 'http://localhost:9880?prompt_language=中文&text_language=中文&cut_punc=,.;?!、,。?!;:…')
text_text_default = empty_default(rw_param.read(rw_param.text_param), 'text')
dropdown_refer_type_param_default = empty_default(rw_param.read(rw_param.refer_type_param), '参考音频')
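PATCH 52 also widens the default cut_punc set in the request URL, so long test sentences get split at every common Chinese and English punctuation mark. An illustrative sketch of that kind of punctuation-driven splitting; the real splitting happens inside the inference service, and this helper is an assumption for illustration only:

import re

def cut_text(text, cut_punc=',.;?!、,。?!;:…'):
    # Split at any configured punctuation mark and drop empty segments.
    segments = re.split('[' + re.escape(cut_punc) + ']', text)
    return [seg for seg in segments if seg.strip()]

print(cut_text('今天天气不错,我们去公园散步吧。好主意!'))
# ['今天天气不错', '我们去公园散步吧', '好主意']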
From ed8d276ac91d81d8b7e745a47e1cfd1f04fe2851 Mon Sep 17 00:00:00 2001
From: Downupanddownup
Date: Mon, 29 Apr 2024 15:02:22 +0800
Subject: [PATCH 53/72] =?UTF-8?q?=E4=BC=98=E5=8C=96=E8=AF=B4=E6=98=8E?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
Ref_Audio_Selector/ref_audio_selector_webui.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/Ref_Audio_Selector/ref_audio_selector_webui.py b/Ref_Audio_Selector/ref_audio_selector_webui.py
index 64835b00..033035da 100644
--- a/Ref_Audio_Selector/ref_audio_selector_webui.py
+++ b/Ref_Audio_Selector/ref_audio_selector_webui.py
@@ -918,7 +918,7 @@ def init_ui():
value=init.text_text_similarity_analysis_path_default,
interactive=True)
slider_text_similarity_amplification_boundary = gr.Slider(minimum=0, maximum=1, step=0.01,
- label=i18n("文本相似度放大边界"),
+ label=i18n("文本相似度放大边界,因为原始模型输出的相似度差异太小,所以进行了一次放大,放大逻辑为,边界值以下归0,边界值到1的区间重新映射到0-1"),
value=init.slider_text_similarity_amplification_boundary_default,
interactive=True)
slider_text_similarity_amplification_boundary.change(
@@ -989,7 +989,7 @@ def init_ui():
button_sync_ref_audio.click(sync_ref_audio, [text_work_space_dir, text_role, text_refer_audio_file_dir,
text_inference_audio_file_dir], [text_sync_ref_info])
with gr.Tab("第五步:生成参考音频配置文本"):
- gr.Markdown(value=i18n("5.1:编辑模板"))
+ gr.Markdown(value=i18n("5.1:编辑模板,占位符说明:\${emotion}表示相对路径加音频文件名;\${ref_path}表示音频相对角色目录的文件路径;\${ref_text}表示音频文本"))
text_template = gr.Text(label=i18n("模板内容"), value=init.text_template_default, lines=10)
text_template.blur(lambda value: rw_param.write(rw_param.text_template, value), [text_template], [])
gr.Markdown(value=i18n("5.2:生成配置"))
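The new slider label in PATCH 53 describes the amplification rule in words. Written as a formula, it is a sketch of the described mapping rather than a copy of the project's implementation; the 0.9 default mirrors open_text_similarity_analysis(similarity_enlarge_boundary=0.9) earlier in the same file:

def amplify(similarity, boundary=0.9):
    # Similarities below the boundary collapse to 0; the [boundary, 1]
    # interval is stretched linearly onto [0, 1].
    if similarity < boundary:
        return 0.0
    return (similarity - boundary) / (1 - boundary)

print(amplify(0.85))  # 0.0
print(amplify(0.95))  # ~0.5 after remapping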
From: Downupanddownup
Date: Mon, 29 Apr 2024 15:42:46 +0800
Subject: [PATCH 55/72] Add a process flow description
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 ...3\351\200\211\346\265\201\347\250\213.png" | Bin 52186 -> 97204 bytes
 1 file changed, 0 insertions(+), 0 deletions(-)

diff --git "a/Ref_Audio_Selector/\345\217\202\350\200\203\351\237\263\351\242\221\347\255\233\351\200\211\346\265\201\347\250\213.png" "b/Ref_Audio_Selector/\345\217\202\350\200\203\351\237\263\351\242\221\347\255\233\351\200\211\346\265\201\347\250\213.png"
index 318f6697458f6ea0a095b219b930e698c74f6524..bedc31484e12ca081484427312b5a541acd6ef35 100644
GIT binary patch
literal 97204
[97204 bytes of base85-encoded PNG data omitted: the updated 参考音频筛选流程.png flow chart]
z1do}v=^WLI7Z9n~vk~#|H(-YIj+;mP@QOUvdtN1|#BHRf7X;%nv86A(u3L?rM!Fxo zz?AQN;jAs+5lI>?6%ttRzSnpqlOu+1p&X$Tq4S3q-iGH>@BEPE(bKj2w0}yE1~9d2 z7s29j!547@YP4-r84u)@Tz3WxOOGesUwKTI?YI_wvX>_>>}Rim;G#N>>4^*i{adu% zeYEoOFe8gL70bu#GQd4u_=vdi!Qw~ta{6==ru1m}{=pSmzRiP_1AL34aWrMLCK>iF z&Yyx2e1*MrqedJSawxfZJx(Yi)sG^po;UN~MEKs#RdLygem?DLJhy_`6`M#*I@<3& zV#GM_1_W3JV{hUqml-t#L}~h3Nd?C)A;LT1BPRpR z|H*%;9`|qVe{Mc%BCvNnr`mi0!EjcnWAgnKk^R=VZB?l|iLR?-QU!!Ck%v_)cV7)f z&C`e+T0{S>f2Ed_Lym62SLfX~6=l7j@!!9*_fpqZ+cf5>@8o){e^TCSjh-5w-(SQ) zrVL3Ebl2*sL6QWR;c)PSYQY_qGw0qAgx5GaRJ0E|9CP(428zIPO|^E;Nre?mM}~>7 znfeqIS2oBOEV#b<9sqSWu=N0d()_EBZXL z6m+sgX$qhd35dSZ7qyNVS`5^v~uHIuG!QgfzzZ9&=?tuZg*-DT!t|0KfKqx{PwNv!U$>)hmwZh1{~y zlt%8QMX+mm)Q~^41fr2C_~B@@Q4%{wfPJy{9t8mu2FQdjiAp1{Il?1-@!|z&?U^qn zx-8EvEEEkbK}qG%O^7Wvy3%TBItT=DS7t%^W%8w$EXR^frsnwMOAMd3j>qhbd-@4rUi#!6v>`J9$Bq?p;oVj{gHV`-oilepNx5i?GO0gc zZHS8uT;V|71+aEsFQsFhx$fSD1w}4OfG_V$ckdGwHPGOaz+*3NHxDFa4K4hLyj@{{fs^D6vOW^+H^T`A#vDl zIo{>T+Wql~i7;e67p|}chxsFCCcp7G-<5|Jrl#TB$PEw%0;ewuT}`Q~XgvCQce796 z+8i7l3?Ko;vTay2@B(OfH_$yEq;o&qT?1H0Lhl8Vaq<|gioP`OI? z<3{a24$PV)%_b$leFh-kuXtU%;|aUDGFtvW?z<;Pd$UtTfdGbpwOcmg5QkZzqo8nH zZ@C2+Qq5=2#JpoqcFXau;nc-WVo~JXT)sI-O14BH+K_6!@PG~Dxo4kNJ=?A0(bCL} zx}A$kpOn~IS)AAa1K!dE;#SA>A@w`d6?`+X)0Nd^yPKkDg^By9*|!lLGfBm>&uCEl zK^P=n&so227$a%i3TIvQ1oLe3nW8Gdm=_dUE^Mv$V-iDA??R|qe#eTBnX)4nA7(Tp z!a7KoDD0Wzp&%%hir!YbXdXmY`UD^{_$S6i1D@PTvgT!}AK&y@3@X8m;Ay;@U` zqs}@Ev8X%dxQV=WS3=acb!vZiA_QxzK)L4~LJhbgY)uMc?tI6@y9WN%dL*KJ!T9IT zg3^9wPYbP%>@7SJ(Fj+ckA47E5HGT2Xf9-P;|1ioN}UNu*N2V8odYNnp>>CT&Ip3U z2w;6+CK(+v`Dx`POtIV6IR)T;1+1b7Oi$(T$cz9))M3-s0 zliiK3)DUHLT6Wg8gK8+i0eB`dY;U(btUea1ayQeFC>(7Z#w%}#>I>N#xS*Pc;*6AO zyujzh9W=+uDnpgb`MzG#sP+;yIRe za<_OU6h#+*_>C)0wt0v3&y=ZYJzh?P_m)uKSCUN?0au>B~-GC2^=x1COXoTnS z?tPa3Gjn|}Sm6-eUr+htX+2&?L*o_B8JD8HvvC~6l9wx{uwv=i*|z)%@SXkb>WlKT z5AmP3_kRB2vr>D|DRyPe=dp~;Ahgxh{vorK=qCvpO)5%?xgrd4z-<(m&3FLK+9e0D5$?aPUUAg?Jr zi(rIM{Q-$6$ZFZ!-%F^up5acmtpl2%T?*zbu8K5xUHkeey9_Im$3vLo?e}ztZ!F}m z0chibaozJFLQZb(jo#NDPq z-H^qF5?z{$n}MNWS#dGgN&te$BN=RZfHqlsyp>lACWW#=JtLuejn6$?R(mnD8YeYU zwZ(P?^{WV-Nu6E5cRplmL&-YY98GBDWfq$B_|P43tD8Qm^UTyAg+ouP=v7eTORyk- z!x40a{6x?hK6Ly_Eivw3SDI3vckjMn+2nLwpJmRd&dA|t%d?;F z&%X(KlL)EK8;Zx$G3MF1dtLHW*jOSYkNq)9j%r_+Yp&%u(yRR`lP4&Q|1skYXuLV1 z8{@DFz8~EKUGN=XodV{oU&FRd`*@vv?6VGl;GmaRZ}7$G=S8y(&=-dA0nXXow;)=3 z@F_F#XGaJ5XcOEcodg()B^a5Ps7qWXd@%E1){-PeDBh>qKbY6j)bs@$s79AJ0FTBX z6%3y}IWyCf`J-^z1$?%vEKe+JtJ(`q8rQG2u1F_{aLyJWZm66>cE6A2#B#IHvSNDw z6kFRaahJrwGQEbK$2PnNx?>)oEC7z^%g-7y65{3CF+`ev{Id>i z4plaFmRw%h})qdu`Kf{+TfxV%Ad5ue_4 zAYL?O5avK`V5h8KS-7C!RZsZyboOuKlU)3s8=-*j1Wf$*E0!6onqKO8tbu@Mgd#2u z4U^C$uE|)byk}_mEqMoUYXXV;XfSK9S^vojOAln`-8b6qx1?X#wh{}%#>fCH)YME^ zjXpc3qTF58IeGxbX@IF*@8EJ{+GG5QM-PxIu#0w;KaMCbhssDC^_P=8%%TUJ+h5lg zFlMFdUo`E4&`ZIH8|tf{paa9DLP4;m(puRVU^%)s{Ds7_mSn+7=Ur-1G;}}Z;`tuk zS_Z%-mTM4IqLO_NrZemaFzR7BciAT$st)$|p<|SCw|N&7x_}1|K<@Ds-3QqQ$SZo; zv$5x6Njd_Y`~-a~c@TXA)UgI@))4H9%byz$;+3;T+rV~#a2*W4KyvLlU@&r1Qya{9 z0c_niW`(=*MJ0RX;HOZK$d?x-X6$5ul^GedF>fm931b2^fQR$*#^g;Yg-KVFk9nPR){OER`4541^ z-IK8cgdCu#cPoENjQ@rLmQruHJelhhYeNmEW)mz>Z3-Ort+^f`3hd1|z)1y*2X=z` z*hwXs?qN%EkBQ*n_-J+alG2wA39lzr+;;@HIjUsG#LgiG{>>BiXXE2vH4pQf7jlcu za&k*wjaUgaS-w{yjW!?;z8O{UlRWpD1UV32dX7&wn6ybS4CBWQL!Gq5cmJoCbp~R4l`3Ofus$na41$?o4JP;$D;g6@g9T{ zJCNgj!k_L}&Hfr+J6^Fr^4hH=>s9Jh6ZMzcU%oWSXI>((g@kW0sZT$BJxJ(o4Qw*> z(5Nll3}Y-Cz7C1*+3{QBEE;42oSdBNn$az*GlKY>168l{^10)M&+F1zf+;;TA5_%T zoS?~IK2o_gBXaWKNb%dNBD@d^I}&59&I=8q>_KD|A2pADhj!>X^j+>)Dn3e{kauxE 
zetO7Sd;D`mTdUxZkg*Q&h5#4_2t-p>p2t!)Rjvc@#!7mrqd6px2%;4VJGdFuOnhwx-s@{Q>?eZqK*WYHnh(zt`cxk=xy1cK=>eTlr$)N%=vmwz_j`omieo z(S{U!Tz6kv-vht9{nQr`l>Z)#B301z!VWng%}m?+=5O71oeOyUo1*q|xP0eL5m`=- z^y*YcGnp8n1L`ZTOQkQOkJGF>?mzA)bKhcpJ!*YWY~1sbhWuL^5I~{&AbZf%UrRFD-#s?`EZYbA04-f8U#xD%Y%=GbGheFibim3e=^DDU-TNa&i|B5=>0mU2iJ zMXY}4_|hde9kB_xO#hl^=S}J;gT3-9|0M)2ARf;74Bmsj$vM24`RwjuswH4hChvJ^ zTR8nJv|D3`ty1UaDf8lnwnls+(R!T^o%zXFh}4)B=H$_cvhgQj%Ea{lVgZctr$g!s z5KQbP>SDM(ZqsxxO3g#;*&0@Ny@NPU#tm<`pb7}j>(pbLEQlMJ`r_Y~!m68n0xN_E zi3DPN7OSk7$0vcUTdrX_?soN|)uAFNybDq8gtzYD0VQ;Jf$b z2W2Bn)e8QjW&<>>w2pu4!U%VFPZAtoK5P5-?VB_mt`7QiY(M(`-^8=MX>l%V=3h92 z=8tzC(msVtOzd239T=He7#T@Feq5x=mhu7&Y<``VDR}F-2yn!J;Rp1o$BzNa6I#=6 z$87h07ILp2D>sL?)bn~wN~mh&Zn=h>l=K{r`OpdO&kn9yx+e9fs$^%##fn<;>LXKX z%DsX2HmB-Or$Y(|PZ$ddkCeM+0s?9|ehnC$N2b zE@=0F&Q%a_?7JN&y+zMzyrB=otQb}DC0DGsTY7`K_qelQ4mkK?nEJ$~O z&~m-`HYyVpJW&o_7}DLAhtTVnOtEKrM9YF~Z}5vj?v^KuCZJ~Hq#bk=>DelbeOQhT z00ZM08XQdMj&oOpeJ>-ii*b2n*C!1q;}2P@k7{oT8m@wFn{qP4k${629E(lk7j&a zU)%#LkUQV({3zKTuiIDDa6caY0Y%?1A!qiWhl~PE_yNr=7M@I*@&AA@Zq87g{p=BNETm{eJ7|S9pQ7DvWa=|hyQTbcy#&o z9|sEX!@T9Gudh$w)cX7PZ>aXcY%oU2UUbidxC%UJEx}Lj=tmd>ptcz4C&B&C9KxKe zcYQisu+CCp)SzUW+@#c_HVng8`v}~4X?%go2?XPZN$~p_K#c$|=&Y2K@nj#`OU{tu zi}NdJ@Z24qv-=9co0#x}OA@;e7~gUG6^G$@Fi~{}g4Mg+vbocLA zA0IkZjkp755^z?nb9Uaf1z_$T*9KXLHQ)7=Y$!(oB+P&}uHm(IkJfTCt;;X{yRotv zK(~RqaoG^fwC`!D5ENN8A6e=Y4FJUhQr8CA=NVpZu+V{P3UG4mXU|BE!(hLXh>bW~*V|@Qtp<0|oJJOZfSP^)qi`8peLKwV7L5l4}9V6J$`qx4mc1_3zt@ zX&A?p_9cfrwW21I5=qAc7&d_1z5c-o9V(KBn86vW>>(sv?JX@2Y$m0%&MF&i&G$;C zu0brB(h7yiAjt`Eg>D!IcMI^u1(kD*bu-N-`e3Cti3nmw;%TjaoE0EtKpLHr4ZJmo zCP7oYp7ZfPP@yKsjRU%Hc{nyEMpZ?HY+lSqWzaJeupd>xxZ_-ecv^t*LtI>tq+X2J zIlx}@mY&fuBJ`PnsR6kWyuDl|_nkY6JiF|(gx8^_*wWHc|3L5N*mFimmXD5(Ak{!C zYl+V~bL(1s)R!VcyuoAew@t*OhhGIo2nc7}jmk+&6Kx^mWHUg}8h}=~J})+s?u}J> z%uVX*dKT+dq@>(rWMuqR0i>VRr7{dzdj=DEHs~OC>$))m{|UYo$i=|*kxm~}i|wjl zMXv#V7Q)Y>0kbem%msuV8J0D9R)i$b;R0Yd!So$?#8^9rlge)^7eq6g)-XtTn_VoA z3=A;SifTQ>XLlvE-(9Mt!L4QYx8kLeFRu)=8&iS%zrpgpVyexFP~GNP068bHiD93C ze-MgnR_5n-T9n-fCgyNOj}F(WixfE-nRg|=&tOF=L=tek? 
zf$CPncS?UKj(}%1An^dA33dIiYeANq+uOhn>yZMAk~2_g5=A`R!Kn_iX^{6`TU)ye z>`%Q*@U3>^t*jN*3%f`4__t+?Ei>~oF4uO?g#x&2&(boPmo)j@Qazm)Mnh@WKEJ9; z0Foc{Ju`?LuX;6p-{rxYwljM9%P4V1K&zJBX{t^o_3mjSXc;lYz-kz}HgOnH~%}=J| zEYSg8LbVTg5rNteuKu;%q(9VOQFOLc49hZ_o`9;+Sd-j?E)7?3qs8DCSDWXTmhORcfh;Ysr>^ofSQA{&CZzL&ja(m{LE!T1 z#3j;#yb%py^Hj999C<|4u@>bk0p?_&wYmSbi;3&fez38z0iL;w9lI98mCG|kc*^Te;Pj-8|TvreXy0uQp$jR}> z%G&9xcy?Sv1TtWUGRi7ODiJ`|^;ps_{sH1)j<|I;i#p?=S*fSzd7p;0)rKptU%wXF{rf)b z*MYLlcYs2Jw`HdoWcW~}FU`p@12^ecMQW;*Jy*a)-RdlF?Rsf(J2?mS?T}uRC18@y z&JvZfx!4zk@e-Dvy%?U#IMsSr9ZKhm^A z_FF6izN4G#Fc@Ix1N!e!T43Ir3fpc5?TgiuG*-vhwpWUpG8`7e4=q8jOOJ<&fihD_ z)cYU{wrV_jSh@@(?Lc^{c5$I@OL-8b_<=v8tXXfODfF{l;Nq*qwz)bA@$!M0Y8c)6L z2!I1{1^-w!n|$u^H*?+lkq@e@tgPqDUtp-9^Is^VW{?|<1~CPqx`aYGRe?-S3=KdW z`&RA^nVhgxEx}sDwxd|P6QS~^_uf27q@{!%O+Repmt5i?euShG9UksghF8?biujgB z?B4!ZpXJj&OBw`1 z_B^2v1DI4B*DX9Iz32|D5D!n$L^+P`oPtLb3XsfAWkn z_!elyuKf)=?hc>V3!0mmu`oA>KmfQ4E|#&U@aQ7#URbk2BPhmug4et{=7Sp>26?gN zq<@2bGljM-D19(7nY6yKUl4w^wdJ%B%BO~F=WcK0MW>`5rPqBhc^l|plrQv~LFl0B zA!=6~s9ipll&o!9w>BrxfGT}pyvA0@i?Fp-767@g{cHeFp!W9W%`@xi-v}ksW!IWD z&lwxYg(1H^SU-TFuU4_r9+)SK{E<*g{iICa9jTBp@VmSm(!3Kh;XQX#3&M|%j@|~r zK7(;%Kn3LRk^x4Tx(g}dxwBNxuE$K3HJ+E7OWa?G%2#|x`uXV(=&-Yh>)&BTrxdwi zBdf`4*|h;z-Th9a*nUzPAay3^x0LvdHh2XDoT0`IK`;b1GL+rWTweufO zSa*r5(FdZjCIh&lfQ7~LJl1m4_QIgGf~QHCEWP7Tc+_+8kIRwI2Sqt%d%F>0Q(wEQT*@&(FW& zT=IiVD=#BM*kj8&V~BmexE)Fq5w%wy-iJCE3@&i%7QL7yI=msb-yx@>60(-`xsU=f zywBQ_*1vig?gjcB6yEM2#Dm4>z7YiSDl1288d>=nXC7tC3 zNZ#Oyl!nazAith4&oJx9eZLQIt zwbhC_`l_+0Bw=m2!sM%LLhOsrekKUSdwIY|L6fnp z4&)2#_PXpKq5}1GP-coj=}JHHU;lhOi2`j7;A2@Bdh}g6E-{h2ChH%B$EYR_KgaAZ zuOMvVtNl$;Yh=6bxIafsXGyD(F|hi9&*4*5mHy3~7`+;gZK!KCx}M~`@Bp&e7h#}C ze2&MU%7u&0mLX!FV?KT!f^_;B-jo?Qy&O>-^W6*}VKgb06yT?FF6?WXi!d#|d51!Q z*GNqm=QE97A&?>johM2|Q($esNrE6ha||Rr88QVo86J{Q9hf*=aV@s&syy5ojX9pw zB->8@o^m6$@4kZYANAo&?fG4a2?@&jq#>i7ZEYik$mi)phBb)$8gu=!9Ou}n1(I{O9zy%pO zb_wuXb_N58X#djPlip8d3>C$aAb|ONxf{>UQ(zPIBvPKhV)=2|*bWR~ZAv~^1}N=~ z8&6xgE`I#-7jUK2bojt`U-tAVI$5C~tW|phu6L~MGcPb4`0lJo3cpBuRxvyCjNpcMfw6`1YR~PcCKi# z6Xl1M4OX#c(T**0kh8F!+Taj+sEgAJPsCO&2|CVY}YL^nX`#B4}XmEt7y1>#`#L{%_Vj^wX%Gg zrWMB1zlWJe0NB;Z-hQo1Xu_cE$ruofLn2rQBXA(;VzED{_r2;0;E#b)V#ZC~LJWdr zm~qt#?6LFNj0flOa5X3grlh#I25My7oSaI4H$N*&Vf*0c&j;+6u@Q4HZcyydxnfR+ zbz1%&v=b|=K|mCqd|=E2yXO!H)KuE4PW(bU=ANEaMKz|LS<0;-P9o~L z|H}B_vt>UnH|i_e30wofFIJm%Z}u&B^&WX*8GcifB6QA$K~fi880!W_mKlj8 zIG|~g;h|7JaLoD3tfu%-Mh}2a%oX zjU=e6^w**mt0j-tC65}jm)4cSny$3sG@*lVS9aph7a=^940v^MYeAAqMS1Znq$5D3 zJC-MX|AM;(6&=}tlnCq5o2x_ndjNTK6`anqCwsMK5b> zXdu7yhC``Y)>i?Ctb|GI!L7_nTa&Lg9FKx+&WJ^eJ z>-EK?TUB+Iz9PM?BjZm~+6RZu#t9Zsz0UB-3*Fs$&3SHk;Fj?xA7MxVqGFjk+#qgp z1gXD0v_<8NL&ecSH_kxUuVo5-Gb>`-J*!e*(^Y^!C&&dp8g@&K{p86@kAIXuh5yZp z=_*#*^re8R@BY^C@Hdb$2R=n84QOa;E<9=VD2B8Jl>IpJC*mDG^ESGEjAIHNR*k(k z09me4&3;d^3=|#p!yf2yjNB$q z58Q4KXm|>@kzr<$EJQBS;?eNV>p1SZqN^3v3l)`#ER9po5o#3&=6h$F7prC|?~RCN z%m+&VTUnYw4_)bD&8oMZhll81AW4vnh^;38-s+(0{2X^=nMTp`Q4FR2^&=o&64+ls zSb;gi*D6%Yp^X@n&oU%h%7gc|XK-Vn-;j~n%Mp3fEgQl6cyNMD+( zkk~JY(P%97b=e#9^%!e4-QIj)Z(jh!STL`R+u*@rH=04-ROM#=TP07*Vw2IVn8MV< z04gJ1@s(4%aKuIN=X|gr*3Ek5S8U`@Aq!@QSrkskDtm~U`cHy$(qY1c+L&pUVg>3& zY%}$@*&oBG?2(E4B0t1_%+e25P{joYb3C$Xr@$#$QE8NPNCUw?+l0XK3zHng6AaVZGeFeBQ+Kd|50L4^c7WBAE&r%1UI^rG)?n_p z7I*YF(>2=K)YPwwGkRJeD_(oXWRx%Zf;vry#|=~{HvPt~M-AA2(~&>MM>EFsx8VNt zet@Ceg>SfBpVeJ5F5ZZ*=#p}_3+ICxVYK(5wY3|}n)kUM&d+D;vFgzhch-9 z;+LpfK3O@MQ7A~;%p)a>HD3(}cR8Ke=c|<5qB3_Lxw+|krQ;n`k(_r+GBW1j(jadJ zh*Q?M?Tp-WNOKqtex8LmwQUiA=i9JqN9zo@b&wubUm?8u6KEAxvlCSz;42l6+6agn}gK?Afy*m8v8QTZx;4!{*^bcPRbd*BVC*ptnBqcw-WT 
zb!#u|_FSf*DAY${k=?LBu&_%gONP9SZx4iwe^k2{fBeWgvqrG@!<;2?*kcxyqRr?RP0&^~99a~*aU*L?9wO90;{^#^`;e>4DY#h3R?eIXdmx7ZuregrgM4)N7 z_EXAb$Gv78Fg+5ugH2n%r_83VIt7#0&}=oqoonz0b!ni$=A-PU_#?bOhD|8yvP50=G1~GB9YoX*t@!wrc99B> zcM9cR7~1MJF(R)rrmG(r z|1UeHOFL7`&GtOB1;e4gyLw;TnI&v~K)~=QV*1Hd5`sMP$MRcQIgn^iz#J#TK?8$0 zgf!6kzyzjSjzM^WkW>< zh8^Is;r%^p@R`7M@Kya??=bR-mz1}w9lnd~#maidZCEZjp}wJSb`SS;!HLe%f2+)O zQ^wS1qC7Qh)UBpofRO*TRyBAn%=T+VMVjjw)R?=M2pH!{0O&F{qM#_iWY z!9+zxrLC!HQX+_G#UJDyi*K8qgBWiJ#`{(qGqbYb0GN2Ik7Pa#xw%UV{AjFrDBBnudZ0S%o{R8&fychn(40AYc4i2K z)-|0&pF#^b1*w2`{nOamO4!i^jeueu?vd13+*$ty=!q83IamFR)NatxzN%=+M|UQP z#C^eXCa!a-s1@$XbX>N_>zUZXnZer?s~8C@)+0t0NOrSe^?|2F&A5V`+M~EChzOrBf1^SPfe2{yAcKT7m)P}-K#}^ z29|O@Oq$S0{7OW>WHf8}8U{||0ix2eC-|mx; z3&Wr{1aBby`B6Ct`{U&M$a%}9*Z6IeH2nO&H#17gg5`I5S{mGrMoRS_pYofIs-%|+ z-N$Aq#-i*R?zk39Jf;{kxmP(&Vl~6g!7K{XCqs*4P;pAR>T?ot@f6ky(OP>m=&?Fm z@eM9rewMcWk@>1q!#bqNEKzQ(w4ywX%#x8Rc>^P78ucH=p}wf2GpPc0w%MLo$-{FIUy3Tdn)|uSxt&JNn|2urv35+V37eLkJ=oJ0} z+D^R9KWJ!kHD{NJ`&iyit>VKakU-R^BM-GmRvJWeTqKB=(TcC1c#z2Ce&*Q{KW}AJ zE_Y)kc|=D;tR8(S1YyP%PMd)hxbctsn>l#Tc6hPu$*TnMLx(sA`)@LNGdSA--Z9DR zLq^e=+%Bzj&m6{v4JM$d2XfBUF3U6>wer1Pc?zDe*K|*75QiUl?71&L*oGb|q+zU* zuWi4T*$j%xqyM(?f!?P0rLhXfpx{LJ*(>fG!ppy{Q-diXc>O|y+`kiM^{eN;N?-5z zcp~z#mg!?T=;nOm&L&AFl*SZ!@XT?~|NZ%2r-P!Zvsa`u-nXsv^g>^)Ua(Kokw@gj;G>Ll+gh|2Qr-uaTGL@ z0jUJTSc?!P$d z!Nh*6fbP7z;HGjWN*0T}ZZ+uE-L4Xgx{F1IOetvrUvET!`Qm1m0uv$Ga_Ou;vT7RR zH611I*TDFfysASjQB&Xb3Gk37BhBirj1hS{s*e+(LrRcg1b)b>fa#eb;@b+==puc2 zqf(@zxcsj+M-(N$`YD$R{8yNf=kNR`OG<#oc529w5~y9~qI}gB*Q6R9%W(xI`E(3uInnt*rBzmSBo)>fO z5oZUdYgy|m%(mn~l9__!GtbQhZ{kHo6{;TmT|-y#Qb*$ zObZL@rNek%3CYuvoaLkB7Y=b0rd@bP2tUATvv6_|{pvwJrkyno`?(m1{)T;fkb3*N z{*bw&`9n?@5*ji89&E<6x1uS+@5S`=$fP~Bk0&!3!SgwH_)s^Y;g3$}$j)y_%2${a zpliLLEkpW39x0vH_$q;G$x*N=HfQf5n$OC+*|J@_rN)7xVXyx7lUogc_WkspaHkTXl=0xJUOK*+PtP zICpFyp0P4fUjW1jBvb9(@Ebe@^&Wtml#>30OvE9=b=wz7r8%>^H1Yuj@8P`} zKM_eKtMtO6wVgO9FtB8#st?LuS7)A!I?gH?8#Bbq==%_R^6lw&+^0X$C-wvp_6TAk zSQ``tWJvB16CLyIFwoI$yNN^P45&A*r9c@3=&y_*w22KVGVlNcFAMCx@b)k}I6u$M znOs#BAMD}bQ4(J;L@R9n?AFNG*m6?uj~`01{{H?s6HvB;x&x`2w|C7!9GJ4~zCXZZ z3b1}bA)$_Z;6+b*_f8y$LpBU8_v%W&qc4znaSXv=O>vb@qaM{rWpx52bnO|f@j#xky414agiZmz<1@;F|XU@Bluq%jd2;$`3pWo`)Hc_O^^9_t{wRRQhF4 z3csyG766L@!(Su$o-mpRwVk~r7?h3SG0w}+*S3rT_vHl~VDO3kSE`{NZe#H&LN#Q0 z-EC)iw1s|kw5)q--3#XLz|Y4_BrN;cv_#1J5Ku2aJ@w4MpXloGH=!ILG#|k;fCmOb z$fldt()S&tV`Dl%fdOBXy2lP-_mky7NZO(q?8d8gzjjAy_zz zm~Br)UQBHObo?$NIYy)OT^yot5c$bHjfN8hC8brX>ZDo)8(=)FcoOaC32a1Fwd$=d!>|$FN^FfR18; zo*Q|K`rJN{vHihg^XuMMR(b$e*#>dj5<0ZyLykT` zp%oQ7z^s5~hr=&!ExmScNypjOzxhrqswE|Tor}DSKv16F`e!kS+P9Nn9aR%VfySC` zxkE?0z$W!ZS7&weAbT{FzOU7}mzS1mRpbG|&}Py>TT+IRYxPf%T>EnFXH%s__5{Af~8w(&Dfq5=aiGqyDC)`HZqzs zpZ;PG2vZGQX%G$scv=kDJ7urmyfLix!m9z~7G6Mvo&UGz>-|hNeXh@KVbi~Tul4qH zcHTZYNQ}5m)L4rDR}Y>)oZI5y zJv61Ssbgs2$E*_wCF$esgE)`seP^^FFVT1fal5oB!U~POK*fZ{R#!}dd|)IJ&D>_L ztg(GVi#TMe^YZ6*6M;)|;Zwq90R?fvDb|H=YXrs))C7zP{bcn?y6nQGlt6fe$?z*uOV zjI0E?gyb{0(Q3BftNJjRRZZ_47v=;ZSsY#Iw(kWI*pS+_e$E)Z^M zZ@y*~%Z?#y=2?52)z&G;#vc`XXmj(B;$acZtRxKqMn%~HCC_O1koLD1F?I=pNQ&Yl z8;LVSwNG>eioRi|(;n~+_w`B30?QuELMLZO81scvmLNH-(M~pgj7-cT?nYYWox`5| zevIO7YxR@%!^JgyR^X3kWv`^X)`qgv4fLHOc6WbCqdCOCva9=AR zNStE{2l&iQ^$5UB0T1N8dk@N!TF44tG@3|)ht@aAD)J3-DDR{cP~BG|PgG{K&=z zN_vrUnSxMh1?ALbm*TRpGK;Aacx}W#IhlzWfCjErV}W;HpQ`VBejH>W92BLbp0`Xf zrGVu5%;tZ*WJU(wOm2_?U=&;sj0FOv7muCi$`6symj3baqp&Pu#2zs;3*dpG7IfD+ z7PQF(1|Wz;{vJZPs9Stf30y3g*(k#nr~nwTUj{qI&cUHjD{|=;)bapH0vwSKSRs07 zAEg(z2cEp4p&@cLSJ$$MRo|H??PU+nHVDM2e}>^UH~vGpxT5Yp@r6MRXm^i7F|@i~Qvqa2 zpv3?-9o7?k4@3k9GTA7^wB*0V4|@eJPDf3RDf9BMaR83g}-E+)jZKM*=IkjuI{IP^`e 
zo8Qch8Cs;Hp|N>_f52CQE}EyC=ILkER{HaJVS8Xzy96GNla_?D-%RlFFnC}8h6k{& z{N3UaN3#tN3ewYq9&2i7G-2l;?9|Ll6m+%_}RG z-S|+GYa5I<$%WD5o9Yf7vT?U5;(-XL3;{jscdBBFEQnE|^*_uHbuauN{6-fbS0Dt+ z^XS665>*>e7LX_S=Uu|8hMh)YRVV;D=KSw4G2Sg)C{oA&g!kexqYVP#KvMNE_LgOY z50|Ys0`VXI%!+KlLL_dTM?5&u?3w7ED5U<9{UsJjZf}~1%I!+$j1lD{FD~k#4B*-v`0P6R0(misi<4+}!3PZLj33EL~IrI_L zL9W{N2pfgZwy^)NJaiasUTzxJ(uYgzdlDQit4Z9x8{MR%Op07TPWwFVu&#_`pTw2` zt!U4L679k#ea)sfz^kU+iM*e3{oJI-BHUSkob9NVA#np_Ow=M zG<*a^#cuvhmc>kFH~goBplhBQwXgFP#UK>eu z9=mh30eQ`)8*RQvHp$tD3JcQ>Nu9f-_4)onRG{3XcoH8OzqL#58C!|Lxg5&2wgl|>V}+w&g+7f&IsgN4xHICnl~Bxi6z9w{#HsmEE@JZ)uy%G9rafgRNp zT#nMI*a+v5T)0u1%SZ?|$X+x7N*g*r^e4xB2^|?7`{$-vq0UVjpkxlrW+MDtdrdoS&lHWF&Ka=(-LdI~B-1Umjg7TWfYA>f6hM{&Bj)6hz&@9h zr}yb)SNNmi9y~OG@|8tD701{U*ZrCItS1m=U??7B>J+AYo3vL_y~~>`pY~+DQh+WN zU{0J`k=#y(cr*OK1YEHWhgM|d_vIaShQpx_DD!#RC1)2~D=Cld?zQO5)xX47;NhWo z8Ef_IV@hi3pNq8{7Fg>oE8PC7c79;vWI$doGbu*)sBTx%?Qc3x9XpTiUX6;p%=1sH z_kqdJM5|}4)2c~-^Kpx}aDRYrL2lHw)cfJ0)-IJ{(w-9f2AMS7?q_`dgxhF*#Dd%C zzc;_q9u+FSn~U>I<~$alJ%6T+zn*O4k6B)w$a+m_$f}VyF2;@+*W|gd?B{o~F~V!M z3}PjWY)WqiQf*|_YiDMKic>Jh_@97pyPC>ORDO+21DQyN^7EWlElXI!^o>RwtT+p9z^0s^vd-6E&E`-)s!E}zn zUTb(GE_VjL*K$G+AT~i6S!~46t-s)%o?CH-^&SjU((9_dG+D+k-)*qv(K*;(zu?!W zy3LUsp6Z7>ND&ak zA2EC6}@DH?^OQJ83Xb8UTm1Wf+V}W;55=iM~&kl^mdo^x8-CV6kCT2oE zMnV{wn*MZ!xoE^TFux0u9eE@(8a|C17TAxmH~JZ)niSlG+hG}u@+GYB-@x0%#AyH2 zf&!2)44DG3TK~nX1Q;!e(?oh?kBJD$DEOtwR1=kr5>5OUL_+utLYhr45j;n#3z$5A zl13xjv?<=b3FgLJc{una|75d@NKm6m8XGV$0AKeTC0;E&<#$jcSP{#wLhv022#&8~ z@PGaTY3wgMPN!wNKLb}J&^E+)HUB82zVBD>{0M9 zKfC-dHiW1Dsd~b*|KooCzwnl&_%6lmL3>(lNQ#O`K?ANh*5+!i*5XSFP9-h>VBrvb zFs^OV;Qhh5=(YkdRXG~eWf^=Ao<;1u&W%7gc=*VHlo^hLrAvF~jT^?~Aye`%hnnmKhEEY-ir|c8(JVEH zpT#u$b@+ynUyzm1BO9|RIQyqk4Zj~W{cnXyTI?lV(S$|E;t!pl>}jdbO%Zbo3lGg! zb8}?zC(iVX;GD1uDM5sQNfH6{NwS$RSp05NCVhdAqu4f9WSK8D1{93lP$!PVVC0de z!qwCM7)DIYGsa$>3`W4Y-rRFR=$bHY^kS_i7w;IJ^plmLKtd>V_$=hkhchH*=0D(` z9cu(gpa?LiQ2(Y2B}fHAGPd)Fg1`z+d-o;MC)u9=LJXst7gbRakEIPZm+UR=p0z_d z8zEUP{z)=h#fpdThtaX8YI8g+bSqzqpS$OiQa_KLW@<}_hWHUP-Fnw7ZIq)*0FJ>- z>)i-3{1dKkcga`XfoaZC;gXyhDFNJ@PkO{upSc<0GBJ#V;@$dizBbDX$bwtpejZq} zgcG(rg*#|3@~@3B{0){WWQvv~Xws(jDzkhfE7TmA(*2N8w|&iIE`A~*xfzx2wJ`D} zaD2Uz{3y>b)5xQyZaqJP)5!9DUE$>YuAlWssjK#djo#)u>Td9(~6fS*#61Z=P5wZNxtMb zg4Es1Y=8Q0(qWj;4XZTpf&w%*pd1sD7V91U=rf0zu<$U~IX2fI%bjL>x3=xOVXdS{ zj`OrglyCf=_85`xP+mBR>*(4$t1ZFm;YkU;952(aocX)1TOs7VYvCaY*Bwf4WtcE1Pl#xp9dytLoYxf?5Z(C z1+2t1GOMrc{vz>YXZLYgR@Ft$_ljju5t6Y+3LV^cG)hbQQ$o~HA{y_M4@dp~mUleX_V~gxddTr-8D%H0-TMm*q45=s=nWwtvkMDy7;l?7P^e4;-*p**WZqA!Aeho z*b%pQPq!8Rs9axt#|55g?XR`(SEh+<#8Nz*@i+q)b-9bbTeEMi8$Id@I5o3)SbQt5 z{oB*h;aI*?EG$zV0|Ps<#SYMr-|bFor{W>*7hZNY?pvHna6FVL9&K0kddtCS_nLHo zG&4Un<4v~+-|z3gcU5A9i2uY#dYZhSpa(m%acoFw+&$m^R{3hKydIgey`4vs5;vrO z%5#*agZ#p;n(>KVF{2Sa;cHXhd%g`gWG0v8PG!m_jK>6K7Hwy4Y!E}Ayj>D`rF3k1 zB45&DWOY@+kw^E7L~$O{r!bG5(gHi^A{-CXD+}8!ZM}axV_>8WrDVxRjo||i;0xOU0&z6WR z%eRatuD1_LDSy?it;=>!O`enguu@=dk@xisfA`+zk30jpp{HJ*E_)+)%0vSIyz0{P zYN;kNlpKuZH)t1x`3zh?CwcjgkW8B>N{GpOoAcsQS!y^x9ZtfHA|J?=Spi;?0$C<^BU;nKVGvQ< zOgh?1kA2d*Dc=8-VdvbK0R{bb^PeDv=W$h!dKbX%zrfZ zckbh>X_XQC{%#%dXU=36CR^Fvt>Cy;jbFVCl|7A=D`l6*s&6{BY&Z%yP36ow6W^MO z%3Y5+Es?f9S@S0K$9b3YWk+3ocbe4KHFj;gmv3A8muf2S43Rs3==-NJRsU4IObMW2DLA!IPF#ow=-V6B!Y>*pbx>OO z?{9p{_vZRQ+)MG^FAtF`ejeL-T7o|PDGgEQtgYetw`C9Csqw@Okxace6B7dc6tQ1V zD$gjNI@$n(YUFy+bFVq1bP~Ez>lGqKMP5Y?vnQ@D8Oq$Wx<2=c&h{490AeZq8SC1PJ&^(I&&4~eG5*tv#`}HH2D=bL zgb0&>dxL2<9r^Sq2GH5Q|DzuKXOx=^103V&*;=`vQu#&U0f7_@m_P4dC>dS;q7a6> zPKJ?BDao;qowR`{6+(#Pjy>rM0NGtqd-}An?Fw$j`S%4}$uD;aG7L(;v{Ol$(lM;_ 
z>*-tEbK)BrdNcm*m7+DvnBa`G#qGiRar))1LJa2>zr|xQYt#a^Q|F*}Ysqw%&3el3?%MF8U2cmyXZ!7zaKVfF3 zS&;u!@mz`&{4Dqr2)Em{!hjS9516OSk3H(HiE(LH)a=UHRNA<&*K2!?%~U34RG7v`cGO7>3`Y zz40#R!cfAREAfS%?V~T15vQs$a0424H;xLAUYo@EJgwjt_c4Bojrus{KWUXO(As8i z(ebn1XYfWo|EOT^(xp7xD?b>ltpgPcN&uCxT zIF?-@xzqZt^Qqw2{R?D+hEfVvV`;?7rwmhRT$E)W5|54xj0$BHKT&`1?#shz5A_3* zxw0V4ZA<`cbd=mP9crFmp3mgQ z`859=EAeDJ_55kpkB7{23U;N-O*WM(gT!Uq)uMghG#f29wQ}a(B^rt?W-L!VGun9f z&IfWU`ySDGyK_$EV|RlyR#I)uw_pitZhhkuE~u;a!aei#g($3HOka2R?^L7Y2Z-99 z$2QWt{p~BC<@j=;_*ALYBQF-xx?lE&6;yOvzm6*Muk&<&8HIekKIQm^SJbymY1A6W zb{kwI2p+W32xPuavtXaZrTu1>4v(@_9ges{NZ>S=P2wef*(!Sq4%3@e$IaEbbam~6Vzm#4*m(bYPsc5Tmlv@CJH6+D%058b@n%EPzro-`I8ZHzWn zxfg!(e*U+{Qi0v|EV1m4UDr<`H@9e?UQ`bp`${GDBR`LU*4oA+FK@2(V|ZR1%}e8F z?8dtzB?`fz7gQ}(4(CELNx-Y0-APZZFksOQ@=a?joe?OB;sJLG0Y-sj&n4p_1)c^d zZa-tiPg3C7wlAt%Wb-MX17~KCpx#y!_gkvRk%~BEPSLPx_DM}PY@U(HCy#pClJ1Iz zTu>iFCnS#zomzNZZuv=G#8zK~&It~+O#atwD~%N|j+)xOGO-_?utQb$CRNwwTFSQ&u=VImFZt+co zYkPjd{7#IE@oA5x{j}1A7VR4>9`e4vu;2Fl>WP%~#D6)$#?hU!zqa`ED)?7h-)!$^ zedV7Ae@ZrewTK6ebKQ)`KE*^>ep^r&pG-=={h+$Y(lV`pSU`-^^NW?0xmka9s8hKo zT(;_T+zE< zITnTA9o<_EeM^Imv+WY=Pp3xLul-WwUf}wSR4h5U*(nCbry#j=3LW@Z-Oj&Cdq5^n zJJ^6+(A~KxcX(|oc6!Z2Ii~I)r>cQYcwyCs#w~$nSFjwDv0_fFCdq1;Ws?W%Kz1=T z(J)fq$b|D$af^afSz}V$TH;(Bx3Bp5q}VT=`IQR^hOAetvy$0x>b3(b7Y6t7n?f9q zYoaU+FAIX9xp=|Ih#z(Cg&%TPrt7&yG_%DjUd>uQA*$qbZM%he`PY#X(BN^KanB7M zS{{y}arHl{Lbm5VDOksNaO5gA_d6@^a%H#42YIH2mfR7c8~V{Ad$qkIs@S7zfs=%k zRXR>&%^-c}CJybF z*#9cj(zaa5zJny&gJB5mT)i24;nz|1jPe>q^vJ+RSXHT$50u31w4A>cXCpD2YxR(g zh9`X4Pl9xOMn%yt|0)*&=A54_RHX2p%b!sSCYcZ@XMsw((Mlm(PIK`=yknp&cH8LO ztd6}8?H!0tJ0lrirXbOlHc)(Ge`%T3pa9ImITC>#Ow4Ggp1o`~4di)QKnF2G3i}t+ z8$0GG%8o74!Vfk@G%pxq^`%M9VA|qPUl^s%3QQB83J;A&stFJp3*k$J>}`@koo6vf zG$Gl?Y9eT)S02}^7-jGUDJaet?y@m!*}1UK-07kgtZq;uk~L4GL5Tu_j*-xECtyk# zjSUW-bKj|_-t#h>De$#)3LdU1e*9)Yii8LHMa5F4X#{%jq_FeLNHpJ%M9Tp7m*MhF zPs*q+Up8lzpFaELtt8{p7#@(v2rI1j$wu=gjer4hDOC!49L~#;Nghyt5E4PG8r94= zDNG{840$8q@Bm1-KJ7ljH)XcYiZ-pg43t!p>x)=qp|OTadcVE0&Vv!4^X~d-?*zca zUZCx_6#X>30yP}qAT=-oCSWKm*n9$__zUpZ9MHhX6HTpk1aF%PoLK4%kGUzj5!zP6agy&tC2&WYLccWQ_t$1ei2Z0(e7N zO+a73_TYfxU6hyz2ar(!*4>QN71Mfk4!0`9#NWkWk>3x{s9b z2;N}%V??GbNFZWi4UBhOeRo~)L>^Q*`0f@w6i;F2Xg`5CE;Qm{@JVL-U~8-7zzurJ z&^q7L<2wFJ&sd3e#pl?w2P$D~e>xu@6n6!7FdV;zxJZYc`>`FB?_~k%W0)hD+1Z!? zmZ7BX{eQ2cbm6K1diA})L?)%~$&d2&^%jQz`Bl&pU5BaPkryT$v@IZ#Mzm@BZ@D`! 
zBD*U(o|M^vgdU81ewU=6q?7YW32#*laiA(fJG&hG`(yStRWcYb~~=MMGKLh7aVXh}tW!2duc?p1S&0g(u(kQMfn zz)sx_087FK*S%iYLO%*KDR|BAfh52*7=Y5M3KP`#X@PEuI4FkwCGAtNFyW7+z%t>} zX8gy)Hil2eUk#jE{Ka9_rN~}2cP^3m$#Mc>hYMcDLwFaQXguxW+Kxt8iJ=sjLn=jz z@9}_@`@C6=M8n}-bDPycefivB^C|9vlgnpeT9U@XuXz^_O_mt|GBS_^QXl=|_xu=v zcEw)!LJRy`GBo~b{H1;sh~)_Sd1l30-c?~GS&HBFxCdHRKxd5vJzw=9B z13VL<G zBN$-*na3RH_3)2qdU)kF8r_E9uv5zMf>Du?C_HrF_(d9c@mc=@p575qGr-gYlj)!` zI!}XdcY*cb0*&LKJs8>5)YIcY-`CU}kRJNCj~}#2K}HRuWF~O-{1|Ov>Nj~2&h_;l zHJ+e}`5egJU9}gBHni~HI6vO27rQMZo?<*A8-ux?u;@uN9p1>QgF;Fy> zei89(5O*|Dw|wjG@7B5l)xYMwflpqIhr% zj=1gh76Z$18b;P-j7ay5q|`8jj5C}qmUtKO?kxh%=svhUIx#MC*gjHr*Ef12U6oAN z?!NBxxucf&O#@=Q8}2ju+N27GC<=ElkUv_xIp*rS!V*I3a#sq3;)1Yy3{|?Q7B8kN zQM`1RA4FqsHhSydU~t=sI6D=uReYD66A>1?g8y{f+POAUMX(++uB($x1o9`_ioZjD zKa_rT=g<_DkR*XNuR#lFJKa041ot6YAX*g!<)#JO@zLTK3?AEy*n=;cu zRcTbgE-tXW__NEm=3r&M`^fpAG4#7=?cY$}iDEDJN$H{U8>A|oTMLPAIkhEt=)9omqX8iP#6a45omYzEvY)1 z$TvECRk~^b&0BmA)>wR^x<|lI`~^9`dKUR+>m;tUN8sN(3;zwp9rP#F$XEjihmsQU zEO=$SYWKr=NwyG|p~K0Tq%D{Y<#KvM`5cS=^*f7ew6b=X>0HFRH(jM&sVYT}-}{634PVb|B|QTt@M~d} z93XIp3x%kMUa(st4?DZP_bmT;!x*_Q+p*nviac>&KtcuNou9t??rk94uAYEtcS=2- z#-7h7>&bS1HaIDf+6rF%B4nABT0U%9+)QBNAzIYq{M^f*yq|Zk$qS}v;B>J{Cg*vioH?{nbXz^|s62VY+&z+Wf5I~)M zW;7a*KNLE@hJ)U+M{lsm2g~CTIuLv?5<;iJUKt+=T|;A=P3=QraDxCO2WhyUQ!u|Y zMUvg1JrM6MK5LDph6d&MS)y7g@ZM+%-3&Khf3^c#Flkmgi%v#qpqLAATe%!y@{;lDYVY8$n^e-9_Ka+ z@F+H)xC2xrmGSWu37MDw+ooD6tsI?jXvZO7^5!*wZ{D(T^(DFN9XgxAPq-S7+p_cu zQl#*;pn(X0v_dinxMi8K>|C4%mgO{np`v;79k87PU`ybe{uKWDr0m+uj|YTgFr6|9 zxC>d}FR-0$3dc`8#Z3uA=hkiM+*PGdwj+GUDF}>PQ@;B6{(Zgu!CmI?cp*CgfSltZ z@`ok(d%Ofo^E0kt=74C`O5?(Re=M#CgV}cJBY7 ziyVMLc4LtiA4%bkw@a&q`ET&a7Zs_qb%Y}^F!)6Q>uSz4vhP)VAL5Ft$J_*Ht!ay} zXBW88%+}}!YOUdnQi}K$u@Jam_s9$f0gR;$BjL$QFbz~r`w%+#(6&coCv6J4dUN8L z2&Gf}lNh8vE0D_dnijszL08KqWMUhnoBo`*F%niNec@~!`^%*hdSn&Q*FE>w{<<%; z&ZT&jNO}RAaEkRkt6EzLYXjz?5jM$%Lxc?ZM>&P-1Q+IIEFl=TM?36#>%Fe+J~#Xh=3;lFn|UKJEvVyM77s zbu6QrH+ieEMj&V7xk>lLId}emCes`I17V?!5E;kX+!J&OqSK5JP}5|gdGl){`^u7A zQ491LW3l>ViE}ea9)XXf6^rl}O7{1@3Qkt%wG@^B5C_WE=u)Ht!08|a^YM%mIR{6> z^oB5r0T$`<|MYg=VNG^jzSdVPAVskY2x90>kt$7^20~GabdcVA69}LvATJ6S2uK&C zND-v>paMcb1S!%%N+_ZPLy!(UhDpyYx*50 zGc!Y^@^+R|ifnY?xI4weVr~Sp@Mv41)M)Y_<4zzxpl}1Aho|QFJOD-lI0-AS;fsFsadvIB>ERS9YzU` zB?N}zm3XAgbbgnJgNsQ#v{)XegjZ9CQR4m^E>m9=Y^kAstdp2grsie2kVllr>)}bY zS>-|%Yp%vgpTs#od9b(1iAnG%!Ptpdkp`K5Uko4dVe@u0V~nDR*+Yajq>qJve}BSE zZfg6VuYbi)h*zTiXNq?Ii5dT&)dBr8AIY>8jZg`Tj6tZ3kGe@_a;stx&#X#0^Eb~9 zGJiyREyJvvarj-xaEHF&Aw$%_ztjRmfG6L2j_*;w(n;mWmhYjLS8n#Bq0iD;l9E6` zeuBbx`Hvl2Y|uFXDT0w4gCiPhD?JR2%I6UgH1f~{5FYP<$~>w0pL75WN<-kI+2?jJ z9`Xj@?JsdX7nfI1!f;1~*i{%Dl>h2C?xy}&RFUy#Gm{`p?X)>MR^2;~*^WTYz>OLT zT2m$LRHh(9AwrTxF4I{sZt1T`A%z8D?cK{4Bg^>HoEC4B?w)v+a@9si#Os}R~>Oxri znhCUdK?QNIUm$?@0AB}a0wC%=s6qi<9g6)wb3JE;rxwEYroj4l8smHsbsZz#J*8># zvb0a=+y)W>y#bwlRt^sB7A4@KoW+T`M(9K9Z3agoWozAH!;_h)bR?o;8EkNeRB~Y9 zSr22rHjD6VXi8xzV+t0|pj{vWBe06P&VaffLBV!Ux>dh=^{Tko71#iskHn4_4@gN! ztdlx~paPr2ZxOl_K*Iw1gaOd4*5T^vs!0lg<3_cP-%{NB+5(ViPw(>BL5+0^FKmLp zn}>h~l>cLzg^5WDNEO^CwPgKfp>*5!)0@`9SASZ*(33~aptBOX&=8r>Qx4s|O8s`O zt^+pjp?QlU!_+5bVr0ZO_rXahM&M7Np7m%(@+v=1gU=5`sPy;tuGmI{pT}2R|Gsnr zpiTwCJtA+1) z3S)b6LHo#dv`Z66p#fa8{zlcBA9VeV~Vfbs4eRKm}+O3FTB zYP2aYB{|wadV>=M^E6s+w;HeTxD5jE`XpEh3UJW2Z=cmc%v|}K2VbCTaab+5CJc^{0)6iQ{BDT3T6}j~y;ttpem1`Dm6ZK{g%xPmH_|D|bdPL0)a`9YYz8lXhZZwoSfWSm$q@rRIaEw60U05AfLI@qXYRv|~fTtq|! 
z5na1B0XRG`7*}^3kiyRn$Ih5$jlRRxLAovl>#6D&1_WDZ>+p)YKd=}G?XLi|z6vHcfAJs=myYZiK!{Fi9X;=91I0 z@4nhaKaDevGysvCpSQbx``PH*Jj1s@agtv7conLzfiX&ozANb*1l_;Jf_k>Lw#v#a z&@<5!ycXBkIP}7OBmH|_%45@@4T9R5SIOOkQ$00@-UazkuoRth!zLu7lg3)}iq#S2N646|V;`%gR=uNN~7Kg^bRmVU zLc@!Hq1tX?7rE#9rg+=g6EvYX(4dQpi+kgK9gs87&oeuR(*A6+x(p4xtmtKVZSB}4 z>A2dmyKN^-L1b7)8k5< zQBJ5gPbwDYWQ-`UsCW%@jsmltZ^Ydtq6?%c;d24Y%onZy-e4=m$-(uLCz7@GrUxIHS?B^QT-m+$kBVRfz~;u0m>4fI*Bd5&8Yzm zV?Y`lgZg$jcp@;!uD<}8h3rw0*am1ncL*-M^=r5qOyA-c0o^F*BLT{0$3tg!}?KSP=eQ@n-EBJsqg&-82v zU`e(o?1S1Nob=d|8C3?|fPoKy);2%^w{HSK3BvfL=-v5fQ)p}5z(c#Wk1VwBsvErj`jyS7O(3-j zHKZ5w*d@Qo!XDPD{f8z{v^YZ(8h7%s-p_j zme&`yGS8q$-i$-H*(5y7xs=|?BWRdX=nlF1(S%-6&X?}^aPd~%yAu(e^jEKFTN0mN zVZU$;#mTDgsxWC$@a4@4L|3me&ZiH9egMEgDYE(ca%e(7?A|26@fGLg*~4x}F`2Fnt6z*q@CtX_2H#ciEi5KgRk2t$iJoDfs1nKexGPtt zZxt`We*e+X_dwpa`jtxLFvV50iqyz1eEh;yKHX~zF3;xN-6%xWQ6Lo*ZyZx~hE~CY zLKm>cX&mNvR8_>^;GPaP22;Q=-2iWhy9T8i8S@I2j7yh3??HJ*3; zJUSll@9K6VIm_p6;Pf|)$JI2iO@|1L!a|_vyS`xt(HB;pz&)r&;bWDrsl2jHJkv{ zS`U}!y?!n5^NGt2M_eS`6=7l0+XDRt9gd;kE3rDOFg$_1U5G3z)socF`oHuHeK-<|oB6o*Ub>2BmOfJL{Ur#y%>g9kZ= zqFdZv-rhT(h})k`e8;dsmW@EeKYB*6O}Vw|5+3jw*?eMC1@yn}sT$RSeD*{E`G{v} z7>cM<3nxpwvKQ_fviWJe_a)9duYPE4m0L;dLfo@+Mx)Ud7MY_a$co;|@bGZJb8?9n zPC+}4Z1Cvn@6H%B z*crErU@Myj)R(2)Z4Q|uxH8>#vkC|7g~PE-)iBdJ$Uo}Oy(~T~pTM7Ljk_Pmy(%W0 z5gXp0ogpB(^iG)>Ej504S~cDb5EaX(r1qMmDptWa2L@sXh15)uGkL&(z6%L`ap+?N z>b{=t#c?5dT(Es&V5>hGT@OV5q|nFveI{f>Yt7Ww=|{umwkN@T1#M4g5L0eU{b)SD zxLB|~J~p5^^J7(in?X7{I$91+z;G+&q?-Zlfp2LBP8%rfoWTW&n@z-v_t1nYbWSud z7A<}P`0~e(^}CykKwOP3#%+F!M$cS=R2PV{LW8iKgM;cV8|dBzqX3vK%A?-VT~zbF zd-nxt@Rx~r!;bL6);c_xn>TI%Hbws>)$?FPSN~_0u6K^lKfMxn8I_QcF>%I2i|#Ge z*Tyx%fgIMgvw-qDIMyEhMQTufD-b9OS|pEL6iZ&g+`WrDKTRFuKrfA6l3YrO4_)fj=;0qhy? zch6@xgg2?<{FugXs~)u7tS)jii1*ONU1a6o2JGJ(e!iX6iDUS-P}GbcyKy4fVh z@s+f-QHm}T^zt^Nl$y1!AHq`D4P$bd z|MsnRAtzaO{T;)&SFd^rN4N&}kyNEwDR&s}%yep+N*#X;^1N_*kuis^0D!KJBhv~_ z)UJ6b-7vX`$Du7-BA!M^LsT=bA3Pk#Bt0AUvQ$JwfiF%?6)81F|TI zaNSbmbf=ND4V0sJipYsEayQ|=$cQh4IehnSR zwNP$3pPV&0IoTo8&4sXl#>BCn9<9k2@w9?|ThWXZ0g$t2xlhsn7XK&`TA1X0P$=b481rJ!OrX1&zav!Hvdrj14N^*s?}b#;!dD6z;x9ZHbe7^v z@@>-qvrD1WcXW|vX0$bB2n~AOM!KLq3p^KRSm;48ijLj73$@{C8$?ak?PBZQ&)&>j zvG_-(qJc+2IA$e%&zPsert|y;1XTfw>Ad_WX(;?mdS;1bQ2kzD0tpWt==+|}MzL0; zrClJJ_cVPvMQ?w7!D` zYndy%nkPl9FKcYv`+SoX9N|^L_x#?EQ~P;l%oo7)fXGDnA5Gu*3sevF)@Upd9r0FuScfbm|)cp{{;%oO-TCs?O1b;;^ zXurM>T@sD121M)`H8r)kPqh=1FlLiJ9K)*dM{%g|7j2-o+rY2HT z$eI~2P(n{%NI875x2|1xz0~Eo?b$FyKMF;D_u9dX+^%=D*VS<2OE=+k@BSECyyvNU zd(8^!vIO6}k2JblAInHqZMdKPUu%V72>}*!8VKEivEC={t$+a`2<=@|r?$^=j(++y z72Hl(cNM$-m&C5sE*901kFNT+5VS6(I#Cg0tp;l_x#r4A0X*4prs8#mB#~<3@ilN8 z;I9e|E7}vRd_a_GcYXR=F#V^g0tbey=6C@kPDOG5Lpa$GFajC&iXfv23cUW{gA{{W zu-QhtUPx<)Z%&)x0Rr#W)E61NOy?e(T+u-44fORvl#nnw1YVJSQy3jwc>}mDkW{D! 
ztkF&@JQ zdM_4hv$gQvqdBmDu$%c2u#g@DTQ+Y9NtQJ-84UvSjo=UinhElVk#a0~R?_N3|5pzU za<6zCN%Q}f2i|0NL&&NqC4uMUREP<8QvR;0XK2T(zk$2Yv$I4DB7G~MNw6RGsp-E~ zFg0SWSNyztXh;$i%w%N+QC1y&8F8JKuQ!F-2^!<3TodG?$C8}5>!fTnrF=b;FV<%G z7dX-unjg`!#N^7IDb1U^BV}?=1mt|$O!x+N3@1H_NxrmU5ntXiDv*prJC^`GfvT zLhieUg*9-utV%M!wwU@9oj-T(^NHi4iBmZ9rm&8NlH8b9gNKa+;?Cnz0SyK16$7Tp zq(I=1A+kry?PGD8pj!$?`iQ!dlJ3#4ipol$Ua9kl%F6D}zl;vr=rD_K6>ezyRBL;4 z)(G&vch)P2K){C$2$$ML6B^=5V`ev77zei>1oXpl!GwP!t50#pLAV;@WTd0QE(aU$Tz9F>itt1wi2EJ-IWy6xapu>+9w zYxHZjf#I7Y(0K|{6|jJYXZ#!-c#}JCY|E>krY-P!@xD%1U)L2;_7`mI8{p?7M%Zs- z)9?NDwUphjNRhB4`toLwLpPu6S>K(N;Gnti_u^4e)yY4vxlbv%9Wrdbpdbs~C*Zxj zT9a?L6DFU(7Oac(H+GT5eq6xbZtm9kB0e!5pch9nRps+7S#w!_<$IyW_cq27C)v%5IF+Yy0n- zP>i>)?^;=xEFYipfE&mHQeKL`hA33GAe`Zan+bIG5UGI&O*9cS4IPtSXcZ@Wwh+XY zei1y{V`)3O|8^2TW$bK(+y{v%e-MvYG~4qJty*1?tB*AmOE#(VaR$*h_zno{T&pL; zoWvXr?&Rs@*B=Wsq+?(JVqE(J`c!AZqq`JwEZo-82+UImWGLa+88h_ffRk2w%iXPh z4y)adNc?w;G#|i|m)k0nTLhcG3l6lt0%|wx({{d9g|i>2rH&!fH~I-oLWK;sKk4+i zV3#r3zAckO_CP$h?-Qr4{uh$A1G;8*Nu(q2dA^T`AEKn!JV2* zX9K+uFXLsmJW}(p{4YuYE(b8~267JyXKNi=b-+|8#{>qxek?P!~O|L&+f!DjbzHCg$D3-@+s;Wz$gN-L2LRqo)N|Jk)|(40_Zn4uQVK32OJc5DZ*oyruL_IA6)LwCJKmP z>Bg?hhLEfALH5`DWI<+Xd3l+@^w@{_W>7Wr8UovO{P8Up8ZX$Ydv~S=bHLZVW{cMk<>KIA2-8YWZvYfLKu2I4Vw0Qo0lGxp z{!L+Crw+}Ur;@?DXyASjhyjUh7uXjEidoN}Csj`N|6N_N@+|gKDSx0KS1dmg-G9qU zJYld>pnx9V!sjkYa;I&A5p}QCIV=w<6b8kq&u1guO!V4S3{FrNTlFJPB-~QPC$Rcc zSw2*9aRhIKf?GO~XtlioU|>=coT3CWc;D>qYIn$ZsS7I!qDTgy2+!;Rn=+2+*%6y8 z%pj!U@=y(1D|wZR!Jv>d0g(xir{Z|t9XIm9tIc*)*~fN)KV%x@2jNA}z-0*u?$pf$ z`}%@W5gXQ>tR{dEE;d}s`<7^{u4UlL$&CJJ zL>*_|J%WV6!bHHH?7f;2=eqnQJ>qCA1jO*`fX!Y|&IxvM;(w`XqIc4+eDGUpeC1%Rl-&nm1Rx1IK)X6#N*7mEo`7%8~}eCFyb>gt+_QHHvcBF`jQ7O};M3nPLq8g|A#ad1eQ8l#JH#VdoXP14NQ0@Buzq8Sb zvYQcc^9qP-y?xE4{2iqLJ}9lYp;dJ2ucpf&f;NA=5P>A0*MSr(Wcn~tdguCJcK(2= zz?b)M?U28h7a1x^A*I!d=6mYvQdhSn1gaARp@N+^T8cyDg5d#&{&7?uiuJPB z={w|MJz323LR|qQm)gGFO5gczYu!=wRUjd)ktN_J_Fk75>kwUs?;e+LCOkR81jN12 zoKjq~?okcPC{6jxS1U+q$0nE9reFMf&oG{@p!kd1=_sbb7m&?|*nqq0$l z#pApi+1n3@#~Ea(Ot?={>=kgs^0aDha<$7WT};%kF-_ZG9;dEEeojaRw{od#Z@UX{ z|1ZVWXiQz;);aXtW!y~|#sc zt5pnIYq*5W;^1-cS^AmnG{ihC8R5sd!P6`xY<7Kr_CcPwIk)-7^ts*0GTW~B5xR`g z!^|u!GJ%`rX?V=t8OdL7Pem#>T_$ayE2`?l%+b?K6V>4MVYMjf(0)BLlgW6SQ)$ns z=f|$BUST3R+W+M%af?Uhbf9eJ~RPO{JVWsBU*r4N}&EeJ{yhqU@R-dH?B@aCqD z@C_29v>9}CG{Ljq&e*2cza&>5U|$en`U)Eyv%iew!lFDmI=+qLc* z1T`hUy&Fqc5#mU{0=?LupF2lA z=ELX3rql1ApqK7Z<$~p+*3JIuKc%R9F0#nHG_;$eEpYXTDLZPe;FODQrufCkR}aOd z>jMwNexJVa(G~{^6uJTEm+2MBU6ZMIAD#%T!4o|E%O#VD9@>+E*XQ3o{q%}2OK(?m zdH4k=VmFkfO4Gm86HYi1_r)F(XRz2R{2$5ZDjp5eoVt`Mih1duOWRh~S(`Bqt(x84 zF=~@M;-@k_c(#NTZl)C^Ri;u#>}Z0~I2E>)4XX7WtEci!=^|9M;SAgj8-yT3S)174b}R4?FXC9yO7wgQ@@>S~PR!CsMg$#n`LOFX2k> zUViek2M->Ap&B~33$p7;U7@41z2FVOF(n7<&{|uqE3pJ95C;81JBI2jqru0~KG)v0 zt!U}>XFZ1#SIWTvsZ4r~)TgbvVN;7&ybe)*?2iN1o%9V?9+@t@^SsQWdISh{T#!CB+ojtX ziW@~=C6m<*YmUl!e~WXPcpY$<*X#>Qu)oxC2@E#5sfc&sUSUjXb~+$=tr*-6waQpM zb*#Zhj$bHz-#>i@Z}?7F-bD5@R0Dvj%4)pM_#PjZLPc_k{kud!A}!yv>N|xozc!)o zZc%Z5+yFryNcJ%J-oD0m3_n}b=8EQdfTnV_=^q>08TIS}s-hxpdt<1%$Pky>AJQ&U zYsP)j1)Rq2&JC+nIbvd|FVWP1?fOBFG}u@kr&W#(4f#Q|0B(8Pv;H#)fm^@)jK)+e zAI0>GSRQavdF7+^1Oo2yN*(${EmgsG5^6#<_PE}w`G(u&sxIOC`k3)oPe)2ERlh7K zpxDA5y;Mp!eY7;GjiU8Y565xBBm);uOQ9O~wIbhdoeiOJ$F4~Bju4ryJsp~ZG@Q!( zJ-s(fBLS@jKN{kE$%^-UgZnkF;ScKKUd>DPkCzAq_AW1I2G@d1b|QWK(I~B7`1Oue zxA3AULGd^cp@RJ~&qs&XBGtUch%S&9B6+)?kXCVcF3qKXS*D?hVvh4Fz4c>Mpwk@wKsB~ zi=P@lJ+Q5Ax%XjU0^$6E=oojXway|XXKr=9^0b+>!y6XM>+^_hkDc=?L%|tSInEtg z+v8W=;jjo3F4V@Ha`4Q~nO2X&)wG3Q~hx`be% z{Dy?{7cRtqX4t8LObv)r*K{+^90K9>SH|oYLp9aZ?$v$h&o)x45m=78#IZC3iy~+n 
znq2&5o_R3Q>nVqctom1CkB*sF-|3Kd>=!)C$uJj?AfCWt01u64Q5o;m`HzHu_W#+z2ft@ig-=bS#zv^UtwoF*1eyz3k>P*w<;Pnz@k@q)O%D@Y-WZfF{#jUUqg@P#|aq!*_ zCH1+tl}bdnZUvhK@2qY?lxHiqHH04Ue$b{z-fG<2f41Sr;VY zHYxeD?%)Qs1g0Xkh=t-M*WF+91;sc+{?pp0+XyzFp^+ykG4WZTV{Bq_ase+=;>ros zaz9oz8?@kt^!Q6Q7%(2+F4ei443rj%PoP&oGEmLW4pOf0N=?PGe8uEFWHYC9QL1tD z$_Bpl1MO!2^DEflVa)i-&MS(MCQb6F{!Z4T9s*X6~+-VSvacS4*;;KEh& z03aU6^zmb7=ee-Yu*KYac&=9jKtX^#CX8%T?>pW2$eH`{-V8}j>S}V%u@2}^wNdL; zGloC^eM0KmkiGU8^}!pK{t%?%TiqGYSR~CtimE+H4N_X4KGhZ_S3j~SS%-r?xHXaf zTdG5qGNPi`+4*A#c?uuty81t;Psb=7*YLW3_8$bZ=0C*OeQdyRqx@pum4DPMKBn}J z!|OWjZ%p^TeHQ=WBP2JXok#3dOPoib@YS6$VY=3(opC%b?8jgtA`}YK)?Z&_kGdNO z9IuP9`0Ao$=*e-Z>KWv4&ClAa-YQ2ln*1ft$S>dT8sm0QrFiKf z!hpIQ@s3faMgDz6qA7vvJtG~)!%hWVG!x&vXh8xnYa$e?YmrtBK!f=XXRI6dh_^|r z5A#3?#69YWYp5ZuQU^G&g0Ee_{MMVIR^P3kpUn1|1K{0&QAo0xO5)Du@6c-J#;V$D z1VJXKS8dxpPAkX${Ed8v!OGN58@i?!96#f)$K2}=q+AuAGR*oD@|Z%Xzb$q!V$}E; z2ab41yj{$rOb%-AmP&+ajg{*H$LgoK6`rH`*-`_nx;BzCR>c$Hw$Y~@VlJ$O&ZWK320nhbcG^on@a7*8RwK}QC>na-bQ9B6iD{^Qy^014s=4lhF6 zq+jV|V8`FD9)qOL)>6rTmcv=JsH$lNW%Z*`gOEe_5llc0J;;rNQkRJ7iPA!t%e8_6 z5rPlcsP9<1xD-V_e8h)T4Ko)J6Kj8h^QXg5adcy-3U2G7U}&_!2kmC3W?WJUfuF8t zQDY+Up6yHL64hMK-k9XKy4a^kv^e86+n4HD9JBm=>x`IkYj9BgPqzFq(hREuWMACOo z${0xC*@r|YRoZiUZ62N9$?DJiTHXQ}iq+p?kNP$cng zQWDJMBpWC$CwNesrghELoKOWH8ZB$KbmYs;n0dxHEeMmW-=2(A-soXWxP-;ett6_E zqTc+=(I%swVq;8vvrz~;Qu>=LvN|No>TH;7&gyvto?@OZ^j9KMp7SVA3~%s%X5$Hu zN6i22tWqy{Axy-4^7YLr4?&H5ckO;98oZS-o<_y<e-R_V>f{$Fh<+&LglooI{TGDj^WW z+i6qm@V@_oLH_p>?w_#f|NBRESh8-JS*mEVoUMn*@=yS?Qb zCY#mN(w}$4iwb#}+*f`yv%ymSCgSt^zpCHA*1rw)s?U@8aI!%z8H=UODN?~fMG|2A z-dHG`XDo*JH$^0#<>EZ{_y6%nrob8JC?oscG(&Iwz=sxn!d#J&xk?JYfy&mBO&2N` zFq{AOV%*Jwb6Vz#9{=1I`g@1*D^hmmu^FKU?-M#kmT-$2g#5|Iow_|pdH418DGbc=w0oiiLCQ7&O0T*5`Jy({Wh)p??(xwD-{G_*rNm2ojA zFT!~wY8p{=!_%`jOp3U^ns~+Dyl$Yev7$oM-MZ-5`02{I%B9)a9FLoCD9bhmZ^-s4x9gEf0p6fEZw~>$=L~7i%v6%V+j?K5SwLLyVOgE#Q zN?c@wKSlpw+*vh_B-?{!*|RAFzYF z5<)y3FlF7V3MRMitruL?GYdQ=;3VRDbZc|Hrl831XL{of>qK47RdVzq50bf+=X4|# zn&#!HK<(0H+9q!kA_G2aPhn;ZLdz-GB;gQj43OQtSVp~vY}D*X^y%Oj${hu5d@gPv zlZ3Dt=Sdr2$DwR=#W(Ljy!9!7;OEWMe%thITy#88SLbRH97?4^uu}=a90bb|s-XEX zUsb%#6zepV#Y$P4%AgwAIl2_BQTYuPucrn_ZsCKu9Vpf z)gDnAAbFKgjBygk5H6e7OJ94_ z{Y81QI0aM$lb!ACmQ$Z&jlRiGDgU`&uqUk7d8{dJQJ8rab+H=f^fj0E>s8KDkQm^Z z`QT05(Tv^;&)sWf;H_DWD0-6L($HpMH$<3U51Qy39Q-nz)HS!)(ke^(I(Ol*vpJ07 z%$AzLV`gJWFTj{ps^uK8E*px=eZ!p4?4G%(iOPg~)6I+V=pEqmXoKV>wC4cQ=RWlVYP$uJE^M0c4|!fXUsZ+ zo^c{VQby*IrF+B)#zfI?wJu5ZZI#ZyJra0Il^h0@g*8yBm{d@#Dz@N}cQ}GUK;;c5 zz$>}Bmf@W2RPgrAJ;7gJz};&|rnXL+9dT`hlpb0jVE#1&bjViJzjpRB8lfZB;6_ajP7FRV(cJ|CISvA>84(PiB-zNz#ow=NOgBwLH;w^ER5Am)y;WRWGe;yHX{ESdGDUzEm_0)%dfiT19p;_6oeMSJ>`jw9X) zAHnPkX*N3~&v-p%y@L(;*Ad3xbE{_%MXD>I+#v}48#@P8YLh7MKJ+kPMqBn~JYbcY zZ0^gk2Sk7KyNqb-q$G(b%Ml;WSiDUcUiC69tSO#sn1o5!^;E$hDP`V}fG!Djb+uj* zTF5+d(hIe^(^_+f%rJ%^%7OlSMxN1-E~Gh+)dhq44+G&m5^oKRe~7&bk1d>ZHxZ|NxR;4I+&YXQ-@8(N>SC}T!rg>N_TX(Y zFT!!WdrBBuQ7qvNRd$^72spX$AER!AXHDS!eIbw8eqKZ>97mW^sT>oCV3240o9!n% zG6W)#BbXN=aJpifrX5vpR(O-s@@yD+e^y?Es(6h;qZom!M^we&+<NK&H@JF(JK7wI0WMmCdItz)2ZxR}cE|cLe2zLP=NQt1FohzIJ;2hifcI3>7 z?BW}imeW8IhO=+I0$z?vBu*;?@p~*oj=)F!$G41!@hvkOwT^l@ep~()XKx55c*69V zP)mjEGR#uz!=^G9k^hgs{r!pD?lHz{X;OM->9aqaN77PLDKXvL3SQOC1mNejQ6u$* zlJ27X9lTgoFhTC3P`j3rqN57s$Y76~Mcqw&U0gn0egd@<&Ap6jy?`=;GhKTMdLb2W z1z8M6O($?P4{7@LnbFHueD^+B20(UzhM6pLdb;72xH5TAz61?t2klhyc-Y< zqb!p@KYbLR#T=4Vc?8p;^9f>5LyX4}MpseBMlr5ENCT+TIgQ*=m5hpo6IzUjD5NR8 z$3*(yK(-=9jWf2C^NeOZN>^3?vFFdDD(7BmA*a}49~K;8)TE}0h@O@p0d)k$hD3vF z!f^~COG!q+I76ffzou+jR3Xh=>Xae>6J*WS7EwHc9G;7T>RNZ2+?huzuO>dRPug@; zH$FkIC!9EgAsOjX>|W0KYxzeQs8t2u8wX`%(4HSP0&8kD*_&3(%JrA0JQw^ 
ypTK^CSNGeM`aj;@N@6pw_`^c^*9_D2-T^}{8F|&7H;t5SS4CMvsYu~&$o~RrMPG^l literal 52186 zcmeEuWn5KVx33^dsDyw>BPrc%X^@UhE7Fn@vXPQ5MJZ{dHYE)b(j6+@4dMorZltB} zT=>4{JW6Y8NImTE!NKHlV77iKCl`B_nDacD}T)A=;cI66s zGS+qQ7pi-4i{L*rM-91$SBm;5R<2y3xuPH~spV?8o_s5wxHVzxs z<@iY1fARY-!hz(TfB645BOMrMEpWyjdAfi9;m^7)x5WOlSLl-2t1GjNgcg|uf8qMd z>~-qDGQuNy#XcIFcL?j>g8H+r{cF6xGLk^!TixkTYK8F^0s2;d#{3H-|8K?T$3pw! zacXU^l`X>77Ct^6PWjR9d$eBan1Gty<$msS0%^G&X$V1Ff5Zx`t}*-J`}y%B7Bllo zTRXLez6Vm2uG>H2bPv}(Y0LzoBq3o-5EDqCv?5EaSfHoa5t~L0JkDHwVok-hR_o%q ziw*rB;YgTImUDmKiN-h<%gsxTn=|-UGqW1v1OUB0CGlemMB0T?7q-oa=-9f>b{1C% zzzR!4xGMZ0wfiaF1{9*i`rm?Y1DzP9v;Op$L;6NsPjcssY+YN+@)ZfHP8de zm2OE@K;L3sOT@N>{z~BS3e!FOl>ytb>+{N?sJ~BmX{$prK$RRG>Uz-DOR&t~D6Ds+ zj!lJ|C)-ywzbCp(Q{BK#xCfaD%|0;7JlMTkkS*Y+NZe@+Ek7SSx-mL+Sd{#266Iv$ zN|xz=$!P(Qg;{TUIVL0+G;|$e*wFm zEDRCm_H>K}^=1jWwI&N+z-{v^{M^0JZq-Hs=|{HSWO!&;TI7BOA><`XL&U73vs>Oi z3TxBJGX(-CLx#`}MpdrZYi>|UoPZVcY1MAc%HDKe${$?~JKidon5lPEencAI_7G6d zbpWoz;&LSsT9U-ingC`pTDR3;)O&XDrSAK|Vxz9Xv-4K)XYVcXv zo|BB!0!?0vnS+e6h8j-o4>eXikb?5*e5><>qKBViNu(uWYay!%^u&Mv%CPPd@6{Qv zg}FHMe^05Df{0%S#H~N^*h1oZ@k)X}nB404xE+>@%sHJiE%bP4m>1JEhkxmX5pZzt z^U3#*Av62MpjJ35`}Dc1Ni>xTV7FXq!LI@9PkABFQM!&R zPqba^v#a5~9ktbTJY{N=P z5Hl^na)R2!QIM!KfJeTrR(StaLkC)s0GkFui%us985kd-1d0YRwB9rVu_nO{mNfDI zEec(_30w~3mia!uYv59|0+8l|YtRK2THk6(5Si+RwSZaTfYtH`mib755gLLZ(E{Ih z7iS0IC*Ke3nXEvW9Fl($h})8=0aA!Hvjm{V?y)Reh&K_iY#_PN<-%-$8MAM-<+lJm za7zr{i+=chWqQc+o{FNigzht@zq(XCoy7dx4!Fa#v;y>X9c;CvpK$~W+QB!F`|x4r zjvP3Un?FSZk6n{Z!-4J-3ITb?epsim&GAxQz?!mekGL^Hd&0TgEX%yR$Q@{v9FTpT za4e|ML6mC7g?1H6nIg{)Jr)xH9uv`LCIip@+J8|!1cWdqO0JkwA&aQ zwae+k*@@Bn z{v~R4MM_P@P4DH0+s72R!q@; z7AzjszohC&u{0yIe0mUC)o#G^qB-qEyVyTl^2g+wT%PTB@)U$^-z~J8KxDujfl)Iy zRAwyV=enbNog7T*Z{Sn3A4O=an%)zv&yI07f^S$Ec-JnhMCjl!buTzrfAPm&?TB=# zJZ5|HN#YizP`On0%5YBIMJmiL<>avIJ?rS(TjV1<3*C?UQ$*j#lvobZ9)_P94cVcp z2jRj`o3A`FGAh>9)g39&DH}-=bks61NIntnTsqUq3w?-+Q4I6EvV~XjS%RD7NqaQ#KioU%<3`bR*HFbMrUW+B@4Q- zi=oYyj=}N?d{$8>a&aQ~JfS>fz?p z=xl4q_qedmerKVe+P%#wy^4y8yb#XDCxtp?ZNyAv&-!1!31@>vcl3_rX$n8_nfPRk z&}t`A2*9J>#J`2}ljB-64Py2-y*Q+MVc=!!pRQH_uTlh_y+VT!I=gSl8%5jQi?iW3 zsMJr7Lx;XJq{QibxFp{qdobRUEIbY)n=^1{yolp5BSnTS$NUgtGM7ZrOGffsdSeQ3 zs~v%~>%jYhfJZ2%Q+`zyT!`V+n!T-BTutzEyN9>GI+FaTL-OiyHrz2T)aoZ)_t3oB z6|WZ(HZ~v0L|i}Btt)Sp3@Y{R8DEPtc&T$@jB)#0q`B=5Hy+9T!8em-_x_3hAYArN z4@7Wl-BoKSPRL)eC#2Mmnq@p5-sttKE*TdXxe47v1Y+#{ za`_u*SWiG)CY+)OB=@lhL@qZ<<3dg?05I8vCGjc-8kRpWWb8M2QnW3%W*KY8BQ8m} z1JMtevRdPyVYz~j>2(+&BoI#7bU-Hn8c`s+l@en9H&?)i5-%w32W?0b0e&Q!O+XwA z36&ZJ;qD7YAczO}|FAB8P0+?wD3U*CO25oOcR*l%@|+_7IvN&!jlv`*6u&aPf_EUr zO-Ns{H;v}cy#e4s(3S2X+Mf=DbRYx|HoT>Pyb&0ueKeJxx+dTNd?cJ=fSz79l)BvQ z3drIL;f1-(7{HAFnJN*Z#5`kd02JE)W?kP7_;YF~S(ykET!TW~r+DBCCUa{1SQ6mc zkQkJE=r=TDFd;E80x=*9Bn^NVoD!zvarLH}Y1v1+oj}QSc2R=%>VE=KkmJ^v!$!ur7@Q;rC0WPpC+vK}uy~1R-|fffmTrG!D__c#vWgPKXwuEchu1 z2%phLjRctEZM4`dnJ){BvOqcM9=8N9(*_8PjG~q4*xKw(}OIjRAsZY5!e4AuLIM!k+a8y0=sb zc%Q-dAs#F<5MUW=gJe+B{f|;8wpP-oIT@Oq^&1|iFiCR~+Y=^}UTWvB?mWwQn`7_D%EedqVXI=&t;>*W4Wy}IBgBFWRYrz@G?7lWz*va@F1oKg#`qBR#Dw(TEdukH1X}os-gmHiJ*37j-hU@l#&)2_^|-$k zYV-U9NR7tge<#%FIiSV^rN0&GQwIu2jh|BgMyP-rpvIG|zx53+LEJYA3%f^pU$WLw zS|7EG&oSL*_s5NVo-K(9qJ}Bjb?$O7yrN5cm9Jl3M>hwwzc&NEGI zZS7wP8ZYC^Ehy(_2pN^t@`?&#B5?dg!Xq>l&l8*wNeZQs^Zy6cL7t|MMxhw1KMER;E z)H_>8J8$l=@vwoU@4?)=rkir{T2Kt2d3pnoWRFdw0wm3#eRfErlvBmdUVRO~t5Yhq z=ug?$RE;z@lAO(sVX$yFQfBr*(1)|B3tyb>PA!;vQ|olS{BB#7H~LW<&!S--ZP+4;!(}}1Ka?ni-o9r0k z*=kIovDcKBuwq&CLpy%9NJYC#1A_4bBU)m}IKM;TvG{4RI; zLh;3MvH0GA6yAF_#9p?Kp!)M~rREza7JG)V%$nKN$dlK&%)dV7P3xJ;`UkTIL`xBv zVe1SGRnZWbZq|U43UK@ek59gy^L!7EI#%o*PRrRw=^QjsRq~Ky|Ih5myyb_kP6wmC 
zxi?LrQqD^cZ(lGn4jqUxtRR42@>z`eEl1e^}X-^uOj ze{u{)V?|>LoE^R)F$@GMdHl!v7=S6fxA>p|OyMpt1+Ew^N&C4eM>>Yp<-k+D8D?JF zNxog&_ybIv!56*!^Qg->Xu~{fu}zHA>=)yuY0#OC#o^)Eu$(i0tR8_Tbt* z`reFkvTJh%G81m8AHLr|34IrhdVnL8OI#*b<7xV4q=nmMDENxs2wezyx1G6y}?nPcJ@WXz{u)kw_n^Rv$65~>?c+< z#6JLK;2=}ddV2(%(E>pskq|c_kUVST+E0yWaFU~iRYM&gKOzXg3|MM`TA3>ZeUq5L zR_%8(LG8jnR2f=^1!QAQopTXTjSB=V5kg!;vD79y{jNUv{&Gh6IknsDBW;8Ixjrw4 z>Kwr^UO|1u%kBC4vI7BDK6&L1fmmRbv~*=GR}ad%D6~Ko#P>g4BEMC7kmaUcwtN3Q zbW#qJg23AU&+>l*o&K-0e6nPkOR9I=gVo*74whc z0zf0=c}O=S>k+T%kAl1YFM-~EVgoqP-_;f~(SM0ZC!~I?i#b;Yx6uU++6OGc zA8PWqO~`Q2l(6Xe(L&0kf0Q2zIlr3Qzvx_I@p}{WSY$O28GW>!UHeIOJ6p&wRY`NR z?095q5HPC{D(=%IIYCsJgq3ujTCGp}AVe4G7(HyZTu z(-8EOnz6Ux5W6P;tm>5mfIX%RvW zb8u%q@mcVXp_Ni-b&v&jOK)90I0||Oj6lUt<>pfZ-=N2PMSj(S%no0Xt5}E@GY<3u zBF6n-BpfM_=e#zScYN&rNKcP^B0WPcJU@t#DW8B&Hur;6K)yal4!~C#i+#!D@pi|1 zE7k*Pl8EE?tlIg+tdD=kHa-1}4uhA3j~M?8JmeSkO_8||Ee(8-`tQK3HQ33A)7b`q z5pxM22^5hN?TXrm`kpV+6&3sjM&d6E8f{e(4R1y>6iI2Czw`Of@qChYLHwx|)FWSu zkp!fixiy%EYL{bFNVN2%%Y>lBX~`4`U>XOx>KQ^J4{xMZR#ry0glq57-$O|Ez>|ge z%Rii-_LPmJka)M2^0iu7HL7F?YgvYu-v3#-aVvkE^jYpzPO4LlwUlJMCtCdZQ3L?P zw!k%t-hq-z%Qiy`oWlL((#A~FMUSVLIMZ|XfLr9*i@nGt#<=A+zpI#&7hA-z>|YV` z0|3-~ryg$baDfdC<*24g*w2Nc0+$5={1|8EO`JU;CC0Xb~zBCOV1XRhe!(FAt8qM&YIxlKfjHce6si^NC2hm~35<8Nu$sSs}|BNuR43QBZw99WtiC=)Lh9Fr%KqGh}dkMzNW zADicmXDURa#N7qswNZii5#e{FrUTVV%&nrd#~JQFjMkc$o%{wsezUoQMjWU@*O}nj z?-r@jA$V92jr`u_WInrQ6+eTRn#LwkJ z-2MdV*q#28Xh^C#Tq5yAPLRE;`F1kLpGpQk%sxJgdFu`JmdA4c`6*qFc*#$0nC=`& z4R>7H3RX{}*NG?S>Kdq2^5M}(h|pT%Qj4uqNzg<_jg39#cm4I_Rrk7XLRH#nzWDEr!Cvee#NoE_OozfJvA1Nkk&2d=hhFg76c#j9&>I zq8wF)%$1kkn;WRBDh->o647}NCRgWV5$nEvma@}!*S|WC#hxxSnfvhjY9qBcS4E-Q z(uUQMLq^I=kjiNgQ9H_z1BG#NCxl4#pVyOk*4wRl)Co1*7#$JGdfpkuVEy~oPs-Q; zD>Pc<@mW|ztccO+EXn9lmXdHFxtqm+mDRBX+P4{Ai=MmuFV`my=03>CccNXJ`P~M) zEr=tu9%M!PN#Md$IL+##&^|`B6juLZ}nhXltR;@u&6PS_l5D4dny0MMYxEv zI@Te9rqEOGLzHWHNEi!3+NlE#y*rN6B}2@i+D|7n6PJj_K@$qS`N}AHM-on*?AW3w zHJ?CbtA?-Yo;Lqw*D`9y=a2BWAKt?iJ=t9?S;SPj(1DsVQ`N=diwy!6N?(G>33#(< zwu6ySEJcqpe*sQ!FI44`ywAcsXpx{h@}ndBVscU!W)w9mOPxuxaJvS3sR}gmiNJ(G z7VG2u)|s>OsIdm&Cepq~^4I)a06GP=&i!kDNQ0KbK$);zllR$tSYuP1ZdYsK;$Ytn zubs412&K6C2UT_6*2t=!`;p{=>TqhYA6b?h;x}$T6$(#P+m4l+&*l@CL22bxM_SC( zSXb0$oC~uaz_utf6Hg*Ok^vY8C>@DHp4J`*JZ~Pd_DsW^}(H9NVK00O#D#F`)26gIFxF0N;7X(?*9bcWzPHWGsks=mi zXU`oJKkgLbH$4Egw<+X_LwS+(sx^H??D0 zG9ptPVv7nLPwX@|aYxm0tYWj*z*b2vi|Xa=`{vEu44UZt|;f&FS|1)1q_u)Z>__u|)&&(TrWr=rscK{7hj{_h4etW_OuSKYN@RhW+GeV3S20UVY$Z(2?Hdigb2E0CmBabzk)nXnDl(}zHzbhA zizhT@feEo^Ztb1kqI3pYM^52eG2t<+?ZF)S)dQkIau+JnVU5DgTh;2~N)T22aptv#YDQx0dmn zZW8M3O|RWMqURCvK6Q1WT{>$@UC6w-Tdmi`H{ED)uux$$p8t?g(!elDfy#@+9bmIC zu&30S-&4=#Y>P9Y=;u+BLlsir{t%Np>Y2Nj(DFj^;SQX6m?9wf$g5=LWHj;|YMT(z z{F+~6UtODP+;gM#0%jT3?)%8YKZw;k-kZDm|zY{#!=vB8>%6ZgqxU5Y2 zfDxWJe;%2u692ioULVfrFwMjFfySQhb+ESbQPBMsxTld6N}2?w`y$>B*>LtyHC?)A z`2<#pn9AStA~m!pWZ1DfcmuMM+QrQ|1LegpYKnGbO(ktxPo7cQ`>dC=s~m3%v=cRL zy^IcXS+OsX?-_4A;NOotcx3r+#7e(_nnw6YwtJkAZ z_Atlm`&isK`BURTC)4ZsX1}ZNr4#n_8a}C0T#x0N7tRK{4? 
zH3X3044!C?IbZ~l4~ti*H65>hoWiRA20NuaNQ}fv27@b3fs1R8u7g~;D`O>#n4c+) zscCF%JeyTX?kwI?^ob5PlvHEm(Of_DCN{hI5bXP*=MBVJYnk9Is%5m^-8N?rJr^XG5l*siuhN zOxd?nQpOf?f0E8xO{KCY1XT{Z`jh@(cBSafl(3h<@mzK)<4Wd5)w96|49_f_c0nmq zo3rt^iR4`3$xym1;>&td$gGa?V)}rh(d7Nv#a}JU6?Q3N-TAsc zM^ri;nElRRu@qLb$19KWZ^+f$S$pkr_chK5?;+~O$n2fX=6(35ha>!hE z=a=v(h4|K+ck6U*Z8-N=JkgC$0+dT17wRtHkdwO3gI;lp$EKjmj&q;j5{B%HdIB;> zjdG8}&FF)E`ETBK-StjAK?&!{Et z_Xh7kPFA5|gRX(k+5Tqf^`pI&VL63)i@CPYRq~nNvY*YJY7Laj)9XZMvp-c6T->3K z=_hIYb!}XnvR!b+|Nh1axYLo2=g4!K_#r6x4PYoUx_EnBIi<}M;?dL8)O?|nXqa!2 z;(6aW+I{_7nVf;gMg@XAtHHq`9gDY#;iDm5M!(FKqg5AybARaXm;(htkv3MuyvvRQ zT%|om{N!N6(F<-^-dRoC$fPuMT7-N5&QWAwS5}H#J1D>DwC-S3WcbMGi#JmnB) zH_LeV#Rz1sMN0QTlUCZb*!V#mvwk zQNsP8=E$n9Ruv^eKaOY^!A*x_qloHlm19L6j_kOh`1G6!D=TjTx^EE;-e>O4lU6y* z-{{OzET8n0#rF`?#ca;Vg`$?dEQQkV7sg5PnUQ9*EyZw4AAA$p?cv=(*A2NtIT~l4 zU4gzRCo5IjmhmN{^H)l?{Ot0%qJU7Oz>Cs!WPh$Wkwy{>1Zn zvcEh?w}(XKMv{Lh#EiZy%?Y|okczrHosf&?h0NqV$Zoy+B>jYQmhlPdH+Ed^tZdhS zlHne`vZtZ91xFKeW&N;6eQgo4-Z-`sDIIIp|H|`JMJYN}$w}+DY=)uUTTZ7~uPRK8 zq(RA?MNZ#-ju_wl9lrc=*84m6+=F!E7a}ZX_Uh!dyg9w34ic}2l26yJFgWsz#dl9S zCBKgC=DvPZClX0c&7sa@mvAh=JaCuMZq#Ju%f`D&@$zc!{#}xvPZ1p>uO2eWQXh55 zjm(@)&D7XzhD<5Z@Q>O>XGkOVCZFqk$*{{Edz}=sLK)FR`0>?r(x7voC?xL_+zndA)AY%C40$aj$0CivupG_|4I@ zvUp%2$^ALDhwB?^Sku)TdpF8VI9qyN+w{cUs#R6Eh>5#??8MjL872z*DFWd?>?LL< zm2Bg0a7+CJ_}xb+vpfDd#vpp=k=YX(pooNVbQSmNYIL8+Ba<39 z>6{8P0+A07Wr#hv3FdYF#RG_(nYI=ezc@&Fjbg7IIo%VRrbC|j%ifbDF3;2z9~Pl3 z8HnuP3z#*8N(4lQQH0EQW@>n3uVo?lamAHfrRv z@}IpvkBn*mTZj$NdSQQc-pT_8iSDO&u2ZzY^AHBw+J{o@_G0-l4qH|B6we|~!BpRV zO|yzv>cu0DxD)HVetAc+A1F?<+_40xa)jTq5nkg(N6UJH1?^@WWX0uK+2Yb=chAtEA%(GL_%McFUDkit zXcT?g>>Exg?tS`9F>e#ioML4ttNQ|BY5CjyKlBqq+E{J#x(BJ}@o(%JR+MuyYeC@` zj2%7<&z_69!v0#Pi~ENe8sQmT3~}Aw+W6K5bJjFr#Bm{|@XOO;@e~VB5iKiHC8A2Z zqp9MbGR9PT1|G%~RD&ZkCPLJagP9WP=ftpWDlkQ-DaU9=rM9^zH_7+z^>Wu<<6-@r>U zZGQK>fJkvK{Kd%e;nsybD4BP0R?IyJ7Z4Ee=FnI8rbL`=XVo+oH-4=C>$tqC*z663 z7YA$Xo}FC$IsSW4h-WEr%9ewmk_X3C1;`lU_k8I{;tF*u6sKJ0Lh~yt59kNejk}PR zHS7(Ijni-@7pLdnE8(3C{$M81d3R;uIy4~YxV!V^f6fKwwPpDD)4_o3S;MKt8+DoY zy;Z};!>Gl*y*T3ZsEab!vsgx|_OAKN*6lEtm$$7AOCF*!lWDv-Htw+LUu3qExg}CD zx$m!LJuZ0i^Kr9Bar9}=$*X1$>>?`RObSw^SB3*ep_~e#f`;yU0WLw%`Rqdz<;sH( z>gGbmg4Zbs<6-C-K1{De8WjvUxb$nC@>T~k0)+NQG^akddg#^aWV_jv-$dNHb*o4| z3Ohqz{=v)jC)!x8M+4$thS$c6XVkN_LYdl2fPzQfax!=@e?2ICXqQ@nMaFF+U8jot zA|K)|?rs&5*0$n!F zMXfONr254Np|ic=QB;lN)XduUa*|^owx?v3$04Euw9ar@Z$>>~{xQDz;OrjGSH4Uc zNJA%9QIt%n{VqpywZ{n`s8znmV^Fj+e{W9S^Uc5{AfPy&tCDL@O56Y22}ewF9fZXu zY`}cgTqIjw2tcv`OEvnAloo}9@CW<97JHX=eKKNd**baVVx7DxQmeUWq^8AW<#q<{StIW%) zLmY^`Zgw3s8fsEbWwdm-)dBC33mhIgS!y1V2x<^@v=VJp1VXhG$^D*^OyayoX$hR; z^+&U{YDwq#hucpRc0;JMrVn->qt%!$0p1j8-JYZIQIXb=#!s4KxvOIodg4c{$p-k#NesqNk@f`s4HK zDf8>ouyOaiUh_DEvBq_Y>+GbN^K=XJpT#=QBYpA8NsKe9BfD;q@<^X;x->bi9f2OV zs_k^WV5fygcRrcD=5w7%Vr~7OjS6C}ztXzpw_p)I-7Tzef{_PaeMGP?&;M0h-*gE! 
z6rjJrJzm}-{?@K-nU_i`BL?8wXn^?XPuys|gx_85dT%|$6ZU_F$a>%zVhhQruCj3Y z%zKQSY>UYaZT<@nZz!1iWJouvda7Eepj;0fSKqE}AJwiEInoKxs(^FuB*$~<`Q@_| z3%-t%S1KV)%uC>OVvpCzJg=AKP0+T2mnAkl<_VQEEoYQV=Pd0_y-C*;@~E78gFEA@ z>$47V8-+p*#mQ^A?=al-a1Rm0N@Vg&fJZLq1Jw6ByqmM>RCB@FWGIq+OOG?*sR;1( zqUDHiX2j{3TnJw04^|WT5F$rfnrN7g3XO;3UZVAe4T=5>$pGgu`*?Ge^#Alr<@eUvQ;!P!Y+uHFa<-Q%^EkmX7rWw9)MukcXYY@H zV2V$C7NZq2=eef@YcX8BIQxyk&Mp{PY26Z98FXrUTsKXPPd`6=S2A>XN!fnx3>WuU zMWFq@<4S?HYha*Gqi2n<-{_g-^jJ4fwyO6|bdfI~tQ>h>aFUu@y?tOTS~GSOjStg^ zWw!9_V$(tCI}OUjxvY(CY|VI|Y}C~{t(4}Jlx)j?*s6shXY9G*@RtD(HDAxvP6u3DX|C?O8J$w5{J=dPtmxX7;KXd{Ujqbb>$xeQAFz6=fQB%N!~?eK_Ol3?rmR zoKDAgk_50xM#1P8$pwyv1_-^e@w$=ByQa8Y#sXc}{UYkUZETckGZG`<%Bys@P((McViCh$XG?Lu0UuNLvjci|mljNfCl)a9NedG_5S;pKg zk>llz)wm|lrJ_DnuT{-WX|)5j6^Gs>k5>4-!FhA0nXAK~_aOul$@l1avg`r}_+ovu zUGefE&NwaF1kc~d(+-?NDh+e5whddpkAAgUD(W*dsTmUc^v zHz%ko4w)76ra**$ud(_eJ3IU9bZx8>s>4&#C!lj?v1HZIer`j_n{xuEA#IqGFw>{d zpsw<(QZ?7iJ@uiM6_EC$-6hx==UU;dL+#c0Wo!%nDR(%9Wqb){m|MBSsGrBVkP$hz7CP zHZ1ETF{ar?(0en&4z@hnINL4a7#JiMOgG$`+9ib1PB1vId8u;B?dat_RVi&q?-sWB z-H^sL9PibvADg`fbK5a2B~I0^eKo5_N0Z(2LE7B-iZ*uUhn`JfMP z#(k~Ox$fo0?d^=Ndx)r0L`7l2gR{?-l`VyMv)MSHhvq^-|IL0@8d0w_O#hD4?rTq{ z>Id@ruZEMkun*lBz4b;i`iz^Rx4i|hg?c+wQ5X$1VkMdPqNu+A?KxtO*6G>q!;%7_h?12I>a*NB`VH`sinEtSYQ4GaPx#DXqrZ|aYFRWy61mdA zdHa@_LJ0YOyJ^jPwp|TH%Fd--F!ZpyMeF2!()N9h12l67&OR23FLR|Y#WJ}iovWpg zJu1>_fd{w3CrmMHNnq}R3Cb_$AGD>3!rsJYb!MxaJ=UFmcg=Qa#E$QVf$x$t_Ljw0 z6Y`h6@QtB64Hy!!G_EtxNmh@eRIh(M*{s{jG~0>Qn|#_x0Grg$uQo=%Ggmh;=C=R> zdEGOB4Cs1A(T!U}4RP~Ji1cDD8bf2x3l2#!WDsUSp(6`5ODT?r_rZ$<-bF4)<~VdP zN|BIG{dZv&X79zuCk7%udfCO*<}!3m2kiC5d97{gAQEkCGlY_;%qZtYh5akn8Z)d17X@_x}G`Fw5wo@*;S;>k!P z34!hqhnsNDn=lG5kI_fkttJ*0W6{D5%5ETDVBp5r6 zZi5Vt7!7RS)?F5>bFW3s_|epcxl_?_%I_;;X?7%Zw>ncblyBQz9M}nK{?}_r=AsJR z>Eqv!6rcfY3*J}5hb@}FKAwPkU9Bax_68O1Njt?j>t*Z`Sh!B>!`U`{76#|vaXv>R zhY#B6>0mptY}oe0SxRvi&eLv72DEl1^EdRS1CQWpJ(b;YmG$p=n5-=P+k^vu8x%1t zTx-a~IqqV4gmSeL8H;XBBb@13@(BMuvOi`wbR!5E?zL{p8BPvEs7@3=1&7qknBylM zd&v+;s>ddITij>M70+ku85Y;a2i;^nZ?yzF#X z$OEDhwp0~-SddEdt1yGmHPhtenK3_^gUf-?ATW$Lt=U!1c^pI@T_sjpIiT=bDD-Fy z^$xY{%@ob`?Kj(?!fvizLlTpGrh?bIWq1}I(ejTl@9v=(4VR|llR49GrudCZ0BBEV zq%Gtv8na6dQZ$)cWv6zBFz=c?-PM9`%kBOmZ|dlUHE!QJyh0Jg5;VcJwaXLkEpU=P z9nOg9wCw$8T>_0+yA8KEaA;3h9rYqj!ZnUFo(JIYT%0i?P(O&Sjx_15n4e~#EY<4S z+9^&G@g}t0va+(Uob(vUIBYx65P=|%Q<|?BQCe13)>j81b|FwPr+|S1Vyfw;%{Jov zuyWL{U-=%Dp?!ues}D(?+?TqaBX&B;GiANJzxSWA%q~FX!NbYrtXqq>F$FDlTCNyW znp-Wz``$w+u7v*F{T+n-RP$PZ`y`0mYbP;*2^0#=4Ta))~{=$XdZLnW3455rS^ zj9Rv9ym1=tGEEfK#)jv)t)U`PHxhGmvP92+&rwRpylZ-6cqhqH?=~im*82TrfBpV0 zli58?R&t^Y4Pa$Kk#Q#PojaLu+o53bo_%A?DOUVi`z zPx_@yM(fij+@~$bu*V(bFfW|M7r)n^iGM%8qvDO2uBHIyMD?i$J5Dem%3bVN^VQXC zB*Fo1qB5N!i>P<;a-AVt&5eFWt$uqpdzkTQAoA|ak9(zwv$BjD;zymTs4dEG8CiH^ zSVB}&b_Pw#>pgUt4c(>y#yI8`HP&mUe+QGfE=)e|d3iIu3tYF|hS_~QWBniNqPP-n z{QRgM-9j3DIW=4ye|Q+*e~4q|c)CqhSa?{~fp!gaJvL}G>dz#%{MaiGhlyacLOqrd z-k-C>b;mJpfrNEfE;ZF?a)^?zb&y7^uCBgK1+O$4<<2vtsC%)&SLXHt_$qjn4G1pd z7SAWp;~JP!N(Vk{6u*;gaQ%4=_BavEv`DsEWL3NnkMUw8d~do=|8&T0s{(6|UI-pV z+0KgCGbAJB+pJyVaEZ=7Qe2zyD!O1t#jI@5i+%z2y}!17xLQEz_z*2}0q zPs5RpP(&FlJ;!VPEA{!OHSwM6zY?CM3}W)eupn4?(1t;)uH^fcA?_BzbfTZKXD__c z{q~QkEn~xA7X4n>*`jyFeXlRdNVALk6}a9<<<*g5+dJKyA3+DF&PN#$rMf$Wnc~U> zdL8&Z3Xj$1+e8Lh3R1nlQaUw78+p+;v8`=4whF|Z%af&w+7J`K6jT>`;InTU+v{C& ziIDOhMqlVxd=}VPRZ{Er$MhUQX^uKh%6qR1$1$ihmHKGPn9ejVuDCK<3};fGg;6QQ zydHPbBd>d?e+U!jH)YocRapR*zK^Vqew;e;KR8PxlxncWTJ>4LhpBt{c_7jWLWZf+ zMLW+&0+If$p(*u>B|IxJRpC1fbcA*~Zy5=nJ9b85r8b>*H)$0&9_Q;3+NHFHkkpyT zZe?NLhVhheVY+Tj6P%foJDV64bt2t@mH>D|sa1Q^QP5vGy}q3G((Q0`Vu%>PVs%?RiyGv1w%U1HEw6cX?nr`xF~v9ReZ`T 
[GIT binary patch literal data omitted: unrecoverable base85-encoded payload; nothing legible survives in this span.]
Date: Tue, 30 Apr 2024 10:04:41 +0800
Subject: [PATCH 56/72] Bug fixes
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../ref_audio_selector_webui.py             | 34 +++++++++++++------
 Ref_Audio_Selector/tool/audio_inference.py  |  2 +-
 Ref_Audio_Selector/ui_init/init_ui_param.py | 19 ++++++++++-
 3 files changed, 43 insertions(+), 12 deletions(-)

diff --git a/Ref_Audio_Selector/ref_audio_selector_webui.py b/Ref_Audio_Selector/ref_audio_selector_webui.py
index 033035da..aa3ef5a8 100644
--- a/Ref_Audio_Selector/ref_audio_selector_webui.py
+++ b/Ref_Audio_Selector/ref_audio_selector_webui.py
@@ -71,7 +71,8 @@ def convert_from_list(text_work_space_dir, text_role, text_list_input):
     return i18n(text_convert_from_list_info), text_sample_dir


-def start_similarity_analysis(work_space_dir, sample_dir, speaker_verification, base_voice_path, need_similarity_output):
+def start_similarity_analysis(work_space_dir, sample_dir, speaker_verification, base_voice_path,
+                              need_similarity_output):
     similarity_list = None
     similarity_file_dir = None

@@ -129,7 +130,8 @@ def sample(text_work_space_dir, text_role, text_sample_dir, dropdown_speaker_ver
         ref_audio_dir = os.path.join(base_role_dir, params.reference_audio_dir)

         time_consuming, (similarity_list, _, _) \
-            = time_util.time_monitor(start_similarity_analysis)(base_role_dir, text_sample_dir, dropdown_speaker_verification, text_base_voice_path,
+            = time_util.time_monitor(start_similarity_analysis)(base_role_dir, text_sample_dir,
+                                                                dropdown_speaker_verification, text_base_voice_path,
                                                                 checkbox_similarity_output)

         text_sample_info = f"耗时:{time_consuming:0.1f}秒;抽样成功:生成目录{ref_audio_dir}"
@@ -334,8 +336,9 @@ def similarity_audio_output(text_work_space_dir, text_role, text_base_audio_path
             raise Exception("说话人验证模型不能为空")

         time_consuming, (similarity_list, similarity_file, similarity_file_dir) \
-            = time_util.time_monitor(start_similarity_analysis)(base_role_dir,text_compare_audio_dir,
-                                                                dropdown_speaker_verification, text_base_audio_path, True)
+            = time_util.time_monitor(start_similarity_analysis)(base_role_dir, text_compare_audio_dir,
+                                                                dropdown_speaker_verification, text_base_audio_path,
+                                                                True)

         if similarity_list is None:
             raise Exception("相似度分析失败")
@@ -383,6 +386,8 @@ def create_config(text_work_space_dir, text_role, text_template, text_refer_audi
     text_work_space_dir, text_refer_audio_file_dir \
         = common.batch_clean_paths([text_work_space_dir, text_refer_audio_file_dir])

+    global hide_config_file
+
     config_file = None
     text_create_config_info = None
     try:
@@ -834,7 +839,7 @@ def init_ui():
                                                            visible=False)
                     dropdown_refer_type_param.change(chang_refer_type_param, [dropdown_refer_type_param],
                                                      [text_ref_path, text_ref_text, text_emotion])
-                    text_whole_url = gr.Text(label=i18n("完整地址"), value="", interactive=False)
+                    text_whole_url = gr.Text(label=i18n("完整地址"), value=init.text_whole_url_default, interactive=False)
                     text_text.blur(lambda value: rw_param.write(rw_param.text_param, value), [text_text], [])

                     text_ref_path.blur(lambda value: rw_param.write(rw_param.ref_path_param, value), [text_ref_path], [])
@@ -849,6 +854,11 @@ def init_ui():
                                      text_emotion],
                                     [text_whole_url])
                     text_text.blur(lambda value: rw_param.write(rw_param.text_param, value), [text_text], [])
+                    dropdown_refer_type_param.change(whole_url,
+                                                     [text_url, dropdown_refer_type_param, text_text, text_ref_path,
+                                                      text_ref_text,
+                                                      text_emotion],
+                                                     [text_whole_url])
                     text_ref_path.input(whole_url,
                                         [text_url, dropdown_refer_type_param, text_text, text_ref_path,
                                          text_ref_text, text_emotion],
@@ -883,7 +893,7 @@ def init_ui():
                 button_model_inference = gr.Button(i18n("开启批量推理"), variant="primary", scale=4)
                 text_model_inference_info = gr.Text(label=i18n("批量推理结果"), value="", interactive=False, scale=4)
                 button_model_inference_result_open = gr.Button(i18n("打开目录"), variant="primary", scale=1)
-        with gr.Tab(label=i18n("第三步:进行参考音频效果校验与筛选")):
+        with gr.Tab(label=i18n("第三步:进行参考音频推理效果准确度校验")):
            gr.Markdown(value=i18n("3.1:启动asr,获取推理音频文本"))
             text_asr_audio_dir = gr.Text(label=i18n("待asr的音频所在目录"), value=init.text_asr_audio_dir_default,
                                          interactive=True)
@@ -918,7 +928,8 @@ def init_ui():
                                                          value=init.text_text_similarity_analysis_path_default,
                                                          interactive=True)
                 slider_text_similarity_amplification_boundary = gr.Slider(minimum=0, maximum=1, step=0.01,
-                                                                          label=i18n("文本相似度放大边界,因为原始模型输出的相似度差异太小,所以进行了一次放大,放大逻辑为,边界值以下归0,边界值到1的区间重新映射到0-1"),
+                                                                          label=i18n(
+                                                                              "文本相似度放大边界,因为原始模型输出的相似度差异太小,所以进行了一次放大,放大逻辑为,边界值以下归0,边界值到1的区间重新映射到0-1"),
                                                                           value=init.slider_text_similarity_amplification_boundary_default,
                                                                           interactive=True)
                 slider_text_similarity_amplification_boundary.change(
@@ -979,7 +990,8 @@ def init_ui():
                 button_similarity_audio_output_result_open = gr.Button(i18n("打开目录"), variant="primary", scale=1)
                 button_similarity_audio_output.click(similarity_audio_output,
                                                      [text_work_space_dir, text_role, text_base_audio_path,
-                                                      text_compare_audio_dir, dropdown_speaker_verification_2], [text_similarity_audio_output_info])
+                                                      text_compare_audio_dir, dropdown_speaker_verification_2],
+                                                     [text_similarity_audio_output_info])
                 button_similarity_audio_output_result_open.click(lambda: open_file(hide_voice_similarity_dir), [], [])
             gr.Markdown(value=i18n("4.2:如果发现存在低音质的推理音频,那么就去参考音频目录下,把原参考音频删了"))
             gr.Markdown(value=i18n("4.3:删除参考音频之后,按下面的操作,会将推理音频目录下对应的音频也删掉"))
@@ -989,7 +1001,8 @@ def init_ui():
             button_sync_ref_audio.click(sync_ref_audio, [text_work_space_dir, text_role, text_refer_audio_file_dir,
                                                          text_inference_audio_file_dir], [text_sync_ref_info])
         with gr.Tab("第五步:生成参考音频配置文本"):
-            gr.Markdown(value=i18n("5.1:编辑模板,占位符说明:\${emotion}表示相对路径加音频文件名;\${ref_path}表示音频相对角色目录的文件路径;\${ref_text}:表示音频文本"))
+            gr.Markdown(value=i18n(
+                "5.1:编辑模板,占位符说明:\${emotion}表示相对路径加音频文件名;\${ref_path}表示音频相对角色目录的文件路径;\${ref_text}:表示音频文本"))
             text_template = gr.Text(label=i18n("模板内容"), value=init.text_template_default, lines=10)
             text_template.blur(lambda value: rw_param.write(rw_param.text_template, value), [text_template], [])
             gr.Markdown(value=i18n("5.2:生成配置"))
@@ -1001,7 +1014,8 @@ def init_ui():
                                     [text_work_space_dir, text_role, text_template, text_refer_audio_file_dir],
                                     [text_create_config_info])
             button_create_config_result_open.click(lambda: open_file(hide_config_file), [], [])
-    button_sample.click(sample, [text_work_space_dir, text_role, text_sample_dir, dropdown_speaker_verification_1, text_base_voice_path,
+    button_sample.click(sample, [text_work_space_dir, text_role, text_sample_dir, dropdown_speaker_verification_1,
+                                 text_base_voice_path,
                                  slider_subsection_num, slider_sample_num, checkbox_similarity_output],
                         [text_sample_info, text_refer_audio_file_dir])
     button_sample_result_open.click(open_file, [text_refer_audio_file_dir], [])
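Note: the wiring above registers the same whole_url callback on the .change/.input/.blur events of every field the full URL depends on, so the read-only 完整地址 (full URL) preview refreshes as the user types. A minimal, self-contained sketch of this live-preview pattern — the component names here are illustrative, not the project's actual components:

    import gradio as gr

    def build_preview(base_url: str, text_param: str) -> str:
        # Compose a GET-style preview URL from the configured base and parameter name.
        return f"{base_url}?{text_param}=test"

    with gr.Blocks() as demo:
        base_url = gr.Text(label="base url", value="http://localhost:9880")
        text_param = gr.Text(label="text param", value="text")
        preview = gr.Text(label="whole url", interactive=False)
        # Recompute the read-only preview whenever either input changes.
        base_url.input(build_preview, [base_url, text_param], [preview])
        text_param.input(build_preview, [base_url, text_param], [preview])

    if __name__ == "__main__":
        demo.launch()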
diff --git a/Ref_Audio_Selector/tool/audio_inference.py b/Ref_Audio_Selector/tool/audio_inference.py
index e1aa8c2f..0d9560b0 100644
--- a/Ref_Audio_Selector/tool/audio_inference.py
+++ b/Ref_Audio_Selector/tool/audio_inference.py
@@ -184,7 +184,7 @@ def generate_audio_files_for_emotion_group(url_composer, text_list, emotion_list
     end_time = time.perf_counter()  # mark the end of the timing window
     elapsed_time = end_time - start_time  # compute the elapsed time
     # build the log message
-    log_message = f"进程ID: {os.getpid()}, generate_audio_files_for_emotion_group 执行耗时: {elapsed_time:.6f} 秒"
+    log_message = f"进程ID: {os.getpid()}, generate_audio_files_for_emotion_group 执行耗时: {elapsed_time:.6f} 秒;推理数量: {has_generated_count}"
     p_logger.info(log_message)

diff --git a/Ref_Audio_Selector/ui_init/init_ui_param.py b/Ref_Audio_Selector/ui_init/init_ui_param.py
index c208e165..75cf1105 100644
--- a/Ref_Audio_Selector/ui_init/init_ui_param.py
+++ b/Ref_Audio_Selector/ui_init/init_ui_param.py
@@ -1,6 +1,7 @@
 import os
 import multiprocessing
 import Ref_Audio_Selector.config_param.config_params as params
+import Ref_Audio_Selector.tool.audio_inference as audio_inference
 import Ref_Audio_Selector.common.common as common

 rw_param = params.config_manager.get_rw_param()
@@ -44,6 +45,8 @@ text_api_v2_sovits_model_param_default = None
 # inference service request URL and parameters
 text_url_default = None
+# full inference service request URL
+text_whole_url_default = None
 # text parameter name
 text_text_default = None
 # reference parameter type
@@ -112,7 +115,7 @@ def init_first():

 def init_second():
     global text_api_set_model_base_url_default, text_api_gpt_param_default, text_api_sovits_param_default, text_api_v2_set_gpt_model_base_url_default, text_api_v2_gpt_model_param_default
-    global text_api_v2_set_sovits_model_base_url_default, text_api_v2_sovits_model_param_default, text_url_default, text_text_default, dropdown_refer_type_param_default, text_ref_path_default
+    global text_api_v2_set_sovits_model_base_url_default, text_api_v2_sovits_model_param_default, text_url_default, text_whole_url_default, text_text_default, dropdown_refer_type_param_default, text_ref_path_default
     global text_ref_text_default, text_emotion_default, text_test_content_default, slider_request_concurrency_num_default, slider_request_concurrency_max_num

     text_api_set_model_base_url_default = empty_default(rw_param.read(rw_param.api_set_model_base_url),
@@ -137,6 +140,9 @@ def init_second():
     text_ref_text_default = empty_default(rw_param.read(rw_param.ref_text_param), 'prompt_text')
     text_emotion_default = empty_default(rw_param.read(rw_param.emotion_param), 'emotion')

+    text_whole_url_default = whole_url(text_url_default, dropdown_refer_type_param_default, text_text_default,
+                                       text_ref_path_default, text_ref_text_default, text_emotion_default)
+
     text_test_content_default = empty_default(rw_param.read(rw_param.test_content_path), params.default_test_text_path)

     slider_request_concurrency_max_num = multiprocessing.cpu_count()
@@ -146,6 +152,17 @@ def init_second():
     slider_request_concurrency_num_default = min(int(slider_request_concurrency_num_default), slider_request_concurrency_max_num)


+# Compose the full request URL from the base URL and parameter names
+def whole_url(text_url, dropdown_refer_type_param, text_text, text_ref_path, text_ref_text, text_emotion):
+    url_composer = audio_inference.TTSURLComposer(text_url, dropdown_refer_type_param, text_emotion, text_text,
+                                                  text_ref_path, text_ref_text)
+    if url_composer.is_emotion():
+        text_whole_url = url_composer.build_url_with_emotion('测试内容', '情绪类型', False)
+    else:
+        text_whole_url = url_composer.build_url_with_ref('测试内容', '参考路径', '参考文本', False)
+    return text_whole_url
+
+
 def init_third():
     global text_asr_audio_dir_default, text_text_similarity_analysis_path_default, slider_text_similarity_amplification_boundary_default, text_text_similarity_result_path_default
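Note: whole_url delegates to TTSURLComposer in tool/audio_inference.py, whose implementation is not shown in this patch. The sketch below is one plausible reading of what such a composer does, with '测试内容' as placeholder text and the trailing False presumably skipping URL-encoding for display; the class and its internals here are assumptions for illustration, not the project's actual API:

    from urllib.parse import quote

    class SimpleTTSURLComposer:
        # Hypothetical stand-in for audio_inference.TTSURLComposer.
        def __init__(self, base_url, text_param, emotion_param=None):
            self.base_url = base_url
            self.text_param = text_param
            self.emotion_param = emotion_param

        def is_emotion(self):
            # Emotion mode is active when an emotion parameter name is configured.
            return bool(self.emotion_param)

        def build_url_with_emotion(self, text, emotion, need_encode=True):
            if need_encode:
                text, emotion = quote(text), quote(emotion)
            return f"{self.base_url}?{self.text_param}={text}&{self.emotion_param}={emotion}"

    composer = SimpleTTSURLComposer("http://localhost:9880", "text", "emotion")
    print(composer.build_url_with_emotion('测试内容', '情绪类型', False))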
From 7660f1c8fb55e887115b4c9266558bc74b58f41e Mon Sep 17 00:00:00 2001
From: Downupanddownup
Date: Tue, 30 Apr 2024 10:59:31 +0800
Subject: [PATCH 57/72] Improve monitoring info
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 Ref_Audio_Selector/tool/audio_inference.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/Ref_Audio_Selector/tool/audio_inference.py b/Ref_Audio_Selector/tool/audio_inference.py
index 0d9560b0..0cf0e72a 100644
--- a/Ref_Audio_Selector/tool/audio_inference.py
+++ b/Ref_Audio_Selector/tool/audio_inference.py
@@ -143,6 +143,7 @@ def generate_audio_files_for_emotion_group(url_composer, text_list, emotion_list

     all_count = len(text_list) * len(emotion_list)
     has_generated_count = 0
+    all_text_count = sum(len(item) for item in text_list)

     # compute the Cartesian product
     cartesian_product = list(itertools.product(text_list, emotion_list))
@@ -184,7 +185,7 @@ def generate_audio_files_for_emotion_group(url_composer, text_list, emotion_list
     end_time = time.perf_counter()  # mark the end of the timing window
     elapsed_time = end_time - start_time  # compute the elapsed time
     # build the log message
-    log_message = f"进程ID: {os.getpid()}, generate_audio_files_for_emotion_group 执行耗时: {elapsed_time:.6f} 秒;推理数量: {has_generated_count}"
+    log_message = f"进程ID: {os.getpid()}, generate_audio_files_for_emotion_group 执行耗时: {elapsed_time:.6f} 秒;推理数量: {has_generated_count}; 字符总数:{all_text_count};每秒推理字符数:{all_text_count / elapsed_time:.3f};"
     p_logger.info(log_message)

From 5843d56c4e6164116c967b797849ed6aab280c66 Mon Sep 17 00:00:00 2001
From: Downupanddownup
Date: Wed, 1 May 2024 00:11:27 +0800
Subject: [PATCH 58/72] Bug fixes
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 Ref_Audio_Selector/ref_audio_selector_webui.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/Ref_Audio_Selector/ref_audio_selector_webui.py b/Ref_Audio_Selector/ref_audio_selector_webui.py
index aa3ef5a8..14a5d938 100644
--- a/Ref_Audio_Selector/ref_audio_selector_webui.py
+++ b/Ref_Audio_Selector/ref_audio_selector_webui.py
@@ -345,7 +345,7 @@ def similarity_audio_output(text_work_space_dir, text_role, text_base_audio_path

         text_similarity_audio_output_info = f'耗时:{time_consuming:0.1f}秒;相似度分析成功:生成目录{similarity_file_dir},文件{similarity_file}'

-        hide_voice_similarity_dir = os.path.join(text_work_space_dir, params.audio_similarity_dir)
+        hide_voice_similarity_dir = os.path.join(base_role_dir, params.audio_similarity_dir)

     except Exception as e:
         logger.error("发生异常: \n%s", traceback.format_exc())
@@ -739,7 +739,7 @@ def init_ui():
                                      text_api_sovits_param], [text_api_start_set_model_request_info])
             with gr.Tab(label=i18n("fast项目下api_v2.py服务")):
-                gr.Markdown(value=i18n("2.1.1:请到你的项目下,启动服务"))
+                gr.Markdown(value=i18n("2.1.1:请将训练完毕的模型,复制到你的项目文件下,启动服务"))
                 gr.Markdown(value=i18n("2.1.2:设置GPT模型参数"))
                 text_api_v2_set_gpt_model_base_url = gr.Text(label=i18n("请输入api服务GPT模型切换接口地址"),
                                                              value=init.text_api_v2_set_gpt_model_base_url_default,
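Note: in PATCH 57 above, all_text_count sums characters over the text list once, i.e. per emotion pass; a later fix (PATCH 61 below) multiplies it by len(emotion_list) to get the total synthesized characters. A runnable sketch of that bookkeeping — the sample data is made up, and time.sleep stands in for the actual inference loop:

    import time

    text_list = ["你知道这不可能!", "如果有人故意破坏呢?"]
    emotion_list = ["开心", "平静", "生气"]

    all_text_count = sum(len(item) for item in text_list)  # characters per emotion pass
    all_count = len(text_list) * len(emotion_list)         # total inference calls

    start_time = time.perf_counter()
    time.sleep(0.05)  # stand-in for the actual inference loop
    elapsed_time = time.perf_counter() - start_time

    # Total characters synthesized = per-pass count x number of emotions.
    print(f"calls: {all_count}; chars/s: {all_text_count * len(emotion_list) / elapsed_time:.3f}")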
From 4ebcb3bf1b19ee4f60b7eb536b7d9b3199e0f509 Mon Sep 17 00:00:00 2001
From: Downupanddownup
Date: Wed, 1 May 2024 00:24:40 +0800
Subject: [PATCH 59/72] Bug fixes
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 Ref_Audio_Selector/tool/audio_inference.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/Ref_Audio_Selector/tool/audio_inference.py b/Ref_Audio_Selector/tool/audio_inference.py
index 0cf0e72a..08d44cdb 100644
--- a/Ref_Audio_Selector/tool/audio_inference.py
+++ b/Ref_Audio_Selector/tool/audio_inference.py
@@ -216,7 +216,7 @@ def start_api_set_model(set_model_url_composer, gpt_models, sovits_models):


 def start_api_v2_set_gpt_model(set_model_url_composer, gpt_models):
-    url = set_model_url_composer.build_get_url([gpt_models])
+    url = set_model_url_composer.build_get_url([gpt_models], False)
     logger.info(f'start_api_v2_set_gpt_model url: {url}')
     response = requests.get(url)
     if response.status_code == 200:
@@ -227,7 +227,7 @@ def start_api_v2_set_gpt_model(set_model_url_composer, gpt_models):


 def start_api_v2_set_sovits_model(set_model_url_composer, sovits_models):
-    url = set_model_url_composer.build_get_url([sovits_models])
+    url = set_model_url_composer.build_get_url([sovits_models], False)
     logger.info(f'start_api_v2_set_sovits_model url: {url}')
     response = requests.get(url)
     if response.status_code == 200:

From 02fabe807f6fd02ac6843cd5873a59c9acb05dcd Mon Sep 17 00:00:00 2001
From: Downupanddownup
Date: Wed, 1 May 2024 00:46:42 +0800
Subject: [PATCH 60/72] Bug fixes
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 Ref_Audio_Selector/tool/audio_inference.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Ref_Audio_Selector/tool/audio_inference.py b/Ref_Audio_Selector/tool/audio_inference.py
index 08d44cdb..885d046d 100644
--- a/Ref_Audio_Selector/tool/audio_inference.py
+++ b/Ref_Audio_Selector/tool/audio_inference.py
@@ -199,7 +199,7 @@ def inference_audio_from_api(url):
         # return the audio data as a byte stream
         return response.content
     else:
-        raise Exception(f"Failed to fetch audio from API. Server responded with status code {response.status_code}.")
+        raise Exception(f"Failed to fetch audio from API. Server responded with status code {response.status_code}. Message: {response.json()}")

From 8a10c528e386b46968c3b52cee59d1f91848c402 Mon Sep 17 00:00:00 2001
From: Downupanddownup
Date: Wed, 1 May 2024 01:49:37 +0800
Subject: [PATCH 61/72] Bug fixes
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 Ref_Audio_Selector/tool/audio_inference.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/Ref_Audio_Selector/tool/audio_inference.py b/Ref_Audio_Selector/tool/audio_inference.py
index 885d046d..b0560212 100644
--- a/Ref_Audio_Selector/tool/audio_inference.py
+++ b/Ref_Audio_Selector/tool/audio_inference.py
@@ -185,8 +185,9 @@ def generate_audio_files_for_emotion_group(url_composer, text_list, emotion_list
     end_time = time.perf_counter()  # mark the end of the timing window
     elapsed_time = end_time - start_time  # compute the elapsed time
     # build the log message
-    log_message = f"进程ID: {os.getpid()}, generate_audio_files_for_emotion_group 执行耗时: {elapsed_time:.6f} 秒;推理数量: {has_generated_count}; 字符总数:{all_text_count};每秒推理字符数:{all_text_count / elapsed_time:.3f};"
+    log_message = f"进程ID: {os.getpid()}, generate_audio_files_for_emotion_group 执行耗时: {elapsed_time:.6f} 秒;推理数量: {has_generated_count}; 字符总数:{all_text_count};每秒推理字符数:{all_text_count*len(emotion_list) / elapsed_time:.3f};"
     p_logger.info(log_message)
+    logger.info(log_message)
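Note: in PATCH 59 above, the second argument False to build_get_url presumably skips URL-encoding the model paths, and both api_v2 model-switch calls are plain GETs validated by status code; PATCH 60 then surfaces the server's response body on failure instead of a bare code. A hedged sketch of that call pattern — the endpoint and query string below are illustrative only, not the actual api_v2 contract:

    import requests

    def set_model(url: str) -> str:
        response = requests.get(url, timeout=30)
        if response.status_code == 200:
            return response.text
        # Include the server's explanation, not just the bare status code.
        raise Exception(f"Failed to set model. Status {response.status_code}, message: {response.text}")

    # Example (hypothetical endpoint):
    # set_model("http://localhost:9880/set_gpt_weights?weights_path=GPT_weights/demo.ckpt")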
From fdffd500662aa0d236fb329cffb9ac85b4051b17 Mon Sep 17 00:00:00 2001
From: Downupanddownup
Date: Wed, 1 May 2024 21:23:28 +0800
Subject: [PATCH 62/72] 00

---
 Ref_Audio_Selector/file/test_content/test_content.txt | 10 +++++++---
 Ref_Audio_Selector/start_ref_audio_selector_webui.bat |  3 ---
 2 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/Ref_Audio_Selector/file/test_content/test_content.txt b/Ref_Audio_Selector/file/test_content/test_content.txt
index e023e157..b245a008 100644
--- a/Ref_Audio_Selector/file/test_content/test_content.txt
+++ b/Ref_Audio_Selector/file/test_content/test_content.txt
@@ -4,10 +4,14 @@
 汪淼点点头,比起宇宙闪烁来,他宁愿接受这个超自然。但沙瑞山立刻抽走了他怀中这唯一的一根救命稻草。
 要想最终证实这一切,其实很简单。宇宙背景辐射这样幅度的波动,已经大到我们能用肉眼觉察的程度。
 你胡说什么?现在是你在违反常识了:背景辐射的波长是7厘米,比可见光大了七八个数量级,怎么能看到?
-用特制眼镜。
 特制眼镜?
 是我们为首都天文馆做的一个科普小玩意儿。现在的技术,已经能将彭齐阿斯和威尔逊在四十多年前用于发现特制背景辐射的二十英尺的喇叭形天线做成眼镜大小,
 并且在这个眼镜中设置一个转换系统,将接收到的背景辐射的波长压缩七个数量级,将7厘米波转换成红光。
 这样,观众在夜里戴上这种眼镜,就能亲眼看到宇宙的特制背景辐射,现在,也能看到宇宙闪烁。
-这东西现在哪儿?能告诉我吗
-希望各位猫猫给视频三连支持一下猫窝,十分感谢支持喵~
\ No newline at end of file
+这东西现在哪儿?能告诉我吗?
+希望各位猫猫给视频三连支持一下猫窝,十分感谢支持喵~
+你能为我写一本小说吗?一本?嗯……不少于五万字吧。以你为主人公吗?
+也是只有一次。”白蓉简单地回答,然后迅速转移话锋,搂住罗辑的脖子说,“算了,我不要那生日礼物了,你也回到正常的生活中来,好吗?”
+云天明看到那是一条丑陋的虫子,软乎乎湿漉漉的,在她白皙的手指间蠕动着,旁边一个女生尖叫道:恶心死了,你碰它干吗?!程心把虫子轻轻放到旁边的草丛中,说,它在这里会给踩死的。
+“那么多的星星,像雾似的。”云天明感叹道。程心把目光从银河收回,转头看着他,指着下面的校园和城市说:“你看下面也很漂亮啊,我们的生活是在这儿,可不是在那么远的银河里。”
+“可我们的专业,不就是为了到地球之外去吗?”“那是为了这里的生活更好,可不是为了逃离地球啊。”云天明当然知道程心的话是委婉地指向他的孤僻和自闭,他也只有默然以对。
\ No newline at end of file
diff --git a/Ref_Audio_Selector/start_ref_audio_selector_webui.bat b/Ref_Audio_Selector/start_ref_audio_selector_webui.bat
index 07b66eba..aed2d79e 100644
--- a/Ref_Audio_Selector/start_ref_audio_selector_webui.bat
+++ b/Ref_Audio_Selector/start_ref_audio_selector_webui.bat
@@ -1,8 +1,5 @@
 CHCP 65001
 @echo off
 cd ../
-echo 尝试启动后端程序
-echo 等待一分钟以上没有出现新的内容说明不正常
 runtime\python.exe ./Ref_Audio_Selector/ref_audio_selector_webui.py
-
 pause
\ No newline at end of file

From 7e1c40ef9f4a34f2f70ea67936fd3bbbd9874f9d Mon Sep 17 00:00:00 2001
From: Downupanddownup
Date: Wed, 1 May 2024 21:52:09 +0800
Subject: [PATCH 63/72] 00

---
 Ref_Audio_Selector/ref_audio_selector_webui.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Ref_Audio_Selector/ref_audio_selector_webui.py b/Ref_Audio_Selector/ref_audio_selector_webui.py
index 14a5d938..27e89222 100644
--- a/Ref_Audio_Selector/ref_audio_selector_webui.py
+++ b/Ref_Audio_Selector/ref_audio_selector_webui.py
@@ -823,7 +823,7 @@ def init_ui():
                                                            text_api_v2_start_set_sovits_model_request_info])
             with gr.Tab(label=i18n("第三方推理服务")):
                 gr.Markdown(value=i18n("启动第三方推理服务,并完成参考音频打包,模型参数设置等操作"))
-                gr.Markdown(value=i18n("2.2:配置推理服务参数信息,参考音和角色情绪二选一,如果是角色情绪(第三方推理包),需要先执行第五步,"
+                gr.Markdown(value=i18n("2.2:配置推理服务参数信息,除api服务外,其他需要修改参数内容,参考音和角色情绪二选一,如果是角色情绪(第三方推理包),需要先执行第五步,"
                                        "将参考音频打包配置到推理服务下,在推理前,请确认完整请求地址是否与正常使用时的一致,包括角色名称,尤其是文本分隔符是否正确"))
                 text_url = gr.Text(label=i18n("请输入推理服务请求地址与参数"), value=init.text_url_default)

From 036d828a7e5d1a40539addccc2c37733e86ec97d Mon Sep 17 00:00:00 2001
From: Downupanddownup
Date: Thu, 2 May 2024 07:26:26 +0800
Subject: [PATCH 64/72] Bug fixes
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 Ref_Audio_Selector/tool/audio_sample.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Ref_Audio_Selector/tool/audio_sample.py b/Ref_Audio_Selector/tool/audio_sample.py
index 9655911c..8f02c7e0 100644
--- a/Ref_Audio_Selector/tool/audio_sample.py
+++ b/Ref_Audio_Selector/tool/audio_sample.py
@@ -40,7 +40,7 @@ def convert_from_list(list_file, output_dir):
         audio_path, _, _, transcription = parts

         # build the new file name and path
-        new_filename = transcription + '.wav'
+        new_filename = transcription.strip() + '.wav'
         # new_filename = new_filename.replace(' ', '_')  # remove spaces
         # new_filename = ''.join(e for e in new_filename if e.isalnum() or e in ['_', '.'])  # remove illegal characters
         new_path = os.path.join(output_dir, new_filename)
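Note: in PATCH 64 above, strip() keeps the trailing newline of a .list transcription out of the generated .wav filename. A sketch extending that fix with the sanitization steps the code keeps commented out — the helper below is made up for illustration, not part of the project:

    import os

    def transcription_to_filename(transcription: str, output_dir: str) -> str:
        name = transcription.strip()   # drop surrounding whitespace and newlines
        name = name.replace(' ', '_')  # optional: spaces -> underscores
        # optional: drop characters that are illegal or risky in filenames
        name = ''.join(c for c in name if c.isalnum() or c in ('_', '.'))
        return os.path.join(output_dir, name + '.wav')

    print(transcription_to_filename(' 你好,世界 \n', 'refer_audio'))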
From 3ac7aad4d0788b2c63bd7ffd766ca90cf328d7f7 Mon Sep 17 00:00:00 2001
From: Downupanddownup
Date: Thu, 2 May 2024 07:39:43 +0800
Subject: [PATCH 65/72] Bug fixes
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 Ref_Audio_Selector/ref_audio_selector_webui.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Ref_Audio_Selector/ref_audio_selector_webui.py b/Ref_Audio_Selector/ref_audio_selector_webui.py
index 27e89222..7ef824f0 100644
--- a/Ref_Audio_Selector/ref_audio_selector_webui.py
+++ b/Ref_Audio_Selector/ref_audio_selector_webui.py
@@ -87,7 +87,7 @@ def start_similarity_analysis(work_space_dir, sample_dir, speaker_verification,
     cmd = f'"{python_exec}" Ref_Audio_Selector/tool/speaker_verification/voice_similarity.py '
     cmd += f' -r "{base_voice_path}"'
     cmd += f' -c "{sample_dir}"'
-    cmd += f' -o {similarity_file}'
+    cmd += f' -o "{similarity_file}"'
     cmd += f' -m {speaker_verification}'
     logger.info(cmd)

From 48cc70a7de72ae5002c637def2e0dc0267eeceb6 Mon Sep 17 00:00:00 2001
From: Downupanddownup
Date: Thu, 2 May 2024 08:02:56 +0800
Subject: [PATCH 66/72] Bug fixes
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 Ref_Audio_Selector/ref_audio_selector_webui.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Ref_Audio_Selector/ref_audio_selector_webui.py b/Ref_Audio_Selector/ref_audio_selector_webui.py
index 7ef824f0..d9c8a29f 100644
--- a/Ref_Audio_Selector/ref_audio_selector_webui.py
+++ b/Ref_Audio_Selector/ref_audio_selector_webui.py
@@ -243,7 +243,7 @@ def open_asr(asr_inp_dir, asr_opt_dir, asr_model, asr_model_size, asr_lang):
         asr_py_path = asr_dict[asr_model]["path"]
         if asr_py_path == 'funasr_asr.py':
             asr_py_path = 'funasr_asr_multi_level_dir.py'
-        if asr_py_path == 'fasterwhisper.py':
+        if asr_py_path == 'fasterwhisper_asr.py':
             asr_py_path = 'fasterwhisper_asr_multi_level_dir.py'
         cmd = f'"{python_exec}" Ref_Audio_Selector/tool/asr/{asr_py_path} '
         cmd += f' -i "{asr_inp_dir}"'

From 12fa7d875f544a639b016f4a7c60135bdbe8fe9e Mon Sep 17 00:00:00 2001
From: Downupanddownup
Date: Thu, 2 May 2024 09:56:00 +0800
Subject: [PATCH 67/72] Bug fixes
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../tool/asr/fasterwhisper_asr_multi_level_dir.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Ref_Audio_Selector/tool/asr/fasterwhisper_asr_multi_level_dir.py b/Ref_Audio_Selector/tool/asr/fasterwhisper_asr_multi_level_dir.py
index 301b2c5b..fd2e9209 100644
--- a/Ref_Audio_Selector/tool/asr/fasterwhisper_asr_multi_level_dir.py
+++ b/Ref_Audio_Selector/tool/asr/fasterwhisper_asr_multi_level_dir.py
@@ -60,7 +60,7 @@ def execute_asr_multi_level_dir(input_folder, output_folder, model_size, languag
             # only process .wav files
             if file_name.endswith(".wav"):
                 try:
-                    file_path = os.path.join(input_folder, file_name)
+                    file_path = os.path.join(root, file_name)
                     original_text = os.path.basename(root)
                     segments, info = model.transcribe(
                         audio=file_path,
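Note: PATCH 67 above fixes the classic os.walk pitfall — file names yielded by os.walk must be joined with root, the directory currently being visited, not with the top-level input folder, or every file inside a subdirectory resolves to a path that does not exist. A minimal illustration:

    import os

    def list_wavs(input_folder):
        wavs = []
        for root, dirs, files in os.walk(input_folder):
            for file_name in files:
                if file_name.endswith(".wav"):
                    # os.path.join(input_folder, file_name) breaks for files in
                    # subdirectories; root is correct at every level of the walk.
                    wavs.append(os.path.join(root, file_name))
        return wavs

    print(list_wavs("output/refer_audio"))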
From 18002ad8099e79e4d28807b5ccbf228f19d2b961 Mon Sep 17 00:00:00 2001
From: Downupanddownup
Date: Thu, 2 May 2024 09:58:28 +0800
Subject: [PATCH 68/72] Bug fixes
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 Ref_Audio_Selector/tool/asr/fasterwhisper_asr_multi_level_dir.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/Ref_Audio_Selector/tool/asr/fasterwhisper_asr_multi_level_dir.py b/Ref_Audio_Selector/tool/asr/fasterwhisper_asr_multi_level_dir.py
index fd2e9209..4b5bc95c 100644
--- a/Ref_Audio_Selector/tool/asr/fasterwhisper_asr_multi_level_dir.py
+++ b/Ref_Audio_Selector/tool/asr/fasterwhisper_asr_multi_level_dir.py
@@ -81,6 +81,7 @@ def execute_asr_multi_level_dir(input_folder, output_folder, model_size, languag
                     for segment in segments:
                         text += segment.text
                     output.append(f"{file_path}|{original_text}|{info.language.upper()}|{text}")
+                    print(f"{file_path}|{original_text}|{info.language.upper()}|{text}")
                 except:
                     return logger.error(traceback.format_exc())

From 7c3c778b1792a38250209fce1796212a8d98d914 Mon Sep 17 00:00:00 2001
From: Downupanddownup
Date: Tue, 7 May 2024 19:08:00 +0800
Subject: [PATCH 69/72] Add test text that may contain high-frequency sibilants
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 Ref_Audio_Selector/file/test_content/test_content.txt | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/Ref_Audio_Selector/file/test_content/test_content.txt b/Ref_Audio_Selector/file/test_content/test_content.txt
index b245a008..3f226276 100644
--- a/Ref_Audio_Selector/file/test_content/test_content.txt
+++ b/Ref_Audio_Selector/file/test_content/test_content.txt
@@ -14,4 +14,5 @@
 也是只有一次。”白蓉简单地回答,然后迅速转移话锋,搂住罗辑的脖子说,“算了,我不要那生日礼物了,你也回到正常的生活中来,好吗?”
 云天明看到那是一条丑陋的虫子,软乎乎湿漉漉的,在她白皙的手指间蠕动着,旁边一个女生尖叫道:恶心死了,你碰它干吗?!程心把虫子轻轻放到旁边的草丛中,说,它在这里会给踩死的。
 “那么多的星星,像雾似的。”云天明感叹道。程心把目光从银河收回,转头看着他,指着下面的校园和城市说:“你看下面也很漂亮啊,我们的生活是在这儿,可不是在那么远的银河里。”
-“可我们的专业,不就是为了到地球之外去吗?”“那是为了这里的生活更好,可不是为了逃离地球啊。”云天明当然知道程心的话是委婉地指向他的孤僻和自闭,他也只有默然以对。
\ No newline at end of file
+“可我们的专业,不就是为了到地球之外去吗?”“那是为了这里的生活更好,可不是为了逃离地球啊。”云天明当然知道程心的话是委婉地指向他的孤僻和自闭,他也只有默然以对。
+她轻轻地诵读着,每一个“紫”、“枝”、“子”、“扇”、“丝”、“池”、“穿梭”、“诗”、“紫藤”、“枝桠”、“池面”、“似霞”、“扇面”、“思绪”、“鱼跃”、“浅洼”,都让人感受到汉语齿音的清脆与灵动。
\ No newline at end of file

From 5ffb193bcd1d5481842092f0eea9c4be59f1eeae Mon Sep 17 00:00:00 2001
From: Downupanddownup
Date: Thu, 6 Jun 2024 18:14:38 +0800
Subject: [PATCH 70/72] Initialize as numbers
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 GPT_SoVITS/text/symbols.py                  | 1 +
 Ref_Audio_Selector/ui_init/init_ui_param.py | 4 ++--
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/GPT_SoVITS/text/symbols.py b/GPT_SoVITS/text/symbols.py
index 70499492..b85c57a0 100644
--- a/GPT_SoVITS/text/symbols.py
+++ b/GPT_SoVITS/text/symbols.py
@@ -398,4 +398,5 @@ symbols = [pad] + c + v + ja_symbols + pu_symbols + list(arpa)
 symbols = sorted(set(symbols))

 if __name__ == "__main__":
+    print(symbols)
     print(len(symbols))
diff --git a/Ref_Audio_Selector/ui_init/init_ui_param.py b/Ref_Audio_Selector/ui_init/init_ui_param.py
index 75cf1105..87367bb4 100644
--- a/Ref_Audio_Selector/ui_init/init_ui_param.py
+++ b/Ref_Audio_Selector/ui_init/init_ui_param.py
@@ -108,9 +108,9 @@ def init_first():
     text_sample_dir_default = common.check_path_existence_and_return(
         os.path.join(base_dir_default, params.list_to_convert_reference_audio_dir))

-    slider_subsection_num_default = empty_default(rw_param.read(rw_param.subsection_num), 5)
+    slider_subsection_num_default = int(empty_default(rw_param.read(rw_param.subsection_num), 10))

-    slider_sample_num_default = empty_default(rw_param.read(rw_param.sample_num), 4)
+    slider_sample_num_default = (empty_default(rw_param.read(rw_param.sample_num), 4))


 def init_second():
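Note: in PATCH 70 above, empty_default falls back when a persisted setting is missing or blank, and the subsection_num line now also casts to int so the Gradio slider receives a number; the sample_num line still lacks the cast despite the commit subject, which looks like an oversight. A small sketch of the pattern — int_setting is a hypothetical helper, not part of the project:

    def empty_default(value, default):
        # Fall back to the default when the stored setting is missing or blank.
        return default if value is None or value == '' else value

    def int_setting(raw, default):
        # Coerce once, so every slider default is guaranteed to be numeric.
        return int(empty_default(raw, default))

    print(int_setting(None, 10))  # -> 10
    print(int_setting('4', 10))   # -> 4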
From 9f418af1dd38f1c70cad6ef5c32da871e19a8113 Mon Sep 17 00:00:00 2001
From: Downupanddownup
Date: Thu, 6 Jun 2024 22:02:40 +0800
Subject: [PATCH 71/72] --

---
 Ref_Audio_Selector/file/test_content/test_content.txt | 6 +-----
 webui.py                                               | 2 +-
 2 files changed, 2 insertions(+), 6 deletions(-)

diff --git a/Ref_Audio_Selector/file/test_content/test_content.txt b/Ref_Audio_Selector/file/test_content/test_content.txt
index 3f226276..52fb556f 100644
--- a/Ref_Audio_Selector/file/test_content/test_content.txt
+++ b/Ref_Audio_Selector/file/test_content/test_content.txt
@@ -1,9 +1,6 @@
 你知道这不可能!
 如果有人故意破坏呢?
 也不可能!同时改变三颗卫星和一个地面观测站的数据?那这破坏也有些超自然了。
-汪淼点点头,比起宇宙闪烁来,他宁愿接受这个超自然。但沙瑞山立刻抽走了他怀中这唯一的一根救命稻草。
-要想最终证实这一切,其实很简单。宇宙背景辐射这样幅度的波动,已经大到我们能用肉眼觉察的程度。
-你胡说什么?现在是你在违反常识了:背景辐射的波长是7厘米,比可见光大了七八个数量级,怎么能看到?
 特制眼镜?
 是我们为首都天文馆做的一个科普小玩意儿。现在的技术,已经能将彭齐阿斯和威尔逊在四十多年前用于发现特制背景辐射的二十英尺的喇叭形天线做成眼镜大小,
 并且在这个眼镜中设置一个转换系统,将接收到的背景辐射的波长压缩七个数量级,将7厘米波转换成红光。
@@ -14,5 +11,4 @@
 也是只有一次。”白蓉简单地回答,然后迅速转移话锋,搂住罗辑的脖子说,“算了,我不要那生日礼物了,你也回到正常的生活中来,好吗?”
 云天明看到那是一条丑陋的虫子,软乎乎湿漉漉的,在她白皙的手指间蠕动着,旁边一个女生尖叫道:恶心死了,你碰它干吗?!程心把虫子轻轻放到旁边的草丛中,说,它在这里会给踩死的。
 “那么多的星星,像雾似的。”云天明感叹道。程心把目光从银河收回,转头看着他,指着下面的校园和城市说:“你看下面也很漂亮啊,我们的生活是在这儿,可不是在那么远的银河里。”
-“可我们的专业,不就是为了到地球之外去吗?”“那是为了这里的生活更好,可不是为了逃离地球啊。”云天明当然知道程心的话是委婉地指向他的孤僻和自闭,他也只有默然以对。
-她轻轻地诵读着,每一个“紫”、“枝”、“子”、“扇”、“丝”、“池”、“穿梭”、“诗”、“紫藤”、“枝桠”、“池面”、“似霞”、“扇面”、“思绪”、“鱼跃”、“浅洼”,都让人感受到汉语齿音的清脆与灵动。
\ No newline at end of file
+“可我们的专业,不就是为了到地球之外去吗?”“那是为了这里的生活更好,可不是为了逃离地球啊。”云天明当然知道程心的话是委婉地指向他的孤僻和自闭,他也只有默然以对。
\ No newline at end of file
diff --git a/webui.py b/webui.py
index c71c1ca4..e278c52d 100644
--- a/webui.py
+++ b/webui.py
@@ -832,7 +832,7 @@ def change_size_choices(key):  # adjust the selectable model sizes for the chosen model
             gr.Markdown(value=i18n("1Ba-SoVITS训练。用于分享的模型文件输出在SoVITS_weights下。"))
             with gr.Row():
                 batch_size = gr.Slider(minimum=1,maximum=40,step=1,label=i18n("每张显卡的batch_size"),value=default_batch_size,interactive=True)
-                total_epoch = gr.Slider(minimum=1,maximum=25,step=1,label=i18n("总训练轮数total_epoch,不建议太高"),value=8,interactive=True)
+                total_epoch = gr.Slider(minimum=1,maximum=100,step=1,label=i18n("总训练轮数total_epoch,不建议太高"),value=8,interactive=True)
                 text_low_lr_rate = gr.Slider(minimum=0.2,maximum=0.6,step=0.05,label=i18n("文本模块学习率权重"),value=0.4,interactive=True)
                 save_every_epoch = gr.Slider(minimum=1,maximum=25,step=1,label=i18n("保存频率save_every_epoch"),value=4,interactive=True)
                 if_save_latest = gr.Checkbox(label=i18n("是否仅保存最新的ckpt文件以节省硬盘空间"), value=True, interactive=True, show_label=True)

From 16b3c2a131985ec53df0ecae19b58620a05ee0ce Mon Sep 17 00:00:00 2001
From: Downupanddownup
Date: Sat, 15 Jun 2024 02:06:10 +0800
Subject: [PATCH 72/72] --

---
 Ref_Audio_Selector/file/test_content/test_content.txt | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/Ref_Audio_Selector/file/test_content/test_content.txt b/Ref_Audio_Selector/file/test_content/test_content.txt
index 52fb556f..0a8cc905 100644
--- a/Ref_Audio_Selector/file/test_content/test_content.txt
+++ b/Ref_Audio_Selector/file/test_content/test_content.txt
@@ -2,9 +2,6 @@
 如果有人故意破坏呢?
 也不可能!同时改变三颗卫星和一个地面观测站的数据?那这破坏也有些超自然了。
 特制眼镜?
-是我们为首都天文馆做的一个科普小玩意儿。现在的技术,已经能将彭齐阿斯和威尔逊在四十多年前用于发现特制背景辐射的二十英尺的喇叭形天线做成眼镜大小,
-并且在这个眼镜中设置一个转换系统,将接收到的背景辐射的波长压缩七个数量级,将7厘米波转换成红光。
-这样,观众在夜里戴上这种眼镜,就能亲眼看到宇宙的特制背景辐射,现在,也能看到宇宙闪烁。
 这东西现在哪儿?能告诉我吗?
 希望各位猫猫给视频三连支持一下猫窝,十分感谢支持喵~
 你能为我写一本小说吗?一本?嗯……不少于五万字吧。以你为主人公吗?