From 17c905fc21b60cd9c1fbd463bc57d1dfdf7121e9 Mon Sep 17 00:00:00 2001
From: jameson512 <2867557054@qq.com>
Date: Sat, 3 Feb 2024 00:56:19 +0800
Subject: [PATCH] update

---
 about.md                           | 10 +++++
 version.json                       |  4 +-
 videotrans/__init__.py             |  4 +-
 videotrans/box/win.py              | 60 ++++++++++++++++-----------
 videotrans/box/worker.py           | 66 ++++++++++++++++--------------
 videotrans/language/en.json        |  3 +-
 videotrans/language/es.json        |  3 +-
 videotrans/language/zh.json        |  5 ++-
 videotrans/mainwin/secwin.py       |  5 ---
 videotrans/mainwin/spwin.py        |  2 +-
 videotrans/recognition/__init__.py |  2 +
 videotrans/task/trans_create.py    |  2 +-
 videotrans/translator/azure.py     |  2 +
 videotrans/translator/baidu.py     |  2 +
 videotrans/translator/chatgpt.py   |  2 +
 videotrans/translator/deepl.py     |  2 +
 videotrans/translator/deeplx.py    |  2 +
 videotrans/translator/gemini.py    |  4 +-
 videotrans/translator/google.py    |  2 +
 videotrans/translator/ott.py       |  2 +
 videotrans/translator/tencent.py   |  3 +-
 videotrans/ui/info.py              | 13 +++++-
 videotrans/ui/toolboxen.py         | 16 ++++++++
 videotrans/util/tools.py           |  8 ++--
 24 files changed, 146 insertions(+), 78 deletions(-)

diff --git a/about.md b/about.md
index e1eac2c..317a90d 100644
--- a/about.md
+++ b/about.md
@@ -69,6 +69,16 @@
 - * 生 / 2024-1-25 捐助 18.88 元
 - w*d / 2024-1-26 捐助 6.66 元
 - *. / 2024-1-27 捐助 0.15 元
+- vedanthkadam555 / 2024-1-28 捐助 10 美元
 - *明 / 2024-1-29 捐助 20 元
+- *骁(支付宝) / 2024-1-29 捐助 2 元
 - 5*) / 2024-1-30 捐助 10 元
 - rqi14(U*d) / 2024-1-30 捐助 200 元
+- *正 / 2024-1-30 捐助 10 元
+- i*8 / 2024-1-31 捐助 18 元
+- *. / 2024-2-1 捐助 10 元
+- *途 / 2024-2-1 捐助 30 元
+- *甜(bingsunny0730) / 2024-2-2 捐助 1.68 元
+- k*v / 2024-2-2 捐助 10 元
+- *林(xjsszl) / 2024-2-2 捐助 10 元
+- **宇(支付宝) / 2024-2-2 捐助 10 元
diff --git a/version.json b/version.json
index d9b3994..240d191 100644
--- a/version.json
+++ b/version.json
@@ -1,4 +1,4 @@
 {
-  "version": "0.9993",
-  "version_num": 10003
+  "version": "0.9994",
+  "version_num": 10004
 }
diff --git a/videotrans/__init__.py b/videotrans/__init__.py
index 75d8aeb..c46572d 100644
--- a/videotrans/__init__.py
+++ b/videotrans/__init__.py
@@ -1,4 +1,4 @@
 # -*- coding: utf-8 -*-
-VERSION="0.9993 v.wonyes.org"
-VERSION_NUM=10003
+VERSION="0.9994 v.wonyes.org"
+VERSION_NUM=10004
diff --git a/videotrans/box/win.py b/videotrans/box/win.py
index f398567..4b6d2f6 100644
--- a/videotrans/box/win.py
+++ b/videotrans/box/win.py
@@ -6,7 +6,7 @@ import time
 from PySide6 import QtWidgets
 from PySide6.QtCore import QSettings, QUrl
-from PySide6.QtGui import QDesktopServices, QIcon
+from PySide6.QtGui import QDesktopServices, QIcon, QTextCursor
 from PySide6.QtWidgets import QMainWindow, QFileDialog, QMessageBox, QLabel
 from videotrans import VERSION
 from videotrans.box.component import Player, DropButton, Textedit, TextGetdir
@@ -241,22 +241,18 @@ def opendir_fn(self, dirname=None):
     def receiver(self, json_data):
         data = json.loads(json_data)
         # fun_name 方法名,type类型,text具体文本
-        if "func_name" not in data:
-            self.statuslabel.setText(data['text'][:60])
-            if data['type'] == 'error':
-                self.statuslabel.setStyle("""color:#ff0000""")
-        elif data['func_name'] == "yspfl_end":
+
+        if data['func_name'] == "yspfl_end":
             # 音视频分离完成了
             self.yspfl_startbtn.setText(config.transobj["zhixingwc"] if data['type'] == "end" else config.transobj["zhixinger"])
             self.yspfl_startbtn.setDisabled(False)
-
-            self.statuslabel.setText("")
+            self.statuslabel.setText("Succeed")
         elif data['func_name'] == 'ysphb_end':
             self.ysphb_startbtn.setText(config.transobj["zhixingwc"] if data['type'] == "end" else config.transobj["zhixinger"])
             self.ysphb_startbtn.setDisabled(False)
             self.ysphb_opendir.setDisabled(False)
             if data['type'] == 'end':
-                self.statuslabel.setText("")
+                self.statuslabel.setText("Succeed")
                 basename = os.path.basename(self.ysphb_videoinput.text())
                 if os.path.exists(config.rootdir + f"/{basename}.srt"):
                     os.unlink(config.rootdir + f"/{basename}.srt")
@@ -268,15 +264,16 @@ def receiver(self, json_data):
             self.disabled_shibie(False)
             if data['type'] == 'end':
                 self.shibie_startbtn.setText(config.transobj["zhixingwc"])
+                self.shibie_text.clear()
                 self.shibie_text.insertPlainText(data['text'])
-                self.statuslabel.setText("")
+                self.statuslabel.setText("Succeed")
             else:
                 self.shibie_startbtn.setText(config.transobj["zhixinger"])
         elif data['func_name'] == 'hecheng_end':
             if data['type'] == 'end':
                 self.hecheng_startbtn.setText(config.transobj["zhixingwc"])
                 self.hecheng_startbtn.setToolTip(config.transobj["zhixingwc"])
-                self.statuslabel.setText("")
+                self.statuslabel.setText("Succeed")
             else:
                 self.hecheng_startbtn.setText(data['text'])
                 self.hecheng_startbtn.setToolTip(data['text'])
@@ -289,17 +286,30 @@ def receiver(self, json_data):
             self.disabled_geshi(False)
             self.geshi_result.insertPlainText(config.transobj["zhixingwc"])
             self.geshi_input.clear()
-            self.statuslabel.setText("")
+            self.statuslabel.setText("Succeed")
         elif data['func_name'] == 'hun_end':
             self.hun_startbtn.setDisabled(False)
             self.hun_out.setDisabled(False)
-            self.statuslabel.setText("")
+            self.statuslabel.setText("Succeed")
         elif data['func_name'] == 'fanyi_end':
             self.fanyi_start.setDisabled(False)
             self.fanyi_start.setText(config.transobj['starttrans'])
+            self.fanyi_targettext.clear()
             self.fanyi_targettext.setPlainText(data['text'])
             self.daochu.setDisabled(False)
-            self.statuslabel.setText("")
+            self.statuslabel.setText("Translate end")
+        elif data['func_name']=='set_fanyi':
+            self.fanyi_targettext.moveCursor(QTextCursor.End)
+            self.fanyi_targettext.insertPlainText(data['text'])
+        elif data['func_name']=='set_subtitle':
+            self.shibie_text.moveCursor(QTextCursor.End)
+            self.shibie_text.insertPlainText(data['text'])
+        elif "func_name" not in data or not data['func_name']:
+            self.statuslabel.setText(data['text'][:60])
+            if data['type'] == 'error':
+                self.statuslabel.setStyle("""color:#ff0000""")
+        else:
+            self.statuslabel.setText(data['text'])
 
     # tab-1 音视频分离启动
     def yspfl_start_fn(self):
@@ -313,7 +323,7 @@ def yspfl_start_fn(self):
         self.yspfl_task = Worker(
             [['-y', '-i', file, '-an', f"{video_out}/{basename}.mp4", f"{video_out}/{basename}.wav"]],
             "yspfl_end", self)
-        self.yspfl_task.update_ui.connect(self.receiver)
+        # self.yspfl_task.update_ui.connect(self.receiver)
         self.yspfl_task.start()
         self.yspfl_startbtn.setText(config.transobj['running'])
         self.yspfl_startbtn.setDisabled(True)
@@ -359,7 +369,8 @@ def ysphb_start_fun(self):
         basename = os.path.basename(videofile)
         srtfile = self.ysphb_srtinput.text()
         wavfile = self.ysphb_wavinput.text()
-
+        # 是否保留原声
+        save_raw=self.ysphb_replace.isChecked()
         if not videofile or not os.path.exists(videofile):
             QMessageBox.critical(self, config.transobj['anerror'], config.transobj['selectvideodir'])
             return
@@ -392,8 +403,8 @@ def ysphb_start_fun(self):
             videofile = tmpname_conver
 
         if wavfile:
-            # 视频里是否有音轨
-            if video_info['streams_audio'] > 0:
+            # 视频里是否有音轨 并且保留原声音
+            if video_info['streams_audio'] > 0 and save_raw:
                 cmds = [
                     ['-y', '-i', videofile, '-i', wavfile, '-filter_complex', "[0:a][1:a]amerge=inputs=2[aout]",
                      '-map', '0:v', '-map', "[aout]", '-c:v', 'copy', '-c:a', 'aac', tmpname],
@@ -409,7 +420,6 @@ def ysphb_start_fun(self):
                 ['-y', '-i', tmpname if wavfile else videofile, "-vf", f"subtitles={srtfile}", '-c:v', 'libx264',
                  '-c:a', 'copy', f'{savedir}/{basename}.mp4'])
         self.ysphb_task = Worker(cmds, "ysphb_end", self)
-        self.ysphb_task.update_ui.connect(self.receiver)
         self.ysphb_task.start()
 
         self.ysphb_startbtn.setText(config.transobj["running"])
@@ -440,7 +450,7 @@ def shibie_start_fun(self):
                 self.shibie_ffmpeg_task = Worker([
                     ['-y', '-i', file, out_file]
                 ], "shibie_next", self)
-                self.shibie_ffmpeg_task.update_ui.connect(self.receiver)
+                # self.shibie_ffmpeg_task.update_ui.connect(self.receiver)
                 self.shibie_ffmpeg_task.start()
             except Exception as e:
                 config.logger.error("执行语音识别前,先从视频中分离出音频失败:" + str(e))
@@ -459,7 +469,7 @@ def shibie_start_next_fun(self):
         model = self.shibie_model.currentText()
         print(f'{file=}')
         self.shibie_task = WorkerWhisper(file, model, translator.get_audio_code(show_source=self.shibie_language.currentText()),"shibie_end", self)
-        self.shibie_task.update_ui.connect(self.receiver)
+        # self.shibie_task.update_ui.connect(self.receiver)
         self.shibie_task.start()
 
     def shibie_save_fun(self):
@@ -527,7 +537,7 @@ def hecheng_start_fun(self):
                                      func_name="hecheng_end",
                                      voice_autorate=issrt and self.voice_autorate.isChecked(),
                                      tts_issrt=issrt)
-        self.hecheng_task.update_ui.connect(self.receiver)
+        # self.hecheng_task.update_ui.connect(self.receiver)
        self.hecheng_task.start()
         self.hecheng_startbtn.setText(config.transobj["running"])
         self.hecheng_startbtn.setDisabled(True)
@@ -614,7 +624,7 @@ def geshi_start_fun(self, ext):
             self.disabled_geshi(False)
             return
         self.geshi_task = Worker(cmdlist, "geshi_end", self, True)
-        self.geshi_task.update_ui.connect(self.receiver)
+        # self.geshi_task.update_ui.connect(self.receiver)
         self.geshi_task.start()
 
     # 禁用按钮
@@ -651,7 +661,7 @@ def hun_fun(self):
         cmd = ['-y', '-i', file1, '-i', file2, '-filter_complex',
               "[0:a][1:a]amix=inputs=2:duration=first:dropout_transition=2", '-ac', '2', savename]
         self.geshi_task = Worker([cmd], "hun_end", self, True)
-        self.geshi_task.update_ui.connect(self.receiver)
+        # self.geshi_task.update_ui.connect(self.receiver)
         self.geshi_task.start()
         self.hun_startbtn.setDisabled(True)
         self.hun_out.setDisabled(True)
@@ -694,7 +704,7 @@ def fanyi_start_fun(self):
             QMessageBox.critical(self, config.transobj['anerror'], rs)
             return
         self.fanyi_task = FanyiWorker(translate_type, target_language, source_text, issrt, self)
-        self.fanyi_task.ui.connect(self.receiver)
+        # self.fanyi_task.ui.connect(self.receiver)
         self.fanyi_task.start()
         self.fanyi_start.setDisabled(True)
         self.fanyi_start.setText(config.transobj["running"])
diff --git a/videotrans/box/worker.py b/videotrans/box/worker.py
index 7dfa841..b83a274 100644
--- a/videotrans/box/worker.py
+++ b/videotrans/box/worker.py
@@ -17,7 +17,7 @@
 # 执行 ffmpeg 线程
 class Worker(QThread):
-    update_ui = pyqtSignal(str)
+    # update_ui = pyqtSignal(str)
 
     def __init__(self, cmd_list, func_name="", parent=None, no_decode=False):
         super(Worker, self).__init__(parent)
@@ -26,7 +26,7 @@ def __init__(self, cmd_list, func_name="", parent=None, no_decode=False):
         self.no_decode=no_decode
 
     def run(self):
-        set_process_box(f'starting...')
+        set_process_box(f'starting ffmpeg...')
         for cmd in self.cmd_list:
             logger.info(f"[box]Will execute: ffmpeg {cmd=}")
             try:
@@ -36,18 +36,19 @@ def run(self):
             except Exception as e:
                 logger.error("[bxo]FFmepg exec error:" + str(e))
                 set_process_box("[bxo]FFmepg exec error:" + str(e))
-                self.post_message("error", "ffmpeg error")
+                # self.post_message("error", "ffmpeg error")
                 return f'[error]{str(e)}'
-            self.post_message("end", "End\n")
self.post_message("end", "End\n") - set_process_box(f'Ended','end') + set_process_box('ffmpeg succeed',"end",func_name=self.func_name) + # self.post_message("end", "End\n") + # set_process_box(f'Ended ffmpeg','end') - def post_message(self, type, text): - self.update_ui.emit(json.dumps({"func_name": self.func_name, "type": type, "text": text})) + # def post_message(self, type, text): + # self.update_ui.emit(json.dumps({"func_name": self.func_name, "type": type, "text": text})) # 执行语音识别 class WorkerWhisper(QThread): - update_ui = pyqtSignal(str) + # update_ui = pyqtSignal(str) def __init__(self, audio_path, model, language, func_name, parent=None): super(WorkerWhisper, self).__init__(parent) @@ -57,7 +58,7 @@ def __init__(self, audio_path, model, language, func_name, parent=None): self.language = language def run(self): - set_process_box(f'start {self.model}') + set_process_box(f'start {self.model} ') try: config.box_status='ing' srts=run_recogn(type="all",audio_file=self.audio_path,model_name=self.model,detect_language=self.language,set_p=False,cache_folder=config.TEMP_DIR) @@ -71,13 +72,13 @@ def run(self): # config.box_status='stop' def post_message(self, type, text): - self.update_ui.emit(json.dumps({"func_name": self.func_name, "type": type, "text": text})) + set_process_box(text,type,func_name=self.func_name) + # self.update_ui.emit(json.dumps({"func_name": self.func_name, "type": type, "text": text})) # 合成 class WorkerTTS(QThread): - update_ui = pyqtSignal(str) - + # update_ui = pyqtSignal(str) def __init__(self, parent=None, *, text=None, role=None, @@ -102,12 +103,13 @@ def __init__(self, parent=None, *, def run(self): config.box_status='ing' - set_process_box(f"start {self.tts_type=},{self.role=},{self.rate=}") + set_process_box(f"start {self.tts_type=}") if self.tts_issrt: try: q = self.before_tts() self.exec_tts(q) + self.post_message("end","Succeed") except Exception as e: self.post_message('error', f'srt create dubbing error:{str(e)}') return @@ -136,7 +138,7 @@ def run(self): ], no_decode=True,is_box=True) if os.path.exists(mp3): os.unlink(mp3) - self.post_message("end", "Ended") + self.post_message("end", "Succeed") # config.box_status='stop' # 配音预处理,去掉无效字符,整理开始时间 @@ -206,16 +208,17 @@ def exec_tts(self, queue_tts): diff = mp3len - wavlen if diff > 0 and self.voice_autorate: speed = mp3len / wavlen - # 新的长度 - mp3len = mp3len / speed - diff = mp3len - wavlen - if diff < 0: - diff = 0 - tmp_mp3 = os.path.join(config.TEMP_DIR, f'{it["filename"]}.mp3') - speed_up_mp3(filename=it['filename'], speed=speed, out=tmp_mp3) - audio_data = AudioSegment.from_file(tmp_mp3, format="mp3") - # 增加新的偏移 - offset += diff + if speed<50: + # 新的长度 + mp3len = mp3len / speed + diff = mp3len - wavlen + if diff < 0: + diff = 0 + tmp_mp3 = os.path.join(config.TEMP_DIR, f'{it["filename"]}.mp3') + speed_up_mp3(filename=it['filename'], speed=speed, out=tmp_mp3) + audio_data = AudioSegment.from_file(tmp_mp3, format="mp3") + # 增加新的偏移 + offset += diff elif diff > 0: offset += diff it['end_time'] = it['start_time'] + mp3len @@ -227,7 +230,7 @@ def exec_tts(self, queue_tts): # 原 total_length==0,说明没有上传视频,仅对已有字幕进行处理,不需要裁切音频 self.merge_audio_segments(segments, start_times) except Exception as e: - raise Exception(f"[error] exec_tts :" + str(e)) + raise Exception(f"[error] exec_tts:" + str(e)) return True # join all short audio to one ,eg name.mp4 name.mp4.wav @@ -259,11 +262,11 @@ def merge_audio_segments(self, segments, start_times): return merged_audio def post_message(self, type, text): - 
self.update_ui.emit(json.dumps({"func_name": self.func_name, "type": type, "text": text})) + set_process_box(text,type,func_name=self.func_name) + # self.update_ui.emit(json.dumps({"func_name": self.func_name, "type": type, "text": text})) class FanyiWorker(QThread): - ui = pyqtSignal(str) - + # ui = pyqtSignal(str) def __init__(self, type, target_language, text, issrt, parent=None): super(FanyiWorker, self).__init__(parent) self.type = type @@ -290,8 +293,9 @@ def run(self): srts_tmp += f"{it['line']}\n{it['time']}\n{it['text']}\n\n" self.srts = srts_tmp except Exception as e: - self.ui.emit(json.dumps({"func_name": "fanyi_end", "type": "error", "text": str(e)})) + # self.ui.emit(json.dumps({"func_name": "fanyi_end", "type": "error", "text": str(e)})) + set_process_box(str(e),"error",func_name="fanyi_end") return - - self.ui.emit(json.dumps({"func_name": "fanyi_end", "type": "end", "text": self.srts})) + set_process_box(self.srts,"end",func_name="fanyi_end") + # self.ui.emit(json.dumps({"func_name": "fanyi_end", "type": "end", "text": self.srts})) diff --git a/videotrans/language/en.json b/videotrans/language/en.json index fd6eb72..652fd3e 100644 --- a/videotrans/language/en.json +++ b/videotrans/language/en.json @@ -1,5 +1,6 @@ { "translate_language": { + "Preserve the original sound in the video": "Preserve the original sound in the video", "Clone voice cannot be used in subtitle dubbing mode as there are no replicable voices": "Clone voice cannot be used in subtitle dubbing mode as there are no replicable voices", "lanjie": "Active restriction", "The ott project at":"The OTT text translate at: github.com/jianchang512/ott", @@ -289,7 +290,7 @@ "Translation channels":"Translation channels", "Target lang":"Target lang", "Proxy":"Proxy", - "Failed to access Google services. Please set up the proxy correctly":"Failed to access Google services. Please set up the proxy correctly", + "Failed to access Google services. Please set up the proxy correctly":"set up the proxy", "Import text to be translated from a file..":"Import text to be translated from a file..", "shuoming1":"If it is only pure text, there is no need to select it.\nIf it is in SRT subtitle format, it needs to be selected to not translate the line numbers and time values in the subtitles, and return them in the original subtitle format.\nIf not selected, all content will be sent to the translation engine for processing at once, and punctuation, symbols, and formatting may change.\n", "export..":"export..", diff --git a/videotrans/language/es.json b/videotrans/language/es.json index a719d2f..9401cff 100644 --- a/videotrans/language/es.json +++ b/videotrans/language/es.json @@ -1,5 +1,6 @@ { "translate_language" : { + "Preserve the original sound in the video": "Preserve the original sound in the video", "Clone voice cannot be used in subtitle dubbing mode as there are no replicable voices": "Clone voice cannot be used in subtitle dubbing mode as there are no replicable voices", "lanjie": "Active restriction", "The ott project at":"The OTT text translate at: github.com/jianchang512/ott", @@ -288,7 +289,7 @@ "Translation channels" : "Canales de traducción ", "Target lang" : "Objetivo lang ", "Proxy" : "Representante ", - "Failed to access Google services. Please set up the proxy correctly" : "Error al acceder a los servicios de Google. Configure el proxy correctamente ", + "Failed to access Google services. Please set up the proxy correctly" : "Configure el proxy correctamente ", "Import text to be translated from a file.." 
: "Importar texto que se va a traducir de un archivo. ", "shuoming1" : "Si sólo es texto puro, no es necesario seleccionarlo. \nSi está en formato de subtítulo de SRT, debe seleccionarse para no traducir los números de línea y los valores de tiempo en los subtítulos y devolverlos en el formato de subtítulo original. \nSi no se selecciona, todo el contenido se enviará al motor de traducción para su procesamiento a la vez, y la puntuación, los símbolos y el formato pueden cambiar. \n ", "export.." : "export .. ", diff --git a/videotrans/language/zh.json b/videotrans/language/zh.json index 9744063..f813503 100644 --- a/videotrans/language/zh.json +++ b/videotrans/language/zh.json @@ -1,5 +1,6 @@ { "translate_language": { + "Preserve the original sound in the video": "保留视频中原声音", "Clone voice cannot be used in subtitle dubbing mode as there are no replicable voices": "字幕配音模式下不可使用clone-voice,因为不存在可复制的音色", "lanjie": "已限制", "The ott project at":"OTT离线文字翻译地址: github.com/jianchang512/ott", @@ -115,7 +116,7 @@ "wenbenbukeweikong": "待翻译文本不可为空", "buzhichifanyi": "不支持翻译到该目标语言", "ffmpegno": "未找到 ffmpeg,软件不可用,请去 ffmpeg.org 下载并加入到系统环境变量", - "newversion": "有新的版本可以下载了", + "newversion": "有新的版本了,点击去下载", "tingzhile": "停止了", "geshihuazimuchucuo": "格式化字幕文件出错", "moweiyanchangshibai": "末尾添加延长视频帧失败,将保持原样不延长视频", @@ -286,7 +287,7 @@ "Translation channels":"翻译渠道", "Target lang":"目标语言", "Proxy":"代理地址", - "Failed to access Google services. Please set up the proxy correctly":"网络连接不通,请填写代理地址", + "Failed to access Google services. Please set up the proxy correctly":"Goole或chaGPT官方api需填写代理地址", "Import text to be translated from a file..":"从文件导入文本...", "shuoming1":"如果只是纯文字,无需选中。\n如果是srt字幕格式,需选中,将不翻译字幕中的行号和时间值,并按字幕原格式返回。\n不选则一次性将全部内容发给翻译引擎处理,标点、符号、格式可能有变化。\n", "export..":"导出结果..", diff --git a/videotrans/mainwin/secwin.py b/videotrans/mainwin/secwin.py index 4f82ab4..419a8f4 100644 --- a/videotrans/mainwin/secwin.py +++ b/videotrans/mainwin/secwin.py @@ -1348,11 +1348,6 @@ def update_data(self, json_data): self.set_process_btn_text(d['text'], d['btnkey'], 'succeed') elif d['type'] == 'statusbar': self.main.statusLabel.setText(d['text']) - # elif d['type'] == 'error': - # # 出错停止 - # self.update_status('error') - # self.set_process_btn_text(d['text'], d['btnkey'], 'error') - # self.main.continue_compos.hide() elif d['type'] == 'edit_subtitle': # 显示出合成按钮,等待编辑字幕 self.main.continue_compos.show() diff --git a/videotrans/mainwin/spwin.py b/videotrans/mainwin/spwin.py index 8289a29..de12035 100644 --- a/videotrans/mainwin/spwin.py +++ b/videotrans/mainwin/spwin.py @@ -49,7 +49,7 @@ def __init__(self, parent=None): self.initUI() # 打开工具箱 configure.TOOLBOX = win.MainWindow() - configure.TOOLBOX.resize(int(width*0.7), int(height*0.7)) + configure.TOOLBOX.resize(int(width*0.7), int(height*0.8)) qtRect=configure.TOOLBOX.frameGeometry() qtRect.moveCenter(screen.availableGeometry().center()) configure.TOOLBOX.move(qtRect.topLeft()) diff --git a/videotrans/recognition/__init__.py b/videotrans/recognition/__init__.py index 28d917f..e834202 100644 --- a/videotrans/recognition/__init__.py +++ b/videotrans/recognition/__init__.py @@ -216,6 +216,8 @@ def all_recogn(*, detect_language=None, audio_file=None, cache_folder=None,model if set_p: tools.set_process(f'{s["line"]}\n{startTime} --> {endTime}\n{text}\n\n', 'subtitle') tools.set_process( f'{config.transobj["zimuhangshu"]} {s["line"]}, {round(segment.end * 100 / info.duration, 2)}%') + else: + tools.set_process_box(f'{s["line"]}\n{startTime} --> {endTime}\n{text}\n\n', func_name="set_subtitle") 
             # 写入翻译前的原语言字幕到目标文件夹
diff --git a/videotrans/task/trans_create.py b/videotrans/task/trans_create.py
index 097e3b6..584fef8 100644
--- a/videotrans/task/trans_create.py
+++ b/videotrans/task/trans_create.py
@@ -555,7 +555,7 @@ def exec_tts(self, queue_tts, total_length):
                 if diff > 0:
                     speed = round(mp3len / wavlen if wavlen>0 else 1,2)
                     # 新的长度
-                    if os.path.exists(it['filename']) and os.path.getsize(it['filename'])>0:
+                    if speed<50 and os.path.exists(it['filename']) and os.path.getsize(it['filename'])>0:
                         tmp_mp3 = os.path.join(self.cache_folder, f'{it["filename"]}.{ext}')
                         speed_up_mp3(filename=it['filename'], speed=speed, out=tmp_mp3)
                         # mp3 降速
diff --git a/videotrans/translator/azure.py b/videotrans/translator/azure.py
index edfa121..89f52ab 100644
--- a/videotrans/translator/azure.py
+++ b/videotrans/translator/azure.py
@@ -66,6 +66,8 @@ def trans(text_list, target_language="English", *, set_p=True):
             result=result.strip().replace(''','"').split("\n")
             if set_p:
                 tools.set_process("\n\n".join(result), 'subtitle')
+            else:
+                tools.set_process("\n\n".join(result), func_name="set_fanyi")
             result_length = len(result)
             print(f'{result_length=}')
             while result_length < source_length:
diff --git a/videotrans/translator/baidu.py b/videotrans/translator/baidu.py
index 3d627ad..658be4c 100644
--- a/videotrans/translator/baidu.py
+++ b/videotrans/translator/baidu.py
@@ -55,6 +55,8 @@ def trans(text_list, target_language="en", *, set_p=True):
             result=result.strip().replace(''','"').split("\n")
             if set_p:
                 tools.set_process("\n\n".join(result), 'subtitle')
+            else:
+                tools.set_process("\n\n".join(result), func_name="set_fanyi")
             result_length = len(result)
             print(f'{result_length=}')
             while result_length < source_length:
diff --git a/videotrans/translator/chatgpt.py b/videotrans/translator/chatgpt.py
index d000ecb..2b032f4 100644
--- a/videotrans/translator/chatgpt.py
+++ b/videotrans/translator/chatgpt.py
@@ -87,6 +87,8 @@ def trans(text_list, target_language="English", *, set_p=True):
 
             if set_p:
                 tools.set_process("\n\n".join(result), 'subtitle')
+            else:
+                tools.set_process("\n\n".join(result), func_name="set_fanyi")
             result_length = len(result)
             while result_length < source_length:
                 result.append("")
diff --git a/videotrans/translator/deepl.py b/videotrans/translator/deepl.py
index 2820b8a..4120c19 100644
--- a/videotrans/translator/deepl.py
+++ b/videotrans/translator/deepl.py
@@ -39,6 +39,8 @@ def trans(text_list, target_language="en", *, set_p=True):
         result=result.text.strip().replace(''','"').split("\n")
         if set_p:
             tools.set_process("\n\n".join(result), 'subtitle')
+        else:
+            tools.set_process("\n\n".join(result), func_name="set_fanyi")
         result_length = len(result)
         print(f'{result_length=}')
         while result_length < source_length:
diff --git a/videotrans/translator/deeplx.py b/videotrans/translator/deeplx.py
index 5325066..91279cb 100644
--- a/videotrans/translator/deeplx.py
+++ b/videotrans/translator/deeplx.py
@@ -58,6 +58,8 @@ def trans(text_list, target_language="en", *, set_p=True):
         result=result['data'].strip().replace(''','"').split("\n")
         if set_p:
             tools.set_process("\n\n".join(result), 'subtitle')
+        else:
+            tools.set_process("\n\n".join(result), func_name="set_fanyi")
         result_length = len(result)
         while result_length < source_length:
             result.append("")
diff --git a/videotrans/translator/gemini.py b/videotrans/translator/gemini.py
index 5a92bc3..c3e8001 100644
--- a/videotrans/translator/gemini.py
+++ b/videotrans/translator/gemini.py
@@ -74,7 +74,7 @@ def trans(text_list, target_language="English", *, set_p=True):
     # 切割为每次翻译多少行,值在 set.ini中设定,默认10
     split_size = int(config.settings['trans_thread'])
     split_source_text = [source_text[i:i + split_size] for i in range(0, len(source_text), split_size)]
-
+    response=None
     for it in split_source_text:
         try:
             source_length=len(it)
@@ -88,6 +88,8 @@ def trans(text_list, target_language="English", *, set_p=True):
             result=result.strip().replace(''','"').split("\n")
             if set_p:
                 tools.set_process("\n\n".join(result), 'subtitle')
+            else:
+                tools.set_process("\n\n".join(result), func_name="set_fanyi")
             while len(result) < source_length:
                 result.append("")
             result = result[:source_length]
diff --git a/videotrans/translator/google.py b/videotrans/translator/google.py
index 1d5c434..4f483d5 100644
--- a/videotrans/translator/google.py
+++ b/videotrans/translator/google.py
@@ -62,6 +62,8 @@ def trans(text_list, target_language="en", *, set_p=True):
         result=re_result[0].strip().replace(''','"').split("\n")
         if set_p:
             tools.set_process("\n\n".join(result), 'subtitle')
+        else:
+            tools.set_process("\n\n".join(result), func_name="set_fanyi")
         result_length=len(result)
         print(f'{result_length=}')
         config.logger.info(f'{result_length=},{source_length=}')
diff --git a/videotrans/translator/ott.py b/videotrans/translator/ott.py
index 6181651..d4276a8 100644
--- a/videotrans/translator/ott.py
+++ b/videotrans/translator/ott.py
@@ -61,6 +61,8 @@ def trans(text_list, target_language="en", *, set_p=True):
         result=result['translatedText'].strip().replace(''','"').split("\n")
         if set_p:
             tools.set_process("\n\n".join(result), 'subtitle')
+        else:
+            tools.set_process("\n\n".join(result), func_name="set_fanyi")
         result_length = len(result)
         while result_length < source_length:
             result.append("")
diff --git a/videotrans/translator/tencent.py b/videotrans/translator/tencent.py
index fe02641..997d08d 100644
--- a/videotrans/translator/tencent.py
+++ b/videotrans/translator/tencent.py
@@ -60,8 +60,9 @@ def trans(text_list, target_language="en", *, set_p=True):
         result = resp.TargetText.strip().replace(''','"').split("\n")#json.loads(resp.TargetText)
         if set_p:
             tools.set_process("\n\n".join(result), 'subtitle')
+        else:
+            tools.set_process("\n\n".join(result), func_name="set_fanyi")
         result_length = len(result)
-        print(f'{result_length=}')
         while result_length < source_length:
             result.append("")
             result_length += 1
diff --git a/videotrans/ui/info.py b/videotrans/ui/info.py
index a7c0533..93fa8bc 100644
--- a/videotrans/ui/info.py
+++ b/videotrans/ui/info.py
@@ -108,7 +108,18 @@ def retranslateUi(self, infoform):
 *明 2024-1-29 捐助 20 元 /
 5*) 2024-1-30 捐助 10 元 /
 rqi14(U*d) 2024-1-30 捐助 200 元 /
-
+*明 2024-1-29 捐助 20 元 /
+*骁(支付宝) 2024-1-29 捐助 2 元 /
+5*) 2024-1-30 捐助 10 元 /
+rqi14(U*d) 2024-1-30 捐助 200 元 /
+*正 2024-1-30 捐助 10 元 /
+i*8 2024-1-31 捐助 18 元 /
+*. 2024-2-1 捐助 10 元 /
+*途 2024-2-1 捐助 30 元 /
+*甜(bingsunny0730) 2024-2-2 捐助 1.68 元 /
+k*v 2024-2-2 捐助 10 元 /
+*林(xjsszl) 2024-2-2 捐助 10 元 /
+**宇(支付宝) 2024-2-2 捐助 10 元 /
 
 全部捐助列表
 
diff --git a/videotrans/ui/toolboxen.py b/videotrans/ui/toolboxen.py
index be233a3..23007e4 100644
--- a/videotrans/ui/toolboxen.py
+++ b/videotrans/ui/toolboxen.py
@@ -9,6 +9,8 @@
 from PySide6 import QtCore, QtGui, QtWidgets
+
+from videotrans.configure import config
 from videotrans.configure.config import box_lang
 
 class Ui_MainWindow(object):
@@ -150,12 +152,26 @@ def setupUi(self, MainWindow):
         self.ysphb_srtinput.setMinimumSize(QtCore.QSize(0, 40))
         self.ysphb_srtinput.setReadOnly(True)
         self.ysphb_srtinput.setObjectName("ysphb_srtinput")
+        self.horizontalLayout_6.addWidget(self.ysphb_srtinput, 0, QtCore.Qt.AlignTop)
         self.ysphb_selectsrt = QtWidgets.QPushButton(self.tab_3)
         self.ysphb_selectsrt.setMinimumSize(QtCore.QSize(150, 40))
         self.ysphb_selectsrt.setObjectName("ysphb_selectsrt")
         self.horizontalLayout_6.addWidget(self.ysphb_selectsrt, 0, QtCore.Qt.AlignTop)
+
+
+
+        self.horizontalLayout_replace = QtWidgets.QHBoxLayout()
+        self.horizontalLayout_replace.setObjectName("horizontalLayout_replace")
+        self.ysphb_replace = QtWidgets.QCheckBox(self.tab_3)
+        self.ysphb_replace.setObjectName("ysphb_replace")
+        self.ysphb_replace.setChecked(True)
+        self.ysphb_replace.setText(config.transobj['Preserve the original sound in the video'])
+        self.horizontalLayout_replace.addWidget(self.ysphb_replace, 0, QtCore.Qt.AlignTop)
+
         self.verticalLayout.addLayout(self.horizontalLayout_6)
+        self.verticalLayout.addLayout(self.horizontalLayout_replace)
+
         self.verticalLayout_9.addLayout(self.verticalLayout)
 
         self.verticalLayout_8 = QtWidgets.QVBoxLayout()
         self.verticalLayout_8.setSizeConstraint(QtWidgets.QLayout.SetMinimumSize)
diff --git a/videotrans/util/tools.py b/videotrans/util/tools.py
index 6b581c6..e263071 100644
--- a/videotrans/util/tools.py
+++ b/videotrans/util/tools.py
@@ -737,11 +737,11 @@ def get_clone_role():
     return False
 
 # 工具箱写入日志队列
-def set_process_box(text, type='logs'):
-    set_process(text, type, "box")
+def set_process_box(text, type='logs',*,func_name=""):
+    set_process(text, type, qname="box",func_name=func_name)
 
 # 综合写入日志,默认sp界面
-def set_process(text, type="logs", qname='sp'):
+def set_process(text, type="logs",*,qname='sp',func_name=""):
     try:
         if text:
             log_msg = text.strip()
@@ -753,7 +753,7 @@ def set_process(text, type="logs", qname='sp'):
         if qname == 'sp':
             config.queue_logs.put_nowait({"text": text, "type": type,"btnkey":config.btnkey})
         elif qname=='box':
-            config.queuebox_logs.put_nowait({"text": text, "type": type})
+            config.queuebox_logs.put_nowait({"text": text, "type": type,"func_name":func_name})
         else:
             print(f'[{type}]: {text}')
     except Exception as e:
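
Reviewer note on the core refactor in this patch: the toolbox workers no longer emit per-thread Qt signals (update_ui / pyqtSignal); instead they call tools.set_process_box(), which after this change enqueues a dict of {"text", "type", "func_name"} onto config.queuebox_logs, and the toolbox window's receiver() dispatches on func_name (yspfl_end, set_fanyi, set_subtitle, ...). Below is a minimal standalone sketch of that flow for reference only; it is not part of the patch, and the poll_box_queue() helper and its wiring are illustrative assumptions (in the application a timer on the toolbox window would drain the queue and call receiver()).

# Illustrative sketch, not part of the patch: queue-based progress reporting
# replacing the old per-thread pyqtSignal wiring.
import json
import queue

queuebox_logs = queue.Queue()  # stands in for config.queuebox_logs

def set_process_box(text, type='logs', *, func_name=""):
    # mirrors videotrans/util/tools.py after this patch: every message carries func_name
    queuebox_logs.put_nowait({"text": text, "type": type, "func_name": func_name})

def poll_box_queue(receiver):
    # hypothetical polling helper: drain the queue and hand each payload to receiver()
    while not queuebox_logs.empty():
        receiver(json.dumps(queuebox_logs.get_nowait()))

def receiver(json_data):
    # simplified version of MainWindow.receiver() dispatching on func_name
    data = json.loads(json_data)
    if data.get('func_name') == 'fanyi_end':
        print('translation finished:', data['text'][:30])
    elif data.get('func_name') == 'set_fanyi':
        print('append partial translation:', data['text'][:30])
    elif not data.get('func_name'):
        print('status:', data['text'][:60])

set_process_box("hello world", "end", func_name="fanyi_end")
poll_box_queue(receiver)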