inference.py (forked from ducanhdt/openai_whisper_finetuning)
import os

import numpy as np
import torch
import whisper
from dotenv import load_dotenv

from model import WhisperModelModule
from ultis import load_config_file

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Read CONFIG_PATH from the .env file and load the run configuration
load_dotenv()
config = load_config_file(os.environ["CONFIG_PATH"])
checkpoint_path = config["checkpoint_path"]
# Build the Lightning module and try to restore the fine-tuned checkpoint;
# fall back to the original pretrained weights if loading fails.
module = WhisperModelModule(config)
try:
    state_dict = torch.load(checkpoint_path, map_location=device)
    state_dict = state_dict["state_dict"]
    module.load_state_dict(state_dict)
    print(f"Loaded checkpoint successfully from {checkpoint_path}")
except Exception as e:
    print(e)
    print(f"Failed to load checkpoint, using original weights of the {config['model_name']} model")
model = module.model
model.to(device)
print(
f"Model is {'multilingual' if model.is_multilingual else 'English-only'} "
f"and has {sum(np.prod(p.shape) for p in model.parameters()):,} parameters."
)
# result = model.transcribe("audio.wav")
audio = whisper.load_audio("audio.wav")
audio = whisper.pad_or_trim(audio)
# make log-Mel spectrogram and move to the same device as the model
mel = whisper.log_mel_spectrogram(audio).to(model.device)
# detect the spoken language
_, probs = model.detect_language(mel)
print(f"Detected language: {max(probs, key=probs.get)}")
# decode the audio
options = whisper.DecodingOptions(
language="vi", without_timestamps=True, fp16=torch.cuda.is_available()
)
result = whisper.decode(model, mel, options)
# print(options)
# print the encoder output shape and the recognized text
print(result.audio_features.shape)
print(result.text)
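# (Added sketch, not part of the original script) decode() above only covers a
# single 30-second window because of pad_or_trim. For a full-length recording,
# whisper's built-in transcribe() handles chunking internally; a minimal sketch,
# assuming the same fine-tuned model and the same Vietnamese audio file:
full_result = model.transcribe(
    "audio.wav", language="vi", fp16=torch.cuda.is_available()
)
print(full_result["text"])
for segment in full_result["segments"]:
    print(f"[{segment['start']:.1f}s -> {segment['end']:.1f}s] {segment['text']}")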