Commit 75ebbe0: distilled whisperx
kyegomez committed Nov 4, 2023 · 1 parent 2f31a63
Showing 4 changed files with 167 additions and 9 deletions.
2 changes: 1 addition & 1 deletion example.py
@@ -8,7 +8,7 @@
# model_name="gpt-4"
openai_api_key=api_key,
temperature=0.5,
-#max_tokens=100,
+# max_tokens=100,
)

## Initialize the workflow
12 changes: 6 additions & 6 deletions groupchat.py
@@ -15,28 +15,28 @@
llm=llm,
max_loops=1,
system_message="YOU ARE SILLY, YOU OFFER NOTHING OF VALUE",
-name='silly',
+name="silly",
dashboard=True,
)
flow2 = Flow(
llm=llm,
max_loops=1,
system_message="YOU ARE VERY SMART AND ANSWER RIDDLES",
-name='detective',
+name="detective",
dashboard=True,
)
flow3 = Flow(
llm=llm,
max_loops=1,
system_message="YOU MAKE RIDDLES",
-name='riddler',
+name="riddler",
dashboard=True,
)
manager = Flow(
llm=llm,
max_loops=1,
system_message="YOU ARE A GROUP CHAT MANAGER",
-name='manager',
+name="manager",
dashboard=True,
)

@@ -45,5 +45,5 @@
agents = [flow1, flow2, flow3]

group_chat = GroupChat(agents=agents, messages=[], max_round=10)
-chat_manager = GroupChatManager(groupchat=group_chat, selector = manager)
-chat_history = chat_manager("Write me a riddle")
+chat_manager = GroupChatManager(groupchat=group_chat, selector=manager)
+chat_history = chat_manager("Write me a riddle")
1 change: 1 addition & 0 deletions swarms/models/__init__.py
@@ -16,6 +16,7 @@
from swarms.models.vilt import Vilt
from swarms.models.nougat import Nougat
from swarms.models.layoutlm_document_qa import LayoutLMDocumentQA
+# from swarms.models.distilled_whisperx import DistilWhisperModel

# from swarms.models.fuyu import Fuyu # Not working, wait until they update
import sys
161 changes: 159 additions & 2 deletions swarms/models/distilled_whisperx.py
@@ -1,3 +1,160 @@
"""
import asyncio
import os
import time
from functools import wraps
from typing import Union

"""
import torch
from termcolor import colored
from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline


def async_retry(max_retries=3, exceptions=(Exception,), delay=1):
"""
A decorator for adding retry logic to async functions.
:param max_retries: Maximum number of retries before giving up.
:param exceptions: A tuple of exceptions to catch and retry on.
    :param delay: Delay in seconds between retries.
"""

def decorator(func):
@wraps(func)
async def wrapper(*args, **kwargs):
retries = max_retries
while retries:
try:
return await func(*args, **kwargs)
except exceptions as e:
retries -= 1
if retries <= 0:
raise
print(f"Retry after exception: {e}, Attempts remaining: {retries}")
await asyncio.sleep(delay)

return wrapper

return decorator
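
# Example (hypothetical usage sketch): any coroutine can opt in to the
# retry behavior, e.g.
#
#     @async_retry(max_retries=5, exceptions=(IOError,), delay=2)
#     async def transcribe_with_retries(model, path):
#         return await model.async_transcribe(path)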


class DistilWhisperModel:
"""
This class encapsulates the Distil-Whisper model for English speech recognition.
It allows for both synchronous and asynchronous transcription of short and long-form audio.
Args:
model_id: The model ID to use. Defaults to "distil-whisper/distil-large-v2".
Attributes:
device: The device to use for inference.
torch_dtype: The torch data type to use for inference.
model_id: The model ID to use.
model: The model instance.
processor: The processor instance.
Usage:
model_wrapper = DistilWhisperModel()
transcription = model_wrapper('path/to/audio.mp3')
# For async usage
transcription = asyncio.run(model_wrapper.async_transcribe('path/to/audio.mp3'))
"""

def __init__(self, model_id="distil-whisper/distil-large-v2"):
self.device = "cuda:0" if torch.cuda.is_available() else "cpu"
self.torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
self.model_id = model_id
self.model = AutoModelForSpeechSeq2Seq.from_pretrained(
model_id,
torch_dtype=self.torch_dtype,
low_cpu_mem_usage=True,
use_safetensors=True,
).to(self.device)
self.processor = AutoProcessor.from_pretrained(model_id)

def __call__(self, inputs: Union[str, dict]):
return self.transcribe(inputs)

def transcribe(self, inputs: Union[str, dict]):
"""
Synchronously transcribe the given audio input using the Distil-Whisper model.
:param inputs: A string representing the file path or a dict with audio data.
:return: The transcribed text.
"""
pipe = pipeline(
"automatic-speech-recognition",
model=self.model,
tokenizer=self.processor.tokenizer,
feature_extractor=self.processor.feature_extractor,
max_new_tokens=128,
torch_dtype=self.torch_dtype,
device=self.device,
)

return pipe(inputs)["text"]

@async_retry()
async def async_transcribe(self, inputs: Union[str, dict]):
"""
Asynchronously transcribe the given audio input using the Distil-Whisper model.
:param inputs: A string representing the file path or a dict with audio data.
:return: The transcribed text.
"""
        # Offload the blocking transcribe call to a worker thread so the
        # event loop stays responsive
        loop = asyncio.get_running_loop()
        return await loop.run_in_executor(None, self.transcribe, inputs)

def real_time_transcribe(self, audio_file_path, chunk_duration=5):
"""
Simulates real-time transcription of an audio file, processing and printing results
in chunks with colored output for readability.
:param audio_file_path: Path to the audio file to be transcribed.
:param chunk_duration: Duration in seconds of each audio chunk to be processed.
"""
if not os.path.isfile(audio_file_path):
print(colored("The audio file was not found.", "red"))
return

        # `chunk_duration` is in seconds; decode the file once up front,
        # then process and transcribe it chunk by chunk
        try:
            with torch.no_grad():
                # AutoProcessor has no audio-loading helper, so use the
                # ffmpeg-based reader that ships with transformers
                sample_rate = self.processor.feature_extractor.sampling_rate
                with open(audio_file_path, "rb") as f:
                    audio_array = ffmpeg_read(f.read(), sampling_rate=sample_rate)
                chunk_size = sample_rate * chunk_duration
                chunks = [
                    audio_array[i : i + chunk_size]
                    for i in range(0, len(audio_array), chunk_size)
                ]

print(colored("Starting real-time transcription...", "green"))

for i, chunk in enumerate(chunks):
                    # Compute log-mel input features for the current chunk
                    processed_inputs = self.processor(
                        chunk,
                        sampling_rate=sample_rate,
                        return_tensors="pt",
                    )
                    input_features = processed_inputs.input_features.to(
                        self.device, dtype=self.torch_dtype
                    )

                    # Generate the chunk's transcription (generate returns
                    # token ids, which batch_decode turns back into text)
                    predicted_ids = self.model.generate(input_features)
                    transcription = self.processor.batch_decode(
                        predicted_ids, skip_special_tokens=True
                    )[0]

# Print the chunk's transcription
print(
colored(f"Chunk {i+1}/{len(chunks)}: ", "yellow")
+ transcription
)

# Wait for the chunk's duration to simulate real-time processing
time.sleep(chunk_duration)

except Exception as e:
print(colored(f"An error occurred during transcription: {e}", "red"))
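
A minimal usage sketch for the new module (hypothetical file path; it mirrors the Usage section of the class docstring):

import asyncio

from swarms.models.distilled_whisperx import DistilWhisperModel

model = DistilWhisperModel()  # defaults to distil-whisper/distil-large-v2

# Synchronous transcription
print(model.transcribe("path/to/audio.mp3"))

# Asynchronous transcription, retried up to three times by async_retry
print(asyncio.run(model.async_transcribe("path/to/audio.mp3")))

# Simulated real-time transcription in 5-second chunks
model.real_time_transcribe("path/to/audio.mp3", chunk_duration=5)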
