Merge pull request #75 from innightwolfsleep/structure_refactor

Structure refactor

innightwolfsleep authored Sep 13, 2023
2 parents a29645f + e70d296 commit 6d7aa61
Showing 12 changed files with 1,499 additions and 1,497 deletions.
2,775 changes: 1,388 additions & 1,387 deletions telegram_bot_wrapper.py → main.py

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion requirements.txt
@@ -11,4 +11,4 @@ backoff>=2.2.1
 langchain>=0.0.286
 requests>=2.31.0
 urllib3>=2.0.4
-python-dotenv==1.0.0
+python-dotenv==1.0.0
2 changes: 1 addition & 1 deletion run.py
@@ -1,7 +1,7 @@
 import sys
 import os
 from threading import Thread
-from telegram_bot_wrapper import TelegramBotWrapper
+from main import TelegramBotWrapper
 from dotenv import load_dotenv
 
 config_file_path = "configs/telegram_config.json"
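The import swap above is the only functional change in run.py; the rest of the entry point is not shown in this diff. For orientation, a minimal sketch of how such an entry point could wire these imports together, with a hypothetical TelegramBotWrapper constructor, method name, and environment variable that may not match the real class:

# Hypothetical sketch of an entry point in the style of run.py; the
# TelegramBotWrapper signature and BOT_TOKEN variable are assumptions.
import os
from threading import Thread

from dotenv import load_dotenv

from main import TelegramBotWrapper

config_file_path = "configs/telegram_config.json"

if __name__ == "__main__":
    load_dotenv()  # pull secrets such as the bot token from a .env file
    token = os.environ.get("BOT_TOKEN", "")  # hypothetical variable name
    wrapper = TelegramBotWrapper(config_file_path=config_file_path)  # hypothetical signature
    # Run the bot off the main thread so the process stays responsive.
    Thread(target=wrapper.run_telegram_bot, args=(token,)).start()  # hypothetical method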
210 changes: 105 additions & 105 deletions telegram_bot_generator.py → source/generator.py
@@ -1,105 +1,105 @@
-import importlib
-
-# generator obj
-generator = None
-debug_flag = True
-
-
-# import generator
-def init(script="GeneratorLlamaCpp", model_path="", n_ctx=4096, n_gpu_layers=0):
-    """Initiate generator type
-    generator - is a class Generator from package generators/script
-    Generator class should contain method:
-        __init__() - method to initiate model
-        get_answer() - method get answer
-        tokens_count(str) - method to get str length in tokens
-    If Generator.model_change_allowed = True - also method:
-        get_model_list() - get list of available models
-        load_model(str) - load new model
-    Args:
-        script: script type, one of generators/*.py files
-        model_path: path to model file, if generator needs
-        n_ctx: context length, if generator needs
-        n_gpu_layers: n_gpu_layers for llama
-    """
-    try:
-        generator_class = getattr(importlib.import_module("generators." + script), "Generator")
-    except ImportError:
-        generator_class = getattr(
-            importlib.import_module("extensions.telegram_bot.generators." + script),
-            "Generator",
-        )
-    global generator
-    generator = generator_class(model_path, n_ctx=n_ctx, n_gpu_layers=n_gpu_layers)
-
-
-def get_answer(
-    prompt, generation_params, eos_token, stopping_strings, default_answer: str, turn_template="", **kwargs
-) -> str:
-    """Generate and return answer string.
-    Args:
-        prompt: user prompt
-        generation_params: dict with various generator params
-        eos_token: list with end of string tokens
-        stopping_strings: list with strings stopping generating
-        default_answer: if generating fails, default_answer will be returned
-        turn_template: turn template if generator needs it
-    Returns:
-        generation result string
-    """
-    # Preparing, add stopping_strings
-    answer = default_answer
-    generation_params.update({"turn_template": turn_template})
-    if debug_flag:
-        print("stopping_strings =", stopping_strings)
-        print(prompt, end="")
-    try:
-        answer = generator.get_answer(
-            prompt,
-            generation_params,
-            eos_token,
-            stopping_strings,
-            default_answer,
-            turn_template,
-        )
-    except Exception as e:
-        print("generation error:", e)
-    if debug_flag:
-        print(answer)
-    return answer
-
-
-def tokens_count(text: str):
-    """Return string length in tokens
-    Args:
-        text: text to be counted
-    Returns:
-        text token length (int)
-    """
-    return generator.tokens_count(text)
-
-
-def get_model_list():
-    """Return list of available models
-    Returns:
-        list of available models
-    """
-    return generator.get_model_list()
-
-
-def load_model(model_file: str):
-    """Change current llm model to model_file
-    Args:
-        model_file: model file to be loaded
-    Returns:
-        True if loading successful, otherwise False
-    """
-    return generator.load_model(model_file)
+import importlib
+
+# generator obj
+generator = None
+debug_flag = True
+
+
+# import generator
+def init(script="GeneratorLlamaCpp", model_path="", n_ctx=4096, n_gpu_layers=0):
+    """Initiate generator type
+    generator - is a class Generator from package generators/script
+    Generator class should contain method:
+        __init__() - method to initiate model
+        get_answer() - method get answer
+        tokens_count(str) - method to get str length in tokens
+    If Generator.model_change_allowed = True - also method:
+        get_model_list() - get list of available models
+        load_model(str) - load new model
+    Args:
+        script: script type, one of generators/*.py files
+        model_path: path to model file, if generator needs
+        n_ctx: context length, if generator needs
+        n_gpu_layers: n_gpu_layers for llama
+    """
+    try:
+        generator_class = getattr(importlib.import_module("source.generators." + script), "Generator")
+    except ImportError:
+        generator_class = getattr(
+            importlib.import_module("extensions.source.telegram_bot.generators." + script),
+            "Generator",
+        )
+    global generator
+    generator = generator_class(model_path, n_ctx=n_ctx, n_gpu_layers=n_gpu_layers)
+
+
+def get_answer(
+    prompt, generation_params, eos_token, stopping_strings, default_answer: str, turn_template="", **kwargs
+) -> str:
+    """Generate and return answer string.
+    Args:
+        prompt: user prompt
+        generation_params: dict with various generator params
+        eos_token: list with end of string tokens
+        stopping_strings: list with strings stopping generating
+        default_answer: if generating fails, default_answer will be returned
+        turn_template: turn template if generator needs it
+    Returns:
+        generation result string
+    """
+    # Preparing, add stopping_strings
+    answer = default_answer
+    generation_params.update({"turn_template": turn_template})
+    if debug_flag:
+        print("stopping_strings =", stopping_strings)
+        print(prompt, end="")
+    try:
+        answer = generator.get_answer(
+            prompt,
+            generation_params,
+            eos_token,
+            stopping_strings,
+            default_answer,
+            turn_template,
+        )
+    except Exception as e:
+        print("generation error:", e)
+    if debug_flag:
+        print(answer)
+    return answer
+
+
+def get_tokens_count(text: str):
+    """Return string length in tokens
+    Args:
+        text: text to be counted
+    Returns:
+        text token length (int)
+    """
+    return generator.tokens_count(text)
+
+
+def get_model_list():
+    """Return list of available models
+    Returns:
+        list of available models
+    """
+    return generator.get_model_list()
+
+
+def load_model(model_file: str):
+    """Change current llm model to model_file
+    Args:
+        model_file: model file to be loaded
+    Returns:
+        True if loading successful, otherwise False
+    """
+    return generator.load_model(model_file)
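The init() docstring above spells out the plugin contract every module under source/generators/ is expected to satisfy. A minimal sketch of a conforming generator, written against that contract; the class body, echo logic, and whitespace tokenizer are illustrative stand-ins, not the repo's actual GeneratorLlamaCpp:

# Minimal sketch of a Generator plugin satisfying the documented contract;
# everything below is illustrative, not the repo's real implementation.
class Generator:
    # Per the docstring, get_model_list/load_model are only required
    # when this flag is True.
    model_change_allowed = True

    def __init__(self, model_path: str, n_ctx=4096, n_gpu_layers=0):
        self.model_path = model_path
        self.n_ctx = n_ctx
        self.n_gpu_layers = n_gpu_layers

    def get_answer(self, prompt, generation_params, eos_token,
                   stopping_strings, default_answer, turn_template="", **kwargs):
        # A real backend would run inference here; this stub just echoes.
        return "echo: " + prompt

    def tokens_count(self, text: str) -> int:
        # Crude whitespace stand-in for a real tokenizer.
        return len(text.split())

    def get_model_list(self):
        return [self.model_path]

    def load_model(self, model_file: str) -> bool:
        self.model_path = model_file
        return True

Dropped into source/generators/ as, say, GeneratorEcho.py (a hypothetical name), it would be reachable through the module-level API shown above:

from source import generator

generator.init(script="GeneratorEcho", model_path="")  # hypothetical plugin
print(generator.get_answer("hi", {}, [], [], default_answer="(failed)"))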
File renamed without changes.
@@ -42,7 +42,7 @@ def tokens_count(self, text: str):
 
     def get_model_list(self):
         bins = []
-        for i in os.listdir("../models"):
+        for i in os.listdir("../../models"):
             if i.endswith(".bin"):
                 bins.append(i)
         return bins
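The only change here is the relative path, bumped one level because the file moved from generators/ into source/generators/. Paths resolved this way are fragile; a variant anchored to the module file instead, sketched under the assumption that models/ sits at the repository root two levels above this file, would survive the next such move:

from pathlib import Path

    def get_model_list(self):
        # Resolve models/ relative to this source file rather than the
        # working directory, so the listing keeps working if the module
        # moves again (illustrative sketch, not the repo's code).
        models_dir = Path(__file__).resolve().parents[2] / "models"
        return [p.name for p in models_dir.glob("*.bin")]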
File renamed without changes.
File renamed without changes.
5 changes: 3 additions & 2 deletions telegram_bot_silero.py → source/silero.py
@@ -1,11 +1,12 @@
 import re
 
 import torch
 from num2words import num2words
 
 try:
-    from extensions.telegram_bot.TelegramBotUser import TelegramBotUser as User
+    from extensions.telegram_bot.src.TelegramBotUser import TelegramBotUser as User
 except ImportError:
-    from telegram_bot_user import TelegramBotUser as User
+    from source.user import TelegramBotUser as User
 
 
 class Silero:
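The re and num2words imports suggest the Silero class spells out digits before synthesis so the TTS voice reads numbers naturally. A sketch of that kind of preprocessing; the helper name and regex are assumptions, not the repo's actual code:

import re
from num2words import num2words

def spell_out_numbers(text: str, lang: str = "en") -> str:
    # Replace every digit run with its spoken form, e.g. "call 911" ->
    # "call nine hundred and eleven" (illustrative; the real normalization
    # in this repo may differ).
    return re.sub(r"\d+", lambda m: num2words(int(m.group(0)), lang=lang), text)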
File renamed without changes.
