Release 2.3.0a
IgnatovFedor committed Aug 22, 2022
2 parents 74b5b99 + e594de2 commit 3fd553b
Showing 34 changed files with 911 additions and 270 deletions.
1 change: 1 addition & 0 deletions MANIFEST.in
@@ -1,3 +1,4 @@
include requirements.txt
include deeppavlov_agent/http_api/templates/*.html
include deeppavlov_agent/log_config.yml
include deeppavlov_agent/channels/telegram/config/*.yml
Empty file.
Empty file.
173 changes: 173 additions & 0 deletions deeppavlov_agent/channels/telegram/bot.py
@@ -0,0 +1,173 @@
import asyncio
import logging
from pathlib import Path

from aiogram import Bot, Dispatcher, types
from aiogram.contrib.fsm_storage.memory import MemoryStorage
from aiogram.dispatcher import FSMContext
from aiogram.dispatcher.filters.state import State, StatesGroup
from aiogram.utils import executor

from .utils import MessageResponder

config_dir = Path(__file__).resolve().parent / 'config'

logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO
)
logger = logging.getLogger(__name__)


class DialogState(StatesGroup):
    active = State()
    awaiting_rating = State()
    inactive = State()


def run_tg(token, proxy, agent):
    loop = asyncio.get_event_loop()
    bot = Bot(token=token, loop=loop, proxy=proxy)
    storage = MemoryStorage()  # TODO change to actual storage maybe?
    dp = Dispatcher(bot, storage=storage)
    responder = MessageResponder(
        config_path=config_dir / "telegram_config.yml",
        messages_path=config_dir / "telegram_messages.yml",
        keyboards_path=config_dir / "telegram_keyboards.yml",
    )

    @dp.message_handler(commands="start")
    async def start_handler(message: types.Message):
        text = responder.message("start")
        reply_markup = responder.reply_keyboard("dialog_inactive")

        await message.answer(text, reply_markup=reply_markup)

    @dp.message_handler(commands="help", state="*")
    async def help_handler(message: types.Message):
        text = responder.message("help")

        await message.answer(text)

    @dp.message_handler(commands="complain", state="*")
    async def complain_handler(message: types.Message, state: FSMContext):
        # TODO Add actual complaint logic
        if await state.get_state() == DialogState.active.state:
            text = responder.message("complain_success")
        else:
            text = responder.message("complain_fail")

        await message.answer(text)

    @dp.message_handler(commands="begin", state="*")
    async def begin_dialog(message: types.Message, state: FSMContext):
        state = await state.get_state()
        must_evaluate = (
            state == DialogState.awaiting_rating.state
            and responder.config.evaluation_options.user_must_evaluate
        )
        is_not_finished = state == DialogState.active.state  # compare against the state string returned by get_state()

        if must_evaluate or is_not_finished:
            text = responder.message("begin_fail")
            reply_markup = None

        else:
            await DialogState.active.set()

            text = responder.message("begin_success")
            reply_markup = responder.reply_keyboard("dialog_active")

        await message.answer(text, reply_markup=reply_markup)

    @dp.message_handler(commands="end", state="*")
    async def end_dialog(message: types.Message, state: FSMContext):
        if await state.get_state() != DialogState.active.state:
            text = responder.message("end_fail")
            reply_markup = responder.reply_keyboard("dialog_inactive")
        else:
            text = responder.message("end_success")
            dialog_id = await agent.state_manager.drop_active_dialog(
                str(message.from_user.id)
            )
            reply_markup = responder.dialog_rating_inline_keyboard(dialog_id)

            await DialogState.awaiting_rating.set()

        await message.answer(text, reply_markup=reply_markup)

    @dp.callback_query_handler(
        lambda c: c.data.startswith("utt"), state=DialogState.active
    )
    async def handle_utterance_rating(
        callback_query: types.CallbackQuery, state: FSMContext
    ):
        _, utterance_id, rating = callback_query.data.split("-")
        await agent.state_manager.set_rating_utterance(
            str(callback_query.from_user.id), utterance_id, rating
        )
        await bot.answer_callback_query(callback_query.id, text=rating.capitalize())

    @dp.callback_query_handler(lambda c: c.data.startswith("dialog"), state="*")
    async def handle_dialog_rating(
        callback_query: types.CallbackQuery, state: FSMContext
    ):
        if await state.get_state() != DialogState.active.state:
            _, dialog_id, rating = callback_query.data.split("-")

            await agent.state_manager.set_rating_dialog(
                str(callback_query.from_user.id), dialog_id, rating
            )

            edited_inline_keyboard = responder.dialog_rating_inline_keyboard(
                dialog_id, chosen_rating=rating
            )

            await bot.edit_message_reply_markup(
                chat_id=callback_query.from_user.id,
                message_id=callback_query.message.message_id,
                reply_markup=edited_inline_keyboard,
            )

            if responder.config.dialog_options.reveal_dialog_id:
                message_text = responder.message(
                    "evaluate_dialog_success_reveal_id", dialog_id=dialog_id
                )
            else:
                message_text = responder.message("evaluate_dialog_success")
            callback_text = "Evaluation saved!"
            reply_markup = responder.reply_keyboard("dialog_inactive")

            await DialogState.inactive.set()

        else:
            callback_text = ""
            message_text = responder.message("evaluate_dialog_success")
            reply_markup = None

        await bot.answer_callback_query(callback_query.id, text=callback_text)
        await bot.send_message(
            callback_query.from_user.id, message_text, reply_markup=reply_markup
        )

    @dp.message_handler(state="*")
    async def handle_message(message: types.Message, state: FSMContext):
        if await state.get_state() == DialogState.active.state:
            response_data = await agent.register_msg(
                utterance=message.text,
                user_external_id=str(message.from_user.id),
                user_device_type="telegram",
                date_time=message.date,
                location="",
                channel_type="telegram",
                require_response=True,
            )
            text = response_data["dialog"].utterances[-1].text
            utterance_id = response_data["dialog"].utterances[-1].utt_id
            reply_markup = responder.utterance_rating_inline_keyboard(utterance_id)
        else:
            text = responder.message("unexpected_message")
            reply_markup = None

        await message.answer(text, reply_markup=reply_markup)

    executor.start_polling(dp, skip_updates=True)
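
For orientation, here is a minimal launch sketch, not part of this commit. Only the run_tg(token, proxy, agent) signature above is taken from the diff; the environment-variable names and the launch_telegram_channel helper are made up for illustration, and the agent object is assumed to be built by the rest of deeppavlov_agent, exposing the state_manager and register_msg used by the handlers.

import os

from deeppavlov_agent.channels.telegram.bot import run_tg


def launch_telegram_channel(agent):
    # Start long polling for the Telegram channel with an already-built agent.
    # TELEGRAM_TOKEN and TELEGRAM_PROXY are hypothetical variable names.
    run_tg(
        token=os.environ["TELEGRAM_TOKEN"],      # bot token issued by @BotFather
        proxy=os.environ.get("TELEGRAM_PROXY"),  # optional proxy URL or None
        agent=agent,                             # must provide state_manager and register_msg
    )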
7 changes: 7 additions & 0 deletions deeppavlov_agent/channels/telegram/config/telegram_config.yml
@@ -0,0 +1,7 @@
dialog_options:
  reveal_dialog_id: true

evaluation_options:
  user_must_evaluate: true
  min_score: 1
  max_score: 5
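
The handlers above read these options as attributes (responder.config.dialog_options.reveal_dialog_id, responder.config.evaluation_options.user_must_evaluate). MessageResponder lives in utils.py and is not shown in this diff; the sketch below only illustrates, under that assumption, how such a YAML file could be exposed through attribute access.

from types import SimpleNamespace

import yaml


def load_config(path):
    # Read the YAML config and wrap each top-level section so nested keys
    # become attributes, e.g. config.evaluation_options.min_score.
    with open(path) as f:
        raw = yaml.safe_load(f)
    return SimpleNamespace(
        **{key: SimpleNamespace(**value) if isinstance(value, dict) else value
           for key, value in raw.items()}
    )


config = load_config("telegram_config.yml")
print(config.dialog_options.reveal_dialog_id)  # True with the values above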
7 changes: 7 additions & 0 deletions deeppavlov_agent/channels/telegram/config/telegram_keyboards.yml
@@ -0,0 +1,7 @@
dialog_inactive:
  - '/begin'
  - '/help'

dialog_active:
  - '/end'
  - '/complain'
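
These lists name the commands offered as reply-keyboard buttons in the inactive and active dialog states (responder.reply_keyboard("dialog_inactive") and ("dialog_active") in bot.py). The actual keyboard construction happens inside MessageResponder; the sketch below is only an assumed equivalent using the aiogram types the bot already imports.

import yaml
from aiogram import types


def reply_keyboard(keyboards_path, name):
    # Build a reply keyboard with one command button per row
    # from the named list in telegram_keyboards.yml.
    with open(keyboards_path) as f:
        commands = yaml.safe_load(f)[name]
    markup = types.ReplyKeyboardMarkup(resize_keyboard=True)
    for command in commands:
        markup.add(types.KeyboardButton(command))
    return markup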
61 changes: 61 additions & 0 deletions deeppavlov_agent/channels/telegram/config/telegram_messages.yml
@@ -0,0 +1,61 @@
start: >
  This chatbot is developed by Neural Networks and Deep Learning Lab at MIPT.
  Please have a chat with it and evaluate its performance.
  To begin a conversation, enter /begin. To end a conversation, enter /end.
  When the dialogue is finished, you will be asked to evaluate it.
  You will have to rate the conversation from 1 (bad) to 5 (excellent).
  Your conversations will be recorded for further use.
  By starting a chat you give permission for your anonymized conversation data
  to be released publicly under Apache License Version 2.0
  https://www.apache.org/licenses/LICENSE-2.0.
help: >
  This chatbot is developed by Neural Networks and Deep Learning Lab at MIPT.
  Please have a chat with it and evaluate its performance.
  To begin a conversation, enter /begin. To end a conversation, enter /end.
  When the dialogue is finished, you will be asked to evaluate it.
  You will have to rate the conversation from 1 (bad) to 5 (excellent).
  Your conversations will be recorded for further use.
  By starting a chat you give permission for your anonymized conversation data
  to be released publicly under Apache License Version 2.0
  https://www.apache.org/licenses/LICENSE-2.0.
complain_success: >
  Your complaint has been recorded and will be examined by the system administrator.
  Note that your conversation is still active.
  You can always use the /end command to end it.
complain_fail: >
  Could not save your complaint. Did you /begin the dialog?
  You cannot complain when there are no messages in the dialog.
begin_success: >
  Starting a dialog. To finish the dialog, enter /end.
begin_fail: >
  Cannot start a new conversation.
  Please finish and evaluate your current dialog first.
  Use the /help command for usage instructions.
end_success: >
  The dialog is finished.
  Please evaluate the whole dialog using one of the buttons below:
end_fail: >
  You're not in a dialog now.
evaluate_dialog_success: >
  Thank you for your participation!
evaluate_dialog_success_reveal_id: >
  Thank you for your participation!
  Evaluated dialog secret id: ${dialog_id}
evaluate_dialog_fail: >
  Evaluation is not allowed at the moment.
  Use the /help command for usage instructions.
unexpected_message: >
  Unexpected message.
  You are not in a dialog yet, or the dialog has already been finished.
  Use the /help command for usage instructions.
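
The ${dialog_id} placeholder in evaluate_dialog_success_reveal_id suggests string.Template-style substitution, matching the responder.message("evaluate_dialog_success_reveal_id", dialog_id=dialog_id) call in bot.py. The real formatting code is in utils.py and is not shown here; the following is only a sketch under that assumption.

from string import Template

import yaml


def message(messages_path, name, **fields):
    # Look up the named message and fill in any ${placeholder} fields;
    # safe_substitute leaves unknown placeholders untouched.
    with open(messages_path) as f:
        messages = yaml.safe_load(f)
    return Template(messages[name]).safe_substitute(**fields)


# e.g. message("telegram_messages.yml", "evaluate_dialog_success_reveal_id", dialog_id=42)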
