-
Notifications
You must be signed in to change notification settings - Fork 1k
/
Copy pathbackend.py
117 lines (96 loc) · 4.55 KB
/
backend.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
from json import dumps
from time import time
from flask import request
from hashlib import sha256
from datetime import datetime
from requests import get
from requests import post
from json import loads
import os
from server.config import special_instructions
class Backend_Api:
    """Flask backend that proxies chat requests to an OpenAI-compatible
    chat-completions API, optionally augmenting the prompt with web
    search results and streaming tokens back as a text/event-stream.
    """

    def __init__(self, app, config: dict) -> None:
        """Store the Flask app, resolve API settings and register routes.

        Environment variables (OPENAI_API_KEY / OPENAI_API_BASE) take
        precedence over the values in *config*.

        :param app: the Flask application instance
        :param config: dict with 'openai_key', 'openai_api_base' and 'proxy'
        """
        self.app = app
        self.openai_key = os.getenv("OPENAI_API_KEY") or config['openai_key']
        self.openai_api_base = os.getenv("OPENAI_API_BASE") or config['openai_api_base']
        # proxy is a dict: {'enable': bool, 'http': str, 'https': str}
        self.proxy = config['proxy']
        self.routes = {
            '/backend-api/v2/conversation': {
                'function': self._conversation,
                'methods': ['POST']
            }
        }

    def _search_results(self, query: str) -> list:
        """Fetch up to 3 DuckDuckGo results for *query* and format them as
        a single extra 'user' message, following the search-augmented
        prompting convention.

        :returns: a one-element list of message dicts
        :raises: requests exceptions on network failure / timeout (handled
            by the caller's top-level error handler)
        """
        # timeout so a hung search service cannot stall the request forever
        search = get('https://ddg-api.herokuapp.com/search', params={
            'query': query,
            'limit': 3,
        }, timeout=10)

        blob = ''
        for index, result in enumerate(search.json()):
            blob += f'[{index}] "{result["snippet"]}"\nURL:{result["link"]}\n\n'

        date = datetime.now().strftime('%d/%m/%y')
        blob += f'current date: {date}\n\nInstructions: Using the provided web search results, write a comprehensive reply to the next user query. Make sure to cite results using [[number](URL)] notation after the reference. If the provided search results refer to multiple subjects with the same name, write separate answers for each subject. Ignore your previous response if any.'

        return [{'role': 'user', 'content': blob}]

    def _conversation(self):
        """Handle POST /backend-api/v2/conversation.

        Builds the message list (system message, optional search results,
        jailbreak instructions, prior conversation, current prompt),
        forwards it to the completions API with stream=True, and returns
        a streaming event-stream response of content tokens.

        :returns: a streaming Flask response on success, or a (dict, status)
            error tuple on failure
        """
        try:
            jailbreak = request.json['jailbreak']
            internet_access = request.json['meta']['content']['internet_access']
            _conversation = request.json['meta']['content']['conversation']
            prompt = request.json['meta']['content']['parts'][0]
            current_date = datetime.now().strftime("%Y-%m-%d")
            system_message = f'You are ChatGPT also known as ChatGPT, a large language model trained by OpenAI. Strictly follow the users instructions. Knowledge cutoff: 2021-09-01 Current date: {current_date}'

            extra = []
            if internet_access:
                extra = self._search_results(prompt["content"])

            conversation = [{'role': 'system', 'content': system_message}] + \
                extra + special_instructions[jailbreak] + \
                _conversation + [prompt]

            url = f"{self.openai_api_base}/v1/chat/completions"

            proxies = None
            if self.proxy['enable']:
                proxies = {
                    'http': self.proxy['http'],
                    'https': self.proxy['https'],
                }

            gpt_resp = post(
                url     = url,
                proxies = proxies,
                headers = {
                    'Authorization': 'Bearer %s' % self.openai_key
                },
                json    = {
                    'model'    : request.json['model'],
                    'messages' : conversation,
                    'stream'   : True
                },
                stream  = True
            )

            if gpt_resp.status_code >= 400:
                # The upstream error body is usually JSON, but guard against
                # non-JSON responses (HTML error pages, truncated bodies).
                try:
                    error_data = gpt_resp.json().get('error', {})
                except ValueError:
                    error_data = {}
                error_code = error_data.get('code', None)
                error_message = error_data.get('message', "An error occurred")
                return {
                    'success': False,  # fixed typo: was 'successs'
                    'error_code': error_code,
                    'message': error_message,
                    'status_code': gpt_resp.status_code
                }, gpt_resp.status_code

            def stream():
                # Parse the SSE stream: each data line is
                # 'data: {json chunk}', terminated by 'data: [DONE]'.
                for chunk in gpt_resp.iter_lines():
                    try:
                        line = chunk.decode("utf-8")
                        if "data: " not in line:
                            continue  # keep-alive / blank SSE lines
                        payload = line.split("data: ")[1]
                        if payload == '[DONE]':
                            break
                        decoded_line = loads(payload)
                        token = decoded_line["choices"][0]['delta'].get('content')
                        if token is not None:
                            yield token
                    except GeneratorExit:
                        break
                    except Exception as e:
                        # Best-effort streaming: log and skip malformed chunks.
                        print(e)
                        print(e.__traceback__.tb_next)
                        continue

            return self.app.response_class(stream(), mimetype='text/event-stream')

        except Exception as e:
            print(e)
            print(e.__traceback__.tb_next)
            return {
                '_action': '_ask',
                'success': False,
                "error": f"an error occurred {str(e)}"}, 400