Skip to content

Commit

Permalink
Merge branch 'GaiZhenbiao:main' into main
Browse files Browse the repository at this point in the history
  • Loading branch information
xingfanxia committed Feb 26, 2024
2 parents 01fad2d + 8ae89e8 commit fce203c
Show file tree
Hide file tree
Showing 14 changed files with 448 additions and 84 deletions.
15 changes: 13 additions & 2 deletions ChuanhuChatbot.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,9 @@
from modules import config
import gradio as gr
import colorama
from modules.gradio_patch import reg_patch

reg_patch()

logging.getLogger("httpx").setLevel(logging.WARNING)

Expand All @@ -33,6 +35,8 @@ def create_new_model():

with gr.Blocks(theme=small_and_beautiful_theme) as demo:
user_name = gr.Textbox("", visible=False)
# 激活/logout路由
logout_hidden_btn = gr.LogoutButton(visible=False)
promptTemplates = gr.State(load_template(get_template_names()[0], mode=2))
user_question = gr.State("")
assert type(my_api_key) == str
Expand Down Expand Up @@ -391,6 +395,8 @@ def create_new_model():
single_turn_checkbox = gr.Checkbox(label=i18n(
"单轮对话"), value=False, elem_classes="switch-checkbox", elem_id="gr-single-session-cb", visible=False)
# checkUpdateBtn = gr.Button(i18n("🔄 检查更新..."), visible=check_update)
logout_btn = gr.Button(
i18n("退出用户"), variant="primary", visible=authflag)

with gr.Tab(i18n("网络")):
gr.Markdown(
Expand Down Expand Up @@ -511,7 +517,7 @@ def create_greeting(request: gr.Request):
loaded_stuff = current_model.auto_load()
else:
loaded_stuff = [gr.update(), gr.update(), gr.Chatbot.update(label=MODELS[DEFAULT_MODEL]), current_model.single_turn, current_model.temperature, current_model.top_p, current_model.n_choices, current_model.stop_sequence, current_model.token_upper_limit, current_model.max_generation_token, current_model.presence_penalty, current_model.frequency_penalty, current_model.logit_bias, current_model.user_identifier]
return user_info, user_name, current_model, toggle_like_btn_visibility(DEFAULT_MODEL), *loaded_stuff, init_history_list(user_name)
return user_info, user_name, current_model, toggle_like_btn_visibility(DEFAULT_MODEL), *loaded_stuff, init_history_list(user_name, prepend=current_model.history_file_path[:-5])
demo.load(create_greeting, inputs=None, outputs=[
user_info, user_name, current_model, like_dislike_area, saveFileName, systemPromptTxt, chatbot, single_turn_checkbox, temperature_slider, top_p_slider, n_choices_slider, stop_sequence_txt, max_context_length_slider, max_generation_slider, presence_penalty_slider, frequency_penalty_slider, logit_bias_txt, user_identifier_txt, historySelectList], api_name="load")
chatgpt_predict_args = dict(
Expand Down Expand Up @@ -801,7 +807,12 @@ def create_greeting(request: gr.Request):
outputs=[saveFileName, systemPromptTxt, chatbot, single_turn_checkbox, temperature_slider, top_p_slider, n_choices_slider, stop_sequence_txt, max_context_length_slider, max_generation_slider, presence_penalty_slider, frequency_penalty_slider, logit_bias_txt, user_identifier_txt],
_js='(a,b)=>{return bgSelectHistory(a,b);}'
)

logout_btn.click(
fn=None,
inputs=[],
outputs=[],
_js='self.location="/logout"'
)
# 默认开启本地服务器,默认可以直接从IP访问,默认不创建公开分享链接
demo.title = i18n("胖猫🐱🐱🐱GPT")

Expand Down
3 changes: 2 additions & 1 deletion config_example.json
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@

//== API 配置 ==
"openai_api_key": "", // 你的 OpenAI API Key,一般必填,若空缺则需在图形界面中填入API Key
"google_palm_api_key": "", // 你的 Google PaLM API Key,用于 Google PaLM 对话模型
"google_genai_api_key": "", // 你的 Google AI Studio API Key,用于 Google Gemini 对话模型
"xmchat_api_key": "", // 你的 xmchat API Key,用于 XMChat 对话模型
"minimax_api_key": "", // 你的 MiniMax API Key,用于 MiniMax 对话模型
"minimax_group_id": "", // 你的 MiniMax Group ID,用于 MiniMax 对话模型
Expand All @@ -17,6 +17,7 @@
"claude_api_secret":"",// 你的 Claude API Secret,用于 Claude 对话模型
"ernie_api_key": "",// 你的文心一言在百度云中的API Key,用于文心一言对话模型
"ernie_secret_key": "",// 你的文心一言在百度云中的Secret Key,用于文心一言对话模型
"huggingface_auth_token": "", // 你的 Hugging Face API Token,用于访问有限制的模型


//== Azure ==
Expand Down
5 changes: 3 additions & 2 deletions locale/en_US.json
Original file line number Diff line number Diff line change
Expand Up @@ -170,7 +170,7 @@
"本地编制索引": "Local indexing",
"是否在本地编制知识库索引?如果是,可以在使用本地模型时离线使用知识库,否则使用OpenAI服务来编制索引(需要OpenAI API Key)。请确保你的电脑有至少16GB内存。本地索引模型需要从互联网下载。": "Do you want to index the knowledge base locally? If so, you can use the knowledge base offline when using the local model, otherwise use the OpenAI service to index (requires OpenAI API Key). Make sure your computer has at least 16GB of memory. The local index model needs to be downloaded from the Internet.",
"现在开始设置其他在线模型的API Key": "Start setting the API Key for other online models",
"是否设置默认 Google Palm API 密钥?如果设置,软件启动时会自动加载该API Key,无需在 UI 中手动输入。如果不设置,可以在软件启动后手动输入 API Key。": "Set the default Google Palm API Key? If set, the API Key will be automatically loaded when the software starts, and there is no need to manually enter it in the UI. If not set, you can manually enter the API Key after the software starts.",
"是否设置默认 Google AI Studio API 密钥?如果设置,软件启动时会自动加载该API Key,无需在 UI 中手动输入。如果不设置,可以在软件启动后手动输入 API Key。": "Set the default Google AI Studio API Key? If set, the API Key will be automatically loaded when the software starts, and there is no need to manually enter it in the UI. If not set, you can manually enter the API Key after the software starts.",
"是否设置默认 XMChat API 密钥?如果设置,软件启动时会自动加载该API Key,无需在 UI 中手动输入。如果不设置,可以在软件启动后手动输入 API Key。": "Set the default XMChat API Key? If set, the API Key will be automatically loaded when the software starts, and there is no need to manually enter it in the UI. If not set, you can manually enter the API Key after the software starts.",
"是否设置默认 MiniMax API 密钥和 Group ID?如果设置,软件启动时会自动加载该API Key,无需在 UI 中手动输入。如果不设置,将无法使用 MiniMax 模型。": "Set the default MiniMax API Key and Group ID? If set, the API Key will be automatically loaded when the software starts, and there is no need to manually enter it in the UI. If not set, the MiniMax model will not be available.",
"你的": "Your ",
Expand Down Expand Up @@ -227,5 +227,6 @@
"设置完成。现在请重启本程序。": "Setup completed. Please restart this program now.",
"你设置了 ": "You set ",
" 为: ": " as: ",
"输入的不是数字,将使用默认值。": "The input is not a number, the default value will be used."
"输入的不是数字,将使用默认值。": "The input is not a number, the default value will be used.",
"由于下面的原因,Google 拒绝返回 Gemini 的回答:\n\n": "For the following reasons, Google refuses to return Gemini's response:\n\n"
}
15 changes: 11 additions & 4 deletions modules/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -108,10 +108,17 @@ def load_config_to_environ(key_list):

HIDE_MY_KEY = config.get("hide_my_key", False)

google_palm_api_key = config.get("google_palm_api_key", "")
google_palm_api_key = os.environ.get(
"GOOGLE_PALM_API_KEY", google_palm_api_key)
os.environ["GOOGLE_PALM_API_KEY"] = google_palm_api_key
# Resolve the Google Generative AI key, accepting the legacy PaLM naming as a
# fallback. Precedence (lowest to highest): GOOGLE_PALM_API_KEY env var,
# GOOGLE_GENAI_API_KEY env var, "google_palm_api_key" config entry,
# "google_genai_api_key" config entry. The original code passed "" as the
# default of the second env lookup, silently discarding the legacy env value.
google_genai_api_key = os.environ.get("GOOGLE_PALM_API_KEY", "")
google_genai_api_key = os.environ.get("GOOGLE_GENAI_API_KEY", google_genai_api_key)
google_genai_api_key = config.get("google_palm_api_key", google_genai_api_key)
google_genai_api_key = config.get("google_genai_api_key", google_genai_api_key)
os.environ["GOOGLE_GENAI_API_KEY"] = google_genai_api_key

# Hugging Face token for gated models: config entry overrides the env var,
# and the resolved value is exported for downstream libraries.
huggingface_auth_token = os.environ.get("HF_AUTH_TOKEN", "")
huggingface_auth_token = config.get("hf_auth_token", huggingface_auth_token)
os.environ["HF_AUTH_TOKEN"] = huggingface_auth_token

xmchat_api_key = config.get("xmchat_api_key", "")
os.environ["XMCHAT_API_KEY"] = xmchat_api_key
Expand Down
114 changes: 114 additions & 0 deletions modules/gradio_patch.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,114 @@
import logging
import os

import fastapi
import gradio
from fastapi.responses import RedirectResponse
from gradio.oauth import MOCKED_OAUTH_TOKEN

from modules.presets import i18n

OAUTH_CLIENT_ID = os.environ.get("OAUTH_CLIENT_ID")
OAUTH_CLIENT_SECRET = os.environ.get("OAUTH_CLIENT_SECRET")
OAUTH_SCOPES = os.environ.get("OAUTH_SCOPES")
OPENID_PROVIDER_URL = os.environ.get("OPENID_PROVIDER_URL")
def _add_oauth_routes(app: fastapi.FastAPI) -> None:
    """Add OAuth routes to the FastAPI app (login, callback handler and logout).

    Requires the ``authlib`` extra (``pip install gradio[oauth]``) and the
    OAUTH_* / OPENID_PROVIDER_URL environment variables that Hugging Face
    Spaces set when ``hf_oauth: true`` is enabled in the Space metadata.
    """
    try:
        from authlib.integrations.starlette_client import OAuth
    except ImportError as e:
        # Fixed wording: original said "to due a missing library".
        raise ImportError(
            "Cannot initialize OAuth due to a missing library. Please run `pip install gradio[oauth]` or add "
            "`gradio[oauth]` to your requirements.txt file in order to install the required dependencies."
        ) from e

    # Check environment variables
    msg = (
        "OAuth is required but {} environment variable is not set. Make sure you've enabled OAuth in your Space by"
        " setting `hf_oauth: true` in the Space metadata."
    )
    if OAUTH_CLIENT_ID is None:
        raise ValueError(msg.format("OAUTH_CLIENT_ID"))
    if OAUTH_CLIENT_SECRET is None:
        raise ValueError(msg.format("OAUTH_CLIENT_SECRET"))
    if OAUTH_SCOPES is None:
        raise ValueError(msg.format("OAUTH_SCOPES"))
    if OPENID_PROVIDER_URL is None:
        raise ValueError(msg.format("OPENID_PROVIDER_URL"))

    # Register the Hugging Face OpenID provider with authlib.
    oauth = OAuth()
    oauth.register(
        name="huggingface",
        client_id=OAUTH_CLIENT_ID,
        client_secret=OAUTH_CLIENT_SECRET,
        client_kwargs={"scope": OAUTH_SCOPES},
        server_metadata_url=OPENID_PROVIDER_URL + "/.well-known/openid-configuration",
    )

    # Define OAuth routes
    @app.get("/login/huggingface")
    async def oauth_login(request: fastapi.Request):
        """Endpoint that redirects to HF OAuth page."""
        redirect_uri = str(request.url_for("oauth_redirect_callback"))
        if ".hf.space" in redirect_uri:
            # In Space, FastAPI redirects as http but we want https
            redirect_uri = redirect_uri.replace("http://", "https://")
        return await oauth.huggingface.authorize_redirect(request, redirect_uri)

    @app.get("/login/callback")
    async def oauth_redirect_callback(request: fastapi.Request) -> RedirectResponse:
        """Endpoint that handles the OAuth callback."""
        token = await oauth.huggingface.authorize_access_token(request)
        request.session["oauth_profile"] = token["userinfo"]
        request.session["oauth_token"] = token
        return RedirectResponse("/")

    @app.get("/logout")
    async def oauth_logout(request: fastapi.Request) -> RedirectResponse:
        """Endpoint that logs out the user (e.g. delete cookie session)."""
        request.session.pop("oauth_profile", None)
        request.session.pop("oauth_token", None)
        # Clear the auth cookies and redirect to the home page.
        response = RedirectResponse(url="/", status_code=302)
        # Plain string literals; the original f-prefix had no placeholder (F541).
        response.delete_cookie(key="access-token")
        response.delete_cookie(key="access-token-unsecure")
        return response


def _add_mocked_oauth_routes(app: fastapi.FastAPI) -> None:
    """Add fake oauth routes if Gradio is run locally and OAuth is enabled.

    Clicking on a gr.LoginButton will have the same behavior as in a Space
    (i.e. gets redirected in a new tab) but instead of authenticating with HF,
    a mocked user profile is added to the session.
    """

    # Define OAuth routes
    @app.get("/login/huggingface")
    async def oauth_login(request: fastapi.Request):
        """Fake endpoint that skips HF and goes straight to the callback."""
        return RedirectResponse("/login/callback")

    @app.get("/login/callback")
    async def oauth_redirect_callback(request: fastapi.Request) -> RedirectResponse:
        """Endpoint that stores the mocked profile and token in the session."""
        request.session["oauth_profile"] = MOCKED_OAUTH_TOKEN["userinfo"]
        request.session["oauth_token"] = MOCKED_OAUTH_TOKEN
        return RedirectResponse("/")

    @app.get("/logout")
    async def oauth_logout(request: fastapi.Request) -> RedirectResponse:
        """Endpoint that logs out the user (e.g. delete cookie session)."""
        request.session.pop("oauth_profile", None)
        request.session.pop("oauth_token", None)
        # Clear the auth cookies and redirect to the home page.
        response = RedirectResponse(url="/", status_code=302)
        # Plain string literals; the original f-prefix had no placeholder (F541).
        response.delete_cookie(key="access-token")
        response.delete_cookie(key="access-token-unsecure")
        return response


def reg_patch():
    """Monkey-patch gradio.oauth so our route builders (incl. /logout) are used."""
    replacements = {
        "_add_oauth_routes": _add_oauth_routes,
        "_add_mocked_oauth_routes": _add_mocked_oauth_routes,
    }
    for attr_name, patched_fn in replacements.items():
        setattr(gradio.oauth, attr_name, patched_fn)
    logging.info(i18n("覆盖gradio.oauth /logout路由"))
81 changes: 81 additions & 0 deletions modules/models/GoogleGemini.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,81 @@
import json
import logging
import textwrap
import uuid

import google.generativeai as genai
import gradio as gr
import PIL
import requests

from modules.presets import i18n

from ..index_func import construct_index
from ..utils import count_token
from .base_model import BaseLLMModel


class GoogleGeminiClient(BaseLLMModel):
    """Chat client backed by Google's Generative AI (Gemini) API.

    Text-only by default; for model names containing "vision" the client is
    multimodal: uploaded images are queued in ``self.image_paths`` and merged
    into the conversation history on the next request.
    """

    def __init__(self, model_name, api_key, user_name="") -> None:
        super().__init__(model_name=model_name, user=user_name)
        self.api_key = api_key
        # "vision" variants accept images alongside text.
        self.multimodal = "vision" in model_name.lower()
        # Image file paths queued for the next request (multimodal only).
        self.image_paths = []

    def _get_gemini_style_input(self):
        """Flatten history (plus any queued images) into genai's message list.

        Queued images are appended to ``self.history`` as ``role == "image"``
        entries (so they persist across turns), then the queue is cleared.
        Image entries are opened as PIL images; everything else is passed as
        its raw text content.
        """
        self.history.extend(
            {"role": "image", "content": path} for path in self.image_paths
        )
        self.image_paths = []
        return [
            PIL.Image.open(item["content"]) if item["role"] == "image" else item["content"]
            for item in self.history
        ]

    def to_markdown(self, text):
        """Render Gemini output as a Markdown blockquote, normalizing bullets."""
        text = text.replace("•", " *")
        # predicate=True quotes every line, including blank ones.
        return textwrap.indent(text, "> ", predicate=lambda _: True)

    def handle_file_upload(self, files, chatbot, language):
        """Queue images (multimodal) or build a local index from the files.

        Returns a (files_update, chatbot, status) triple for the Gradio UI;
        returns None implicitly when *files* is empty, as before.
        """
        if files:
            if self.multimodal:
                for file in files:
                    if file.name:
                        self.image_paths.append(file.name)
                        chatbot = chatbot + [((file.name,), None)]
                return None, chatbot, None
            else:
                construct_index(self.api_key, file_src=files)
                status = i18n("索引构建完成")
                return gr.Files.update(), chatbot, status

    def get_answer_at_once(self):
        genai.configure(api_key=self.api_key)
        messages = self._get_gemini_style_input()
        model = genai.GenerativeModel(self.model_name)
        response = model.generate_content(messages)
        try:
            # count_token keeps the token accounting consistent with the
            # streaming path (the original returned len(), i.e. characters).
            return self.to_markdown(response.text), count_token(response.text)
        except ValueError:
            # response.text raises ValueError when generation was blocked;
            # surface Google's reason from prompt_feedback instead.
            return (
                i18n("由于下面的原因,Google 拒绝返回 Gemini 的回答:\n\n")
                + str(response.prompt_feedback),
                0,
            )

    def get_answer_stream_iter(self):
        genai.configure(api_key=self.api_key)
        messages = self._get_gemini_style_input()
        model = genai.GenerativeModel(self.model_name)
        # Keep the stream object and the chunk text in separate variables;
        # the original rebound `response` to each chunk's text inside the loop.
        stream = model.generate_content(messages, stream=True)
        partial_text = ""
        for chunk in stream:
            partial_text += chunk.text
            yield partial_text
        self.all_token_counts[-1] = count_token(partial_text)
        yield partial_text
Loading

0 comments on commit fce203c

Please sign in to comment.