
Commit

1. Configure a proxy for browse_website
2. Chinese prompts (they somewhat affect the returned results and consume a lot of tokens, so they are commented out for now)
3. Code cleanup; the text-to-embedding interface still needs to be adapted
lianchen.zhang committed Nov 8, 2023
1 parent 92ecdde commit f1078d0
Showing 14 changed files with 74 additions and 32 deletions.
16 changes: 8 additions & 8 deletions .env
@@ -5,7 +5,7 @@
################################################################################

## OPENAI_API_KEY - OpenAI API Key (Example: my-openai-api-key)
OPENAI_API_KEY=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
OPENAI_API_KEY=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

## EXECUTE_LOCAL_COMMANDS - Allow local command execution (Default: False)
# EXECUTE_LOCAL_COMMANDS=False
@@ -215,12 +215,12 @@ GOOGLE_CUSTOM_SEARCH_ENGINE_ID=62da02a91306a4c88
################################################################################
### QUUNAR config
################################################################################
QUNAR_API_BASE = http://llm.api.corp.qunar.com/algo/llm/api
QUNAR_API_BASE=http://llm.api.corp.qunar.com/algo/llm/api

QUNAR_GPT_MODEL = gpt-4
QUNAR_KEY = qunar_hackathon_41
QUNAR_PWD = 1MVb45
QUNAR_USER = lianchen.zhang
QUNAR_GPT_MODEL=gpt-35-turbo
QUNAR_KEY=qunar-flightCheckin-prod
QUNAR_PWD=04b446c5-10e3-4664-8a12-b2e7729b9293
QUNAR_USER=lianchen.zhang

QUNAR_PROXY_HOST = proxy.corp.qunar.com
QUNAR_PROXY_PORT = 10080
QUNAR_PROXY_HOST=proxy.corp.qunar.com
QUNAR_PROXY_PORT=10080
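
Note: the updated values use plain KEY=VALUE dotenv syntax without spaces around the equals sign. A minimal sketch of how these entries would be read at runtime, assuming python-dotenv is available:

import os

from dotenv import load_dotenv  # assumed: python-dotenv is installed

# Pull the .env entries above into the process environment.
load_dotenv()

QUNAR_API_BASE = os.getenv("QUNAR_API_BASE")
QUNAR_GPT_MODEL = os.getenv("QUNAR_GPT_MODEL")
QUNAR_PROXY_HOST = os.getenv("QUNAR_PROXY_HOST")
QUNAR_PROXY_PORT = os.getenv("QUNAR_PROXY_PORT")

print(QUNAR_API_BASE, QUNAR_GPT_MODEL, QUNAR_PROXY_HOST, QUNAR_PROXY_PORT)
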
12 changes: 12 additions & 0 deletions autogpt/app/setup.py
@@ -233,6 +233,18 @@ def generate_aiconfig_automatic(user_prompt: str, config: Config) -> AIConfig:
        .group(1)
        .strip()
    )

    # Chinese-format parsing
    # ai_name = re.search(r"名称(?:\s*):(?:\s*)(.*)", output, re.IGNORECASE).group(1)
    # ai_role = (
    #     re.search(
    #         r"描述(?:\s*):(?:\s*)(.*?)(?:(?:\n)|目标)",
    #         output,
    #         re.IGNORECASE | re.DOTALL,
    #     )
    #     .group(1)
    #     .strip()
    # )
    ai_goals = re.findall(r"(?<=\n)-\s*(.*)", output)
    api_budget = 0.0  # TODO: parse api budget using a regular expression

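
For reference, a short sketch of what the commented-out Chinese-format parsing would do, run against a hypothetical Chinese-format reply (the sample text is an assumption, not actual model output):

import re

# Hypothetical reply in the Chinese format ("名称" = name, "描述" = description, "目标" = goals).
output = "名称: 旅行助手\n描述: 一个帮助用户规划出行的AI助手\n目标:\n- 搜索航班\n- 比较酒店价格\n"

ai_name = re.search(r"名称(?:\s*):(?:\s*)(.*)", output, re.IGNORECASE).group(1)
ai_role = (
    re.search(r"描述(?:\s*):(?:\s*)(.*?)(?:(?:\n)|目标)", output, re.IGNORECASE | re.DOTALL)
    .group(1)
    .strip()
)
ai_goals = re.findall(r"(?<=\n)-\s*(.*)", output)

print(ai_name)   # 旅行助手 ("travel assistant")
print(ai_role)   # 一个帮助用户规划出行的AI助手 ("an AI assistant that helps users plan trips")
print(ai_goals)  # ['搜索航班', '比较酒店价格'] (search flights, compare hotel prices)
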
13 changes: 4 additions & 9 deletions autogpt/commands/web_search.py
@@ -7,6 +7,8 @@

import httplib2

from autogpt.core.configuration import *

COMMAND_CATEGORY = "web_search"
COMMAND_CATEGORY_TITLE = "Web Search"

@@ -21,14 +23,7 @@

DUCKDUCKGO_MAX_ATTEMPTS = 3

QUNAR_PROXY_HOST = os.environ.get("QUNAR_PROXY_HOST")
QUNAR_PROXY_PORT = os.environ.get("QUNAR_PROXY_PORT")
QUNAR_PROXY_USR = os.environ.get("QUNAR_PROXY_USR")
QUNAR_PROXY_PWD = os.environ.get("QUNAR_PROXY_PWD")
proxies = {
    'http': f'http://{QUNAR_PROXY_HOST}:{QUNAR_PROXY_PORT}',
    'https': f'http://{QUNAR_PROXY_HOST}:{QUNAR_PROXY_PORT}'
}



@command(
@@ -201,7 +196,7 @@ def proxy_info_from_url(method="http", noproxy=None):
    pi = httplib2.ProxyInfo(
        proxy_type=proxy_type,
        proxy_host=QUNAR_PROXY_HOST,
        proxy_port=QUNAR_PROXY_PORT or dict(https=443, http=80)[method],
        proxy_port=int(QUNAR_PROXY_PORT) or dict(https=443, http=80)[method],
        proxy_user=QUNAR_PROXY_USR or None,
        proxy_pass=QUNAR_PROXY_PWD or None,
        proxy_headers=None,
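
The patched proxy_info_from_url routes Google API traffic (which goes through httplib2) onto the corporate proxy; the int() cast is needed because environment variables are strings. A standalone sketch of the same idea; the fallback host and port literals mirror the .env above and are illustrative only:

import os

import httplib2
import httplib2.socks  # bundled with httplib2; provides PROXY_TYPE_HTTP

proxy_host = os.environ.get("QUNAR_PROXY_HOST", "proxy.corp.qunar.com")
proxy_port = int(os.environ.get("QUNAR_PROXY_PORT", "10080"))

# Describe the HTTP proxy; httplib2 expects the port as an int,
# which is why the diff above wraps QUNAR_PROXY_PORT in int().
proxy_info = httplib2.ProxyInfo(
    proxy_type=httplib2.socks.PROXY_TYPE_HTTP,
    proxy_host=proxy_host,
    proxy_port=proxy_port,
)

# Every request made through this client is routed via the proxy.
http = httplib2.Http(proxy_info=proxy_info)
response, content = http.request("https://www.googleapis.com/", "GET")
print(response.status)
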
5 changes: 3 additions & 2 deletions autogpt/commands/web_selenium.py
@@ -40,7 +40,7 @@
from autogpt.memory.vector import MemoryItem, get_memory
from autogpt.processing.html import extract_hyperlinks, format_hyperlinks
from autogpt.url_utils.validators import validate_url

from autogpt.core.configuration import *
FILE_DIR = Path(__file__).parent.parent
TOKENS_TO_TRIGGER_SUMMARY = 50
LINKS_TO_RETURN = 20
@@ -115,7 +115,8 @@ def scrape_text_with_selenium(url: str, agent: Agent) -> tuple[WebDriver, str]:
    options.add_argument(
        "user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.5615.49 Safari/537.36"
    )

    # options.add_argument(f'--proxy-server={QUNAR_PROXY_HOST}:{QUNAR_PROXY_PORT}')
    options.add_argument(f'--proxy-server={proxies["http"]}')
    if agent.config.selenium_web_browser == "firefox":
        if agent.config.selenium_headless:
            options.headless = True
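
The new --proxy-server argument sends the headless browser through the same corporate proxy. A minimal sketch of the setup in isolation, assuming Selenium 4 with a working chromedriver; the proxy address mirrors the .env values above:

from selenium import webdriver
from selenium.webdriver.chrome.options import Options as ChromeOptions

options = ChromeOptions()
# Route all browser traffic through the corporate HTTP proxy.
options.add_argument("--proxy-server=http://proxy.corp.qunar.com:10080")

driver = webdriver.Chrome(options=options)
try:
    driver.get("https://www.example.com")
    print(driver.title)
finally:
    driver.quit()
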
10 changes: 10 additions & 0 deletions autogpt/core/configuration/__init__.py
@@ -1,7 +1,17 @@
"""The configuration encapsulates settings for all Agent subsystems."""
import os

from autogpt.core.configuration.schema import (
    Configurable,
    SystemConfiguration,
    SystemSettings,
    UserConfigurable,
)
QUNAR_PROXY_HOST = os.environ.get("QUNAR_PROXY_HOST")
QUNAR_PROXY_PORT = os.environ.get("QUNAR_PROXY_PORT")
QUNAR_PROXY_USR = os.environ.get("QUNAR_PROXY_USR")
QUNAR_PROXY_PWD = os.environ.get("QUNAR_PROXY_PWD")
proxies = {
    'http': f'http://{QUNAR_PROXY_HOST}:{QUNAR_PROXY_PORT}',
    'https': f'http://{QUNAR_PROXY_HOST}:{QUNAR_PROXY_PORT}'
}
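
Because the proxy settings now live at module level, any module that does a star import from autogpt.core.configuration (as web_search.py and web_selenium.py do above) picks up the proxies dict. A small usage sketch, assuming the requests library and that the QUNAR_PROXY_* variables are set:

import requests

# proxies is the module-level dict defined in autogpt/core/configuration/__init__.py above.
from autogpt.core.configuration import proxies

# Send an outbound request through the corporate proxy.
response = requests.get("https://duckduckgo.com/", proxies=proxies, timeout=10)
print(response.status_code)
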
4 changes: 2 additions & 2 deletions autogpt/llm/providers/openai.py
@@ -21,7 +21,7 @@
)
from autogpt.logs import logger
from autogpt.models.command_registry import CommandRegistry
from autogpt.qunar.qunar_gpt import QunarGPT
from autogpt.qunar.api_resources.q_chat_completion import QChatCompletion

MAX_TOKENS = 40960

@@ -242,7 +242,7 @@ def create_chat_completion(
    #     **kwargs,
    # )

    completion: OpenAIObject = QunarGPT.create(
    completion: OpenAIObject = QChatCompletion.create(
        messages=messages,
        **kwargs,
    )
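
With this swap, create_chat_completion keeps its OpenAI-style interface but sends the messages to the Qunar gateway. A sketch of the call shape; which extra keyword arguments the gateway actually honours is an assumption:

from autogpt.qunar.api_resources.q_chat_completion import QChatCompletion

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Summarize today's flight check-in volume."},
]

# Same shape as openai.ChatCompletion.create, so existing call sites
# only needed the class swapped.
completion = QChatCompletion.create(
    messages=messages,
    temperature=0,  # assumed to be forwarded to the gateway
)
print(completion)
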
2 changes: 2 additions & 0 deletions autogpt/memory/vector/utils.py
@@ -34,6 +34,7 @@ def get_embedding(
    Returns:
        List[float]: The embedding.
    """
    return []
    multiple = isinstance(input, list) and all(not isinstance(i, int) for i in input)

    if isinstance(input, str):
@@ -58,6 +59,7 @@ def get_embedding(
        + (f" via Azure deployment '{kwargs['engine']}'" if config.use_azure else "")
    )

    #
    embeddings = iopenai.create_embedding(
        input,
        **kwargs,
1 change: 1 addition & 0 deletions autogpt/prompts/prompt.py
@@ -3,6 +3,7 @@
from autogpt.prompts.generator import PromptGenerator

DEFAULT_TRIGGERING_PROMPT = (
    # "Your answers must be in Chinese. "
    "Determine exactly one command to use based on the given goals "
    "and the progress you have made so far, "
    "and respond using the JSON schema specified previously:"
Empty file.
autogpt/qunar/api_resources/abstract/EngineApiResource.py
@@ -1,17 +1,12 @@
import os

import openai
from openai import api_requestor, error, util
from openai.api_resources.abstract.api_resource import APIResource
from openai import error, util
from openai.openai_response import OpenAIResponse
from openai.util import ApiType

from autogpt.qunar.qunar_requestor import QunarRequestor

MAX_TIMEOUT = 20


class QunarGPT:
class EngineAPIResource:
    plain_old_data = False

    @classmethod
Empty file.
29 changes: 29 additions & 0 deletions autogpt/qunar/api_resources/q_chat_completion.py
@@ -0,0 +1,29 @@
import time

from openai import util
from openai.error import TryAgain

from autogpt.qunar.api_resources.abstract.EngineApiResource import EngineAPIResource


class QChatCompletion(EngineAPIResource):

    @classmethod
    def create(cls, *args, **kwargs):
        """
        Creates a new chat completion for the provided messages and parameters.
        See https://platform.openai.com/docs/api-reference/chat-completions/create
        for a list of valid parameters.
        """
        start = time.time()
        timeout = kwargs.pop("timeout", None)

        while True:
            try:
                return super().create(*args, **kwargs)
            except TryAgain as e:
                if timeout is not None and time.time() > start + timeout:
                    raise

                util.log_info("Waiting for model to warm up", error=e)
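
The loop above retries on openai.error.TryAgain until the optional timeout (in seconds) runs out, then re-raises. A usage sketch; the 30-second budget is an arbitrary example:

from autogpt.qunar.api_resources.q_chat_completion import QChatCompletion

# Allow up to 30 seconds of "model warming up" retries before giving up.
completion = QChatCompletion.create(
    messages=[{"role": "user", "content": "ping"}],
    timeout=30,
)
print(completion)
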
3 changes: 0 additions & 3 deletions autogpt/qunar/qunar_requestor.py
@@ -5,15 +5,12 @@
import warnings
from json import JSONDecodeError
from typing import (
    AsyncGenerator,
    AsyncIterator,
    Callable,
    Dict,
    Iterator,
    Optional,
    Tuple,
    Union,
    overload,
)

import openai
2 changes: 1 addition & 1 deletion docker-compose.yml
@@ -5,7 +5,7 @@
version: "3.9"

services:
  auto-gpt:
  qunar-auto-gpt:
    build: ./
    env_file:
      - .env
