Skip to content

Commit

Permalink
Merge branch 'master' into sync/pull-org-and-openai-api-base-bugfix-into-master
Browse files Browse the repository at this point in the history
  • Loading branch information
collijk committed Jul 10, 2023
2 parents 11052ac + 62e3304 commit a51349f
Show file tree
Hide file tree
Showing 9 changed files with 112 additions and 119 deletions.
6 changes: 3 additions & 3 deletions .github/workflows/ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -148,8 +148,8 @@ jobs:
- name: Run pytest with coverage
run: |
pytest -v --cov=autogpt --cov-branch --cov-report term-missing --cov-report xml \
--numprocesses=4 --durations=10 \
pytest -vv --cov=autogpt --cov-branch --cov-report term-missing --cov-report xml \
--numprocesses=logical --durations=10 \
tests/unit tests/integration tests/challenges
python tests/challenges/utils/build_current_score.py
env:
Expand Down Expand Up @@ -247,7 +247,7 @@ jobs:
gh api repos/$REPO/issues/$PR_NUMBER/comments -X POST -F body="You changed AutoGPT's behaviour. The cassettes have been updated and will be merged to the submodule when this Pull Request gets merged."
fi
- name: Upload logs as artifact
- name: Upload logs to artifact
if: always()
uses: actions/upload-artifact@v3
with:
Expand Down
7 changes: 2 additions & 5 deletions .github/workflows/docker-ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -73,16 +73,13 @@ jobs:
run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY
continue-on-error: true

# Docker setup needs fixing before this is going to work: #1843
test:
runs-on: ubuntu-latest
timeout-minutes: 30
needs: build
timeout-minutes: 10
steps:
- name: Check out repository
uses: actions/checkout@v3
with:
fetch-depth: 0
submodules: true

- name: Set up Docker Buildx
Expand All @@ -102,8 +99,8 @@ jobs:
- id: test
name: Run tests
env:
PLAIN_OUTPUT: True
CI: true
PLAIN_OUTPUT: True
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
run: |
set +e
Expand Down
9 changes: 7 additions & 2 deletions autogpt/commands/execute_code.py
Original file line number Diff line number Diff line change
Expand Up @@ -103,6 +103,9 @@ def execute_python_file(filename: str, agent: Agent) -> str:
)

if we_are_running_in_a_docker_container():
logger.debug(
f"Auto-GPT is running in a Docker container; executing {file_path} directly..."
)
result = subprocess.run(
["python", str(file_path)],
capture_output=True,
Expand All @@ -114,6 +117,7 @@ def execute_python_file(filename: str, agent: Agent) -> str:
else:
return f"Error: {result.stderr}"

logger.debug("Auto-GPT is not running in a Docker container")
try:
client = docker.from_env()
# You can replace this with the desired Python image/version
Expand All @@ -122,10 +126,10 @@ def execute_python_file(filename: str, agent: Agent) -> str:
image_name = "python:3-alpine"
try:
client.images.get(image_name)
logger.warn(f"Image '{image_name}' found locally")
logger.debug(f"Image '{image_name}' found locally")
except ImageNotFound:
logger.info(
f"Image '{image_name}' not found locally, pulling from Docker Hub"
f"Image '{image_name}' not found locally, pulling from Docker Hub..."
)
# Use the low-level API to stream the pull response
low_level_client = docker.APIClient()
Expand All @@ -138,6 +142,7 @@ def execute_python_file(filename: str, agent: Agent) -> str:
elif status:
logger.info(status)

logger.debug(f"Running {file_path} in a {image_name} container...")
container: DockerContainer = client.containers.run(
image_name,
["python", str(file_path.relative_to(agent.workspace.root))],
Expand Down
36 changes: 21 additions & 15 deletions autogpt/llm/providers/openai.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
import functools
import time
from dataclasses import dataclass
from typing import List, Optional
from typing import Callable, List, Optional
from unittest.mock import patch

import openai
Expand Down Expand Up @@ -112,7 +112,7 @@
}


def meter_api(func):
def meter_api(func: Callable):
"""Adds ApiManager metering to functions which make OpenAI API calls"""
from autogpt.llm.api_manager import ApiManager

Expand Down Expand Up @@ -150,7 +150,7 @@ def metered_func(*args, **kwargs):


def retry_api(
num_retries: int = 10,
max_retries: int = 10,
backoff_base: float = 2.0,
warn_user: bool = True,
):
Expand All @@ -162,43 +162,49 @@ def retry_api(
warn_user bool: Whether to warn the user. Defaults to True.
"""
error_messages = {
ServiceUnavailableError: f"{Fore.RED}Error: The OpenAI API engine is currently overloaded, passing...{Fore.RESET}",
RateLimitError: f"{Fore.RED}Error: Reached rate limit, passing...{Fore.RESET}",
ServiceUnavailableError: f"{Fore.RED}Error: The OpenAI API engine is currently overloaded{Fore.RESET}",
RateLimitError: f"{Fore.RED}Error: Reached rate limit{Fore.RESET}",
}
api_key_error_msg = (
f"Please double check that you have setup a "
f"{Fore.CYAN + Style.BRIGHT}PAID{Style.RESET_ALL} OpenAI API Account. You can "
f"read more here: {Fore.CYAN}https://docs.agpt.co/setup/#getting-an-api-key{Fore.RESET}"
)
backoff_msg = (
f"{Fore.RED}Error: API Bad gateway. Waiting {{backoff}} seconds...{Fore.RESET}"
)
backoff_msg = f"{Fore.RED}Waiting {{backoff}} seconds...{Fore.RESET}"

def _wrapper(func):
def _wrapper(func: Callable):
@functools.wraps(func)
def _wrapped(*args, **kwargs):
user_warned = not warn_user
num_attempts = num_retries + 1 # +1 for the first attempt
for attempt in range(1, num_attempts + 1):
max_attempts = max_retries + 1 # +1 for the first attempt
for attempt in range(1, max_attempts + 1):
try:
return func(*args, **kwargs)

except (RateLimitError, ServiceUnavailableError) as e:
if attempt == num_attempts:
if attempt >= max_attempts or (
# User's API quota exceeded
isinstance(e, RateLimitError)
and (err := getattr(e, "error", {}))
and err.get("code") == "insufficient_quota"
):
raise

error_msg = error_messages[type(e)]
logger.debug(error_msg)
logger.warn(error_msg)
if not user_warned:
logger.double_check(api_key_error_msg)
logger.debug(f"Status: {e.http_status}")
logger.debug(f"Response body: {e.json_body}")
logger.debug(f"Response headers: {e.headers}")
user_warned = True

except (APIError, Timeout) as e:
if (e.http_status not in [429, 502]) or (attempt == num_attempts):
if (e.http_status not in [429, 502]) or (attempt == max_attempts):
raise

backoff = backoff_base ** (attempt + 2)
logger.debug(backoff_msg.format(backoff=backoff))
logger.warn(backoff_msg.format(backoff=backoff))
time.sleep(backoff)

return _wrapped
Expand Down
15 changes: 5 additions & 10 deletions tests/unit/test_retry_provider_openai.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ def __init__(self):
self.count = 0

@openai.retry_api(
num_retries=retry_count, backoff_base=0.001, warn_user=warn_user
max_retries=retry_count, backoff_base=0.001, warn_user=warn_user
)
def __call__(self):
self.count += 1
Expand Down Expand Up @@ -69,16 +69,11 @@ def test_retry_open_api_passing(capsys, error, error_count, retry_count, failure

if error_count and retry_count:
if type(error) == RateLimitError:
assert "Reached rate limit, passing..." in output.out
assert "Reached rate limit" in output.out
assert "Please double check" in output.out
if type(error) == ServiceUnavailableError:
assert (
"The OpenAI API engine is currently overloaded, passing..."
in output.out
)
assert "The OpenAI API engine is currently overloaded" in output.out
assert "Please double check" in output.out
if type(error) == APIError:
assert "API Bad gateway" in output.out
else:
assert output.out == ""

Expand All @@ -96,7 +91,7 @@ def test_retry_open_api_rate_limit_no_warn(capsys):

output = capsys.readouterr()

assert "Reached rate limit, passing..." in output.out
assert "Reached rate limit" in output.out
assert "Please double check" not in output.out


Expand All @@ -115,7 +110,7 @@ def test_retry_open_api_service_unavairable_no_warn(capsys):

output = capsys.readouterr()

assert "The OpenAI API engine is currently overloaded, passing..." in output.out
assert "The OpenAI API engine is currently overloaded" in output.out
assert "Please double check" not in output.out


Expand Down
51 changes: 36 additions & 15 deletions tests/vcr/__init__.py
Original file line number Diff line number Diff line change
@@ -1,21 +1,30 @@
import os
from hashlib import sha256

import openai.api_requestor
import pytest
from pytest_mock import MockerFixture

from .vcr_filter import PROXY, before_record_request, before_record_response
from .vcr_filter import (
PROXY,
before_record_request,
before_record_response,
freeze_request_body,
)

DEFAULT_RECORD_MODE = "new_episodes"
BASE_VCR_CONFIG = {
"before_record_request": before_record_request,
"before_record_response": before_record_response,
"filter_headers": [
"Authorization",
"AGENT-MODE",
"AGENT-TYPE",
"OpenAI-Organization",
"X-OpenAI-Client-User-Agent",
"User-Agent",
],
"match_on": ["method", "body"],
"match_on": ["method", "headers"],
}


Expand All @@ -41,31 +50,43 @@ def vcr_cassette_dir(request):
return os.path.join("tests/Auto-GPT-test-cassettes", test_name)


def patch_api_base(requestor):
def patch_api_base(requestor: openai.api_requestor.APIRequestor):
new_api_base = f"{PROXY}/v1"
requestor.api_base = new_api_base
return requestor


@pytest.fixture
def patched_api_requestor(mocker: MockerFixture):
original_init = openai.api_requestor.APIRequestor.__init__
original_validate_headers = openai.api_requestor.APIRequestor._validate_headers
init_requestor = openai.api_requestor.APIRequestor.__init__
prepare_request = openai.api_requestor.APIRequestor._prepare_request_raw

def patched_init(requestor, *args, **kwargs):
original_init(requestor, *args, **kwargs)
def patched_init_requestor(requestor, *args, **kwargs):
init_requestor(requestor, *args, **kwargs)
patch_api_base(requestor)

def patched_validate_headers(self, supplied_headers):
headers = original_validate_headers(self, supplied_headers)
headers["AGENT-MODE"] = os.environ.get("AGENT_MODE")
headers["AGENT-TYPE"] = os.environ.get("AGENT_TYPE")
return headers
def patched_prepare_request(self, *args, **kwargs):
url, headers, data = prepare_request(self, *args, **kwargs)

if PROXY:
headers["AGENT-MODE"] = os.environ.get("AGENT_MODE")
headers["AGENT-TYPE"] = os.environ.get("AGENT_TYPE")

# Add hash header for cheap & fast matching on cassette playback
headers["X-Content-Hash"] = sha256(
freeze_request_body(data), usedforsecurity=False
).hexdigest()

return url, headers, data

if PROXY:
mocker.patch("openai.api_requestor.APIRequestor.__init__", new=patched_init)
mocker.patch.object(
openai.api_requestor.APIRequestor,
"_validate_headers",
new=patched_validate_headers,
"__init__",
new=patched_init_requestor,
)
mocker.patch.object(
openai.api_requestor.APIRequestor,
"_prepare_request_raw",
new=patched_prepare_request,
)
52 changes: 0 additions & 52 deletions tests/vcr/openai_filter.py

This file was deleted.

Loading

0 comments on commit a51349f

Please sign in to comment.