Support Dedicated OpenAI API Deployments on Azure. #124

Open
wants to merge 6 commits into base: main
2 changes: 2 additions & 0 deletions README.md
@@ -103,6 +103,8 @@ voyager.learn()
3. After the world is created, press `Esc` key and press `Open to LAN`.
4. Select `Allow cheats: ON` and press `Start LAN World`. You will see the bot join the world soon.

Alternatively, if you are using dedicated OpenAI API deployments on Azure, you can configure your APIs by referring to [this guide](installation/run_with_azure_api_deployments.md).

# Resume from a checkpoint during learning

If you stop the learning process and want to resume from a checkpoint later, you can instantiate Voyager by:
52 changes: 52 additions & 0 deletions installation/run_with_azure_api_deployments.md
@@ -0,0 +1,52 @@
# Run With Azure API Deployments

If you are using dedicated OpenAI API deployments on Azure, you can run Voyager by:

```python
from voyager import Voyager
from voyager.agents import AzureChatModelConfig, AzureOpenAIEmbeddingsConfig

# You can also use mc_port instead of azure_login, but azure_login is highly recommended
azure_login = {
"client_id": "YOUR_CLIENT_ID",
"redirect_url": "https://127.0.0.1/auth-response",
"secret_value": "[OPTIONAL] YOUR_SECRET_VALUE",
"version": "fabric-loader-0.14.18-1.19", # the version Voyager is tested on
}
openai_api_key = "YOUR_API_KEY"

# If you are using OpenAI LLM deployments on Azure, you can configure them here
azure_gpt_4_config = AzureChatModelConfig(
openai_api_base="BASE_URL_FOR_AZURE_GPT4_DEPLOYMENT",
openai_api_version="GPT4_API_VERSION",
deployment_name="GPT4_DEPLOYMENT_NAME",
openai_api_type="azure",
openai_api_key="YOUR_AZURE_API_KEY", # Not API keys with prefix "sk-"
)
azure_gpt_35_config = AzureChatModelConfig(
openai_api_base="BASE_URL_FOR_AZURE_GPT35_DEPLOYMENT",
openai_api_version="GPT35_API_VERSION",
deployment_name="GPT35_DEPLOYMENT_NAME",
openai_api_type="azure",
openai_api_key="YOUR_AZURE_API_KEY", # Not API keys with prefix "sk-"
)
azure_openai_embeddings_config = AzureOpenAIEmbeddingsConfig(
openai_api_base="BASE_URL_FOR_AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT",
model="MODEL_NAME", # Check https://platform.openai.com/docs/guides/embeddings/embedding-models
openai_api_type="azure",
deployment="YOUR_DEPLOYMENT_NAME",
openai_api_key="YOUR_AZURE_API_KEY", # Not API keys with prefix "sk-"
)

voyager = Voyager(
azure_login=azure_login,
openai_api_type="azure",
azure_gpt_4_config=azure_gpt_4_config,
azure_gpt_35_config=azure_gpt_35_config,
azure_openai_embeddings_config=azure_openai_embeddings_config,
)

# start lifelong learning
voyager.learn()
```
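
As the comment in the example above notes, `mc_port` can be used in place of `azure_login`. Below is a minimal sketch of that variant, reusing the config objects defined above; the port number is a placeholder for whatever `Start LAN World` prints:

```python
# Hypothetical variant: connect to a LAN-opened Minecraft world by port
# instead of logging in through azure_login. 55555 is a placeholder; use
# the port printed when you press "Start LAN World".
voyager = Voyager(
    mc_port=55555,
    openai_api_key=openai_api_key,
    openai_api_type="azure",
    azure_gpt_4_config=azure_gpt_4_config,
    azure_gpt_35_config=azure_gpt_35_config,
    azure_openai_embeddings_config=azure_openai_embeddings_config,
)
```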

1 change: 1 addition & 0 deletions voyager/agents/__init__.py
@@ -2,3 +2,4 @@
from .critic import CriticAgent
from .curriculum import CurriculumAgent
from .skill import SkillManager
from .azure_model_config import AzureChatModelConfig, AzureOpenAIEmbeddingsConfig
13 changes: 10 additions & 3 deletions voyager/agents/action.py
@@ -3,12 +3,13 @@

import voyager.utils as U
from javascript import require
from langchain.chat_models import ChatOpenAI
from langchain.prompts import SystemMessagePromptTemplate
from langchain.schema import AIMessage, HumanMessage, SystemMessage

from voyager.prompts import load_prompt
from voyager.control_primitives_context import load_control_primitives_context
from voyager.agents.azure_model_config import AzureChatModelConfig
from voyager.agents.get_llm import get_llm


class ActionAgent:
@@ -21,6 +22,9 @@ def __init__(
resume=False,
chat_log=True,
execution_error=True,
openai_api_type="",
azure_gpt_4_config=AzureChatModelConfig(),
azure_gpt_35_config=AzureChatModelConfig(),
):
self.ckpt_dir = ckpt_dir
self.chat_log = chat_log
@@ -31,10 +35,13 @@ def __init__(
self.chest_memory = U.load_json(f"{ckpt_dir}/action/chest_memory.json")
else:
self.chest_memory = {}
self.llm = ChatOpenAI(
self.llm = get_llm(
model_name=model_name,
temperature=temperature,
request_timeout=request_timout,
request_timout=request_timout,
openai_api_type=openai_api_type,
azure_gpt_4_config=azure_gpt_4_config,
azure_gpt_35_config=azure_gpt_35_config,
)

def update_chest_memory(self, chests):
19 changes: 19 additions & 0 deletions voyager/agents/azure_model_config.py
@@ -0,0 +1,19 @@
from pydantic import BaseModel


class AzureChatModelConfig(BaseModel):
"""AzureChatOpenAI config profile"""
openai_api_base: str = ''
openai_api_version: str = ''
deployment_name: str = ''
openai_api_type: str = 'azure'
openai_api_key: str = ''


class AzureOpenAIEmbeddingsConfig(BaseModel):
"""OpenAIEmbeddings config profile"""
openai_api_base: str = ''
model: str = ''
openai_api_type: str = 'azure'
deployment: str = ''
openai_api_key: str = ''
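
These are plain pydantic containers whose fields mirror the keyword arguments of the corresponding LangChain constructors; elsewhere in this diff they are expanded with `**config.dict()`. A minimal sketch of that pattern with placeholder values (the embedding model name is an assumption):

```python
from langchain.embeddings.openai import OpenAIEmbeddings

from voyager.agents.azure_model_config import AzureOpenAIEmbeddingsConfig

# Placeholder Azure resource details for illustration only.
embeddings_config = AzureOpenAIEmbeddingsConfig(
    openai_api_base="https://YOUR_RESOURCE.openai.azure.com/",
    model="text-embedding-ada-002",  # assumed embedding model name
    deployment="YOUR_EMBEDDINGS_DEPLOYMENT",
    openai_api_key="YOUR_AZURE_API_KEY",
)

# .dict() yields keyword arguments accepted by OpenAIEmbeddings; this is the
# same pattern skill.py and curriculum.py use to build the Azure-backed
# embedding function.
embeddings = OpenAIEmbeddings(**embeddings_config.dict())
```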
13 changes: 10 additions & 3 deletions voyager/agents/critic.py
@@ -1,6 +1,7 @@
from voyager.prompts import load_prompt
from voyager.utils.json_utils import fix_and_parse_json
from langchain.chat_models import ChatOpenAI
from voyager.agents.azure_model_config import AzureChatModelConfig
from voyager.agents.get_llm import get_llm
from langchain.schema import HumanMessage, SystemMessage


@@ -11,11 +12,17 @@ def __init__(
temperature=0,
request_timout=120,
mode="auto",
openai_api_type="",
azure_gpt_4_config=AzureChatModelConfig(),
azure_gpt_35_config=AzureChatModelConfig(),
):
self.llm = ChatOpenAI(
self.llm = get_llm(
model_name=model_name,
temperature=temperature,
request_timeout=request_timout,
request_timout=request_timout,
openai_api_type=openai_api_type,
azure_gpt_4_config=azure_gpt_4_config,
azure_gpt_35_config=azure_gpt_35_config,
)
assert mode in ["auto", "manual"]
self.mode = mode
23 changes: 17 additions & 6 deletions voyager/agents/curriculum.py
@@ -6,7 +6,8 @@
import voyager.utils as U
from voyager.prompts import load_prompt
from voyager.utils.json_utils import fix_and_parse_json
from langchain.chat_models import ChatOpenAI
from voyager.agents.azure_model_config import AzureChatModelConfig, AzureOpenAIEmbeddingsConfig
from voyager.agents.get_llm import get_llm
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.schema import HumanMessage, SystemMessage
from langchain.vectorstores import Chroma
@@ -25,16 +26,26 @@ def __init__(
mode="auto",
warm_up=None,
core_inventory_items: str | None = None,
openai_api_type="",
azure_gpt_4_config=AzureChatModelConfig(),
azure_gpt_35_config=AzureChatModelConfig(),
azure_openai_embeddings_config=AzureOpenAIEmbeddingsConfig(),
):
self.llm = ChatOpenAI(
self.llm = get_llm(
model_name=model_name,
temperature=temperature,
request_timeout=request_timout,
request_timout=request_timout,
openai_api_type=openai_api_type,
azure_gpt_4_config=azure_gpt_4_config,
azure_gpt_35_config=azure_gpt_35_config,
)
self.qa_llm = ChatOpenAI(
self.qa_llm = get_llm(
model_name=qa_model_name,
temperature=qa_temperature,
request_timeout=request_timout,
request_timout=request_timout,
openai_api_type=openai_api_type,
azure_gpt_4_config=azure_gpt_4_config,
azure_gpt_35_config=azure_gpt_35_config,
)
assert mode in [
"auto",
@@ -57,7 +68,7 @@ def __init__(
# vectordb for qa cache
self.qa_cache_questions_vectordb = Chroma(
collection_name="qa_cache_questions_vectordb",
embedding_function=OpenAIEmbeddings(),
embedding_function=OpenAIEmbeddings(**azure_openai_embeddings_config.dict()) if openai_api_type == "azure" else OpenAIEmbeddings(),
persist_directory=f"{ckpt_dir}/curriculum/vectordb",
)
assert self.qa_cache_questions_vectordb._collection.count() == len(
27 changes: 27 additions & 0 deletions voyager/agents/get_llm.py
@@ -0,0 +1,27 @@
from langchain.chat_models import ChatOpenAI, AzureChatOpenAI

from voyager.agents.azure_model_config import AzureChatModelConfig


def get_llm(
model_name: str = "gpt-3.5-turbo",
temperature: float = 0,
request_timout: float = 120,
azure_gpt_4_config: AzureChatModelConfig = AzureChatModelConfig(),
azure_gpt_35_config: AzureChatModelConfig = AzureChatModelConfig(),
openai_api_type: str = "",
) -> ChatOpenAI | AzureChatOpenAI:
if openai_api_type == "azure":
azure_model_config = azure_gpt_4_config if model_name == "gpt-4" else azure_gpt_35_config
llm = AzureChatOpenAI(
temperature=temperature,
**azure_model_config.dict(),
)
else:
llm = ChatOpenAI(
model_name=model_name,
temperature=temperature,
request_timeout=request_timout,
)

return llm
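
For reference, a minimal sketch of calling the new helper directly; the base URL, API version, and deployment name are placeholders:

```python
from voyager.agents.azure_model_config import AzureChatModelConfig
from voyager.agents.get_llm import get_llm

# Placeholder deployment details; the API version shown is an assumption.
gpt4_config = AzureChatModelConfig(
    openai_api_base="https://YOUR_RESOURCE.openai.azure.com/",
    openai_api_version="2023-05-15",
    deployment_name="YOUR_GPT4_DEPLOYMENT",
    openai_api_key="YOUR_AZURE_API_KEY",
)

# With openai_api_type="azure", model_name picks the config: "gpt-4" maps to
# azure_gpt_4_config, anything else to azure_gpt_35_config. Any other
# openai_api_type falls back to a plain ChatOpenAI.
llm = get_llm(
    model_name="gpt-4",
    temperature=0,
    request_timout=120,  # spelling follows the existing Voyager signature
    openai_api_type="azure",
    azure_gpt_4_config=gpt4_config,
)
```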
16 changes: 12 additions & 4 deletions voyager/agents/skill.py
@@ -1,7 +1,8 @@
import os

import voyager.utils as U
from langchain.chat_models import ChatOpenAI
from voyager.agents.azure_model_config import AzureChatModelConfig, AzureOpenAIEmbeddingsConfig
from voyager.agents.get_llm import get_llm
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.schema import HumanMessage, SystemMessage
from langchain.vectorstores import Chroma
@@ -19,11 +20,18 @@ def __init__(
request_timout=120,
ckpt_dir="ckpt",
resume=False,
openai_api_type="",
azure_gpt_4_config=AzureChatModelConfig(),
azure_gpt_35_config=AzureChatModelConfig(),
azure_openai_embeddings_config=AzureOpenAIEmbeddingsConfig(),
):
self.llm = ChatOpenAI(
self.llm = get_llm(
model_name=model_name,
temperature=temperature,
request_timeout=request_timout,
request_timout=request_timout,
openai_api_type=openai_api_type,
azure_gpt_4_config=azure_gpt_4_config,
azure_gpt_35_config=azure_gpt_35_config,
)
U.f_mkdir(f"{ckpt_dir}/skill/code")
U.f_mkdir(f"{ckpt_dir}/skill/description")
@@ -39,7 +47,7 @@ def __init__(
self.ckpt_dir = ckpt_dir
self.vectordb = Chroma(
collection_name="skill_vectordb",
embedding_function=OpenAIEmbeddings(),
embedding_function=OpenAIEmbeddings(**azure_openai_embeddings_config.dict()) if openai_api_type == "azure" else OpenAIEmbeddings(),
persist_directory=f"{ckpt_dir}/skill/vectordb",
)
assert self.vectordb._collection.count() == len(self.skills), (
19 changes: 19 additions & 0 deletions voyager/voyager.py
@@ -11,6 +11,7 @@
from .agents import CriticAgent
from .agents import CurriculumAgent
from .agents import SkillManager
from .agents import AzureChatModelConfig, AzureOpenAIEmbeddingsConfig


# TODO: remove event memory
@@ -48,6 +49,10 @@ def __init__(
ckpt_dir: str = "ckpt",
skill_library_dir: str = None,
resume: bool = False,
openai_api_type="",
azure_gpt_4_config=AzureChatModelConfig(),
azure_gpt_35_config=AzureChatModelConfig(),
azure_openai_embeddings_config=AzureOpenAIEmbeddingsConfig(),
):
"""
The main class for Voyager.
@@ -123,6 +128,9 @@ def __init__(
resume=resume,
chat_log=action_agent_show_chat_log,
execution_error=action_agent_show_execution_error,
openai_api_type=openai_api_type,
azure_gpt_4_config=azure_gpt_4_config,
azure_gpt_35_config=azure_gpt_35_config,
)
self.action_agent_task_max_retries = action_agent_task_max_retries
self.curriculum_agent = CurriculumAgent(
@@ -136,12 +144,19 @@
mode=curriculum_agent_mode,
warm_up=curriculum_agent_warm_up,
core_inventory_items=curriculum_agent_core_inventory_items,
openai_api_type=openai_api_type,
azure_gpt_4_config=azure_gpt_4_config,
azure_gpt_35_config=azure_gpt_35_config,
azure_openai_embeddings_config=azure_openai_embeddings_config,
)
self.critic_agent = CriticAgent(
model_name=critic_agent_model_name,
temperature=critic_agent_temperature,
request_timout=openai_api_request_timeout,
mode=critic_agent_mode,
openai_api_type=openai_api_type,
azure_gpt_4_config=azure_gpt_4_config,
azure_gpt_35_config=azure_gpt_35_config,
)
self.skill_manager = SkillManager(
model_name=skill_manager_model_name,
@@ -150,6 +165,10 @@
request_timout=openai_api_request_timeout,
ckpt_dir=skill_library_dir if skill_library_dir else ckpt_dir,
resume=True if resume or skill_library_dir else False,
openai_api_type=openai_api_type,
azure_gpt_4_config=azure_gpt_4_config,
azure_gpt_35_config=azure_gpt_35_config,
azure_openai_embeddings_config=azure_openai_embeddings_config,
)
self.recorder = U.EventRecorder(ckpt_dir=ckpt_dir, resume=resume)
self.resume = resume