Skip to content

Commit

Permalink
feat: add /v1/models API
Browse files Browse the repository at this point in the history
  • Loading branch information
jtsang4 committed May 11, 2023
1 parent ae53760 commit 564b907
Show file tree
Hide file tree
Showing 7 changed files with 149 additions and 16 deletions.
7 changes: 7 additions & 0 deletions .vscode/settings.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
{
"[python]": {
"editor.defaultFormatter": "ms-python.black-formatter"
},
"python.formatting.provider": "none",
"editor.formatOnSave": true
}
37 changes: 26 additions & 11 deletions claude_to_chatgpt/adapter.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@
"max_tokens": "length",
}


class ClaudeAdapter:
def __init__(self, claude_base_url="https://api.anthropic.com"):
self.claude_api_key = os.getenv("CLAUDE_API_KEY", None)
Expand Down Expand Up @@ -50,16 +51,16 @@ def openai_to_claude_params(self, openai_params):
"max_tokens_to_sample": 9016,
}

if (openai_params.get("max_tokens")):
if openai_params.get("max_tokens"):
claude_params["max_tokens_to_sample"] = openai_params["max_tokens"]

if (openai_params.get("stop")):
if openai_params.get("stop"):
claude_params["stop_sequences"] = openai_params.get("stop")

if (openai_params.get("temperature")):
if openai_params.get("temperature"):
claude_params["temperature"] = openai_params.get("temperature")
if (openai_params.get('stream')):

if openai_params.get("stream"):
claude_params["stream"] = True

return claude_params
Expand All @@ -79,10 +80,14 @@ def claude_to_chatgpt_response_stream(self, claude_response, prev_decoded_respon
{
"delta": {
"role": "assistant",
"content": claude_response.get("completion", "").removeprefix(prev_decoded_response.get("completion", "")),
"content": claude_response.get("completion", "").removeprefix(
prev_decoded_response.get("completion", "")
),
},
"index": 0,
"finish_reason": stop_reason_map[claude_response.get("stop_reason")] if claude_response.get("stop_reason") else None,
"finish_reason": stop_reason_map[claude_response.get("stop_reason")]
if claude_response.get("stop_reason")
else None,
}
],
}
Expand All @@ -107,7 +112,9 @@ def claude_to_chatgpt_response(self, claude_response):
"content": claude_response.get("completion", ""),
},
"index": 0,
"finish_reason": stop_reason_map[claude_response.get("stop_reason")] if claude_response.get("stop_reason") else None,
"finish_reason": stop_reason_map[claude_response.get("stop_reason")]
if claude_response.get("stop_reason")
else None,
}
],
}
Expand Down Expand Up @@ -143,12 +150,20 @@ async def chat(self, request: Request):
try:
decoded_line = json.loads(stripped_line)
# yield decoded_line
openai_response = self.claude_to_chatgpt_response_stream(decoded_line, prev_decoded_line)
openai_response = (
self.claude_to_chatgpt_response_stream(
decoded_line, prev_decoded_line
)
)
prev_decoded_line = decoded_line
yield openai_response
except json.JSONDecodeError as e:
logger.debug(f"Error decoding JSON: {e}") # Debug output
logger.debug(f"Failed to decode line: {stripped_line}") # Debug output
logger.debug(
f"Error decoding JSON: {e}"
) # Debug output
logger.debug(
f"Failed to decode line: {stripped_line}"
) # Debug output
else:
claude_response = response.json()
openai_response = self.claude_to_chatgpt_response(claude_response)
Expand Down
16 changes: 14 additions & 2 deletions claude_to_chatgpt/app.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@
import json
import os
from claude_to_chatgpt.logger import logger
from claude_to_chatgpt.models import models_list

CLAUDE_BASE_URL = os.getenv("CLAUDE_BASE_URL", "https://api.anthropic.com")
LOG_LEVEL = os.getenv("LOG_LEVEL", "info")
Expand All @@ -25,26 +26,37 @@
allow_headers=["*"],
)


@app.api_route(
    "/v1/chat/completions",
    methods=["POST", "OPTIONS"],
)
async def chat(request: Request):
    """Proxy an OpenAI-style /v1/chat/completions request to Claude.

    When the caller sets ``stream`` in the request body, relay the adapter's
    chunks as Server-Sent Events; otherwise collect the adapter's (single)
    response and return it as one JSON body.
    """
    openai_params = await request.json()
    if openai_params.get("stream", False):

        async def generate():
            async for response in adapter.chat(request):
                if response == "[DONE]":
                    # SSE events are terminated by a blank line; without the
                    # trailing "\n\n" many clients never see the DONE event.
                    yield "data: [DONE]\n\n"
                    break
                yield f"data: {json.dumps(response)}\n\n"

        return StreamingResponse(generate(), media_type="text/event-stream")
    else:
        # Drain the adapter generator and keep the last yielded response.
        # This avoids a bare __anext__() call, which would raise
        # StopAsyncIteration if the adapter yields nothing.
        openai_response = None
        async for response in adapter.chat(request):
            openai_response = response
        return JSONResponse(content=openai_response)


@app.route("/v1/models", methods=["GET"])
async def models(request: Request):
    """Serve the model catalogue in OpenAI's list format.

    Returns a JSON object with ``object`` set to ``"list"`` and ``data``
    holding the static ``models_list`` entries.
    """
    payload = {"object": "list", "data": models_list}
    return JSONResponse(content=payload)


# Allow running the server directly with `python app.py`.
if __name__ == "__main__":
    import uvicorn

    # NOTE(review): LOG_LEVEL is read from the environment above; PORT is
    # presumably defined alongside it (not visible in this chunk) — confirm.
    uvicorn.run("app:app", host="0.0.0.0", port=PORT, log_level=LOG_LEVEL)
2 changes: 1 addition & 1 deletion claude_to_chatgpt/logger.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,3 @@
import logging

# Module-wide logger shared by the package; handlers and level are
# configured by the consuming application. (Duplicate assignment removed.)
logger = logging.getLogger("debug")
98 changes: 98 additions & 0 deletions claude_to_chatgpt/models.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,98 @@
def _model_entry(model_id, created, perm_id, perm_created, allow_inference):
    """Build one OpenAI-compatible model record.

    ``allow_inference`` drives the sampling/logprobs/view permission flags,
    which are True for the gpt-3.5 entries and False for the gpt-4 entries;
    every other field is identical across records.
    """
    return {
        "id": model_id,
        "object": "model",
        "created": created,
        "owned_by": "openai",
        "permission": [
            {
                "id": perm_id,
                "object": "model_permission",
                "created": perm_created,
                "allow_create_engine": False,
                "allow_sampling": allow_inference,
                "allow_logprobs": allow_inference,
                "allow_search_indices": False,
                "allow_view": allow_inference,
                "allow_fine_tuning": False,
                "organization": "*",
                "group": None,
                "is_blocking": False,
            }
        ],
        "root": model_id,
        "parent": None,
    }


# Static catalogue served by the /v1/models endpoint (OpenAI list format).
models_list = [
    _model_entry(
        "gpt-3.5-turbo", 1677610602, "modelperm-YO9wdQnaovI4GD1HLV59M0AV", 1683753011, True
    ),
    _model_entry(
        "gpt-3.5-turbo-0301", 1677649963, "modelperm-tsdKKNwiNtHfnKWWTkKChjoo", 1683753015, True
    ),
    _model_entry(
        "gpt-4", 1678604602, "modelperm-nqKDpzYoZMlqbIltZojY48n9", 1683768705, False
    ),
    _model_entry(
        "gpt-4-0314", 1678604601, "modelperm-PGbNkIIZZLRipow1uFL0LCvV", 1683768678, False
    ),
]
3 changes: 2 additions & 1 deletion claude_to_chatgpt/util.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,8 @@
import tiktoken


def num_tokens_from_string(string: str, encoding_name: str = "cl100k_base") -> int:
    """Return the number of tokens *string* encodes to.

    Args:
        string: Text to tokenize.
        encoding_name: Name of a tiktoken encoding (default ``"cl100k_base"``).

    Returns:
        The token count of *string* under the given encoding.
    """
    # Duplicated trailing `return num_tokens` (diff residue) removed; a
    # single return of the encoded length is all this function needs.
    encoding = tiktoken.get_encoding(encoding_name)
    return len(encoding.encode(string))
2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[tool.poetry]
name = "claude-to-chatgpt"
version = "0.1.0"
version = "0.2.0"
description = ""
authors = ["jtsang4 <wtzeng1@gmail.com>"]
readme = "README.md"
Expand Down

0 comments on commit 564b907

Please sign in to comment.