Skip to content

Commit

Permalink
feat: support claude-2 and anthropic-version
Browse files Browse the repository at this point in the history
  • Loading branch information
jtsang4 committed Jul 11, 2023
1 parent 6a17657 commit 4084f3f
Show file tree
Hide file tree
Showing 6 changed files with 59 additions and 48 deletions.
6 changes: 3 additions & 3 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ This project converts the API of Anthropic's Claude model to the OpenAI Chat API

* ✨ Call Claude API like OpenAI ChatGPT API
* 💦 Support streaming response
* 🐻 Support `claude-v1.3`, `claude-v1.3-100k` models
* 🐻 Support `claude-instant-1`, `claude-2` models
* 🌩️ Deploy by Cloudflare Workers or Docker

## Getting Started
Expand Down Expand Up @@ -61,7 +61,7 @@ The API will then be available at http://localhost:8000. API endpoint: `/v1/chat

### Usage

When you input the model parameter as `gpt-3.5-turbo` or `gpt-3.5-turbo-0301`, it will be substituted with `claude-v1.3`. otherwise, `claude-v1.3-100k` will be utilized.
When you input the model parameter as `gpt-3.5-turbo` or `gpt-3.5-turbo-0613`, it will be substituted with `claude-instant-1`. Otherwise, `claude-2` will be utilized.


#### GUI
Expand Down Expand Up @@ -90,7 +90,7 @@ The Claude Completion API has an endpoint `/v1/complete` which takes the followi
```json
{
"prompt": "\n\nHuman: Hello, AI.\n\nAssistant: ",
"model": "claude-v1.3",
"model": "claude-instant-1",
"max_tokens_to_sample": 100,
"temperature": 1,
"stream": true
Expand Down
6 changes: 3 additions & 3 deletions README_CN.md
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@

* ✨ 以 OpenAI ChatGPT API 的方式调用 Claude API
* 💦 支持流式响应,实现打印机效果
* 🐻 支持 `claude-v1.3``claude-v1.3-100k` 模型
* 🐻 支持 `claude-instant-1`、`claude-2` 模型
* 🌩️ 通过 Cloudflare Workers 或 Docker 部署

## 开始使用
Expand Down Expand Up @@ -60,7 +60,7 @@ docker-compose up

### 使用方法

当您将模型参数 `model``gpt-3.5-turbo``gpt-3.5-turbo-0301` 时,它将替换为 `claude-v1.3`。否则,将使用 `claude-v1.3-100k`
当您将模型参数 `model` 设为 `gpt-3.5-turbo` 或 `gpt-3.5-turbo-0613` 时,它将替换为 `claude-instant-1`。否则,将使用 `claude-2`。


#### 图形界面软件
Expand Down Expand Up @@ -89,7 +89,7 @@ Claude Completion API endpoint 为 `/v1/complete`,它接受以下请求格式
```json
{
"prompt": "\n\nHuman: Hello, AI.\n\nAssistant: ",
"model": "claude-v1.3",
"model": "claude-instant-1",
"max_tokens_to_sample": 100,
"temperature": 1,
"stream": true
Expand Down
6 changes: 3 additions & 3 deletions README_JA.md
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@

- ✨ OpenAI ChatGPT API のように Claude API を呼び出す
- 💦 ストリーミングレスポンス対応
- 🐻 `claude-v1.3`, `claude-v1.3-100k` のモデルをサポートする
- 🐻 `claude-instant-1`, `claude-2` のモデルをサポートする
- 🌩️ Cloudflare Workers や Docker でデプロイする

## はじめに
Expand Down Expand Up @@ -60,7 +60,7 @@ docker-compose up

### 使用方法

モデルパラメータを `gpt-3.5-turbo` または `gpt-3.5-turbo-0301` と入力すると `claude-v1.3` に置換されます。
モデルパラメータを `gpt-3.5-turbo` または `gpt-3.5-turbo-0613` と入力すると `claude-instant-1` に置換されます。

#### GUI

Expand Down Expand Up @@ -88,7 +88,7 @@ Claude Completion API にはエンドポイント `/v1/complete` があり、以
```json
{
"prompt": "\n\nHuman: Hello, AI.\n\nAssistant: ",
"model": "claude-v1.3",
"model": "claude-instant-1",
"max_tokens_to_sample": 100,
"temperature": 1,
"stream": true
Expand Down
53 changes: 32 additions & 21 deletions claude_to_chatgpt/adapter.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,15 +42,15 @@ def convert_messages_to_prompt(self, messages):
return prompt

def openai_to_claude_params(self, openai_params):
model = model_map.get(openai_params["model"], "claude-v1.3-100k")
model = model_map.get(openai_params["model"], "claude-2")
messages = openai_params["messages"]

prompt = self.convert_messages_to_prompt(messages)

claude_params = {
"model": model,
"prompt": prompt,
"max_tokens_to_sample": 100000 if model == "claude-v1.3-100k" else 9016,
"max_tokens_to_sample": 100000,
}

if openai_params.get("max_tokens"):
Expand All @@ -67,13 +67,14 @@ def openai_to_claude_params(self, openai_params):

return claude_params

def claude_to_chatgpt_response_stream(self, claude_response, prev_decoded_response):
completion_tokens = num_tokens_from_string(claude_response.get("completion", ""))
def claude_to_chatgpt_response_stream(self, claude_response):
completion = claude_response.get("completion", "")
completion_tokens = num_tokens_from_string(completion)
openai_response = {
"id": f"chatcmpl-{str(time.time())}",
"object": "chat.completion.chunk",
"created": int(time.time()),
"model": "gpt-3.5-turbo-0301",
"model": "gpt-3.5-turbo-0613",
"usage": {
"prompt_tokens": 0,
"completion_tokens": completion_tokens,
Expand All @@ -83,9 +84,7 @@ def claude_to_chatgpt_response_stream(self, claude_response, prev_decoded_respon
{
"delta": {
"role": "assistant",
"content": claude_response.get("completion", "").removeprefix(
prev_decoded_response.get("completion", "")
),
"content": completion,
},
"index": 0,
"finish_reason": stop_reason_map[claude_response.get("stop_reason")]
Expand All @@ -98,12 +97,14 @@ def claude_to_chatgpt_response_stream(self, claude_response, prev_decoded_respon
return openai_response

def claude_to_chatgpt_response(self, claude_response):
completion_tokens = num_tokens_from_string(claude_response.get("completion", ""))
completion_tokens = num_tokens_from_string(
claude_response.get("completion", "")
)
openai_response = {
"id": f"chatcmpl-{str(time.time())}",
"object": "chat.completion",
"created": int(time.time()),
"model": "gpt-3.5-turbo-0301",
"model": "gpt-3.5-turbo-0613",
"usage": {
"prompt_tokens": 0,
"completion_tokens": completion_tokens,
Expand Down Expand Up @@ -137,7 +138,9 @@ async def chat(self, request: Request):
f"{self.claude_base_url}/v1/complete",
headers={
"x-api-key": api_key,
"accept": "application/json",
"content-type": "application/json",
"anthropic-version": "2023-06-01",
},
json=claude_params,
)
Expand All @@ -152,30 +155,38 @@ async def chat(self, request: Request):
f"{self.claude_base_url}/v1/complete",
headers={
"x-api-key": api_key,
"accept": "application/json",
"content-type": "application/json",
"anthropic-version": "2023-06-01",
},
json=claude_params,
) as response:
if response.is_error:
raise Exception(f"Error: {response.status_code}")
prev_decoded_line = {}
async for line in response.aiter_lines():
if line:
if line == "data: [DONE]":
yield "[DONE]"
break
stripped_line = line.lstrip("data:")
if stripped_line:
try:
decoded_line = json.loads(stripped_line)
# yield decoded_line
openai_response = (
self.claude_to_chatgpt_response_stream(
decoded_line, prev_decoded_line
stop_reason = decoded_line.get("stop_reason")
if stop_reason:
yield self.claude_to_chatgpt_response_stream(
{
"completion": "",
"stop_reason": stop_reason,
}
)
)
prev_decoded_line = decoded_line
yield openai_response
yield "[DONE]"
else:
completion = decoded_line.get("completion")
if completion:
openai_response = (
self.claude_to_chatgpt_response_stream(
decoded_line
)
)
yield openai_response
except json.JSONDecodeError as e:
logger.debug(
f"Error decoding JSON: {e}"
Expand Down
16 changes: 8 additions & 8 deletions claude_to_chatgpt/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@
"parent": None,
},
{
"id": "gpt-3.5-turbo-0301",
"id": "gpt-3.5-turbo-0613",
"object": "model",
"created": 1677649963,
"owned_by": "openai",
Expand All @@ -44,7 +44,7 @@
"is_blocking": False,
}
],
"root": "gpt-3.5-turbo-0301",
"root": "gpt-3.5-turbo-0613",
"parent": None,
},
{
Expand Down Expand Up @@ -72,7 +72,7 @@
"parent": None,
},
{
"id": "gpt-4-0314",
"id": "gpt-4-0613",
"object": "model",
"created": 1678604601,
"owned_by": "openai",
Expand All @@ -92,14 +92,14 @@
"is_blocking": False,
}
],
"root": "gpt-4-0314",
"root": "gpt-4-0613",
"parent": None,
},
]

# Maps incoming OpenAI model names to the Claude model that serves them.
# gpt-3.5-* requests use the fast claude-instant-1 model; gpt-4* requests
# use the more capable claude-2 model. Lookups for unknown model names are
# expected to fall back to "claude-2" at the call site (see adapter.py's
# model_map.get(..., "claude-2")).
model_map = {
    "gpt-3.5-turbo": "claude-instant-1",
    "gpt-3.5-turbo-0613": "claude-instant-1",
    "gpt-4": "claude-2",
    "gpt-4-0613": "claude-2",
}
20 changes: 10 additions & 10 deletions cloudflare-worker.js
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,7 @@ function claudeToChatGPTResponse(claudeResponse, stream = false) {
const result = {
id: `chatcmpl-${timestamp}`,
created: timestamp,
model: 'gpt-3.5-turbo-0301',
model: 'gpt-3.5-turbo-0613',
usage: {
prompt_tokens: 0,
completion_tokens: completionTokens,
Expand Down Expand Up @@ -149,12 +149,12 @@ async function handleRequest(request) {

const requestBody = await request.json();
const { model, messages, temperature, stop, stream } = requestBody;
const claudeModel = model_map[model] || 'claude-v1.3-100k';
const claudeModel = model_map[model] || 'claude-2';

// OpenAI API 转换为 Claude API
const prompt = convertMessagesToPrompt(messages);
let maxTokensToSample = 100000;
if (model !== 'claude-v1.3-100k') {
if (model !== 'claude-2') {
maxTokensToSample = 9016;
}
const claudeRequestBody = {
Expand Down Expand Up @@ -237,7 +237,7 @@ const models_list = [
parent: null,
},
{
id: 'gpt-3.5-turbo-0301',
id: 'gpt-3.5-turbo-0613',
object: 'model',
created: 1677649963,
owned_by: 'openai',
Expand All @@ -257,7 +257,7 @@ const models_list = [
is_blocking: false,
},
],
root: 'gpt-3.5-turbo-0301',
root: 'gpt-3.5-turbo-0613',
parent: null,
},
{
Expand Down Expand Up @@ -305,14 +305,14 @@ const models_list = [
is_blocking: false,
},
],
root: 'gpt-4-0314',
root: 'gpt-4-0613',
parent: null,
},
];

// Maps incoming OpenAI model names to the Claude model that serves them.
// gpt-3.5-* requests use the fast claude-instant-1 model; gpt-4* requests
// use the more capable claude-2 model. Unknown names fall back to
// 'claude-2' at the lookup site (model_map[model] || 'claude-2').
const model_map = {
  'gpt-3.5-turbo': 'claude-instant-1',
  'gpt-3.5-turbo-0613': 'claude-instant-1',
  'gpt-4': 'claude-2',
  'gpt-4-0613': 'claude-2',
};

0 comments on commit 4084f3f

Please sign in to comment.