diff --git a/.github/workflows/deploy.yaml b/.github/workflows/deploy.yaml
new file mode 100644
index 0000000..1c58463
--- /dev/null
+++ b/.github/workflows/deploy.yaml
@@ -0,0 +1,17 @@
+name: Deploy Worker
+on:
+  push:
+  # NOTE(review): PRs from forks cannot read repository secrets, so the deploy
+  pull_request:
+  repository_dispatch:
+jobs:
+  deploy:
+    runs-on: ubuntu-latest
+    timeout-minutes: 60
+    steps:
+      - uses: actions/checkout@v4
+      - name: Build & Deploy Worker
+        uses: cloudflare/wrangler-action@v3
+        with:
+          apiToken: ${{ secrets.CF_API_TOKEN }}
+          accountId: ${{ secrets.CF_ACCOUNT_ID }}
diff --git a/README.md b/README.md
index c784629..eb551f3 100644
--- a/README.md
+++ b/README.md
@@ -2,15 +2,23 @@
 
 this project aims to convert / proxy Cloudflare Workers AI responses to OpenAI API compatible responses so Workers AI models can be used with any OpenAI / ChatGPT compatible client
+
+- supports streaming and non-streaming responses
+- rewrites the different models 'gpt-3' and 'gpt-4' to use `@cf/meta/llama-2-7b-chat-fp16`
+- if the openAI client can be configured to provide other model names, just put in the cloudflare model id instead of gpt-4
 
 ## installation
 
-tba
+[![Deploy to Cloudflare Workers](https://deploy.workers.cloudflare.com/button)](https://deploy.workers.cloudflare.com/?url=https://github.com/pew/cloudflare-workers-openai-mock)
 
-## features
+1. create a [Cloudflare Account](https://dash.cloudflare.com/)
+2. clone this repo
+3. run `npm run deploy`
 
-- supports streaming and non-streaming responses
-- rewrites the different models 'gpt-3' and 'gpt-4' to use `@cf/meta/llama-2-7b-chat-fp16`
-- if the openAI client can be configured to provide other model names, just put in the cloudflare model id instead of gpt-4
+after the script has been deployed, you'll get an URL which you can use as your OpenAI API endpoint for other applications, something like this:
+
+```
+https://openai-api.foobar.workers.dev
+```
 
 ## use with llm
 