diff --git a/.github/workflows/pr.yml b/.github/workflows/pr.yml index 6e9980ac..54556ea8 100644 --- a/.github/workflows/pr.yml +++ b/.github/workflows/pr.yml @@ -36,7 +36,7 @@ jobs: run: npm run build - name: Run code review script - run: npm run start -- --ci=github --model=gpt-3.5-turbo + run: npm run start -- --ci=github --model=gpt-4o - name: Run linting test run: npm run lint-test @@ -45,7 +45,7 @@ jobs: run: npm run test-unit - name: Run prompt tests - run: npm run test -- --ci=github --model=gpt-3.5-turbo + run: npm run test -- --ci=github --model=gpt-4o deploy_core_to_dev: runs-on: ubuntu-latest diff --git a/action.md b/action.md index f197789a..a0216cb6 100644 --- a/action.md +++ b/action.md @@ -22,9 +22,9 @@ jobs: fetch-depth: 0 - name: Code Review GPT - uses: mattzcarey/code-review-gpt@v0.1.4-alpha + uses: mattzcarey/code-review-gpt@v0.1.8 with: OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} - MODEL: 'gpt-3.5-turbo' + MODEL: 'gpt-4o' GITHUB_TOKEN: ${{ github.token }} ``` \ No newline at end of file diff --git a/action.yml b/action.yml index f37d61d5..f2f64283 100644 --- a/action.yml +++ b/action.yml @@ -5,7 +5,7 @@ inputs: MODEL: description: 'The GPT model to use' required: true - default: 'gpt-3.5-turbo' + default: 'gpt-4o' OPENAI_API_KEY: description: 'OpenAI API Key' required: true diff --git a/packages/code-review-gpt/package-lock.json b/packages/code-review-gpt/package-lock.json index 7188fb92..eb9c3904 100644 --- a/packages/code-review-gpt/package-lock.json +++ b/packages/code-review-gpt/package-lock.json @@ -1,12 +1,12 @@ { "name": "code-review-gpt", - "version": "0.1.5", + "version": "0.1.8", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "code-review-gpt", - "version": "0.1.5", + "version": "0.1.8", "license": "MIT", "dependencies": { "@actions/github": "^5.1.1", diff --git a/packages/code-review-gpt/package.json b/packages/code-review-gpt/package.json index 8c7306e1..e9a8baa6 100644 --- 
a/packages/code-review-gpt/package.json +++ b/packages/code-review-gpt/package.json @@ -1,6 +1,6 @@ { "name": "code-review-gpt", - "version": "0.1.6", + "version": "0.1.8", "description": "Your AI code reviewer. Improve code quality and catch bugs before you break production", "bin": { "code-review-gpt": "./dist/index.js" }, @@ -16,7 +16,8 @@ "test": "ts-node ./src/index.ts test", "test-unit": "dotenv -e .env jest", "build": "node ./utils/build.js", - "postbuild": "node ./utils/shebang.js && chmod +x ./dist/index.js" + "postbuild": "node ./utils/shebang.js && chmod +x ./dist/index.js", + "publish-package": "npm i && npm run build && npm publish --access public" }, "keywords": [ "code-review", diff --git a/packages/code-review-gpt/src/review/constants.ts b/packages/code-review-gpt/src/review/constants.ts index 7696b046..c5cd0c21 100644 --- a/packages/code-review-gpt/src/review/constants.ts +++ b/packages/code-review-gpt/src/review/constants.ts @@ -3,8 +3,16 @@ export const signOff = export const modelInfo = [ { - model: "gpt-4-1106-preview", - maxPromptLength: 128000, //128k tokens + model: "gpt-4o", + maxPromptLength: 300000, //100k tokens + }, + { + model: "gpt-4-turbo", + maxPromptLength: 300000, //100k tokens + }, + { + model: "gpt-4-turbo-preview", + maxPromptLength: 300000, //100k tokens }, { model: "gpt-4", diff --git a/services/core/functions/webhook/src/constants.ts b/services/core/functions/webhook/src/constants.ts index fd19b0c0..469bfe46 100644 --- a/services/core/functions/webhook/src/constants.ts +++ b/services/core/functions/webhook/src/constants.ts @@ -1,28 +1,36 @@ export const signOff = "#### Powered by [Code Review GPT](https://github.com/mattzcarey/code-review-gpt)"; -export const modelInfo = [ - { - model: "gpt-4-1106-preview", - maxPromptLength: 300000, //100k tokens - }, - { - model: "gpt-4", - maxPromptLength: 21000, //8k tokens - }, - { - model: "gpt-4-32k", - maxPromptLength: 90000, //32k tokens - }, - { - model: "gpt-3.5-turbo", - 
maxPromptLength: 9000, //4k tokens - }, - { - model: "gpt-3.5-turbo-16k", - maxPromptLength: 45000, //16k tokens - }, -]; // Response needs about 1k tokens ~= 3k characters + export const modelInfo = [ + { + model: "gpt-4o", + maxPromptLength: 300000, //100k tokens + }, + { + model: "gpt-4-turbo", + maxPromptLength: 300000, //100k tokens + }, + { + model: "gpt-4-turbo-preview", + maxPromptLength: 300000, //100k tokens + }, + { + model: "gpt-4", + maxPromptLength: 21000, //8k tokens + }, + { + model: "gpt-4-32k", + maxPromptLength: 90000, //32k tokens + }, + { + model: "gpt-3.5-turbo", + maxPromptLength: 9000, //4k tokens + }, + { + model: "gpt-3.5-turbo-16k", + maxPromptLength: 45000, //16k tokens + }, +]; // Response needs about 1k tokens ~= 3k characters export const languageMap: { [key: string]: string } = { ".js": "JavaScript",