add an OpenAI-compatible provider as a generic Enterprise LLM adapter (#3218)

Signed-off-by: Stephen Gutekanst <stephen@sourcegraph.com>
slimsag authored and valerybugakov committed Apr 22, 2024
1 parent 6bbff68 commit 0c4c486
Showing 8 changed files with 473 additions and 3 deletions.
1 change: 1 addition & 0 deletions lib/shared/src/configuration.ts
@@ -37,6 +37,7 @@ export interface Configuration {
         | 'anthropic'
         | 'fireworks'
         | 'unstable-openai'
+        | 'experimental-openaicompatible'
         | 'experimental-ollama'
         | null
     autocompleteAdvancedModel: string | null
6 changes: 5 additions & 1 deletion lib/shared/src/sourcegraph-api/completions/client.ts
@@ -94,8 +94,12 @@ export abstract class SourcegraphCompletionsClient
         apiVersion: number,
         signal?: AbortSignal
     ): AsyncGenerator<CompletionGeneratorValue> {
-        // This is a technique to convert a function that takes callbacks to an async generator.
+        // Provide default stop sequence for starchat models.
+        if (!params.stopSequences && params?.model?.startsWith('openaicompatible/starchat')) {
+            params.stopSequences = ['<|end|>']
+        }
+
+        // This is a technique to convert a function that takes callbacks to an async generator.
         const values: Promise<CompletionGeneratorValue>[] = []
         let resolve: ((value: CompletionGeneratorValue) => void) | undefined
         values.push(
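The new guard gives starchat models a default stop sequence, `<|end|>`, the model's end-of-turn token, so generation halts where the model signals it is finished. The comment beneath it names a general pattern that is easy to miss from the truncated hunk: converting a callback-based producer into an async generator by queueing one promise per value and letting each callback resolve the promise a consumer is awaiting. Below is a minimal, self-contained sketch of that technique; the helper name `callbacksToGenerator` and its producer signature are hypothetical, not Cody's actual API:

// Hypothetical helper, not Cody's actual API: buffer one pending promise per
// value and hand its `resolve` to the callback-based producer, so each
// callback invocation unblocks the consumer awaiting the generator.
async function* callbacksToGenerator<T>(
    start: (onValue: (value: T) => void, onDone: () => void) => void
): AsyncGenerator<T> {
    const values: Promise<T | undefined>[] = []
    let resolve: ((value: T | undefined) => void) | undefined
    let done = false

    // Queue a fresh promise that the next callback invocation will resolve.
    const enqueue = (): void => {
        values.push(new Promise<T | undefined>(r => { resolve = r }))
    }
    enqueue()

    start(
        value => {
            const settle = resolve
            enqueue() // prepare the slot for the value after this one
            settle?.(value)
        },
        () => {
            done = true
            resolve?.(undefined) // release a consumer blocked on the last slot
        }
    )

    while (true) {
        const value = await values.shift()!
        if (done && value === undefined) {
            return
        }
        yield value as T
    }
}

// Usage: a synchronous producer; `for await` drains the values in order.
async function demo(): Promise<void> {
    const numbers = callbacksToGenerator<number>((onValue, onDone) => {
        for (const n of [1, 2, 3]) {
            onValue(n)
        }
        onDone()
    })
    for await (const n of numbers) {
        console.log(n) // 1, 2, 3
    }
}

The truncated lines above (`values`, `resolve`, `values.push(`) are the same two moving parts: a queue of pending promises and the resolver for the most recently queued one.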
1 change: 1 addition & 0 deletions vscode/CHANGELOG.md
@@ -6,6 +6,7 @@ This is a log of all notable changes to Cody for VS Code. [Unreleased] changes a
 
 ### Added
 
+- Cody Enterprise users now have access to an `experimental-openaicompatible` provider, which allows bringing your own LLM via any OpenAI-compatible API. For now, this is only supported with Starchat and specific configurations, but we continue to generalize this work to support more models and OpenAI-compatible endpoints. [pull/3218](https://github.com/sourcegraph/cody/pull/3218)
 - Edit/Chat: Cody now expands the selection to the nearest enclosing function, if available, before attempting to expand to the nearest enclosing block. [pull/3507](https://github.com/sourcegraph/cody/pull/3507)
 - Edit: New `cody.edit.preInstruction` configuration option for adding custom instructions at the end of all your requests. [pull/3542](https://github.com/sourcegraph/cody/pull/3542)
2 changes: 1 addition & 1 deletion vscode/package.json
@@ -970,7 +970,7 @@
         "cody.autocomplete.advanced.provider": {
           "type": "string",
           "default": null,
-          "enum": [null, "anthropic", "fireworks", "unstable-openai", "experimental-ollama"],
+          "enum": [null, "anthropic", "fireworks", "unstable-openai", "experimental-ollama", "experimental-openaicompatible"],
           "markdownDescription": "The provider used for code autocomplete. Most providers other than `anthropic` require the `cody.autocomplete.advanced.serverEndpoint` and `cody.autocomplete.advanced.accessToken` settings to also be set. Check the Cody output channel for error messages if autocomplete is not working as expected."
         },
         "cody.autocomplete.advanced.serverEndpoint": {
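Combined with the description above, a user-level settings.json that opts into the new provider might look like the following sketch. The endpoint and token are placeholders, the model name is borrowed from the tests below, and `cody.autocomplete.advanced.model` is assumed here (it is not shown in this diff) to be the setting backing `autocompleteAdvancedModel`:

{
  "cody.autocomplete.advanced.provider": "experimental-openaicompatible",
  "cody.autocomplete.advanced.serverEndpoint": "https://llm.example.com/v1",
  "cody.autocomplete.advanced.accessToken": "<your-access-token>",
  "cody.autocomplete.advanced.model": "starchat-16b-beta"
}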
27 changes: 27 additions & 0 deletions vscode/src/completions/providers/create-provider.test.ts
@@ -87,6 +87,33 @@ describe('createProviderConfig', () => {
         expect(provider?.model).toBe('starcoder-hybrid')
     })
 
+    it('returns "experimental-openaicompatible" provider config and corresponding model if specified', async () => {
+        const provider = await createProviderConfig(
+            getVSCodeConfigurationWithAccessToken({
+                autocompleteAdvancedProvider: 'experimental-openaicompatible',
+                autocompleteAdvancedModel: 'starchat-16b-beta',
+            }),
+            dummyCodeCompletionsClient,
+            dummyAuthStatus
+        )
+        expect(provider?.identifier).toBe('experimental-openaicompatible')
+        expect(provider?.model).toBe('starchat-16b-beta')
+    })
+
+    it('returns "experimental-openaicompatible" provider config if specified in settings and default model', async () => {
+        const provider = await createProviderConfig(
+            getVSCodeConfigurationWithAccessToken({
+                autocompleteAdvancedProvider: 'experimental-openaicompatible',
+            }),
+            dummyCodeCompletionsClient,
+            dummyAuthStatus
+        )
+        expect(provider?.identifier).toBe('experimental-openaicompatible')
+        // TODO(slimsag): make this default to starchat2 once added
+        // specifically just when using `experimental-openaicompatible`
+        expect(provider?.model).toBe('starcoder-hybrid')
+    })
+
     it('returns "openai" provider config if specified in VSCode settings; model is ignored', async () => {
         const provider = await createProviderConfig(
             getVSCodeConfigurationWithAccessToken({
18 changes: 18 additions & 0 deletions vscode/src/completions/providers/create-provider.ts
@@ -17,6 +17,7 @@ import {
     type FireworksOptions,
     createProviderConfig as createFireworksProviderConfig,
 } from './fireworks'
+import { createProviderConfig as createOpenAICompatibleProviderConfig } from './openaicompatible'
 import type { ProviderConfig } from './provider'
 import { createProviderConfig as createUnstableOpenAIProviderConfig } from './unstable-openai'

@@ -52,6 +53,15 @@ export async function createProviderConfig(
         case 'anthropic': {
             return createAnthropicProviderConfig({ client, model })
         }
+        case 'experimental-openaicompatible': {
+            return createOpenAICompatibleProviderConfig({
+                client,
+                model: config.autocompleteAdvancedModel ?? model ?? null,
+                timeouts: config.autocompleteTimeouts,
+                authStatus,
+                config,
+            })
+        }
         case 'experimental-ollama':
         case 'unstable-ollama': {
             return createExperimentalOllamaProviderConfig(
@@ -102,6 +112,14 @@
                 authStatus,
                 config,
             })
+        case 'experimental-openaicompatible':
+            return createOpenAICompatibleProviderConfig({
+                client,
+                timeouts: config.autocompleteTimeouts,
+                model: model ?? null,
+                authStatus,
+                config,
+            })
         case 'aws-bedrock':
         case 'anthropic':
             return createAnthropicProviderConfig({
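The `./openaicompatible` module itself is not visible in this diff. As a rough sketch of the shape implied by the two call sites above and by the expectations in create-provider.test.ts (identifier `experimental-openaicompatible`, default model `starcoder-hybrid`), with loose types standing in for the real ones:

// Sketch only: inferred from the call sites and tests, not the real module.
interface OpenAICompatibleOptions {
    client: unknown     // CodeCompletionsClient in the real signature
    model: string | null
    timeouts: unknown   // receives config.autocompleteTimeouts
    authStatus: unknown
    config: unknown
}

export function createProviderConfig(options: OpenAICompatibleOptions) {
    return {
        identifier: 'experimental-openaicompatible' as const,
        // The tests expect 'starcoder-hybrid' when no model is configured;
        // the TODO in the tests notes this may later default to starchat2.
        model: options.model ?? 'starcoder-hybrid',
        // Completion methods required by ProviderConfig are omitted here.
    }
}

Note that the two call sites differ: in the user-configured path, `config.autocompleteAdvancedModel` takes precedence over the passed-in `model`, while the second, server-driven path consults only `model`.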
