configure autocomplete provider based on cody LLM settings in site config (#1035)

Part of #931. Test in pair with https://github.com/sourcegraph/sourcegraph/pull/56568.

Defines the autocomplete provider config based on the completions provider and model names from the site config. The configuration hierarchy (sketched below) is:

1. If the `cody.autocomplete.advanced.provider` field in the VSCode settings is set to a supported provider name, and all additional conditions (model, access token, etc.) are met, the corresponding provider config is returned. Otherwise, `null` is returned (no completions provider is created).
2. If the provider name and model can be determined from the evaluated feature flags, the corresponding provider config is returned.
3. If the completions provider name is defined in the connected Sourcegraph instance's site config, the corresponding provider config is returned. If the provider name or model can't be parsed, or the provider is not supported, `null` is returned (no completions provider is created).
4. The Anthropic provider config is used by default.
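The hierarchy can be read as a simple cascade. The sketch below is illustrative only: the `pickProvider` helper, its parameters, and the `ProviderChoice` type are hypothetical names introduced here for clarity, and the real `createProviderConfig` additionally validates endpoints, access tokens, and model names (see the tests in this diff for the exact expected outputs).

type ProviderChoice = { identifier: string; model?: string } | null

function pickProvider(
    fromVSCodeSettings: ProviderChoice | undefined, // cody.autocomplete.advanced.* settings
    fromFeatureFlags: ProviderChoice | undefined,   // evaluated feature flags
    fromSiteConfig: ProviderChoice | undefined      // Cody LLM settings from the site config
): ProviderChoice {
    // 1. Explicit VSCode settings win, even when they resolve to null (unsupported provider).
    if (fromVSCodeSettings !== undefined) {
        return fromVSCodeSettings
    }
    // 2. Feature flags can pin a provider and model.
    if (fromFeatureFlags !== undefined) {
        return fromFeatureFlags
    }
    // 3. Fall back to the provider advertised by the connected Sourcegraph instance;
    //    unparseable or unsupported values resolve to null.
    if (fromSiteConfig !== undefined) {
        return fromSiteConfig
    }
    // 4. Default when nothing else applies.
    return { identifier: 'anthropic', model: 'claude-instant-1' }
}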
1 parent: 471fffb
Commit: c475300
Showing 7 changed files with 447 additions and 40 deletions.
vscode/src/completions/providers/createProvider.test.ts (264 additions, 0 deletions)
@@ -0,0 +1,264 @@
import { describe, expect, it } from 'vitest'

import { Configuration } from '@sourcegraph/cody-shared/src/configuration'
import { DOTCOM_URL } from '@sourcegraph/cody-shared/src/sourcegraph-api/environments'
import { CodyLLMSiteConfiguration } from '@sourcegraph/cody-shared/src/sourcegraph-api/graphql/client'

import { CodeCompletionsClient } from '../client'

import { createProviderConfig } from './createProvider'

const DEFAULT_VSCODE_SETTINGS: Configuration = {
    serverEndpoint: DOTCOM_URL.href,
    proxy: null,
    codebase: '',
    customHeaders: {},
    chatPreInstruction: 'My name is John Doe.',
    useContext: 'embeddings',
    autocomplete: true,
    experimentalCommandLenses: false,
    experimentalEditorTitleCommandIcon: false,
    experimentalChatPredictions: false,
    experimentalGuardrails: false,
    experimentalLocalSymbols: false,
    inlineChat: true,
    isRunningInsideAgent: false,
    experimentalNonStop: false,
    experimentalSymfAnthropicKey: '',
    experimentalSymfPath: 'symf',
    debugEnable: false,
    debugVerbose: false,
    debugFilter: null,
    telemetryLevel: 'all',
    autocompleteAdvancedProvider: null,
    autocompleteAdvancedServerEndpoint: null,
    autocompleteAdvancedModel: null,
    autocompleteAdvancedAccessToken: null,
    autocompleteAdvancedEmbeddings: true,
    autocompleteExperimentalCompleteSuggestWidgetSelection: false,
    autocompleteExperimentalSyntacticPostProcessing: false,
    autocompleteExperimentalGraphContext: false,
}

const getVSCodeSettings = (config: Partial<Configuration> = {}): Configuration => ({
    ...DEFAULT_VSCODE_SETTINGS,
    ...config,
})

const dummyCodeCompletionsClient: CodeCompletionsClient = {
    complete: () => Promise.resolve({ completion: '', stopReason: '' }),
    onConfigurationChange: () => undefined,
}

describe('createProviderConfig', () => {
    describe('if completions provider fields are defined in VSCode settings', () => {
        it('returns null if completions provider is not supported', async () => {
            const provider = await createProviderConfig(
                getVSCodeSettings({
                    autocompleteAdvancedProvider: 'nasa-ai' as Configuration['autocompleteAdvancedProvider'],
                }),
                dummyCodeCompletionsClient,
                undefined,
                {}
            )
            expect(provider).toBeNull()
        })

        it('returns "codegen" provider config if the corresponding provider name and endpoint are specified', async () => {
            const provider = await createProviderConfig(
                getVSCodeSettings({
                    autocompleteAdvancedProvider: 'unstable-codegen',
                    autocompleteAdvancedServerEndpoint: 'https://unstable-codegen.com',
                }),
                dummyCodeCompletionsClient,
                undefined,
                {}
            )
            expect(provider?.identifier).toBe('codegen')
            expect(provider?.model).toBe('codegen')
        })

        it('returns null if provider is "unstable-codegen", but the server endpoint is not set', async () => {
            const provider = await createProviderConfig(
                getVSCodeSettings({ autocompleteAdvancedProvider: 'unstable-codegen' }),
                dummyCodeCompletionsClient,
                undefined,
                {}
            )
            expect(provider).toBeNull()
        })

        it('returns "fireworks" provider config and corresponding model if specified', async () => {
            const provider = await createProviderConfig(
                getVSCodeSettings({
                    autocompleteAdvancedProvider: 'unstable-fireworks',
                    autocompleteAdvancedModel: 'starcoder-3b',
                }),
                dummyCodeCompletionsClient,
                undefined,
                {}
            )
            expect(provider?.identifier).toBe('fireworks')
            expect(provider?.model).toBe('starcoder-3b')
        })

        it('returns "fireworks" provider config if specified in settings and default model', async () => {
            const provider = await createProviderConfig(
                getVSCodeSettings({ autocompleteAdvancedProvider: 'unstable-fireworks' }),
                dummyCodeCompletionsClient,
                undefined,
                {}
            )
            expect(provider?.identifier).toBe('fireworks')
            expect(provider?.model).toBe('starcoder-7b')
        })

        it('returns "openai" provider config if specified in VSCode settings; model is ignored', async () => {
            const provider = await createProviderConfig(
                getVSCodeSettings({
                    autocompleteAdvancedProvider: 'unstable-openai',
                    autocompleteAdvancedModel: 'hello-world',
                }),
                dummyCodeCompletionsClient,
                undefined,
                {}
            )
            expect(provider?.identifier).toBe('unstable-openai')
            expect(provider?.model).toBe('gpt-35-turbo')
        })

        it('returns "anthropic" provider config if specified in VSCode settings; model is ignored', async () => {
            const provider = await createProviderConfig(
                getVSCodeSettings({
                    autocompleteAdvancedProvider: 'anthropic',
                    autocompleteAdvancedModel: 'hello-world',
                }),
                dummyCodeCompletionsClient,
                undefined,
                {}
            )
            expect(provider?.identifier).toBe('anthropic')
            expect(provider?.model).toBe('claude-instant-1')
        })

        it('provider specified in VSCode settings takes precedence over the one defined in the site config', async () => {
            const provider = await createProviderConfig(
                getVSCodeSettings({
                    autocompleteAdvancedProvider: 'unstable-codegen',
                    autocompleteAdvancedServerEndpoint: 'https://unstable-codegen.com',
                }),
                dummyCodeCompletionsClient,
                undefined,
                { provider: 'azure-open-ai', completionModel: 'gpt-35-turbo-test' }
            )
            expect(provider?.identifier).toBe('codegen')
            expect(provider?.model).toBe('codegen')
        })
    })

    describe('completions provider and model are defined in the site config and not set in VSCode settings', () => {
        describe('if provider is "sourcegraph"', () => {
            const testCases: {
                codyLLMConfig: CodyLLMSiteConfiguration
                expected: { provider: string; model?: string } | null
            }[] = [
                // sourcegraph
                { codyLLMConfig: { provider: 'sourcegraph', completionModel: 'hello-world' }, expected: null },
                {
                    codyLLMConfig: { provider: 'sourcegraph', completionModel: 'anthropic/claude-instant-1' },
                    expected: { provider: 'anthropic', model: 'claude-instant-1' },
                },
                {
                    codyLLMConfig: { provider: 'sourcegraph', completionModel: 'anthropic/' },
                    expected: null,
                },
                {
                    codyLLMConfig: { provider: 'sourcegraph', completionModel: '/claude-instant-1' },
                    expected: null,
                },

                // aws-bedrock
                { codyLLMConfig: { provider: 'aws-bedrock', completionModel: 'hello-world' }, expected: null },
                {
                    codyLLMConfig: { provider: 'aws-bedrock', completionModel: 'anthropic.claude-instant-1' },
                    expected: { provider: 'anthropic', model: 'claude-instant-1' },
                },
                {
                    codyLLMConfig: { provider: 'aws-bedrock', completionModel: 'anthropic.' },
                    expected: null,
                },
                {
                    codyLLMConfig: { provider: 'aws-bedrock', completionModel: 'anthropic/claude-instant-1' },
                    expected: null,
                },

                // open-ai
                {
                    codyLLMConfig: { provider: 'openai', completionModel: 'gpt-35-turbo-test' },
                    expected: { provider: 'unstable-openai', model: 'gpt-35-turbo-test' },
                },
                {
                    codyLLMConfig: { provider: 'openai' },
                    expected: { provider: 'unstable-openai', model: 'gpt-35-turbo' },
                },

                // azure-openai
                {
                    codyLLMConfig: { provider: 'azure-openai', completionModel: 'gpt-35-turbo-test' },
                    expected: { provider: 'unstable-openai', model: '' },
                },
                {
                    codyLLMConfig: { provider: 'azure-openai' },
                    expected: { provider: 'unstable-openai', model: 'gpt-35-turbo' },
                },

                // fireworks
                {
                    codyLLMConfig: { provider: 'fireworks', completionModel: 'llama-code-7b' },
                    expected: { provider: 'fireworks', model: 'llama-code-7b' },
                },
                {
                    codyLLMConfig: { provider: 'fireworks' },
                    expected: { provider: 'fireworks', model: 'starcoder-7b' },
                },

                // unknown-provider
                {
                    codyLLMConfig: { provider: 'unknown-provider', completionModel: 'llama-code-7b' },
                    expected: null,
                },

                // provider not defined (backward compat)
                {
                    codyLLMConfig: { provider: undefined, completionModel: 'llama-code-7b' },
                    expected: { provider: 'anthropic', model: 'claude-instant-1' },
                },
            ]

            for (const { codyLLMConfig, expected } of testCases) {
                it(`returns ${JSON.stringify(expected)} when cody LLM config is ${JSON.stringify(
                    codyLLMConfig
                )}`, async () => {
                    const provider = await createProviderConfig(
                        getVSCodeSettings(),
                        dummyCodeCompletionsClient,
                        undefined,
                        codyLLMConfig
                    )
                    if (expected === null) {
                        expect(provider).toBeNull()
                    } else {
                        expect(provider?.identifier).toBe(expected.provider)
                        expect(provider?.model).toBe(expected.model)
                    }
                })
            }
        })
    })

    it('returns anthropic provider config if no completions provider specified in VSCode settings or site config', async () => {
        const provider = await createProviderConfig(getVSCodeSettings(), dummyCodeCompletionsClient, undefined, {})
        expect(provider?.identifier).toBe('anthropic')
        expect(provider?.model).toBe('claude-instant-1')
    })
})