Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

configure autocomplete provider based on cody LLM settings in site config #1035

1 change: 1 addition & 0 deletions lib/shared/src/sourcegraph-api/graphql/client.ts
Original file line number Diff line number Diff line change
Expand Up @@ -140,6 +140,7 @@ export interface CodyLLMSiteConfiguration {
fastChatModelMaxTokens?: number
completionModel?: string
completionModelMaxTokens?: number
provider?: string
}

interface IsContextRequiredForChatQueryResponse {
Expand Down
1 change: 1 addition & 0 deletions lib/shared/src/sourcegraph-api/graphql/queries.ts
Original file line number Diff line number Diff line change
Expand Up @@ -46,6 +46,7 @@ query CurrentSiteCodyLlmConfiguration {
fastChatModelMaxTokens
completionModel
completionModelMaxTokens
provider
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I assume this doesn't cause issues if the provider field does not exist?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Great catch! How did I miss that?! Thank you, @chwarwick!
Addressed in a16c760
Honestly, I don't like the implementation. Do you know of a more robust way of getting a potentially non-existing field?

cc: @philipp-spiess

}
}
}`
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@ export async function createInlineCompletionItemProvider({
const disposables: vscode.Disposable[] = []

const [providerConfig, graphContextFlag] = await Promise.all([
createProviderConfig(config, client, featureFlagProvider),
createProviderConfig(config, client, featureFlagProvider, authProvider.getAuthStatus().configOverwrites),
featureFlagProvider?.evaluateFeatureFlag(FeatureFlag.CodyAutocompleteGraphContext),
])
if (providerConfig) {
Expand Down
258 changes: 258 additions & 0 deletions vscode/src/completions/providers/createProvider.test.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,258 @@
import { describe, expect, it } from 'vitest'

import { Configuration } from '@sourcegraph/cody-shared/src/configuration'
import { DOTCOM_URL } from '@sourcegraph/cody-shared/src/sourcegraph-api/environments'
import { CodyLLMSiteConfiguration } from '@sourcegraph/cody-shared/src/sourcegraph-api/graphql/client'

import { CodeCompletionsClient } from '../client'

import { createProviderConfig } from './createProvider'

/**
 * Baseline VSCode configuration used by every test in this file.
 * Autocomplete is enabled and all `autocompleteAdvanced*` overrides are
 * unset, so individual tests only specify the fields they care about
 * (via `getVSCodeSettings`).
 */
const DEFAULT_VSCODE_SETTINGS: Configuration = {
    // Connection / workspace
    serverEndpoint: DOTCOM_URL.href,
    proxy: null,
    codebase: '',
    customHeaders: {},
    // Chat
    chatPreInstruction: 'My name is John Doe.',
    useContext: 'embeddings',
    inlineChat: true,
    // Feature flags / experiments
    autocomplete: true,
    experimentalCommandLenses: false,
    experimentalEditorTitleCommandIcon: false,
    experimentalChatPredictions: false,
    experimentalGuardrails: false,
    experimentalLocalSymbols: false,
    isRunningInsideAgent: false,
    experimentalNonStop: false,
    experimentalSymfAnthropicKey: '',
    experimentalSymfPath: 'symf',
    // Debug / telemetry
    debugEnable: false,
    debugVerbose: false,
    debugFilter: null,
    telemetryLevel: 'all',
    // Autocomplete provider overrides — all unset by default so the
    // site-config-driven code paths under test are reachable.
    autocompleteAdvancedProvider: null,
    autocompleteAdvancedServerEndpoint: null,
    autocompleteAdvancedModel: null,
    autocompleteAdvancedAccessToken: null,
    autocompleteAdvancedEmbeddings: true,
    autocompleteExperimentalCompleteSuggestWidgetSelection: false,
    autocompleteExperimentalSyntacticPostProcessing: false,
    autocompleteExperimentalGraphContext: false,
}

/**
 * Builds a complete `Configuration` for a test by overlaying the given
 * partial settings on top of `DEFAULT_VSCODE_SETTINGS`.
 */
const getVSCodeSettings = (config: Partial<Configuration> = {}): Configuration =>
    Object.assign({}, DEFAULT_VSCODE_SETTINGS, config)

/**
 * Minimal stub of `CodeCompletionsClient`: `complete` resolves to an empty
 * completion and `onConfigurationChange` is a no-op — all that
 * `createProviderConfig` needs in these tests.
 */
const dummyCodeCompletionsClient: CodeCompletionsClient = {
    async complete() {
        return { completion: '', stopReason: '' }
    },
    onConfigurationChange() {
        return undefined
    },
}

describe('createProviderConfig', () => {
describe('if completions provider fields are defined in VSCode settings', () => {
it('returns null if completions provider is not supported', async () => {
const provider = await createProviderConfig(
getVSCodeSettings({
autocompleteAdvancedProvider: 'nasa-ai' as Configuration['autocompleteAdvancedProvider'],
}),
dummyCodeCompletionsClient,
undefined,
{}
)
expect(provider).toBeNull()
})

it('returns "codegen" provider config if the corresponding provider name and endpoint are specified', async () => {
const provider = await createProviderConfig(
getVSCodeSettings({
autocompleteAdvancedProvider: 'unstable-codegen',
autocompleteAdvancedServerEndpoint: 'https://unstable-codegen.com',
}),
dummyCodeCompletionsClient,
undefined,
{}
)
expect(provider?.identifier).toBe('codegen')
expect(provider?.model).toBe('codegen')
})

it('returns null if provider is "unstable-codegen", but the server endpoint is not set', async () => {
const provider = await createProviderConfig(
getVSCodeSettings({ autocompleteAdvancedProvider: 'unstable-codegen' }),
dummyCodeCompletionsClient,
undefined,
{}
)
expect(provider).toBeNull()
})

it('returns "fireworks" provider config and corresponding model if specified', async () => {
const provider = await createProviderConfig(
getVSCodeSettings({
autocompleteAdvancedProvider: 'unstable-fireworks',
autocompleteAdvancedModel: 'starcoder-3b',
}),
dummyCodeCompletionsClient,
undefined,
{}
)
expect(provider?.identifier).toBe('fireworks')
expect(provider?.model).toBe('starcoder-3b')
})

it('returns "fireworks" provider config if specified in settings and default model', async () => {
const provider = await createProviderConfig(
getVSCodeSettings({ autocompleteAdvancedProvider: 'unstable-fireworks' }),
dummyCodeCompletionsClient,
undefined,
{}
)
expect(provider?.identifier).toBe('fireworks')
expect(provider?.model).toBe('starcoder-7b')
})

it('returns "openai" provider config if specified in VSCode settings; model is ignored', async () => {
const provider = await createProviderConfig(
getVSCodeSettings({
autocompleteAdvancedProvider: 'unstable-openai',
autocompleteAdvancedModel: 'hello-world',
}),
dummyCodeCompletionsClient,
undefined,
{}
)
expect(provider?.identifier).toBe('unstable-openai')
expect(provider?.model).toBe('gpt-35-turbo')
})

it('returns "anthropic" provider config if specified in VSCode settings; model is ignored', async () => {
const provider = await createProviderConfig(
getVSCodeSettings({
autocompleteAdvancedProvider: 'anthropic',
autocompleteAdvancedModel: 'hello-world',
}),
dummyCodeCompletionsClient,
undefined,
{}
)
expect(provider?.identifier).toBe('anthropic')
expect(provider?.model).toBe('claude-instant-1')
})

it('provider specified in VSCode settings takes precedence over the one defined in the site config', async () => {
const provider = await createProviderConfig(
getVSCodeSettings({
autocompleteAdvancedProvider: 'unstable-codegen',
autocompleteAdvancedServerEndpoint: 'https://unstable-codegen.com',
}),
dummyCodeCompletionsClient,
undefined,
{ provider: 'azure-open-ai', completionModel: 'gpt-35-turbo-test' }
)
expect(provider?.identifier).toBe('codegen')
expect(provider?.model).toBe('codegen')
})
})

describe('completions provider and model are defined in the site config and not set in VSCode settings', () => {
describe('if provider is "sourcegraph"', () => {
const testCases: {
codyLLMConfig: CodyLLMSiteConfiguration
expected: { provider: string; model?: string } | null
}[] = [
// sourcegraph
{ codyLLMConfig: { provider: 'sourcegraph', completionModel: 'hello-world' }, expected: null },
{
codyLLMConfig: { provider: 'sourcegraph', completionModel: 'anthropic/claude-instant-1' },
expected: { provider: 'anthropic', model: 'claude-instant-1' },
},
{
codyLLMConfig: { provider: 'sourcegraph', completionModel: 'anthropic/' },
expected: null,
},
{
codyLLMConfig: { provider: 'sourcegraph', completionModel: '/claude-instant-1' },
expected: null,
},

// aws-bedrock
{ codyLLMConfig: { provider: 'aws-bedrock', completionModel: 'hello-world' }, expected: null },
{
codyLLMConfig: { provider: 'aws-bedrock', completionModel: 'anthropic.claude-instant-1' },
expected: { provider: 'anthropic', model: 'claude-instant-1' },
},
{
codyLLMConfig: { provider: 'aws-bedrock', completionModel: 'anthropic.' },
expected: null,
},
{
codyLLMConfig: { provider: 'aws-bedrock', completionModel: 'anthropic/claude-instant-1' },
expected: null,
},

// open-ai
{
codyLLMConfig: { provider: 'openai', completionModel: 'gpt-35-turbo-test' },
expected: { provider: 'unstable-openai', model: 'gpt-35-turbo-test' },
},
{
codyLLMConfig: { provider: 'openai' },
expected: { provider: 'unstable-openai', model: 'gpt-35-turbo' },
},

// azure-openai
{
codyLLMConfig: { provider: 'azure-openai', completionModel: 'gpt-35-turbo-test' },
expected: { provider: 'unstable-openai', model: 'gpt-35-turbo-test' },
},
{
codyLLMConfig: { provider: 'azure-openai' },
expected: { provider: 'unstable-openai', model: 'gpt-35-turbo' },
},

// fireworks
{
codyLLMConfig: { provider: 'fireworks', completionModel: 'llama-code-7b' },
expected: { provider: 'fireworks', model: 'llama-code-7b' },
},
{
codyLLMConfig: { provider: 'fireworks' },
expected: { provider: 'fireworks', model: 'starcoder-7b' },
},

// unknown-provider
{
taras-yemets marked this conversation as resolved.
Show resolved Hide resolved
codyLLMConfig: { provider: 'unknown-provider', completionModel: 'llama-code-7b' },
expected: null,
},
]

for (const { codyLLMConfig, expected } of testCases) {
it(`returns ${JSON.stringify(expected)} when cody LLM config is ${JSON.stringify(
codyLLMConfig
)}`, async () => {
const provider = await createProviderConfig(
getVSCodeSettings(),
dummyCodeCompletionsClient,
undefined,
codyLLMConfig
)
if (expected === null) {
expect(provider).toBeNull()
} else {
expect(provider?.identifier).toBe(expected.provider)
expect(provider?.model).toBe(expected.model)
}
})
}
})
})

it('returns anthropic provider config if no completions provider specified in VSCode settings or site config', async () => {
const provider = await createProviderConfig(getVSCodeSettings(), dummyCodeCompletionsClient, undefined, {})
expect(provider?.identifier).toBe('anthropic')
expect(provider?.model).toBe('claude-instant-1')
})
})