Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

configure autocomplete provider based on cody LLM settings in site config #1035

Merged
33 changes: 31 additions & 2 deletions lib/shared/src/sourcegraph-api/graphql/client.ts
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@ import { DOTCOM_URL, isDotCom } from '../environments'

import {
CURRENT_SITE_CODY_LLM_CONFIGURATION,
CURRENT_SITE_CODY_LLM_PROVIDER,
CURRENT_SITE_GRAPHQL_FIELDS_QUERY,
CURRENT_SITE_HAS_CODY_ENABLED_QUERY,
CURRENT_SITE_IDENTIFICATION,
Expand Down Expand Up @@ -57,6 +58,14 @@ interface CurrentUserIdHasVerifiedEmailResponse {
currentUser: { id: string; hasVerifiedEmail: boolean } | null
}

/**
 * GraphQL response shape for CURRENT_SITE_CODY_LLM_CONFIGURATION: the full
 * Cody LLM site configuration except `provider`, which is fetched by a
 * separate query for backward compatibility with older instances.
 */
interface CodyLLMSiteConfigurationResponse {
site: { codyLLMConfiguration: Omit<CodyLLMSiteConfiguration, 'provider'> | null } | null
}

/**
 * GraphQL response shape for CURRENT_SITE_CODY_LLM_PROVIDER: only the
 * `provider` field, requested on its own so that instances that predate the
 * field can still answer the main configuration query.
 */
interface CodyLLMSiteConfigurationProviderResponse {
site: { codyLLMConfiguration: Pick<CodyLLMSiteConfiguration, 'provider'> | null } | null
}

/** GraphQL response shape for resolving a repository name to its ID; `repository` is null when the repo is unknown. */
interface RepositoryIdResponse {
repository: { id: string } | null
}
Expand Down Expand Up @@ -140,6 +149,7 @@ export interface CodyLLMSiteConfiguration {
fastChatModelMaxTokens?: number
completionModel?: string
completionModelMaxTokens?: number
provider?: string
}

interface IsContextRequiredForChatQueryResponse {
Expand Down Expand Up @@ -274,9 +284,28 @@ export class SourcegraphGraphQLAPIClient {
}

/**
 * Fetches the site's Cody LLM configuration together with the LLM provider
 * name.
 *
 * The `provider` field is requested in a separate GraphQL query because
 * older Sourcegraph instances do not expose it; a failure of that query is
 * tolerated and simply leaves `provider` undefined, so the rest of the
 * configuration still works against old instances.
 *
 * @returns the merged configuration, `undefined` when the site exposes no
 * Cody LLM configuration, or an `Error` when the main configuration query
 * fails.
 */
public async getCodyLLMConfiguration(): Promise<undefined | CodyLLMSiteConfiguration | Error> {
    // Fetch the Cody LLM provider separately for backward compatibility.
    const [configResponse, providerResponse] = await Promise.all([
        this.fetchSourcegraphAPI<APIResponse<CodyLLMSiteConfigurationResponse>>(
            CURRENT_SITE_CODY_LLM_CONFIGURATION
        ),
        this.fetchSourcegraphAPI<APIResponse<CodyLLMSiteConfigurationProviderResponse>>(
            CURRENT_SITE_CODY_LLM_PROVIDER
        ),
    ])

    const config = extractDataOrError(configResponse, data => data.site?.codyLLMConfiguration || undefined)
    if (!config || isError(config)) {
        // Propagate `undefined` (no config) or the error from the main query.
        return config
    }

    // A failing provider query is not fatal — fall back to `undefined`.
    let provider: string | undefined
    const llmProvider = extractDataOrError(providerResponse, data => data.site?.codyLLMConfiguration?.provider)
    if (llmProvider && !isError(llmProvider)) {
        provider = llmProvider
    }

    return { ...config, provider }
}

public async getRepoIds(names: string[]): Promise<{ id: string; name: string }[] | Error> {
Expand Down
9 changes: 9 additions & 0 deletions lib/shared/src/sourcegraph-api/graphql/queries.ts
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,15 @@ query CurrentUser {
}
}`

// Standalone query for the Cody LLM provider name. Kept separate from
// CURRENT_SITE_CODY_LLM_CONFIGURATION for backward compatibility: older
// Sourcegraph instances do not expose the `provider` field, and including it
// in the main query would make the whole query fail on them.
export const CURRENT_SITE_CODY_LLM_PROVIDER = `
query CurrentSiteCodyLlmConfiguration {
site {
codyLLMConfiguration {
provider
}
}
}`

export const CURRENT_SITE_CODY_LLM_CONFIGURATION = `
query CurrentSiteCodyLlmConfiguration {
site {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@ export async function createInlineCompletionItemProvider({
const disposables: vscode.Disposable[] = []

const [providerConfig, graphContextFlag] = await Promise.all([
createProviderConfig(config, client, featureFlagProvider),
createProviderConfig(config, client, featureFlagProvider, authProvider.getAuthStatus().configOverwrites),
featureFlagProvider?.evaluateFeatureFlag(FeatureFlag.CodyAutocompleteGraphContext),
])
if (providerConfig) {
Expand Down
264 changes: 264 additions & 0 deletions vscode/src/completions/providers/createProvider.test.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,264 @@
import { describe, expect, it } from 'vitest'

import { Configuration } from '@sourcegraph/cody-shared/src/configuration'
import { DOTCOM_URL } from '@sourcegraph/cody-shared/src/sourcegraph-api/environments'
import { CodyLLMSiteConfiguration } from '@sourcegraph/cody-shared/src/sourcegraph-api/graphql/client'

import { CodeCompletionsClient } from '../client'

import { createProviderConfig } from './createProvider'

// A complete `Configuration` fixture used as the base for every test below;
// individual tests override fields via `getVSCodeSettings`.
// NOTE(review): values presumably mirror the extension's real defaults —
// TODO confirm they stay in sync when the Configuration type changes.
const DEFAULT_VSCODE_SETTINGS: Configuration = {
serverEndpoint: DOTCOM_URL.href,
proxy: null,
codebase: '',
customHeaders: {},
chatPreInstruction: 'My name is John Doe.',
useContext: 'embeddings',
autocomplete: true,
experimentalCommandLenses: false,
experimentalEditorTitleCommandIcon: false,
experimentalChatPredictions: false,
experimentalGuardrails: false,
experimentalLocalSymbols: false,
inlineChat: true,
isRunningInsideAgent: false,
experimentalNonStop: false,
experimentalSymfAnthropicKey: '',
experimentalSymfPath: 'symf',
debugEnable: false,
debugVerbose: false,
debugFilter: null,
telemetryLevel: 'all',
// `null` means "not set by the user" for all autocompleteAdvanced* fields.
autocompleteAdvancedProvider: null,
autocompleteAdvancedServerEndpoint: null,
autocompleteAdvancedModel: null,
autocompleteAdvancedAccessToken: null,
autocompleteAdvancedEmbeddings: true,
autocompleteExperimentalCompleteSuggestWidgetSelection: false,
autocompleteExperimentalSyntacticPostProcessing: false,
autocompleteExperimentalGraphContext: false,
}

/** Builds a full VS Code `Configuration` by overlaying the given overrides on the test defaults. */
function getVSCodeSettings(overrides: Partial<Configuration> = {}): Configuration {
    return { ...DEFAULT_VSCODE_SETTINGS, ...overrides }
}

// Minimal stub client: completions always resolve to an empty string, and
// configuration changes are ignored. Enough for createProviderConfig, which
// only needs a client instance, not real completions.
const dummyCodeCompletionsClient: CodeCompletionsClient = {
complete: () => Promise.resolve({ completion: '', stopReason: '' }),
onConfigurationChange: () => undefined,
}

describe('createProviderConfig', () => {
describe('if completions provider fields are defined in VSCode settings', () => {
it('returns null if completions provider is not supported', async () => {
const provider = await createProviderConfig(
getVSCodeSettings({
autocompleteAdvancedProvider: 'nasa-ai' as Configuration['autocompleteAdvancedProvider'],
}),
dummyCodeCompletionsClient,
undefined,
{}
)
expect(provider).toBeNull()
})

it('returns "codegen" provider config if the corresponding provider name and endpoint are specified', async () => {
const provider = await createProviderConfig(
getVSCodeSettings({
autocompleteAdvancedProvider: 'unstable-codegen',
autocompleteAdvancedServerEndpoint: 'https://unstable-codegen.com',
}),
dummyCodeCompletionsClient,
undefined,
{}
)
expect(provider?.identifier).toBe('codegen')
expect(provider?.model).toBe('codegen')
})

it('returns null if provider is "unstable-codegen", but the server endpoint is not set', async () => {
const provider = await createProviderConfig(
getVSCodeSettings({ autocompleteAdvancedProvider: 'unstable-codegen' }),
dummyCodeCompletionsClient,
undefined,
{}
)
expect(provider).toBeNull()
})

it('returns "fireworks" provider config and corresponding model if specified', async () => {
const provider = await createProviderConfig(
getVSCodeSettings({
autocompleteAdvancedProvider: 'unstable-fireworks',
autocompleteAdvancedModel: 'starcoder-3b',
}),
dummyCodeCompletionsClient,
undefined,
{}
)
expect(provider?.identifier).toBe('fireworks')
expect(provider?.model).toBe('starcoder-3b')
})

it('returns "fireworks" provider config if specified in settings and default model', async () => {
const provider = await createProviderConfig(
getVSCodeSettings({ autocompleteAdvancedProvider: 'unstable-fireworks' }),
dummyCodeCompletionsClient,
undefined,
{}
)
expect(provider?.identifier).toBe('fireworks')
expect(provider?.model).toBe('starcoder-7b')
})

it('returns "openai" provider config if specified in VSCode settings; model is ignored', async () => {
const provider = await createProviderConfig(
getVSCodeSettings({
autocompleteAdvancedProvider: 'unstable-openai',
autocompleteAdvancedModel: 'hello-world',
}),
dummyCodeCompletionsClient,
undefined,
{}
)
expect(provider?.identifier).toBe('unstable-openai')
expect(provider?.model).toBe('gpt-35-turbo')
})

it('returns "anthropic" provider config if specified in VSCode settings; model is ignored', async () => {
const provider = await createProviderConfig(
getVSCodeSettings({
autocompleteAdvancedProvider: 'anthropic',
autocompleteAdvancedModel: 'hello-world',
}),
dummyCodeCompletionsClient,
undefined,
{}
)
expect(provider?.identifier).toBe('anthropic')
expect(provider?.model).toBe('claude-instant-1')
})

it('provider specified in VSCode settings takes precedence over the one defined in the site config', async () => {
const provider = await createProviderConfig(
getVSCodeSettings({
autocompleteAdvancedProvider: 'unstable-codegen',
autocompleteAdvancedServerEndpoint: 'https://unstable-codegen.com',
}),
dummyCodeCompletionsClient,
undefined,
{ provider: 'azure-open-ai', completionModel: 'gpt-35-turbo-test' }
)
expect(provider?.identifier).toBe('codegen')
expect(provider?.model).toBe('codegen')
})
})

describe('completions provider and model are defined in the site config and not set in VSCode settings', () => {
describe('if provider is "sourcegraph"', () => {
const testCases: {
codyLLMConfig: CodyLLMSiteConfiguration
expected: { provider: string; model?: string } | null
}[] = [
// sourcegraph
{ codyLLMConfig: { provider: 'sourcegraph', completionModel: 'hello-world' }, expected: null },
{
codyLLMConfig: { provider: 'sourcegraph', completionModel: 'anthropic/claude-instant-1' },
expected: { provider: 'anthropic', model: 'claude-instant-1' },
},
{
codyLLMConfig: { provider: 'sourcegraph', completionModel: 'anthropic/' },
expected: null,
},
{
codyLLMConfig: { provider: 'sourcegraph', completionModel: '/claude-instant-1' },
expected: null,
},

// aws-bedrock
{ codyLLMConfig: { provider: 'aws-bedrock', completionModel: 'hello-world' }, expected: null },
{
codyLLMConfig: { provider: 'aws-bedrock', completionModel: 'anthropic.claude-instant-1' },
expected: { provider: 'anthropic', model: 'claude-instant-1' },
},
{
codyLLMConfig: { provider: 'aws-bedrock', completionModel: 'anthropic.' },
expected: null,
},
{
codyLLMConfig: { provider: 'aws-bedrock', completionModel: 'anthropic/claude-instant-1' },
expected: null,
},

// open-ai
{
codyLLMConfig: { provider: 'openai', completionModel: 'gpt-35-turbo-test' },
expected: { provider: 'unstable-openai', model: 'gpt-35-turbo-test' },
},
{
codyLLMConfig: { provider: 'openai' },
expected: { provider: 'unstable-openai', model: 'gpt-35-turbo' },
},

// azure-openai
{
codyLLMConfig: { provider: 'azure-openai', completionModel: 'gpt-35-turbo-test' },
expected: { provider: 'unstable-openai', model: '' },
},
{
codyLLMConfig: { provider: 'azure-openai' },
expected: { provider: 'unstable-openai', model: 'gpt-35-turbo' },
},

// fireworks
{
codyLLMConfig: { provider: 'fireworks', completionModel: 'llama-code-7b' },
expected: { provider: 'fireworks', model: 'llama-code-7b' },
},
{
codyLLMConfig: { provider: 'fireworks' },
expected: { provider: 'fireworks', model: 'starcoder-7b' },
},

// unknown-provider
{
taras-yemets marked this conversation as resolved.
Show resolved Hide resolved
codyLLMConfig: { provider: 'unknown-provider', completionModel: 'llama-code-7b' },
expected: null,
},

// provider not defined (backward compat)
{
codyLLMConfig: { provider: undefined, completionModel: 'llama-code-7b' },
expected: { provider: 'anthropic', model: 'claude-instant-1' },
},
]

for (const { codyLLMConfig, expected } of testCases) {
it(`returns ${JSON.stringify(expected)} when cody LLM config is ${JSON.stringify(
codyLLMConfig
)}`, async () => {
const provider = await createProviderConfig(
getVSCodeSettings(),
dummyCodeCompletionsClient,
undefined,
codyLLMConfig
)
if (expected === null) {
expect(provider).toBeNull()
} else {
expect(provider?.identifier).toBe(expected.provider)
expect(provider?.model).toBe(expected.model)
}
})
}
})
})

it('returns anthropic provider config if no completions provider specified in VSCode settings or site config', async () => {
const provider = await createProviderConfig(getVSCodeSettings(), dummyCodeCompletionsClient, undefined, {})
expect(provider?.identifier).toBe('anthropic')
expect(provider?.model).toBe('claude-instant-1')
})
})
Loading