From 463fd3b203aecb775b011b99dc47d16972203ade Mon Sep 17 00:00:00 2001 From: Taras Yemets Date: Wed, 13 Sep 2023 12:13:44 +0300 Subject: [PATCH 01/10] initial commit --- lib/shared/src/configuration.ts | 23 +++++++++---- .../src/sourcegraph-api/graphql/client.ts | 1 + .../src/sourcegraph-api/graphql/queries.ts | 1 + ...reateVSCodeInlineCompletionItemProvider.ts | 2 +- .../completions/providers/createProvider.ts | 32 ++++++++++++++++--- .../completions/providers/unstable-openai.ts | 13 ++++++-- 6 files changed, 56 insertions(+), 16 deletions(-) diff --git a/lib/shared/src/configuration.ts b/lib/shared/src/configuration.ts index 7889e165a46..45ae0846acf 100644 --- a/lib/shared/src/configuration.ts +++ b/lib/shared/src/configuration.ts @@ -1,5 +1,13 @@ export type ConfigurationUseContext = 'embeddings' | 'keyword' | 'none' | 'blended' | 'unified' +export const autocompleteAdvancedProviders = [ + 'anthropic', + 'unstable-codegen', + 'unstable-fireworks', + 'unstable-azure-openai', + 'unstable-openai', +] as const + // Should we share VS Code specific config via cody-shared? export interface Configuration { serverEndpoint: string @@ -22,13 +30,7 @@ export interface Configuration { experimentalLocalSymbols: boolean experimentalSymfPath: string experimentalSymfAnthropicKey: string - autocompleteAdvancedProvider: - | 'anthropic' - | 'unstable-codegen' - | 'unstable-fireworks' - | 'unstable-azure-openai' - | 'unstable-openai' - | null + autocompleteAdvancedProvider: (typeof autocompleteAdvancedProviders)[number] | null autocompleteAdvancedServerEndpoint: string | null autocompleteAdvancedModel: string | null autocompleteAdvancedAccessToken: string | null @@ -43,3 +45,10 @@ export interface ConfigurationWithAccessToken extends Configuration { /** The access token, which is stored in the secret storage (not configuration). 
*/ accessToken: string | null } + +const colors = ['red', 'green', 'yellow'] as const +interface Config { + color: (typeof colors)[number] | null +} + +const color: Config['color'] = 'blue' diff --git a/lib/shared/src/sourcegraph-api/graphql/client.ts b/lib/shared/src/sourcegraph-api/graphql/client.ts index 2c0c5b412f5..b62e5c2a2cb 100644 --- a/lib/shared/src/sourcegraph-api/graphql/client.ts +++ b/lib/shared/src/sourcegraph-api/graphql/client.ts @@ -140,6 +140,7 @@ export interface CodyLLMSiteConfiguration { fastChatModelMaxTokens?: number completionModel?: string completionModelMaxTokens?: number + provider?: string } interface IsContextRequiredForChatQueryResponse { diff --git a/lib/shared/src/sourcegraph-api/graphql/queries.ts b/lib/shared/src/sourcegraph-api/graphql/queries.ts index 40608577916..79053cf4f6a 100644 --- a/lib/shared/src/sourcegraph-api/graphql/queries.ts +++ b/lib/shared/src/sourcegraph-api/graphql/queries.ts @@ -46,6 +46,7 @@ query CurrentSiteCodyLlmConfiguration { fastChatModelMaxTokens completionModel completionModelMaxTokens + provider } } }` diff --git a/vscode/src/completions/createVSCodeInlineCompletionItemProvider.ts b/vscode/src/completions/createVSCodeInlineCompletionItemProvider.ts index e754e0b0cd8..f7dc8e6970e 100644 --- a/vscode/src/completions/createVSCodeInlineCompletionItemProvider.ts +++ b/vscode/src/completions/createVSCodeInlineCompletionItemProvider.ts @@ -42,7 +42,7 @@ export async function createInlineCompletionItemProvider({ const disposables: vscode.Disposable[] = [] - const providerConfig = await createProviderConfig(config, client, featureFlagProvider) + const providerConfig = await createProviderConfig(config, client, featureFlagProvider, authProvider) if (providerConfig) { const history = new VSCodeDocumentHistory() const sectionObserver = config.autocompleteExperimentalGraphContext diff --git a/vscode/src/completions/providers/createProvider.ts b/vscode/src/completions/providers/createProvider.ts index 
dca9a560256..90d626179d4 100644 --- a/vscode/src/completions/providers/createProvider.ts +++ b/vscode/src/completions/providers/createProvider.ts @@ -2,6 +2,7 @@ import { Configuration } from '@sourcegraph/cody-shared/src/configuration' import { FeatureFlag, FeatureFlagProvider } from '@sourcegraph/cody-shared/src/experimentation/FeatureFlagProvider' import { logError } from '../../log' +import { AuthProvider } from '../../services/AuthProvider' import { CodeCompletionsClient } from '../client' import { createProviderConfig as createAnthropicProviderConfig } from './anthropic' @@ -14,9 +15,14 @@ import { createProviderConfig as createUnstableOpenAIProviderConfig } from './un export async function createProviderConfig( config: Configuration, client: CodeCompletionsClient, - featureFlagProvider?: FeatureFlagProvider + featureFlagProvider: FeatureFlagProvider, + authProvider: AuthProvider ): Promise { - const { provider, model } = await resolveDefaultProvider(config.autocompleteAdvancedProvider, featureFlagProvider) + const { provider, model } = await resolveDefaultProvider( + config.autocompleteAdvancedProvider, + featureFlagProvider, + authProvider + ) switch (provider) { case 'unstable-codegen': { if (config.autocompleteAdvancedServerEndpoint !== null) { @@ -51,16 +57,23 @@ export async function createProviderConfig( accessToken: config.autocompleteAdvancedAccessToken, }) } + case 'openai': + case 'azure-openai': case 'unstable-openai': { return createUnstableOpenAIProviderConfig({ client, contextWindowTokens: 2048, + // "unstable-openai" provider doesn't support setting a model. + // Pass model only if provider comes from the instance site config. + model: provider !== 'unstable-openai' ? model : undefined, }) } + case 'fireworks': case 'unstable-fireworks': { return createUnstableFireworksProviderConfig({ client, - model: config.autocompleteAdvancedModel ?? model ?? 
null, + // if completions provider comes from the instance site config, ignore advanced model value from the VSCode settings + model: (provider === 'unstable-fireworks' ? config.autocompleteAdvancedModel : null) ?? model ?? null, }) } case 'anthropic': { @@ -81,8 +94,9 @@ export async function createProviderConfig( async function resolveDefaultProvider( configuredProvider: string | null, - featureFlagProvider?: FeatureFlagProvider -): Promise<{ provider: string; model?: 'starcoder-7b' | 'starcoder-16b' | 'claude-instant-infill' }> { + featureFlagProvider: FeatureFlagProvider, + authProvider: AuthProvider +): Promise<{ provider: string; model?: string }> { if (configuredProvider) { return { provider: configuredProvider } } @@ -101,5 +115,13 @@ async function resolveDefaultProvider( return { provider: 'anthropic', model: 'claude-instant-infill' } } + const codyLLMSiteConfigOverwrites = authProvider.getAuthStatus().configOverwrites + const provider = codyLLMSiteConfigOverwrites?.provider + const model = codyLLMSiteConfigOverwrites?.completionModel + if (provider && provider !== 'sourcegraph') { + // https://github.com/sourcegraph/sourcegraph/blob/83166945fa80c009dd7d13b7ff97e4c7df000180/internal/conf/computed.go#L592-L601 + return { provider, model } + } + return { provider: 'anthropic' } } diff --git a/vscode/src/completions/providers/unstable-openai.ts b/vscode/src/completions/providers/unstable-openai.ts index aa306ea9706..8231cfff761 100644 --- a/vscode/src/completions/providers/unstable-openai.ts +++ b/vscode/src/completions/providers/unstable-openai.ts @@ -14,6 +14,7 @@ import { CompletionProviderTracer, Provider, ProviderConfig, ProviderOptions } f interface UnstableOpenAIOptions { client: Pick contextWindowTokens: number + model: string } const PROVIDER_IDENTIFIER = 'unstable-openai' @@ -30,11 +31,13 @@ function tokensToChars(tokens: number): number { export class UnstableOpenAIProvider extends Provider { private client: Pick private promptChars: number + 
private model: string constructor(options: ProviderOptions, azureOpenAIOptions: UnstableOpenAIOptions) { super(options) this.client = azureOpenAIOptions.client this.promptChars = tokensToChars(azureOpenAIOptions.contextWindowTokens) - tokensToChars(MAX_RESPONSE_TOKENS) + this.model = azureOpenAIOptions.model } private createPrompt(snippets: ContextSnippet[]): string { @@ -78,6 +81,7 @@ export class UnstableOpenAIProvider extends Provider { temperature: 1, topP: 0.5, stopSequences, + model: this.model, } tracer?.params(args) @@ -153,14 +157,17 @@ export class UnstableOpenAIProvider extends Provider { } } -export function createProviderConfig(unstableAzureOpenAIOptions: UnstableOpenAIOptions): ProviderConfig { +export function createProviderConfig( + unstableAzureOpenAIOptions: Omit & { model?: string } +): ProviderConfig { + const model = unstableAzureOpenAIOptions.model || 'gpt-35-turbo' return { create(options: ProviderOptions) { - return new UnstableOpenAIProvider(options, { ...unstableAzureOpenAIOptions }) + return new UnstableOpenAIProvider(options, { ...unstableAzureOpenAIOptions, model }) }, maximumContextCharacters: tokensToChars(unstableAzureOpenAIOptions.contextWindowTokens), enableExtendedMultilineTriggers: false, identifier: PROVIDER_IDENTIFIER, - model: 'gpt-35-turbo', + model, } } From 7510a2f593c6b0e90dfea9d754906113e370cf58 Mon Sep 17 00:00:00 2001 From: Taras Yemets Date: Wed, 13 Sep 2023 12:14:17 +0300 Subject: [PATCH 02/10] wip: handle providers from VSCode and site configs separately --- .../completions/providers/createProvider.ts | 172 +++++++++++------- 1 file changed, 104 insertions(+), 68 deletions(-) diff --git a/vscode/src/completions/providers/createProvider.ts b/vscode/src/completions/providers/createProvider.ts index 90d626179d4..1e7678dce10 100644 --- a/vscode/src/completions/providers/createProvider.ts +++ b/vscode/src/completions/providers/createProvider.ts @@ -1,8 +1,8 @@ import { Configuration } from 
'@sourcegraph/cody-shared/src/configuration' import { FeatureFlag, FeatureFlagProvider } from '@sourcegraph/cody-shared/src/experimentation/FeatureFlagProvider' +import { CodyLLMSiteConfiguration } from '@sourcegraph/cody-shared/src/sourcegraph-api/graphql/client' import { logError } from '../../log' -import { AuthProvider } from '../../services/AuthProvider' import { CodeCompletionsClient } from '../client' import { createProviderConfig as createAnthropicProviderConfig } from './anthropic' @@ -12,91 +12,124 @@ import { createProviderConfig as createUnstableCodeGenProviderConfig } from './u import { createProviderConfig as createUnstableFireworksProviderConfig } from './unstable-fireworks' import { createProviderConfig as createUnstableOpenAIProviderConfig } from './unstable-openai' +const DEFAULT_PROVIDER: { provider: string; model?: string } = { provider: 'anthropic' } + export async function createProviderConfig( config: Configuration, client: CodeCompletionsClient, featureFlagProvider: FeatureFlagProvider, - authProvider: AuthProvider + codyLLMSiteConfig?: CodyLLMSiteConfiguration ): Promise { - const { provider, model } = await resolveDefaultProvider( + const providerFromVSCodeConfig = await resolveDefaultProviderFromVSCodeConfig( config.autocompleteAdvancedProvider, - featureFlagProvider, - authProvider + featureFlagProvider ) - switch (provider) { - case 'unstable-codegen': { - if (config.autocompleteAdvancedServerEndpoint !== null) { - return createUnstableCodeGenProviderConfig(config.autocompleteAdvancedServerEndpoint) - } + if (providerFromVSCodeConfig) { + const { provider, model } = providerFromVSCodeConfig + + switch (provider) { + case 'unstable-codegen': { + if (config.autocompleteAdvancedServerEndpoint !== null) { + return createUnstableCodeGenProviderConfig(config.autocompleteAdvancedServerEndpoint) + } - logError( - 'createProviderConfig', - 'Provider `unstable-codegen` can not be used without configuring 
`cody.autocomplete.advanced.serverEndpoint`.' - ) - return null - } - case 'unstable-azure-openai': { - if (config.autocompleteAdvancedServerEndpoint === null) { logError( 'createProviderConfig', - 'Provider `unstable-azure-openai` can not be used without configuring `cody.autocomplete.advanced.serverEndpoint`.' + 'Provider `unstable-codegen` can not be used without configuring `cody.autocomplete.advanced.serverEndpoint`.' ) return null } + case 'unstable-azure-openai': { + if (config.autocompleteAdvancedServerEndpoint === null) { + logError( + 'createProviderConfig', + 'Provider `unstable-azure-openai` can not be used without configuring `cody.autocomplete.advanced.serverEndpoint`.' + ) + return null + } + + if (config.autocompleteAdvancedAccessToken === null) { + logError( + 'createProviderConfig', + 'Provider `unstable-azure-openai` can not be used without configuring `cody.autocomplete.advanced.accessToken`.' + ) + return null + } - if (config.autocompleteAdvancedAccessToken === null) { + return createUnstableAzureOpenAiProviderConfig({ + serverEndpoint: config.autocompleteAdvancedServerEndpoint, + accessToken: config.autocompleteAdvancedAccessToken, + }) + } + case 'unstable-openai': { + return createUnstableOpenAIProviderConfig({ + client, + contextWindowTokens: 2048, + }) + } + case 'unstable-fireworks': { + return createUnstableFireworksProviderConfig({ + client, + model: config.autocompleteAdvancedModel ?? model ?? null, + }) + } + case 'anthropic': { + return createAnthropicProviderConfig({ + client, + contextWindowTokens: 2048, + mode: config.autocompleteAdvancedModel === 'claude-instant-infill' ? 'infill' : 'default', + }) + } + default: logError( 'createProviderConfig', - 'Provider `unstable-azure-openai` can not be used without configuring `cody.autocomplete.advanced.accessToken`.' 
+ `Unrecognized provider '${config.autocompleteAdvancedProvider}' configured.` ) return null - } - - return createUnstableAzureOpenAiProviderConfig({ - serverEndpoint: config.autocompleteAdvancedServerEndpoint, - accessToken: config.autocompleteAdvancedAccessToken, - }) - } - case 'openai': - case 'azure-openai': - case 'unstable-openai': { - return createUnstableOpenAIProviderConfig({ - client, - contextWindowTokens: 2048, - // "unstable-openai" provider doesn't support setting a model. - // Pass model only if provider comes from the instance site config. - model: provider !== 'unstable-openai' ? model : undefined, - }) } - case 'fireworks': - case 'unstable-fireworks': { - return createUnstableFireworksProviderConfig({ - client, - // if completions provider comes from the instance site config, ignore advanced model value from the VSCode settings - model: (provider === 'unstable-fireworks' ? config.autocompleteAdvancedModel : null) ?? model ?? null, - }) - } - case 'anthropic': { - return createAnthropicProviderConfig({ - client, - contextWindowTokens: 2048, - mode: config.autocompleteAdvancedModel === 'claude-instant-infill' ? 'infill' : 'default', - }) + } + + const providerFromSiteConfig = codyLLMSiteConfig ? resolveDefaultProviderFromSiteConfig(codyLLMSiteConfig) : null + if (providerFromSiteConfig) { + const { provider, model } = providerFromSiteConfig + + switch (provider) { + case 'openai': + case 'azure-openai': + return createUnstableOpenAIProviderConfig({ + client, + contextWindowTokens: 2048, + model, + }) + + case 'fireworks': + return createUnstableFireworksProviderConfig({ + client, + model: model ?? null, + }) + case 'anthropic': + case 'sourcegraph': + return createAnthropicProviderConfig({ + client, + contextWindowTokens: 2048, + mode: config.autocompleteAdvancedModel === 'claude-instant-infill' ? 'infill' : 'default', + // TODO: pass model name if provider is anthropic + // model: provider === 'anthropic' ? 
model : undefined, + }) + default: + logError('createProviderConfig', `Unrecognized provider '${provider}' configured.`) + return null } - default: - logError( - 'createProviderConfig', - `Unrecognized provider '${config.autocompleteAdvancedProvider}' configured.` - ) - return null } + + // TODO: return default provider (anthropic) config instead + return null } -async function resolveDefaultProvider( +async function resolveDefaultProviderFromVSCodeConfig( configuredProvider: string | null, - featureFlagProvider: FeatureFlagProvider, - authProvider: AuthProvider -): Promise<{ provider: string; model?: string }> { + featureFlagProvider?: FeatureFlagProvider +): Promise<{ provider: string; model?: 'starcoder-7b' | 'starcoder-16b' | 'claude-instant-infill' } | null> { if (configuredProvider) { return { provider: configuredProvider } } @@ -115,13 +148,16 @@ async function resolveDefaultProvider( return { provider: 'anthropic', model: 'claude-instant-infill' } } - const codyLLMSiteConfigOverwrites = authProvider.getAuthStatus().configOverwrites - const provider = codyLLMSiteConfigOverwrites?.provider - const model = codyLLMSiteConfigOverwrites?.completionModel + return null +} + +function resolveDefaultProviderFromSiteConfig({ + provider, + completionModel, +}: CodyLLMSiteConfiguration): { provider: string; model?: string } | null { if (provider && provider !== 'sourcegraph') { // https://github.com/sourcegraph/sourcegraph/blob/83166945fa80c009dd7d13b7ff97e4c7df000180/internal/conf/computed.go#L592-L601 - return { provider, model } + return { provider, model: completionModel } } - - return { provider: 'anthropic' } + return null } From cea48245d06bf9905c60d664db4a63e26ea2f8af Mon Sep 17 00:00:00 2001 From: Taras Yemets Date: Wed, 13 Sep 2023 16:49:25 +0300 Subject: [PATCH 03/10] wip --- lib/shared/src/configuration.ts | 23 ++-- ...reateVSCodeInlineCompletionItemProvider.ts | 7 +- .../completions/providers/createProvider.ts | 105 ++++++++++++------ 
.../completions/providers/unstable-openai.ts | 16 +-- 4 files changed, 93 insertions(+), 58 deletions(-) diff --git a/lib/shared/src/configuration.ts b/lib/shared/src/configuration.ts index 45ae0846acf..7889e165a46 100644 --- a/lib/shared/src/configuration.ts +++ b/lib/shared/src/configuration.ts @@ -1,13 +1,5 @@ export type ConfigurationUseContext = 'embeddings' | 'keyword' | 'none' | 'blended' | 'unified' -export const autocompleteAdvancedProviders = [ - 'anthropic', - 'unstable-codegen', - 'unstable-fireworks', - 'unstable-azure-openai', - 'unstable-openai', -] as const - // Should we share VS Code specific config via cody-shared? export interface Configuration { serverEndpoint: string @@ -30,7 +22,13 @@ export interface Configuration { experimentalLocalSymbols: boolean experimentalSymfPath: string experimentalSymfAnthropicKey: string - autocompleteAdvancedProvider: (typeof autocompleteAdvancedProviders)[number] | null + autocompleteAdvancedProvider: + | 'anthropic' + | 'unstable-codegen' + | 'unstable-fireworks' + | 'unstable-azure-openai' + | 'unstable-openai' + | null autocompleteAdvancedServerEndpoint: string | null autocompleteAdvancedModel: string | null autocompleteAdvancedAccessToken: string | null @@ -45,10 +43,3 @@ export interface ConfigurationWithAccessToken extends Configuration { /** The access token, which is stored in the secret storage (not configuration). 
*/ accessToken: string | null } - -const colors = ['red', 'green', 'yellow'] as const -interface Config { - color: (typeof colors)[number] | null -} - -const color: Config['color'] = 'blue' diff --git a/vscode/src/completions/createVSCodeInlineCompletionItemProvider.ts b/vscode/src/completions/createVSCodeInlineCompletionItemProvider.ts index f7dc8e6970e..9e88b52507f 100644 --- a/vscode/src/completions/createVSCodeInlineCompletionItemProvider.ts +++ b/vscode/src/completions/createVSCodeInlineCompletionItemProvider.ts @@ -42,7 +42,12 @@ export async function createInlineCompletionItemProvider({ const disposables: vscode.Disposable[] = [] - const providerConfig = await createProviderConfig(config, client, featureFlagProvider, authProvider) + const providerConfig = await createProviderConfig( + config, + client, + featureFlagProvider, + authProvider.getAuthStatus().configOverwrites + ) if (providerConfig) { const history = new VSCodeDocumentHistory() const sectionObserver = config.autocompleteExperimentalGraphContext diff --git a/vscode/src/completions/providers/createProvider.ts b/vscode/src/completions/providers/createProvider.ts index 1e7678dce10..94f82bf1a08 100644 --- a/vscode/src/completions/providers/createProvider.ts +++ b/vscode/src/completions/providers/createProvider.ts @@ -12,20 +12,27 @@ import { createProviderConfig as createUnstableCodeGenProviderConfig } from './u import { createProviderConfig as createUnstableFireworksProviderConfig } from './unstable-fireworks' import { createProviderConfig as createUnstableOpenAIProviderConfig } from './unstable-openai' -const DEFAULT_PROVIDER: { provider: string; model?: string } = { provider: 'anthropic' } - export async function createProviderConfig( config: Configuration, client: CodeCompletionsClient, featureFlagProvider: FeatureFlagProvider, codyLLMSiteConfig?: CodyLLMSiteConfiguration ): Promise { - const providerFromVSCodeConfig = await resolveDefaultProviderFromVSCodeConfig( + const 
defaultAnthropicProviderConfig = createAnthropicProviderConfig({ + client, + contextWindowTokens: 2048, + mode: config.autocompleteAdvancedModel === 'claude-instant-infill' ? 'infill' : 'default', + }) + + /** + * Look for the autocomplete provider in VSCode settings and return matching provider config. + */ + const providerAndModelFromVSCodeConfig = await resolveDefaultProviderFromVSCodeConfigOrFeatureFlags( config.autocompleteAdvancedProvider, featureFlagProvider ) - if (providerFromVSCodeConfig) { - const { provider, model } = providerFromVSCodeConfig + if (providerAndModelFromVSCodeConfig) { + const { provider, model } = providerAndModelFromVSCodeConfig switch (provider) { case 'unstable-codegen': { @@ -74,11 +81,7 @@ export async function createProviderConfig( }) } case 'anthropic': { - return createAnthropicProviderConfig({ - client, - contextWindowTokens: 2048, - mode: config.autocompleteAdvancedModel === 'claude-instant-infill' ? 'infill' : 'default', - }) + return defaultAnthropicProviderConfig } default: logError( @@ -89,10 +92,24 @@ export async function createProviderConfig( } } - const providerFromSiteConfig = codyLLMSiteConfig ? resolveDefaultProviderFromSiteConfig(codyLLMSiteConfig) : null - if (providerFromSiteConfig) { - const { provider, model } = providerFromSiteConfig - + /** + * If autocomplete provider is not defined in the VSCode settings, + * check the completions provider in the connected Sourcegraph instance site config + * and return the matching provider config. 
+ */ + if (codyLLMSiteConfig?.provider) { + const parsed = parseProviderAndModel({ + provider: codyLLMSiteConfig.provider, + model: codyLLMSiteConfig.completionModel, + }) + if (!parsed) { + logError( + 'createProviderConfig', + `Failed to parse the model name for '${codyLLMSiteConfig.provider}' completions provider.` + ) + return null + } + const { provider, model } = parsed switch (provider) { case 'openai': case 'azure-openai': @@ -107,26 +124,23 @@ export async function createProviderConfig( client, model: model ?? null, }) + case 'aws-bedrock': case 'anthropic': - case 'sourcegraph': - return createAnthropicProviderConfig({ - client, - contextWindowTokens: 2048, - mode: config.autocompleteAdvancedModel === 'claude-instant-infill' ? 'infill' : 'default', - // TODO: pass model name if provider is anthropic - // model: provider === 'anthropic' ? model : undefined, - }) + return defaultAnthropicProviderConfig default: logError('createProviderConfig', `Unrecognized provider '${provider}' configured.`) return null } } - // TODO: return default provider (anthropic) config instead - return null + /** + * If autocomplete provider is not defined neither in VSCode nor in Sourcegraph instance site config, + * use the default provider config ("anthropic"). 
+ */ + return defaultAnthropicProviderConfig } -async function resolveDefaultProviderFromVSCodeConfig( +async function resolveDefaultProviderFromVSCodeConfigOrFeatureFlags( configuredProvider: string | null, featureFlagProvider?: FeatureFlagProvider ): Promise<{ provider: string; model?: 'starcoder-7b' | 'starcoder-16b' | 'claude-instant-infill' } | null> { @@ -151,13 +165,42 @@ async function resolveDefaultProviderFromVSCodeConfig( return null } -function resolveDefaultProviderFromSiteConfig({ +const delimeters: Record = { + sourcegraph: '/', + 'aws-bedrock': '.', +} + +/** + * For certain completions providers configured in the Sourcegraph instance site config + * the model name consists MODEL_PROVIDER and MODEL_NAME separated by a specific delimeter (see {@link delimeters}). + * + * This function checks if the given provider has a specific model naming format and: + * - if it does, parses the model name and returns the parsed provider and model names; + * - if it doesn't, returns the original provider and model names. + * + * E.g. for "sourcegraph" provider the completions model name consists of model provider and model name separated by "/". + * So when received `{ provider: "sourcegraph", model: "anthropic/claude-instant-1" }` the expected output would be `{ provider: "anthropic", model: "claude-instant-1" }`. 
+ */ +function parseProviderAndModel({ provider, - completionModel, -}: CodyLLMSiteConfiguration): { provider: string; model?: string } | null { - if (provider && provider !== 'sourcegraph') { - // https://github.com/sourcegraph/sourcegraph/blob/83166945fa80c009dd7d13b7ff97e4c7df000180/internal/conf/computed.go#L592-L601 - return { provider, model: completionModel } + model, +}: { + provider: string + model?: string +}): { provider: string; model?: string } | null { + const delimeter = delimeters[provider] + if (!delimeter) { + return { provider, model } } + + if (model) { + const index = model.indexOf(delimeter) + const parsedProvider = model.slice(0, index) + const parsedModel = model.slice(index + 1) + if (parsedProvider && parsedModel) { + return { provider: parsedProvider, model: parsedModel } + } + } + return null } diff --git a/vscode/src/completions/providers/unstable-openai.ts b/vscode/src/completions/providers/unstable-openai.ts index 8231cfff761..5a790d211a6 100644 --- a/vscode/src/completions/providers/unstable-openai.ts +++ b/vscode/src/completions/providers/unstable-openai.ts @@ -14,7 +14,6 @@ import { CompletionProviderTracer, Provider, ProviderConfig, ProviderOptions } f interface UnstableOpenAIOptions { client: Pick contextWindowTokens: number - model: string } const PROVIDER_IDENTIFIER = 'unstable-openai' @@ -31,13 +30,11 @@ function tokensToChars(tokens: number): number { export class UnstableOpenAIProvider extends Provider { private client: Pick private promptChars: number - private model: string constructor(options: ProviderOptions, azureOpenAIOptions: UnstableOpenAIOptions) { super(options) this.client = azureOpenAIOptions.client this.promptChars = tokensToChars(azureOpenAIOptions.contextWindowTokens) - tokensToChars(MAX_RESPONSE_TOKENS) - this.model = azureOpenAIOptions.model } private createPrompt(snippets: ContextSnippet[]): string { @@ -81,7 +78,6 @@ export class UnstableOpenAIProvider extends Provider { temperature: 1, topP: 0.5, 
stopSequences, - model: this.model, } tracer?.params(args) @@ -157,17 +153,17 @@ export class UnstableOpenAIProvider extends Provider { } } -export function createProviderConfig( - unstableAzureOpenAIOptions: Omit & { model?: string } -): ProviderConfig { - const model = unstableAzureOpenAIOptions.model || 'gpt-35-turbo' +export function createProviderConfig({ + model, + ...unstableAzureOpenAIOptions +}: UnstableOpenAIOptions & { model?: string }): ProviderConfig { return { create(options: ProviderOptions) { - return new UnstableOpenAIProvider(options, { ...unstableAzureOpenAIOptions, model }) + return new UnstableOpenAIProvider(options, { ...unstableAzureOpenAIOptions }) }, maximumContextCharacters: tokensToChars(unstableAzureOpenAIOptions.contextWindowTokens), enableExtendedMultilineTriggers: false, identifier: PROVIDER_IDENTIFIER, - model, + model: model || 'gpt-35-turbo', } } From 08dff296f1973deec42215971801ce819b716c64 Mon Sep 17 00:00:00 2001 From: Taras Yemets Date: Wed, 13 Sep 2023 17:12:29 +0300 Subject: [PATCH 04/10] fix types --- vscode/src/completions/providers/createProvider.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vscode/src/completions/providers/createProvider.ts b/vscode/src/completions/providers/createProvider.ts index 94f82bf1a08..a30ce221906 100644 --- a/vscode/src/completions/providers/createProvider.ts +++ b/vscode/src/completions/providers/createProvider.ts @@ -15,7 +15,7 @@ import { createProviderConfig as createUnstableOpenAIProviderConfig } from './un export async function createProviderConfig( config: Configuration, client: CodeCompletionsClient, - featureFlagProvider: FeatureFlagProvider, + featureFlagProvider?: FeatureFlagProvider, codyLLMSiteConfig?: CodyLLMSiteConfiguration ): Promise { const defaultAnthropicProviderConfig = createAnthropicProviderConfig({ From 1088f2073a03d5b2284547ef0869811a4789a205 Mon Sep 17 00:00:00 2001 From: Taras Yemets Date: Thu, 14 Sep 2023 15:53:08 +0300 Subject: [PATCH 05/10] 
add test --- .../providers/createProvider.test.ts | 258 ++++++++++++++++++ 1 file changed, 258 insertions(+) create mode 100644 vscode/src/completions/providers/createProvider.test.ts diff --git a/vscode/src/completions/providers/createProvider.test.ts b/vscode/src/completions/providers/createProvider.test.ts new file mode 100644 index 00000000000..2b7cdfb7172 --- /dev/null +++ b/vscode/src/completions/providers/createProvider.test.ts @@ -0,0 +1,258 @@ +import { describe, expect, it } from 'vitest' + +import { Configuration } from '@sourcegraph/cody-shared/src/configuration' +import { DOTCOM_URL } from '@sourcegraph/cody-shared/src/sourcegraph-api/environments' +import { CodyLLMSiteConfiguration } from '@sourcegraph/cody-shared/src/sourcegraph-api/graphql/client' + +import { CodeCompletionsClient } from '../client' + +import { createProviderConfig } from './createProvider' + +const DEFAULT_VSCODE_SETTINGS: Configuration = { + serverEndpoint: DOTCOM_URL.href, + proxy: null, + codebase: '', + customHeaders: {}, + chatPreInstruction: 'My name is John Doe.', + useContext: 'embeddings', + autocomplete: true, + experimentalCommandLenses: false, + experimentalEditorTitleCommandIcon: false, + experimentalChatPredictions: false, + experimentalGuardrails: false, + experimentalLocalSymbols: false, + inlineChat: true, + isRunningInsideAgent: false, + experimentalNonStop: false, + experimentalSymfAnthropicKey: '', + experimentalSymfPath: 'symf', + debugEnable: false, + debugVerbose: false, + debugFilter: null, + telemetryLevel: 'all', + autocompleteAdvancedProvider: null, + autocompleteAdvancedServerEndpoint: null, + autocompleteAdvancedModel: null, + autocompleteAdvancedAccessToken: null, + autocompleteAdvancedEmbeddings: true, + autocompleteExperimentalCompleteSuggestWidgetSelection: false, + autocompleteExperimentalSyntacticPostProcessing: false, + autocompleteExperimentalGraphContext: false, +} + +const getVSCodeSettings = (config: Partial = {}): Configuration => ({ + 
...DEFAULT_VSCODE_SETTINGS, + ...config, +}) + +const dummyCodeCompletionsClient: CodeCompletionsClient = { + complete: () => Promise.resolve({ completion: '', stopReason: '' }), + onConfigurationChange: () => undefined, +} + +describe('createProviderConfig', () => { + describe('if completions provider fields are defined in VSCode settings', () => { + it('returns null if completions provider is not supported', async () => { + const provider = await createProviderConfig( + getVSCodeSettings({ + autocompleteAdvancedProvider: 'nasa-ai' as Configuration['autocompleteAdvancedProvider'], + }), + dummyCodeCompletionsClient, + undefined, + {} + ) + expect(provider).toBeNull() + }) + + it('returns "codegen" provider config if the corresponding provider name and endpoint are specified', async () => { + const provider = await createProviderConfig( + getVSCodeSettings({ + autocompleteAdvancedProvider: 'unstable-codegen', + autocompleteAdvancedServerEndpoint: 'https://unstable-codegen.com', + }), + dummyCodeCompletionsClient, + undefined, + {} + ) + expect(provider?.identifier).toBe('codegen') + expect(provider?.model).toBe('codegen') + }) + + it('returns null if provider is "unstable-codegen", but the server endpoint is not set', async () => { + const provider = await createProviderConfig( + getVSCodeSettings({ autocompleteAdvancedProvider: 'unstable-codegen' }), + dummyCodeCompletionsClient, + undefined, + {} + ) + expect(provider).toBeNull() + }) + + it('returns "fireworks" provider config and corresponding model if specified', async () => { + const provider = await createProviderConfig( + getVSCodeSettings({ + autocompleteAdvancedProvider: 'unstable-fireworks', + autocompleteAdvancedModel: 'starcoder-3b', + }), + dummyCodeCompletionsClient, + undefined, + {} + ) + expect(provider?.identifier).toBe('fireworks') + expect(provider?.model).toBe('starcoder-3b') + }) + + it('returns "fireworks" provider config if specified in settings and default model', async () => { + const 
provider = await createProviderConfig( + getVSCodeSettings({ autocompleteAdvancedProvider: 'unstable-fireworks' }), + dummyCodeCompletionsClient, + undefined, + {} + ) + expect(provider?.identifier).toBe('fireworks') + expect(provider?.model).toBe('starcoder-7b') + }) + + it('returns "openai" provider config if specified in VSCode settings; model is ignored', async () => { + const provider = await createProviderConfig( + getVSCodeSettings({ + autocompleteAdvancedProvider: 'unstable-openai', + autocompleteAdvancedModel: 'hello-world', + }), + dummyCodeCompletionsClient, + undefined, + {} + ) + expect(provider?.identifier).toBe('unstable-openai') + expect(provider?.model).toBe('gpt-35-turbo') + }) + + it('returns "anthropic" provider config if specified in VSCode settings; model is ignored', async () => { + const provider = await createProviderConfig( + getVSCodeSettings({ + autocompleteAdvancedProvider: 'anthropic', + autocompleteAdvancedModel: 'hello-world', + }), + dummyCodeCompletionsClient, + undefined, + {} + ) + expect(provider?.identifier).toBe('anthropic') + expect(provider?.model).toBe('claude-instant-1') + }) + + it('provider specified in VSCode settings takes precedence over the one defined in the site config', async () => { + const provider = await createProviderConfig( + getVSCodeSettings({ + autocompleteAdvancedProvider: 'unstable-codegen', + autocompleteAdvancedServerEndpoint: 'https://unstable-codegen.com', + }), + dummyCodeCompletionsClient, + undefined, + { provider: 'azure-open-ai', completionModel: 'gpt-35-turbo-test' } + ) + expect(provider?.identifier).toBe('codegen') + expect(provider?.model).toBe('codegen') + }) + }) + + describe('completions provider and model are defined in the site config and not set in VSCode settings', () => { + describe('if provider is "sourcegraph"', () => { + const testCases: { + codyLLMConfig: CodyLLMSiteConfiguration + expected: { provider: string; model?: string } | null + }[] = [ + // sourcegraph + { 
codyLLMConfig: { provider: 'sourcegraph', completionModel: 'hello-world' }, expected: null }, + { + codyLLMConfig: { provider: 'sourcegraph', completionModel: 'anthropic/claude-instant-1' }, + expected: { provider: 'anthropic', model: 'claude-instant-1' }, + }, + { + codyLLMConfig: { provider: 'sourcegraph', completionModel: 'anthropic/' }, + expected: null, + }, + { + codyLLMConfig: { provider: 'sourcegraph', completionModel: '/claude-instant-1' }, + expected: null, + }, + + // aws-bedrock + { codyLLMConfig: { provider: 'aws-bedrock', completionModel: 'hello-world' }, expected: null }, + { + codyLLMConfig: { provider: 'aws-bedrock', completionModel: 'anthropic.claude-instant-1' }, + expected: { provider: 'anthropic', model: 'claude-instant-1' }, + }, + { + codyLLMConfig: { provider: 'aws-bedrock', completionModel: 'anthropic.' }, + expected: null, + }, + { + codyLLMConfig: { provider: 'aws-bedrock', completionModel: 'anthropic/claude-instant-1' }, + expected: null, + }, + + // open-ai + { + codyLLMConfig: { provider: 'openai', completionModel: 'gpt-35-turbo-test' }, + expected: { provider: 'unstable-openai', model: 'gpt-35-turbo-test' }, + }, + { + codyLLMConfig: { provider: 'openai' }, + expected: { provider: 'unstable-openai', model: 'gpt-35-turbo' }, + }, + + // azure-openai + { + codyLLMConfig: { provider: 'azure-openai', completionModel: 'gpt-35-turbo-test' }, + expected: { provider: 'unstable-openai', model: 'gpt-35-turbo-test' }, + }, + { + codyLLMConfig: { provider: 'azure-openai' }, + expected: { provider: 'unstable-openai', model: 'gpt-35-turbo' }, + }, + + // fireworks + { + codyLLMConfig: { provider: 'fireworks', completionModel: 'llama-code-7b' }, + expected: { provider: 'fireworks', model: 'llama-code-7b' }, + }, + { + codyLLMConfig: { provider: 'fireworks' }, + expected: { provider: 'fireworks', model: 'starcoder-7b' }, + }, + + // unknown-provider + { + codyLLMConfig: { provider: 'unknown-provider', completionModel: 'llama-code-7b' }, + expected: 
null, + }, + ] + + for (const { codyLLMConfig, expected } of testCases) { + it(`returns ${JSON.stringify(expected)} when cody LLM config is ${JSON.stringify( + codyLLMConfig + )}`, async () => { + const provider = await createProviderConfig( + getVSCodeSettings(), + dummyCodeCompletionsClient, + undefined, + codyLLMConfig + ) + if (expected === null) { + expect(provider).toBeNull() + } else { + expect(provider?.identifier).toBe(expected.provider) + expect(provider?.model).toBe(expected.model) + } + }) + } + }) + }) + + it('returns anthropic provider config if no completions provider specified in VSCode settings or site config', async () => { + const provider = await createProviderConfig(getVSCodeSettings(), dummyCodeCompletionsClient, undefined, {}) + expect(provider?.identifier).toBe('anthropic') + expect(provider?.model).toBe('claude-instant-1') + }) +}) From a16c760ee979585173822a77d564da13c4884d9f Mon Sep 17 00:00:00 2001 From: Taras Yemets Date: Thu, 14 Sep 2023 19:12:42 +0300 Subject: [PATCH 06/10] request LLM provider field separately for backwards compat --- .../src/sourcegraph-api/graphql/client.ts | 32 +++++++++++++++++-- .../src/sourcegraph-api/graphql/queries.ts | 10 +++++- 2 files changed, 39 insertions(+), 3 deletions(-) diff --git a/lib/shared/src/sourcegraph-api/graphql/client.ts b/lib/shared/src/sourcegraph-api/graphql/client.ts index a15d5487c5b..33bac10f783 100644 --- a/lib/shared/src/sourcegraph-api/graphql/client.ts +++ b/lib/shared/src/sourcegraph-api/graphql/client.ts @@ -6,6 +6,7 @@ import { DOTCOM_URL, isDotCom } from '../environments' import { CURRENT_SITE_CODY_LLM_CONFIGURATION, + CURRENT_SITE_CODY_LLM_PROVIDER, CURRENT_SITE_GRAPHQL_FIELDS_QUERY, CURRENT_SITE_HAS_CODY_ENABLED_QUERY, CURRENT_SITE_IDENTIFICATION, @@ -57,6 +58,14 @@ interface CurrentUserIdHasVerifiedEmailResponse { currentUser: { id: string; hasVerifiedEmail: boolean } | null } +interface CodyLLMSiteConfigurationResponse { + site: { codyLLMConfiguration: Omit | null } | null
+} + +interface CodyLLMSiteConfigurationProviderResponse { + site: { codyLLMConfiguration: Pick | null } | null +} + interface RepositoryIdResponse { repository: { id: string } | null } @@ -273,9 +282,28 @@ export class SourcegraphGraphQLAPIClient { } public async getCodyLLMConfiguration(): Promise { - const response = await this.fetchSourcegraphAPI>(CURRENT_SITE_CODY_LLM_CONFIGURATION) + // fetch Cody LLM provider separately for backward compatibility + const [configResponse, providerResponse] = await Promise.all([ + this.fetchSourcegraphAPI>( + CURRENT_SITE_CODY_LLM_CONFIGURATION + ), + this.fetchSourcegraphAPI>( + CURRENT_SITE_CODY_LLM_PROVIDER + ), + ]) + + const config = extractDataOrError(configResponse, data => data.site?.codyLLMConfiguration || undefined) + if (!config || isError(config)) { + return config + } + + let provider: string | undefined + const llmProvider = extractDataOrError(providerResponse, data => data.site?.codyLLMConfiguration?.provider) + if (llmProvider && !isError(llmProvider)) { + provider = llmProvider + } - return extractDataOrError(response, data => data.site?.codyLLMConfiguration) + return { ...config, provider } } public async getRepoIds(names: string[]): Promise<{ id: string; name: string }[] | Error> { diff --git a/lib/shared/src/sourcegraph-api/graphql/queries.ts b/lib/shared/src/sourcegraph-api/graphql/queries.ts index 79053cf4f6a..fa8138919de 100644 --- a/lib/shared/src/sourcegraph-api/graphql/queries.ts +++ b/lib/shared/src/sourcegraph-api/graphql/queries.ts @@ -36,6 +36,15 @@ query CurrentUser { } }` +export const CURRENT_SITE_CODY_LLM_PROVIDER = ` +query CurrentSiteCodyLlmConfiguration { + site { + codyLLMConfiguration { + provider + } + } +}` + export const CURRENT_SITE_CODY_LLM_CONFIGURATION = ` query CurrentSiteCodyLlmConfiguration { site { @@ -46,7 +55,6 @@ query CurrentSiteCodyLlmConfiguration { fastChatModelMaxTokens completionModel completionModelMaxTokens - provider } } }` From
a5af6d14a4af182f41241f6360a7abd4ba265cd0 Mon Sep 17 00:00:00 2001 From: Taras Yemets Date: Fri, 15 Sep 2023 11:06:21 +0300 Subject: [PATCH 07/10] add test case --- vscode/src/completions/providers/createProvider.test.ts | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/vscode/src/completions/providers/createProvider.test.ts b/vscode/src/completions/providers/createProvider.test.ts index 2b7cdfb7172..6564e52daf5 100644 --- a/vscode/src/completions/providers/createProvider.test.ts +++ b/vscode/src/completions/providers/createProvider.test.ts @@ -227,6 +227,12 @@ describe('createProviderConfig', () => { codyLLMConfig: { provider: 'unknown-provider', completionModel: 'llama-code-7b' }, expected: null, }, + + // provider not defined (backward compat) + { + codyLLMConfig: { provider: undefined, completionModel: 'llama-code-7b' }, + expected: null, + }, ] for (const { codyLLMConfig, expected } of testCases) { From 0157089fd895df49f5b1b97a35ed0212143c8166 Mon Sep 17 00:00:00 2001 From: Taras Yemets Date: Fri, 15 Sep 2023 11:16:07 +0300 Subject: [PATCH 08/10] fix test --- vscode/src/completions/providers/createProvider.test.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vscode/src/completions/providers/createProvider.test.ts b/vscode/src/completions/providers/createProvider.test.ts index 6564e52daf5..6970f46ffdf 100644 --- a/vscode/src/completions/providers/createProvider.test.ts +++ b/vscode/src/completions/providers/createProvider.test.ts @@ -231,7 +231,7 @@ describe('createProviderConfig', () => { // provider not defined (backward compat) { codyLLMConfig: { provider: undefined, completionModel: 'llama-code-7b' }, - expected: null, + expected: { provider: 'anthropic', model: 'claude-instant-1' }, }, ] From 2f160ea40711048fcec18869a72e4f6299d96a41 Mon Sep 17 00:00:00 2001 From: Taras Yemets Date: Fri, 15 Sep 2023 16:06:45 +0300 Subject: [PATCH 09/10] do not log azure deployment name --- vscode/src/completions/providers/createProvider.ts | 2 +- 
vscode/src/completions/providers/unstable-openai.ts | 2 +- vscode/src/completions/vscodeInlineCompletionItemProvider.ts | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/vscode/src/completions/providers/createProvider.ts b/vscode/src/completions/providers/createProvider.ts index 2bd1c0fdffa..abc20a1913b 100644 --- a/vscode/src/completions/providers/createProvider.ts +++ b/vscode/src/completions/providers/createProvider.ts @@ -93,7 +93,7 @@ export async function createProviderConfig( return createUnstableOpenAIProviderConfig({ client, contextWindowTokens: 2048, - model, + model: provider === 'azure-openai' ? '' : model, }) case 'fireworks': diff --git a/vscode/src/completions/providers/unstable-openai.ts b/vscode/src/completions/providers/unstable-openai.ts index 5a790d211a6..b29fbe81441 100644 --- a/vscode/src/completions/providers/unstable-openai.ts +++ b/vscode/src/completions/providers/unstable-openai.ts @@ -164,6 +164,6 @@ export function createProviderConfig({ maximumContextCharacters: tokensToChars(unstableAzureOpenAIOptions.contextWindowTokens), enableExtendedMultilineTriggers: false, identifier: PROVIDER_IDENTIFIER, - model: model || 'gpt-35-turbo', + model: model ?? 
'gpt-35-turbo', } } diff --git a/vscode/src/completions/vscodeInlineCompletionItemProvider.ts b/vscode/src/completions/vscodeInlineCompletionItemProvider.ts index 3b052324720..f984d75a566 100644 --- a/vscode/src/completions/vscodeInlineCompletionItemProvider.ts +++ b/vscode/src/completions/vscodeInlineCompletionItemProvider.ts @@ -113,7 +113,7 @@ export class InlineCompletionItemProvider implements vscode.InlineCompletionItem logDebug( 'CodyCompletionProvider:initialized', - `${this.config.providerConfig.identifier}/${this.config.providerConfig.model}` + [this.config.providerConfig.identifier, this.config.providerConfig.model].join('/') ) } From 258f15343c9dedc0f86c0bea1e8058d8156872b6 Mon Sep 17 00:00:00 2001 From: Taras Yemets Date: Fri, 15 Sep 2023 16:31:58 +0300 Subject: [PATCH 10/10] fix azure openai model name condition and test --- vscode/src/completions/providers/createProvider.test.ts | 2 +- vscode/src/completions/providers/createProvider.ts | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/vscode/src/completions/providers/createProvider.test.ts b/vscode/src/completions/providers/createProvider.test.ts index 6970f46ffdf..f6f41a000b7 100644 --- a/vscode/src/completions/providers/createProvider.test.ts +++ b/vscode/src/completions/providers/createProvider.test.ts @@ -205,7 +205,7 @@ describe('createProviderConfig', () => { // azure-openai { codyLLMConfig: { provider: 'azure-openai', completionModel: 'gpt-35-turbo-test' }, - expected: { provider: 'unstable-openai', model: 'gpt-35-turbo-test' }, + expected: { provider: 'unstable-openai', model: '' }, }, { codyLLMConfig: { provider: 'azure-openai' }, diff --git a/vscode/src/completions/providers/createProvider.ts b/vscode/src/completions/providers/createProvider.ts index abc20a1913b..9b782f2fe86 100644 --- a/vscode/src/completions/providers/createProvider.ts +++ b/vscode/src/completions/providers/createProvider.ts @@ -93,7 +93,8 @@ export async function createProviderConfig( return 
createUnstableOpenAIProviderConfig({ client, contextWindowTokens: 2048, - model: provider === 'azure-openai' ? '' : model, + // Model name for azure openai provider is a deployment name. It shouldn't appear in logs. + model: provider === 'azure-openai' && model ? '' : model, }) case 'fireworks':