15 changes: 10 additions & 5 deletions helpers/providers/index.ts
@@ -6,11 +6,13 @@ import { askAnthropicQuestions } from "./anthropic";
import { askGeminiQuestions } from "./gemini";
import { askOllamaQuestions } from "./ollama";
import { askOpenAIQuestions } from "./openai";
import { askLLMHubQuestions } from "./llmhub";

const DEFAULT_MODEL_PROVIDER = "openai";

export type ModelConfigQuestionsParams = {
openAiKey?: string;
llmHubKey?: string;
askModels: boolean;
};

@@ -19,6 +21,7 @@ export type ModelConfigParams = Omit<ModelConfig, "provider">;
export async function askModelConfig({
askModels,
openAiKey,
llmHubKey,
}: ModelConfigQuestionsParams): Promise<ModelConfig> {
let modelProvider: ModelProvider = DEFAULT_MODEL_PROVIDER;
if (askModels && !ciInfo.isCI) {
@@ -28,10 +31,8 @@ export async function askModelConfig({
name: "provider",
message: "Which model provider would you like to use",
choices: [
{
title: "OpenAI",
value: "openai",
},
{ title: "LLMHub", value: "llmhub" }, // LLMHub as the first option
{ title: "OpenAI", value: "openai" },
{ title: "Ollama", value: "ollama" },
{ title: "Anthropic", value: "anthropic" },
{ title: "Gemini", value: "gemini" },
@@ -45,6 +46,9 @@

let modelConfig: ModelConfigParams;
switch (modelProvider) {
case "llmhub":
modelConfig = await askLLMHubQuestions({ llmHubKey, askModels });
break;
case "ollama":
modelConfig = await askOllamaQuestions({ askModels });
break;
@@ -54,6 +58,7 @@ export async function askModelConfig({
case "gemini":
modelConfig = await askGeminiQuestions({ askModels });
break;

default:
modelConfig = await askOpenAIQuestions({
openAiKey,
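For orientation, a minimal sketch of how a caller could thread the new llmHubKey option into askModelConfig; the surrounding entry point and env-variable wiring are hypothetical, not part of this PR:

// Hypothetical caller (e.g. the CLI entry point):
const modelConfig = await askModelConfig({
  askModels: true, // prompt interactively instead of using defaults
  openAiKey: process.env.OPENAI_API_KEY,
  llmHubKey: process.env.LLMHUB_API_KEY, // handled by the new "llmhub" case
});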
143 changes: 143 additions & 0 deletions helpers/providers/llmhub.ts
@@ -0,0 +1,143 @@
import ciInfo from "ci-info";
import got from "got";
import ora from "ora";
import { red } from "picocolors";
import prompts from "prompts";
import { ModelConfigParams, ModelConfigQuestionsParams } from ".";
import { questionHandlers } from "../../questions";

// Note: no trailing slash, since "/models" is appended when fetching models.
const LLMHUB_API_URL = "https://llm-server.llmhub.t-systems.net";

const DEFAULT_MODEL = "gpt-3.5-turbo";
const DEFAULT_EMBEDDING_MODEL = "text-embedding-3-large";

export async function askLLMHubQuestions({
llmHubKey,
askModels,
}: ModelConfigQuestionsParams): Promise<ModelConfigParams> {
const config: ModelConfigParams = {
apiKey: llmHubKey,
model: DEFAULT_MODEL,
embeddingModel: DEFAULT_EMBEDDING_MODEL,
dimensions: getDimensions(DEFAULT_EMBEDDING_MODEL),
isConfigured(): boolean {
if (config.apiKey) {
return true;
}
if (process.env["LLMHUB_API_KEY"]) {
return true;
}
return false;
},
};

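  // Ask for a key only when none was passed in programmatically; a blank
  // answer falls back to the LLMHUB_API_KEY env variable further below.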
if (!config.apiKey) {
const { key } = await prompts(
{
type: "text",
name: "key",
message: askModels
? "Please provide your LLMHub API key (or leave blank to use LLMHUB_API_KEY env variable):"
: "Please provide your LLMHub API key (leave blank to skip):",
validate: (value: string) => {
if (askModels && !value) {
if (process.env.LLMHUB_API_KEY) {
return true;
}
return "LLMHUB_API_KEY env variable is not set - key is required";
}
return true;
},
},
questionHandlers,
);
config.apiKey = key || process.env.LLMHUB_API_KEY;
}

// Use the default models in CI, or when the user chose not to be asked.
const useDefaults = ciInfo.isCI || !askModels;
if (!useDefaults) {
const { model } = await prompts(
{
type: "select",
name: "model",
message: "Which LLM model would you like to use?",
choices: await getAvailableModelChoices(false, config.apiKey),
initial: 0,
},
questionHandlers,
);
config.model = model;

const { embeddingModel } = await prompts(
{
type: "select",
name: "embeddingModel",
message: "Which embedding model would you like to use?",
choices: await getAvailableModelChoices(true, config.apiKey),
initial: 0,
},
questionHandlers,
);
config.embeddingModel = embeddingModel;
config.dimensions = getDimensions(embeddingModel);
}

return config;
}

async function getAvailableModelChoices(
selectEmbedding: boolean,
apiKey?: string,
) {
if (!apiKey) {
throw new Error("Need LLMHub key to retrieve model choices");
}
  const isLLMModel = (modelId: string) => {
    // Assumption: LLMHub lists chat and embedding models on one endpoint,
    // so anything that is not an embedding model is offered as an LLM choice
    // (a "gemini" prefix check would hide the default gpt-3.5-turbo model);
    // adjust if LLMHub uses explicit model-type prefixes.
    return !isEmbeddingModel(modelId);
  };

const isEmbeddingModel = (modelId: string) => {
return modelId.includes("embedding");
};

const spinner = ora("Fetching available models").start();
try {
const response = await got(`${LLMHUB_API_URL}/models`, {
headers: {
Authorization: "Bearer " + apiKey,
},
timeout: 5000,
responseType: "json",
});
const data: any = response.body; // responseType "json" already parsed the body
spinner.stop();
return data.data
.filter((model: any) =>
selectEmbedding ? isEmbeddingModel(model.id) : isLLMModel(model.id),
)
.map((el: any) => {
return {
title: el.id,
value: el.id,
};
});
} catch (error) {
spinner.stop();
if ((error as any).response?.statusCode === 401) {
console.log(
red(
"Invalid LLMHub API key provided! Please provide a valid key and try again!",
),
);
} else {
console.log(red("Request failed: " + error));
}
process.exit(1);
}
}

function getDimensions(modelName: string) {
  // Assumption: dimensions match the OpenAI models of the same name
  // (text-embedding-3-large is 3072; most others are 1536). Update if
  // LLMHub serves models with different dimensions.
  return modelName === "text-embedding-3-large" ? 3072 : 1536;
}
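For reference, a short hedged sketch of how this helper is expected to behave end to end. The /models response shape is inferred from the filtering code above (an OpenAI-style model list), not from LLMHub documentation:

// Assumed shape of GET <LLMHUB_API_URL>/models:
//   { "data": [{ "id": "gpt-3.5-turbo" }, { "id": "text-embedding-3-large" }] }

// Hypothetical standalone usage:
const config = await askLLMHubQuestions({
  llmHubKey: process.env.LLMHUB_API_KEY,
  askModels: true, // also prompts for the model and embedding model
});
console.log(config.model, config.embeddingModel, config.dimensions);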
2 changes: 1 addition & 1 deletion helpers/types.ts
@@ -1,7 +1,7 @@
import { PackageManager } from "../helpers/get-pkg-manager";
import { Tool } from "./tools";

export type ModelProvider = "openai" | "ollama" | "anthropic" | "gemini";
export type ModelProvider = "llmhub" | "openai" | "ollama" | "anthropic" | "gemini";
export type ModelConfig = {
provider: ModelProvider;
apiKey?: string;
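To illustrate the widened union, a hedged example of a ModelConfig for the new provider; the concrete field values are illustrative only, not prescribed by this PR:

// Illustrative only: a ModelConfig as askModelConfig might return it
// when the user picks the new "llmhub" provider.
const llmHubConfig: ModelConfig = {
  provider: "llmhub",
  apiKey: process.env.LLMHUB_API_KEY,
  model: "gpt-3.5-turbo",
  embeddingModel: "text-embedding-3-large",
  dimensions: 3072,
  isConfigured: () => Boolean(process.env.LLMHUB_API_KEY),
};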