configurable default model #544

Merged (8 commits) on Jun 14, 2024. Changes shown from 7 commits.
11 changes: 5 additions & 6 deletions demo/genaisrc/genaiscript.d.ts

Some generated files are not rendered by default.

11 changes: 5 additions & 6 deletions docs/genaisrc/genaiscript.d.ts

Some generated files are not rendered by default.

11 changes: 5 additions & 6 deletions genaisrc/genaiscript.d.ts

Some generated files are not rendered by default.

4 changes: 2 additions & 2 deletions packages/cli/src/cli.ts
@@ -66,8 +66,8 @@ export async function cli() {
     }
 
     let nodeHost: NodeHost
-    program.hook("preAction", (cmd) => {
-        nodeHost = NodeHost.install(cmd.opts().env)
+    program.hook("preAction", async (cmd) => {
+        nodeHost = await NodeHost.install(cmd.opts().env)
     })
     program
         .name(TOOL_ID)
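
Note on this hunk: `NodeHost.install` becomes asynchronous (it now awaits reading defaults from the environment, see the nodehost.ts changes below), so the commander `preAction` hook awaits it. Commander accepts an async hook as long as the program is parsed with `parseAsync`; a minimal standalone sketch of that pattern, with a hypothetical `installHost` standing in for `NodeHost.install`:

```ts
import { Command } from "commander"

// Hypothetical stand-in for NodeHost.install; the real one loads .env,
// constructs the host, and parses default model options.
async function installHost(envPath?: string): Promise<void> {
    console.log(`installing host with env file: ${envPath ?? ".env"}`)
}

const program = new Command()
program.option("--env <path>", "path to a .env file")

// An async preAction hook is awaited by commander when parseAsync is used,
// so the host is fully installed before any command action runs.
program.hook("preAction", async (cmd) => {
    await installHost(cmd.opts().env)
})

program.command("run").action(() => {
    console.log("host ready")
})

program.parseAsync(process.argv).catch((err) => {
    console.error(err)
    process.exit(1)
})
```
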
3 changes: 1 addition & 2 deletions packages/cli/src/info.ts
@@ -1,6 +1,5 @@
 import {
     CORE_VERSION,
-    DEFAULT_MODEL,
     ModelConnectionInfo,
     YAMLStringify,
     host,
@@ -23,7 +22,7 @@ async function resolveScriptsConnectionInfo(
     const models: Record<string, ModelConnectionOptions> = {}
     for (const template of templates) {
         const conn: ModelConnectionOptions = {
-            model: template.model ?? DEFAULT_MODEL,
+            model: template.model ?? host.defaultModelOptions.model,
         }
         const key = JSON.stringify(conn)
         if (!models[key]) models[key] = conn
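
For context, `resolveScriptsConnectionInfo` deduplicates templates by their effective connection options so each distinct model is resolved only once; the change simply swaps the hard-coded `DEFAULT_MODEL` constant for the host-level default. A self-contained sketch of that dedupe-by-JSON-key pattern (the types and sample data are illustrative, not the real genaiscript ones):

```ts
interface TemplateLike {
    id: string
    model?: string
}

// Group templates by their effective model, mirroring the pattern in
// resolveScriptsConnectionInfo: serialize the options object and use it
// as a map key so duplicates collapse to a single entry.
function groupByModel(templates: TemplateLike[], defaultModel: string) {
    const models: Record<string, { model: string }> = {}
    for (const template of templates) {
        const conn = { model: template.model ?? defaultModel }
        const key = JSON.stringify(conn)
        if (!models[key]) models[key] = conn
    }
    return Object.values(models)
}

// Two templates fall back to the default, one overrides it: two entries remain.
console.log(
    groupByModel(
        [{ id: "a" }, { id: "b" }, { id: "c", model: "ollama:phi3" }],
        "gpt-4"
    )
)
```
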
3 changes: 3 additions & 0 deletions packages/cli/src/llamaindexretrieval.ts
@@ -29,6 +29,7 @@ import {
     PDF_MIME_TYPE,
     DOCX_MIME_TYPE,
     JSON_SCHEMA_MIME_TYPE,
+    assert,
 } from "genaiscript-core"
 import { type BaseReader } from "llamaindex"
 import type { GenericFileSystem } from "@llamaindex/env"
@@ -143,6 +144,7 @@ export class LlamaIndexRetrievalService
     }
 
     private async getModelToken(modelId: string) {
+        assert(!!modelId)
         const { provider } = parseModelIdentifier(modelId)
         const conn = await this.host.getLanguageModelConfiguration(modelId)
         if (provider === MODEL_PROVIDER_OLLAMA)
@@ -151,6 +153,7 @@
     }
 
     async pullModel(modelid: string): Promise<ResponseStatus> {
+        assert(!!modelid)
         const { provider, model } = parseModelIdentifier(modelid)
         const conn = await this.getModelToken(modelid)
         if (provider === MODEL_PROVIDER_OLLAMA) {
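
The new assertions guard against an empty model id now that the default is resolved at runtime through the host instead of being baked in as a constant at import time. The `assert` helper comes from genaiscript-core and its implementation is not part of this diff; a minimal version in the same spirit (an assumption, not the actual genaiscript-core code) might look like:

```ts
// Minimal assertion helper (assumed shape; not the genaiscript-core source).
export function assert(
    condition: unknown,
    message = "assertion failed"
): asserts condition {
    if (!condition) {
        // Failing here surfaces a missing model id immediately, instead of
        // letting it turn into a confusing provider error further down.
        throw new Error(message)
    }
}

// Usage mirroring the calls added above:
// assert(!!modelid) // throws when modelid is undefined or ""
```
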
15 changes: 14 additions & 1 deletion packages/cli/src/nodehost.ts
@@ -4,6 +4,8 @@ import {
     AZURE_OPENAI_TOKEN_SCOPES,
     AbortSignalOptions,
     AskUserOptions,
+    DEFAULT_MODEL,
+    DEFAULT_TEMPERATURE,
     Host,
     LanguageModel,
     LanguageModelConfiguration,
@@ -19,6 +21,7 @@ import {
     UTF8Encoder,
     createBundledParsers,
     createFileSystem,
+    parseDefaultsFromEnv,
     parseTokenFromEnv,
     resolveLanguageModel,
     setHost,
@@ -56,14 +59,18 @@ export class NodeHost implements Host {
     readonly workspace = createFileSystem()
     readonly parser = createBundledParsers()
     readonly docker = new DockerManager()
+    readonly defaultModelOptions = {
+        model: DEFAULT_MODEL,
+        temperature: DEFAULT_TEMPERATURE,
+    }
 
     constructor() {
         const srv = new LlamaIndexRetrievalService(this)
         this.retrieval = srv
         this.models = srv
     }
 
-    static install(dotEnvPath: string) {
+    static async install(dotEnvPath: string) {
         dotEnvPath = dotEnvPath || resolve(".env")
         if (existsSync(dotEnvPath)) {
             const res = dotenv.config({
@@ -75,19 +82,25 @@
         }
         const h = new NodeHost()
         setHost(h)
+        await h.parseDefaults()
         return h
     }
 
     async readSecret(name: string): Promise<string | undefined> {
         return process.env[name]
     }
 
+    private async parseDefaults() {
+        await parseDefaultsFromEnv(process.env)
+    }
+
     private _azureToken: AccessToken
     async getLanguageModelConfiguration(
         modelId: string,
         options?: { token?: boolean } & AbortSignalOptions & TraceOptions
     ): Promise<LanguageModelConfiguration> {
         const { signal, token: askToken } = options || {}
+        await this.parseDefaults()
         const tok = await parseTokenFromEnv(process.env, modelId)
         if (
             askToken &&
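
The host now owns `defaultModelOptions`, seeded from the `DEFAULT_MODEL` and `DEFAULT_TEMPERATURE` constants and then overridden from the environment by `parseDefaultsFromEnv`, whose implementation lives in genaiscript-core and is not shown in this diff. A sketch of what such a parser could look like; the environment variable names, the explicit `defaults` parameter, and the validation are assumptions for illustration, not the actual genaiscript implementation:

```ts
interface DefaultModelOptions {
    model: string
    temperature: number
}

// Hypothetical defaults parser: mutates the host defaults in place based on
// the process environment. Variable names are illustrative only.
export function parseDefaultsFromEnv(
    env: Record<string, string | undefined>,
    defaults: DefaultModelOptions
): void {
    const model = env.GENAISCRIPT_DEFAULT_MODEL
    if (model) defaults.model = model

    const temperature = env.GENAISCRIPT_DEFAULT_TEMPERATURE
    if (temperature !== undefined) {
        const t = parseFloat(temperature)
        // Ignore values that cannot be used as a sampling temperature.
        if (!Number.isNaN(t) && t >= 0 && t <= 2) defaults.temperature = t
    }
}
```

Note that `getLanguageModelConfiguration` also calls `parseDefaults` on every invocation, so the defaults are re-read from `process.env` rather than only once at install time.
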
20 changes: 9 additions & 11 deletions packages/core/src/chat.ts
@@ -14,13 +14,7 @@ import {
     renderFencedVariables,
 } from "./fence"
 import { validateFencesWithSchema, validateJSONWithSchema } from "./schema"
-import {
-    CHAT_CACHE,
-    DEFAULT_MODEL,
-    DEFAULT_TEMPERATURE,
-    MAX_DATA_REPAIRS,
-    MAX_TOOL_CALLS,
-} from "./constants"
+import { CHAT_CACHE, MAX_DATA_REPAIRS, MAX_TOOL_CALLS } from "./constants"
 import { parseAnnotations } from "./annotations"
 import { isCancelError, serializeError } from "./error"
 import { details, fenceMD } from "./markdown"
@@ -489,8 +483,12 @@ export function mergeGenerationOptions(
     return {
         ...options,
         ...(runOptions || {}),
-        model: runOptions?.model ?? options?.model ?? DEFAULT_MODEL,
-        temperature: runOptions?.temperature ?? DEFAULT_TEMPERATURE,
+        model:
+            runOptions?.model ??
+            options?.model ??
+            host.defaultModelOptions.model,
+        temperature:
+            runOptions?.temperature ?? host.defaultModelOptions.temperature,
     }
 }
 
@@ -507,8 +505,8 @@ export async function executeChatSession(
 ) {
     const {
         trace,
-        model = DEFAULT_MODEL,
-        temperature = DEFAULT_TEMPERATURE,
+        model = host.defaultModelOptions.model,
+        temperature = host.defaultModelOptions.temperature,
         topP,
         maxTokens,
         seed,
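
Taken together, both call sites now resolve the model and temperature through a three-level fallback: explicit per-run options win, then per-script options, then the host-level defaults that can be configured at runtime. A condensed standalone sketch of that precedence (types and sample values are stand-ins, not the real `GenerationOptions`):

```ts
interface ModelOptions {
    model?: string
    temperature?: number
}

// Stand-in for host.defaultModelOptions.
const hostDefaults = { model: "default-model", temperature: 0.01 }

// Per-run options override per-script options, which override host defaults;
// temperature only consults the run options before falling back to the host.
function mergeOptions(script: ModelOptions, run: ModelOptions) {
    return {
        model: run.model ?? script.model ?? hostDefaults.model,
        temperature: run.temperature ?? hostDefaults.temperature,
    }
}

console.log(mergeOptions({ model: "ollama:phi3" }, {}))
// -> { model: "ollama:phi3", temperature: 0.01 }
console.log(mergeOptions({}, { model: "gpt-3.5-turbo", temperature: 0.7 }))
// -> { model: "gpt-3.5-turbo", temperature: 0.7 }
```
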