-
Notifications
You must be signed in to change notification settings - Fork 2k
/
llama_cpp.ts
90 lines (74 loc) · 2.46 KB
/
llama_cpp.ts
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
import { LlamaModel, LlamaContext, LlamaChatSession } from "node-llama-cpp";
import {
LlamaBaseCppInputs,
createLlamaModel,
createLlamaContext,
createLlamaSession,
} from "../util/llama_cpp.js";
import { LLM, BaseLLMCallOptions, BaseLLMParams } from "./base.js";
/**
 * Configuration inputs accepted by the {@link LlamaCpp} LLM wrapper.
 * Combines the shared llama.cpp options with the base LLM parameters.
 *
 * Note that the modelPath is the only required parameter. For testing you
 * can set this in the environment variable `LLAMA_PATH`.
 */
export interface LlamaCppInputs extends LlamaBaseCppInputs, BaseLLMParams {}
/**
 * Per-call options for {@link LlamaCpp} invocations. These supplement the
 * base call options with llama.cpp-specific generation controls.
 */
export interface LlamaCppCallOptions extends BaseLLMCallOptions {
  /** The maximum number of tokens the response should contain. */
  maxTokens?: number;
  /** A function called when matching the provided token array */
  onToken?: (tokens: number[]) => void;
}
/**
* To use this model you need to have the `node-llama-cpp` module installed.
* This can be installed using `npm install -S node-llama-cpp` and the minimum
* version supported in version 2.0.0.
* This also requires that have a locally built version of Llama2 installed.
*/
export class LlamaCpp extends LLM<LlamaCppCallOptions> {
declare CallOptions: LlamaCppCallOptions;
static inputs: LlamaCppInputs;
maxTokens?: number;
temperature?: number;
topK?: number;
topP?: number;
trimWhitespaceSuffix?: boolean;
_model: LlamaModel;
_context: LlamaContext;
_session: LlamaChatSession;
static lc_name() {
return "LlamaCpp";
}
constructor(inputs: LlamaCppInputs) {
super(inputs);
this.maxTokens = inputs?.maxTokens;
this.temperature = inputs?.temperature;
this.topK = inputs?.topK;
this.topP = inputs?.topP;
this.trimWhitespaceSuffix = inputs?.trimWhitespaceSuffix;
this._model = createLlamaModel(inputs);
this._context = createLlamaContext(this._model, inputs);
this._session = createLlamaSession(this._context);
}
_llmType() {
return "llama2_cpp";
}
/** @ignore */
async _call(
prompt: string,
// @ts-expect-error - TS6133: 'options' is declared but its value is never read.
options?: this["ParsedCallOptions"]
): Promise<string> {
try {
const promptOptions = {
maxTokens: this?.maxTokens,
temperature: this?.temperature,
topK: this?.topK,
topP: this?.topP,
trimWhitespaceSuffix: this?.trimWhitespaceSuffix,
};
const completion = await this._session.prompt(prompt, promptOptions);
return completion;
} catch (e) {
throw new Error("Error getting prompt completion.");
}
}
}