1902-cuda.patch
diff --git a/common/common.cpp b/common/common.cpp
index 2597ba0..e42ae73 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -1268,3 +1268,218 @@ void dump_non_result_info_yaml(FILE * stream, const gpt_params & params, const l
fprintf(stream, "typical_p: %f # default: 1.0\n", params.typical_p);
fprintf(stream, "verbose_prompt: %s # default: false\n", params.verbose_prompt ? "true" : "false");
}
+
+gpt_params* create_gpt_params(const std::string& fname,const std::string& lora,const std::string& lora_base) {
+ gpt_params* lparams = new gpt_params;
+ fprintf(stderr, "%s: loading model %s\n", __func__, fname.c_str());
+
+ // Initialize the 'model' member with the 'fname' parameter
+ lparams->model = fname;
+ lparams->lora_base = lora_base;
+ lparams->lora_adapter = lora;
+ if (lparams->lora_adapter.empty()) {
+ lparams->use_mmap = false;
+ }
+ return lparams;
+}
+
+gpt_params* create_gpt_params_cuda(const std::string& fname) {
+ gpt_params* lparams = new gpt_params;
+ fprintf(stderr, "%s: loading model %s\n", __func__, fname.c_str());
+
+ // Initialize the 'model' member with the 'fname' parameter
+ lparams->model = fname;
+ return lparams;
+}
+
+void* load_binding_model(const char *fname, int n_ctx, int n_seed, bool memory_f16, bool mlock, bool embeddings, bool mmap, bool low_vram, int n_gpu_layers, int n_batch, const char *maingpu, const char *tensorsplit, bool numa, float rope_freq_base, float rope_freq_scale, bool mul_mat_q, const char *lora, const char *lora_base, bool perplexity) {
+ // load the model
+ gpt_params * lparams;
+// Temporary workaround for https://github.com/go-skynet/go-llama.cpp/issues/218
+#ifdef GGML_USE_CUBLAS
+ lparams = create_gpt_params_cuda(fname);
+#else
+ lparams = create_gpt_params(fname, lora, lora_base);
+#endif
+ llama_model * model;
+ llama_binding_state * state;
+ state = new llama_binding_state;
+ llama_context * ctx;
+ lparams->n_ctx = n_ctx;
+ lparams->seed = n_seed;
+ lparams->memory_f16 = memory_f16;
+ lparams->embedding = embeddings;
+ lparams->use_mlock = mlock;
+ lparams->n_gpu_layers = n_gpu_layers;
+ lparams->perplexity = perplexity;
+ lparams->use_mmap = mmap;
+
+ lparams->low_vram = low_vram;
+ if (rope_freq_base != 0.0f) {
+ lparams->rope_freq_base = rope_freq_base;
+ } else {
+ lparams->rope_freq_base = 10000.0f;
+ }
+
+ if (rope_freq_scale != 0.0f) {
+ lparams->rope_freq_scale = rope_freq_scale;
+ } else {
+ lparams->rope_freq_scale = 1.0f;
+ }
+
+ lparams->model = fname;
+ if (maingpu[0] != '\0') {
+ lparams->main_gpu = std::stoi(maingpu);
+ }
+
+ if (tensorsplit[0] != '\0') {
+ std::string arg_next = tensorsplit;
+ // split string by , and /
+ const std::regex regex{R"([,/]+)"};
+ std::sregex_token_iterator it{arg_next.begin(), arg_next.end(), regex, -1};
+ std::vector<std::string> split_arg{it, {}};
+ GGML_ASSERT(split_arg.size() <= LLAMA_MAX_DEVICES);
+
+ for (size_t i = 0; i < LLAMA_MAX_DEVICES; ++i) {
+ if (i < split_arg.size()) {
+ lparams->tensor_split[i] = std::stof(split_arg[i]);
+ } else {
+ lparams->tensor_split[i] = 0.0f;
+ }
+ }
+ }
+
+ lparams->n_batch = n_batch;
+
+ llama_backend_init(numa);
+
+ std::tie(model, ctx) = llama_init_from_gpt_params(*lparams);
+ if (model == NULL) {
+ fprintf(stderr, "%s: error: unable to load model\n", __func__);
+ return nullptr;
+ }
+ state->ctx = ctx;
+ state->model = model;
+ return state;
+}
+
+// Note: the only difference here is that params is passed as a pointer, avoiding a copy-by-value.
+// We keep this as a separate function to avoid patching all of the llama.cpp code.
+// The function needs to live in the common.o object, as defining it in the binding has no effect.
+llama_token llama_sample_token_binding(
+ struct llama_context * ctx,
+ struct llama_context * ctx_guidance,
+ struct llama_grammar * grammar,
+ const struct gpt_params * g_params,
+ const std::vector<llama_token> & last_tokens,
+ std::vector<llama_token_data> & candidates,
+ int idx) {
+
+ struct gpt_params params = *g_params;
+ const int n_ctx = llama_n_ctx(ctx);
+ const int n_vocab = llama_n_vocab(ctx);
+
+ const float temp = params.temp;
+ const int32_t top_k = params.top_k <= 0 ? n_vocab : params.top_k;
+ const float top_p = params.top_p;
+ const float tfs_z = params.tfs_z;
+ const float typical_p = params.typical_p;
+ const int32_t repeat_last_n = params.repeat_last_n < 0 ? n_ctx : params.repeat_last_n;
+ const float repeat_penalty = params.repeat_penalty;
+ const float alpha_presence = params.presence_penalty;
+ const float alpha_frequency = params.frequency_penalty;
+ const int mirostat = params.mirostat;
+ const float mirostat_tau = params.mirostat_tau;
+ const float mirostat_eta = params.mirostat_eta;
+ const bool penalize_nl = params.penalize_nl;
+
+ llama_token id = 0;
+
+ float * logits = llama_get_logits(ctx) + idx * n_vocab;
+
+ // Apply params.logit_bias map
+ for (auto it = params.logit_bias.begin(); it != params.logit_bias.end(); it++) {
+ logits[it->first] += it->second;
+ }
+
+ candidates.clear();
+ for (llama_token token_id = 0; token_id < n_vocab; token_id++) {
+ candidates.emplace_back(llama_token_data{token_id, logits[token_id], 0.0f});
+ }
+
+ llama_token_data_array cur_p = { candidates.data(), candidates.size(), false };
+
+ if (ctx_guidance) {
+ llama_sample_classifier_free_guidance(ctx, &cur_p, ctx_guidance, params.cfg_scale);
+ }
+
+ // apply penalties
+ if (!last_tokens.empty()) {
+ const float nl_logit = logits[llama_token_nl(ctx)];
+ const int last_n_repeat = std::min(std::min((int)last_tokens.size(), repeat_last_n), n_ctx);
+
+ llama_sample_repetition_penalty(ctx, &cur_p,
+ last_tokens.data() + last_tokens.size() - last_n_repeat,
+ last_n_repeat, repeat_penalty);
+ llama_sample_frequency_and_presence_penalties(ctx, &cur_p,
+ last_tokens.data() + last_tokens.size() - last_n_repeat,
+ last_n_repeat, alpha_frequency, alpha_presence);
+
+ if (!penalize_nl) {
+ for (size_t idx = 0; idx < cur_p.size; idx++) {
+ if (cur_p.data[idx].id == llama_token_nl(ctx)) {
+ cur_p.data[idx].logit = nl_logit;
+ break;
+ }
+ }
+ }
+ }
+
+ if (grammar != NULL) {
+ llama_sample_grammar(ctx, &cur_p, grammar);
+ }
+
+ if (temp <= 0) {
+ // Greedy sampling
+ id = llama_sample_token_greedy(ctx, &cur_p);
+ } else {
+ if (mirostat == 1) {
+ static float mirostat_mu = 2.0f * mirostat_tau;
+ const int mirostat_m = 100;
+ llama_sample_temperature(ctx, &cur_p, temp);
+ id = llama_sample_token_mirostat(ctx, &cur_p, mirostat_tau, mirostat_eta, mirostat_m, &mirostat_mu);
+ } else if (mirostat == 2) {
+ static float mirostat_mu = 2.0f * mirostat_tau;
+ llama_sample_temperature(ctx, &cur_p, temp);
+ id = llama_sample_token_mirostat_v2(ctx, &cur_p, mirostat_tau, mirostat_eta, &mirostat_mu);
+ } else {
+ // Temperature sampling
+ llama_sample_top_k (ctx, &cur_p, top_k, 1);
+ llama_sample_tail_free (ctx, &cur_p, tfs_z, 1);
+ llama_sample_typical (ctx, &cur_p, typical_p, 1);
+ llama_sample_top_p (ctx, &cur_p, top_p, 1);
+ llama_sample_temperature(ctx, &cur_p, temp);
+
+ {
+ const int n_top = 10;
+ LOG("top %d candidates:\n", n_top);
+
+ for (int i = 0; i < n_top; i++) {
+ const llama_token id = cur_p.data[i].id;
+ LOG(" - %5d: '%12s' (%.3f)\n", id, llama_token_to_piece(ctx, id).c_str(), cur_p.data[i].p);
+ }
+ }
+
+ id = llama_sample_token(ctx, &cur_p);
+
+ LOG("sampled token: %5d: '%s'\n", id, llama_token_to_piece(ctx, id).c_str());
+ }
+ }
+ // printf("`%d`", candidates_p.size);
+
+ if (grammar != NULL) {
+ llama_grammar_accept_token(ctx, grammar, id);
+ }
+
+ return id;
+}
\ No newline at end of file
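
For reference, a minimal caller of the load_binding_model entry point added above could look like the sketch below. This is illustrative only: the model path and flag values are placeholders, and it assumes the llama.cpp revision this patch targets. Note that maingpu and tensorsplit are dereferenced with [0] inside load_binding_model, so empty strings (not null pointers) must be passed to leave them unset.

// Hypothetical caller of the binding above (placeholder model path).
#include "common.h"
#include <cstdio>

int main() {
    void * raw = load_binding_model(
        "models/7B/ggml-model-q4_0.bin", // fname: placeholder path
        2048,    // n_ctx: context window
        -1,      // n_seed: -1 conventionally requests a random seed
        true,    // memory_f16: keep the KV cache in f16
        false,   // mlock
        false,   // embeddings
        true,    // mmap
        false,   // low_vram
        0,       // n_gpu_layers: 0 = CPU only
        512,     // n_batch
        "",      // maingpu: "" (not NULL) skips the std::stoi parse
        "",      // tensorsplit: "" skips parsing; e.g. "60,40" splits two GPUs
        false,   // numa
        0.0f,    // rope_freq_base: 0 falls back to 10000.0f (see above)
        0.0f,    // rope_freq_scale: 0 falls back to 1.0f (see above)
        true,    // mul_mat_q: accepted but not forwarded by this patch
        "",      // lora
        "",      // lora_base
        false);  // perplexity
    if (raw == nullptr) {
        fprintf(stderr, "unable to load model\n");
        return 1;
    }
    llama_binding_state * state = static_cast<llama_binding_state *>(raw);
    // state->ctx and state->model are now ready for evaluation and sampling.
    return 0;
}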
diff --git a/common/common.h b/common/common.h
index 18aea38..ca7a168 100644
--- a/common/common.h
+++ b/common/common.h
@@ -209,3 +209,19 @@ std::string get_sortable_timestamp();
void dump_non_result_info_yaml(
FILE * stream, const gpt_params & params, const llama_context * lctx,
const std::string & timestamp, const std::vector<int> & prompt_tokens, const char * model_desc);
+
+struct llama_binding_state {
+ llama_context * ctx;
+ llama_model * model;
+};
+
+void* load_binding_model(const char *fname, int n_ctx, int n_seed, bool memory_f16, bool mlock, bool embeddings, bool mmap, bool low_vram, int n_gpu_layers, int n_batch, const char *maingpu, const char *tensorsplit, bool numa, float rope_freq_base, float rope_freq_scale, bool mul_mat_q, const char *lora, const char *lora_base, bool perplexity);
+
+llama_token llama_sample_token_binding(
+ struct llama_context * ctx,
+ struct llama_context * ctx_guidance,
+ struct llama_grammar * grammar,
+ const struct gpt_params * g_params,
+ const std::vector<llama_token> & last_tokens,
+ std::vector<llama_token_data> & candidates,
+ int idx = 0);
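
Similarly, a sketch of how llama_sample_token_binding might be driven per generated token. This fragment is illustrative only: it assumes a live llama_context * ctx whose logits are current (e.g. after an evaluation call), and the sampling values are placeholders.

// Hypothetical per-token sampling fragment (assumes ctx holds fresh logits).
std::vector<llama_token> last_tokens;       // tokens generated so far
std::vector<llama_token_data> candidates;   // scratch buffer, reused per call
candidates.reserve(llama_n_vocab(ctx));

gpt_params params;       // sampling settings read inside the binding
params.temp  = 0.8f;     // placeholder values
params.top_k = 40;
params.top_p = 0.95f;

const llama_token id = llama_sample_token_binding(
    ctx,
    /*ctx_guidance=*/nullptr,  // no classifier-free guidance
    /*grammar=*/nullptr,       // no grammar constraint
    &params,                   // by pointer: avoids copying gpt_params every token
    last_tokens,
    candidates,
    /*idx=*/0);                // which row of logits to sample from

last_tokens.push_back(id);     // feed back so repetition penalties see it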