Commit 21bc833

Merge pull request #17 from NexaAI/weili/master-release
fix OCR template error.
2 parents 667a6d9 + d04e354 commit 21bc833

3 files changed: +6 -116 lines
examples/omni-vlm/omni-vlm-cli.cpp

Lines changed: 6 additions & 7 deletions
@@ -149,7 +149,7 @@ static void process_prompt(struct omnivlm_context * ctx_omnivlm, struct omni_ima
             LOG_TEE("%6d -> '%s'\n", tmp[i], llama_token_to_piece(ctx_omnivlm->ctx_llama, tmp[i]).c_str());
         }
     }
-    LOG_TEE("user_prompt: %s\n", user_prompt.c_str());
+    // LOG_TEE("user_prompt: %s\n", user_prompt.c_str());
     if (params->verbose_prompt) {
         auto tmp = ::llama_tokenize(ctx_omnivlm->ctx_llama, user_prompt, true, true);
         for (int i = 0; i < (int) tmp.size(); i++) {
@@ -165,6 +165,9 @@ static void process_prompt(struct omnivlm_context * ctx_omnivlm, struct omni_ima
 
     LOG("\n");
 
+    params->sparams.temp = 0.0f;
+    params->sparams.top_k = 1;
+    params->sparams.top_p = 1.0f;
     struct llama_sampling_context * ctx_sampling = llama_sampling_init(params->sparams);
     if (!ctx_sampling) {
         LOG_TEE("%s: failed to initialize sampling subsystem\n", __func__);
@@ -177,8 +180,8 @@ static void process_prompt(struct omnivlm_context * ctx_omnivlm, struct omni_ima
         response += tmp;
         if (strcmp(tmp, "<|im_end|>") == 0) break;
         if (strcmp(tmp, "</s>") == 0) break;
-        // if (strstr(tmp, "###")) break; // Yi-VL behavior
         printf("%s", tmp);
+        // LOG("%s", tmp);
         // if (strstr(response.c_str(), "<|im_end|>")) break; // Yi-34B llava-1.6 - for some reason those decode not as the correct token (tokenizer works)
         // if (strstr(response.c_str(), "<|im_start|>")) break; // Yi-34B llava-1.6
         // if (strstr(response.c_str(), "USER:")) break; // mistral llava-1.6
@@ -265,7 +268,7 @@ int main(int argc, char ** argv) {
     }
 
     if (params.omni_vlm_version == "vlm-81-ocr") {
-        params.prompt = "<|im_start|>system\nYou are Nano-Omni-VLM, created by Nexa AI. You are a helpful assistant.<|im_end|>\n<|im_start|>user\n <|ocr_start|><|vision_start|><|image_pad|><|vision_end|><|ocr_end|><|im_end|>";
+        params.prompt = "<|im_start|>system\nYou are Nano-Omni-VLM, created by Nexa AI. You are a helpful assistant.<|im_end|>\n<|im_start|>user\n <|vision_start|><|image_pad|><|vision_end|><|im_end|>";
     } else if (params.omni_vlm_version == "vlm-81-instruct" || params.omni_vlm_version == "nano-vlm-instruct") {
         params.prompt = "<|im_start|>system\nYou are Nano-Omni-VLM, created by Nexa AI. You are a helpful assistant.<|im_end|>\n<|im_start|>user\n" + params.prompt + "\n<|vision_start|><|image_pad|><|vision_end|><|im_end|>";
     } else {
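
This is the template error the commit title refers to: the vlm-81-ocr prompt wrapped the image tokens in <|ocr_start|>/<|ocr_end|>; the fix drops those markers so the OCR prompt frames the image with the same <|vision_start|>/<|vision_end|> tokens as the instruct variants. A hypothetical helper (not part of the diff; token strings taken from it) that assembles both variants in one place:

#include <string>

// Hypothetical helper illustrating the corrected prompt templates.
static std::string make_prompt(const std::string & version, const std::string & user_text) {
    const std::string sys =
        "<|im_start|>system\nYou are Nano-Omni-VLM, created by Nexa AI. "
        "You are a helpful assistant.<|im_end|>\n<|im_start|>user\n";
    const std::string image = "<|vision_start|><|image_pad|><|vision_end|><|im_end|>";
    if (version == "vlm-81-ocr") {
        return sys + " " + image;             // no <|ocr_start|>/<|ocr_end|> markers
    }
    return sys + user_text + "\n" + image;    // vlm-81-instruct / nano-vlm-instruct
}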
@@ -282,10 +285,6 @@ int main(int argc, char ** argv) {
 
     auto * ctx_omnivlm = omnivlm_init_context(&params, model);
 
-    // temporarily set to greedy decoding.
-    params.sparams.top_k = 1;
-    params.sparams.top_p = 1.0f;
-
     for (auto & image : params.image) {
         auto * image_embed = load_image(ctx_omnivlm, &params, image);
         if (!image_embed) {

examples/omni-vlm/omni-vlm-wrapper.cpp

Lines changed: 0 additions & 4 deletions
@@ -222,8 +222,6 @@ static void print_usage(int argc, char ** argv, const gpt_params & params) {
 
 // inference interface definition
 void omnivlm_init(const char* llm_model_path, const char* projector_model_path, const char* omni_vlm_version) {
-    std::cout << "debug0 " << llm_model_path << std::endl;
-    std::cout << "debug1 " << omni_vlm_version << std::endl;
     const char* argv = "omni-wrapper-py";
     char* nc_argv = const_cast<char*>(argv);
     if (!gpt_params_parse(1, &nc_argv, params)) {
@@ -235,8 +233,6 @@ void omnivlm_init(const char* llm_model_path, const char* projector_model_path,
     params.omni_vlm_version = omni_vlm_version;
 
     std::string omni_vlm_ver = params.omni_vlm_version;
-    std::cout << "\t\t DEBUG omni_ver" << std::endl;
-    std::cout << params.omni_vlm_version << std::endl;
     if(omni_vlm_ver != "vlm-81-ocr" && omni_vlm_ver != "vlm-81-instruct" && omni_vlm_ver != "nano-vlm-instruct") {
         fprintf(stderr, "%s: error: you set wrong omni_vlm_string: %s\n", __func__, omni_vlm_version);
         fprintf(stderr, "%s: Valid omni_vlm_version set is ('vlm-81-ocr', 'vlm-81-instruct', 'nano-vlm-instruct')\n", __func__);

examples/omni-vlm/omni-vlm.cpp

Lines changed: 0 additions & 105 deletions
@@ -258,111 +258,6 @@ static bool encode_image_with_clip(clip_ctx * ctx_clip, int n_threads, const cli
 
     *n_img_pos = clip_n_patches(ctx_clip);
     bool encoded = clip_image_encode(ctx_clip, n_threads, &img_res_v.data[0], image_embd);
-    // cout << "\t\t A NICE START" << endl;
-    // cout << "\t\t" << *n_img_pos << endl;
-    /*
-    if (clip_is_minicpmv(ctx_clip)) {
-        std::vector<float *> image_embd_v;
-        image_embd_v.resize(img_res_v.size);
-        struct clip_image_size * load_image_size = clip_image_size_init();
-        for (size_t i = 0; i < img_res_v.size; i++) {
-            const int64_t t_img_enc_step_start_us = ggml_time_us();
-            image_embd_v[i] = (float *)malloc(clip_embd_nbytes(ctx_clip));
-            int patch_size=14;
-            load_image_size->width = img_res_v.data[i].nx;
-            load_image_size->height = img_res_v.data[i].ny;
-            clip_add_load_image_size(ctx_clip, load_image_size);
-            bool encoded = false;
-            int has_minicpmv_projector = clip_is_minicpmv(ctx_clip);
-            if (has_minicpmv_projector == 2) {
-                encoded = clip_image_encode(ctx_clip, n_threads, only_v2_5_reshape_by_patch(&img_res_v.data[i], patch_size), image_embd_v[i]);
-            }
-            else if (has_minicpmv_projector == 3) {
-                encoded = clip_image_encode(ctx_clip, n_threads, &img_res_v.data[i], image_embd_v[i]);
-            }
-            if (!encoded) {
-                LOG_ERR("Unable to encode image - spatial_unpad - subimage %d of %d\n", (int) i+1, (int) img_res_v.size);
-                return false;
-            }
-            const int64_t t_img_enc_steop_batch_us = ggml_time_us();
-            LOG_INF("%s: step %d of %d encoded in %8.2f ms\n", __func__, (int)i+1, (int)img_res_v.size, (t_img_enc_steop_batch_us - t_img_enc_step_start_us) / 1000.0);
-        }
-        const int64_t t_img_enc_batch_us = ggml_time_us();
-        LOG_INF("%s: all %d segments encoded in %8.2f ms\n", __func__, (int)img_res_v.size, (t_img_enc_batch_us - t_img_enc_start_us) / 1000.0);
-
-        int n_img_pos_out = 0;
-        for (size_t i = 0; i < image_embd_v.size(); i++) {
-            std::memcpy(image_embd + n_img_pos_out * clip_n_mmproj_embd(ctx_clip), image_embd_v[i], clip_embd_nbytes(ctx_clip));
-            n_img_pos_out += clip_n_patches(ctx_clip);
-        }
-        *n_img_pos = n_img_pos_out;
-        for (size_t i = 0; i < image_embd_v.size(); i++) {
-            free(image_embd_v[i]);
-        }
-        image_embd_v.clear();
-        load_image_size->width = img->nx;
-        load_image_size->height = img->ny;
-        clip_add_load_image_size(ctx_clip, load_image_size);
-        LOG_INF("%s: load_image_size %d %d\n", __func__, load_image_size->width, load_image_size->height);
-    }
-    else if (strcmp(mm_patch_merge_type, "spatial_unpad") != 0) {
-        // flat / default llava-1.5 type embedding
-        *n_img_pos = clip_n_patches(ctx_clip);
-        bool encoded = clip_image_encode(ctx_clip, n_threads, &img_res_v.data[0], image_embd); // image_embd shape is 576 x 4096
-        delete[] img_res_v.data;
-        if (!encoded) {
-            LOG_ERR("Unable to encode image\n");
-
-            return false;
-        }
-    }
-    else {
-        // spatial_unpad llava-1.6 type embedding
-        // TODO: CLIP needs batching support - in HF the llm projection is separate after encoding, which might be a solution to quickly get batching working
-        std::vector<float *> image_embd_v;
-        image_embd_v.resize(img_res_v.size);
-        for (size_t i = 0; i < img_res_v.size; i++) {
-            image_embd_v[i] = (float *)malloc(clip_embd_nbytes(ctx_clip)); // 576 patches * 4096 embeddings * 4 bytes = 9437184
-            const bool encoded = clip_image_encode(ctx_clip, n_threads, &img_res_v.data[i], image_embd_v[i]); // image data is in 3x336x336 format and will be converted to 336x336x3 inside
-            if (!encoded) {
-                LOG_ERR("Unable to encode image - spatial_unpad - subimage %d of %d\n", (int) i+1, (int) img_res_v.size);
-                return false;
-            }
-        }
-        const int64_t t_img_enc_batch_us = ggml_time_us();
-        LOG_INF("%s: %d segments encoded in %8.2f ms\n", __func__, (int)img_res_v.size, (t_img_enc_batch_us - t_img_enc_start_us) / 1000.0);
-
-        const int32_t * image_grid = clip_image_grid(ctx_clip);
-
-        std::vector<std::pair<int, int>> grid_pinpoints;
-        for (int i = 0; i < 32 && image_grid[i] != 0; i += 2) {
-            grid_pinpoints.push_back({image_grid[i], image_grid[i+1]});
-        }
-
-        // free all img_res_v - not needed anymore
-        delete[] img_res_v.data;
-        img_res_v.size = 0;
-        img_res_v.data = nullptr;
-
-        const int32_t image_size = clip_image_size(ctx_clip);
-
-        struct clip_image_grid_shape grid_shape = get_anyres_image_grid_shape({img->nx,img->ny}, grid_pinpoints, image_size);
-
-        int n_img_pos_out;
-        clip_llava_handle_patches(ctx_clip, image_embd_v, grid_shape, image_embd, &n_img_pos_out);
-        *n_img_pos = n_img_pos_out;
-
-        for (size_t i = 0; i < image_embd_v.size(); i++) {
-            free(image_embd_v[i]);
-        }
-        image_embd_v.clear();
-
-        // debug image/segment/normalization content:
-        // clip_image_u8 * tmp = clip_image_u8_init();
-        // clip_image_convert_f32_to_u8(*image_feature, *tmp);
-        // clip_image_save_to_bmp(*tmp, "image_feature.bmp");
-    }
-    */
 
     LOG("%s: image embedding created: %d tokens\n", __func__, *n_img_pos);
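
The 105 deleted lines were a commented-out block carried over from the LLaVA example (MiniCPM-V, flat llava-1.5, and spatial_unpad llava-1.6 handling); it was dead code, since only the flat encode path above it ever ran. A stubbed sketch of the path that remains, with stand-in clip_* signatures rather than the real clip.h API:

#include <cstdio>
#include <vector>

// Stand-ins (assumed shapes, illustrative values) for the clip.h API.
struct clip_ctx;
struct clip_image_f32 { int nx = 336, ny = 336; };
static int  clip_n_patches(clip_ctx *) { return 576; }
static bool clip_image_encode(clip_ctx *, int, clip_image_f32 *, float *) { return true; }

// The surviving control flow: one flat encode, patch count reported as tokens.
static bool encode_image(clip_ctx * ctx, int n_threads, clip_image_f32 * img,
                         float * image_embd, int * n_img_pos) {
    *n_img_pos = clip_n_patches(ctx);   // one embedding slot per image patch
    if (!clip_image_encode(ctx, n_threads, img, image_embd)) {
        fprintf(stderr, "Unable to encode image\n");
        return false;
    }
    printf("image embedding created: %d tokens\n", *n_img_pos);
    return true;
}

int main() {
    clip_image_f32 img;
    std::vector<float> embd(576 * 4096);   // 576 patches x 4096-dim projection
    int n_img_pos = 0;
    return encode_image(nullptr, 4, &img, embd.data(), &n_img_pos) ? 0 : 1;
}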
