from lmdeploy import ChatTemplateConfig, TurbomindEngineConfig
from lmdeploy.serve.openai.api_server import serve
import argparse
import torch
import os.path as osp
import mmengine
# from max_cache import MAXCache
from pathlib import Path

"""
anomaly cmd
python projects/mllm_deploy/agent_script/internvl2.lmdeploy.run.py --port 23333 --model anomaly_240910=/mnt/xnncloud/torin/workdirs/internvl_chat_v2_0/lora_3opt_v5_240910_merged
python projects/mllm_deploy/agent_script/internvl2.lmdeploy.run.py --port 23334 --model numvqa_240920=/mnt/xnncloud/junxuan/mllm_checkpoints/internvl2/internvl2_8b_lora_task1_and_task2_mlp_0920_merged
"""


def modify_cfg(model_path):
    """Disable `use_thumbnail` in the checkpoint's config.json before serving."""
    cfg_fn = osp.join(model_path, "config.json")
    cfg = mmengine.load(cfg_fn)
    if cfg.get("use_thumbnail"):
        print("changing default use_thumbnail in config to False")
        cfg["use_thumbnail"] = False
        mmengine.dump(cfg, cfg_fn)


if __name__ == "__main__":
    server_name = "127.0.0.1"
    server_port = 23333
    model_path = "path_to_internvl35"  # placeholder: path to the merged checkpoint directory
    # Serve the model under the name of the checkpoint's grandparent directory.
    model_id = Path(model_path).parent.parent.stem
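    # The docstring launches this script with `--port` and `--model name=path`;
    # below is a minimal argparse sketch of that CLI (assumed contract), defaulting
    # to the hard-coded values above so running without flags behaves the same.
    parser = argparse.ArgumentParser()
    parser.add_argument("--port", type=int, default=server_port)
    parser.add_argument(
        "--model", type=str, default=f"{model_id}={model_path}",
        help="served model in the form model_id=model_path")
    args = parser.parse_args()
    server_port = args.port
    model_id, model_path = args.model.split("=", 1)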
    # cfg = TurbomindEngineConfig(cache_max_entry_count=0.05,quant_policy=8)
    cfg = TurbomindEngineConfig(cache_max_entry_count=0.9, session_len=65536)
    # model_path = MAXCache(model_path).local_cache_path()
    modify_cfg(model_path)
    serve(
        model_path,
        model_id,
        server_name=server_name,
        backend_config=cfg,
        server_port=server_port,
        # chat_template_config=ChatTemplateConfig("internvl2_5"),
        # chat_template_config=ChatTemplateConfig("internvl2-internlm2"),
        tp=torch.cuda.device_count(),  # tensor parallelism across all visible GPUs
        backend="turbomind",
    )
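
    # Once `serve` is up it exposes an OpenAI-compatible API at
    # http://{server_name}:{server_port}/v1. A minimal client-side sanity check
    # (a sketch assuming lmdeploy's APIClient; run it from a separate process):
    #   from lmdeploy.serve.openai.api_client import APIClient
    #   client = APIClient(f"http://{server_name}:{server_port}")
    #   print(client.available_models)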
