From 82d816a736d285a27c4b2e79a01d6ea1779ae0cd Mon Sep 17 00:00:00 2001 From: Jintao Huang Date: Tue, 21 Oct 2025 11:47:30 +0800 Subject: [PATCH 01/13] update --- swift/llm/model/constant.py | 1 + swift/llm/model/model/deepseek.py | 14 ++++++++++++++ swift/llm/model/model_arch.py | 9 +++++++++ swift/llm/template/constant.py | 1 + 4 files changed, 25 insertions(+) diff --git a/swift/llm/model/constant.py b/swift/llm/model/constant.py index 733e2a5193..de482635fc 100644 --- a/swift/llm/model/constant.py +++ b/swift/llm/model/constant.py @@ -231,6 +231,7 @@ class MLLMModelType: deepseek_vl2 = 'deepseek_vl2' deepseek_janus = 'deepseek_janus' deepseek_janus_pro = 'deepseek_janus_pro' + deepseek_ocr = 'deepseek_ocr' minicpmv = 'minicpmv' minicpmv2_5 = 'minicpmv2_5' diff --git a/swift/llm/model/model/deepseek.py b/swift/llm/model/model/deepseek.py index 3ef6fdce69..9dbac8bdfa 100644 --- a/swift/llm/model/model/deepseek.py +++ b/swift/llm/model/model/deepseek.py @@ -317,3 +317,17 @@ def get_model_tokenizer_deepseek_vl2(model_dir: str, *args, **kwargs): architectures=['Qwen2ForCausalLM', 'LlamaForCausalLM', 'Qwen3ForCausalLM'], model_arch=ModelArch.llama, )) + +register_model( + ModelMeta( + MLLMModelType.deepseek_ocr, + [ + ModelGroup([ + Model('deepseek-ai/DeepSeek-OCR', 'deepseek-ai/DeepSeek-OCR'), + ]), + ], + TemplateType.deepseek_ocr, + get_model_tokenizer_with_flash_attn, + model_arch=ModelArch.deepseek_ocr, + tags=['vision'], + )) diff --git a/swift/llm/model/model_arch.py b/swift/llm/model/model_arch.py index b0b03765e0..4942f4922a 100644 --- a/swift/llm/model/model_arch.py +++ b/swift/llm/model/model_arch.py @@ -57,6 +57,7 @@ class MLLMModelArch: deepseek_vl = 'deepseek_vl' deepseek_vl2 = 'deepseek_vl2' deepseek_janus = 'deepseek_janus' + deepseek_ocr = 'deepseek_ocr' mplug_owl2 = 'mplug_owl2' mplug_owl2_1 = 'mplug_owl2_1' @@ -435,6 +436,14 @@ def register_model_arch(model_arch: ModelKeys, *, exist_ok: bool = False) -> Non aligner='aligner', generator=['gen_vision_model', 'gen_aligner', 'gen_head', 'gen_embed'])) +register_model_arch( + MultiModelKeys( + MLLMModelArch.deepseek_ocr, + language_model='model.layers', + vision_tower=['model.sam_model', 'model.vision_model'], + aligner='model.projector', + )) + register_model_arch( MultiModelKeys( MLLMModelArch.deepseek_vl2, diff --git a/swift/llm/template/constant.py b/swift/llm/template/constant.py index 3900b56341..b7095db1d3 100644 --- a/swift/llm/template/constant.py +++ b/swift/llm/template/constant.py @@ -190,6 +190,7 @@ class MLLMTemplateType: deepseek_vl2 = 'deepseek_vl2' deepseek_janus = 'deepseek_janus' deepseek_janus_pro = 'deepseek_janus_pro' + deepseek_ocr = 'deepseek_ocr' mplug_owl2 = 'mplug_owl2' mplug_owl3 = 'mplug_owl3' From 36012219b32b345598002b832aedaa179aaea93e Mon Sep 17 00:00:00 2001 From: Jintao Huang Date: Tue, 21 Oct 2025 11:48:50 +0800 Subject: [PATCH 02/13] update --- swift/llm/template/template/deepseek.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/swift/llm/template/template/deepseek.py b/swift/llm/template/template/deepseek.py index 8ba4b666a5..efa0e06761 100644 --- a/swift/llm/template/template/deepseek.py +++ b/swift/llm/template/template/deepseek.py @@ -234,6 +234,13 @@ class DeepseekJanus(DeepseekVLTemplate): register_template(DeepseekVLTemplateMeta(MLLMTemplateType.deepseek_janus, template_cls=DeepseekJanus)) +class DeepseekOCR(Template): + pass + + +register_template(TemplateMeta(MLLMTemplateType.deepseek_ocr, template_cls=DeepseekOCR)) + + @dataclass class 
DeepseekV2_5TemplateMeta(TemplateMeta): prefix: Prompt = field(default_factory=lambda: ['<|begin▁of▁sentence|>{{SYSTEM}}']) From e739f28cf93a92d8d660848bb24ea60e8a1ce320 Mon Sep 17 00:00:00 2001 From: Jintao Huang Date: Tue, 21 Oct 2025 13:34:27 +0800 Subject: [PATCH 03/13] update --- swift/llm/template/template/deepseek.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/swift/llm/template/template/deepseek.py b/swift/llm/template/template/deepseek.py index efa0e06761..85f084061e 100644 --- a/swift/llm/template/template/deepseek.py +++ b/swift/llm/template/template/deepseek.py @@ -235,8 +235,9 @@ class DeepseekJanus(DeepseekVLTemplate): class DeepseekOCR(Template): - pass - + def _encode(self, inputs: StdTemplateInputs) -> Dict[str, Any]: + encoded = super()._encode(inputs) + print() register_template(TemplateMeta(MLLMTemplateType.deepseek_ocr, template_cls=DeepseekOCR)) From 6b463e5ac579142fd2d69dfbff93dd2f87c26b5e Mon Sep 17 00:00:00 2001 From: Jintao Huang Date: Tue, 21 Oct 2025 14:03:12 +0800 Subject: [PATCH 04/13] update --- swift/llm/model/model/deepseek.py | 7 ++++++- swift/llm/template/template/deepseek.py | 10 +++++++++- tests/test_align/test_template/test_vision.py | 12 +++++++++++- 3 files changed, 26 insertions(+), 3 deletions(-) diff --git a/swift/llm/model/model/deepseek.py b/swift/llm/model/model/deepseek.py index 9dbac8bdfa..bccb2d594d 100644 --- a/swift/llm/model/model/deepseek.py +++ b/swift/llm/model/model/deepseek.py @@ -318,6 +318,11 @@ def get_model_tokenizer_deepseek_vl2(model_dir: str, *args, **kwargs): model_arch=ModelArch.llama, )) +def get_model_tokenizer_deepseek_ocr(*args, **kwargs): + from transformers import AutoModel + kwargs['automodel_class'] = kwargs['automodel_class'] or AutoModel + return get_model_tokenizer_with_flash_attn(*args, **kwargs) + register_model( ModelMeta( MLLMModelType.deepseek_ocr, @@ -327,7 +332,7 @@ def get_model_tokenizer_deepseek_vl2(model_dir: str, *args, **kwargs): ]), ], TemplateType.deepseek_ocr, - get_model_tokenizer_with_flash_attn, + get_model_tokenizer_deepseek_ocr, model_arch=ModelArch.deepseek_ocr, tags=['vision'], )) diff --git a/swift/llm/template/template/deepseek.py b/swift/llm/template/template/deepseek.py index 85f084061e..65e4a364c0 100644 --- a/swift/llm/template/template/deepseek.py +++ b/swift/llm/template/template/deepseek.py @@ -235,11 +235,19 @@ class DeepseekJanus(DeepseekVLTemplate): class DeepseekOCR(Template): + image_placeholder = ['\n'] def _encode(self, inputs: StdTemplateInputs) -> Dict[str, Any]: encoded = super()._encode(inputs) print() -register_template(TemplateMeta(MLLMTemplateType.deepseek_ocr, template_cls=DeepseekOCR)) + +register_template( + TemplateMeta( + MLLMTemplateType.deepseek_ocr, + prefix=['<|begin▁of▁sentence|>'], + prompt=['{{QUERY}}'], + chat_sep=None, + template_cls=DeepseekOCR)) @dataclass diff --git a/tests/test_align/test_template/test_vision.py b/tests/test_align/test_template/test_vision.py index f0d0da308c..1172b12b19 100644 --- a/tests/test_align/test_template/test_vision.py +++ b/tests/test_align/test_template/test_vision.py @@ -963,13 +963,22 @@ def test_sailvl2(): assert ans in response +def test_deepseek_ocr(): + pt_engine = PtEngine('deepseek-ai/DeepSeek-OCR') + query = 'describe the image' + messages = [{'role': 'user', 'content': query}] + images = ['http://modelscope-open.oss-cn-hangzhou.aliyuncs.com/images/cat.png'] + response = _infer_model(pt_engine, messages=messages, images=images) + assert ans in response + + if __name__ == '__main__': from 
swift.llm import PtEngine, RequestConfig from swift.utils import get_logger, seed_everything logger = get_logger() # test_qwen2_vl() - test_qwen2_5_vl_batch_infer() + # test_qwen2_5_vl_batch_infer() # test_qwen2_5_omni() # test_qwen3_omni() # test_qwen3_omni_audio() @@ -1034,3 +1043,4 @@ def test_sailvl2(): # test_internvl3_5_hf() # test_internvl_gpt_hf() # test_sailvl2() + test_deepseek_ocr() From a4916fdd69e97fd8a05cce29ac8b3a00a074abb7 Mon Sep 17 00:00:00 2001 From: Jintao Huang Date: Tue, 21 Oct 2025 14:19:22 +0800 Subject: [PATCH 05/13] update --- swift/llm/model/model/deepseek.py | 2 + swift/llm/template/template/deepseek.py | 115 +++++++++++++++++++++++- 2 files changed, 116 insertions(+), 1 deletion(-) diff --git a/swift/llm/model/model/deepseek.py b/swift/llm/model/model/deepseek.py index bccb2d594d..3c13af0eef 100644 --- a/swift/llm/model/model/deepseek.py +++ b/swift/llm/model/model/deepseek.py @@ -318,11 +318,13 @@ def get_model_tokenizer_deepseek_vl2(model_dir: str, *args, **kwargs): model_arch=ModelArch.llama, )) + def get_model_tokenizer_deepseek_ocr(*args, **kwargs): from transformers import AutoModel kwargs['automodel_class'] = kwargs['automodel_class'] or AutoModel return get_model_tokenizer_with_flash_attn(*args, **kwargs) + register_model( ModelMeta( MLLMModelType.deepseek_ocr, diff --git a/swift/llm/template/template/deepseek.py b/swift/llm/template/template/deepseek.py index 65e4a364c0..b2f4f17739 100644 --- a/swift/llm/template/template/deepseek.py +++ b/swift/llm/template/template/deepseek.py @@ -1,11 +1,13 @@ # Copyright (c) Alibaba, Inc. and its affiliates. +import math from dataclasses import dataclass, field from typing import Any, Dict, List, Optional import numpy as np import torch import torch.nn as nn -from PIL import Image +from PIL import Image, ImageDraw, ImageFont, ImageOps +from transformers.dynamic_module_utils import get_class_from_dynamic_module from swift.utils import get_env_args from ..base import Template @@ -236,6 +238,117 @@ class DeepseekJanus(DeepseekVLTemplate): class DeepseekOCR(Template): image_placeholder = ['\n'] + + def init_env_args(self): + model_dir = self.model_info.model_dir + self.BasicImageTransform = get_class_from_dynamic_module('modeling_deepseekocr.BasicImageTransform', model_dir) + self.dynamic_preprocess = get_class_from_dynamic_module('modeling_deepseekocr.dynamic_preprocess', model_dir) + self.crop_mode = get_env_args('crop_mode', bool, True) + self.base_size = get_env_args('base_size', int, 1024) + self.image_size = get_env_args('image_size', int, 640) + + def _load_image(self, images): + # Code borrowed from + # https://modelscope.cn/models/deepseek-ai/DeepSeek-OCR/file/view/master/modeling_deepseekocr.py?status=1 + crop_mode = True + patch_size = 16 + downsample_ratio = 4 + valid_img_tokens = 0 + + image_draw = images[0].copy() + + w, h = image_draw.size + ratio = 1 - ((max(w, h) - min(w, h)) / (max(w, h))) + + image_transform = self.BasicImageTransform(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), normalize=True) + image_token_id = 128815 + images_list, images_crop_list = [], [] + tokenized_str = [] + images_spatial_crop = [] + for image in images: + if crop_mode: + if image.size[0] <= 640 and image.size[1] <= 640: + crop_ratio = [1, 1] + else: + if crop_mode: + images_crop_raw, crop_ratio = self.dynamic_preprocess(image) + else: + crop_ratio = [1, 1] + """process the global view""" + # image = image.resize((base_size, base_size)) + global_view = ImageOps.pad( + image, (self.base_size, self.base_size), color=tuple(int(x 
* 255) for x in image_transform.mean)) + + if self.base_size == 1024: + valid_img_tokens += int(256 * ratio) + elif self.base_size == 1280: + valid_img_tokens += int(400 * ratio) + + images_list.append(image_transform(global_view).to(torch.bfloat16)) + width_crop_num, height_crop_num = crop_ratio + + images_spatial_crop.append([width_crop_num, height_crop_num]) + + if width_crop_num > 1 or height_crop_num > 1: + """process the local views""" + + for i in range(len(images_crop_raw)): + images_crop_list.append(image_transform(images_crop_raw[i]).to(torch.bfloat16)) + + if self.image_size == 640: + valid_img_tokens += len(images_crop_list) * 100 + + num_queries = math.ceil((self.image_size // patch_size) / downsample_ratio) + num_queries_base = math.ceil((self.base_size // patch_size) / downsample_ratio) + """add image tokens""" + + tokenized_image = ([image_token_id] * num_queries_base + [image_token_id]) * num_queries_base + tokenized_image += [image_token_id] + if width_crop_num > 1 or height_crop_num > 1: + tokenized_image += ([image_token_id] * (num_queries * width_crop_num) + [image_token_id]) * ( + num_queries * height_crop_num) + tokenized_str += tokenized_image + else: + """process the global view""" + if self.image_size <= 640: + image = image.resize((self.image_size, self.image_size)) + # else: + global_view = ImageOps.pad( + image, (self.image_size, self.image_size), color=tuple(int(x * 255) for x in image_transform.mean)) + images_list.append(image_transform(global_view).to(torch.bfloat16)) + + if self.base_size == 1024: + valid_img_tokens += int(256 * ratio) + elif self.base_size == 1280: + valid_img_tokens += int(400 * ratio) + elif self.base_size == 640: + valid_img_tokens += int(100 * 1) + elif self.base_size == 512: + valid_img_tokens += int(64 * 1) + + width_crop_num, height_crop_num = 1, 1 + + images_spatial_crop.append([width_crop_num, height_crop_num]) + """add image tokens""" + num_queries = math.ceil((self.image_size // patch_size) / downsample_ratio) + + tokenized_image = ([image_token_id] * num_queries + [image_token_id]) * num_queries + tokenized_image += [image_token_id] + tokenized_str += tokenized_image + if len(images_list) == 0: + images_ori = torch.zeros((1, 3, self.image_size, self.image_size)) + images_spatial_crop = torch.zeros((1, 2), dtype=torch.long) + images_crop = torch.zeros((1, 3, self.base_size, self.base_size)) + + else: + images_ori = torch.stack(images_list, dim=0) + images_spatial_crop = torch.tensor(images_spatial_crop, dtype=torch.long) + if images_crop_list: + images_crop = torch.stack(images_crop_list, dim=0) + else: + images_crop = torch.zeros((1, 3, self.base_size, self.base_size)) + return tokenized_str, images_ori, images_crop, images_spatial_crop + def _encode(self, inputs: StdTemplateInputs) -> Dict[str, Any]: encoded = super()._encode(inputs) print() From bbd1bdb760ff6bc692aaf27b1d401c15dc57a2a6 Mon Sep 17 00:00:00 2001 From: Jintao Huang Date: Tue, 21 Oct 2025 14:20:08 +0800 Subject: [PATCH 06/13] update --- swift/llm/template/template/deepseek.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/swift/llm/template/template/deepseek.py b/swift/llm/template/template/deepseek.py index 65e4a364c0..c002f05428 100644 --- a/swift/llm/template/template/deepseek.py +++ b/swift/llm/template/template/deepseek.py @@ -238,7 +238,12 @@ class DeepseekOCR(Template): image_placeholder = ['\n'] def _encode(self, inputs: StdTemplateInputs) -> Dict[str, Any]: encoded = super()._encode(inputs) - print() + input_ids = 
encoded['input_ids'] + image_token = self._tokenize('') + idx_list = findall(input_ids, image_token) + if idx_list: + pass + register_template( From 8be1b89634cf690b885e40f354c103aa09362e20 Mon Sep 17 00:00:00 2001 From: Jintao Huang Date: Tue, 21 Oct 2025 15:11:58 +0800 Subject: [PATCH 07/13] update --- ...44\350\241\214\345\217\202\346\225\260.md" | 2 +- ...44\350\241\214\345\217\202\346\225\260.md" | 2 +- .../Instruction/Command-line-parameters.md | 2 +- .../Megatron-SWIFT/Command-line-parameters.md | 2 +- swift/llm/model/model/deepseek.py | 7 ++++- swift/llm/template/template/deepseek.py | 29 +++++++++++++++++-- 6 files changed, 36 insertions(+), 8 deletions(-) diff --git "a/docs/source/Instruction/\345\221\275\344\273\244\350\241\214\345\217\202\346\225\260.md" "b/docs/source/Instruction/\345\221\275\344\273\244\350\241\214\345\217\202\346\225\260.md" index 22630be907..b4ea6d10e6 100644 --- "a/docs/source/Instruction/\345\221\275\344\273\244\350\241\214\345\217\202\346\225\260.md" +++ "b/docs/source/Instruction/\345\221\275\344\273\244\350\241\214\345\217\202\346\225\260.md" @@ -460,7 +460,7 @@ RLHF参数继承于[训练参数](#训练参数)。 - label_smoothing: 是否使用DPO smoothing,默认值为`0`。 - max_completion_length: GRPO/PPO/GKD算法中的最大生成长度,默认为512。 - 🔥rpo_alpha: 来自[RPO 论文](https://arxiv.org/abs/2404.19733)中的参数,用于控制损失函数中NLL项的权重(即SFT损失),`loss = dpo_loss + rpo_alpha * sft_loss`,论文中推荐设置为`1.`。默认为`None`,即默认不引入sft_loss。 - - 注意:在"ms-swift<3.8",其默认值为`1.`。在"ms-swift>=3.8"该默认值修改为`None`。 + - **注意**:在"ms-swift<3.8",其默认值为`1.`。在"ms-swift>=3.8"该默认值修改为`None`。 - ld_alpha: 来自[LD-DPO 论文](https://arxiv.org/abs/2409.06411),对超出公共前缀部分的logps加权 $\alpha$ 抑制长度偏好。 - discopop_tau: 来自 [DiscoPOP 论文](https://arxiv.org/abs/2406.08414)的温度参数 $\tau$ ,用于缩放 log-ratio。默认值0.05。在 loss_type 为 discopop 时生效。 - loss_type: 损失类型。默认为None,使用不同的rlhf算法,其默认值不同。 diff --git "a/docs/source/Megatron-SWIFT/\345\221\275\344\273\244\350\241\214\345\217\202\346\225\260.md" "b/docs/source/Megatron-SWIFT/\345\221\275\344\273\244\350\241\214\345\217\202\346\225\260.md" index 5d91c7bc45..10e8e2eb46 100644 --- "a/docs/source/Megatron-SWIFT/\345\221\275\344\273\244\350\241\214\345\217\202\346\225\260.md" +++ "b/docs/source/Megatron-SWIFT/\345\221\275\344\273\244\350\241\214\345\217\202\346\225\260.md" @@ -245,7 +245,7 @@ lora训练: - ref_adapter_load: 加载ref_adapter的权重路径,默认为None。若你要使用SFT产生的LoRA权重进行DPO,请使用"ms-swift>=3.8",并在训练时设置`--adapter_load sft_ckpt --ref_adapter_load sft_ckpt --finetune true`。若是此场景的断点续训,则设置`--adapter_load rlhf_ckpt --ref_adapter_load sft_ckpt --finetune false`。 - beta: 含义与[TRL](https://huggingface.co/docs/trl/main/en/dpo_trainer#trl.DPOConfig)相同。控制与参考模型偏差程度的参数。beta值越高,表示与参考模型的偏差越小。对于 IPO 损失函数 (loss_type="ipo"),beta是[论文](https://huggingface.co/papers/2310.12036)中所指的正则化参数。默认为0.1。 - 🔥rpo_alpha: 来自[RPO 论文](https://huggingface.co/papers/2404.19733)中的参数,用于控制损失函数中NLL项的权重(即SFT损失),`loss = dpo_loss + rpo_alpha * sft_loss`,论文中推荐设置为`1.`。默认为`None`,即默认不引入sft_loss。 - - 注意:在"ms-swift<3.8",其默认值为`1.`。在"ms-swift>=3.8"该默认值修改为`None`。 + - **注意**:在"ms-swift<3.8",其默认值为`1.`。在"ms-swift>=3.8"该默认值修改为`None`。 - reference_free: 是否忽略提供的参考模型,并隐式地使用一个对所有响应赋予相等概率的参考模型。默认为False。 - label_smoothing: 默认为0.。 - f_divergence_type: 默认为`reverse_kl`。可选值参考[TRL文档](https://huggingface.co/docs/trl/main/en/dpo_trainer)。 diff --git a/docs/source_en/Instruction/Command-line-parameters.md b/docs/source_en/Instruction/Command-line-parameters.md index 9482ad38d8..09c1349d4f 100644 --- a/docs/source_en/Instruction/Command-line-parameters.md +++ b/docs/source_en/Instruction/Command-line-parameters.md @@ -470,7 +470,7 @@ 
RLHF arguments inherit from the [training arguments](#training-arguments). - 🔥rpo_alpha: A parameter from the [RPO paper](https://arxiv.org/abs/2404.19733) that controls the weight of the NLL term (i.e., the SFT loss) in the loss function, where `loss = dpo_loss + rpo_alpha * sft_loss`. The paper recommends setting it to `1.`. The default value is `None`, meaning the SFT loss is not included by default. - ld_alpha: From the [LD-DPO paper](https://arxiv.org/abs/2409.06411). Applies a weight α < 1 to the log-probabilities of tokens that lie beyond the shared prefix of the chosen and rejected responses, thereby mitigating length bias. - discopop_tau: Temperature parameter τ from the [DiscoPOP paper](https://arxiv.org/abs/2406.08414) used to scale the log-ratio before the sigmoid modulation. Default 0.05; only active when loss_type is discopop. - - Note: In "ms-swift<3.8", the default value was `1.`. Starting from "ms-swift>=3.8", the default has been changed to `None`. + - **Note**: In "ms-swift<3.8", the default value was `1.`. Starting from "ms-swift>=3.8", the default has been changed to `None`. - loss_type: Type of loss function. Default is None, with different defaults depending on the RLHF algorithm used. - DPO: Available options can be found in the [documentation](https://huggingface.co/docs/trl/main/en/dpo_trainer#loss-functions). Multiple values can be provided to enable mixed training ([MPO](https://arxiv.org/abs/2411.10442)); when multiple values are given, the loss_weights parameter must also be set. Default is `sigmoid`. - GRPO: See [GRPO parameters](#grpo-arguments) for reference. diff --git a/docs/source_en/Megatron-SWIFT/Command-line-parameters.md b/docs/source_en/Megatron-SWIFT/Command-line-parameters.md index 76a6dcbfc4..9e8038d18d 100644 --- a/docs/source_en/Megatron-SWIFT/Command-line-parameters.md +++ b/docs/source_en/Megatron-SWIFT/Command-line-parameters.md @@ -260,7 +260,7 @@ LoRA Training: - ref_adapter_load: The path to load the ref_adapter weights, default is `None`. If you want to use LoRA weights generated from SFT for DPO, please use "ms-swift>=3.8" and set `--adapter_load sft_ckpt --ref_adapter_load sft_ckpt --finetune true` during training. For resuming training from a checkpoint in this scenario, set `--adapter_load rlhf_ckpt --ref_adapter_load sft_ckpt --finetune false`. - beta: Has the same meaning as in [TRL](https://huggingface.co/docs/trl/main/en/dpo_trainer#trl.DPOConfig). It controls the degree of deviation from the reference model. A higher beta value indicates less deviation from the reference model. For the IPO loss function (`loss_type="ipo"`), beta is the regularization parameter as mentioned in the [paper](https://huggingface.co/papers/2310.12036). Default is 0.1. - 🔥rpo_alpha: A parameter from the [RPO paper](https://huggingface.co/papers/2404.19733) that controls the weight of the NLL term (i.e., the SFT loss) in the loss function, where `loss = dpo_loss + rpo_alpha * sft_loss`. The paper recommends setting it to `1.`. The default value is `None`, meaning the SFT loss is not included by default. - - Note: In "ms-swift<3.8", the default value was `1.`. Starting from "ms-swift>=3.8", the default has been changed to `None`. + - **Note**: In "ms-swift<3.8", the default value was `1.`. Starting from "ms-swift>=3.8", the default has been changed to `None`. - reference_free: Whether to ignore the provided reference model and implicitly use a reference model that assigns equal probability to all responses. Default is `False`. 
- label_smoothing: Default is 0. - f_divergence_type: Default is `reverse_kl`. See the [TRL documentation](https://huggingface.co/docs/trl/main/en/dpo_trainer) for possible values. diff --git a/swift/llm/model/model/deepseek.py b/swift/llm/model/model/deepseek.py index 3c13af0eef..ff22846019 100644 --- a/swift/llm/model/model/deepseek.py +++ b/swift/llm/model/model/deepseek.py @@ -322,7 +322,12 @@ def get_model_tokenizer_deepseek_vl2(model_dir: str, *args, **kwargs): def get_model_tokenizer_deepseek_ocr(*args, **kwargs): from transformers import AutoModel kwargs['automodel_class'] = kwargs['automodel_class'] or AutoModel - return get_model_tokenizer_with_flash_attn(*args, **kwargs) + model, tokenizer = get_model_tokenizer_with_flash_attn(*args, **kwargs) + if model is not None: + patch_output_to_input_device(model.model.sam_model) + patch_output_to_input_device(model.model.vision_model) + patch_output_to_input_device(model.model.projector) + return model, tokenizer register_model( diff --git a/swift/llm/template/template/deepseek.py b/swift/llm/template/template/deepseek.py index eac389ab40..98e1e17d22 100644 --- a/swift/llm/template/template/deepseek.py +++ b/swift/llm/template/template/deepseek.py @@ -247,7 +247,7 @@ def init_env_args(self): self.base_size = get_env_args('base_size', int, 1024) self.image_size = get_env_args('image_size', int, 640) - def _load_image(self, images): + def _preprocess_image(self, images): # Code borrowed from # https://modelscope.cn/models/deepseek-ai/DeepSeek-OCR/file/view/master/modeling_deepseekocr.py?status=1 crop_mode = True @@ -307,7 +307,7 @@ def _load_image(self, images): if width_crop_num > 1 or height_crop_num > 1: tokenized_image += ([image_token_id] * (num_queries * width_crop_num) + [image_token_id]) * ( num_queries * height_crop_num) - tokenized_str += tokenized_image + tokenized_str.append(tokenized_image) else: """process the global view""" if self.image_size <= 640: @@ -352,11 +352,34 @@ def _load_image(self, images): def _encode(self, inputs: StdTemplateInputs) -> Dict[str, Any]: encoded = super()._encode(inputs) input_ids = encoded['input_ids'] + labels = encoded['labels'] + loss_scale = encoded.get('loss_scale', None) image_token = self._tokenize('') idx_list = findall(input_ids, image_token) if idx_list: - pass + tokenized_str, images_ori, images_crop, images_spatial_crop = self._preprocess_image(inputs.images) + input_ids, labels, loss_scale = self._extend_tokens(input_ids, labels, loss_scale, idx_list, + lambda i: tokenized_str[i]) + encoded['input_ids'] = input_ids + encoded['labels'] = labels + encoded['loss_scale'] = loss_scale + encoded['images'] = [(images_crop, images_ori)] + encoded['images_seq_mask'] = (torch.tensor(input_ids) == image_token[0])[None] + encoded['images_spatial_crop'] = images_spatial_crop + return encoded + def _data_collator_mm_data(self, batch: List[Dict[str, Any]]) -> Dict[str, Any]: + res = super()._data_collator_mm_data(batch) + images = self.gather_list(batch, 'images') + if images: + res['images'] = images + images_seq_mask = self.concat_tensor(batch, 'images_seq_mask', 0) + images_spatial_crop = self.concat_tensor(batch, 'images_spatial_crop', 0) + if images_seq_mask is not None: + res['images_seq_mask'] = images_seq_mask + if images_spatial_crop is not None: + res['images_spatial_crop'] = images_spatial_crop + return res register_template( From e02cdc27a9c4f9b18587dc9749264138c1388ca7 Mon Sep 17 00:00:00 2001 From: Jintao Huang Date: Tue, 21 Oct 2025 15:26:58 +0800 Subject: [PATCH 08/13] update --- 
...14\346\225\260\346\215\256\351\233\206.md" | 16 ++++++++------- .../Supported-models-and-datasets.md | 16 ++++++++------- swift/llm/model/constant.py | 2 +- tests/test_align/test_template/test_vision.py | 20 +++++++++++++------ 4 files changed, 33 insertions(+), 21 deletions(-) diff --git "a/docs/source/Instruction/\346\224\257\346\214\201\347\232\204\346\250\241\345\236\213\345\222\214\346\225\260\346\215\256\351\233\206.md" "b/docs/source/Instruction/\346\224\257\346\214\201\347\232\204\346\250\241\345\236\213\345\222\214\346\225\260\346\215\256\351\233\206.md" index d62f36781a..6e8c8d2404 100644 --- "a/docs/source/Instruction/\346\224\257\346\214\201\347\232\204\346\250\241\345\236\213\345\222\214\346\225\260\346\215\256\351\233\206.md" +++ "b/docs/source/Instruction/\346\224\257\346\214\201\347\232\204\346\250\241\345\236\213\345\222\214\346\225\260\346\215\256\351\233\206.md" @@ -242,14 +242,8 @@ |[Qwen/Qwen3-Embedding-0.6B](https://modelscope.cn/models/Qwen/Qwen3-Embedding-0.6B)|qwen3_emb|qwen3_emb|-|✘|-|[Qwen/Qwen3-Embedding-0.6B](https://huggingface.co/Qwen/Qwen3-Embedding-0.6B)| |[Qwen/Qwen3-Embedding-4B](https://modelscope.cn/models/Qwen/Qwen3-Embedding-4B)|qwen3_emb|qwen3_emb|-|✘|-|[Qwen/Qwen3-Embedding-4B](https://huggingface.co/Qwen/Qwen3-Embedding-4B)| |[Qwen/Qwen3-Embedding-8B](https://modelscope.cn/models/Qwen/Qwen3-Embedding-8B)|qwen3_emb|qwen3_emb|-|✘|-|[Qwen/Qwen3-Embedding-8B](https://huggingface.co/Qwen/Qwen3-Embedding-8B)| -|[Qwen/Qwen3-Reranker-0.6B](https://modelscope.cn/models/Qwen/Qwen3-Reranker-0.6B)|qwen3_reranker|qwen3_reranker|-|✘|-|[Qwen/Qwen3-Reranker-0.6B](https://huggingface.co/Qwen/Qwen3-Reranker-0.6B)| -|[Qwen/Qwen3-Reranker-4B](https://modelscope.cn/models/Qwen/Qwen3-Reranker-4B)|qwen3_reranker|qwen3_reranker|-|✘|-|[Qwen/Qwen3-Reranker-4B](https://huggingface.co/Qwen/Qwen3-Reranker-4B)| -|[Qwen/Qwen3-Reranker-8B](https://modelscope.cn/models/Qwen/Qwen3-Reranker-8B)|qwen3_reranker|qwen3_reranker|-|✘|-|[Qwen/Qwen3-Reranker-8B](https://huggingface.co/Qwen/Qwen3-Reranker-8B)| |[iic/gte_Qwen2-1.5B-instruct](https://modelscope.cn/models/iic/gte_Qwen2-1.5B-instruct)|qwen2_gte|dummy|-|✘|-|[Alibaba-NLP/gte-Qwen2-1.5B-instruct](https://huggingface.co/Alibaba-NLP/gte-Qwen2-1.5B-instruct)| |[iic/gte_Qwen2-7B-instruct](https://modelscope.cn/models/iic/gte_Qwen2-7B-instruct)|qwen2_gte|dummy|-|✘|-|[Alibaba-NLP/gte-Qwen2-7B-instruct](https://huggingface.co/Alibaba-NLP/gte-Qwen2-7B-instruct)| -|[BAAI/bge-reranker-base](https://modelscope.cn/models/BAAI/bge-reranker-base)|bge_reranker|bge_reranker|-|✘|-|[BAAI/bge-reranker-base](https://huggingface.co/BAAI/bge-reranker-base)| -|[BAAI/bge-reranker-v2-m3](https://modelscope.cn/models/BAAI/bge-reranker-v2-m3)|bge_reranker|bge_reranker|-|✘|-|[BAAI/bge-reranker-v2-m3](https://huggingface.co/BAAI/bge-reranker-v2-m3)| -|[BAAI/bge-reranker-large](https://modelscope.cn/models/BAAI/bge-reranker-large)|bge_reranker|bge_reranker|-|✘|-|[BAAI/bge-reranker-large](https://huggingface.co/BAAI/bge-reranker-large)| |[codefuse-ai/CodeFuse-QWen-14B](https://modelscope.cn/models/codefuse-ai/CodeFuse-QWen-14B)|codefuse_qwen|codefuse|-|✘|coding|[codefuse-ai/CodeFuse-QWen-14B](https://huggingface.co/codefuse-ai/CodeFuse-QWen-14B)| |[iic/ModelScope-Agent-7B](https://modelscope.cn/models/iic/ModelScope-Agent-7B)|modelscope_agent|modelscope_agent|-|✘|-|-| |[iic/ModelScope-Agent-14B](https://modelscope.cn/models/iic/ModelScope-Agent-14B)|modelscope_agent|modelscope_agent|-|✘|-|-| @@ -647,7 +641,6 @@ 
|[answerdotai/ModernBERT-base](https://modelscope.cn/models/answerdotai/ModernBERT-base)|modern_bert|dummy|transformers>=4.48|✘|bert|[answerdotai/ModernBERT-base](https://huggingface.co/answerdotai/ModernBERT-base)| |[answerdotai/ModernBERT-large](https://modelscope.cn/models/answerdotai/ModernBERT-large)|modern_bert|dummy|transformers>=4.48|✘|bert|[answerdotai/ModernBERT-large](https://huggingface.co/answerdotai/ModernBERT-large)| |[iic/gte-modernbert-base](https://modelscope.cn/models/iic/gte-modernbert-base)|modern_bert_gte|dummy|transformers>=4.48|✘|bert, embedding|[Alibaba-NLP/gte-modernbert-base](https://huggingface.co/Alibaba-NLP/gte-modernbert-base)| -|[iic/gte-reranker-modernbert-base](https://modelscope.cn/models/iic/gte-reranker-modernbert-base)|modern_bert_gte_reranker|bert|transformers>=4.48|✘|bert, reranker|[Alibaba-NLP/gte-reranker-modernbert-base](https://huggingface.co/Alibaba-NLP/gte-reranker-modernbert-base)| |[iic/nlp_structbert_backbone_base_std](https://modelscope.cn/models/iic/nlp_structbert_backbone_base_std)|bert|dummy|-|✘|bert|-| |[Shanghai_AI_Laboratory/internlm2-1_8b-reward](https://modelscope.cn/models/Shanghai_AI_Laboratory/internlm2-1_8b-reward)|internlm2_reward|internlm2_reward|transformers>=4.38|✘|-|[internlm/internlm2-1_8b-reward](https://huggingface.co/internlm/internlm2-1_8b-reward)| |[Shanghai_AI_Laboratory/internlm2-7b-reward](https://modelscope.cn/models/Shanghai_AI_Laboratory/internlm2-7b-reward)|internlm2_reward|internlm2_reward|transformers>=4.38|✘|-|[internlm/internlm2-7b-reward](https://huggingface.co/internlm/internlm2-7b-reward)| @@ -663,6 +656,13 @@ |[AI-ModelScope/GRM-llama3.2-3B-rewardmodel-ft](https://modelscope.cn/models/AI-ModelScope/GRM-llama3.2-3B-rewardmodel-ft)|llama3_2_reward|llama3_2|transformers>=4.43|✘|-|[Ray2333/GRM-llama3.2-3B-rewardmodel-ft](https://huggingface.co/Ray2333/GRM-llama3.2-3B-rewardmodel-ft)| |[AI-ModelScope/Skywork-Reward-Gemma-2-27B](https://modelscope.cn/models/AI-ModelScope/Skywork-Reward-Gemma-2-27B)|gemma_reward|gemma|transformers>=4.42|✘|-|[Skywork/Skywork-Reward-Gemma-2-27B](https://huggingface.co/Skywork/Skywork-Reward-Gemma-2-27B)| |[AI-ModelScope/Skywork-Reward-Gemma-2-27B-v0.2](https://modelscope.cn/models/AI-ModelScope/Skywork-Reward-Gemma-2-27B-v0.2)|gemma_reward|gemma|transformers>=4.42|✘|-|[Skywork/Skywork-Reward-Gemma-2-27B-v0.2](https://huggingface.co/Skywork/Skywork-Reward-Gemma-2-27B-v0.2)| +|[BAAI/bge-reranker-base](https://modelscope.cn/models/BAAI/bge-reranker-base)|bge_reranker|bge_reranker|-|✘|-|[BAAI/bge-reranker-base](https://huggingface.co/BAAI/bge-reranker-base)| +|[BAAI/bge-reranker-v2-m3](https://modelscope.cn/models/BAAI/bge-reranker-v2-m3)|bge_reranker|bge_reranker|-|✘|-|[BAAI/bge-reranker-v2-m3](https://huggingface.co/BAAI/bge-reranker-v2-m3)| +|[BAAI/bge-reranker-large](https://modelscope.cn/models/BAAI/bge-reranker-large)|bge_reranker|bge_reranker|-|✘|-|[BAAI/bge-reranker-large](https://huggingface.co/BAAI/bge-reranker-large)| +|[iic/gte-reranker-modernbert-base](https://modelscope.cn/models/iic/gte-reranker-modernbert-base)|modern_bert_gte_reranker|bert|transformers>=4.48|✘|bert, reranker|[Alibaba-NLP/gte-reranker-modernbert-base](https://huggingface.co/Alibaba-NLP/gte-reranker-modernbert-base)| +|[Qwen/Qwen3-Reranker-0.6B](https://modelscope.cn/models/Qwen/Qwen3-Reranker-0.6B)|qwen3_reranker|qwen3_reranker|-|✘|-|[Qwen/Qwen3-Reranker-0.6B](https://huggingface.co/Qwen/Qwen3-Reranker-0.6B)| 
+|[Qwen/Qwen3-Reranker-4B](https://modelscope.cn/models/Qwen/Qwen3-Reranker-4B)|qwen3_reranker|qwen3_reranker|-|✘|-|[Qwen/Qwen3-Reranker-4B](https://huggingface.co/Qwen/Qwen3-Reranker-4B)| +|[Qwen/Qwen3-Reranker-8B](https://modelscope.cn/models/Qwen/Qwen3-Reranker-8B)|qwen3_reranker|qwen3_reranker|-|✘|-|[Qwen/Qwen3-Reranker-8B](https://huggingface.co/Qwen/Qwen3-Reranker-8B)| ### 多模态大模型 @@ -929,6 +929,7 @@ |[deepseek-ai/Janus-1.3B](https://modelscope.cn/models/deepseek-ai/Janus-1.3B)|deepseek_janus|deepseek_janus|-|✘|vision|[deepseek-ai/Janus-1.3B](https://huggingface.co/deepseek-ai/Janus-1.3B)| |[deepseek-ai/Janus-Pro-1B](https://modelscope.cn/models/deepseek-ai/Janus-Pro-1B)|deepseek_janus_pro|deepseek_janus_pro|-|✘|vision|[deepseek-ai/Janus-Pro-1B](https://huggingface.co/deepseek-ai/Janus-Pro-1B)| |[deepseek-ai/Janus-Pro-7B](https://modelscope.cn/models/deepseek-ai/Janus-Pro-7B)|deepseek_janus_pro|deepseek_janus_pro|-|✘|vision|[deepseek-ai/Janus-Pro-7B](https://huggingface.co/deepseek-ai/Janus-Pro-7B)| +|[deepseek-ai/DeepSeek-OCR](https://modelscope.cn/models/deepseek-ai/DeepSeek-OCR)|deepseek_ocr|deepseek_ocr|-|✘|vision|[deepseek-ai/DeepSeek-OCR](https://huggingface.co/deepseek-ai/DeepSeek-OCR)| |[OpenBMB/MiniCPM-V](https://modelscope.cn/models/OpenBMB/MiniCPM-V)|minicpmv|minicpmv|timm, transformers<4.42|✘|vision|[openbmb/MiniCPM-V](https://huggingface.co/openbmb/MiniCPM-V)| |[OpenBMB/MiniCPM-V-2](https://modelscope.cn/models/OpenBMB/MiniCPM-V-2)|minicpmv|minicpmv|timm, transformers<4.42|✘|vision|[openbmb/MiniCPM-V-2](https://huggingface.co/openbmb/MiniCPM-V-2)| |[OpenBMB/MiniCPM-Llama3-V-2_5](https://modelscope.cn/models/OpenBMB/MiniCPM-Llama3-V-2_5)|minicpmv2_5|minicpmv2_5|timm, transformers>=4.36|✘|vision|[openbmb/MiniCPM-Llama3-V-2_5](https://huggingface.co/openbmb/MiniCPM-Llama3-V-2_5)| @@ -1003,6 +1004,7 @@ |[google/gemma-3n-E4B-it](https://modelscope.cn/models/google/gemma-3n-E4B-it)|gemma3n|gemma3n|transformers>=4.53.1|✘|-|[google/gemma-3n-E4B-it](https://huggingface.co/google/gemma-3n-E4B-it)| |[mistralai/Mistral-Small-3.1-24B-Base-2503](https://modelscope.cn/models/mistralai/Mistral-Small-3.1-24B-Base-2503)|mistral_2503|mistral_2503|transformers>=4.49|✘|-|[mistralai/Mistral-Small-3.1-24B-Base-2503](https://huggingface.co/mistralai/Mistral-Small-3.1-24B-Base-2503)| |[mistralai/Mistral-Small-3.1-24B-Instruct-2503](https://modelscope.cn/models/mistralai/Mistral-Small-3.1-24B-Instruct-2503)|mistral_2503|mistral_2503|transformers>=4.49|✘|-|[mistralai/Mistral-Small-3.1-24B-Instruct-2503](https://huggingface.co/mistralai/Mistral-Small-3.1-24B-Instruct-2503)| +|[JinaAI/jina-reranker-m0](https://modelscope.cn/models/JinaAI/jina-reranker-m0)|jina_reranker_m0|jina_reranker_m0|-|✘|reranker, vision|[JinaAI/jina-reranker-m0](https://huggingface.co/JinaAI/jina-reranker-m0)| ## 数据集 diff --git a/docs/source_en/Instruction/Supported-models-and-datasets.md b/docs/source_en/Instruction/Supported-models-and-datasets.md index fa576052d9..01088300e2 100644 --- a/docs/source_en/Instruction/Supported-models-and-datasets.md +++ b/docs/source_en/Instruction/Supported-models-and-datasets.md @@ -242,14 +242,8 @@ The table below introduces the models integrated with ms-swift: |[Qwen/Qwen3-Embedding-0.6B](https://modelscope.cn/models/Qwen/Qwen3-Embedding-0.6B)|qwen3_emb|qwen3_emb|-|✘|-|[Qwen/Qwen3-Embedding-0.6B](https://huggingface.co/Qwen/Qwen3-Embedding-0.6B)| 
|[Qwen/Qwen3-Embedding-4B](https://modelscope.cn/models/Qwen/Qwen3-Embedding-4B)|qwen3_emb|qwen3_emb|-|✘|-|[Qwen/Qwen3-Embedding-4B](https://huggingface.co/Qwen/Qwen3-Embedding-4B)| |[Qwen/Qwen3-Embedding-8B](https://modelscope.cn/models/Qwen/Qwen3-Embedding-8B)|qwen3_emb|qwen3_emb|-|✘|-|[Qwen/Qwen3-Embedding-8B](https://huggingface.co/Qwen/Qwen3-Embedding-8B)| -|[Qwen/Qwen3-Reranker-0.6B](https://modelscope.cn/models/Qwen/Qwen3-Reranker-0.6B)|qwen3_reranker|qwen3_reranker|-|✘|-|[Qwen/Qwen3-Reranker-0.6B](https://huggingface.co/Qwen/Qwen3-Reranker-0.6B)| -|[Qwen/Qwen3-Reranker-4B](https://modelscope.cn/models/Qwen/Qwen3-Reranker-4B)|qwen3_reranker|qwen3_reranker|-|✘|-|[Qwen/Qwen3-Reranker-4B](https://huggingface.co/Qwen/Qwen3-Reranker-4B)| -|[Qwen/Qwen3-Reranker-8B](https://modelscope.cn/models/Qwen/Qwen3-Reranker-8B)|qwen3_reranker|qwen3_reranker|-|✘|-|[Qwen/Qwen3-Reranker-8B](https://huggingface.co/Qwen/Qwen3-Reranker-8B)| |[iic/gte_Qwen2-1.5B-instruct](https://modelscope.cn/models/iic/gte_Qwen2-1.5B-instruct)|qwen2_gte|dummy|-|✘|-|[Alibaba-NLP/gte-Qwen2-1.5B-instruct](https://huggingface.co/Alibaba-NLP/gte-Qwen2-1.5B-instruct)| |[iic/gte_Qwen2-7B-instruct](https://modelscope.cn/models/iic/gte_Qwen2-7B-instruct)|qwen2_gte|dummy|-|✘|-|[Alibaba-NLP/gte-Qwen2-7B-instruct](https://huggingface.co/Alibaba-NLP/gte-Qwen2-7B-instruct)| -|[BAAI/bge-reranker-base](https://modelscope.cn/models/BAAI/bge-reranker-base)|bge_reranker|bge_reranker|-|✘|-|[BAAI/bge-reranker-base](https://huggingface.co/BAAI/bge-reranker-base)| -|[BAAI/bge-reranker-v2-m3](https://modelscope.cn/models/BAAI/bge-reranker-v2-m3)|bge_reranker|bge_reranker|-|✘|-|[BAAI/bge-reranker-v2-m3](https://huggingface.co/BAAI/bge-reranker-v2-m3)| -|[BAAI/bge-reranker-large](https://modelscope.cn/models/BAAI/bge-reranker-large)|bge_reranker|bge_reranker|-|✘|-|[BAAI/bge-reranker-large](https://huggingface.co/BAAI/bge-reranker-large)| |[codefuse-ai/CodeFuse-QWen-14B](https://modelscope.cn/models/codefuse-ai/CodeFuse-QWen-14B)|codefuse_qwen|codefuse|-|✘|coding|[codefuse-ai/CodeFuse-QWen-14B](https://huggingface.co/codefuse-ai/CodeFuse-QWen-14B)| |[iic/ModelScope-Agent-7B](https://modelscope.cn/models/iic/ModelScope-Agent-7B)|modelscope_agent|modelscope_agent|-|✘|-|-| |[iic/ModelScope-Agent-14B](https://modelscope.cn/models/iic/ModelScope-Agent-14B)|modelscope_agent|modelscope_agent|-|✘|-|-| @@ -647,7 +641,6 @@ The table below introduces the models integrated with ms-swift: |[answerdotai/ModernBERT-base](https://modelscope.cn/models/answerdotai/ModernBERT-base)|modern_bert|dummy|transformers>=4.48|✘|bert|[answerdotai/ModernBERT-base](https://huggingface.co/answerdotai/ModernBERT-base)| |[answerdotai/ModernBERT-large](https://modelscope.cn/models/answerdotai/ModernBERT-large)|modern_bert|dummy|transformers>=4.48|✘|bert|[answerdotai/ModernBERT-large](https://huggingface.co/answerdotai/ModernBERT-large)| |[iic/gte-modernbert-base](https://modelscope.cn/models/iic/gte-modernbert-base)|modern_bert_gte|dummy|transformers>=4.48|✘|bert, embedding|[Alibaba-NLP/gte-modernbert-base](https://huggingface.co/Alibaba-NLP/gte-modernbert-base)| -|[iic/gte-reranker-modernbert-base](https://modelscope.cn/models/iic/gte-reranker-modernbert-base)|modern_bert_gte_reranker|bert|transformers>=4.48|✘|bert, reranker|[Alibaba-NLP/gte-reranker-modernbert-base](https://huggingface.co/Alibaba-NLP/gte-reranker-modernbert-base)| |[iic/nlp_structbert_backbone_base_std](https://modelscope.cn/models/iic/nlp_structbert_backbone_base_std)|bert|dummy|-|✘|bert|-| 
|[Shanghai_AI_Laboratory/internlm2-1_8b-reward](https://modelscope.cn/models/Shanghai_AI_Laboratory/internlm2-1_8b-reward)|internlm2_reward|internlm2_reward|transformers>=4.38|✘|-|[internlm/internlm2-1_8b-reward](https://huggingface.co/internlm/internlm2-1_8b-reward)| |[Shanghai_AI_Laboratory/internlm2-7b-reward](https://modelscope.cn/models/Shanghai_AI_Laboratory/internlm2-7b-reward)|internlm2_reward|internlm2_reward|transformers>=4.38|✘|-|[internlm/internlm2-7b-reward](https://huggingface.co/internlm/internlm2-7b-reward)| @@ -663,6 +656,13 @@ The table below introduces the models integrated with ms-swift: |[AI-ModelScope/GRM-llama3.2-3B-rewardmodel-ft](https://modelscope.cn/models/AI-ModelScope/GRM-llama3.2-3B-rewardmodel-ft)|llama3_2_reward|llama3_2|transformers>=4.43|✘|-|[Ray2333/GRM-llama3.2-3B-rewardmodel-ft](https://huggingface.co/Ray2333/GRM-llama3.2-3B-rewardmodel-ft)| |[AI-ModelScope/Skywork-Reward-Gemma-2-27B](https://modelscope.cn/models/AI-ModelScope/Skywork-Reward-Gemma-2-27B)|gemma_reward|gemma|transformers>=4.42|✘|-|[Skywork/Skywork-Reward-Gemma-2-27B](https://huggingface.co/Skywork/Skywork-Reward-Gemma-2-27B)| |[AI-ModelScope/Skywork-Reward-Gemma-2-27B-v0.2](https://modelscope.cn/models/AI-ModelScope/Skywork-Reward-Gemma-2-27B-v0.2)|gemma_reward|gemma|transformers>=4.42|✘|-|[Skywork/Skywork-Reward-Gemma-2-27B-v0.2](https://huggingface.co/Skywork/Skywork-Reward-Gemma-2-27B-v0.2)| +|[BAAI/bge-reranker-base](https://modelscope.cn/models/BAAI/bge-reranker-base)|bge_reranker|bge_reranker|-|✘|-|[BAAI/bge-reranker-base](https://huggingface.co/BAAI/bge-reranker-base)| +|[BAAI/bge-reranker-v2-m3](https://modelscope.cn/models/BAAI/bge-reranker-v2-m3)|bge_reranker|bge_reranker|-|✘|-|[BAAI/bge-reranker-v2-m3](https://huggingface.co/BAAI/bge-reranker-v2-m3)| +|[BAAI/bge-reranker-large](https://modelscope.cn/models/BAAI/bge-reranker-large)|bge_reranker|bge_reranker|-|✘|-|[BAAI/bge-reranker-large](https://huggingface.co/BAAI/bge-reranker-large)| +|[iic/gte-reranker-modernbert-base](https://modelscope.cn/models/iic/gte-reranker-modernbert-base)|modern_bert_gte_reranker|bert|transformers>=4.48|✘|bert, reranker|[Alibaba-NLP/gte-reranker-modernbert-base](https://huggingface.co/Alibaba-NLP/gte-reranker-modernbert-base)| +|[Qwen/Qwen3-Reranker-0.6B](https://modelscope.cn/models/Qwen/Qwen3-Reranker-0.6B)|qwen3_reranker|qwen3_reranker|-|✘|-|[Qwen/Qwen3-Reranker-0.6B](https://huggingface.co/Qwen/Qwen3-Reranker-0.6B)| +|[Qwen/Qwen3-Reranker-4B](https://modelscope.cn/models/Qwen/Qwen3-Reranker-4B)|qwen3_reranker|qwen3_reranker|-|✘|-|[Qwen/Qwen3-Reranker-4B](https://huggingface.co/Qwen/Qwen3-Reranker-4B)| +|[Qwen/Qwen3-Reranker-8B](https://modelscope.cn/models/Qwen/Qwen3-Reranker-8B)|qwen3_reranker|qwen3_reranker|-|✘|-|[Qwen/Qwen3-Reranker-8B](https://huggingface.co/Qwen/Qwen3-Reranker-8B)| ### Multimodal large models @@ -929,6 +929,7 @@ The table below introduces the models integrated with ms-swift: |[deepseek-ai/Janus-1.3B](https://modelscope.cn/models/deepseek-ai/Janus-1.3B)|deepseek_janus|deepseek_janus|-|✘|vision|[deepseek-ai/Janus-1.3B](https://huggingface.co/deepseek-ai/Janus-1.3B)| |[deepseek-ai/Janus-Pro-1B](https://modelscope.cn/models/deepseek-ai/Janus-Pro-1B)|deepseek_janus_pro|deepseek_janus_pro|-|✘|vision|[deepseek-ai/Janus-Pro-1B](https://huggingface.co/deepseek-ai/Janus-Pro-1B)| |[deepseek-ai/Janus-Pro-7B](https://modelscope.cn/models/deepseek-ai/Janus-Pro-7B)|deepseek_janus_pro|deepseek_janus_pro|-|✘|vision|[deepseek-ai/Janus-Pro-7B](https://huggingface.co/deepseek-ai/Janus-Pro-7B)| 
+|[deepseek-ai/DeepSeek-OCR](https://modelscope.cn/models/deepseek-ai/DeepSeek-OCR)|deepseek_ocr|deepseek_ocr|-|✘|vision|[deepseek-ai/DeepSeek-OCR](https://huggingface.co/deepseek-ai/DeepSeek-OCR)| |[OpenBMB/MiniCPM-V](https://modelscope.cn/models/OpenBMB/MiniCPM-V)|minicpmv|minicpmv|timm, transformers<4.42|✘|vision|[openbmb/MiniCPM-V](https://huggingface.co/openbmb/MiniCPM-V)| |[OpenBMB/MiniCPM-V-2](https://modelscope.cn/models/OpenBMB/MiniCPM-V-2)|minicpmv|minicpmv|timm, transformers<4.42|✘|vision|[openbmb/MiniCPM-V-2](https://huggingface.co/openbmb/MiniCPM-V-2)| |[OpenBMB/MiniCPM-Llama3-V-2_5](https://modelscope.cn/models/OpenBMB/MiniCPM-Llama3-V-2_5)|minicpmv2_5|minicpmv2_5|timm, transformers>=4.36|✘|vision|[openbmb/MiniCPM-Llama3-V-2_5](https://huggingface.co/openbmb/MiniCPM-Llama3-V-2_5)| @@ -1003,6 +1004,7 @@ The table below introduces the models integrated with ms-swift: |[google/gemma-3n-E4B-it](https://modelscope.cn/models/google/gemma-3n-E4B-it)|gemma3n|gemma3n|transformers>=4.53.1|✘|-|[google/gemma-3n-E4B-it](https://huggingface.co/google/gemma-3n-E4B-it)| |[mistralai/Mistral-Small-3.1-24B-Base-2503](https://modelscope.cn/models/mistralai/Mistral-Small-3.1-24B-Base-2503)|mistral_2503|mistral_2503|transformers>=4.49|✘|-|[mistralai/Mistral-Small-3.1-24B-Base-2503](https://huggingface.co/mistralai/Mistral-Small-3.1-24B-Base-2503)| |[mistralai/Mistral-Small-3.1-24B-Instruct-2503](https://modelscope.cn/models/mistralai/Mistral-Small-3.1-24B-Instruct-2503)|mistral_2503|mistral_2503|transformers>=4.49|✘|-|[mistralai/Mistral-Small-3.1-24B-Instruct-2503](https://huggingface.co/mistralai/Mistral-Small-3.1-24B-Instruct-2503)| +|[JinaAI/jina-reranker-m0](https://modelscope.cn/models/JinaAI/jina-reranker-m0)|jina_reranker_m0|jina_reranker_m0|-|✘|reranker, vision|[JinaAI/jina-reranker-m0](https://huggingface.co/JinaAI/jina-reranker-m0)| ## Datasets diff --git a/swift/llm/model/constant.py b/swift/llm/model/constant.py index 0146ca7f62..725fd5a304 100644 --- a/swift/llm/model/constant.py +++ b/swift/llm/model/constant.py @@ -296,4 +296,4 @@ def _get_model_name_list(cls): return list( chain.from_iterable( _get_model_name_list(model_type_cls) - for model_type_cls in [LLMModelType, MLLMModelType, BertModelType, RMModelType])) + for model_type_cls in [LLMModelType, MLLMModelType, BertModelType, RMModelType, RerankerModelType])) diff --git a/tests/test_align/test_template/test_vision.py b/tests/test_align/test_template/test_vision.py index 1172b12b19..9e65c9473b 100644 --- a/tests/test_align/test_template/test_vision.py +++ b/tests/test_align/test_template/test_vision.py @@ -8,7 +8,8 @@ def _infer_model(pt_engine, system=None, messages=None, images=None, **kwargs): seed_everything(42) - request_config = RequestConfig(max_tokens=128, temperature=0, repetition_penalty=1) + max_tokens = kwargs.get('max_tokens', 128) + request_config = RequestConfig(max_tokens=max_tokens, temperature=0, repetition_penalty=1) if messages is None: messages = [] if system is not None: @@ -964,12 +965,19 @@ def test_sailvl2(): def test_deepseek_ocr(): - pt_engine = PtEngine('deepseek-ai/DeepSeek-OCR') - query = 'describe the image' + pt_engine = PtEngine('deepseek-ai/DeepSeek-OCR', attn_impl='flash_attention_2') + query = 'Free OCR.' 
messages = [{'role': 'user', 'content': query}] - images = ['http://modelscope-open.oss-cn-hangzhou.aliyuncs.com/images/cat.png'] - response = _infer_model(pt_engine, messages=messages, images=images) - assert ans in response + images = ['http://modelscope-open.oss-cn-hangzhou.aliyuncs.com/images/ocr.png'] + response = _infer_model(pt_engine, messages=messages, images=images, max_tokens=256) + assert response == ('# 简介\n\nSWIFT支持250+ LLM和35+ MLLM(多模态大模型)的训练、推理、评测和部署。开发者可以直接' + '将我们的框架应用到自己的Research和生产环境中,实现模型训练评测到应用的完整链路。我们除支持了PEFT提' + '供的轻量训练方案外,也提供了一个完整的**Adapters库**以支持最新的训练技术,如NEFTune、LoRA+、' + 'LLaMA-PRO等,这个适配器库可以脱离训练脚本直接使用在自己的自定流程中。\n\n为方便不熟悉深度学习的用' + '户使用,我们提供了一个Gradio的web-ui用于控制训练和推理,并提供了配套的深度学习课程和最佳实践供新手' + '入门。\n\n此外,我们也在拓展其他模态的能力,目前我们支持了AnimateDiff的全参数训练和LoRA训练。\n' + '\nSWIFT具有丰富的文档体系,如有使用问题请查看这里。\n\n可以在Huggingface space 和 ModelScope' + '创空间 中体验SWIFT web-ui功能了。') if __name__ == '__main__': From 803cf519f12b750e723f97729f0b95d788ec6110 Mon Sep 17 00:00:00 2001 From: Jintao Huang Date: Tue, 21 Oct 2025 15:33:34 +0800 Subject: [PATCH 09/13] update --- README.md | 6 +++--- README_CN.md | 6 +++--- .../\345\277\253\351\200\237\345\274\200\345\247\213.md" | 6 +++--- docs/source_en/GetStarted/Quick-start.md | 6 +++--- 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index d21b725630..8f9f844dc9 100644 --- a/README.md +++ b/README.md @@ -51,13 +51,13 @@ You can contact us and communicate with us by adding our group: ## 📝 Introduction -🍲 ms-swift is an official framework provided by the ModelScope community for fine-tuning and deploying large language models and multi-modal large models. It currently supports the training (pre-training, fine-tuning, human alignment), inference, evaluation, quantization, and deployment of 500+ large models and 200+ multi-modal large models. These large language models (LLMs) include models such as Qwen3, Qwen3-MoE, Qwen2.5, InternLM3, GLM4.5, Mistral, DeepSeek-R1, Yi1.5, TeleChat2, Baichuan2, and Gemma2. The multi-modal LLMs include models such as Qwen2.5-VL, Qwen2-Audio, Llama4, Llava, InternVL3, MiniCPM-V-4, Ovis2.5, GLM4v, Xcomposer2.5, Yi-VL, DeepSeek-VL2, Phi3.5-Vision, and GOT-OCR2. +🍲 ms-swift is an official framework provided by the ModelScope community for fine-tuning and deploying large language models and multi-modal large models. It currently supports the training (pre-training, fine-tuning, human alignment), inference, evaluation, quantization, and deployment of 600+ large models and 300+ multi-modal large models. These large language models (LLMs) include models such as Qwen3, Qwen3-MoE, Qwen2.5, InternLM3, GLM4.5, Mistral, DeepSeek-R1, TeleChat2, Baichuan2, and Gemma2. The multi-modal LLMs include models such as Qwen3-VL, Qwen3-Omni, Llama4, Llava, InternVL3.5, MiniCPM-V-4, Ovis2.5, GLM4.5-V, DeepSeek-VL2, Phi3.5-Vision, and GOT-OCR2. 🍔 Additionally, ms-swift incorporates the latest training technologies, including lightweight techniques such as LoRA, QLoRA, Llama-Pro, LongLoRA, GaLore, Q-GaLore, LoRA+, LISA, DoRA, FourierFt, ReFT, UnSloth, and Liger, as well as human alignment training methods like DPO, GRPO, RM, PPO, GKD, KTO, CPO, SimPO, and ORPO. ms-swift supports acceleration of inference, evaluation, and deployment modules using vLLM, SGLang and LMDeploy, and it supports model quantization with technologies like GPTQ, AWQ, and BNB. Furthermore, ms-swift offers a Gradio-based Web UI and a wealth of best practices. 
**Why choose ms-swift?** -- 🍎 **Model Types**: Supports 500+ pure text large models, **200+ multi-modal large models**, as well as All-to-All multi-modal models, sequence classification models, and embedding models, **covering the entire process from training to deployment**. +- 🍎 **Model Types**: Supports 600+ pure text large models, **300+ multi-modal large models**, as well as All-to-All multi-modal models, sequence classification models, and embedding models, **covering the entire process from training to deployment**. - **Dataset Types**: Comes with 150+ pre-training, fine-tuning, human alignment, multi-modal datasets, and supports custom datasets. - **Hardware Support**: Compatible with CPU, RTX series, T4/V100, A10/A100/H100, Ascend NPU, MPS, etc. - **Lightweight Training**: Supports lightweight fine-tuning methods like LoRA, QLoRA, DoRA, LoRA+, ReFT, RS-LoRA, LLaMAPro, Adapter, GaLore, Q-Galore, LISA, UnSloth, Liger-Kernel. @@ -65,7 +65,7 @@ You can contact us and communicate with us by adding our group: - **Quantization Training**: Supports training quantized models like BNB, AWQ, GPTQ, AQLM, HQQ, EETQ. - 🍊 **RLHF Training**: Supports human alignment training methods such as DPO, GRPO, RM, PPO, GKD, KTO, CPO, SimPO, ORPO for both pure text and multi-modal large models. - 🍓 **Multi-Modal Training**: Supports training on different modalities like images, videos, and audio, for tasks like VQA, captioning, OCR, and grounding. -- 🥥 **Megatron Parallelism**: Supports accelerating CPT/SFT/DPO using Megatron parallelism techniques, currently compatible with 200+ large language models. +- 🥥 **Megatron Parallelism**: Supports accelerating CPT/SFT/DPO/KTO/RM using Megatron parallelism techniques, currently compatible with 200+ pure text large models, 100+ multi-modal large models. - **Interface Training**: Provides capabilities for training, inference, evaluation, quantization through an interface, completing the whole large model pipeline. - **Plugin and Extension**: Supports custom model and dataset extensions, as well as customization of components like loss, metric, trainer, loss-scale, callback, optimizer. - 🍉 **Toolbox Capabilities**: Offers not only training support for large models and multi-modal large models but also covers the entire process of inference, evaluation, quantization, and deployment. 
diff --git a/README_CN.md b/README_CN.md index bc7154cccb..b8edd20019 100644 --- a/README_CN.md +++ b/README_CN.md @@ -49,12 +49,12 @@ | ## 📝 简介 -🍲 ms-swift是魔搭社区提供的大模型与多模态大模型微调部署框架,现已支持500+大模型与200+多模态大模型的训练(预训练、微调、人类对齐)、推理、评测、量化与部署。其中大模型包括:Qwen3、Qwen3-MoE、Qwen2.5、InternLM3、GLM4.5、Mistral、DeepSeek-R1、Yi1.5、TeleChat2、Baichuan2、Gemma2等模型,多模态大模型包括:Qwen2.5-VL、Qwen2-Audio、Llama4、Llava、InternVL3、MiniCPM-V-4、Ovis2.5、GLM4v、Xcomposer2.5、Yi-VL、DeepSeek-VL2、Phi3.5-Vision、GOT-OCR2等模型。 +🍲 ms-swift是魔搭社区提供的大模型与多模态大模型微调部署框架,现已支持600+大模型与300+多模态大模型的训练(预训练、微调、人类对齐)、推理、评测、量化与部署。其中大模型包括:Qwen3、Qwen3-MoE、Qwen2.5、InternLM3、GLM4.5、Mistral、DeepSeek-R1、TeleChat2、Baichuan2、Gemma2等模型,多模态大模型包括:Qwen3-VL、Qwen3-Omni、Llama4、Llava、InternVL3.5、MiniCPM-V-4、Ovis2.5、GLM4.5-V、DeepSeek-VL2、Phi3.5-Vision、GOT-OCR2等模型。 🍔 除此之外,ms-swift汇集了最新的训练技术,包括LoRA、QLoRA、Llama-Pro、LongLoRA、GaLore、Q-GaLore、LoRA+、LISA、DoRA、FourierFt、ReFT、UnSloth、和Liger等轻量化训练技术,以及DPO、GRPO、RM、PPO、GKD、KTO、CPO、SimPO、ORPO等人类对齐训练方法。ms-swift支持使用vLLM、SGLang和LMDeploy对推理、评测和部署模块进行加速,并支持使用GPTQ、AWQ、BNB等技术对大模型进行量化。ms-swift还提供了基于Gradio的Web-UI界面及丰富的最佳实践。 **为什么选择ms-swift?** -- 🍎 **模型类型**:支持500+纯文本大模型、**200+多模态大模型**以及All-to-All全模态模型、序列分类模型、Embedding模型**训练到部署全流程**。 +- 🍎 **模型类型**:支持600+纯文本大模型、**300+多模态大模型**以及All-to-All全模态模型、序列分类模型、Embedding模型**训练到部署全流程**。 - **数据集类型**:内置150+预训练、微调、人类对齐、多模态等各种类型的数据集,并支持自定义数据集。 - **硬件支持**:CPU、RTX系列、T4/V100、A10/A100/H100、Ascend NPU、MPS等。 - **轻量训练**:支持了LoRA、QLoRA、DoRA、LoRA+、ReFT、RS-LoRA、LLaMAPro、Adapter、GaLore、Q-Galore、LISA、UnSloth、Liger-Kernel等轻量微调方式。 @@ -62,7 +62,7 @@ - **量化训练**:支持对BNB、AWQ、GPTQ、AQLM、HQQ、EETQ量化模型进行训练。 - 🍊 **RLHF训练**:支持纯文本大模型和多模态大模型的DPO、GRPO、RM、PPO、GKD、KTO、CPO、SimPO、ORPO等人类对齐训练方法。 - 🍓 **多模态训练**:支持对图像、视频和语音不同模态模型进行训练,支持VQA、Caption、OCR、Grounding任务的训练。 -- 🥥 **Megatron并行技术**:支持使用Megatron并行技术对CPT/SFT/DPO进行加速,现支持200+大语言模型。 +- 🥥 **Megatron并行技术**:支持使用Megatron并行技术对CPT/SFT/DPO/KTO/RM进行加速,现支持200+纯文本大模型和100+多模态大模型。 - **界面训练**:以界面的方式提供训练、推理、评测、量化的能力,完成大模型的全链路。 - **插件化与拓展**:支持自定义模型和数据集拓展,支持对loss、metric、trainer、loss-scale、callback、optimizer等组件进行自定义。 - 🍉 **工具箱能力**:不仅提供大模型和多模态大模型的训练支持,还涵盖其推理、评测、量化和部署全流程。 diff --git "a/docs/source/GetStarted/\345\277\253\351\200\237\345\274\200\345\247\213.md" "b/docs/source/GetStarted/\345\277\253\351\200\237\345\274\200\345\247\213.md" index eb04bd833a..e491c89934 100644 --- "a/docs/source/GetStarted/\345\277\253\351\200\237\345\274\200\345\247\213.md" +++ "b/docs/source/GetStarted/\345\277\253\351\200\237\345\274\200\345\247\213.md" @@ -1,8 +1,8 @@ # 快速开始 -ms-swift是魔搭社区提供的大模型与多模态大模型训练部署框架,现已支持500+大模型与200+多模态大模型的训练(预训练、微调、人类对齐)、推理、评测、量化与部署。模型开发者可以在ms-swift框架中一站式完成围绕大模型的各类需求。目前ms-swift的主要能力包含: +ms-swift是魔搭社区提供的大模型与多模态大模型训练部署框架,现已支持600+大模型与300+多模态大模型的训练(预训练、微调、人类对齐)、推理、评测、量化与部署。模型开发者可以在ms-swift框架中一站式完成围绕大模型的各类需求。目前ms-swift的主要能力包含: -- 🍎 模型类型:支持500+纯文本大模型、200+多模态大模型以及All-to-All全模态模型、序列分类模型、Embedding模型训练到部署全流程。 +- 🍎 模型类型:支持600+纯文本大模型、300+多模态大模型以及All-to-All全模态模型、序列分类模型、Embedding模型训练到部署全流程。 - 数据集类型:内置150+预训练、微调、人类对齐、多模态等各种类型的数据集,并支持自定义数据集。 - 硬件支持:CPU、RTX系列、T4/V100、A10/A100/H100、Ascend NPU、MPS等。 - 轻量训练:支持了LoRA、QLoRA、DoRA、LoRA+、ReFT、RS-LoRA、LLaMAPro、Adapter、GaLore、Q-Galore、LISA、UnSloth、Liger-Kernel等轻量微调方式。 @@ -10,7 +10,7 @@ ms-swift是魔搭社区提供的大模型与多模态大模型训练部署框架 - 量化训练:支持对BNB、AWQ、GPTQ、AQLM、HQQ、EETQ量化模型进行训练。 - 🍊 RLHF训练:支持纯文本大模型和多模态大模型的DPO、GRPO、RM、PPO、GKD、KTO、CPO、SimPO、ORPO等人类对齐训练方法。 - 🍓 多模态训练:支持对图像、视频和语音不同模态模型进行训练,支持VQA、Caption、OCR、Grounding任务的训练。 -- 🥥 Megatron并行技术:支持使用Megatron并行技术对CPT/SFT/DPO/KTO/RM进行加速,现支持200+大语言模型。 +- 🥥 Megatron并行技术:支持使用Megatron并行技术对CPT/SFT/DPO/KTO/RM进行加速,现支持200+纯文本大模型和100+多模态大模型。 - 
界面训练:以界面的方式提供训练、推理、评测、量化的能力,完成大模型的全链路。 - 插件化与拓展:支持自定义模型和数据集拓展,支持对loss、metric、trainer、loss-scale、callback、optimizer等组件进行自定义。 - 🍉 工具箱能力:除了对大模型和多模态大模型的训练支持外,还支持其推理、评测、量化和部署全流程。 diff --git a/docs/source_en/GetStarted/Quick-start.md b/docs/source_en/GetStarted/Quick-start.md index 9cf9a3217f..24f56b0bda 100644 --- a/docs/source_en/GetStarted/Quick-start.md +++ b/docs/source_en/GetStarted/Quick-start.md @@ -1,8 +1,8 @@ # Quick Start -ms-swift is a comprehensive training and deployment framework for large language models and multimodal large models, provided by the ModelScope Community. It currently supports the training (CPT, SFT, RLHF), inference, evaluation, quantization, and deployment of 500+ LLM and 200+ MLLM. Model developers can fulfill all kinds of needs related to large models in a single platform within the ms-swift framework. The main capabilities of ms-swift include: +ms-swift is a comprehensive training and deployment framework for large language models and multimodal large models, provided by the ModelScope Community. It currently supports the training (CPT, SFT, RLHF), inference, evaluation, quantization, and deployment of 600+ LLM and 300+ MLLM. Model developers can fulfill all kinds of needs related to large models in a single platform within the ms-swift framework. The main capabilities of ms-swift include: -- 🍎 Model Types: Supports 500+ pure text large models, 200+ multi-modal large models, as well as All-to-All multi-modal models, sequence classification models, and embedding models, covering the entire process from training to deployment. +- 🍎 Model Types: Supports 600+ pure text large models, 300+ multi-modal large models, as well as All-to-All multi-modal models, sequence classification models, and embedding models, covering the entire process from training to deployment. - Dataset Types: Comes with more than 150 pre-built datasets for pre-training, fine-tuning, human alignment, multimodal, and supports custom datasets. - Hardware Support: Compatible with CPU, RTX series, T4/V100, A10/A100/H100, Ascend NPU, MPS and others. - Lightweight Training: Supports lightweight fine-tuning methods like LoRA, QLoRA, DoRA, LoRA+, ReFT, RS-LoRA, LLaMAPro, Adapter, GaLore, Q-Galore, LISA, UnSloth, Liger-Kernel, and more. @@ -10,7 +10,7 @@ ms-swift is a comprehensive training and deployment framework for large language - Quantization Training: Provides training for quantized models like BNB, AWQ, GPTQ, AQLM, HQQ, EETQ. - 🍊 RLHF Training: Supports human alignment training methods like DPO, GRPO, RM, PPO, GKD, KTO, CPO, SimPO, ORPO for both text-based and multimodal large models. - 🍓 Multimodal Training: Capable of training models for different modalities such as images, videos, and audios; supports tasks like VQA (Visual Question Answering), Captioning, OCR (Optical Character Recognition), and Grounding. -- 🥥 Megatron Parallelism: Supports accelerating CPT/SFT/DPO/KTO/RM using Megatron parallelism techniques, currently compatible with 200+ large language models. +- 🥥 Megatron Parallelism: Supports accelerating CPT/SFT/DPO/KTO/RM using Megatron parallelism techniques, currently compatible with 200+ pure text large models, 100+ multi-modal large models. - Interface-driven Training: Offers training, inference, evaluation, and quantization capabilities through an interface, enabling a complete workflow for large models. 
- Plugins and Extensions: Allows customization and extension of models and datasets, and supports customizations for components like loss, metric, trainer, loss-scale, callback, optimizer, etc. - 🍉 Toolbox Capabilities: Offers not only training support for large models and multi-modal large models but also covers the entire process of inference, evaluation, quantization, and deployment. From e2ab416ace85a4804142496f2ce34e754cb7da2e Mon Sep 17 00:00:00 2001 From: Jintao Huang Date: Tue, 21 Oct 2025 15:50:23 +0800 Subject: [PATCH 10/13] update --- swift/llm/template/template/deepseek.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/swift/llm/template/template/deepseek.py b/swift/llm/template/template/deepseek.py index 98e1e17d22..688c76c7a2 100644 --- a/swift/llm/template/template/deepseek.py +++ b/swift/llm/template/template/deepseek.py @@ -6,7 +6,7 @@ import numpy as np import torch import torch.nn as nn -from PIL import Image, ImageDraw, ImageFont, ImageOps +from PIL import Image, ImageOps from transformers.dynamic_module_utils import get_class_from_dynamic_module from swift.utils import get_env_args @@ -247,10 +247,10 @@ def init_env_args(self): self.base_size = get_env_args('base_size', int, 1024) self.image_size = get_env_args('image_size', int, 640) - def _preprocess_image(self, images): + def _preprocess_image(self, images, image_token_id): # Code borrowed from # https://modelscope.cn/models/deepseek-ai/DeepSeek-OCR/file/view/master/modeling_deepseekocr.py?status=1 - crop_mode = True + crop_mode = self.crop_mode patch_size = 16 downsample_ratio = 4 valid_img_tokens = 0 @@ -261,7 +261,6 @@ def _preprocess_image(self, images): ratio = 1 - ((max(w, h) - min(w, h)) / (max(w, h))) image_transform = self.BasicImageTransform(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), normalize=True) - image_token_id = 128815 images_list, images_crop_list = [], [] tokenized_str = [] images_spatial_crop = [] @@ -357,7 +356,8 @@ def _encode(self, inputs: StdTemplateInputs) -> Dict[str, Any]: image_token = self._tokenize('') idx_list = findall(input_ids, image_token) if idx_list: - tokenized_str, images_ori, images_crop, images_spatial_crop = self._preprocess_image(inputs.images) + tokenized_str, images_ori, images_crop, images_spatial_crop = self._preprocess_image(inputs.images, + image_token[0]) input_ids, labels, loss_scale = self._extend_tokens(input_ids, labels, loss_scale, idx_list, lambda i: tokenized_str[i]) encoded['input_ids'] = input_ids From 4497de082e84d7e8b20249ebe3c6bba7b50a5a4e Mon Sep 17 00:00:00 2001 From: Jintao Huang Date: Tue, 21 Oct 2025 15:59:15 +0800 Subject: [PATCH 11/13] update --- ...14\346\225\260\346\215\256\351\233\206.md" | 2 +- .../Supported-models-and-datasets.md | 2 +- examples/models/deepseek_vl2/deepseek_ocr.sh | 30 +++++++++++++++++++ swift/llm/model/model/deepseek.py | 1 + 4 files changed, 33 insertions(+), 2 deletions(-) create mode 100644 examples/models/deepseek_vl2/deepseek_ocr.sh diff --git "a/docs/source/Instruction/\346\224\257\346\214\201\347\232\204\346\250\241\345\236\213\345\222\214\346\225\260\346\215\256\351\233\206.md" "b/docs/source/Instruction/\346\224\257\346\214\201\347\232\204\346\250\241\345\236\213\345\222\214\346\225\260\346\215\256\351\233\206.md" index 6e8c8d2404..27c404cdb1 100644 --- "a/docs/source/Instruction/\346\224\257\346\214\201\347\232\204\346\250\241\345\236\213\345\222\214\346\225\260\346\215\256\351\233\206.md" +++ 
"b/docs/source/Instruction/\346\224\257\346\214\201\347\232\204\346\250\241\345\236\213\345\222\214\346\225\260\346\215\256\351\233\206.md" @@ -929,7 +929,7 @@ |[deepseek-ai/Janus-1.3B](https://modelscope.cn/models/deepseek-ai/Janus-1.3B)|deepseek_janus|deepseek_janus|-|✘|vision|[deepseek-ai/Janus-1.3B](https://huggingface.co/deepseek-ai/Janus-1.3B)| |[deepseek-ai/Janus-Pro-1B](https://modelscope.cn/models/deepseek-ai/Janus-Pro-1B)|deepseek_janus_pro|deepseek_janus_pro|-|✘|vision|[deepseek-ai/Janus-Pro-1B](https://huggingface.co/deepseek-ai/Janus-Pro-1B)| |[deepseek-ai/Janus-Pro-7B](https://modelscope.cn/models/deepseek-ai/Janus-Pro-7B)|deepseek_janus_pro|deepseek_janus_pro|-|✘|vision|[deepseek-ai/Janus-Pro-7B](https://huggingface.co/deepseek-ai/Janus-Pro-7B)| -|[deepseek-ai/DeepSeek-OCR](https://modelscope.cn/models/deepseek-ai/DeepSeek-OCR)|deepseek_ocr|deepseek_ocr|-|✘|vision|[deepseek-ai/DeepSeek-OCR](https://huggingface.co/deepseek-ai/DeepSeek-OCR)| +|[deepseek-ai/DeepSeek-OCR](https://modelscope.cn/models/deepseek-ai/DeepSeek-OCR)|deepseek_ocr|deepseek_ocr|transformers==4.46.3, easydict|✘|vision|[deepseek-ai/DeepSeek-OCR](https://huggingface.co/deepseek-ai/DeepSeek-OCR)| |[OpenBMB/MiniCPM-V](https://modelscope.cn/models/OpenBMB/MiniCPM-V)|minicpmv|minicpmv|timm, transformers<4.42|✘|vision|[openbmb/MiniCPM-V](https://huggingface.co/openbmb/MiniCPM-V)| |[OpenBMB/MiniCPM-V-2](https://modelscope.cn/models/OpenBMB/MiniCPM-V-2)|minicpmv|minicpmv|timm, transformers<4.42|✘|vision|[openbmb/MiniCPM-V-2](https://huggingface.co/openbmb/MiniCPM-V-2)| |[OpenBMB/MiniCPM-Llama3-V-2_5](https://modelscope.cn/models/OpenBMB/MiniCPM-Llama3-V-2_5)|minicpmv2_5|minicpmv2_5|timm, transformers>=4.36|✘|vision|[openbmb/MiniCPM-Llama3-V-2_5](https://huggingface.co/openbmb/MiniCPM-Llama3-V-2_5)| diff --git a/docs/source_en/Instruction/Supported-models-and-datasets.md b/docs/source_en/Instruction/Supported-models-and-datasets.md index 01088300e2..80490a46df 100644 --- a/docs/source_en/Instruction/Supported-models-and-datasets.md +++ b/docs/source_en/Instruction/Supported-models-and-datasets.md @@ -929,7 +929,7 @@ The table below introduces the models integrated with ms-swift: |[deepseek-ai/Janus-1.3B](https://modelscope.cn/models/deepseek-ai/Janus-1.3B)|deepseek_janus|deepseek_janus|-|✘|vision|[deepseek-ai/Janus-1.3B](https://huggingface.co/deepseek-ai/Janus-1.3B)| |[deepseek-ai/Janus-Pro-1B](https://modelscope.cn/models/deepseek-ai/Janus-Pro-1B)|deepseek_janus_pro|deepseek_janus_pro|-|✘|vision|[deepseek-ai/Janus-Pro-1B](https://huggingface.co/deepseek-ai/Janus-Pro-1B)| |[deepseek-ai/Janus-Pro-7B](https://modelscope.cn/models/deepseek-ai/Janus-Pro-7B)|deepseek_janus_pro|deepseek_janus_pro|-|✘|vision|[deepseek-ai/Janus-Pro-7B](https://huggingface.co/deepseek-ai/Janus-Pro-7B)| -|[deepseek-ai/DeepSeek-OCR](https://modelscope.cn/models/deepseek-ai/DeepSeek-OCR)|deepseek_ocr|deepseek_ocr|-|✘|vision|[deepseek-ai/DeepSeek-OCR](https://huggingface.co/deepseek-ai/DeepSeek-OCR)| +|[deepseek-ai/DeepSeek-OCR](https://modelscope.cn/models/deepseek-ai/DeepSeek-OCR)|deepseek_ocr|deepseek_ocr|transformers==4.46.3, easydict|✘|vision|[deepseek-ai/DeepSeek-OCR](https://huggingface.co/deepseek-ai/DeepSeek-OCR)| |[OpenBMB/MiniCPM-V](https://modelscope.cn/models/OpenBMB/MiniCPM-V)|minicpmv|minicpmv|timm, transformers<4.42|✘|vision|[openbmb/MiniCPM-V](https://huggingface.co/openbmb/MiniCPM-V)| |[OpenBMB/MiniCPM-V-2](https://modelscope.cn/models/OpenBMB/MiniCPM-V-2)|minicpmv|minicpmv|timm, 
transformers<4.42|✘|vision|[openbmb/MiniCPM-V-2](https://huggingface.co/openbmb/MiniCPM-V-2)| |[OpenBMB/MiniCPM-Llama3-V-2_5](https://modelscope.cn/models/OpenBMB/MiniCPM-Llama3-V-2_5)|minicpmv2_5|minicpmv2_5|timm, transformers>=4.36|✘|vision|[openbmb/MiniCPM-Llama3-V-2_5](https://huggingface.co/openbmb/MiniCPM-Llama3-V-2_5)| diff --git a/examples/models/deepseek_vl2/deepseek_ocr.sh b/examples/models/deepseek_vl2/deepseek_ocr.sh new file mode 100644 index 0000000000..cba3a0c555 --- /dev/null +++ b/examples/models/deepseek_vl2/deepseek_ocr.sh @@ -0,0 +1,30 @@ +# 24GiB +pip install "transformers==4.46.3" + +CUDA_VISIBLE_DEVICES=0 \ +swift sft \ + --model deepseek-ai/DeepSeek-OCR \ + --dataset 'AI-ModelScope/LaTeX_OCR:human_handwrite#20000' \ + --load_from_cache_file true \ + --split_dataset_ratio 0.01 \ + --train_type lora \ + --torch_dtype bfloat16 \ + --num_train_epochs 1 \ + --per_device_train_batch_size 1 \ + --per_device_eval_batch_size 1 \ + --learning_rate 1e-4 \ + --lora_rank 8 \ + --lora_alpha 32 \ + --target_modules all-linear \ + --freeze_vit true \ + --freeze_aligner true \ + --gradient_accumulation_steps 16 \ + --eval_steps 50 \ + --save_steps 50 \ + --save_total_limit 2 \ + --logging_steps 5 \ + --max_length 4096 \ + --output_dir output \ + --warmup_ratio 0.05 \ + --dataset_num_proc 4 \ + --dataloader_num_workers 4 diff --git a/swift/llm/model/model/deepseek.py b/swift/llm/model/model/deepseek.py index ff22846019..ee627da41b 100644 --- a/swift/llm/model/model/deepseek.py +++ b/swift/llm/model/model/deepseek.py @@ -341,5 +341,6 @@ def get_model_tokenizer_deepseek_ocr(*args, **kwargs): TemplateType.deepseek_ocr, get_model_tokenizer_deepseek_ocr, model_arch=ModelArch.deepseek_ocr, + requires=['transformers==4.46.3', 'easydict'], tags=['vision'], )) From e88fb1bc6217d8a43f37049d5eb0d8df40fe410d Mon Sep 17 00:00:00 2001 From: Jintao Huang Date: Tue, 21 Oct 2025 16:01:35 +0800 Subject: [PATCH 12/13] fix --- swift/llm/model/model/deepseek.py | 1 + swift/llm/template/template/deepseek.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/swift/llm/model/model/deepseek.py b/swift/llm/model/model/deepseek.py index ee627da41b..80694a2326 100644 --- a/swift/llm/model/model/deepseek.py +++ b/swift/llm/model/model/deepseek.py @@ -324,6 +324,7 @@ def get_model_tokenizer_deepseek_ocr(*args, **kwargs): kwargs['automodel_class'] = kwargs['automodel_class'] or AutoModel model, tokenizer = get_model_tokenizer_with_flash_attn(*args, **kwargs) if model is not None: + patch_output_clone(model.model.embed_tokens) patch_output_to_input_device(model.model.sam_model) patch_output_to_input_device(model.model.vision_model) patch_output_to_input_device(model.model.projector) diff --git a/swift/llm/template/template/deepseek.py b/swift/llm/template/template/deepseek.py index 688c76c7a2..16f9302c77 100644 --- a/swift/llm/template/template/deepseek.py +++ b/swift/llm/template/template/deepseek.py @@ -356,8 +356,8 @@ def _encode(self, inputs: StdTemplateInputs) -> Dict[str, Any]: image_token = self._tokenize('') idx_list = findall(input_ids, image_token) if idx_list: - tokenized_str, images_ori, images_crop, images_spatial_crop = self._preprocess_image(inputs.images, - image_token[0]) + tokenized_str, images_ori, images_crop, images_spatial_crop = self._preprocess_image( + inputs.images, image_token[0]) input_ids, labels, loss_scale = self._extend_tokens(input_ids, labels, loss_scale, idx_list, lambda i: tokenized_str[i]) encoded['input_ids'] = input_ids From 
1edb9290e5da056f93095376c0f3f398d1a601f1 Mon Sep 17 00:00:00 2001 From: Jintao Huang Date: Tue, 21 Oct 2025 16:14:03 +0800 Subject: [PATCH 13/13] fix --- swift/llm/template/template/deepseek.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/swift/llm/template/template/deepseek.py b/swift/llm/template/template/deepseek.py index 16f9302c77..b385d5266c 100644 --- a/swift/llm/template/template/deepseek.py +++ b/swift/llm/template/template/deepseek.py @@ -254,10 +254,7 @@ def _preprocess_image(self, images, image_token_id): patch_size = 16 downsample_ratio = 4 valid_img_tokens = 0 - - image_draw = images[0].copy() - - w, h = image_draw.size + w, h = images[0].size ratio = 1 - ((max(w, h) - min(w, h)) / (max(w, h))) image_transform = self.BasicImageTransform(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), normalize=True) @@ -333,7 +330,7 @@ def _preprocess_image(self, images, image_token_id): tokenized_image = ([image_token_id] * num_queries + [image_token_id]) * num_queries tokenized_image += [image_token_id] - tokenized_str += tokenized_image + tokenized_str.append(tokenized_image) if len(images_list) == 0: images_ori = torch.zeros((1, 3, self.image_size, self.image_size)) images_spatial_crop = torch.zeros((1, 2), dtype=torch.long)
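
For orientation, the per-image placeholder sequence that `_preprocess_image` appends to `tokenized_str` can be reproduced from the constants visible in this patch series. The following is a minimal sketch, not part of the patch: it assumes `num_queries` is derived as `base_size // patch_size // downsample_ratio` for the global view (the actual derivation lies outside the hunks shown), and it uses `128815` purely as an illustrative image-token id — the value the earlier revision hard-coded before PATCH 10 switched the template to `image_token[0]` taken from the tokenizer.

    # Minimal sketch (assumptions noted below) of the placeholder layout built in
    # DeepseekOCR._preprocess_image; mirrors the tokenized_image construction above.
    base_size = 1024          # get_env_args('base_size', int, 1024)
    patch_size = 16
    downsample_ratio = 4
    image_token_id = 128815   # illustrative only; the template reads image_token[0] from the tokenizer

    # Assumption: tokens per side of the global view; the real code may compute this differently.
    num_queries = base_size // patch_size // downsample_ratio  # 16

    # num_queries image tokens plus one separator token per row, then a single trailing token.
    tokenized_image = ([image_token_id] * num_queries + [image_token_id]) * num_queries
    tokenized_image += [image_token_id]

    assert len(tokenized_image) == num_queries * (num_queries + 1) + 1  # 273 placeholder tokens

Under these assumptions, a single 1024-pixel global view expands to 273 placeholder ids, which `_extend_tokens` then splices into `input_ids` at each `<image>` position; crops produced when `crop_mode` is enabled contribute additional placeholders on top of this.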