diff --git a/examples/pytorch/llm/README.md b/examples/pytorch/llm/README.md index 7c4ebb21db..13fe22de36 100644 --- a/examples/pytorch/llm/README.md +++ b/examples/pytorch/llm/README.md @@ -182,23 +182,23 @@ bash scripts/qwen_7b_chat_int4/qlora/infer.sh bash scripts/qwen_7b_chat_int4/qlora_ddp_ds/sft.sh bash scripts/qwen_7b_chat_int4/qlora_ddp_ds/infer.sh -# sft(lora) and infer qwen-7b-chat, Requires 60GB GPU memory. -# Recommended experimental environment: A100 +# sft(lora) and infer qwen-7b-chat, Requires 18GB GPU memory. +# Recommended experimental environment: V100, A10, 3090 bash scripts/qwen_7b_chat/lora/sft.sh bash scripts/qwen_7b_chat/lora/infer.sh -# sft(lora+ddp) and infer qwen-7b-chat, Requires 2*60GB GPU memory. -# Recommended experimental environment: A100 +# sft(lora+ddp) and infer qwen-7b-chat, Requires 2*18GB GPU memory. +# Recommended experimental environment: V100, A10, 3090 bash scripts/qwen_7b_chat/lora_ddp/sft.sh bash scripts/qwen_7b_chat/lora_ddp/infer.sh # sft(lora+ddp+deepspeed) and infer qwen-7b-chat, Requires 2*18GB GPU memory. -# Recommended experimental environment: A10, 3090 +# Recommended experimental environment: V100, A10, 3090 bash scripts/qwen_7b_chat/lora_ddp_ds/sft.sh bash scripts/qwen_7b_chat/lora_ddp_ds/infer.sh -# sft(lora+mp+ddp) and infer qwen-7b-chat, Requires 4*15GB GPU memory. -# Recommended experimental environment: A10, 3090 +# sft(lora+mp+ddp) and infer qwen-7b-chat, Requires 4*20GB GPU memory. +# Recommended experimental environment: V100, A10, 3090 bash scripts/qwen_7b_chat/lora_mp_ddp/sft.sh bash scripts/qwen_7b_chat/lora_mp_ddp/infer.sh @@ -213,12 +213,12 @@ bash scripts/qwen_7b_chat/full_mp_ddp/sft.sh bash scripts/qwen_7b_chat/full_mp_ddp/infer.sh # The qlora script based on bnb below is no longer recommended for use. Please prioritize using the qlora script based on auto_gptq. -# sft(qlora) and infer qwen-7b-chat, Requires 13GB GPU memory. +# sft(qlora) and infer qwen-7b-chat, Requires 18GB GPU memory. # Recommended experimental environment: A10, 3090 bash scripts/qwen_7b_chat/qlora/sft.sh bash scripts/qwen_7b_chat/qlora/infer.sh -# sft(qlora+ddp) and infer qwen-7b-chat, Requires 2*14GB GPU memory. +# sft(qlora+ddp) and infer qwen-7b-chat, Requires 2*20GB GPU memory. # Recommended experimental environment: A10, 3090 bash scripts/qwen_7b_chat/qlora_ddp/sft.sh bash scripts/qwen_7b_chat/qlora_ddp/infer.sh @@ -494,7 +494,7 @@ The template initialization function retrieves the complete chat template based - `--ddp_backend`: Represents the backend support for distributed training, default is `'nccl'`. The possible values are: 'nccl', 'gloo', 'mpi', 'ccl'. - `--seed`: Global seed value, default is 42. In distributed training, to avoid each process using the same dropout, etc., we set `seed=seed+rank`. - `--resume_from_checkpoint`: Used for resuming training from a checkpoint, default is `None`. You can set it to the path of the checkpoint, for example: `'output/qwen-7b-chat/vx_xxx/checkpoint-xxx'`, to resume training from that checkpoint. -- `--dtype`: The torch_dtype used when loading the base model, default is `None`, which means automatic selection of the dtype: if the machine does not support bf16, fp16 will be used instead. If the `MODEL_MAPPING` specifies a torch_dtype for the corresponding model, it will be used; otherwise, bf16 will be used. The available values are: 'bf16', 'fp16', 'fp32'.
+- `--dtype`: The torch_dtype used when loading the base model, default is `'AUTO'`, which means automatic selection of the dtype: if the machine does not support bf16, fp16 will be used instead. If the `MODEL_MAPPING` specifies a torch_dtype for the corresponding model, it will be used; otherwise, bf16 will be used. The available values are: 'bf16', 'fp16', 'fp32'. - `--dataset`: Used to select the training dataset, default is `'blossom-math-zh'`. Available datasets can be checked using `DATASET_MAPPING.keys()`. If you want to use multiple datasets for training, you can separate them using ',' or ' ', for example: `alpaca-en,alpaca-zh` or `alpaca-en alpaca-zh`. - `--dataset_seed`: Used to specify the seed for dataset processing. The default value is `42`. It is present in the form of `random_state` and does not affect the global seed. - `--dataset_test_ratio`: Specifies the ratio for splitting the sub-dataset into training and validation sets, default is `0.01`. This parameter is ignored if the sub-dataset has already been split into training and validation sets. When multiple sub-datasets are specified in `dataset` and the function for retrieving the sub-dataset does not perform the split (i.e., returns `HfDataset` or `Tuple[HfDataset, None]` instead of `Tuple[HfDataset, HfDataset]`), we need to split the sub-dataset. Finally, we concatenate the training and validation parts of these sub-datasets to generate the training and validation sets for the complete fine-tuning dataset. @@ -505,7 +505,7 @@ The template initialization function retrieves the complete chat template based - `custom_train_dataset_path`: The default value is `None`. Please refer to the `Custom Dataset` module in the README.md for specific meanings. - `custom_val_dataset_path`: The default value is `None`. Please refer to the `Custom Dataset` module in the README.md for specific meanings. - `--quantization_bit`: Specifies whether to perform quantization and the number of quantization bits, default is `0`, which means no quantization. Quantization is only supported for the lora fine-tuning method and not for full-parameter fine-tuning. -- `--bnb_4bit_comp_dtype`: When performing 4-bit quantization, we need to dequantize it during the model's forward and backward passes. This parameter specifies the torch_dtype after dequantization. Default is `None`, which means it remains consistent with `dtype`. The possible values are: 'fp16', 'bf16', 'fp32'. This parameter is ignored when `quantization_bit` is 0. +- `--bnb_4bit_comp_dtype`: When performing 4-bit quantization, we need to dequantize it during the model's forward and backward passes. This parameter specifies the torch_dtype after dequantization. Default is `'AUTO'`, which means it remains consistent with `dtype`. The possible values are: 'fp16', 'bf16', 'fp32'. This parameter is ignored when `quantization_bit` is 0. - `--bnb_4bit_quant_type`: The quantization type for 4-bit quantization, default is `'nf4'`. The possible values are: 'nf4', 'fp4'. This parameter is ignored when `quantization_bit` is 0. - `--bnb_4bit_use_double_quant`: Whether to enable double quantization during 4-bit quantization, default is `True`. This parameter is ignored when `quantization_bit` is 0. - `--lora_target_modules`: Specifies the LoRA module, default is `None`. If `lora_target_modules` is `None` or set to `DEFAULT`, it will look for `lora_target_modules` in `MODEL_MAPPING` based on `model_type` (default is set to qkv). 
If set to `ALL`, all Linear layers (excluding the head) will be specified as LoRA modules. This parameter only takes effect when `sft_type` is set to 'lora'. @@ -559,7 +559,7 @@ The template initialization function retrieves the complete chat template based - `--load_args_from_ckpt_dir`: Whether to load configuration information from the `sft_args.json` file in `ckpt_dir`. The default value is `True`. The imported keys include: `model_id_or_path`, `model_revision`, `sft_type`, `template_type`, `dtype`, `system`, `quantization_bit`, `bnb_4bit_comp_dtype`, `bnb_4bit_quant_type`, `bnb_4bit_use_double_quant`. If `eval_human` is set to False, the following keys will also be imported: `dataset`, `dataset_seed`, `dataset_test_ratio`, `check_dataset_strategy`, `custom_train_dataset_path`, `custom_val_dataset_path`. - `--eval_human`: Whether to evaluate using the validation set from the dataset or manually evaluate the model. Default value is `False`. This allows us to get an intuitive understanding of the model's performance after fine-tuning. - `--seed`: Default value is `42`. For specific parameter details, please refer to the `sft.sh Command Line Arguments`. -- `--dtype`: Default value is `None`. For specific parameter details, please refer to the `sft.sh Command Line Arguments`. +- `--dtype`: Default value is `'AUTO'`. For specific parameter details, please refer to the `sft.sh Command Line Arguments`. - `--dataset`: Default value is `'blossom-math-zh'`. For specific parameter details, please refer to the `sft.sh Command Line Arguments`. This parameter only takes effect when `eval_human` is set to False. - `--dataset_seed`: Default value is `42`. For specific parameter details, please refer to the `sft.sh Command Line Arguments`. This parameter only takes effect when `eval_human` is set to False. - `--dataset_test_ratio`: Default value is `0.01`. For specific parameter details, please refer to the `sft.sh Command Line Arguments`. This parameter only takes effect when `eval_human` is set to False. @@ -570,7 +570,7 @@ The template initialization function retrieves the complete chat template based - `--custom_train_dataset_path`: The default value is `None`. Please refer to the `Custom Dataset` module in the README.md for specific meanings. - `--custom_val_dataset_path`: The default value is `None`. Please refer to the `Custom Dataset` module in the README.md for specific meanings. - `--quantization_bit`: Default value is 0. For specific parameter details, please refer to the `sft.sh Command Line Arguments`. -- `--bnb_4bit_comp_dtype`: Default value is `None`. For specific parameter details, please refer to the `sft.sh Command Line Arguments`. This parameter is not effective if `quantization_bit` is set to 0. +- `--bnb_4bit_comp_dtype`: Default value is `'AUTO'`. For specific parameter details, please refer to the `sft.sh Command Line Arguments`. This parameter is not effective if `quantization_bit` is set to 0. - `--bnb_4bit_quant_type`: Default value is `'nf4'`. For specific parameter details, please refer to the `sft.sh Command Line Arguments`. This parameter is not effective if `quantization_bit` is set to 0. - `--bnb_4bit_use_double_quant`: Default value is `True`. For specific parameter details, please refer to the `sft.sh Command Line Arguments`. This parameter is not effective if `quantization_bit` is set to 0. - `--max_new_tokens`: Maximum number of new tokens to generate. Default value is `2048`.
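The README hunks above change the documented default of `--dtype` (and, below, `--bnb_4bit_comp_dtype`) from `None` to `'AUTO'`. To make the described resolution order concrete: first check bf16 hardware support, then any dtype pinned in `MODEL_MAPPING`, then fall back to bf16. The following is a minimal Python sketch of that logic; `resolve_dtype` and the sample `MODEL_MAPPING` entries are illustrative assumptions, not the library's actual code.

```python
import torch

# Illustrative MODEL_MAPPING entries (assumption): only torch_dtype matters here.
MODEL_MAPPING = {
    'qwen-7b-chat': {'torch_dtype': None},          # no dtype pinned for this model
    'chatglm2-6b': {'torch_dtype': torch.float16},  # example of a pinned dtype
}

def resolve_dtype(dtype_arg: str, model_type: str) -> torch.dtype:
    """Sketch of how '--dtype AUTO' is resolved, per the README description."""
    explicit = {'bf16': torch.bfloat16, 'fp16': torch.float16, 'fp32': torch.float32}
    if dtype_arg != 'AUTO':
        return explicit[dtype_arg]
    # 1) If the machine does not support bf16, fall back to fp16.
    if not (torch.cuda.is_available() and torch.cuda.is_bf16_supported()):
        return torch.float16
    # 2) A torch_dtype pinned in MODEL_MAPPING wins; 3) otherwise use bf16.
    pinned = MODEL_MAPPING.get(model_type, {}).get('torch_dtype')
    return pinned if pinned is not None else torch.bfloat16

print(resolve_dtype('AUTO', 'qwen-7b-chat'))  # fp16 on V100, bf16 on A10/A100
```

This is why the scripts below can switch from a hard-coded `--dtype bf16` to `--dtype AUTO` and still run on bf16-less cards such as the V100.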
diff --git a/examples/pytorch/llm/README_CN.md b/examples/pytorch/llm/README_CN.md index 2ad776404f..df043ff356 100644 --- a/examples/pytorch/llm/README_CN.md +++ b/examples/pytorch/llm/README_CN.md @@ -182,23 +182,23 @@ bash scripts/qwen_7b_chat_int4/qlora/infer.sh bash scripts/qwen_7b_chat_int4/qlora_ddp_ds/sft.sh bash scripts/qwen_7b_chat_int4/qlora_ddp_ds/infer.sh -# 微调(lora)+推理 qwen-7b-chat, 需要60GB显存. -# 推荐的实验环境: A100 +# 微调(lora)+推理 qwen-7b-chat, 需要18GB显存. +# 推荐的实验环境: V100, A10, 3090 bash scripts/qwen_7b_chat/lora/sft.sh bash scripts/qwen_7b_chat/lora/infer.sh -# 微调(lora+ddp)+推理 qwen-7b-chat, 需要2卡*60GB显存. -# 推荐的实验环境: A100 +# 微调(lora+ddp)+推理 qwen-7b-chat, 需要2卡*18GB显存. +# 推荐的实验环境: V100, A10, 3090 bash scripts/qwen_7b_chat/lora_ddp/sft.sh bash scripts/qwen_7b_chat/lora_ddp/infer.sh # 微调(lora+ddp+deepspeed)+推理 qwen-7b-chat, 需要2卡*18GB显存. -# 推荐的实验环境: A10, 3090 +# 推荐的实验环境: V100, A10, 3090 bash scripts/qwen_7b_chat/lora_ddp_ds/sft.sh bash scripts/qwen_7b_chat/lora_ddp_ds/infer.sh -# 微调(lora+mp+ddp)+推理 qwen-7b-chat, 需要4卡*15GB显存. -# 推荐的实验环境: A10, 3090 +# 微调(lora+mp+ddp)+推理 qwen-7b-chat, 需要4卡*20GB显存. +# 推荐的实验环境: V100, A10, 3090 bash scripts/qwen_7b_chat/lora_mp_ddp/sft.sh bash scripts/qwen_7b_chat/lora_mp_ddp/infer.sh @@ -213,12 +213,12 @@ bash scripts/qwen_7b_chat/full_mp_ddp/sft.sh bash scripts/qwen_7b_chat/full_mp_ddp/infer.sh # 以下基于bnb的qlora脚本已不再推荐使用. 请优先使用基于auto_gptq的qlora脚本. -# 微调(qlora)+推理 qwen-7b-chat, 需要13GB显存. +# 微调(qlora)+推理 qwen-7b-chat, 需要18GB显存. # 推荐的实验环境: A10, 3090 bash scripts/qwen_7b_chat/qlora/sft.sh bash scripts/qwen_7b_chat/qlora/infer.sh -# 微调(qlora+ddp)+推理 qwen-7b-chat, 需要2卡*14GB显存. +# 微调(qlora+ddp)+推理 qwen-7b-chat, 需要2卡*20GB显存. # 推荐的实验环境: A10, 3090 bash scripts/qwen_7b_chat/qlora_ddp/sft.sh bash scripts/qwen_7b_chat/qlora_ddp/infer.sh @@ -497,7 +497,7 @@ if __name__ == '__main__': - `--ddp_backend`: 表示分布式的后端支持, 默认是`'nccl'`. 你可以选择的值包括: 'nccl', 'gloo', 'mpi', 'ccl'. - `--seed`: 全局的seed, 默认使用42. 在分布式训练中, 为避免每个进程使用相同的dropout等情况, 我们会令`seed=seed+rank`. - `--resume_from_checkpoint`: 用于断点续训, 默认为`None`. 你可以将其设置为checkpoint的路径, 例如: `'output/qwen-7b-chat/vx_xxx/checkpoint-xxx'`, 来进行断点续训. -- `--dtype`: 基模型载入时的torch_dtype, 默认为`None`, 即智能选择dtype: 如果机器不支持bf16, 则使用fp16, 如果`MODEL_MAPPING`中对应模型有指定torch_dtype, 则使用其对应dtype, 否则使用bf16. 你可以选择的值包括: 'bf16', 'fp16', 'fp32'. +- `--dtype`: 基模型载入时的torch_dtype, 默认为`'AUTO'`, 即智能选择dtype: 如果机器不支持bf16, 则使用fp16, 如果`MODEL_MAPPING`中对应模型有指定torch_dtype, 则使用其对应dtype, 否则使用bf16. 你可以选择的值包括: 'bf16', 'fp16', 'fp32'. - `--dataset`: 用于选择训练的数据集, 默认为`'blossom-math-zh'`. 可以选择的数据集可以查看`DATASET_MAPPING.keys()`. 如果需要使用多个数据集进行训练, 你可以使用','或者' '进行分割, 例如: `alpaca-en,alpaca-zh` or `alpaca-en alpaca-zh`. - `--dataset_seed`: 用于指定数据集处理的seed, 默认为`42`. 以random_state形式存在, 不影响全局seed. - `--dataset_test_ratio`: 用于指定子数据集切分成训练集和验证集的比例, 默认为`0.01`. 如果子数据集已经进行了训练集和验证集的切分, 则此参数无效. 当`dataset`中指定了多个子数据集时, 且获取子数据集的函数没有进行训练集和验证集的切分(即返回的是`HfDataset`, `Tuple[HfDataset, None]`, 而不是`Tuple[HfDataset, HfDataset]`), 则我们需要对该子数据集进行切分. 最后, 我们会将这些子数据集的训练集和验证集部分分别进行拼接, 生成完整微调数据集的训练集和验证集. @@ -508,7 +508,7 @@ if __name__ == '__main__': - `--custom_train_dataset_path`: 默认值为`None`. 具体的含义参考README.md中的`自定义数据集`模块. - `--custom_val_dataset_path`: 默认值为`None`. 具体的含义参考README.md中的`自定义数据集`模块. - `--quantization_bit`: 用于指定是否进行量化和量化的bit数, 默认为`0`, 即不进行量化. 量化情况下, 只支持lora的微调方式, 不支持全参数的微调方式. -- `--bnb_4bit_comp_dtype`: 在进行4bit量化时, 我们需要在模型的forward和backward时, 将其进行反量化. 该参数用于指定反量化后的torch_dtype. 默认为`None`, 即与`dtype`保持一致. 可选择的值包括: 'fp16', 'bf16', 'fp32'. 当quantization_bit为0时, 该参数无效. 
+- `--bnb_4bit_comp_dtype`: 在进行4bit量化时, 我们需要在模型的forward和backward时, 将其进行反量化. 该参数用于指定反量化后的torch_dtype. 默认为`'AUTO'`, 即与`dtype`保持一致. 可选择的值包括: 'fp16', 'bf16', 'fp32'. 当quantization_bit为0时, 该参数无效. - `--bnb_4bit_quant_type`: 4bit量化时的量化方式, 默认是`'nf4'`. 可选择的值包括: 'nf4', 'fp4'. 当quantization_bit为0时, 该参数无效. - `--bnb_4bit_use_double_quant`: 是否在4bit量化时开启double量化, 默认为`True`. 当quantization_bit为0时, 该参数无效. - `--lora_target_modules`: 指定lora模块, 默认为`None`. 如果lora_target_modules为None, 或者传入'DEFAULT', 则根据`model_type`查找`MODEL_MAPPING`中的`lora_target_modules`(默认指定为qkv). 如果传入`ALL`, 则将所有的Linear层都指定为lora模块(不含head). 该参数只有当`sft_type`指定为'lora'时才生效. @@ -563,7 +563,7 @@ if __name__ == '__main__': - `--load_args_from_ckpt_dir`: 是否从`ckpt_dir`的`sft_args.json`文件中读取配置信息. 默认是`True`. 导入的keys包括: `model_id_or_path`, `model_revision`, `sft_type`, `template_type`, `dtype`, `system`, `quantization_bit`, `bnb_4bit_comp_dtype`, `bnb_4bit_quant_type`, `bnb_4bit_use_double_quant`. 如果`eval_human`设置为False, 则还会导入`dataset`, `dataset_seed`, `dataset_test_ratio`, `check_dataset_strategy`, `custom_train_dataset_path`, `custom_val_dataset_path`. - `--eval_human`: 使用数据集中的验证集部分进行评估还是使用人工的方式评估, 默认值为`False`. 我们可以直观感受到微调后模型的效果. - `--seed`: 默认值为`42`, 具体的参数介绍可以在`sft.sh命令行参数`中查看. -- `--dtype`: 默认值为`None`, 具体的参数介绍可以在`sft.sh命令行参数`中查看. +- `--dtype`: 默认值为`'AUTO'`, 具体的参数介绍可以在`sft.sh命令行参数`中查看. - `--dataset`: 默认值为`'blossom-math-zh'`, 具体的参数介绍可以在`sft.sh命令行参数`中查看. 该参数只有在`eval_human`设置为False时才生效. - `--dataset_seed`: 默认值为`42`, 具体的参数介绍可以在`sft.sh命令行参数`中查看. 该参数只有在`eval_human`设置为False时才生效. - `--dataset_test_ratio`: 默认值为`0.01`, 具体的参数介绍可以在`sft.sh命令行参数`中查看. 该参数只有在`eval_human`设置为False时才生效. @@ -574,7 +574,7 @@ if __name__ == '__main__': - `--custom_train_dataset_path`: 默认值为`None`. 具体的含义参考README.md中的`自定义数据集`模块. - `--custom_val_dataset_path`: 默认值为`None`. 具体的含义参考README.md中的`自定义数据集`模块. - `--quantization_bit`: 默认值为0. 具体的参数介绍可以在`sft.sh命令行参数`中查看. -- `--bnb_4bit_comp_dtype`: 默认值为`None`. 具体的参数介绍可以在`sft.sh命令行参数`中查看. 若`quantization_bit`设置为0, 则该参数失效. +- `--bnb_4bit_comp_dtype`: 默认值为`'AUTO'`. 具体的参数介绍可以在`sft.sh命令行参数`中查看. 若`quantization_bit`设置为0, 则该参数失效. - `--bnb_4bit_quant_type`: 默认值为`'nf4'`. 具体的参数介绍可以在`sft.sh命令行参数`中查看. 若`quantization_bit`设置为0, 则该参数失效. - `--bnb_4bit_use_double_quant`: 默认值为`True`. 具体的参数介绍可以在`sft.sh命令行参数`中查看. 若`quantization_bit`设置为0, 则该参数失效. - `--max_new_tokens`: 生成新token的最大数量, 默认值为`2048`.
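The qlora scripts below combine `--quantization_bit 4` with `--bnb_4bit_comp_dtype AUTO`, `--bnb_4bit_quant_type nf4`, and `--bnb_4bit_use_double_quant true`. These flags plausibly map onto a `BitsAndBytesConfig` from `transformers`, as in this hedged sketch; `make_bnb_config` is a hypothetical helper, not the project's actual plumbing.

```python
import torch
from transformers import BitsAndBytesConfig

def make_bnb_config(quantization_bit: int, comp_dtype: torch.dtype,
                    quant_type: str = 'nf4',
                    use_double_quant: bool = True):
    """Hypothetical helper mapping the script flags onto bitsandbytes quantization."""
    if quantization_bit == 0:
        return None  # the bnb_4bit_* flags are documented as ignored in this case
    return BitsAndBytesConfig(
        load_in_4bit=quantization_bit == 4,
        load_in_8bit=quantization_bit == 8,
        # '--bnb_4bit_comp_dtype AUTO' means: reuse the dtype resolved for '--dtype'
        bnb_4bit_compute_dtype=comp_dtype,
        bnb_4bit_quant_type=quant_type,      # 'nf4' or 'fp4'
        bnb_4bit_use_double_quant=use_double_quant,
    )

# e.g. '--quantization_bit 4 --bnb_4bit_comp_dtype AUTO' on a bf16-capable GPU:
config = make_bnb_config(4, torch.bfloat16)
```

With `AUTO` in both places, a single script therefore picks a compute dtype that matches whatever the hardware supports, which is what lets the hard-coded `bf16` values below be replaced wholesale.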
diff --git a/examples/pytorch/llm/scripts/baichuan2_13b_chat/lora_ddp_ds/sft.sh b/examples/pytorch/llm/scripts/baichuan2_13b_chat/lora_ddp_ds/sft.sh index 06fca28e34..7432801547 100644 --- a/examples/pytorch/llm/scripts/baichuan2_13b_chat/lora_ddp_ds/sft.sh +++ b/examples/pytorch/llm/scripts/baichuan2_13b_chat/lora_ddp_ds/sft.sh @@ -13,7 +13,7 @@ torchrun \ --sft_type lora \ --tuner_backend swift \ --template_type baichuan \ - --dtype bf16 \ + --dtype AUTO \ --output_dir output \ --ddp_backend nccl \ --dataset damo-agent-mini-zh \ diff --git a/examples/pytorch/llm/scripts/baichuan2_13b_chat/lora_mp_ddp/sft.sh b/examples/pytorch/llm/scripts/baichuan2_13b_chat/lora_mp_ddp/sft.sh index ac9e7d8735..76484b16c4 100644 --- a/examples/pytorch/llm/scripts/baichuan2_13b_chat/lora_mp_ddp/sft.sh +++ b/examples/pytorch/llm/scripts/baichuan2_13b_chat/lora_mp_ddp/sft.sh @@ -13,7 +13,7 @@ torchrun \ --sft_type lora \ --tuner_backend swift \ --template_type baichuan \ - --dtype bf16 \ + --dtype AUTO \ --output_dir output \ --ddp_backend nccl \ --dataset blossom-math-zh \ diff --git a/examples/pytorch/llm/scripts/baichuan2_13b_chat/qlora_ddp_ds/sft.sh b/examples/pytorch/llm/scripts/baichuan2_13b_chat/qlora_ddp_ds/sft.sh index f880a5f529..66fb752d0a 100644 --- a/examples/pytorch/llm/scripts/baichuan2_13b_chat/qlora_ddp_ds/sft.sh +++ b/examples/pytorch/llm/scripts/baichuan2_13b_chat/qlora_ddp_ds/sft.sh @@ -13,7 +13,7 @@ torchrun \ --sft_type lora \ --tuner_backend swift \ --template_type baichuan \ - --dtype bf16 \ + --dtype AUTO \ --output_dir output \ --ddp_backend nccl \ --dataset damo-agent-mini-zh \ @@ -22,7 +22,7 @@ torchrun \ --max_length 4096 \ --check_dataset_strategy warning \ --quantization_bit 4 \ - --bnb_4bit_comp_dtype bf16 \ + --bnb_4bit_comp_dtype AUTO \ --lora_rank 8 \ --lora_alpha 32 \ --lora_dropout_p 0.05 \ diff --git a/examples/pytorch/llm/scripts/baichuan2_13b_chat_int4/qlora_ddp_ds/sft.sh b/examples/pytorch/llm/scripts/baichuan2_13b_chat_int4/qlora_ddp_ds/sft.sh index d5aaa664b5..d1baa48f60 100644 --- a/examples/pytorch/llm/scripts/baichuan2_13b_chat_int4/qlora_ddp_ds/sft.sh +++ b/examples/pytorch/llm/scripts/baichuan2_13b_chat_int4/qlora_ddp_ds/sft.sh @@ -13,7 +13,7 @@ torchrun \ --sft_type lora \ --tuner_backend swift \ --template_type baichuan \ - --dtype bf16 \ + --dtype AUTO \ --output_dir output \ --ddp_backend nccl \ --dataset damo-agent-mini-zh \ diff --git a/examples/pytorch/llm/scripts/baichuan2_7b/qlora/sft.sh b/examples/pytorch/llm/scripts/baichuan2_7b/qlora/sft.sh index b858593680..a885e6ae2f 100644 --- a/examples/pytorch/llm/scripts/baichuan2_7b/qlora/sft.sh +++ b/examples/pytorch/llm/scripts/baichuan2_7b/qlora/sft.sh @@ -8,7 +8,7 @@ python llm_sft.py \ --sft_type lora \ --tuner_backend swift \ --template_type default-generation \ - --dtype bf16 \ + --dtype AUTO \ --output_dir output \ --dataset advertise-gen-zh \ --train_dataset_sample 20000 \ @@ -16,7 +16,7 @@ python llm_sft.py \ --max_length 2048 \ --check_dataset_strategy warning \ --quantization_bit 4 \ - --bnb_4bit_comp_dtype bf16 \ + --bnb_4bit_comp_dtype AUTO \ --lora_rank 8 \ --lora_alpha 32 \ --lora_dropout_p 0.05 \ diff --git a/examples/pytorch/llm/scripts/baichuan2_7b_chat/lora_ddp/sft.sh b/examples/pytorch/llm/scripts/baichuan2_7b_chat/lora_ddp/sft.sh index 9bb4297a5b..a3faa45c13 100644 --- a/examples/pytorch/llm/scripts/baichuan2_7b_chat/lora_ddp/sft.sh +++ b/examples/pytorch/llm/scripts/baichuan2_7b_chat/lora_ddp/sft.sh @@ -13,7 +13,7 @@ torchrun \ --sft_type lora \ --tuner_backend swift \ --template_type 
baichuan \ - --dtype bf16 \ + --dtype AUTO \ --output_dir output \ --ddp_backend nccl \ --dataset damo-agent-mini-zh \ diff --git a/examples/pytorch/llm/scripts/baichuan2_7b_chat/lora_ddp_ds/sft.sh b/examples/pytorch/llm/scripts/baichuan2_7b_chat/lora_ddp_ds/sft.sh index 976f5c1fa9..06c606dca2 100644 --- a/examples/pytorch/llm/scripts/baichuan2_7b_chat/lora_ddp_ds/sft.sh +++ b/examples/pytorch/llm/scripts/baichuan2_7b_chat/lora_ddp_ds/sft.sh @@ -13,7 +13,7 @@ torchrun \ --sft_type lora \ --tuner_backend swift \ --template_type baichuan \ - --dtype bf16 \ + --dtype AUTO \ --output_dir output \ --ddp_backend nccl \ --dataset damo-agent-mini-zh \ diff --git a/examples/pytorch/llm/scripts/baichuan2_7b_chat/qlora_ddp_ds/sft.sh b/examples/pytorch/llm/scripts/baichuan2_7b_chat/qlora_ddp_ds/sft.sh index 7f508771f3..242eaf651d 100644 --- a/examples/pytorch/llm/scripts/baichuan2_7b_chat/qlora_ddp_ds/sft.sh +++ b/examples/pytorch/llm/scripts/baichuan2_7b_chat/qlora_ddp_ds/sft.sh @@ -13,7 +13,7 @@ torchrun \ --sft_type lora \ --tuner_backend swift \ --template_type baichuan \ - --dtype bf16 \ + --dtype AUTO \ --output_dir output \ --ddp_backend nccl \ --dataset damo-agent-mini-zh \ @@ -22,7 +22,7 @@ torchrun \ --max_length 4096 \ --check_dataset_strategy warning \ --quantization_bit 4 \ - --bnb_4bit_comp_dtype bf16 \ + --bnb_4bit_comp_dtype AUTO \ --lora_rank 8 \ --lora_alpha 32 \ --lora_dropout_p 0.05 \ diff --git a/examples/pytorch/llm/scripts/baichuan2_7b_chat_int4/qlora_ddp_ds/sft.sh b/examples/pytorch/llm/scripts/baichuan2_7b_chat_int4/qlora_ddp_ds/sft.sh index 6ead6b7770..484f356570 100644 --- a/examples/pytorch/llm/scripts/baichuan2_7b_chat_int4/qlora_ddp_ds/sft.sh +++ b/examples/pytorch/llm/scripts/baichuan2_7b_chat_int4/qlora_ddp_ds/sft.sh @@ -13,7 +13,7 @@ torchrun \ --sft_type lora \ --tuner_backend swift \ --template_type baichuan \ - --dtype bf16 \ + --dtype AUTO \ --output_dir output \ --ddp_backend nccl \ --dataset damo-agent-mini-zh \ diff --git a/examples/pytorch/llm/scripts/baichuan_13b_chat/qlora_ddp_ds/sft.sh b/examples/pytorch/llm/scripts/baichuan_13b_chat/qlora_ddp_ds/sft.sh index 15e260cb4a..c59802081c 100644 --- a/examples/pytorch/llm/scripts/baichuan_13b_chat/qlora_ddp_ds/sft.sh +++ b/examples/pytorch/llm/scripts/baichuan_13b_chat/qlora_ddp_ds/sft.sh @@ -13,7 +13,7 @@ torchrun \ --sft_type lora \ --tuner_backend swift \ --template_type baichuan \ - --dtype bf16 \ + --dtype AUTO \ --output_dir output \ --ddp_backend nccl \ --dataset blossom-math-zh \ @@ -22,7 +22,7 @@ torchrun \ --max_length 2048 \ --check_dataset_strategy warning \ --quantization_bit 4 \ - --bnb_4bit_comp_dtype bf16 \ + --bnb_4bit_comp_dtype AUTO \ --lora_rank 8 \ --lora_alpha 32 \ --lora_dropout_p 0.05 \ diff --git a/examples/pytorch/llm/scripts/bluelm_7b_chat/lora/sft.sh b/examples/pytorch/llm/scripts/bluelm_7b_chat/lora/sft.sh index e7e0829c9e..4660b38795 100644 --- a/examples/pytorch/llm/scripts/bluelm_7b_chat/lora/sft.sh +++ b/examples/pytorch/llm/scripts/bluelm_7b_chat/lora/sft.sh @@ -8,7 +8,7 @@ python llm_sft.py \ --sft_type lora \ --tuner_backend swift \ --template_type bluelm \ - --dtype bf16 \ + --dtype AUTO \ --output_dir output \ --dataset blossom-math-zh \ --train_dataset_sample -1 \ diff --git a/examples/pytorch/llm/scripts/chatglm2_6b/lora_ddp_ds/sft.sh b/examples/pytorch/llm/scripts/chatglm2_6b/lora_ddp_ds/sft.sh index 214566216b..c023bca8d5 100644 --- a/examples/pytorch/llm/scripts/chatglm2_6b/lora_ddp_ds/sft.sh +++ b/examples/pytorch/llm/scripts/chatglm2_6b/lora_ddp_ds/sft.sh @@ -13,7 +13,7 
@@ torchrun \ --sft_type lora \ --tuner_backend swift \ --template_type chatglm2 \ - --dtype bf16 \ + --dtype AUTO \ --output_dir output \ --ddp_backend nccl \ --dataset leetcode-python-en \ diff --git a/examples/pytorch/llm/scripts/chatglm3_6b/lora/infer.sh b/examples/pytorch/llm/scripts/chatglm3_6b/lora/infer.sh new file mode 100644 index 0000000000..bfd10b75d3 --- /dev/null +++ b/examples/pytorch/llm/scripts/chatglm3_6b/lora/infer.sh @@ -0,0 +1,16 @@ +# Experimental environment: V100, A10, 3090 +# If you want to merge LoRA weight and save it, you need to set `--merge_lora_and_save true`. +PYTHONPATH=../../.. \ +CUDA_VISIBLE_DEVICES=0 \ +python llm_infer.py \ + --ckpt_dir "output/chatglm3-6b/vx_xxx/checkpoint-xxx" \ + --load_args_from_ckpt_dir true \ + --eval_human false \ + --max_length 2048 \ + --max_new_tokens 2048 \ + --temperature 0.9 \ + --top_k 20 \ + --top_p 0.9 \ + --repetition_penalty 1.05 \ + --do_sample true \ + --merge_lora_and_save false \ diff --git a/examples/pytorch/llm/scripts/chatglm3_6b/lora/sft.sh b/examples/pytorch/llm/scripts/chatglm3_6b/lora/sft.sh new file mode 100644 index 0000000000..4f5d1902ff --- /dev/null +++ b/examples/pytorch/llm/scripts/chatglm3_6b/lora/sft.sh @@ -0,0 +1,36 @@ +# Experimental environment: V100, A10, 3090 +# 16GB GPU memory +PYTHONPATH=../../.. \ +CUDA_VISIBLE_DEVICES=0 \ +python llm_sft.py \ + --model_id_or_path ZhipuAI/chatglm3-6b \ + --model_revision master \ + --sft_type lora \ + --tuner_backend swift \ + --template_type chatglm3 \ + --dtype AUTO \ + --output_dir output \ + --dataset blossom-math-zh \ + --train_dataset_sample -1 \ + --num_train_epochs 1 \ + --max_length 2048 \ + --check_dataset_strategy warning \ + --lora_rank 8 \ + --lora_alpha 32 \ + --lora_dropout_p 0.05 \ + --lora_target_modules DEFAULT \ + --gradient_checkpointing true \ + --batch_size 1 \ + --weight_decay 0.01 \ + --learning_rate 1e-4 \ + --gradient_accumulation_steps 16 \ + --max_grad_norm 0.5 \ + --warmup_ratio 0.03 \ + --eval_steps 100 \ + --save_steps 100 \ + --save_total_limit 2 \ + --logging_steps 10 \ + --push_to_hub false \ + --hub_model_id chatglm3-6b-lora \ + --hub_private_repo true \ + --hub_token 'your-sdk-token' \ diff --git a/examples/pytorch/llm/scripts/chatglm3_6b/lora_ddp_ds/sft.sh b/examples/pytorch/llm/scripts/chatglm3_6b/lora_ddp_ds/sft.sh index 017979ecf5..9d1535bede 100644 --- a/examples/pytorch/llm/scripts/chatglm3_6b/lora_ddp_ds/sft.sh +++ b/examples/pytorch/llm/scripts/chatglm3_6b/lora_ddp_ds/sft.sh @@ -13,7 +13,7 @@ torchrun \ --sft_type lora \ --tuner_backend swift \ --template_type chatglm3 \ - --dtype bf16 \ + --dtype AUTO \ --output_dir output \ --ddp_backend nccl \ --dataset leetcode-python-en \ diff --git a/examples/pytorch/llm/scripts/chatglm3_6b_32k/qlora/sft.sh b/examples/pytorch/llm/scripts/chatglm3_6b_32k/qlora/sft.sh index f5b5aeaf12..84e8ac8858 100644 --- a/examples/pytorch/llm/scripts/chatglm3_6b_32k/qlora/sft.sh +++ b/examples/pytorch/llm/scripts/chatglm3_6b_32k/qlora/sft.sh @@ -8,7 +8,7 @@ python llm_sft.py \ --sft_type lora \ --tuner_backend swift \ --template_type chatglm3 \ - --dtype bf16 \ + --dtype AUTO \ --output_dir output \ --dataset agent-instruct-all-en \ --train_dataset_sample -1 \ @@ -16,7 +16,7 @@ python llm_sft.py \ --max_length 4096 \ --check_dataset_strategy warning \ --quantization_bit 4 \ - --bnb_4bit_comp_dtype bf16 \ + --bnb_4bit_comp_dtype AUTO \ --lora_rank 8 \ --lora_alpha 32 \ --lora_dropout_p 0.05 \ diff --git a/examples/pytorch/llm/scripts/chatglm3_6b_32k/rome.sh 
b/examples/pytorch/llm/scripts/chatglm3_6b_32k/rome.sh index 0677b0023f..a3404ac00f 100644 --- a/examples/pytorch/llm/scripts/chatglm3_6b_32k/rome.sh +++ b/examples/pytorch/llm/scripts/chatglm3_6b_32k/rome.sh @@ -5,7 +5,7 @@ python rome_infer.py \ --model_id_or_path ZhipuAI/chatglm3-6b-32k \ --model_revision master \ --template_type chatglm3 \ - --dtype bf16 \ + --dtype AUTO \ --eval_human true \ --max_new_tokens 128 \ --temperature 0.1 \ diff --git a/examples/pytorch/llm/scripts/chatglm3_6b_base/lora/infer.sh b/examples/pytorch/llm/scripts/chatglm3_6b_base/lora/infer.sh new file mode 100644 index 0000000000..039773d331 --- /dev/null +++ b/examples/pytorch/llm/scripts/chatglm3_6b_base/lora/infer.sh @@ -0,0 +1,15 @@ +# Experimental environment: V100, A10, 3090 +PYTHONPATH=../../.. \ +CUDA_VISIBLE_DEVICES=0 \ +python llm_infer.py \ + --ckpt_dir "output/chatglm3-6b-base/vx_xxx/checkpoint-xxx" \ + --load_args_from_ckpt_dir true \ + --eval_human false \ + --max_length 2048 \ + --max_new_tokens 2048 \ + --temperature 0.9 \ + --top_k 20 \ + --top_p 0.9 \ + --repetition_penalty 1.05 \ + --do_sample true \ + --merge_lora_and_save false \ diff --git a/examples/pytorch/llm/scripts/chatglm3_6b_base/lora/sft.sh b/examples/pytorch/llm/scripts/chatglm3_6b_base/lora/sft.sh new file mode 100644 index 0000000000..d452e12dd1 --- /dev/null +++ b/examples/pytorch/llm/scripts/chatglm3_6b_base/lora/sft.sh @@ -0,0 +1,36 @@ +# Experimental environment: V100, A10, 3090 +# 16GB GPU memory +PYTHONPATH=../../.. \ +CUDA_VISIBLE_DEVICES=0 \ +python llm_sft.py \ + --model_id_or_path ZhipuAI/chatglm3-6b-base \ + --model_revision master \ + --sft_type lora \ + --tuner_backend swift \ + --template_type chatglm-generation \ + --dtype AUTO \ + --output_dir output \ + --dataset dureader-robust-zh \ + --train_dataset_sample -1 \ + --num_train_epochs 1 \ + --max_length 2048 \ + --check_dataset_strategy warning \ + --lora_rank 8 \ + --lora_alpha 32 \ + --lora_dropout_p 0.05 \ + --lora_target_modules DEFAULT \ + --gradient_checkpointing true \ + --batch_size 1 \ + --weight_decay 0.01 \ + --learning_rate 1e-4 \ + --gradient_accumulation_steps 16 \ + --max_grad_norm 0.5 \ + --warmup_ratio 0.03 \ + --eval_steps 100 \ + --save_steps 100 \ + --save_total_limit 2 \ + --logging_steps 10 \ + --push_to_hub false \ + --hub_model_id chatglm3-6b-base-lora \ + --hub_private_repo true \ + --hub_token 'your-sdk-token' \ diff --git a/examples/pytorch/llm/scripts/chatglm3_6b_base/lora_ddp_ds/sft.sh b/examples/pytorch/llm/scripts/chatglm3_6b_base/lora_ddp_ds/sft.sh index d5b5d2e95b..38198cdb32 100644 --- a/examples/pytorch/llm/scripts/chatglm3_6b_base/lora_ddp_ds/sft.sh +++ b/examples/pytorch/llm/scripts/chatglm3_6b_base/lora_ddp_ds/sft.sh @@ -13,7 +13,7 @@ torchrun \ --sft_type lora \ --tuner_backend swift \ --template_type chatglm-generation \ - --dtype bf16 \ + --dtype AUTO \ --output_dir output \ --ddp_backend nccl \ --dataset dureader-robust-zh \ diff --git a/examples/pytorch/llm/scripts/custom/tigerbot_13b_chat/qlora_ddp_ds/sft.sh b/examples/pytorch/llm/scripts/custom/tigerbot_13b_chat/qlora_ddp_ds/sft.sh index 39c7f779eb..ba045e83d1 100644 --- a/examples/pytorch/llm/scripts/custom/tigerbot_13b_chat/qlora_ddp_ds/sft.sh +++ b/examples/pytorch/llm/scripts/custom/tigerbot_13b_chat/qlora_ddp_ds/sft.sh @@ -12,7 +12,7 @@ torchrun \ --sft_type lora \ --tuner_backend swift \ --template_type tigerbot \ - --dtype bf16 \ + --dtype AUTO \ --output_dir output \ --ddp_backend nccl \ --dataset stsb-en \ @@ -21,7 +21,7 @@ torchrun \ --max_length 2048 \ 
--check_dataset_strategy warning \ --quantization_bit 4 \ - --bnb_4bit_comp_dtype bf16 \ + --bnb_4bit_comp_dtype AUTO \ --lora_rank 8 \ --lora_alpha 32 \ --lora_dropout_p 0.05 \ diff --git a/examples/pytorch/llm/scripts/custom/tigerbot_7b/lora_ddp_ds/sft.sh b/examples/pytorch/llm/scripts/custom/tigerbot_7b/lora_ddp_ds/sft.sh index f9a93cfcc7..1db2873fb9 100644 --- a/examples/pytorch/llm/scripts/custom/tigerbot_7b/lora_ddp_ds/sft.sh +++ b/examples/pytorch/llm/scripts/custom/tigerbot_7b/lora_ddp_ds/sft.sh @@ -12,7 +12,7 @@ torchrun \ --sft_type lora \ --tuner_backend swift \ --template_type default-generation \ - --dtype bf16 \ + --dtype AUTO \ --output_dir output \ --ddp_backend nccl \ --dataset stsb-en \ diff --git a/examples/pytorch/llm/scripts/internlm_20b/lora_ddp/sft.sh b/examples/pytorch/llm/scripts/internlm_20b/lora_ddp/sft.sh index daa2189553..35ef929477 100644 --- a/examples/pytorch/llm/scripts/internlm_20b/lora_ddp/sft.sh +++ b/examples/pytorch/llm/scripts/internlm_20b/lora_ddp/sft.sh @@ -12,7 +12,7 @@ torchrun \ --sft_type lora \ --tuner_backend swift \ --template_type default-generation \ - --dtype bf16 \ + --dtype AUTO \ --output_dir output \ --ddp_backend nccl \ --dataset jd-sentiment-zh \ diff --git a/examples/pytorch/llm/scripts/internlm_20b/qlora/sft.sh b/examples/pytorch/llm/scripts/internlm_20b/qlora/sft.sh index 08272133c3..592dbde833 100644 --- a/examples/pytorch/llm/scripts/internlm_20b/qlora/sft.sh +++ b/examples/pytorch/llm/scripts/internlm_20b/qlora/sft.sh @@ -8,7 +8,7 @@ python llm_sft.py \ --sft_type lora \ --tuner_backend swift \ --template_type default-generation \ - --dtype bf16 \ + --dtype AUTO \ --output_dir output \ --dataset advertise-gen-zh \ --train_dataset_sample 20000 \ @@ -16,7 +16,7 @@ python llm_sft.py \ --max_length 2048 \ --check_dataset_strategy warning \ --quantization_bit 4 \ - --bnb_4bit_comp_dtype bf16 \ + --bnb_4bit_comp_dtype AUTO \ --lora_rank 8 \ --lora_alpha 32 \ --lora_dropout_p 0.05 \ diff --git a/examples/pytorch/llm/scripts/internlm_20b_chat/lora_ddp/sft.sh b/examples/pytorch/llm/scripts/internlm_20b_chat/lora_ddp/sft.sh index cdad783289..0a2477af75 100644 --- a/examples/pytorch/llm/scripts/internlm_20b_chat/lora_ddp/sft.sh +++ b/examples/pytorch/llm/scripts/internlm_20b_chat/lora_ddp/sft.sh @@ -13,7 +13,7 @@ torchrun \ --sft_type lora \ --tuner_backend swift \ --template_type internlm \ - --dtype bf16 \ + --dtype AUTO \ --output_dir output \ --ddp_backend nccl \ --dataset sql-create-context-en \ diff --git a/examples/pytorch/llm/scripts/internlm_20b_chat/qlora/sft.sh b/examples/pytorch/llm/scripts/internlm_20b_chat/qlora/sft.sh index 89d34b74c0..5c92eb7459 100644 --- a/examples/pytorch/llm/scripts/internlm_20b_chat/qlora/sft.sh +++ b/examples/pytorch/llm/scripts/internlm_20b_chat/qlora/sft.sh @@ -8,7 +8,7 @@ python llm_sft.py \ --sft_type lora \ --tuner_backend swift \ --template_type internlm \ - --dtype bf16 \ + --dtype AUTO \ --output_dir output \ --dataset sql-create-context-en \ --train_dataset_sample 20000 \ @@ -16,7 +16,7 @@ python llm_sft.py \ --max_length 2048 \ --check_dataset_strategy warning \ --quantization_bit 4 \ - --bnb_4bit_comp_dtype bf16 \ + --bnb_4bit_comp_dtype AUTO \ --lora_rank 8 \ --lora_alpha 32 \ --lora_dropout_p 0.05 \ diff --git a/examples/pytorch/llm/scripts/internlm_20b_chat/qlora_ddp/sft.sh b/examples/pytorch/llm/scripts/internlm_20b_chat/qlora_ddp/sft.sh index 2966a1ab5a..4e7f96a388 100644 --- a/examples/pytorch/llm/scripts/internlm_20b_chat/qlora_ddp/sft.sh +++ 
b/examples/pytorch/llm/scripts/internlm_20b_chat/qlora_ddp/sft.sh @@ -13,7 +13,7 @@ torchrun \ --sft_type lora \ --tuner_backend swift \ --template_type internlm \ - --dtype bf16 \ + --dtype AUTO \ --output_dir output \ --dataset sql-create-context-en \ --train_dataset_sample 20000 \ @@ -21,7 +21,7 @@ torchrun \ --max_length 2048 \ --check_dataset_strategy warning \ --quantization_bit 4 \ - --bnb_4bit_comp_dtype bf16 \ + --bnb_4bit_comp_dtype AUTO \ --lora_rank 8 \ --lora_alpha 32 \ --lora_dropout_p 0.05 \ diff --git a/examples/pytorch/llm/scripts/llama2_13b_chat/qlora_ddp_ds/sft.sh b/examples/pytorch/llm/scripts/llama2_13b_chat/qlora_ddp_ds/sft.sh index 5599ae8b4e..1b724d9063 100644 --- a/examples/pytorch/llm/scripts/llama2_13b_chat/qlora_ddp_ds/sft.sh +++ b/examples/pytorch/llm/scripts/llama2_13b_chat/qlora_ddp_ds/sft.sh @@ -13,7 +13,7 @@ torchrun \ --sft_type lora \ --tuner_backend swift \ --template_type llama \ - --dtype bf16 \ + --dtype AUTO \ --output_dir output \ --ddp_backend nccl \ --dataset leetcode-python-en \ @@ -22,7 +22,7 @@ torchrun \ --max_length 4096 \ --check_dataset_strategy warning \ --quantization_bit 4 \ - --bnb_4bit_comp_dtype bf16 \ + --bnb_4bit_comp_dtype AUTO \ --lora_rank 8 \ --lora_alpha 32 \ --lora_dropout_p 0.05 \ diff --git a/examples/pytorch/llm/scripts/llama2_13b_chat/rome.sh b/examples/pytorch/llm/scripts/llama2_13b_chat/rome.sh index 478ddb7f67..0bfc8f7d02 100644 --- a/examples/pytorch/llm/scripts/llama2_13b_chat/rome.sh +++ b/examples/pytorch/llm/scripts/llama2_13b_chat/rome.sh @@ -5,7 +5,7 @@ python rome_infer.py \ --model_id_or_path modelscope/Llama-2-13b-chat-ms \ --model_revision master \ --template_type llama \ - --dtype bf16 \ + --dtype AUTO \ --eval_human true \ --max_new_tokens 128 \ --temperature 0.1 \ diff --git a/examples/pytorch/llm/scripts/llama2_70b_chat/qlora_ddp_ds/sft.sh b/examples/pytorch/llm/scripts/llama2_70b_chat/qlora_ddp_ds/sft.sh index b9a42a5e24..759f8110b1 100644 --- a/examples/pytorch/llm/scripts/llama2_70b_chat/qlora_ddp_ds/sft.sh +++ b/examples/pytorch/llm/scripts/llama2_70b_chat/qlora_ddp_ds/sft.sh @@ -13,7 +13,7 @@ torchrun \ --sft_type lora \ --tuner_backend swift \ --template_type llama \ - --dtype bf16 \ + --dtype AUTO \ --output_dir output \ --ddp_backend nccl \ --dataset leetcode-python-en \ @@ -22,7 +22,7 @@ torchrun \ --max_length 4096 \ --check_dataset_strategy warning \ --quantization_bit 4 \ - --bnb_4bit_comp_dtype bf16 \ + --bnb_4bit_comp_dtype AUTO \ --lora_rank 8 \ --lora_alpha 32 \ --lora_dropout_p 0.05 \ diff --git a/examples/pytorch/llm/scripts/llama2_70b_chat/qlora_mp/sft.sh b/examples/pytorch/llm/scripts/llama2_70b_chat/qlora_mp/sft.sh index cfa4438048..a1f2564ee9 100644 --- a/examples/pytorch/llm/scripts/llama2_70b_chat/qlora_mp/sft.sh +++ b/examples/pytorch/llm/scripts/llama2_70b_chat/qlora_mp/sft.sh @@ -9,7 +9,7 @@ python llm_sft.py \ --sft_type lora \ --tuner_backend swift \ --template_type llama \ - --dtype bf16 \ + --dtype AUTO \ --output_dir output \ --dataset sql-create-context-en \ --train_dataset_sample 20000 \ @@ -17,7 +17,7 @@ python llm_sft.py \ --max_length 2048 \ --check_dataset_strategy warning \ --quantization_bit 4 \ - --bnb_4bit_comp_dtype bf16 \ + --bnb_4bit_comp_dtype AUTO \ --lora_rank 8 \ --lora_alpha 32 \ --lora_dropout_p 0.05 \ diff --git a/examples/pytorch/llm/scripts/mistral_7b_chat/lora_ddp_ds/sft.sh b/examples/pytorch/llm/scripts/mistral_7b_chat/lora_ddp_ds/sft.sh index 231330c8e2..d4a25bbbe7 100644 --- a/examples/pytorch/llm/scripts/mistral_7b_chat/lora_ddp_ds/sft.sh +++ 
b/examples/pytorch/llm/scripts/mistral_7b_chat/lora_ddp_ds/sft.sh @@ -13,7 +13,7 @@ torchrun \ --sft_type lora \ --tuner_backend swift \ --template_type llama \ - --dtype bf16 \ + --dtype AUTO \ --output_dir output \ --ddp_backend nccl \ --dataset leetcode-python-en \ diff --git a/examples/pytorch/llm/scripts/mistral_7b_chat/lora_mp_ddp/infer.sh b/examples/pytorch/llm/scripts/mistral_7b_chat/lora_mp_ddp/infer.sh new file mode 100644 index 0000000000..4a2cfb4701 --- /dev/null +++ b/examples/pytorch/llm/scripts/mistral_7b_chat/lora_mp_ddp/infer.sh @@ -0,0 +1,16 @@ +# Experimental environment: 3090 +# If you want to merge LoRA weight and save it, you need to set `--merge_lora_and_save true`. +PYTHONPATH=../../.. \ +CUDA_VISIBLE_DEVICES=0 \ +python llm_infer.py \ + --ckpt_dir "output/mistral-7b-chat/vx_xxx/checkpoint-xxx" \ + --load_args_from_ckpt_dir true \ + --eval_human false \ + --max_length 4096 \ + --max_new_tokens 2048 \ + --temperature 0.9 \ + --top_k 20 \ + --top_p 0.9 \ + --repetition_penalty 1.05 \ + --do_sample true \ + --merge_lora_and_save false \ diff --git a/examples/pytorch/llm/scripts/mistral_7b_chat/lora_mp_ddp/sft.sh b/examples/pytorch/llm/scripts/mistral_7b_chat/lora_mp_ddp/sft.sh new file mode 100644 index 0000000000..cd0dfbd6cd --- /dev/null +++ b/examples/pytorch/llm/scripts/mistral_7b_chat/lora_mp_ddp/sft.sh @@ -0,0 +1,42 @@ +# Experimental environment: 4 * 3090 +# 4 * 19GB GPU memory +nproc_per_node=2 + +PYTHONPATH=../../.. \ +CUDA_VISIBLE_DEVICES=0,1,2,3 \ +torchrun \ + --nproc_per_node=$nproc_per_node \ + --master_port 29500 \ + llm_sft.py \ + --model_id_or_path AI-ModelScope/Mistral-7B-Instruct-v0.1 \ + --model_revision master \ + --sft_type lora \ + --tuner_backend swift \ + --template_type llama \ + --dtype AUTO \ + --output_dir output \ + --ddp_backend nccl \ + --dataset damo-agent-mini-zh \ + --train_dataset_sample 20000 \ + --num_train_epochs 1 \ + --max_length 4096 \ + --check_dataset_strategy warning \ + --lora_rank 8 \ + --lora_alpha 32 \ + --lora_dropout_p 0.05 \ + --lora_target_modules DEFAULT \ + --gradient_checkpointing true \ + --batch_size 1 \ + --weight_decay 0.01 \ + --learning_rate 1e-4 \ + --gradient_accumulation_steps $(expr 16 / $nproc_per_node) \ + --max_grad_norm 0.5 \ + --warmup_ratio 0.03 \ + --eval_steps 100 \ + --save_steps 100 \ + --save_total_limit 2 \ + --logging_steps 10 \ + --push_to_hub false \ + --hub_model_id mistral-7b-chat-lora \ + --hub_private_repo true \ + --hub_token 'your-sdk-token' \ diff --git a/examples/pytorch/llm/scripts/openbuddy_llama2_13b_chat/qlora_ddp_ds/sft.sh b/examples/pytorch/llm/scripts/openbuddy_llama2_13b_chat/qlora_ddp_ds/sft.sh index f41c8d2007..6fe3b1a445 100644 --- a/examples/pytorch/llm/scripts/openbuddy_llama2_13b_chat/qlora_ddp_ds/sft.sh +++ b/examples/pytorch/llm/scripts/openbuddy_llama2_13b_chat/qlora_ddp_ds/sft.sh @@ -13,7 +13,7 @@ torchrun \ --sft_type lora \ --tuner_backend swift \ --template_type openbuddy \ - --dtype bf16 \ + --dtype AUTO \ --output_dir output \ --ddp_backend nccl \ --dataset blossom-math-zh \ @@ -22,7 +22,7 @@ torchrun \ --max_length 2048 \ --check_dataset_strategy warning \ --quantization_bit 4 \ - --bnb_4bit_comp_dtype bf16 \ + --bnb_4bit_comp_dtype AUTO \ --lora_rank 8 \ --lora_alpha 32 \ --lora_dropout_p 0.05 \ diff --git a/examples/pytorch/llm/scripts/openbuddy_llama2_70b_chat/qlora_ddp_ds/sft.sh b/examples/pytorch/llm/scripts/openbuddy_llama2_70b_chat/qlora_ddp_ds/sft.sh index 6327677e67..f22c54de4c 100644 --- 
a/examples/pytorch/llm/scripts/openbuddy_llama2_70b_chat/qlora_ddp_ds/sft.sh +++ b/examples/pytorch/llm/scripts/openbuddy_llama2_70b_chat/qlora_ddp_ds/sft.sh @@ -13,7 +13,7 @@ torchrun \ --sft_type lora \ --tuner_backend swift \ --template_type openbuddy \ - --dtype bf16 \ + --dtype AUTO \ --output_dir output \ --ddp_backend nccl \ --dataset blossom-math-zh \ @@ -22,7 +22,7 @@ torchrun \ --max_length 2048 \ --check_dataset_strategy warning \ --quantization_bit 4 \ - --bnb_4bit_comp_dtype bf16 \ + --bnb_4bit_comp_dtype AUTO \ --lora_rank 8 \ --lora_alpha 32 \ --lora_dropout_p 0.05 \ diff --git a/examples/pytorch/llm/scripts/openbuddy_llama2_70b_chat/qlora_mp/sft.sh b/examples/pytorch/llm/scripts/openbuddy_llama2_70b_chat/qlora_mp/sft.sh index a25251b0a6..b811db0eef 100644 --- a/examples/pytorch/llm/scripts/openbuddy_llama2_70b_chat/qlora_mp/sft.sh +++ b/examples/pytorch/llm/scripts/openbuddy_llama2_70b_chat/qlora_mp/sft.sh @@ -8,7 +8,7 @@ python llm_sft.py \ --sft_type lora \ --tuner_backend swift \ --template_type openbuddy \ - --dtype bf16 \ + --dtype AUTO \ --output_dir output \ --dataset blossom-math-zh \ --train_dataset_sample -1 \ @@ -16,7 +16,7 @@ python llm_sft.py \ --max_length 2048 \ --check_dataset_strategy warning \ --quantization_bit 4 \ - --bnb_4bit_comp_dtype bf16 \ + --bnb_4bit_comp_dtype AUTO \ --lora_rank 8 \ --lora_alpha 32 \ --lora_dropout_p 0.05 \ diff --git a/examples/pytorch/llm/scripts/openbuddy_mistral_7b_chat/lora_ddp_ds/sft.sh b/examples/pytorch/llm/scripts/openbuddy_mistral_7b_chat/lora_ddp_ds/sft.sh index cc5871975a..27128b9c1a 100644 --- a/examples/pytorch/llm/scripts/openbuddy_mistral_7b_chat/lora_ddp_ds/sft.sh +++ b/examples/pytorch/llm/scripts/openbuddy_mistral_7b_chat/lora_ddp_ds/sft.sh @@ -13,7 +13,7 @@ torchrun \ --sft_type lora \ --tuner_backend swift \ --template_type openbuddy \ - --dtype bf16 \ + --dtype AUTO \ --output_dir output \ --ddp_backend nccl \ --dataset blossom-math-zh \ diff --git a/examples/pytorch/llm/scripts/openbuddy_mistral_7b_chat/lora_mp_ddp/infer.sh b/examples/pytorch/llm/scripts/openbuddy_mistral_7b_chat/lora_mp_ddp/infer.sh new file mode 100644 index 0000000000..de551d54b1 --- /dev/null +++ b/examples/pytorch/llm/scripts/openbuddy_mistral_7b_chat/lora_mp_ddp/infer.sh @@ -0,0 +1,16 @@ +# Experimental environment: 3090 +# If you want to merge LoRA weight and save it, you need to set `--merge_lora_and_save true`. +PYTHONPATH=../../.. \ +CUDA_VISIBLE_DEVICES=0 \ +python llm_infer.py \ + --ckpt_dir "output/openbuddy-mistral-7b-chat/vx_xxx/checkpoint-xxx" \ + --load_args_from_ckpt_dir true \ + --eval_human false \ + --max_length 4096 \ + --max_new_tokens 2048 \ + --temperature 0.9 \ + --top_k 20 \ + --top_p 0.9 \ + --repetition_penalty 1.05 \ + --do_sample true \ + --merge_lora_and_save false \ diff --git a/examples/pytorch/llm/scripts/openbuddy_mistral_7b_chat/lora_mp_ddp/sft.sh b/examples/pytorch/llm/scripts/openbuddy_mistral_7b_chat/lora_mp_ddp/sft.sh new file mode 100644 index 0000000000..ebdabcc29a --- /dev/null +++ b/examples/pytorch/llm/scripts/openbuddy_mistral_7b_chat/lora_mp_ddp/sft.sh @@ -0,0 +1,42 @@ +# Experimental environment: 4 * 3090 +# 4 * 19GB GPU memory +nproc_per_node=2 + +PYTHONPATH=../../.. 
\ +CUDA_VISIBLE_DEVICES=0,1,2,3 \ +torchrun \ + --nproc_per_node=$nproc_per_node \ + --master_port 29500 \ + llm_sft.py \ + --model_id_or_path OpenBuddy/openbuddy-mistral-7b-v13.1 \ + --model_revision master \ + --sft_type lora \ + --tuner_backend swift \ + --template_type openbuddy \ + --dtype AUTO \ + --output_dir output \ + --ddp_backend nccl \ + --dataset damo-agent-mini-zh \ + --train_dataset_sample 20000 \ + --num_train_epochs 1 \ + --max_length 4096 \ + --check_dataset_strategy warning \ + --lora_rank 8 \ + --lora_alpha 32 \ + --lora_dropout_p 0.05 \ + --lora_target_modules DEFAULT \ + --gradient_checkpointing true \ + --batch_size 1 \ + --weight_decay 0.01 \ + --learning_rate 1e-4 \ + --gradient_accumulation_steps $(expr 16 / $nproc_per_node) \ + --max_grad_norm 0.5 \ + --warmup_ratio 0.03 \ + --eval_steps 100 \ + --save_steps 100 \ + --save_total_limit 2 \ + --logging_steps 10 \ + --push_to_hub false \ + --hub_model_id openbuddy-mistral-7b-chat-lora \ + --hub_private_repo true \ + --hub_token 'your-sdk-token' \ diff --git a/examples/pytorch/llm/scripts/polylm_13b/qlora_ddp_ds/sft.sh b/examples/pytorch/llm/scripts/polylm_13b/qlora_ddp_ds/sft.sh index 181a055ed6..0349a2fb89 100644 --- a/examples/pytorch/llm/scripts/polylm_13b/qlora_ddp_ds/sft.sh +++ b/examples/pytorch/llm/scripts/polylm_13b/qlora_ddp_ds/sft.sh @@ -13,17 +13,16 @@ torchrun \ --sft_type lora \ --tuner_backend swift \ --template_type default-generation \ - --dtype bf16 \ + --dtype AUTO \ --output_dir output \ --ddp_backend nccl \ - --dtype bf16 \ --dataset advertise-gen-zh \ --train_dataset_sample 20000 \ --num_train_epochs 1 \ --max_length 2048 \ --check_dataset_strategy warning \ --quantization_bit 4 \ - --bnb_4bit_comp_dtype bf16 \ + --bnb_4bit_comp_dtype AUTO \ --lora_rank 8 \ --lora_alpha 32 \ --lora_dropout_p 0.05 \ diff --git a/examples/pytorch/llm/scripts/qwen_14b/lora_ddp_ds/sft.sh b/examples/pytorch/llm/scripts/qwen_14b/lora_ddp_ds/sft.sh index 66f8c56b0e..8a80ae939b 100644 --- a/examples/pytorch/llm/scripts/qwen_14b/lora_ddp_ds/sft.sh +++ b/examples/pytorch/llm/scripts/qwen_14b/lora_ddp_ds/sft.sh @@ -13,7 +13,7 @@ torchrun \ --sft_type lora \ --tuner_backend swift \ --template_type default-generation \ - --dtype bf16 \ + --dtype AUTO \ --output_dir output \ --ddp_backend nccl \ --dataset dureader-robust-zh \ diff --git a/examples/pytorch/llm/scripts/qwen_14b/qlora/sft.sh b/examples/pytorch/llm/scripts/qwen_14b/qlora/sft.sh index d0db88f940..70755cdef3 100644 --- a/examples/pytorch/llm/scripts/qwen_14b/qlora/sft.sh +++ b/examples/pytorch/llm/scripts/qwen_14b/qlora/sft.sh @@ -8,7 +8,7 @@ python llm_sft.py \ --sft_type lora \ --tuner_backend swift \ --template_type default-generation \ - --dtype bf16 \ + --dtype AUTO \ --output_dir output \ --dataset dureader-robust-zh \ --train_dataset_sample -1 \ @@ -16,7 +16,7 @@ python llm_sft.py \ --max_length 2048 \ --check_dataset_strategy warning \ --quantization_bit 4 \ - --bnb_4bit_comp_dtype bf16 \ + --bnb_4bit_comp_dtype AUTO \ --lora_rank 8 \ --lora_alpha 32 \ --lora_dropout_p 0.05 \ diff --git a/examples/pytorch/llm/scripts/qwen_14b/qlora_ddp_ds/sft.sh b/examples/pytorch/llm/scripts/qwen_14b/qlora_ddp_ds/sft.sh index 058edd7e0d..fbd4748ca9 100644 --- a/examples/pytorch/llm/scripts/qwen_14b/qlora_ddp_ds/sft.sh +++ b/examples/pytorch/llm/scripts/qwen_14b/qlora_ddp_ds/sft.sh @@ -1,5 +1,5 @@ # Experimental environment: 2 * A10 -# 2 * 14GB GPU memory (not use flash_attn) +# 2 * 14GB GPU memory nproc_per_node=2 PYTHONPATH=../../.. 
\ @@ -13,7 +13,7 @@ torchrun \ --sft_type lora \ --tuner_backend swift \ --template_type default-generation \ - --dtype bf16 \ + --dtype AUTO \ --output_dir output \ --ddp_backend nccl \ --dataset dureader-robust-zh \ @@ -22,7 +22,7 @@ torchrun \ --max_length 2048 \ --check_dataset_strategy warning \ --quantization_bit 4 \ - --bnb_4bit_comp_dtype bf16 \ + --bnb_4bit_comp_dtype AUTO \ --lora_rank 8 \ --lora_alpha 32 \ --lora_dropout_p 0.05 \ diff --git a/examples/pytorch/llm/scripts/qwen_14b_chat/lora_ddp_ds/sft.sh b/examples/pytorch/llm/scripts/qwen_14b_chat/lora_ddp_ds/sft.sh index 86dcaa4aa8..0eafc4271b 100644 --- a/examples/pytorch/llm/scripts/qwen_14b_chat/lora_ddp_ds/sft.sh +++ b/examples/pytorch/llm/scripts/qwen_14b_chat/lora_ddp_ds/sft.sh @@ -13,7 +13,7 @@ torchrun \ --sft_type lora \ --tuner_backend swift \ --template_type chatml \ - --dtype bf16 \ + --dtype AUTO \ --output_dir output \ --ddp_backend nccl \ --dataset blossom-math-zh \ diff --git a/examples/pytorch/llm/scripts/qwen_14b_chat/qlora/sft.sh b/examples/pytorch/llm/scripts/qwen_14b_chat/qlora/sft.sh index 8f366f392b..fc0be57ecb 100644 --- a/examples/pytorch/llm/scripts/qwen_14b_chat/qlora/sft.sh +++ b/examples/pytorch/llm/scripts/qwen_14b_chat/qlora/sft.sh @@ -1,5 +1,5 @@ # Experimental environment: A10, 3090 -# 16GB GPU memory (not use flash_attn) +# 16GB GPU memory PYTHONPATH=../../.. \ CUDA_VISIBLE_DEVICES=0 \ python llm_sft.py \ @@ -8,7 +8,7 @@ python llm_sft.py \ --sft_type lora \ --tuner_backend swift \ --template_type chatml \ - --dtype bf16 \ + --dtype AUTO \ --output_dir output \ --dataset blossom-math-zh \ --train_dataset_sample -1 \ @@ -16,7 +16,7 @@ python llm_sft.py \ --max_length 2048 \ --check_dataset_strategy warning \ --quantization_bit 4 \ - --bnb_4bit_comp_dtype bf16 \ + --bnb_4bit_comp_dtype AUTO \ --lora_rank 8 \ --lora_alpha 32 \ --lora_dropout_p 0.05 \ diff --git a/examples/pytorch/llm/scripts/qwen_14b_chat/qlora_ddp_ds/sft.sh b/examples/pytorch/llm/scripts/qwen_14b_chat/qlora_ddp_ds/sft.sh index 3b29cb451d..43fc4c2aaf 100644 --- a/examples/pytorch/llm/scripts/qwen_14b_chat/qlora_ddp_ds/sft.sh +++ b/examples/pytorch/llm/scripts/qwen_14b_chat/qlora_ddp_ds/sft.sh @@ -1,5 +1,5 @@ # Experimental environment: 2 * A10 -# 2 * 16GB GPU memory (not use flash_attn) +# 2 * 16GB GPU memory nproc_per_node=2 PYTHONPATH=../../.. \ @@ -13,7 +13,7 @@ torchrun \ --sft_type lora \ --tuner_backend swift \ --template_type chatml \ - --dtype bf16 \ + --dtype AUTO \ --output_dir output \ --ddp_backend nccl \ --dataset blossom-math-zh \ @@ -22,7 +22,7 @@ torchrun \ --max_length 2048 \ --check_dataset_strategy warning \ --quantization_bit 4 \ - --bnb_4bit_comp_dtype bf16 \ + --bnb_4bit_comp_dtype AUTO \ --lora_rank 8 \ --lora_alpha 32 \ --lora_dropout_p 0.05 \ diff --git a/examples/pytorch/llm/scripts/qwen_7b/lora_ddp_ds/sft.sh b/examples/pytorch/llm/scripts/qwen_7b/lora_ddp_ds/sft.sh index 8a4dbe5568..71b246b07d 100644 --- a/examples/pytorch/llm/scripts/qwen_7b/lora_ddp_ds/sft.sh +++ b/examples/pytorch/llm/scripts/qwen_7b/lora_ddp_ds/sft.sh @@ -1,5 +1,5 @@ # Experimental environment: 2 * A10 -# 2 * 19GB GPU memory (not use flash_attn) +# 2 * 19GB GPU memory nproc_per_node=2 PYTHONPATH=../../.. 
\ @@ -13,7 +13,7 @@ torchrun \ --sft_type lora \ --tuner_backend swift \ --template_type default-generation \ - --dtype bf16 \ + --dtype AUTO \ --output_dir output \ --ddp_backend nccl \ --dataset dureader-robust-zh \ diff --git a/examples/pytorch/llm/scripts/qwen_7b/qlora_ddp/sft.sh b/examples/pytorch/llm/scripts/qwen_7b/qlora_ddp/sft.sh index cfe08cbfd1..af0fc532c6 100644 --- a/examples/pytorch/llm/scripts/qwen_7b/qlora_ddp/sft.sh +++ b/examples/pytorch/llm/scripts/qwen_7b/qlora_ddp/sft.sh @@ -13,7 +13,7 @@ torchrun \ --sft_type lora \ --tuner_backend swift \ --template_type default-generation \ - --dtype bf16 \ + --dtype AUTO \ --output_dir output \ --ddp_backend nccl \ --dataset tigerbot-law-zh \ @@ -22,7 +22,7 @@ torchrun \ --max_length 4096 \ --check_dataset_strategy warning \ --quantization_bit 4 \ - --bnb_4bit_comp_dtype bf16 \ + --bnb_4bit_comp_dtype AUTO \ --lora_rank 8 \ --lora_alpha 32 \ --lora_dropout_p 0.05 \ diff --git a/examples/pytorch/llm/scripts/qwen_7b_chat/full_mp/sft.sh b/examples/pytorch/llm/scripts/qwen_7b_chat/full_mp/sft.sh index 48b6fb5793..d162451983 100644 --- a/examples/pytorch/llm/scripts/qwen_7b_chat/full_mp/sft.sh +++ b/examples/pytorch/llm/scripts/qwen_7b_chat/full_mp/sft.sh @@ -9,7 +9,7 @@ python llm_sft.py \ --model_revision master \ --sft_type full \ --template_type chatml \ - --dtype bf16 \ + --dtype AUTO \ --output_dir output \ --dataset damo-agent-zh \ --train_dataset_sample 200000 \ diff --git a/examples/pytorch/llm/scripts/qwen_7b_chat/full_mp_ddp/sft.sh b/examples/pytorch/llm/scripts/qwen_7b_chat/full_mp_ddp/sft.sh index 480d3294a1..27b75a483a 100644 --- a/examples/pytorch/llm/scripts/qwen_7b_chat/full_mp_ddp/sft.sh +++ b/examples/pytorch/llm/scripts/qwen_7b_chat/full_mp_ddp/sft.sh @@ -14,7 +14,7 @@ torchrun \ --model_revision master \ --sft_type full \ --template_type chatml \ - --dtype bf16 \ + --dtype AUTO \ --output_dir output \ --dataset medical-en medical-zh \ --train_dataset_sample 200000 \ diff --git a/examples/pytorch/llm/scripts/qwen_7b_chat/lora/infer.sh b/examples/pytorch/llm/scripts/qwen_7b_chat/lora/infer.sh index 3dfd23e397..b618573ba7 100644 --- a/examples/pytorch/llm/scripts/qwen_7b_chat/lora/infer.sh +++ b/examples/pytorch/llm/scripts/qwen_7b_chat/lora/infer.sh @@ -1,4 +1,4 @@ -# Experimental environment: A10, 3090 +# Experimental environment: V100, A10, 3090 # If you want to merge LoRA weight and save it, you need to set `--merge_lora_and_save true`. PYTHONPATH=../../.. \ CUDA_VISIBLE_DEVICES=0 \ @@ -6,8 +6,8 @@ python llm_infer.py \ --ckpt_dir "output/qwen-7b-chat/vx_xxx/checkpoint-xxx" \ --load_args_from_ckpt_dir true \ --eval_human false \ - --max_length 4096 \ - --use_flash_attn true \ + --max_length 2048 \ + --use_flash_attn false \ --max_new_tokens 2048 \ --temperature 0.9 \ --top_k 20 \ diff --git a/examples/pytorch/llm/scripts/qwen_7b_chat/lora/sft.sh b/examples/pytorch/llm/scripts/qwen_7b_chat/lora/sft.sh index 79c8a3c5e5..c52b921675 100644 --- a/examples/pytorch/llm/scripts/qwen_7b_chat/lora/sft.sh +++ b/examples/pytorch/llm/scripts/qwen_7b_chat/lora/sft.sh @@ -1,7 +1,5 @@ -# Experimental environment: A100 -# 60GB GPU memory (use flash_attn) -# You need to install flash_attn or set gradient_checkpointing to True, -# otherwise it may result in an OOM (Out of Memory) error. +# Experimental environment: V100, A10, 3090 +# 18GB GPU memory PYTHONPATH=../../.. 
\ CUDA_VISIBLE_DEVICES=0 \ python llm_sft.py \ @@ -10,18 +8,18 @@ python llm_sft.py \ --sft_type lora \ --tuner_backend swift \ --template_type chatml \ - --dtype bf16 \ + --dtype AUTO \ --output_dir output \ - --dataset damo-agent-mini-zh \ + --dataset blossom-math-zh \ --train_dataset_sample -1 \ --num_train_epochs 1 \ - --max_length 4096 \ + --max_length 2048 \ --check_dataset_strategy warning \ --lora_rank 8 \ --lora_alpha 32 \ --lora_dropout_p 0.05 \ - --lora_target_modules ALL \ - --gradient_checkpointing false \ + --lora_target_modules DEFAULT \ + --gradient_checkpointing true \ --batch_size 1 \ --weight_decay 0.01 \ --learning_rate 1e-4 \ @@ -32,7 +30,7 @@ python llm_sft.py \ --save_steps 100 \ --save_total_limit 2 \ --logging_steps 10 \ - --use_flash_attn true \ + --use_flash_attn false \ --push_to_hub false \ --hub_model_id qwen-7b-chat-lora \ --hub_private_repo true \ diff --git a/examples/pytorch/llm/scripts/qwen_7b_chat/lora_ddp/infer.sh b/examples/pytorch/llm/scripts/qwen_7b_chat/lora_ddp/infer.sh index 3dfd23e397..6436ad0e87 100644 --- a/examples/pytorch/llm/scripts/qwen_7b_chat/lora_ddp/infer.sh +++ b/examples/pytorch/llm/scripts/qwen_7b_chat/lora_ddp/infer.sh @@ -1,4 +1,4 @@ -# Experimental environment: A10, 3090 +# Experimental environment: A10 # If you want to merge LoRA weight and save it, you need to set `--merge_lora_and_save true`. PYTHONPATH=../../.. \ CUDA_VISIBLE_DEVICES=0 \ @@ -6,8 +6,8 @@ python llm_infer.py \ --ckpt_dir "output/qwen-7b-chat/vx_xxx/checkpoint-xxx" \ --load_args_from_ckpt_dir true \ --eval_human false \ - --max_length 4096 \ - --use_flash_attn true \ + --max_length 2048 \ + --use_flash_attn false \ --max_new_tokens 2048 \ --temperature 0.9 \ --top_k 20 \ diff --git a/examples/pytorch/llm/scripts/qwen_7b_chat/lora_ddp/sft.sh b/examples/pytorch/llm/scripts/qwen_7b_chat/lora_ddp/sft.sh index 9e05ecd98e..92c5a0fddc 100644 --- a/examples/pytorch/llm/scripts/qwen_7b_chat/lora_ddp/sft.sh +++ b/examples/pytorch/llm/scripts/qwen_7b_chat/lora_ddp/sft.sh @@ -1,7 +1,5 @@ -# Experimental environment: 2 * A100 -# 2 * 60GB GPU memory (use flash_attn) -# You need to install flash_attn or set gradient_checkpointing to True, -# otherwise it may result in an OOM (Out of Memory) error. +# Experimental environment: 2 * A10 +# 2 * 18GB GPU memory nproc_per_node=2 PYTHONPATH=../../.. 
\ @@ -15,19 +13,19 @@ torchrun \ --sft_type lora \ --tuner_backend swift \ --template_type chatml \ - --dtype bf16 \ + --dtype AUTO \ --output_dir output \ --ddp_backend nccl \ - --dataset damo-agent-mini-zh \ + --dataset blossom-math-zh \ --train_dataset_sample -1 \ --num_train_epochs 1 \ - --max_length 4096 \ + --max_length 2048 \ --check_dataset_strategy warning \ --lora_rank 8 \ --lora_alpha 32 \ --lora_dropout_p 0.05 \ - --lora_target_modules ALL \ - --gradient_checkpointing false \ + --lora_target_modules DEFAULT \ + --gradient_checkpointing true \ --batch_size 1 \ --weight_decay 0.01 \ --learning_rate 1e-4 \ @@ -38,7 +36,7 @@ torchrun \ --save_steps 100 \ --save_total_limit 2 \ --logging_steps 10 \ - --use_flash_attn true \ + --use_flash_attn false \ --push_to_hub false \ --hub_model_id qwen-7b-chat-lora \ --hub_private_repo true \ diff --git a/examples/pytorch/llm/scripts/qwen_7b_chat/lora_ddp_ds/sft.sh b/examples/pytorch/llm/scripts/qwen_7b_chat/lora_ddp_ds/sft.sh index 191f72eb65..0e7e74772d 100644 --- a/examples/pytorch/llm/scripts/qwen_7b_chat/lora_ddp_ds/sft.sh +++ b/examples/pytorch/llm/scripts/qwen_7b_chat/lora_ddp_ds/sft.sh @@ -1,5 +1,5 @@ # Experimental environment: 2 * A10 -# 2 * 18GB GPU memory (not use flash_attn) +# 2 * 18GB GPU memory nproc_per_node=2 PYTHONPATH=../../.. \ @@ -13,10 +13,10 @@ torchrun \ --sft_type lora \ --tuner_backend swift \ --template_type chatml \ - --dtype bf16 \ + --dtype AUTO \ --output_dir output \ --ddp_backend nccl \ - --dataset advertise-gen-zh \ + --dataset blossom-math-zh \ --train_dataset_sample -1 \ --num_train_epochs 1 \ --max_length 2048 \ @@ -24,7 +24,7 @@ torchrun \ --lora_rank 8 \ --lora_alpha 32 \ --lora_dropout_p 0.05 \ - --lora_target_modules ALL \ + --lora_target_modules DEFAULT \ --gradient_checkpointing true \ --batch_size 1 \ --weight_decay 0.01 \ diff --git a/examples/pytorch/llm/scripts/qwen_7b_chat/lora_mp_ddp/infer.sh b/examples/pytorch/llm/scripts/qwen_7b_chat/lora_mp_ddp/infer.sh index 8f194d9eae..9540587f4e 100644 --- a/examples/pytorch/llm/scripts/qwen_7b_chat/lora_mp_ddp/infer.sh +++ b/examples/pytorch/llm/scripts/qwen_7b_chat/lora_mp_ddp/infer.sh @@ -6,7 +6,7 @@ python llm_infer.py \ --ckpt_dir "output/qwen-7b-chat/vx_xxx/checkpoint-xxx" \ --load_args_from_ckpt_dir true \ --eval_human false \ - --max_length 2048 \ + --max_length 4096 \ --use_flash_attn false \ --max_new_tokens 2048 \ --temperature 0.9 \ diff --git a/examples/pytorch/llm/scripts/qwen_7b_chat/lora_mp_ddp/sft.sh b/examples/pytorch/llm/scripts/qwen_7b_chat/lora_mp_ddp/sft.sh index ea3079c51b..2135cb2077 100644 --- a/examples/pytorch/llm/scripts/qwen_7b_chat/lora_mp_ddp/sft.sh +++ b/examples/pytorch/llm/scripts/qwen_7b_chat/lora_mp_ddp/sft.sh @@ -1,5 +1,5 @@ # Experimental environment: 4 * 3090 -# 4 * 15GB GPU memory (not use flash_attn) +# 4 * 20GB GPU memory nproc_per_node=2 PYTHONPATH=../../.. 
@@ -13,18 +13,18 @@ torchrun \
     --sft_type lora \
     --tuner_backend swift \
     --template_type chatml \
-    --dtype bf16 \
+    --dtype AUTO \
     --output_dir output \
     --ddp_backend nccl \
-    --dataset advertise-gen-zh \
-    --train_dataset_sample -1 \
+    --dataset damo-agent-mini-zh \
+    --train_dataset_sample 20000 \
     --num_train_epochs 1 \
-    --max_length 2048 \
+    --max_length 4096 \
     --check_dataset_strategy warning \
     --lora_rank 8 \
     --lora_alpha 32 \
     --lora_dropout_p 0.05 \
-    --lora_target_modules c_attn \
+    --lora_target_modules DEFAULT \
     --gradient_checkpointing true \
     --batch_size 1 \
     --weight_decay 0.01 \
diff --git a/examples/pytorch/llm/scripts/qwen_7b_chat/qlora/sft.sh b/examples/pytorch/llm/scripts/qwen_7b_chat/qlora/sft.sh
index 12c0815ed7..6fc9537747 100644
--- a/examples/pytorch/llm/scripts/qwen_7b_chat/qlora/sft.sh
+++ b/examples/pytorch/llm/scripts/qwen_7b_chat/qlora/sft.sh
@@ -1,5 +1,5 @@
 # Experimental environment: A10, 3090
-# 13GB GPU memory
+# 18GB GPU memory
 PYTHONPATH=../../.. \
 CUDA_VISIBLE_DEVICES=0 \
 python llm_sft.py \
@@ -8,15 +8,15 @@ python llm_sft.py \
     --sft_type lora \
     --tuner_backend swift \
     --template_type chatml \
-    --dtype bf16 \
+    --dtype AUTO \
     --output_dir output \
-    --dataset leetcode-python-en \
-    --train_dataset_sample -1 \
+    --dataset damo-agent-mini-zh \
+    --train_dataset_sample 20000 \
     --num_train_epochs 1 \
     --max_length 4096 \
     --check_dataset_strategy warning \
     --quantization_bit 4 \
-    --bnb_4bit_comp_dtype bf16 \
+    --bnb_4bit_comp_dtype AUTO \
     --lora_rank 8 \
     --lora_alpha 32 \
     --lora_dropout_p 0.05 \
diff --git a/examples/pytorch/llm/scripts/qwen_7b_chat/qlora_ddp/infer.sh b/examples/pytorch/llm/scripts/qwen_7b_chat/qlora_ddp/infer.sh
index cb5cd16ab3..0e4b2b80f3 100644
--- a/examples/pytorch/llm/scripts/qwen_7b_chat/qlora_ddp/infer.sh
+++ b/examples/pytorch/llm/scripts/qwen_7b_chat/qlora_ddp/infer.sh
@@ -5,7 +5,7 @@ python llm_infer.py \
     --ckpt_dir "output/qwen-7b-chat/vx_xxx/checkpoint-xxx" \
     --load_args_from_ckpt_dir true \
     --eval_human false \
-    --max_length 2048 \
+    --max_length 4096 \
     --use_flash_attn false \
     --max_new_tokens 2048 \
     --temperature 0.9 \
diff --git a/examples/pytorch/llm/scripts/qwen_7b_chat/qlora_ddp/sft.sh b/examples/pytorch/llm/scripts/qwen_7b_chat/qlora_ddp/sft.sh
index d7eeeade80..8500d02820 100644
--- a/examples/pytorch/llm/scripts/qwen_7b_chat/qlora_ddp/sft.sh
+++ b/examples/pytorch/llm/scripts/qwen_7b_chat/qlora_ddp/sft.sh
@@ -1,5 +1,5 @@
 # Experimental environment: 2 * A10
-# 2 * 14GB GPU memory
+# 2 * 20GB GPU memory
 nproc_per_node=2

 PYTHONPATH=../../.. \
@@ -13,16 +13,16 @@ torchrun \
     --sft_type lora \
     --tuner_backend swift \
     --template_type chatml \
-    --dtype bf16 \
+    --dtype AUTO \
     --output_dir output \
     --ddp_backend nccl \
-    --dataset advertise-gen-zh \
+    --dataset damo-agent-mini-zh \
     --train_dataset_sample 20000 \
     --num_train_epochs 1 \
-    --max_length 2048 \
+    --max_length 4096 \
     --check_dataset_strategy warning \
     --quantization_bit 4 \
-    --bnb_4bit_comp_dtype bf16 \
+    --bnb_4bit_comp_dtype AUTO \
     --lora_rank 8 \
     --lora_alpha 32 \
     --lora_dropout_p 0.05 \
diff --git a/examples/pytorch/llm/scripts/qwen_7b_chat/qlora_ddp_ds/sft.sh b/examples/pytorch/llm/scripts/qwen_7b_chat/qlora_ddp_ds/sft.sh
index 284e410d7c..1169f3b1a9 100644
--- a/examples/pytorch/llm/scripts/qwen_7b_chat/qlora_ddp_ds/sft.sh
+++ b/examples/pytorch/llm/scripts/qwen_7b_chat/qlora_ddp_ds/sft.sh
@@ -13,7 +13,7 @@ torchrun \
     --sft_type lora \
     --tuner_backend swift \
     --template_type chatml \
-    --dtype bf16 \
+    --dtype AUTO \
     --output_dir output \
     --ddp_backend nccl \
     --dataset damo-agent-mini-zh \
@@ -22,7 +22,7 @@ torchrun \
     --max_length 4096 \
     --check_dataset_strategy warning \
     --quantization_bit 4 \
-    --bnb_4bit_comp_dtype bf16 \
+    --bnb_4bit_comp_dtype AUTO \
     --lora_rank 8 \
     --lora_alpha 32 \
     --lora_dropout_p 0.05 \
diff --git a/examples/pytorch/llm/scripts/qwen_vl/lora_ddp_ds/sft.sh b/examples/pytorch/llm/scripts/qwen_vl/lora_ddp_ds/sft.sh
index c1d661a688..e57e9d63de 100644
--- a/examples/pytorch/llm/scripts/qwen_vl/lora_ddp_ds/sft.sh
+++ b/examples/pytorch/llm/scripts/qwen_vl/lora_ddp_ds/sft.sh
@@ -1,5 +1,5 @@
 # Experimental environment: 2 * A10
-# 2 * 21GB GPU memory (not use flash_attn)
+# 2 * 21GB GPU memory
 nproc_per_node=2

 PYTHONPATH=../../.. \
@@ -13,7 +13,7 @@ torchrun \
     --sft_type lora \
     --tuner_backend swift \
     --template_type default \
-    --dtype bf16 \
+    --dtype AUTO \
     --output_dir output \
     --ddp_backend nccl \
     --dataset coco-en \
diff --git a/examples/pytorch/llm/scripts/qwen_vl_chat/lora_ddp_ds/sft.sh b/examples/pytorch/llm/scripts/qwen_vl_chat/lora_ddp_ds/sft.sh
index 27ae7178de..01826cbff3 100644
--- a/examples/pytorch/llm/scripts/qwen_vl_chat/lora_ddp_ds/sft.sh
+++ b/examples/pytorch/llm/scripts/qwen_vl_chat/lora_ddp_ds/sft.sh
@@ -1,5 +1,5 @@
 # Experimental environment: 2 * A10
-# 2 * 21GB GPU memory (not use flash_attn)
+# 2 * 21GB GPU memory
 nproc_per_node=2

 PYTHONPATH=../../.. \
@@ -13,7 +13,7 @@ torchrun \
     --sft_type lora \
     --tuner_backend swift \
     --template_type chatml \
-    --dtype bf16 \
+    --dtype AUTO \
     --output_dir output \
     --ddp_backend nccl \
     --dataset coco-en \
diff --git a/examples/pytorch/llm/scripts/qwen_vl_chat/qlora/sft.sh b/examples/pytorch/llm/scripts/qwen_vl_chat/qlora/sft.sh
index 1d19cd3dd8..1de3f7814f 100644
--- a/examples/pytorch/llm/scripts/qwen_vl_chat/qlora/sft.sh
+++ b/examples/pytorch/llm/scripts/qwen_vl_chat/qlora/sft.sh
@@ -1,5 +1,5 @@
 # Experimental environment: A10
-# 10GB GPU memory (not use flash_attn)
+# 10GB GPU memory
 PYTHONPATH=../../.. \
 CUDA_VISIBLE_DEVICES=0 \
 python llm_sft.py \
@@ -8,7 +8,7 @@ python llm_sft.py \
     --sft_type lora \
     --tuner_backend swift \
     --template_type chatml \
-    --dtype bf16 \
+    --dtype AUTO \
     --output_dir output \
     --dataset coco-en \
     --train_dataset_sample 20000 \
@@ -16,7 +16,7 @@ python llm_sft.py \
     --max_length 2048 \
     --check_dataset_strategy warning \
     --quantization_bit 4 \
-    --bnb_4bit_comp_dtype bf16 \
+    --bnb_4bit_comp_dtype AUTO \
     --lora_rank 8 \
     --lora_alpha 32 \
     --lora_dropout_p 0.05 \
diff --git a/examples/pytorch/llm/scripts/qwen_vl_chat_int4/qlora/sft.sh b/examples/pytorch/llm/scripts/qwen_vl_chat_int4/qlora/sft.sh
index 1395bf840e..f57554eeae 100644
--- a/examples/pytorch/llm/scripts/qwen_vl_chat_int4/qlora/sft.sh
+++ b/examples/pytorch/llm/scripts/qwen_vl_chat_int4/qlora/sft.sh
@@ -1,5 +1,5 @@
 # Experimental environment: A10
-# 11GB GPU memory (not use flash_attn)
+# 11GB GPU memory
 PYTHONPATH=../../.. \
 CUDA_VISIBLE_DEVICES=0 \
 python llm_sft.py \
diff --git a/examples/pytorch/llm/scripts/qwen_vl_chat_int4/qlora_ddp_ds/sft.sh b/examples/pytorch/llm/scripts/qwen_vl_chat_int4/qlora_ddp_ds/sft.sh
index d377053943..c18cf38ed9 100644
--- a/examples/pytorch/llm/scripts/qwen_vl_chat_int4/qlora_ddp_ds/sft.sh
+++ b/examples/pytorch/llm/scripts/qwen_vl_chat_int4/qlora_ddp_ds/sft.sh
@@ -1,5 +1,5 @@
 # Experimental environment: 2 * A10
-# 2 * 13GB GPU memory (not use flash_attn)
+# 2 * 13GB GPU memory
 nproc_per_node=2

 PYTHONPATH=../../.. \
diff --git a/examples/pytorch/llm/scripts/seqgpt_560m/full/sft.sh b/examples/pytorch/llm/scripts/seqgpt_560m/full/sft.sh
index 3c739f38d8..41bede9b04 100644
--- a/examples/pytorch/llm/scripts/seqgpt_560m/full/sft.sh
+++ b/examples/pytorch/llm/scripts/seqgpt_560m/full/sft.sh
@@ -7,7 +7,7 @@ python llm_sft.py \
     --model_revision master \
     --sft_type full \
     --template_type default-generation \
-    --dtype bf16 \
+    --dtype AUTO \
     --output_dir output \
     --dataset ner-jave-zh \
     --train_dataset_sample -1 \
diff --git a/examples/pytorch/llm/scripts/seqgpt_560m/full_ddp/sft.sh b/examples/pytorch/llm/scripts/seqgpt_560m/full_ddp/sft.sh
index 6bd887c862..74bdcc0a39 100644
--- a/examples/pytorch/llm/scripts/seqgpt_560m/full_ddp/sft.sh
+++ b/examples/pytorch/llm/scripts/seqgpt_560m/full_ddp/sft.sh
@@ -12,7 +12,7 @@ torchrun \
     --model_revision master \
     --sft_type full \
     --template_type default-generation \
-    --dtype bf16 \
+    --dtype AUTO \
     --output_dir output \
     --ddp_backend nccl \
     --dataset ner-jave-zh \
diff --git a/examples/pytorch/llm/scripts/skywork_13b/qlora/sft.sh b/examples/pytorch/llm/scripts/skywork_13b/qlora/sft.sh
index 48280fb3f9..13400b78ae 100644
--- a/examples/pytorch/llm/scripts/skywork_13b/qlora/sft.sh
+++ b/examples/pytorch/llm/scripts/skywork_13b/qlora/sft.sh
@@ -8,7 +8,7 @@ python llm_sft.py \
     --sft_type lora \
     --tuner_backend swift \
     --template_type default-generation \
-    --dtype bf16 \
+    --dtype AUTO \
     --output_dir output \
     --dataset advertise-gen-zh \
     --train_dataset_sample 20000 \
@@ -16,7 +16,7 @@ python llm_sft.py \
     --max_length 2048 \
     --check_dataset_strategy warning \
     --quantization_bit 4 \
-    --bnb_4bit_comp_dtype bf16 \
+    --bnb_4bit_comp_dtype AUTO \
     --lora_rank 8 \
     --lora_alpha 32 \
     --lora_dropout_p 0.05 \
diff --git a/examples/pytorch/llm/scripts/xverse_13b/qlora/sft.sh b/examples/pytorch/llm/scripts/xverse_13b/qlora/sft.sh
index 9dd5d2724d..c2d1300780 100644
--- a/examples/pytorch/llm/scripts/xverse_13b/qlora/sft.sh
+++ b/examples/pytorch/llm/scripts/xverse_13b/qlora/sft.sh
@@ -8,7 +8,7 @@ python llm_sft.py \
     --sft_type lora \
     --tuner_backend swift \
     --template_type default-generation \
-    --dtype bf16 \
+    --dtype AUTO \
     --output_dir output \
     --dataset advertise-gen-zh \
     --train_dataset_sample 20000 \
@@ -16,7 +16,7 @@ python llm_sft.py \
     --max_length 2048 \
     --check_dataset_strategy warning \
     --quantization_bit 4 \
-    --bnb_4bit_comp_dtype bf16 \
+    --bnb_4bit_comp_dtype AUTO \
     --lora_rank 8 \
     --lora_alpha 32 \
     --lora_dropout_p 0.05 \
diff --git a/examples/pytorch/llm/scripts/xverse_65b/qlora_mp/sft.sh b/examples/pytorch/llm/scripts/xverse_65b/qlora_mp/sft.sh
index 3fec8b41da..c9bb5691cc 100644
--- a/examples/pytorch/llm/scripts/xverse_65b/qlora_mp/sft.sh
+++ b/examples/pytorch/llm/scripts/xverse_65b/qlora_mp/sft.sh
@@ -8,7 +8,7 @@ python llm_sft.py \
     --sft_type lora \
     --tuner_backend swift \
     --template_type default-generation \
-    --dtype bf16 \
+    --dtype AUTO \
     --output_dir output \
     --dataset dureader-robust-zh \
     --train_dataset_sample -1 \
@@ -16,7 +16,7 @@ python llm_sft.py \
     --max_length 2048 \
     --check_dataset_strategy warning \
     --quantization_bit 4 \
-    --bnb_4bit_comp_dtype bf16 \
+    --bnb_4bit_comp_dtype AUTO \
     --lora_rank 8 \
     --lora_alpha 32 \
     --lora_dropout_p 0.05 \
diff --git a/examples/pytorch/llm/scripts/yi_34b/lora_ddp_ds/sft.sh b/examples/pytorch/llm/scripts/yi_34b/lora_ddp_ds/sft.sh
index 94d035f124..d6d5b7c0df 100644
--- a/examples/pytorch/llm/scripts/yi_34b/lora_ddp_ds/sft.sh
+++ b/examples/pytorch/llm/scripts/yi_34b/lora_ddp_ds/sft.sh
@@ -13,7 +13,7 @@ torchrun \
     --sft_type lora \
     --tuner_backend swift \
     --template_type default-generation \
-    --dtype bf16 \
+    --dtype AUTO \
     --output_dir output \
     --dataset dureader-robust-zh \
     --train_dataset_sample -1 \
diff --git a/examples/pytorch/llm/scripts/yi_6b/lora/sft.sh b/examples/pytorch/llm/scripts/yi_6b/lora/sft.sh
index ba0fcabf7c..8995f3e1a3 100644
--- a/examples/pytorch/llm/scripts/yi_6b/lora/sft.sh
+++ b/examples/pytorch/llm/scripts/yi_6b/lora/sft.sh
@@ -8,7 +8,7 @@ python llm_sft.py \
     --sft_type lora \
     --tuner_backend swift \
     --template_type default-generation \
-    --dtype bf16 \
+    --dtype AUTO \
     --output_dir output \
     --dataset dureader-robust-zh \
     --train_dataset_sample -1 \
diff --git a/examples/pytorch/llm/scripts/ziya2_13b_chat/qlora/sft.sh b/examples/pytorch/llm/scripts/ziya2_13b_chat/qlora/sft.sh
index 72c0a15b12..dd232651b4 100644
--- a/examples/pytorch/llm/scripts/ziya2_13b_chat/qlora/sft.sh
+++ b/examples/pytorch/llm/scripts/ziya2_13b_chat/qlora/sft.sh
@@ -9,7 +9,7 @@ python llm_sft.py \
     --sft_type lora \
     --tuner_backend swift \
     --template_type ziya \
-    --dtype bf16 \
+    --dtype AUTO \
     --output_dir output \
     --dataset lawyer-llama-zh \
     --train_dataset_sample -1 \
@@ -17,7 +17,7 @@ python llm_sft.py \
     --max_length 2048 \
     --check_dataset_strategy warning \
     --quantization_bit 4 \
-    --bnb_4bit_comp_dtype bf16 \
+    --bnb_4bit_comp_dtype AUTO \
     --lora_rank 8 \
     --lora_alpha 32 \
     --lora_dropout_p 0.05 \
diff --git a/examples/pytorch/llm/scripts/ziya2_13b_chat/qlora_ddp_ds/sft.sh b/examples/pytorch/llm/scripts/ziya2_13b_chat/qlora_ddp_ds/sft.sh
index 016197ae09..8e80bc27f9 100644
--- a/examples/pytorch/llm/scripts/ziya2_13b_chat/qlora_ddp_ds/sft.sh
+++ b/examples/pytorch/llm/scripts/ziya2_13b_chat/qlora_ddp_ds/sft.sh
@@ -13,7 +13,7 @@ torchrun \
     --sft_type lora \
     --tuner_backend swift \
     --template_type ziya \
-    --dtype bf16 \
+    --dtype AUTO \
     --output_dir output \
     --ddp_backend nccl \
     --dataset lawyer-llama-zh \
@@ -22,7 +22,7 @@ torchrun \
     --max_length 2048 \
     --check_dataset_strategy warning \
     --quantization_bit 4 \
-    --bnb_4bit_comp_dtype bf16 \
+    --bnb_4bit_comp_dtype AUTO \
     --lora_rank 8 \
     --lora_alpha 32 \
     --lora_dropout_p 0.05 \
diff --git a/swift/llm/utils/argument.py b/swift/llm/utils/argument.py
index 8b1ce0194f..f07b8fb86b 100644
--- a/swift/llm/utils/argument.py
+++ b/swift/llm/utils/argument.py
@@ -48,8 +48,8 @@ class SftArguments:
     seed: int = 42
     resume_from_checkpoint: Optional[str] = None
-    dtype: Optional[str] = field(
-        default=None, metadata={'choices': ['bf16', 'fp16', 'fp32']})
+    dtype: str = field(
+        default='AUTO', metadata={'choices': ['bf16', 'fp16', 'fp32', 'AUTO']})

     dataset: Optional[List[str]] = field(
         default=None,
@@ -73,7 +73,7 @@ class SftArguments:
     # note: bf16 and quantization have requirements for gpu architecture
     quantization_bit: int = field(default=0, metadata={'choices': [0, 4, 8]})
     bnb_4bit_comp_dtype: str = field(
-        default=None, metadata={'choices': ['fp16', 'bf16', 'fp32']})
+        default='AUTO', metadata={'choices': ['fp16', 'bf16', 'fp32', 'AUTO']})
     bnb_4bit_quant_type: str = field(
         default='nf4', metadata={'choices': ['fp4', 'nf4']})
     bnb_4bit_use_double_quant: bool = True
@@ -254,8 +254,8 @@ class InferArguments:
     eval_human: bool = False  # False: eval val_dataset
     seed: int = 42
-    dtype: Optional[str] = field(
-        default=None, metadata={'choices': ['bf16', 'fp16', 'fp32']})
+    dtype: str = field(
+        default='AUTO', metadata={'choices': ['bf16', 'fp16', 'fp32', 'AUTO']})

     dataset: Optional[List[str]] = field(
         default=None,
@@ -276,7 +276,7 @@ class InferArguments:
     quantization_bit: int = field(default=0, metadata={'choices': [0, 4, 8]})
     bnb_4bit_comp_dtype: str = field(
-        default=None, metadata={'choices': ['fp16', 'bf16', 'fp32']})
+        default='AUTO', metadata={'choices': ['fp16', 'bf16', 'fp32', 'AUTO']})
     bnb_4bit_quant_type: str = field(
         default='nf4', metadata={'choices': ['fp4', 'nf4']})
     bnb_4bit_use_double_quant: bool = True
@@ -358,14 +358,14 @@ def __post_init__(self) -> None:

 def select_dtype(
         args: Union[SftArguments, InferArguments]) -> Tuple[Dtype, bool, bool]:
-    if args.dtype is None and not torch.cuda.is_bf16_supported():
+    if args.dtype == 'AUTO' and not torch.cuda.is_bf16_supported():
         args.dtype = 'fp16'
-    if args.dtype is None and (args.model_type.endswith('int4')
-                               or args.model_type.endswith('int8')):
+    if args.dtype == 'AUTO' and (args.model_type.endswith('int4')
+                                 or args.model_type.endswith('int8')):
         model_torch_dtype = MODEL_MAPPING[args.model_type]['torch_dtype']
         if model_torch_dtype is not None:
             args.dtype = dtype_mapping[model_torch_dtype]
-    if args.dtype is None:
+    if args.dtype == 'AUTO':
         args.dtype = 'bf16'

     torch_dtype = dtype_mapping_reversed[args.dtype]
@@ -388,7 +388,7 @@ def select_dtype(

 def select_bnb(
         args: Union[SftArguments, InferArguments]) -> Tuple[Dtype, bool, bool]:
-    if args.bnb_4bit_comp_dtype is None:
+    if args.bnb_4bit_comp_dtype == 'AUTO':
         args.bnb_4bit_comp_dtype = args.dtype

     quantization_bit = args.quantization_bit
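The `argument.py` hunks above replace the `None` sentinel with the explicit string `'AUTO'` for both `dtype` and `bnb_4bit_comp_dtype`, making automatic selection an expressible command-line value (`--dtype AUTO`) rather than something obtainable only by omitting the flag. Below is a minimal, standalone sketch of the resolution order these hunks encode; the function names and the `machine_supports_bf16`/`model_torch_dtype` parameters are illustrative stand-ins for `torch.cuda.is_bf16_supported()` and the `MODEL_MAPPING` lookup, not the library's actual API.

```python
from typing import Optional


def resolve_dtype(dtype: str,
                  model_type: str,
                  machine_supports_bf16: bool,
                  model_torch_dtype: Optional[str] = None) -> str:
    """Mirror the patched select_dtype logic (sketch only)."""
    if dtype == 'AUTO' and not machine_supports_bf16:
        dtype = 'fp16'  # GPU lacks bf16: fall back to fp16
    if dtype == 'AUTO' and (model_type.endswith('int4')
                            or model_type.endswith('int8')):
        if model_torch_dtype is not None:
            dtype = model_torch_dtype  # honor the dtype pinned for int4/int8 models
    if dtype == 'AUTO':
        dtype = 'bf16'  # final default on bf16-capable hardware
    return dtype


def resolve_bnb_comp_dtype(bnb_4bit_comp_dtype: str, dtype: str) -> str:
    """Mirror the patched select_bnb logic: 'AUTO' inherits the resolved --dtype."""
    return dtype if bnb_4bit_comp_dtype == 'AUTO' else bnb_4bit_comp_dtype


if __name__ == '__main__':
    print(resolve_dtype('AUTO', 'qwen-7b-chat', True))               # bf16 (A10/A100)
    print(resolve_dtype('AUTO', 'qwen-7b-chat', False))              # fp16 (e.g. V100)
    print(resolve_dtype('AUTO', 'qwen-7b-chat-int4', True, 'fp16'))  # fp16 (pinned)
    print(resolve_bnb_comp_dtype('AUTO', 'bf16'))                    # bf16
```

One consequence worth noting: the bf16-capability check runs first, so the `MODEL_MAPPING` dtype for int4/int8 models is only consulted on bf16-capable hardware; on a machine without bf16 support, `'AUTO'` has already resolved to fp16 before the int4/int8 branch is reached.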