From 0eadfc8c861cfa18bd1445a3e1194e92fb1f35a7 Mon Sep 17 00:00:00 2001
From: Atlas <153689947+Barbarian7676@users.noreply.github.com>
Date: Tue, 16 Apr 2024 23:16:00 -0600
Subject: [PATCH] Create mixtral_22.yml (#1514) [skip ci]

Code sourced from here:
https://twitter.com/mattshumer_/status/1778135774887567712
---
 examples/mistral/mixtral_22.yml | 59 +++++++++++++++++++++++++++++++++
 1 file changed, 59 insertions(+)
 create mode 100644 examples/mistral/mixtral_22.yml

diff --git a/examples/mistral/mixtral_22.yml b/examples/mistral/mixtral_22.yml
new file mode 100644
index 000000000..3b480cf24
--- /dev/null
+++ b/examples/mistral/mixtral_22.yml
@@ -0,0 +1,59 @@
+base_model: mistral-community/Mixtral-8x22B-v0.1
+model_type: AutoModelForCausalLM
+tokenizer_type: LlamaTokenizer
+trust_remote_code: true
+
+load_in_8bit: false
+load_in_4bit: false
+strict: false
+
+unfrozen_parameters:
+  - ^lm_head.weight$
+  - ^model.embed_tokens.weight$
+  - model.layers.4[4-9]+.block_sparse_moe.gate
+  - model.layers.4[4-9]+.block_sparse_moe.experts
+  - model.layers.5[0-5]+.block_sparse_moe.gate
+  - model.layers.5[0-5]+.block_sparse_moe.experts
+
+model_config:
+  output_router_logits: true
+
+DATA_STUFF_HERE
+output_dir: ./out
+
+sequence_len: 8000
+sample_packing: true
+pad_to_sequence_len: true
+
+gradient_accumulation_steps: 1
+micro_batch_size: 1
+num_epochs: 3
+optimizer: adamw_bnb_8bit
+lr_scheduler: cosine
+learning_rate: 0.0001
+
+train_on_inputs: false
+group_by_length: false
+bf16: auto
+fp16:
+tf32: false
+
+gradient_checkpointing: true
+early_stopping_patience:
+resume_from_checkpoint:
+local_rank:
+logging_steps: 1
+xformers_attention:
+flash_attention: true
+
+save_total_limit: 1
+save_steps:
+debug:
+deepspeed: deepspeed_configs/zero3_bf16_cpuoffload_all.json
+weight_decay: 0.0
+fsdp:
+fsdp_config:
+special_tokens:
+  eos_token: "<|im_end|>"
+tokens:
+  - "<|im_start|>"
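
Note on the config above: `unfrozen_parameters` limits training to the lm_head and embed_tokens weights plus the MoE router gates and experts of the upper layers (indices 44-55), and `output_router_logits: true` makes the model return router logits so the MoE load-balancing auxiliary loss is included during training. The following is a minimal, illustrative Python sketch of how regex-based selective unfreezing of this kind can be applied to a Hugging Face model; it is not axolotl's implementation, and the helper name `apply_unfrozen_parameters` is invented for the example.

# Illustrative sketch only (not axolotl's code): freeze everything, then
# re-enable gradients for parameters whose names match the regexes above.
import re

UNFROZEN_PATTERNS = [
    r"^lm_head.weight$",
    r"^model.embed_tokens.weight$",
    r"model.layers.4[4-9]+.block_sparse_moe.gate",
    r"model.layers.4[4-9]+.block_sparse_moe.experts",
    r"model.layers.5[0-5]+.block_sparse_moe.gate",
    r"model.layers.5[0-5]+.block_sparse_moe.experts",
]

def apply_unfrozen_parameters(model, patterns=UNFROZEN_PATTERNS):
    """Set requires_grad only on parameters whose names match a pattern."""
    compiled = [re.compile(p) for p in patterns]
    trainable = 0
    for name, param in model.named_parameters():
        param.requires_grad = any(rx.search(name) for rx in compiled)
        if param.requires_grad:
            trainable += param.numel()
    return trainable

# Example usage (loading the full 8x22B checkpoint requires substantial memory):
# from transformers import AutoModelForCausalLM
# model = AutoModelForCausalLM.from_pretrained("mistral-community/Mixtral-8x22B-v0.1")
# print(f"trainable params: {apply_unfrozen_parameters(model):,}")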