diff --git a/vllm/model_executor/models/llama.py b/vllm/model_executor/models/llama.py
index b467a0d28adb..cc83f5dd75f2 100644
--- a/vllm/model_executor/models/llama.py
+++ b/vllm/model_executor/models/llama.py
@@ -322,9 +322,10 @@ def load_weights(self, model_name_or_path, cache_dir, load_format, revision):
             if "rotary_emb.inv_freq" in name:
                 continue
-            if "rotary_emb.cos_cached" in name:
-                continue
-            if "rotary_emb.sin_cached" in name:
+            if ("rotary_emb.cos_cached" in name
+                    or "rotary_emb.sin_cached" in name):
+                # Models trained using ColossalAI may include these tensors in
+                # the checkpoint. Skip them.
                 continue
             for (param_name, weight_name, shard_id) in stacked_params_mapping:
                 if weight_name not in name:
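
For context, the guard is a plain substring filter on checkpoint tensor names, applied before the stacked-parameter remapping. A minimal standalone sketch of the post-patch behavior; the should_skip helper and the example weight names below are illustrative, not part of vLLM:

    def should_skip(name: str) -> bool:
        # Rotary-embedding buffers are recomputed at runtime; checkpoints
        # from some trainers (e.g. ColossalAI) persist them anyway, so they
        # are dropped instead of being matched against model parameters.
        skipped_substrings = (
            "rotary_emb.inv_freq",
            "rotary_emb.cos_cached",
            "rotary_emb.sin_cached",
        )
        return any(key in name for key in skipped_substrings)

    # Hypothetical checkpoint tensor names:
    assert should_skip("model.layers.0.self_attn.rotary_emb.cos_cached")
    assert should_skip("model.layers.0.self_attn.rotary_emb.sin_cached")
    assert not should_skip("model.layers.0.self_attn.q_proj.weight")

Folding both cached-buffer checks into one condition also gives the comment a single home, so the reason for skipping is recorded once next to both names.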