Commit

Bypass flash_attn import validation.
PranjalChitale committed May 17, 2024
1 parent 90144fb commit 450b0b6
Showing 2 changed files with 8 additions and 5 deletions.
4 changes: 2 additions & 2 deletions huggingface_interface/example.py
@@ -98,9 +98,9 @@ def initialize_model_and_tokenizer(ckpt_dir, quantization, attn_implementation):

     if attn_implementation == "flash_attention_2":
         if is_flash_attn_2_available() and is_flash_attn_greater_or_equal_2_10():
-            attn_implementation == "flash_attention_2"
+            attn_implementation = "flash_attention_2"
         else:
-            attn_implementation == "eager"
+            attn_implementation = "eager"
 
     tokenizer = AutoTokenizer.from_pretrained(ckpt_dir, trust_remote_code=True)
     model = AutoModelForSeq2SeqLM.from_pretrained(
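The example.py change fixes an assignment bug: == compares and discards the result, so attn_implementation was never updated, whereas = actually reassigns it. Below is a minimal sketch of the corrected backend-selection logic; resolve_attn_implementation is a hypothetical helper, not part of the commit, and the transformers.utils import path is an assumption.

from transformers.utils import (
    is_flash_attn_2_available,
    is_flash_attn_greater_or_equal_2_10,
)


def resolve_attn_implementation(requested):
    # Downgrade a flash_attention_2 request to eager when a usable
    # FlashAttention 2 (>= 2.1.0) install is not found; otherwise pass
    # the requested backend through unchanged.
    if requested == "flash_attention_2":
        if is_flash_attn_2_available() and is_flash_attn_greater_or_equal_2_10():
            return "flash_attention_2"
        return "eager"
    return requested

The resolved string can then be passed as attn_implementation to the AutoModelForSeq2SeqLM.from_pretrained call that follows in example.py.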
9 changes: 6 additions & 3 deletions huggingface_interface/modeling_indictrans.py
@@ -54,9 +54,12 @@

 INDICTRANS_PRETRAINED_MODEL_ARCHIVE_LIST = [""]
 
-if is_flash_attn_2_available():
-    from flash_attn import flash_attn_func, flash_attn_varlen_func
-    from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input  # noqa
+try:
+    if is_flash_attn_2_available():
+        from flash_attn import flash_attn_func, flash_attn_varlen_func
+        from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input  # noqa
+except:
+    pass
 
 
 # Copied from transformers.models.llama.modeling_llama._get_unpad_data
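The modeling_indictrans.py change wraps the flash_attn imports in try/except so that an installation detected by is_flash_attn_2_available() that still fails to import (for example due to a CUDA or ABI mismatch) no longer blocks loading the model code; the names are simply left undefined and the eager attention path can be used instead. Below is a minimal, self-contained sketch of the same guarded-import pattern; the availability flag and the fused_attention helper are illustrative and not part of the commit, and except Exception is used here instead of the commit's bare except.

_flash_attn_available = False
try:
    # Importing can fail even when the package is installed (e.g. CUDA/ABI mismatch).
    from flash_attn import flash_attn_func
    _flash_attn_available = True
except Exception:
    flash_attn_func = None


def fused_attention(q, k, v):
    # Use the fused FlashAttention kernel when the import succeeded; return None
    # so the caller can fall back to the eager attention implementation.
    if _flash_attn_available:
        return flash_attn_func(q, k, v, causal=True)
    return None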
