diff --git a/tests/unit_tests/mocks/mock_hf_models.py b/tests/unit_tests/mocks/mock_hf_models.py
index 737f6f29c..e13c70349 100644
--- a/tests/unit_tests/mocks/mock_hf_models.py
+++ b/tests/unit_tests/mocks/mock_hf_models.py
@@ -3,6 +3,10 @@
 
 def make_mock_model_and_tokenizer():
     """Returns a tuple of HF AutoModelForCausalLM and AutoTokenizer."""
+    import torch
+
+    torch.set_num_threads(1)
+
     from transformers import AutoModelForCausalLM, AutoTokenizer
 
     # Can regenerate the sample pipe with this:
@@ -15,9 +19,15 @@ def make_mock_model_and_tokenizer():
         os.path.abspath(os.path.normpath(os.path.dirname(__file__))), "tiny-random-gpt2"
     )
 
-    model = AutoModelForCausalLM.from_pretrained(savedir, local_files_only=True)
+    model = AutoModelForCausalLM.from_pretrained(
+        savedir,
+        local_files_only=True,
+    )
 
-    tokenizer = AutoTokenizer.from_pretrained(savedir, local_files_only=True)
+    tokenizer = AutoTokenizer.from_pretrained(
+        savedir,
+        local_files_only=True,
+    )
 
     return model, tokenizer
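
For context, here is a minimal sketch of how this mock might be exercised from a unit test. The test module name, prompt, and assertions are illustrative only and are not taken from the repository; the `torch.set_num_threads(1)` call above presumably keeps the CPU-only tiny model from oversubscribing cores when tests run in parallel.

```python
# tests/unit_tests/test_mock_hf_models.py (hypothetical filename, for illustration only)
from tests.unit_tests.mocks.mock_hf_models import make_mock_model_and_tokenizer


def test_mock_model_generates_tokens():
    # Loads the local tiny-random-gpt2 checkpoint; no network access is needed
    # because from_pretrained is called with local_files_only=True.
    model, tokenizer = make_mock_model_and_tokenizer()

    # Tokenize a short prompt and run a single generation step on CPU.
    inputs = tokenizer("hello world", return_tensors="pt")
    output_ids = model.generate(inputs["input_ids"], max_new_tokens=4)

    # The tiny random model produces arbitrary tokens; only shapes are checked here.
    assert output_ids.shape[0] == 1
    assert output_ids.shape[1] >= inputs["input_ids"].shape[1]
```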