diff --git a/tests/test_pipelines_common.py b/tests/test_pipelines_common.py
index 88d7b2b184bd5a..12cee76bf8eab0 100644
--- a/tests/test_pipelines_common.py
+++ b/tests/test_pipelines_common.py
@@ -258,7 +258,7 @@ def __getitem__(self, i):
                 return self.data[i]
 
         text_classifier = pipeline(
-            task="text-classification", model="Narsil/tiny-distilbert-sequence-classification", framework="pt"
+            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt"
         )
         dataset = MyDataset()
         for output in text_classifier(dataset):
@@ -266,7 +266,7 @@ def __getitem__(self, i):
 
     @require_torch
     def test_check_task_auto_inference(self):
-        pipe = pipeline(model="Narsil/tiny-distilbert-sequence-classification")
+        pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert")
 
         self.assertIsInstance(pipe, TextClassificationPipeline)
 
@@ -275,7 +275,7 @@ def test_pipeline_override(self):
         class MyPipeline(TextClassificationPipeline):
             pass
 
-        text_classifier = pipeline(model="Narsil/tiny-distilbert-sequence-classification", pipeline_class=MyPipeline)
+        text_classifier = pipeline(model="hf-internal-testing/tiny-random-distilbert", pipeline_class=MyPipeline)
 
         self.assertIsInstance(text_classifier, MyPipeline)
 
@@ -293,11 +293,11 @@ def data(n: int):
             for _ in range(n):
                 yield "This is a test"
 
-        pipe = pipeline(model="Narsil/tiny-distilbert-sequence-classification")
+        pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert")
 
         results = []
         for out in pipe(data(10)):
-            self.assertEqual(nested_simplify(out), {"label": "LABEL_1", "score": 0.502})
+            self.assertEqual(nested_simplify(out), {"label": "LABEL_0", "score": 0.504})
             results.append(out)
         self.assertEqual(len(results), 10)
 
@@ -305,7 +305,7 @@ def data(n: int):
         # This will force using `num_workers=1` with a warning for now.
         results = []
         for out in pipe(data(10), num_workers=2):
-            self.assertEqual(nested_simplify(out), {"label": "LABEL_1", "score": 0.502})
+            self.assertEqual(nested_simplify(out), {"label": "LABEL_0", "score": 0.504})
             results.append(out)
         self.assertEqual(len(results), 10)
 
@@ -315,20 +315,20 @@ def data(n: int):
             for _ in range(n):
                 yield "This is a test"
 
-        pipe = pipeline(model="Narsil/tiny-distilbert-sequence-classification", framework="tf")
+        pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert", framework="tf")
         out = pipe("This is a test")
 
         results = []
         for out in pipe(data(10)):
-            self.assertEqual(nested_simplify(out), {"label": "LABEL_1", "score": 0.502})
+            self.assertEqual(nested_simplify(out), {"label": "LABEL_0", "score": 0.504})
             results.append(out)
         self.assertEqual(len(results), 10)
 
     @require_torch
     def test_unbatch_attentions_hidden_states(self):
         model = DistilBertForSequenceClassification.from_pretrained(
-            "Narsil/tiny-distilbert-sequence-classification", output_hidden_states=True, output_attentions=True
+            "hf-internal-testing/tiny-random-distilbert", output_hidden_states=True, output_attentions=True
         )
-        tokenizer = AutoTokenizer.from_pretrained("Narsil/tiny-distilbert-sequence-classification")
+        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-distilbert")
         text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)
         # Used to throw an error because `hidden_states` are a tuple of tensors
diff --git a/tests/test_pipelines_image_classification.py b/tests/test_pipelines_image_classification.py
index 81cdde3f3ffed0..a8f5cb4da3d4dd 100644
--- a/tests/test_pipelines_image_classification.py
+++ b/tests/test_pipelines_image_classification.py
@@ -67,7 +67,7 @@ def run_pipeline_test(self, image_classifier, examples):
 
         import datasets
 
-        dataset = datasets.load_dataset("Narsil/image_dummy", "image", split="test")
+        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
 
         # Accepts URL + PIL.Image + lists
         outputs = image_classifier(
diff --git a/tests/test_pipelines_image_segmentation.py b/tests/test_pipelines_image_segmentation.py
index ad4d456ba633b5..78c1df7d3ba910 100644
--- a/tests/test_pipelines_image_segmentation.py
+++ b/tests/test_pipelines_image_segmentation.py
@@ -68,7 +68,7 @@ def run_pipeline_test(self, image_segmenter, examples):
 
         import datasets
 
-        dataset = datasets.load_dataset("Narsil/image_dummy", "image", split="test")
+        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
 
         batch = [
             Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
diff --git a/tests/test_pipelines_object_detection.py b/tests/test_pipelines_object_detection.py
index 19f3447e97f7fe..0e7c293859ec23 100644
--- a/tests/test_pipelines_object_detection.py
+++ b/tests/test_pipelines_object_detection.py
@@ -74,7 +74,7 @@ def run_pipeline_test(self, object_detector, examples):
 
         import datasets
 
-        dataset = datasets.load_dataset("Narsil/image_dummy", "image", split="test")
+        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
 
         batch = [
             Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
diff --git a/tests/test_pipelines_text_classification.py b/tests/test_pipelines_text_classification.py
index 7bc794b4d3d260..39deed9bee55c9 100644
--- a/tests/test_pipelines_text_classification.py
+++ b/tests/test_pipelines_text_classification.py
@@ -33,20 +33,20 @@ class TextClassificationPipelineTests(unittest.TestCase, metaclass=PipelineTestC
     @require_torch
     def test_small_model_pt(self):
         text_classifier = pipeline(
-            task="text-classification", model="Narsil/tiny-distilbert-sequence-classification", framework="pt"
+            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt"
         )
 
         outputs = text_classifier("This is great !")
-        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_1", "score": 0.502}])
+        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])
 
     @require_tf
     def test_small_model_tf(self):
         text_classifier = pipeline(
-            task="text-classification", model="Narsil/tiny-distilbert-sequence-classification", framework="tf"
+            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf"
         )
 
         outputs = text_classifier("This is great !")
-        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_1", "score": 0.502}])
+        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])
 
     @slow
     @require_torch
diff --git a/tests/test_pipelines_token_classification.py b/tests/test_pipelines_token_classification.py
index cdb07091193009..8c469e14ab037c 100644
--- a/tests/test_pipelines_token_classification.py
+++ b/tests/test_pipelines_token_classification.py
@@ -582,14 +582,14 @@ def test_gather_pre_entities(self):
 
     @require_tf
     def test_tf_only(self):
-        model_name = "Narsil/small"  # This model only has a TensorFlow version
+        model_name = "hf-internal-testing/tiny-random-bert-tf-only"  # This model only has a TensorFlow version
         # We test that if we don't specificy framework='tf', it gets detected automatically
         token_classifier = pipeline(task="ner", model=model_name)
         self.assertEqual(token_classifier.framework, "tf")
 
     @require_tf
     def test_small_model_tf(self):
-        model_name = "Narsil/small2"
+        model_name = "hf-internal-testing/tiny-bert-for-token-classification"
         token_classifier = pipeline(task="token-classification", model=model_name, framework="tf")
         outputs = token_classifier("This is a test !")
         self.assertEqual(
@@ -602,8 +602,8 @@ def test_small_model_tf(self):
 
     @require_torch
     def test_no_offset_tokenizer(self):
-        model_name = "Narsil/small2"
-        tokenizer = AutoTokenizer.from_pretrained("Narsil/small2", use_fast=False)
+        model_name = "hf-internal-testing/tiny-bert-for-token-classification"
+        tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False)
         token_classifier = pipeline(task="token-classification", model=model_name, tokenizer=tokenizer, framework="pt")
         outputs = token_classifier("This is a test !")
         self.assertEqual(
@@ -616,7 +616,7 @@ def test_no_offset_tokenizer(self):
 
     @require_torch
     def test_small_model_pt(self):
-        model_name = "Narsil/small2"
+        model_name = "hf-internal-testing/tiny-bert-for-token-classification"
         token_classifier = pipeline(task="token-classification", model=model_name, framework="pt")
         outputs = token_classifier("This is a test !")
         self.assertEqual(