Skip to content

Commit

Permalink
chore: Deflake test_generative_models system tests by allowing empty text with finish reason
Browse files Browse the repository at this point in the history

PiperOrigin-RevId: 636328795
  • Loading branch information
matthew29tang authored and Copybara-Service committed May 22, 2024
1 parent 0936f35 commit 22eba21
Show file tree
Hide file tree
Showing 2 changed files with 31 additions and 17 deletions.
30 changes: 16 additions & 14 deletions tests/system/aiplatform/test_language_models.py
Original file line number Diff line number Diff line change
Expand Up @@ -58,15 +58,16 @@ def test_text_generation(self, api_transport):

model = TextGenerationModel.from_pretrained("google/text-bison@001")
grounding_source = language_models.GroundingSource.WebSearch()
assert model.predict(
"What is the best recipe for banana bread? Recipe:",
response = model.predict(
"What is the best recipe for cupcakes? Recipe:",
max_output_tokens=128,
temperature=0.0,
top_p=1.0,
top_k=5,
stop_sequences=["# %%"],
grounding_source=grounding_source,
).text
)
assert response.text or response.is_blocked

@pytest.mark.parametrize("api_transport", ["grpc", "rest"])
def test_text_generation_preview_count_tokens(self, api_transport):
Expand Down Expand Up @@ -97,15 +98,15 @@ async def test_text_generation_model_predict_async(self, api_transport):
model = TextGenerationModel.from_pretrained("google/text-bison@001")
grounding_source = language_models.GroundingSource.WebSearch()
response = await model.predict_async(
"What is the best recipe for banana bread? Recipe:",
"What is the best recipe for cupcakes? Recipe:",
max_output_tokens=128,
temperature=0.0,
top_p=1.0,
top_k=5,
stop_sequences=["# %%"],
grounding_source=grounding_source,
)
assert response.text
assert response.text or response.is_blocked

@pytest.mark.parametrize("api_transport", ["grpc", "rest"])
def test_text_generation_streaming(self, api_transport):
Expand All @@ -118,13 +119,13 @@ def test_text_generation_streaming(self, api_transport):
model = TextGenerationModel.from_pretrained("google/text-bison@001")

for response in model.predict_streaming(
"What is the best recipe for banana bread? Recipe:",
"What is the best recipe for cupcakes? Recipe:",
max_output_tokens=128,
temperature=0.0,
top_p=1.0,
top_k=5,
):
assert response.text
assert response.text or response.is_blocked

@pytest.mark.parametrize("api_transport", ["grpc", "rest"])
def test_preview_text_embedding_top_level_from_pretrained(self, api_transport):
Expand All @@ -138,14 +139,15 @@ def test_preview_text_embedding_top_level_from_pretrained(self, api_transport):
foundation_model_name="google/text-bison@001"
)

assert model.predict(
"What is the best recipe for banana bread? Recipe:",
response = model.predict(
"What is the best recipe for cupcakes? Recipe:",
max_output_tokens=128,
temperature=0.0,
top_p=1.0,
top_k=5,
stop_sequences=["# %%"],
).text
)
assert response.text or response.is_blocked

assert isinstance(model, preview_language_models.TextGenerationModel)

Expand Down Expand Up @@ -430,13 +432,13 @@ def test_tuning(self, shared_state, api_transport):

# Testing the new model returned by the `tuning_job.get_tuned_model` method
response1 = tuned_model1.predict(
"What is the best recipe for banana bread? Recipe:",
"What is the best recipe for cupcakes? Recipe:",
max_output_tokens=128,
temperature=0.0,
top_p=1.0,
top_k=5,
)
assert response1.text
assert response1.text or response1.is_blocked

# Testing listing and getting tuned models
tuned_model_names = model.list_tuned_model_names()
Expand All @@ -446,13 +448,13 @@ def test_tuning(self, shared_state, api_transport):
tuned_model = TextGenerationModel.get_tuned_model(tuned_model_name)

tuned_model_response = tuned_model.predict(
"What is the best recipe for banana bread? Recipe:",
"What is the best recipe for cupcakes? Recipe:",
max_output_tokens=128,
temperature=0.0,
top_p=1.0,
top_k=5,
)
assert tuned_model_response.text
assert tuned_model_response.text or tuned_model_response.is_blocked

@pytest.mark.parametrize("api_transport", ["grpc", "rest"])
def test_batch_prediction_for_text_generation(self, api_transport):
Expand Down
18 changes: 15 additions & 3 deletions tests/system/vertexai/test_generative_models.py
Original file line number Diff line number Diff line change
Expand Up @@ -112,7 +112,11 @@ def test_generate_content_streaming(self):
generation_config=generative_models.GenerationConfig(temperature=0),
)
for chunk in stream:
assert chunk.text
assert (
chunk.text
or chunk.candidates[0].finish_reason
is generative_models.FinishReason.STOP
)

@pytest.mark.asyncio
async def test_generate_content_streaming_async(self):
Expand All @@ -123,7 +127,11 @@ async def test_generate_content_streaming_async(self):
generation_config=generative_models.GenerationConfig(temperature=0),
)
async for chunk in async_stream:
assert chunk.text
assert (
chunk.text
or chunk.candidates[0].finish_reason
is generative_models.FinishReason.STOP
)

def test_generate_content_with_parameters(self):
model = generative_models.GenerativeModel(
Expand Down Expand Up @@ -241,7 +249,11 @@ def test_grounding_google_search_retriever(self):
tools=[google_search_retriever_tool],
generation_config=generative_models.GenerationConfig(temperature=0),
)
assert response.text
assert (
response.candidates[0].finish_reason
is generative_models.FinishReason.RECITATION
or response.text
)

# Chat

Expand Down

0 comments on commit 22eba21

Please sign in to comment.