Skip to content

Commit

Permalink
feat: LLM - Improved representation for blocked responses
Browse files Browse the repository at this point in the history
PiperOrigin-RevId: 569046322
  • Loading branch information
Ark-kun authored and Copybara-Service committed Sep 28, 2023
1 parent c22220e commit 222f222
Show file tree
Hide file tree
Showing 2 changed files with 22 additions and 1 deletion.
11 changes: 11 additions & 0 deletions tests/unit/aiplatform/test_language_models.py
Expand Up @@ -1354,6 +1354,17 @@ def test_text_generation_model_predict_streaming(self):
):
assert len(response.text) > 10

def test_text_generation_response_repr(self):
    """A blocked response with empty text must repr its full field set."""
    blocked_response = language_models.TextGenerationResponse(
        text="",
        is_blocked=True,
        safety_attributes={"Violent": 0.1},
        _prediction_response=None,
    )
    rendered = repr(blocked_response)
    # Both the blocked flag and the safety-attribute keys must be visible.
    for expected_token in ("blocked", "Violent"):
        assert expected_token in rendered

@pytest.mark.parametrize(
"job_spec",
[_TEST_PIPELINE_SPEC_JSON, _TEST_PIPELINE_JOB],
Expand Down
12 changes: 11 additions & 1 deletion vertexai/language_models/_language_models.py
Expand Up @@ -613,7 +613,17 @@ class TextGenerationResponse:
safety_attributes: Dict[str, float] = dataclasses.field(default_factory=dict)

def __repr__(self):
    """Return the generated text, or a full field dump when text is empty.

    When the model produced text, the repr is the text itself. When the
    text is empty (e.g. the response was blocked), a debug-friendly
    dataclass-style representation of all fields is returned instead.
    """
    if self.text:
        return self.text
    # No text to show — fall back to the full representation.
    field_reprs = ", ".join(
        f"{field_name}={getattr(self, field_name)!r}"
        for field_name in ("text", "is_blocked", "safety_attributes")
    )
    return f"TextGenerationResponse({field_reprs})"

@property
def raw_prediction_response(self) -> aiplatform.models.Prediction:
Expand Down

0 comments on commit 222f222

Please sign in to comment.