diff --git a/README.md b/README.md
index 8a728375d..7fe71f5a9 100644
--- a/README.md
+++ b/README.md
@@ -64,11 +64,11 @@ pip install guardrails-ai
 3. Create a Guard from the installed guardrail.
 
     ```python
-    from guardrails import Guard
+    from guardrails import Guard, OnFailAction
     from guardrails.hub import RegexMatch
 
     guard = Guard().use(
-        RegexMatch, regex="\(?\d{3}\)?-? *\d{3}-? *-?\d{4}", on_fail="exception"
+        RegexMatch, regex="\(?\d{3}\)?-? *\d{3}-? *-?\d{4}", on_fail=OnFailAction.EXCEPTION
     )
 
     guard.validate("123-456-7890")  # Guardrail passes
@@ -93,12 +93,12 @@ pip install guardrails-ai
     Then, create a Guard from the installed guardrails.
 
     ```python
-    from guardrails import Guard
+    from guardrails import Guard, OnFailAction
     from guardrails.hub import CompetitorCheck, ToxicLanguage
 
     guard = Guard().use_many(
-        CompetitorCheck(["Apple", "Microsoft", "Google"], on_fail="exception"),
-        ToxicLanguage(threshold=0.5, validation_method="sentence", on_fail="exception"),
+        CompetitorCheck(["Apple", "Microsoft", "Google"], on_fail=OnFailAction.EXCEPTION),
+        ToxicLanguage(threshold=0.5, validation_method="sentence", on_fail=OnFailAction.EXCEPTION),
     )
 
     guard.validate(
diff --git a/docs/how_to_guides/telemetry.mdx b/docs/how_to_guides/telemetry.mdx
index ae59b172d..50b21ce63 100644
--- a/docs/how_to_guides/telemetry.mdx
+++ b/docs/how_to_guides/telemetry.mdx
@@ -49,13 +49,13 @@ Then, set up your Guard the default tracer provided in the guardrails library. Y
 main.py
 
 ```python
-from guardrails import Guard
+from guardrails import Guard, OnFailAction
 from guardrails.utils.telemetry_utils import default_otlp_tracer
 from guardrails.hub import ValidLength
 import openai
 
 guard = Guard.from_string(
-    validators=[ValidLength(min=1, max=10, on_fail="exception")],
+    validators=[ValidLength(min=1, max=10, on_fail=OnFailAction.EXCEPTION)],
     # use a descriptive name that will differentiate where your metrics are stored
     tracer=default_otlp_tracer("petname_guard")
 )
@@ -112,13 +112,13 @@ For advanced use cases (like if you have a metrics provider in a VPC), you can u
 Standard [open telemetry environment variables](https://opentelemetry-python.readthedocs.io/en/stable/getting-started.html#configure-the-exporter) are used to configure the collector. Use the default_otel_collector_tracer when configuring your guard.
 
 ```python
-from guardrails import Guard
+from guardrails import Guard, OnFailAction
 from guardrails.utils.telemetry_utils import default_otel_collector_tracer
 from guardrails.hub import ValidLength
 import openai
 
 guard = Guard.from_string(
-    validators=[ValidLength(min=1, max=10, on_fail="exception")],
+    validators=[ValidLength(min=1, max=10, on_fail=OnFailAction.EXCEPTION)],
     # use a descriptive name that will differentiate where your metrics are stored
     tracer=default_otel_collector_tracer("petname_guard")
 )
diff --git a/docs/hub/concepts/on_fail_policies.md b/docs/hub/concepts/on_fail_policies.md
index d2ec6f483..836271d01 100644
--- a/docs/hub/concepts/on_fail_policies.md
+++ b/docs/hub/concepts/on_fail_policies.md
@@ -1,14 +1,14 @@
-# `on_fail` Policies for Validators
+# `OnFailActions` for Validators
 
-While initializing a validator, you can specify `on_fail` policies to handle the failure of the validator. The `on_fail` policy specifies the corrective action that should be taken if the quality criteria is not met. The corrective action can be one of the following:
+Guardrails provides a number of `OnFailActions` for when a validator fails. The `OnFailAction` specifies the corrective action that should be taken if the quality criteria is not met. The corrective action can be one of the following:
 
 | Action | Behavior |
 |-----------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| `reask` | Reask the LLM to generate an output that meets the correctness criteria specified in the validator. The prompt used for reasking contains information about which quality criteria failed, which is auto-generated by the validator. |
-| `fix` | Programmatically fix the generated output to meet the correctness criteria when possible. E.g. the formatter `provenance_llm` validator will remove any sentences that are estimated to be hallucinated. |
-| `filter` | (Only applicable for structured data validation) Filter the incorrect value. This only filters the field that fails, and will return the rest of the generated output. |
-| `refrain` | Refrain from returning an output. This is useful when the generated output is not safe to return, in which case a `None` value is returned instead. |
-| `noop` | Do nothing. The failure will still be recorded in the logs, but no corrective action will be taken. |
-| `exception` | Raise an exception when validation fails. |
-| `fix_reask` | First, fix the generated output deterministically, and then rerun validation with the deterministically fixed output. If validation fails, then perform reasking. |
+| `OnFailAction.REASK` | Reask the LLM to generate an output that meets the correctness criteria specified in the validator. The prompt used for reasking contains information about which quality criteria failed, which is auto-generated by the validator. |
+| `OnFailAction.FIX` | Programmatically fix the generated output to meet the correctness criteria when possible. E.g. the formatter `provenance_llm` validator will remove any sentences that are estimated to be hallucinated. |
+| `OnFailAction.FILTER` | (Only applicable for structured data validation) Filter the incorrect value. This only filters the field that fails, and will return the rest of the generated output. |
+| `OnFailAction.REFRAIN` | Refrain from returning an output. This is useful when the generated output is not safe to return, in which case a `None` value is returned instead. |
+| `OnFailAction.NOOP` | Do nothing. The failure will still be recorded in the logs, but no corrective action will be taken. |
+| `OnFailAction.EXCEPTION` | Raise an exception when validation fails. |
+| `OnFailAction.FIX_REASK` | First, fix the generated output deterministically, and then rerun validation with the deterministically fixed output. If validation fails, then perform reasking. |
 
diff --git a/guardrails/__init__.py b/guardrails/__init__.py
index 7c7ee0c4b..6e28dff7f 100644
--- a/guardrails/__init__.py
+++ b/guardrails/__init__.py
@@ -6,13 +6,14 @@
 from guardrails.prompt import Instructions, Prompt
 from guardrails.rail import Rail
 from guardrails.utils import constants, docs_utils
-from guardrails.validator_base import Validator, register_validator
+from guardrails.validator_base import OnFailAction, Validator, register_validator
 
 __all__ = [
     "Guard",
     "PromptCallableBase",
     "Rail",
     "Validator",
+    "OnFailAction",
     "register_validator",
     "constants",
     "docs_utils",
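
A minimal usage sketch of the enum-based `on_fail` form this patch standardizes on, assuming `guardrails-ai` is installed and the `RegexMatch` validator is available from the Guardrails Hub; the guard mirrors the README hunk above, and the failing input is purely illustrative.

```python
# Minimal sketch (not part of the patch): exercising the OnFailAction enum
# that guardrails/__init__.py now re-exports. Assumes guardrails-ai is
# installed and the RegexMatch validator is available from the Guardrails Hub.
from guardrails import Guard, OnFailAction
from guardrails.hub import RegexMatch

# Same guard as the README hunk above; OnFailAction.EXCEPTION replaces the
# old string form on_fail="exception" and raises when validation fails.
guard = Guard().use(
    RegexMatch,
    regex=r"\(?\d{3}\)?-? *\d{3}-? *-?\d{4}",
    on_fail=OnFailAction.EXCEPTION,
)

guard.validate("123-456-7890")  # matches the phone-number pattern, so it passes

try:
    guard.validate("not a phone number")  # illustrative failing input
except Exception as err:  # guardrails raises on failure under EXCEPTION
    print(f"validation failed: {err}")
```

Moving from bare strings to `OnFailAction` members keeps the available policies discoverable from the enum and lets editors and type checkers catch typos.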