diff --git a/guardrails/validator_base.py b/guardrails/validator_base.py
index 259b5214a..0a1a1e6e9 100644
--- a/guardrails/validator_base.py
+++ b/guardrails/validator_base.py
@@ -79,7 +79,9 @@ def validate_wrapper(self, *args, **kwargs):
     return validator
 
 
-def register_validator(name: str, data_type: Union[str, List[str]]):
+def register_validator(
+    name: str, data_type: Union[str, List[str]], has_guardrails_endpoint: bool = False
+):
     """Register a validator for a data type."""
     from guardrails.datatypes import types_registry
 
@@ -187,7 +189,6 @@ def __init__(
             f"{VALIDATOR_HUB_SERVICE}/validator/{validator_id}/inference"
         )
         self.validation_endpoint = submission_url
-        self.on_fail_descriptor: Union[str, OnFailAction] = "custom"
 
     # chunking function returns empty list or list of 2 chunks
 
@@ -275,6 +276,7 @@ def _inference(self, model_input: Any) -> Any:
             return self._inference_local(model_input)
         if not self.use_local and self.validation_endpoint:
             return self._inference_remote(model_input)
+
        raise RuntimeError(
             "No inference endpoint set, but use_local was false. "
             "Please set either use_local=True or "
@@ -329,14 +331,16 @@ def validate_stream(
             validation_result.validated_chunk = chunk_to_validate
         return validation_result
 
-    def _hub_inference_request(self, request_body: dict) -> Any:
-        """Makes a request to the Validator Hub to run a ML based validation
-        model. This request is authed through the hub and rerouted to a hosted
-        ML model. The reply from the hosted endpoint is returned and sent to
-        this client.
+    def _hub_inference_request(
+        self, request_body: dict, validation_endpoint: str
+    ) -> Any:
+        """Makes a request to the Validator Hub to run an ML-based validation model.
+        This request is authenticated through the hub and rerouted to a hosted ML
+        model. The reply from the hosted endpoint is returned and sent to this client.
 
         Args:
             request_body (dict): A dictionary containing the required info for the final
                 inference endpoint to run.
+            validation_endpoint (str): The URL of the hub inference endpoint to call.
 
         Raises:
@@ -345,24 +349,15 @@ def _hub_inference_request(self, request_body: dict) -> Any:
         Returns:
             Any: Post request response from the ML based validation model.
         """
-
-        try:
-            submission_url = self.validation_endpoint
-
-            headers = {
-                "Authorization": f"Bearer {self.hub_jwt_token}",
-                "Content-Type": "application/json",
-            }
-            req = requests.post(submission_url, json=request_body, headers=headers)
-            if not req.ok:
-                logging.error(req.status_code)
-
-            return req.json()
-
-        except Exception as e:
-            logging.error(
-                "An unexpected validation error occurred" f" in {self.rail_alias}: ", e
-            )
+        headers = {
+            "Authorization": f"Bearer {self.hub_jwt_token}",
+            "Content-Type": "application/json",
+        }
+        req = requests.post(validation_endpoint, json=request_body, headers=headers)
+        if not req.ok:
+            logging.error(req.status_code)
+
+        return req.json()
 
     def to_prompt(self, with_keywords: bool = True) -> str:
         """Convert the validator to a prompt.
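
For reviewers, here is a minimal sketch of how a hub-style validator would exercise the new signatures after this change. The validator name `guardrails/toxic_language`, the `request_body` payload shape, and the `is_toxic` response field are illustrative assumptions, not part of this diff; the imports follow the usual hub validator template.

```python
from typing import Any, Dict

from guardrails.validator_base import (
    FailResult,
    PassResult,
    ValidationResult,
    Validator,
    register_validator,
)


# has_guardrails_endpoint=True is the new flag introduced above; the
# validator name is a hypothetical hub id used only for illustration.
@register_validator(
    name="guardrails/toxic_language",
    data_type="string",
    has_guardrails_endpoint=True,
)
class ToxicLanguage(Validator):
    def validate(self, value: Any, metadata: Dict) -> ValidationResult:
        # The endpoint is now passed explicitly instead of being read from
        # self.validation_endpoint inside _hub_inference_request itself.
        response = self._hub_inference_request(
            request_body={"inputs": value},  # assumed payload shape
            validation_endpoint=self.validation_endpoint,
        )
        if response.get("is_toxic"):  # assumed response field
            return FailResult(error_message="Toxic language detected.")
        return PassResult()
```

Passing the endpoint as an argument, rather than reading `self.validation_endpoint` inside the method, lets one validator target several hosted routes without mutating instance state. It also makes the removal of the broad `try/except` safer: request failures now surface at the call site instead of being swallowed and silently returning `None`.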