```diff
+# Ensure that a non-empty model name was provided.
+if not user_model_name:
+    error_message = (
+        "No model name was provided for evaluation. For multi-model deployment, "
+        "a model must be specified in the model parameters."
+    )
+    logger.debug(error_message)
+    raise AquaValueError(error_message)
+
+# Retrieve and convert the model group count from metadata.
 model_count = custom_metadata_list.get(
     ModelCustomMetadataFields.MULTIMODEL_GROUP_COUNT
 )
-
-if model_count and custom_metadata_list:
+try:
     model_group_count = int(model_count.value)
-else:
-    logger.debug(
-        f"The ModelCustomMetadataFields.MULTIMODEL_GROUP_COUNT or custom_metadata_list (ModelCustomMetadata) is missing from the metadata in evaluation source ID: {create_aqua_evaluation_details.evaluation_source_id}"
+except Exception as ex:
+    error_message = (
+        "Missing or invalid `MULTIMODEL_GROUP_COUNT` "
+        f"in custom metadata for evaluation source ID '{create_aqua_evaluation_details.evaluation_source_id}'. "
+        f"Details: {ex}"
+    )
+    logger.error(error_message)
+
+if model_group_count < 1:
+    error_message = (
+        f"Invalid model group count: {model_group_count} for evaluation source ID "
+        f"'{create_aqua_evaluation_details.evaluation_source_id}'. A valid multi-model deployment "
+        f"requires at least one model."
     )
+    logger.error(error_message)
     raise AquaRuntimeError(
-        "Recreate the model deployment and retry the evaluation. An issue occured when initalizing the model group during deployment."
+        f"Cannot extract details about the multi-model deployment to evaluate. A valid multi-model deployment requires at least one model, however the provided evaluation source ID '{create_aqua_evaluation_details.evaluation_source_id}' doesn't contain details about the deployed models."
     )
 
+# Build the list of valid model names from custom metadata.
@@ ... @@
-        f"User did not input model name for multi model deployment evaluation with evaluation source ID: {create_aqua_evaluation_details.evaluation_source_id}"
-    )
-    raise AquaValueError(
-        f"Provide the model name. For evaluation, a single model needs to be targeted using the name in the multi model deployment. The valid model names for this Model Deployment are {valid_model_names}."
-    )
-
+# Check if the provided model name is among the valid names.
 if user_model_name not in model_names:
-    logger.debug(
-        f"User input for model name was {user_model_name}, expected {valid_model_names} evaluation source ID: {create_aqua_evaluation_details.evaluation_source_id}"
-    )
-    raise AquaValueError(
-        f"Provide the correct model name. The valid model names for this Model Deployment are {valid_model_names}."
+    error_message = (
+        f"Provided model name '{user_model_name}' does not match any valid model names {model_names} "
+        f"for evaluation source ID '{create_aqua_evaluation_details.evaluation_source_id}'. "
```
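To exercise the new error paths without a live AQUA deployment, here is a minimal sketch that mirrors the validation order introduced above: missing model name, unreadable `MULTIMODEL_GROUP_COUNT`, a group count below one, and a model name that is not part of the deployment. The `validate_multimodel_evaluation` helper and the stub `AquaValueError`/`AquaRuntimeError` classes are illustrative stand-ins, not the real ads classes, and the sketch raises immediately on a bad group count where the diff only logs the error.

```python
# Minimal sketch of the validation order added in this diff.
# AquaValueError / AquaRuntimeError are stand-ins for the AQUA error classes;
# validate_multimodel_evaluation is a hypothetical helper, not part of the ads API.
from typing import List, Optional


class AquaValueError(ValueError):
    """Stand-in for the AQUA value error raised in the diff."""


class AquaRuntimeError(RuntimeError):
    """Stand-in for the AQUA runtime error raised in the diff."""


def validate_multimodel_evaluation(
    user_model_name: Optional[str],
    raw_group_count: Optional[str],
    model_names: List[str],
    source_id: str,
) -> None:
    # 1. A model name must be supplied for a multi-model deployment.
    if not user_model_name:
        raise AquaValueError(
            "No model name was provided for evaluation. For multi-model deployment, "
            "a model must be specified in the model parameters."
        )

    # 2. MULTIMODEL_GROUP_COUNT must be present and convertible to an int
    #    (the diff logs this failure; the sketch raises for brevity).
    try:
        model_group_count = int(raw_group_count)
    except (TypeError, ValueError) as ex:
        raise AquaRuntimeError(
            f"Missing or invalid `MULTIMODEL_GROUP_COUNT` in custom metadata "
            f"for evaluation source ID '{source_id}'. Details: {ex}"
        ) from ex

    # 3. A valid multi-model deployment requires at least one model.
    if model_group_count < 1:
        raise AquaRuntimeError(
            f"Cannot extract details about the multi-model deployment to evaluate. "
            f"Evaluation source ID '{source_id}' doesn't contain details about the deployed models."
        )

    # 4. The requested model must be one of the deployed model names.
    if user_model_name not in model_names:
        raise AquaValueError(
            f"Provided model name '{user_model_name}' does not match any valid "
            f"model names {model_names} for evaluation source ID '{source_id}'."
        )


if __name__ == "__main__":
    # Happy path: the requested model is part of the deployment.
    validate_multimodel_evaluation("model_a", "2", ["model_a", "model_b"], "example-source-id")
    print("validation passed")

    # Each of the following would raise one of the errors shown in the diff:
    # validate_multimodel_evaluation(None, "2", ["model_a"], "example-source-id")       -> AquaValueError
    # validate_multimodel_evaluation("model_a", None, ["model_a"], "example-source-id") -> AquaRuntimeError
    # validate_multimodel_evaluation("model_c", "2", ["model_a"], "example-source-id")  -> AquaValueError
```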