diff --git a/docs/dyn/aiplatform_v1.projects.locations.deploymentResourcePools.html b/docs/dyn/aiplatform_v1.projects.locations.deploymentResourcePools.html index 9abe0b02a5..82c6f2f2ea 100644 --- a/docs/dyn/aiplatform_v1.projects.locations.deploymentResourcePools.html +++ b/docs/dyn/aiplatform_v1.projects.locations.deploymentResourcePools.html @@ -346,6 +346,7 @@

Method Details

"minReplicaCount": 42, # Required. Immutable. The minimum number of machine replicas this DeployedModel will be always deployed on. This value must be greater than or equal to 1. If traffic against the DeployedModel increases, it may dynamically be deployed onto more replicas, and as traffic decreases, some of these extra replicas may be freed. }, "disableContainerLogging": True or False, # For custom-trained Models and AutoML Tabular Models, the container of the DeployedModel instances will send `stderr` and `stdout` streams to Cloud Logging by default. Please note that the logs incur cost, which are subject to [Cloud Logging pricing](https://cloud.google.com/logging/pricing). User can disable container logging by setting this flag to true. + "disableExplanations": True or False, # If true, deploy the model without explainable feature, regardless the existence of Model.explanation_spec or explanation_spec. "displayName": "A String", # The display name of the DeployedModel. If not provided upon creation, the Model's display_name is used. "enableAccessLogging": True or False, # If true, online prediction access logs are sent to Cloud Logging. These logs are like standard server access logs, containing information like timestamp and latency for each prediction request. Note that logs may incur a cost, especially if your project receives prediction requests at a high queries per second rate (QPS). Estimate your costs before enabling this option. "explanationSpec": { # Specification of Model explanation. # Explanation configuration for this DeployedModel. When deploying a Model using EndpointService.DeployModel, this value overrides the value of Model.explanation_spec. All fields of explanation_spec are optional in the request. If a field of explanation_spec is not populated, the value of the same field of Model.explanation_spec is inherited. 
If the corresponding Model.explanation_spec is not populated, all fields of the explanation_spec will be used for the explanation configuration. diff --git a/docs/dyn/aiplatform_v1.projects.locations.endpoints.html b/docs/dyn/aiplatform_v1.projects.locations.endpoints.html index c13c5a5bb3..088b910955 100644 --- a/docs/dyn/aiplatform_v1.projects.locations.endpoints.html +++ b/docs/dyn/aiplatform_v1.projects.locations.endpoints.html @@ -215,8 +215,8 @@

Method Details

"a_key": "", # Properties of the object. }, }, - "inlineData": { # Raw media bytes. Text should not be sent as raw bytes, use the 'text' field. # Optional. Inlined bytes data. - "data": "A String", # Required. Raw bytes for media formats. + "inlineData": { # Content blob. It's preferred to send as text directly rather than raw bytes. # Optional. Inlined bytes data. + "data": "A String", # Required. Raw bytes. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). @@ -284,6 +284,7 @@

Method Details

"minReplicaCount": 42, # Required. Immutable. The minimum number of machine replicas this DeployedModel will be always deployed on. This value must be greater than or equal to 1. If traffic against the DeployedModel increases, it may dynamically be deployed onto more replicas, and as traffic decreases, some of these extra replicas may be freed. }, "disableContainerLogging": True or False, # For custom-trained Models and AutoML Tabular Models, the container of the DeployedModel instances will send `stderr` and `stdout` streams to Cloud Logging by default. Please note that the logs incur cost, which are subject to [Cloud Logging pricing](https://cloud.google.com/logging/pricing). User can disable container logging by setting this flag to true. + "disableExplanations": True or False, # If true, deploy the model without explainable feature, regardless the existence of Model.explanation_spec or explanation_spec. "displayName": "A String", # The display name of the DeployedModel. If not provided upon creation, the Model's display_name is used. "enableAccessLogging": True or False, # If true, online prediction access logs are sent to Cloud Logging. These logs are like standard server access logs, containing information like timestamp and latency for each prediction request. Note that logs may incur a cost, especially if your project receives prediction requests at a high queries per second rate (QPS). Estimate your costs before enabling this option. "explanationSpec": { # Specification of Model explanation. # Explanation configuration for this DeployedModel. When deploying a Model using EndpointService.DeployModel, this value overrides the value of Model.explanation_spec. All fields of explanation_spec are optional in the request. If a field of explanation_spec is not populated, the value of the same field of Model.explanation_spec is inherited. 
If the corresponding Model.explanation_spec is not populated, all fields of the explanation_spec will be used for the explanation configuration. @@ -531,6 +532,7 @@

Method Details

"minReplicaCount": 42, # Required. Immutable. The minimum number of machine replicas this DeployedModel will be always deployed on. This value must be greater than or equal to 1. If traffic against the DeployedModel increases, it may dynamically be deployed onto more replicas, and as traffic decreases, some of these extra replicas may be freed. }, "disableContainerLogging": True or False, # For custom-trained Models and AutoML Tabular Models, the container of the DeployedModel instances will send `stderr` and `stdout` streams to Cloud Logging by default. Please note that the logs incur cost, which are subject to [Cloud Logging pricing](https://cloud.google.com/logging/pricing). User can disable container logging by setting this flag to true. + "disableExplanations": True or False, # If true, deploy the model without explainable feature, regardless the existence of Model.explanation_spec or explanation_spec. "displayName": "A String", # The display name of the DeployedModel. If not provided upon creation, the Model's display_name is used. "enableAccessLogging": True or False, # If true, online prediction access logs are sent to Cloud Logging. These logs are like standard server access logs, containing information like timestamp and latency for each prediction request. Note that logs may incur a cost, especially if your project receives prediction requests at a high queries per second rate (QPS). Estimate your costs before enabling this option. "explanationSpec": { # Specification of Model explanation. # Explanation configuration for this DeployedModel. When deploying a Model using EndpointService.DeployModel, this value overrides the value of Model.explanation_spec. All fields of explanation_spec are optional in the request. If a field of explanation_spec is not populated, the value of the same field of Model.explanation_spec is inherited. 
If the corresponding Model.explanation_spec is not populated, all fields of the explanation_spec will be used for the explanation configuration. @@ -1077,8 +1079,8 @@

Method Details

"a_key": "", # Properties of the object. }, }, - "inlineData": { # Raw media bytes. Text should not be sent as raw bytes, use the 'text' field. # Optional. Inlined bytes data. - "data": "A String", # Required. Raw bytes for media formats. + "inlineData": { # Content blob. It's preferred to send as text directly rather than raw bytes. # Optional. Inlined bytes data. + "data": "A String", # Required. Raw bytes. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). @@ -1093,7 +1095,9 @@

Method Details

], "generationConfig": { # Generation config. # Optional. Generation config. "candidateCount": 42, # Optional. Number of candidates to generate. + "frequencyPenalty": 3.14, # Optional. Frequency penalties. "maxOutputTokens": 42, # Optional. The maximum number of output tokens to generate per message. + "presencePenalty": 3.14, # Optional. Positive penalties. "stopSequences": [ # Optional. Stop sequences. "A String", ], @@ -1108,6 +1112,38 @@

Method Details

"threshold": "A String", # Required. The harm block threshold. }, ], + "systemInstruction": { # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. # Optional. The user provided system instructions for the model. Note: only text should be used in parts and content in each part will be in a separate paragraph. + "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. + { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "fileData": { # URI based data. # Optional. URI based data. + "fileUri": "A String", # Required. URI. + "mimeType": "A String", # Required. The IANA standard MIME type of the source data. + }, + "functionCall": { # A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing the parameters and their values. # Optional. A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] with the parameters and their values. + "args": { # Optional. Required. The function parameters and values in JSON object format. See [FunctionDeclaration.parameters] for parameter details. + "a_key": "", # Properties of the object. + }, + "name": "A String", # Required. The name of the function to call. Matches [FunctionDeclaration.name]. 
+ }, + "functionResponse": { # The result output from a [FunctionCall] that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing any output from the function is used as context to the model. This should contain the result of a [FunctionCall] made based on model prediction. # Optional. The result output of a [FunctionCall] that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing any output from the function call. It is used as context to the model. + "name": "A String", # Required. The name of the function to call. Matches [FunctionDeclaration.name] and [FunctionCall.name]. + "response": { # Required. The function response in JSON object format. + "a_key": "", # Properties of the object. + }, + }, + "inlineData": { # Content blob. It's preferred to send as text directly rather than raw bytes. # Optional. Inlined bytes data. + "data": "A String", # Required. Raw bytes. + "mimeType": "A String", # Required. The IANA standard MIME type of the source data. + }, + "text": "A String", # Optional. Text part (can be code). + "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. + "endOffset": "A String", # Optional. The end offset of the video. + "startOffset": "A String", # Optional. The start offset of the video. + }, + }, + ], + "role": "A String", # Optional. The producer of the content. Must be either 'user' or 'model'. Useful to set for multi-turn conversations, otherwise can be left blank or unset. + }, "tools": [ # Optional. A list of `Tools` the model may use to generate the next response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. { # Tool details that the model may use to generate response. 
A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. A Tool object should contain exactly one type of Tool (e.g FunctionDeclaration, Retrieval or GoogleSearchRetrieval). "functionDeclarations": [ # Optional. Function tool type. One or more function declarations to be passed to the model along with the current user query. Model may decide to call a subset of these functions by populating FunctionCall in the response. User should provide a FunctionResponse for each function call in the next turn. Based on the function responses, Model will generate the final response back to the user. Maximum 64 function declarations can be provided. @@ -1203,8 +1239,8 @@

Method Details

"a_key": "", # Properties of the object. }, }, - "inlineData": { # Raw media bytes. Text should not be sent as raw bytes, use the 'text' field. # Optional. Inlined bytes data. - "data": "A String", # Required. Raw bytes for media formats. + "inlineData": { # Content blob. It's preferred to send as text directly rather than raw bytes. # Optional. Inlined bytes data. + "data": "A String", # Required. Raw bytes. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). @@ -1233,6 +1269,9 @@

Method Details

}, }, ], + "retrievalQueries": [ # Optional. Queries executed by the retrieval tools. + "A String", + ], "webSearchQueries": [ # Optional. Web search queries for the following-up web search. "A String", ], @@ -1312,6 +1351,7 @@

Method Details

"minReplicaCount": 42, # Required. Immutable. The minimum number of machine replicas this DeployedModel will be always deployed on. This value must be greater than or equal to 1. If traffic against the DeployedModel increases, it may dynamically be deployed onto more replicas, and as traffic decreases, some of these extra replicas may be freed. }, "disableContainerLogging": True or False, # For custom-trained Models and AutoML Tabular Models, the container of the DeployedModel instances will send `stderr` and `stdout` streams to Cloud Logging by default. Please note that the logs incur cost, which are subject to [Cloud Logging pricing](https://cloud.google.com/logging/pricing). User can disable container logging by setting this flag to true. + "disableExplanations": True or False, # If true, deploy the model without explainable feature, regardless the existence of Model.explanation_spec or explanation_spec. "displayName": "A String", # The display name of the DeployedModel. If not provided upon creation, the Model's display_name is used. "enableAccessLogging": True or False, # If true, online prediction access logs are sent to Cloud Logging. These logs are like standard server access logs, containing information like timestamp and latency for each prediction request. Note that logs may incur a cost, especially if your project receives prediction requests at a high queries per second rate (QPS). Estimate your costs before enabling this option. "explanationSpec": { # Specification of Model explanation. # Explanation configuration for this DeployedModel. When deploying a Model using EndpointService.DeployModel, this value overrides the value of Model.explanation_spec. All fields of explanation_spec are optional in the request. If a field of explanation_spec is not populated, the value of the same field of Model.explanation_spec is inherited. 
If the corresponding Model.explanation_spec is not populated, all fields of the explanation_spec will be used for the explanation configuration. @@ -1509,6 +1549,7 @@

Method Details

"minReplicaCount": 42, # Required. Immutable. The minimum number of machine replicas this DeployedModel will be always deployed on. This value must be greater than or equal to 1. If traffic against the DeployedModel increases, it may dynamically be deployed onto more replicas, and as traffic decreases, some of these extra replicas may be freed. }, "disableContainerLogging": True or False, # For custom-trained Models and AutoML Tabular Models, the container of the DeployedModel instances will send `stderr` and `stdout` streams to Cloud Logging by default. Please note that the logs incur cost, which are subject to [Cloud Logging pricing](https://cloud.google.com/logging/pricing). User can disable container logging by setting this flag to true. + "disableExplanations": True or False, # If true, deploy the model without explainable feature, regardless the existence of Model.explanation_spec or explanation_spec. "displayName": "A String", # The display name of the DeployedModel. If not provided upon creation, the Model's display_name is used. "enableAccessLogging": True or False, # If true, online prediction access logs are sent to Cloud Logging. These logs are like standard server access logs, containing information like timestamp and latency for each prediction request. Note that logs may incur a cost, especially if your project receives prediction requests at a high queries per second rate (QPS). Estimate your costs before enabling this option. "explanationSpec": { # Specification of Model explanation. # Explanation configuration for this DeployedModel. When deploying a Model using EndpointService.DeployModel, this value overrides the value of Model.explanation_spec. All fields of explanation_spec are optional in the request. If a field of explanation_spec is not populated, the value of the same field of Model.explanation_spec is inherited. 
If the corresponding Model.explanation_spec is not populated, all fields of the explanation_spec will be used for the explanation configuration. @@ -1709,6 +1750,7 @@

Method Details

"minReplicaCount": 42, # Required. Immutable. The minimum number of machine replicas this DeployedModel will be always deployed on. This value must be greater than or equal to 1. If traffic against the DeployedModel increases, it may dynamically be deployed onto more replicas, and as traffic decreases, some of these extra replicas may be freed. }, "disableContainerLogging": True or False, # For custom-trained Models and AutoML Tabular Models, the container of the DeployedModel instances will send `stderr` and `stdout` streams to Cloud Logging by default. Please note that the logs incur cost, which are subject to [Cloud Logging pricing](https://cloud.google.com/logging/pricing). User can disable container logging by setting this flag to true. + "disableExplanations": True or False, # If true, deploy the model without explainable feature, regardless the existence of Model.explanation_spec or explanation_spec. "displayName": "A String", # The display name of the DeployedModel. If not provided upon creation, the Model's display_name is used. "enableAccessLogging": True or False, # If true, online prediction access logs are sent to Cloud Logging. These logs are like standard server access logs, containing information like timestamp and latency for each prediction request. Note that logs may incur a cost, especially if your project receives prediction requests at a high queries per second rate (QPS). Estimate your costs before enabling this option. "explanationSpec": { # Specification of Model explanation. # Explanation configuration for this DeployedModel. When deploying a Model using EndpointService.DeployModel, this value overrides the value of Model.explanation_spec. All fields of explanation_spec are optional in the request. If a field of explanation_spec is not populated, the value of the same field of Model.explanation_spec is inherited. 
If the corresponding Model.explanation_spec is not populated, all fields of the explanation_spec will be used for the explanation configuration. @@ -1898,6 +1940,7 @@

Method Details

"minReplicaCount": 42, # Required. Immutable. The minimum number of machine replicas this DeployedModel will be always deployed on. This value must be greater than or equal to 1. If traffic against the DeployedModel increases, it may dynamically be deployed onto more replicas, and as traffic decreases, some of these extra replicas may be freed. }, "disableContainerLogging": True or False, # For custom-trained Models and AutoML Tabular Models, the container of the DeployedModel instances will send `stderr` and `stdout` streams to Cloud Logging by default. Please note that the logs incur cost, which are subject to [Cloud Logging pricing](https://cloud.google.com/logging/pricing). User can disable container logging by setting this flag to true. + "disableExplanations": True or False, # If true, deploy the model without explainable feature, regardless the existence of Model.explanation_spec or explanation_spec. "displayName": "A String", # The display name of the DeployedModel. If not provided upon creation, the Model's display_name is used. "enableAccessLogging": True or False, # If true, online prediction access logs are sent to Cloud Logging. These logs are like standard server access logs, containing information like timestamp and latency for each prediction request. Note that logs may incur a cost, especially if your project receives prediction requests at a high queries per second rate (QPS). Estimate your costs before enabling this option. "explanationSpec": { # Specification of Model explanation. # Explanation configuration for this DeployedModel. When deploying a Model using EndpointService.DeployModel, this value overrides the value of Model.explanation_spec. All fields of explanation_spec are optional in the request. If a field of explanation_spec is not populated, the value of the same field of Model.explanation_spec is inherited. 
If the corresponding Model.explanation_spec is not populated, all fields of the explanation_spec will be used for the explanation configuration. @@ -2082,6 +2125,7 @@

Method Details

"minReplicaCount": 42, # Required. Immutable. The minimum number of machine replicas this DeployedModel will be always deployed on. This value must be greater than or equal to 1. If traffic against the DeployedModel increases, it may dynamically be deployed onto more replicas, and as traffic decreases, some of these extra replicas may be freed. }, "disableContainerLogging": True or False, # For custom-trained Models and AutoML Tabular Models, the container of the DeployedModel instances will send `stderr` and `stdout` streams to Cloud Logging by default. Please note that the logs incur cost, which are subject to [Cloud Logging pricing](https://cloud.google.com/logging/pricing). User can disable container logging by setting this flag to true. + "disableExplanations": True or False, # If true, deploy the model without explainable feature, regardless the existence of Model.explanation_spec or explanation_spec. "displayName": "A String", # The display name of the DeployedModel. If not provided upon creation, the Model's display_name is used. "enableAccessLogging": True or False, # If true, online prediction access logs are sent to Cloud Logging. These logs are like standard server access logs, containing information like timestamp and latency for each prediction request. Note that logs may incur a cost, especially if your project receives prediction requests at a high queries per second rate (QPS). Estimate your costs before enabling this option. "explanationSpec": { # Specification of Model explanation. # Explanation configuration for this DeployedModel. When deploying a Model using EndpointService.DeployModel, this value overrides the value of Model.explanation_spec. All fields of explanation_spec are optional in the request. If a field of explanation_spec is not populated, the value of the same field of Model.explanation_spec is inherited. 
If the corresponding Model.explanation_spec is not populated, all fields of the explanation_spec will be used for the explanation configuration. @@ -2526,8 +2570,8 @@

Method Details

"a_key": "", # Properties of the object. }, }, - "inlineData": { # Raw media bytes. Text should not be sent as raw bytes, use the 'text' field. # Optional. Inlined bytes data. - "data": "A String", # Required. Raw bytes for media formats. + "inlineData": { # Content blob. It's preferred to send as text directly rather than raw bytes. # Optional. Inlined bytes data. + "data": "A String", # Required. Raw bytes. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). @@ -2542,7 +2586,9 @@

Method Details

], "generationConfig": { # Generation config. # Optional. Generation config. "candidateCount": 42, # Optional. Number of candidates to generate. + "frequencyPenalty": 3.14, # Optional. Frequency penalties. "maxOutputTokens": 42, # Optional. The maximum number of output tokens to generate per message. + "presencePenalty": 3.14, # Optional. Positive penalties. "stopSequences": [ # Optional. Stop sequences. "A String", ], @@ -2557,6 +2603,38 @@

Method Details

"threshold": "A String", # Required. The harm block threshold. }, ], + "systemInstruction": { # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. # Optional. The user provided system instructions for the model. Note: only text should be used in parts and content in each part will be in a separate paragraph. + "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. + { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "fileData": { # URI based data. # Optional. URI based data. + "fileUri": "A String", # Required. URI. + "mimeType": "A String", # Required. The IANA standard MIME type of the source data. + }, + "functionCall": { # A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing the parameters and their values. # Optional. A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] with the parameters and their values. + "args": { # Optional. Required. The function parameters and values in JSON object format. See [FunctionDeclaration.parameters] for parameter details. + "a_key": "", # Properties of the object. + }, + "name": "A String", # Required. The name of the function to call. Matches [FunctionDeclaration.name]. 
+ }, + "functionResponse": { # The result output from a [FunctionCall] that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing any output from the function is used as context to the model. This should contain the result of a [FunctionCall] made based on model prediction. # Optional. The result output of a [FunctionCall] that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing any output from the function call. It is used as context to the model. + "name": "A String", # Required. The name of the function to call. Matches [FunctionDeclaration.name] and [FunctionCall.name]. + "response": { # Required. The function response in JSON object format. + "a_key": "", # Properties of the object. + }, + }, + "inlineData": { # Content blob. It's preferred to send as text directly rather than raw bytes. # Optional. Inlined bytes data. + "data": "A String", # Required. Raw bytes. + "mimeType": "A String", # Required. The IANA standard MIME type of the source data. + }, + "text": "A String", # Optional. Text part (can be code). + "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. + "endOffset": "A String", # Optional. The end offset of the video. + "startOffset": "A String", # Optional. The start offset of the video. + }, + }, + ], + "role": "A String", # Optional. The producer of the content. Must be either 'user' or 'model'. Useful to set for multi-turn conversations, otherwise can be left blank or unset. + }, "tools": [ # Optional. A list of `Tools` the model may use to generate the next response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. { # Tool details that the model may use to generate response. 
A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. A Tool object should contain exactly one type of Tool (e.g FunctionDeclaration, Retrieval or GoogleSearchRetrieval). "functionDeclarations": [ # Optional. Function tool type. One or more function declarations to be passed to the model along with the current user query. Model may decide to call a subset of these functions by populating FunctionCall in the response. User should provide a FunctionResponse for each function call in the next turn. Based on the function responses, Model will generate the final response back to the user. Maximum 64 function declarations can be provided. @@ -2652,8 +2730,8 @@

Method Details

"a_key": "", # Properties of the object. }, }, - "inlineData": { # Raw media bytes. Text should not be sent as raw bytes, use the 'text' field. # Optional. Inlined bytes data. - "data": "A String", # Required. Raw bytes for media formats. + "inlineData": { # Content blob. It's preferred to send as text directly rather than raw bytes. # Optional. Inlined bytes data. + "data": "A String", # Required. Raw bytes. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). @@ -2682,6 +2760,9 @@

Method Details

}, }, ], + "retrievalQueries": [ # Optional. Queries executed by the retrieval tools. + "A String", + ], "webSearchQueries": [ # Optional. Web search queries for the following-up web search. "A String", ], diff --git a/docs/dyn/aiplatform_v1.projects.locations.featureOnlineStores.featureViews.html b/docs/dyn/aiplatform_v1.projects.locations.featureOnlineStores.featureViews.html index d0d692055e..4b267be91a 100644 --- a/docs/dyn/aiplatform_v1.projects.locations.featureOnlineStores.featureViews.html +++ b/docs/dyn/aiplatform_v1.projects.locations.featureOnlineStores.featureViews.html @@ -149,6 +149,20 @@

Method Details

], "projectNumber": "A String", # Optional. The project number of the parent project of the Feature Groups. }, + "indexConfig": { # Configuration for vector indexing. # Optional. Configuration for index preparation for vector search. It contains the required configurations to create an index from source data, so that approximate nearest neighbor (a.k.a ANN) algorithms search can be performed during online serving. + "bruteForceConfig": { # Configuration options for using brute force search. # Optional. Configuration options for using brute force search, which simply implements the standard linear search in the database for each query. It is primarily meant for benchmarking and to generate the ground truth for approximate search. + }, + "crowdingColumn": "A String", # Optional. Column of crowding. This column contains crowding attribute which is a constraint on a neighbor list produced by FeatureOnlineStoreService.SearchNearestEntities to diversify search results. If NearestNeighborQuery.per_crowding_attribute_neighbor_count is set to K in SearchNearestEntitiesRequest, it's guaranteed that no more than K entities of the same crowding attribute are returned in the response. + "distanceMeasureType": "A String", # Optional. The distance measure used in nearest neighbor search. + "embeddingColumn": "A String", # Optional. Column of embedding. This column contains the source data to create index for vector search. embedding_column must be set when using vector search. + "embeddingDimension": 42, # Optional. The number of dimensions of the input embedding. + "filterColumns": [ # Optional. Columns of features that're used to filter vector search results. + "A String", + ], + "treeAhConfig": { # Configuration options for the tree-AH algorithm. # Optional. Configuration options for the tree-AH algorithm (Shallow tree + Asymmetric Hashing). Please refer to this paper for more details: https://arxiv.org/abs/1908.10396 + "leafNodeEmbeddingCount": "A String", # Optional. 
Number of embeddings on each leaf node. The default value is 1000 if not set. + }, + }, "labels": { # Optional. The labels with user-defined metadata to organize your FeatureViews. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information on and examples of labels. No more than 64 user labels can be associated with one FeatureOnlineStore(System labels are excluded)." System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. "a_key": "A String", }, @@ -340,6 +354,20 @@

Method Details

], "projectNumber": "A String", # Optional. The project number of the parent project of the Feature Groups. }, + "indexConfig": { # Configuration for vector indexing. # Optional. Configuration for index preparation for vector search. It contains the required configurations to create an index from source data, so that approximate nearest neighbor (a.k.a ANN) algorithms search can be performed during online serving. + "bruteForceConfig": { # Configuration options for using brute force search. # Optional. Configuration options for using brute force search, which simply implements the standard linear search in the database for each query. It is primarily meant for benchmarking and to generate the ground truth for approximate search. + }, + "crowdingColumn": "A String", # Optional. Column of crowding. This column contains crowding attribute which is a constraint on a neighbor list produced by FeatureOnlineStoreService.SearchNearestEntities to diversify search results. If NearestNeighborQuery.per_crowding_attribute_neighbor_count is set to K in SearchNearestEntitiesRequest, it's guaranteed that no more than K entities of the same crowding attribute are returned in the response. + "distanceMeasureType": "A String", # Optional. The distance measure used in nearest neighbor search. + "embeddingColumn": "A String", # Optional. Column of embedding. This column contains the source data to create index for vector search. embedding_column must be set when using vector search. + "embeddingDimension": 42, # Optional. The number of dimensions of the input embedding. + "filterColumns": [ # Optional. Columns of features that're used to filter vector search results. + "A String", + ], + "treeAhConfig": { # Configuration options for the tree-AH algorithm. # Optional. Configuration options for the tree-AH algorithm (Shallow tree + Asymmetric Hashing). Please refer to this paper for more details: https://arxiv.org/abs/1908.10396 + "leafNodeEmbeddingCount": "A String", # Optional. 
Number of embeddings on each leaf node. The default value is 1000 if not set. + }, + }, "labels": { # Optional. The labels with user-defined metadata to organize your FeatureViews. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information on and examples of labels. No more than 64 user labels can be associated with one FeatureOnlineStore(System labels are excluded)." System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. "a_key": "A String", }, @@ -391,6 +419,20 @@

Method Details

], "projectNumber": "A String", # Optional. The project number of the parent project of the Feature Groups. }, + "indexConfig": { # Configuration for vector indexing. # Optional. Configuration for index preparation for vector search. It contains the required configurations to create an index from source data, so that approximate nearest neighbor (a.k.a ANN) algorithms search can be performed during online serving. + "bruteForceConfig": { # Configuration options for using brute force search. # Optional. Configuration options for using brute force search, which simply implements the standard linear search in the database for each query. It is primarily meant for benchmarking and to generate the ground truth for approximate search. + }, + "crowdingColumn": "A String", # Optional. Column of crowding. This column contains crowding attribute which is a constraint on a neighbor list produced by FeatureOnlineStoreService.SearchNearestEntities to diversify search results. If NearestNeighborQuery.per_crowding_attribute_neighbor_count is set to K in SearchNearestEntitiesRequest, it's guaranteed that no more than K entities of the same crowding attribute are returned in the response. + "distanceMeasureType": "A String", # Optional. The distance measure used in nearest neighbor search. + "embeddingColumn": "A String", # Optional. Column of embedding. This column contains the source data to create index for vector search. embedding_column must be set when using vector search. + "embeddingDimension": 42, # Optional. The number of dimensions of the input embedding. + "filterColumns": [ # Optional. Columns of features that're used to filter vector search results. + "A String", + ], + "treeAhConfig": { # Configuration options for the tree-AH algorithm. # Optional. Configuration options for the tree-AH algorithm (Shallow tree + Asymmetric Hashing). Please refer to this paper for more details: https://arxiv.org/abs/1908.10396 + "leafNodeEmbeddingCount": "A String", # Optional. 
Number of embeddings on each leaf node. The default value is 1000 if not set. + }, + }, "labels": { # Optional. The labels with user-defined metadata to organize your FeatureViews. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information on and examples of labels. No more than 64 user labels can be associated with one FeatureOnlineStore(System labels are excluded)." System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. "a_key": "A String", }, @@ -448,6 +490,20 @@

Method Details

], "projectNumber": "A String", # Optional. The project number of the parent project of the Feature Groups. }, + "indexConfig": { # Configuration for vector indexing. # Optional. Configuration for index preparation for vector search. It contains the required configurations to create an index from source data, so that approximate nearest neighbor (a.k.a ANN) algorithms search can be performed during online serving. + "bruteForceConfig": { # Configuration options for using brute force search. # Optional. Configuration options for using brute force search, which simply implements the standard linear search in the database for each query. It is primarily meant for benchmarking and to generate the ground truth for approximate search. + }, + "crowdingColumn": "A String", # Optional. Column of crowding. This column contains crowding attribute which is a constraint on a neighbor list produced by FeatureOnlineStoreService.SearchNearestEntities to diversify search results. If NearestNeighborQuery.per_crowding_attribute_neighbor_count is set to K in SearchNearestEntitiesRequest, it's guaranteed that no more than K entities of the same crowding attribute are returned in the response. + "distanceMeasureType": "A String", # Optional. The distance measure used in nearest neighbor search. + "embeddingColumn": "A String", # Optional. Column of embedding. This column contains the source data to create index for vector search. embedding_column must be set when using vector search. + "embeddingDimension": 42, # Optional. The number of dimensions of the input embedding. + "filterColumns": [ # Optional. Columns of features that're used to filter vector search results. + "A String", + ], + "treeAhConfig": { # Configuration options for the tree-AH algorithm. # Optional. Configuration options for the tree-AH algorithm (Shallow tree + Asymmetric Hashing). Please refer to this paper for more details: https://arxiv.org/abs/1908.10396 + "leafNodeEmbeddingCount": "A String", # Optional. 
Number of embeddings on each leaf node. The default value is 1000 if not set. + }, + }, "labels": { # Optional. The labels with user-defined metadata to organize your FeatureViews. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information on and examples of labels. No more than 64 user labels can be associated with one FeatureOnlineStore(System labels are excluded)." System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. "a_key": "A String", }, diff --git a/docs/dyn/aiplatform_v1.projects.locations.featureOnlineStores.html b/docs/dyn/aiplatform_v1.projects.locations.featureOnlineStores.html index 2ae33f55a7..83acc45133 100644 --- a/docs/dyn/aiplatform_v1.projects.locations.featureOnlineStores.html +++ b/docs/dyn/aiplatform_v1.projects.locations.featureOnlineStores.html @@ -129,11 +129,16 @@

Method Details

}, }, "createTime": "A String", # Output only. Timestamp when this FeatureOnlineStore was created. + "dedicatedServingEndpoint": { # The dedicated serving endpoint for this FeatureOnlineStore. Only need to set when you choose Optimized storage type. Public endpoint is provisioned by default. # Optional. The dedicated serving endpoint for this FeatureOnlineStore, which is different from common Vertex service endpoint. + "publicEndpointDomainName": "A String", # Output only. This field will be populated with the domain name to use for this FeatureOnlineStore + }, "etag": "A String", # Optional. Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. "labels": { # Optional. The labels with user-defined metadata to organize your FeatureOnlineStore. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information on and examples of labels. No more than 64 user labels can be associated with one FeatureOnlineStore(System labels are excluded)." System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. "a_key": "A String", }, "name": "A String", # Identifier. Name of the FeatureOnlineStore. Format: `projects/{project}/locations/{location}/featureOnlineStores/{featureOnlineStore}` + "optimized": { # Optimized storage type # Contains settings for the Optimized store that will be created to serve featureValues for all FeatureViews under this FeatureOnlineStore. When choose Optimized storage type, need to set PrivateServiceConnectConfig.enable_private_service_connect to use private endpoint. Otherwise will use public endpoint by default. + }, "state": "A String", # Output only. State of the featureOnlineStore. "updateTime": "A String", # Output only. Timestamp when this FeatureOnlineStore was last updated. 
} @@ -227,11 +232,16 @@

Method Details

}, }, "createTime": "A String", # Output only. Timestamp when this FeatureOnlineStore was created. + "dedicatedServingEndpoint": { # The dedicated serving endpoint for this FeatureOnlineStore. Only need to set when you choose Optimized storage type. Public endpoint is provisioned by default. # Optional. The dedicated serving endpoint for this FeatureOnlineStore, which is different from common Vertex service endpoint. + "publicEndpointDomainName": "A String", # Output only. This field will be populated with the domain name to use for this FeatureOnlineStore + }, "etag": "A String", # Optional. Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. "labels": { # Optional. The labels with user-defined metadata to organize your FeatureOnlineStore. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information on and examples of labels. No more than 64 user labels can be associated with one FeatureOnlineStore(System labels are excluded)." System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. "a_key": "A String", }, "name": "A String", # Identifier. Name of the FeatureOnlineStore. Format: `projects/{project}/locations/{location}/featureOnlineStores/{featureOnlineStore}` + "optimized": { # Optimized storage type # Contains settings for the Optimized store that will be created to serve featureValues for all FeatureViews under this FeatureOnlineStore. When choose Optimized storage type, need to set PrivateServiceConnectConfig.enable_private_service_connect to use private endpoint. Otherwise will use public endpoint by default. + }, "state": "A String", # Output only. State of the featureOnlineStore. "updateTime": "A String", # Output only. Timestamp when this FeatureOnlineStore was last updated. 
} @@ -266,11 +276,16 @@

Method Details

}, }, "createTime": "A String", # Output only. Timestamp when this FeatureOnlineStore was created. + "dedicatedServingEndpoint": { # The dedicated serving endpoint for this FeatureOnlineStore. Only need to set when you choose Optimized storage type. Public endpoint is provisioned by default. # Optional. The dedicated serving endpoint for this FeatureOnlineStore, which is different from common Vertex service endpoint. + "publicEndpointDomainName": "A String", # Output only. This field will be populated with the domain name to use for this FeatureOnlineStore + }, "etag": "A String", # Optional. Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. "labels": { # Optional. The labels with user-defined metadata to organize your FeatureOnlineStore. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information on and examples of labels. No more than 64 user labels can be associated with one FeatureOnlineStore(System labels are excluded)." System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. "a_key": "A String", }, "name": "A String", # Identifier. Name of the FeatureOnlineStore. Format: `projects/{project}/locations/{location}/featureOnlineStores/{featureOnlineStore}` + "optimized": { # Optimized storage type # Contains settings for the Optimized store that will be created to serve featureValues for all FeatureViews under this FeatureOnlineStore. When choose Optimized storage type, need to set PrivateServiceConnectConfig.enable_private_service_connect to use private endpoint. Otherwise will use public endpoint by default. + }, "state": "A String", # Output only. State of the featureOnlineStore. "updateTime": "A String", # Output only. Timestamp when this FeatureOnlineStore was last updated. 
}, @@ -311,11 +326,16 @@

Method Details

}, }, "createTime": "A String", # Output only. Timestamp when this FeatureOnlineStore was created. + "dedicatedServingEndpoint": { # The dedicated serving endpoint for this FeatureOnlineStore. Only need to set when you choose Optimized storage type. Public endpoint is provisioned by default. # Optional. The dedicated serving endpoint for this FeatureOnlineStore, which is different from common Vertex service endpoint. + "publicEndpointDomainName": "A String", # Output only. This field will be populated with the domain name to use for this FeatureOnlineStore + }, "etag": "A String", # Optional. Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. "labels": { # Optional. The labels with user-defined metadata to organize your FeatureOnlineStore. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information on and examples of labels. No more than 64 user labels can be associated with one FeatureOnlineStore(System labels are excluded)." System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. "a_key": "A String", }, "name": "A String", # Identifier. Name of the FeatureOnlineStore. Format: `projects/{project}/locations/{location}/featureOnlineStores/{featureOnlineStore}` + "optimized": { # Optimized storage type # Contains settings for the Optimized store that will be created to serve featureValues for all FeatureViews under this FeatureOnlineStore. When choose Optimized storage type, need to set PrivateServiceConnectConfig.enable_private_service_connect to use private endpoint. Otherwise will use public endpoint by default. + }, "state": "A String", # Output only. State of the featureOnlineStore. "updateTime": "A String", # Output only. Timestamp when this FeatureOnlineStore was last updated. 
} diff --git a/docs/dyn/aiplatform_v1.projects.locations.publishers.models.html b/docs/dyn/aiplatform_v1.projects.locations.publishers.models.html index 8a9ab3fc73..fa69d5997a 100644 --- a/docs/dyn/aiplatform_v1.projects.locations.publishers.models.html +++ b/docs/dyn/aiplatform_v1.projects.locations.publishers.models.html @@ -174,8 +174,8 @@

Method Details

"a_key": "", # Properties of the object. }, }, - "inlineData": { # Raw media bytes. Text should not be sent as raw bytes, use the 'text' field. # Optional. Inlined bytes data. - "data": "A String", # Required. Raw bytes for media formats. + "inlineData": { # Content blob. It's preferred to send as text directly rather than raw bytes. # Optional. Inlined bytes data. + "data": "A String", # Required. Raw bytes. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). @@ -238,8 +238,8 @@

Method Details

"a_key": "", # Properties of the object. }, }, - "inlineData": { # Raw media bytes. Text should not be sent as raw bytes, use the 'text' field. # Optional. Inlined bytes data. - "data": "A String", # Required. Raw bytes for media formats. + "inlineData": { # Content blob. It's preferred to send as text directly rather than raw bytes. # Optional. Inlined bytes data. + "data": "A String", # Required. Raw bytes. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). @@ -254,7 +254,9 @@

Method Details

], "generationConfig": { # Generation config. # Optional. Generation config. "candidateCount": 42, # Optional. Number of candidates to generate. + "frequencyPenalty": 3.14, # Optional. Frequency penalties. "maxOutputTokens": 42, # Optional. The maximum number of output tokens to generate per message. + "presencePenalty": 3.14, # Optional. Positive penalties. "stopSequences": [ # Optional. Stop sequences. "A String", ], @@ -269,6 +271,38 @@

Method Details

"threshold": "A String", # Required. The harm block threshold. }, ], + "systemInstruction": { # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. # Optional. The user provided system instructions for the model. Note: only text should be used in parts and content in each part will be in a separate paragraph. + "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. + { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "fileData": { # URI based data. # Optional. URI based data. + "fileUri": "A String", # Required. URI. + "mimeType": "A String", # Required. The IANA standard MIME type of the source data. + }, + "functionCall": { # A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing the parameters and their values. # Optional. A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] with the parameters and their values. + "args": { # Optional. Required. The function parameters and values in JSON object format. See [FunctionDeclaration.parameters] for parameter details. + "a_key": "", # Properties of the object. + }, + "name": "A String", # Required. The name of the function to call. Matches [FunctionDeclaration.name]. 
+ }, + "functionResponse": { # The result output from a [FunctionCall] that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing any output from the function is used as context to the model. This should contain the result of a [FunctionCall] made based on model prediction. # Optional. The result output of a [FunctionCall] that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing any output from the function call. It is used as context to the model. + "name": "A String", # Required. The name of the function to call. Matches [FunctionDeclaration.name] and [FunctionCall.name]. + "response": { # Required. The function response in JSON object format. + "a_key": "", # Properties of the object. + }, + }, + "inlineData": { # Content blob. It's preferred to send as text directly rather than raw bytes. # Optional. Inlined bytes data. + "data": "A String", # Required. Raw bytes. + "mimeType": "A String", # Required. The IANA standard MIME type of the source data. + }, + "text": "A String", # Optional. Text part (can be code). + "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. + "endOffset": "A String", # Optional. The end offset of the video. + "startOffset": "A String", # Optional. The start offset of the video. + }, + }, + ], + "role": "A String", # Optional. The producer of the content. Must be either 'user' or 'model'. Useful to set for multi-turn conversations, otherwise can be left blank or unset. + }, "tools": [ # Optional. A list of `Tools` the model may use to generate the next response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. { # Tool details that the model may use to generate response. 
A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. A Tool object should contain exactly one type of Tool (e.g FunctionDeclaration, Retrieval or GoogleSearchRetrieval). "functionDeclarations": [ # Optional. Function tool type. One or more function declarations to be passed to the model along with the current user query. Model may decide to call a subset of these functions by populating FunctionCall in the response. User should provide a FunctionResponse for each function call in the next turn. Based on the function responses, Model will generate the final response back to the user. Maximum 64 function declarations can be provided. @@ -364,8 +398,8 @@

Method Details

"a_key": "", # Properties of the object. }, }, - "inlineData": { # Raw media bytes. Text should not be sent as raw bytes, use the 'text' field. # Optional. Inlined bytes data. - "data": "A String", # Required. Raw bytes for media formats. + "inlineData": { # Content blob. It's preferred to send as text directly rather than raw bytes. # Optional. Inlined bytes data. + "data": "A String", # Required. Raw bytes. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). @@ -394,6 +428,9 @@

Method Details

}, }, ], + "retrievalQueries": [ # Optional. Queries executed by the retrieval tools. + "A String", + ], "webSearchQueries": [ # Optional. Web search queries for the following-up web search. "A String", ], @@ -727,8 +764,8 @@

Method Details

"a_key": "", # Properties of the object. }, }, - "inlineData": { # Raw media bytes. Text should not be sent as raw bytes, use the 'text' field. # Optional. Inlined bytes data. - "data": "A String", # Required. Raw bytes for media formats. + "inlineData": { # Content blob. It's preferred to send as text directly rather than raw bytes. # Optional. Inlined bytes data. + "data": "A String", # Required. Raw bytes. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). @@ -743,7 +780,9 @@

Method Details

], "generationConfig": { # Generation config. # Optional. Generation config. "candidateCount": 42, # Optional. Number of candidates to generate. + "frequencyPenalty": 3.14, # Optional. Frequency penalties. "maxOutputTokens": 42, # Optional. The maximum number of output tokens to generate per message. + "presencePenalty": 3.14, # Optional. Positive penalties. "stopSequences": [ # Optional. Stop sequences. "A String", ], @@ -758,6 +797,38 @@

Method Details

"threshold": "A String", # Required. The harm block threshold. }, ], + "systemInstruction": { # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. # Optional. The user provided system instructions for the model. Note: only text should be used in parts and content in each part will be in a separate paragraph. + "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. + { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "fileData": { # URI based data. # Optional. URI based data. + "fileUri": "A String", # Required. URI. + "mimeType": "A String", # Required. The IANA standard MIME type of the source data. + }, + "functionCall": { # A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing the parameters and their values. # Optional. A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] with the parameters and their values. + "args": { # Optional. Required. The function parameters and values in JSON object format. See [FunctionDeclaration.parameters] for parameter details. + "a_key": "", # Properties of the object. + }, + "name": "A String", # Required. The name of the function to call. Matches [FunctionDeclaration.name]. 
+ }, + "functionResponse": { # The result output from a [FunctionCall] that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing any output from the function is used as context to the model. This should contain the result of a [FunctionCall] made based on model prediction. # Optional. The result output of a [FunctionCall] that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing any output from the function call. It is used as context to the model. + "name": "A String", # Required. The name of the function to call. Matches [FunctionDeclaration.name] and [FunctionCall.name]. + "response": { # Required. The function response in JSON object format. + "a_key": "", # Properties of the object. + }, + }, + "inlineData": { # Content blob. It's preferred to send as text directly rather than raw bytes. # Optional. Inlined bytes data. + "data": "A String", # Required. Raw bytes. + "mimeType": "A String", # Required. The IANA standard MIME type of the source data. + }, + "text": "A String", # Optional. Text part (can be code). + "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. + "endOffset": "A String", # Optional. The end offset of the video. + "startOffset": "A String", # Optional. The start offset of the video. + }, + }, + ], + "role": "A String", # Optional. The producer of the content. Must be either 'user' or 'model'. Useful to set for multi-turn conversations, otherwise can be left blank or unset. + }, "tools": [ # Optional. A list of `Tools` the model may use to generate the next response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. { # Tool details that the model may use to generate response. 
A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. A Tool object should contain exactly one type of Tool (e.g FunctionDeclaration, Retrieval or GoogleSearchRetrieval). "functionDeclarations": [ # Optional. Function tool type. One or more function declarations to be passed to the model along with the current user query. Model may decide to call a subset of these functions by populating FunctionCall in the response. User should provide a FunctionResponse for each function call in the next turn. Based on the function responses, Model will generate the final response back to the user. Maximum 64 function declarations can be provided. @@ -853,8 +924,8 @@

Method Details

"a_key": "", # Properties of the object. }, }, - "inlineData": { # Raw media bytes. Text should not be sent as raw bytes, use the 'text' field. # Optional. Inlined bytes data. - "data": "A String", # Required. Raw bytes for media formats. + "inlineData": { # Content blob. It's preferred to send as text directly rather than raw bytes. # Optional. Inlined bytes data. + "data": "A String", # Required. Raw bytes. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). @@ -883,6 +954,9 @@

Method Details

}, }, ], + "retrievalQueries": [ # Optional. Queries executed by the retrieval tools. + "A String", + ], "webSearchQueries": [ # Optional. Web search queries for the following-up web search. "A String", ], diff --git a/docs/dyn/aiplatform_v1.projects.locations.tuningJobs.operations.html b/docs/dyn/aiplatform_v1.projects.locations.tuningJobs.operations.html index ee80de85d1..2084fd63b3 100644 --- a/docs/dyn/aiplatform_v1.projects.locations.tuningJobs.operations.html +++ b/docs/dyn/aiplatform_v1.projects.locations.tuningJobs.operations.html @@ -83,6 +83,12 @@

Instance Methods

get(name, x__xgafv=None)

Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.

+

+ list(name, filter=None, pageSize=None, pageToken=None, x__xgafv=None)

+

Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`.

+

+ list_next()

+

Retrieves the next page of results.

Method Details

cancel(name, x__xgafv=None) @@ -142,4 +148,61 @@

Method Details

}
+
+ list(name, filter=None, pageSize=None, pageToken=None, x__xgafv=None) +
Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`.
+
+Args:
+  name: string, The name of the operation's parent resource. (required)
+  filter: string, The standard list filter.
+  pageSize: integer, The standard list page size.
+  pageToken: string, The standard list page token.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # The response message for Operations.ListOperations.
+  "nextPageToken": "A String", # The standard List next-page token.
+  "operations": [ # A list of operations that matches the specified filter in the request.
+    { # This resource represents a long-running operation that is the result of a network API call.
+      "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+      "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+        "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+        "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+          {
+            "a_key": "", # Properties of the object. Contains field @type with type URL.
+          },
+        ],
+        "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+      },
+      "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+      "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+      "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    },
+  ],
+}
+
+ +
+ list_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+ \ No newline at end of file diff --git a/docs/dyn/aiplatform_v1beta1.html b/docs/dyn/aiplatform_v1beta1.html index 909495b345..554ada442a 100644 --- a/docs/dyn/aiplatform_v1beta1.html +++ b/docs/dyn/aiplatform_v1beta1.html @@ -74,6 +74,11 @@

Vertex AI API

Instance Methods

+

+ media() +

+

Returns the media Resource.

+

projects()

diff --git a/docs/dyn/aiplatform_v1beta1.media.html b/docs/dyn/aiplatform_v1beta1.media.html new file mode 100644 index 0000000000..8630640220 --- /dev/null +++ b/docs/dyn/aiplatform_v1beta1.media.html @@ -0,0 +1,178 @@ + + + +

Vertex AI API . media

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ upload(parent, body=None, media_body=None, media_mime_type=None, x__xgafv=None)

+

Upload a file into a RagCorpus.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ upload(parent, body=None, media_body=None, media_mime_type=None, x__xgafv=None) +
Upload a file into a RagCorpus.
+
+Args:
+  parent: string, Required. The name of the RagCorpus resource into which to upload the file. Format: `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}` (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request message for VertexRagDataService.UploadRagFile.
+  "ragFile": { # A RagFile contains user data for chunking, embedding and indexing. # Required. The RagFile to upload.
+    "createTime": "A String", # Output only. Timestamp when this RagFile was created.
+    "description": "A String", # Optional. The description of the RagFile.
+    "directUploadSource": { # The input content is encapsulated and uploaded in the request. # Output only. The RagFile is encapsulated and uploaded in the UploadRagFile request.
+    },
+    "displayName": "A String", # Required. The display name of the RagFile. The name can be up to 128 characters long and can consist of any UTF-8 characters.
+    "gcsSource": { # The Google Cloud Storage location for the input content. # Output only. Google Cloud Storage location of the RagFile. It does not support wildcards in the GCS uri for now.
+      "uris": [ # Required. Google Cloud Storage URI(-s) to the input file(s). May contain wildcards. For more information on wildcards, see https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames.
+        "A String",
+      ],
+    },
+    "googleDriveSource": { # The Google Drive location for the input content. # Output only. Google Drive location. Supports importing individual files as well as Google Drive folders.
+      "resourceIds": [ # Required. Google Drive resource IDs.
+        { # The type and ID of the Google Drive resource.
+          "resourceId": "A String", # Required. The ID of the Google Drive resource.
+          "resourceType": "A String", # Required. The type of the Google Drive resource.
+        },
+      ],
+    },
+    "name": "A String", # Output only. The resource name of the RagFile.
+    "ragFileType": "A String", # Output only. The type of the RagFile.
+    "sizeBytes": "A String", # Output only. The size of the RagFile in bytes.
+    "updateTime": "A String", # Output only. Timestamp when this RagFile was last updated.
+  },
+  "uploadRagFileConfig": { # Config for uploading RagFile. # Required. The config for the RagFiles to be uploaded into the RagCorpus. VertexRagDataService.UploadRagFile.
+    "ragFileChunkingConfig": { # Specifies the size and overlap of chunks for RagFiles. # Specifies the size and overlap of chunks after uploading RagFile.
+      "chunkOverlap": 42, # The overlap between chunks.
+      "chunkSize": 42, # The size of the chunks.
+    },
+  },
+}
+
+  media_body: string, The filename of the media request body, or an instance of a MediaUpload object.
+  media_mime_type: string, The MIME type of the media request body, or an instance of a MediaUpload object.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response message for VertexRagDataService.UploadRagFile.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error that occurred while processing the RagFile.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "ragFile": { # A RagFile contains user data for chunking, embedding and indexing. # The RagFile that had been uploaded into the RagCorpus.
+    "createTime": "A String", # Output only. Timestamp when this RagFile was created.
+    "description": "A String", # Optional. The description of the RagFile.
+    "directUploadSource": { # The input content is encapsulated and uploaded in the request. # Output only. The RagFile is encapsulated and uploaded in the UploadRagFile request.
+    },
+    "displayName": "A String", # Required. The display name of the RagFile. The name can be up to 128 characters long and can consist of any UTF-8 characters.
+    "gcsSource": { # The Google Cloud Storage location for the input content. # Output only. Google Cloud Storage location of the RagFile. It does not support wildcards in the GCS uri for now.
+      "uris": [ # Required. Google Cloud Storage URI(-s) to the input file(s). May contain wildcards. For more information on wildcards, see https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames.
+        "A String",
+      ],
+    },
+    "googleDriveSource": { # The Google Drive location for the input content. # Output only. Google Drive location. Supports importing individual files as well as Google Drive folders.
+      "resourceIds": [ # Required. Google Drive resource IDs.
+        { # The type and ID of the Google Drive resource.
+          "resourceId": "A String", # Required. The ID of the Google Drive resource.
+          "resourceType": "A String", # Required. The type of the Google Drive resource.
+        },
+      ],
+    },
+    "name": "A String", # Output only. The resource name of the RagFile.
+    "ragFileType": "A String", # Output only. The type of the RagFile.
+    "sizeBytes": "A String", # Output only. The size of the RagFile in bytes.
+    "updateTime": "A String", # Output only. Timestamp when this RagFile was last updated.
+  },
+}
+
+ + \ No newline at end of file diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.endpoints.html b/docs/dyn/aiplatform_v1beta1.projects.locations.endpoints.html index 484b65d6ef..ba87dd36b1 100644 --- a/docs/dyn/aiplatform_v1beta1.projects.locations.endpoints.html +++ b/docs/dyn/aiplatform_v1beta1.projects.locations.endpoints.html @@ -221,8 +221,8 @@

Method Details

"a_key": "", # Properties of the object. }, }, - "inlineData": { # Raw media bytes. Text should not be sent as raw bytes, use the 'text' field. # Optional. Inlined bytes data. - "data": "A String", # Required. Raw bytes for media formats. + "inlineData": { # Content blob. It's preferred to send as text directly rather than raw bytes. # Optional. Inlined bytes data. + "data": "A String", # Required. Raw bytes. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). @@ -1223,8 +1223,8 @@

Method Details

"a_key": "", # Properties of the object. }, }, - "inlineData": { # Raw media bytes. Text should not be sent as raw bytes, use the 'text' field. # Optional. Inlined bytes data. - "data": "A String", # Required. Raw bytes for media formats. + "inlineData": { # Content blob. It's preferred to send as text directly rather than raw bytes. # Optional. Inlined bytes data. + "data": "A String", # Required. Raw bytes. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). @@ -1239,7 +1239,9 @@

Method Details

], "generationConfig": { # Generation config. # Optional. Generation config. "candidateCount": 42, # Optional. Number of candidates to generate. + "frequencyPenalty": 3.14, # Optional. Frequency penalties. "maxOutputTokens": 42, # Optional. The maximum number of output tokens to generate per message. + "presencePenalty": 3.14, # Optional. Positive penalties. "stopSequences": [ # Optional. Stop sequences. "A String", ], @@ -1254,6 +1256,46 @@

Method Details

"threshold": "A String", # Required. The harm block threshold. }, ], + "systemInstruction": { # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. # Optional. The user provided system instructions for the model. Note: only text should be used in parts and content in each part will be in a separate paragraph. + "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. + { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "fileData": { # URI based data. # Optional. URI based data. + "fileUri": "A String", # Required. URI. + "mimeType": "A String", # Required. The IANA standard MIME type of the source data. + }, + "functionCall": { # A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing the parameters and their values. # Optional. A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] with the parameters and their values. + "args": { # Optional. Required. The function parameters and values in JSON object format. See [FunctionDeclaration.parameters] for parameter details. + "a_key": "", # Properties of the object. + }, + "name": "A String", # Required. The name of the function to call. Matches [FunctionDeclaration.name]. 
+ }, + "functionResponse": { # The result output from a [FunctionCall] that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing any output from the function is used as context to the model. This should contain the result of a [FunctionCall] made based on model prediction. # Optional. The result output of a [FunctionCall] that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing any output from the function call. It is used as context to the model. + "name": "A String", # Required. The name of the function to call. Matches [FunctionDeclaration.name] and [FunctionCall.name]. + "response": { # Required. The function response in JSON object format. + "a_key": "", # Properties of the object. + }, + }, + "inlineData": { # Content blob. It's preferred to send as text directly rather than raw bytes. # Optional. Inlined bytes data. + "data": "A String", # Required. Raw bytes. + "mimeType": "A String", # Required. The IANA standard MIME type of the source data. + }, + "text": "A String", # Optional. Text part (can be code). + "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. + "endOffset": "A String", # Optional. The end offset of the video. + "startOffset": "A String", # Optional. The start offset of the video. + }, + }, + ], + "role": "A String", # Optional. The producer of the content. Must be either 'user' or 'model'. Useful to set for multi-turn conversations, otherwise can be left blank or unset. + }, + "toolConfig": { # Tool config. This config is shared for all tools provided in the request. # Optional. Tool config. This config is shared for all tools provided in the request. + "functionCallingConfig": { # Function calling config. # Optional. Function calling config. + "allowedFunctionNames": [ # Optional. 
Function names to call. Only set when the Mode is ANY. Function names should match [FunctionDeclaration.name]. With mode set to ANY, model will predict a function call from the set of function names provided. + "A String", + ], + "mode": "A String", # Optional. Function calling mode. + }, + }, "tools": [ # Optional. A list of `Tools` the model may use to generate the next response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. { # Tool details that the model may use to generate response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. A Tool object should contain exactly one type of Tool (e.g FunctionDeclaration, Retrieval or GoogleSearchRetrieval). "functionDeclarations": [ # Optional. Function tool type. One or more function declarations to be passed to the model along with the current user query. Model may decide to call a subset of these functions by populating FunctionCall in the response. User should provide a FunctionResponse for each function call in the next turn. Based on the function responses, Model will generate the final response back to the user. Maximum 64 function declarations can be provided. @@ -1288,6 +1330,34 @@

Method Details

"title": "A String", # Optional. The title of the Schema. "type": "A String", # Optional. The type of the data. }, + "response": { # Schema is used to define the format of input/output data. Represents a select subset of an [OpenAPI 3.0 schema object](https://spec.openapis.org/oas/v3.0.3#schema). More fields may be added in the future as needed. # Optional. Describes the output from this function in JSON Schema format. Reflects the Open API 3.03 Response Object. The Schema defines the type used for the response value of the function. + "default": "", # Optional. Default value of the data. + "description": "A String", # Optional. The description of the data. + "enum": [ # Optional. Possible values of the element of Type.STRING with enum format. For example we can define an Enum Direction as : {type:STRING, format:enum, enum:["EAST", NORTH", "SOUTH", "WEST"]} + "A String", + ], + "example": "", # Optional. Example of the object. Will only populated when the object is the root. + "format": "A String", # Optional. The format of the data. Supported formats: for NUMBER type: "float", "double" for INTEGER type: "int32", "int64" for STRING type: "email", "byte", etc + "items": # Object with schema name: GoogleCloudAiplatformV1beta1Schema # Optional. SCHEMA FIELDS FOR TYPE ARRAY Schema of the elements of Type.ARRAY. + "maxItems": "A String", # Optional. Maximum number of the elements for Type.ARRAY. + "maxLength": "A String", # Optional. Maximum length of the Type.STRING + "maxProperties": "A String", # Optional. Maximum number of the properties for Type.OBJECT. + "maximum": 3.14, # Optional. Maximum value of the Type.INTEGER and Type.NUMBER + "minItems": "A String", # Optional. Minimum number of the elements for Type.ARRAY. + "minLength": "A String", # Optional. SCHEMA FIELDS FOR TYPE STRING Minimum length of the Type.STRING + "minProperties": "A String", # Optional. Minimum number of the properties for Type.OBJECT. + "minimum": 3.14, # Optional. 
SCHEMA FIELDS FOR TYPE INTEGER and NUMBER Minimum value of the Type.INTEGER and Type.NUMBER + "nullable": True or False, # Optional. Indicates if the value may be null. + "pattern": "A String", # Optional. Pattern of the Type.STRING to restrict a string to a regular expression. + "properties": { # Optional. SCHEMA FIELDS FOR TYPE OBJECT Properties of Type.OBJECT. + "a_key": # Object with schema name: GoogleCloudAiplatformV1beta1Schema + }, + "required": [ # Optional. Required properties of Type.OBJECT. + "A String", + ], + "title": "A String", # Optional. The title of the Schema. + "type": "A String", # Optional. The type of the data. + }, }, ], "googleSearchRetrieval": { # Tool to retrieve public web data for grounding, powered by Google. # Optional. GoogleSearchRetrieval tool type. Specialized retrieval tool that is powered by Google search. @@ -1298,6 +1368,12 @@

Method Details

"vertexAiSearch": { # Retrieve from Vertex AI Search datastore for grounding. See https://cloud.google.com/vertex-ai-search-and-conversation # Set to use data source powered by Vertex AI Search. "datastore": "A String", # Required. Fully-qualified Vertex AI Search's datastore resource ID. Format: projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore} }, + "vertexRagStore": { # Retrieve from Vertex RAG Store for grounding. # Set to use data source powered by Vertex RAG store. User data is uploaded via the VertexRagDataService. + "ragCorpora": [ # Required. Vertex RAG Store corpus resource name: projects/{project}/locations/{location}/ragCorpora/{ragCorpus} Currently only one corpus is allowed. In the future we may open up multiple corpora support. However, they should be from the same project and location. + "A String", + ], + "similarityTopK": 42, # Optional. Number of top k results to return from the selected corpora. + }, }, }, ], @@ -1349,8 +1425,8 @@

Method Details

"a_key": "", # Properties of the object. }, }, - "inlineData": { # Raw media bytes. Text should not be sent as raw bytes, use the 'text' field. # Optional. Inlined bytes data. - "data": "A String", # Required. Raw bytes for media formats. + "inlineData": { # Content blob. It's preferred to send as text directly rather than raw bytes. # Optional. Inlined bytes data. + "data": "A String", # Required. Raw bytes. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). @@ -1368,6 +1444,10 @@

Method Details

"groundingAttributions": [ # Optional. List of grounding attributions. { # Grounding attribution. "confidenceScore": 3.14, # Optional. Output only. Confidence score of the attribution. Ranges from 0 to 1. 1 is the most confident. + "retrievedContext": { # Attribution from context retrieved by the retrieval tools. # Optional. Attribution from context retrieved by the retrieval tools. + "title": "A String", # Output only. Title of the attribution. + "uri": "A String", # Output only. URI reference of the attribution. + }, "segment": { # Segment of the content. # Output only. Segment of the content this attribution belongs to. "endIndex": 42, # Output only. End index in the given Part, measured in bytes. Offset from the start of the Part, exclusive, starting at zero. "partIndex": 42, # Output only. The index of a Part object within its parent Content object. @@ -1379,6 +1459,9 @@

Method Details

}, }, ], + "retrievalQueries": [ # Optional. Queries executed by the retrieval tools. + "A String", + ], "webSearchQueries": [ # Optional. Web search queries for the following-up web search. "A String", ], @@ -2794,8 +2877,8 @@

Method Details

"a_key": "", # Properties of the object. }, }, - "inlineData": { # Raw media bytes. Text should not be sent as raw bytes, use the 'text' field. # Optional. Inlined bytes data. - "data": "A String", # Required. Raw bytes for media formats. + "inlineData": { # Content blob. It's preferred to send as text directly rather than raw bytes. # Optional. Inlined bytes data. + "data": "A String", # Required. Raw bytes. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). @@ -2810,7 +2893,9 @@

Method Details

], "generationConfig": { # Generation config. # Optional. Generation config. "candidateCount": 42, # Optional. Number of candidates to generate. + "frequencyPenalty": 3.14, # Optional. Frequency penalties. "maxOutputTokens": 42, # Optional. The maximum number of output tokens to generate per message. + "presencePenalty": 3.14, # Optional. Positive penalties. "stopSequences": [ # Optional. Stop sequences. "A String", ], @@ -2825,6 +2910,46 @@

Method Details

"threshold": "A String", # Required. The harm block threshold. }, ], + "systemInstruction": { # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. # Optional. The user provided system instructions for the model. Note: only text should be used in parts and content in each part will be in a separate paragraph. + "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. + { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "fileData": { # URI based data. # Optional. URI based data. + "fileUri": "A String", # Required. URI. + "mimeType": "A String", # Required. The IANA standard MIME type of the source data. + }, + "functionCall": { # A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing the parameters and their values. # Optional. A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] with the parameters and their values. + "args": { # Optional. Required. The function parameters and values in JSON object format. See [FunctionDeclaration.parameters] for parameter details. + "a_key": "", # Properties of the object. + }, + "name": "A String", # Required. The name of the function to call. Matches [FunctionDeclaration.name]. 
+ }, + "functionResponse": { # The result output from a [FunctionCall] that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing any output from the function is used as context to the model. This should contain the result of a [FunctionCall] made based on model prediction. # Optional. The result output of a [FunctionCall] that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing any output from the function call. It is used as context to the model. + "name": "A String", # Required. The name of the function to call. Matches [FunctionDeclaration.name] and [FunctionCall.name]. + "response": { # Required. The function response in JSON object format. + "a_key": "", # Properties of the object. + }, + }, + "inlineData": { # Content blob. It's preferred to send as text directly rather than raw bytes. # Optional. Inlined bytes data. + "data": "A String", # Required. Raw bytes. + "mimeType": "A String", # Required. The IANA standard MIME type of the source data. + }, + "text": "A String", # Optional. Text part (can be code). + "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. + "endOffset": "A String", # Optional. The end offset of the video. + "startOffset": "A String", # Optional. The start offset of the video. + }, + }, + ], + "role": "A String", # Optional. The producer of the content. Must be either 'user' or 'model'. Useful to set for multi-turn conversations, otherwise can be left blank or unset. + }, + "toolConfig": { # Tool config. This config is shared for all tools provided in the request. # Optional. Tool config. This config is shared for all tools provided in the request. + "functionCallingConfig": { # Function calling config. # Optional. Function calling config. + "allowedFunctionNames": [ # Optional. 
Function names to call. Only set when the Mode is ANY. Function names should match [FunctionDeclaration.name]. With mode set to ANY, model will predict a function call from the set of function names provided. + "A String", + ], + "mode": "A String", # Optional. Function calling mode. + }, + }, "tools": [ # Optional. A list of `Tools` the model may use to generate the next response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. { # Tool details that the model may use to generate response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. A Tool object should contain exactly one type of Tool (e.g FunctionDeclaration, Retrieval or GoogleSearchRetrieval). "functionDeclarations": [ # Optional. Function tool type. One or more function declarations to be passed to the model along with the current user query. Model may decide to call a subset of these functions by populating FunctionCall in the response. User should provide a FunctionResponse for each function call in the next turn. Based on the function responses, Model will generate the final response back to the user. Maximum 64 function declarations can be provided. @@ -2859,6 +2984,34 @@

Method Details

"title": "A String", # Optional. The title of the Schema. "type": "A String", # Optional. The type of the data. }, + "response": { # Schema is used to define the format of input/output data. Represents a select subset of an [OpenAPI 3.0 schema object](https://spec.openapis.org/oas/v3.0.3#schema). More fields may be added in the future as needed. # Optional. Describes the output from this function in JSON Schema format. Reflects the Open API 3.03 Response Object. The Schema defines the type used for the response value of the function. + "default": "", # Optional. Default value of the data. + "description": "A String", # Optional. The description of the data. + "enum": [ # Optional. Possible values of the element of Type.STRING with enum format. For example we can define an Enum Direction as : {type:STRING, format:enum, enum:["EAST", NORTH", "SOUTH", "WEST"]} + "A String", + ], + "example": "", # Optional. Example of the object. Will only populated when the object is the root. + "format": "A String", # Optional. The format of the data. Supported formats: for NUMBER type: "float", "double" for INTEGER type: "int32", "int64" for STRING type: "email", "byte", etc + "items": # Object with schema name: GoogleCloudAiplatformV1beta1Schema # Optional. SCHEMA FIELDS FOR TYPE ARRAY Schema of the elements of Type.ARRAY. + "maxItems": "A String", # Optional. Maximum number of the elements for Type.ARRAY. + "maxLength": "A String", # Optional. Maximum length of the Type.STRING + "maxProperties": "A String", # Optional. Maximum number of the properties for Type.OBJECT. + "maximum": 3.14, # Optional. Maximum value of the Type.INTEGER and Type.NUMBER + "minItems": "A String", # Optional. Minimum number of the elements for Type.ARRAY. + "minLength": "A String", # Optional. SCHEMA FIELDS FOR TYPE STRING Minimum length of the Type.STRING + "minProperties": "A String", # Optional. Minimum number of the properties for Type.OBJECT. + "minimum": 3.14, # Optional. 
SCHEMA FIELDS FOR TYPE INTEGER and NUMBER Minimum value of the Type.INTEGER and Type.NUMBER + "nullable": True or False, # Optional. Indicates if the value may be null. + "pattern": "A String", # Optional. Pattern of the Type.STRING to restrict a string to a regular expression. + "properties": { # Optional. SCHEMA FIELDS FOR TYPE OBJECT Properties of Type.OBJECT. + "a_key": # Object with schema name: GoogleCloudAiplatformV1beta1Schema + }, + "required": [ # Optional. Required properties of Type.OBJECT. + "A String", + ], + "title": "A String", # Optional. The title of the Schema. + "type": "A String", # Optional. The type of the data. + }, }, ], "googleSearchRetrieval": { # Tool to retrieve public web data for grounding, powered by Google. # Optional. GoogleSearchRetrieval tool type. Specialized retrieval tool that is powered by Google search. @@ -2869,6 +3022,12 @@

Method Details

"vertexAiSearch": { # Retrieve from Vertex AI Search datastore for grounding. See https://cloud.google.com/vertex-ai-search-and-conversation # Set to use data source powered by Vertex AI Search. "datastore": "A String", # Required. Fully-qualified Vertex AI Search's datastore resource ID. Format: projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore} }, + "vertexRagStore": { # Retrieve from Vertex RAG Store for grounding. # Set to use data source powered by Vertex RAG store. User data is uploaded via the VertexRagDataService. + "ragCorpora": [ # Required. Vertex RAG Store corpus resource name: projects/{project}/locations/{location}/ragCorpora/{ragCorpus} Currently only one corpus is allowed. In the future we may open up multiple corpora support. However, they should be from the same project and location. + "A String", + ], + "similarityTopK": 42, # Optional. Number of top k results to return from the selected corpora. + }, }, }, ], @@ -2920,8 +3079,8 @@

Method Details

"a_key": "", # Properties of the object. }, }, - "inlineData": { # Raw media bytes. Text should not be sent as raw bytes, use the 'text' field. # Optional. Inlined bytes data. - "data": "A String", # Required. Raw bytes for media formats. + "inlineData": { # Content blob. It's preferred to send as text directly rather than raw bytes. # Optional. Inlined bytes data. + "data": "A String", # Required. Raw bytes. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). @@ -2939,6 +3098,10 @@

Method Details

"groundingAttributions": [ # Optional. List of grounding attributions. { # Grounding attribution. "confidenceScore": 3.14, # Optional. Output only. Confidence score of the attribution. Ranges from 0 to 1. 1 is the most confident. + "retrievedContext": { # Attribution from context retrieved by the retrieval tools. # Optional. Attribution from context retrieved by the retrieval tools. + "title": "A String", # Output only. Title of the attribution. + "uri": "A String", # Output only. URI reference of the attribution. + }, "segment": { # Segment of the content. # Output only. Segment of the content this attribution belongs to. "endIndex": 42, # Output only. End index in the given Part, measured in bytes. Offset from the start of the Part, exclusive, starting at zero. "partIndex": 42, # Output only. The index of a Part object within its parent Content object. @@ -2950,6 +3113,9 @@

Method Details

}, }, ], + "retrievalQueries": [ # Optional. Queries executed by the retrieval tools. + "A String", + ], "webSearchQueries": [ # Optional. Web search queries for the following-up web search. "A String", ], diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.extensions.html b/docs/dyn/aiplatform_v1beta1.projects.locations.extensions.html index d2b9d90d72..7a37d7958b 100644 --- a/docs/dyn/aiplatform_v1beta1.projects.locations.extensions.html +++ b/docs/dyn/aiplatform_v1beta1.projects.locations.extensions.html @@ -257,6 +257,34 @@

Method Details

"title": "A String", # Optional. The title of the Schema. "type": "A String", # Optional. The type of the data. }, + "response": { # Schema is used to define the format of input/output data. Represents a select subset of an [OpenAPI 3.0 schema object](https://spec.openapis.org/oas/v3.0.3#schema). More fields may be added in the future as needed. # Optional. Describes the output from this function in JSON Schema format. Reflects the Open API 3.03 Response Object. The Schema defines the type used for the response value of the function. + "default": "", # Optional. Default value of the data. + "description": "A String", # Optional. The description of the data. + "enum": [ # Optional. Possible values of the element of Type.STRING with enum format. For example we can define an Enum Direction as : {type:STRING, format:enum, enum:["EAST", NORTH", "SOUTH", "WEST"]} + "A String", + ], + "example": "", # Optional. Example of the object. Will only populated when the object is the root. + "format": "A String", # Optional. The format of the data. Supported formats: for NUMBER type: "float", "double" for INTEGER type: "int32", "int64" for STRING type: "email", "byte", etc + "items": # Object with schema name: GoogleCloudAiplatformV1beta1Schema # Optional. SCHEMA FIELDS FOR TYPE ARRAY Schema of the elements of Type.ARRAY. + "maxItems": "A String", # Optional. Maximum number of the elements for Type.ARRAY. + "maxLength": "A String", # Optional. Maximum length of the Type.STRING + "maxProperties": "A String", # Optional. Maximum number of the properties for Type.OBJECT. + "maximum": 3.14, # Optional. Maximum value of the Type.INTEGER and Type.NUMBER + "minItems": "A String", # Optional. Minimum number of the elements for Type.ARRAY. + "minLength": "A String", # Optional. SCHEMA FIELDS FOR TYPE STRING Minimum length of the Type.STRING + "minProperties": "A String", # Optional. Minimum number of the properties for Type.OBJECT. + "minimum": 3.14, # Optional. 
SCHEMA FIELDS FOR TYPE INTEGER and NUMBER Minimum value of the Type.INTEGER and Type.NUMBER + "nullable": True or False, # Optional. Indicates if the value may be null. + "pattern": "A String", # Optional. Pattern of the Type.STRING to restrict a string to a regular expression. + "properties": { # Optional. SCHEMA FIELDS FOR TYPE OBJECT Properties of Type.OBJECT. + "a_key": # Object with schema name: GoogleCloudAiplatformV1beta1Schema + }, + "required": [ # Optional. Required properties of Type.OBJECT. + "A String", + ], + "title": "A String", # Optional. The title of the Schema. + "type": "A String", # Optional. The type of the data. + }, }, "operationId": "A String", # Operation ID that uniquely identifies the operations among the extension. See: "Operation Object" in https://swagger.io/specification/. This field is parsed from the OpenAPI spec. For HTTP extensions, if it does not exist in the spec, we will generate one from the HTTP method and path. }, @@ -363,6 +391,34 @@

Method Details

"title": "A String", # Optional. The title of the Schema. "type": "A String", # Optional. The type of the data. }, + "response": { # Schema is used to define the format of input/output data. Represents a select subset of an [OpenAPI 3.0 schema object](https://spec.openapis.org/oas/v3.0.3#schema). More fields may be added in the future as needed. # Optional. Describes the output from this function in JSON Schema format. Reflects the Open API 3.03 Response Object. The Schema defines the type used for the response value of the function. + "default": "", # Optional. Default value of the data. + "description": "A String", # Optional. The description of the data. + "enum": [ # Optional. Possible values of the element of Type.STRING with enum format. For example we can define an Enum Direction as : {type:STRING, format:enum, enum:["EAST", NORTH", "SOUTH", "WEST"]} + "A String", + ], + "example": "", # Optional. Example of the object. Will only populated when the object is the root. + "format": "A String", # Optional. The format of the data. Supported formats: for NUMBER type: "float", "double" for INTEGER type: "int32", "int64" for STRING type: "email", "byte", etc + "items": # Object with schema name: GoogleCloudAiplatformV1beta1Schema # Optional. SCHEMA FIELDS FOR TYPE ARRAY Schema of the elements of Type.ARRAY. + "maxItems": "A String", # Optional. Maximum number of the elements for Type.ARRAY. + "maxLength": "A String", # Optional. Maximum length of the Type.STRING + "maxProperties": "A String", # Optional. Maximum number of the properties for Type.OBJECT. + "maximum": 3.14, # Optional. Maximum value of the Type.INTEGER and Type.NUMBER + "minItems": "A String", # Optional. Minimum number of the elements for Type.ARRAY. + "minLength": "A String", # Optional. SCHEMA FIELDS FOR TYPE STRING Minimum length of the Type.STRING + "minProperties": "A String", # Optional. Minimum number of the properties for Type.OBJECT. + "minimum": 3.14, # Optional. 
SCHEMA FIELDS FOR TYPE INTEGER and NUMBER Minimum value of the Type.INTEGER and Type.NUMBER + "nullable": True or False, # Optional. Indicates if the value may be null. + "pattern": "A String", # Optional. Pattern of the Type.STRING to restrict a string to a regular expression. + "properties": { # Optional. SCHEMA FIELDS FOR TYPE OBJECT Properties of Type.OBJECT. + "a_key": # Object with schema name: GoogleCloudAiplatformV1beta1Schema + }, + "required": [ # Optional. Required properties of Type.OBJECT. + "A String", + ], + "title": "A String", # Optional. The title of the Schema. + "type": "A String", # Optional. The type of the data. + }, }, "operationId": "A String", # Operation ID that uniquely identifies the operations among the extension. See: "Operation Object" in https://swagger.io/specification/. This field is parsed from the OpenAPI spec. For HTTP extensions, if it does not exist in the spec, we will generate one from the HTTP method and path. }, @@ -508,6 +564,34 @@

Method Details

"title": "A String", # Optional. The title of the Schema. "type": "A String", # Optional. The type of the data. }, + "response": { # Schema is used to define the format of input/output data. Represents a select subset of an [OpenAPI 3.0 schema object](https://spec.openapis.org/oas/v3.0.3#schema). More fields may be added in the future as needed. # Optional. Describes the output from this function in JSON Schema format. Reflects the Open API 3.03 Response Object. The Schema defines the type used for the response value of the function. + "default": "", # Optional. Default value of the data. + "description": "A String", # Optional. The description of the data. + "enum": [ # Optional. Possible values of the element of Type.STRING with enum format. For example we can define an Enum Direction as : {type:STRING, format:enum, enum:["EAST", NORTH", "SOUTH", "WEST"]} + "A String", + ], + "example": "", # Optional. Example of the object. Will only populated when the object is the root. + "format": "A String", # Optional. The format of the data. Supported formats: for NUMBER type: "float", "double" for INTEGER type: "int32", "int64" for STRING type: "email", "byte", etc + "items": # Object with schema name: GoogleCloudAiplatformV1beta1Schema # Optional. SCHEMA FIELDS FOR TYPE ARRAY Schema of the elements of Type.ARRAY. + "maxItems": "A String", # Optional. Maximum number of the elements for Type.ARRAY. + "maxLength": "A String", # Optional. Maximum length of the Type.STRING + "maxProperties": "A String", # Optional. Maximum number of the properties for Type.OBJECT. + "maximum": 3.14, # Optional. Maximum value of the Type.INTEGER and Type.NUMBER + "minItems": "A String", # Optional. Minimum number of the elements for Type.ARRAY. + "minLength": "A String", # Optional. SCHEMA FIELDS FOR TYPE STRING Minimum length of the Type.STRING + "minProperties": "A String", # Optional. Minimum number of the properties for Type.OBJECT. + "minimum": 3.14, # Optional. 
SCHEMA FIELDS FOR TYPE INTEGER and NUMBER Minimum value of the Type.INTEGER and Type.NUMBER + "nullable": True or False, # Optional. Indicates if the value may be null. + "pattern": "A String", # Optional. Pattern of the Type.STRING to restrict a string to a regular expression. + "properties": { # Optional. SCHEMA FIELDS FOR TYPE OBJECT Properties of Type.OBJECT. + "a_key": # Object with schema name: GoogleCloudAiplatformV1beta1Schema + }, + "required": [ # Optional. Required properties of Type.OBJECT. + "A String", + ], + "title": "A String", # Optional. The title of the Schema. + "type": "A String", # Optional. The type of the data. + }, }, "operationId": "A String", # Operation ID that uniquely identifies the operations among the extension. See: "Operation Object" in https://swagger.io/specification/. This field is parsed from the OpenAPI spec. For HTTP extensions, if it does not exist in the spec, we will generate one from the HTTP method and path. }, @@ -631,6 +715,34 @@

Method Details

"title": "A String", # Optional. The title of the Schema. "type": "A String", # Optional. The type of the data. }, + "response": { # Schema is used to define the format of input/output data. Represents a select subset of an [OpenAPI 3.0 schema object](https://spec.openapis.org/oas/v3.0.3#schema). More fields may be added in the future as needed. # Optional. Describes the output from this function in JSON Schema format. Reflects the Open API 3.03 Response Object. The Schema defines the type used for the response value of the function. + "default": "", # Optional. Default value of the data. + "description": "A String", # Optional. The description of the data. + "enum": [ # Optional. Possible values of the element of Type.STRING with enum format. For example we can define an Enum Direction as : {type:STRING, format:enum, enum:["EAST", NORTH", "SOUTH", "WEST"]} + "A String", + ], + "example": "", # Optional. Example of the object. Will only populated when the object is the root. + "format": "A String", # Optional. The format of the data. Supported formats: for NUMBER type: "float", "double" for INTEGER type: "int32", "int64" for STRING type: "email", "byte", etc + "items": # Object with schema name: GoogleCloudAiplatformV1beta1Schema # Optional. SCHEMA FIELDS FOR TYPE ARRAY Schema of the elements of Type.ARRAY. + "maxItems": "A String", # Optional. Maximum number of the elements for Type.ARRAY. + "maxLength": "A String", # Optional. Maximum length of the Type.STRING + "maxProperties": "A String", # Optional. Maximum number of the properties for Type.OBJECT. + "maximum": 3.14, # Optional. Maximum value of the Type.INTEGER and Type.NUMBER + "minItems": "A String", # Optional. Minimum number of the elements for Type.ARRAY. + "minLength": "A String", # Optional. SCHEMA FIELDS FOR TYPE STRING Minimum length of the Type.STRING + "minProperties": "A String", # Optional. Minimum number of the properties for Type.OBJECT. + "minimum": 3.14, # Optional. 
SCHEMA FIELDS FOR TYPE INTEGER and NUMBER Minimum value of the Type.INTEGER and Type.NUMBER + "nullable": True or False, # Optional. Indicates if the value may be null. + "pattern": "A String", # Optional. Pattern of the Type.STRING to restrict a string to a regular expression. + "properties": { # Optional. SCHEMA FIELDS FOR TYPE OBJECT Properties of Type.OBJECT. + "a_key": # Object with schema name: GoogleCloudAiplatformV1beta1Schema + }, + "required": [ # Optional. Required properties of Type.OBJECT. + "A String", + ], + "title": "A String", # Optional. The title of the Schema. + "type": "A String", # Optional. The type of the data. + }, }, "operationId": "A String", # Operation ID that uniquely identifies the operations among the extension. See: "Operation Object" in https://swagger.io/specification/. This field is parsed from the OpenAPI spec. For HTTP extensions, if it does not exist in the spec, we will generate one from the HTTP method and path. }, @@ -736,6 +848,34 @@

Method Details

"title": "A String", # Optional. The title of the Schema. "type": "A String", # Optional. The type of the data. }, + "response": { # Schema is used to define the format of input/output data. Represents a select subset of an [OpenAPI 3.0 schema object](https://spec.openapis.org/oas/v3.0.3#schema). More fields may be added in the future as needed. # Optional. Describes the output from this function in JSON Schema format. Reflects the Open API 3.03 Response Object. The Schema defines the type used for the response value of the function. + "default": "", # Optional. Default value of the data. + "description": "A String", # Optional. The description of the data. + "enum": [ # Optional. Possible values of the element of Type.STRING with enum format. For example we can define an Enum Direction as : {type:STRING, format:enum, enum:["EAST", NORTH", "SOUTH", "WEST"]} + "A String", + ], + "example": "", # Optional. Example of the object. Will only populated when the object is the root. + "format": "A String", # Optional. The format of the data. Supported formats: for NUMBER type: "float", "double" for INTEGER type: "int32", "int64" for STRING type: "email", "byte", etc + "items": # Object with schema name: GoogleCloudAiplatformV1beta1Schema # Optional. SCHEMA FIELDS FOR TYPE ARRAY Schema of the elements of Type.ARRAY. + "maxItems": "A String", # Optional. Maximum number of the elements for Type.ARRAY. + "maxLength": "A String", # Optional. Maximum length of the Type.STRING + "maxProperties": "A String", # Optional. Maximum number of the properties for Type.OBJECT. + "maximum": 3.14, # Optional. Maximum value of the Type.INTEGER and Type.NUMBER + "minItems": "A String", # Optional. Minimum number of the elements for Type.ARRAY. + "minLength": "A String", # Optional. SCHEMA FIELDS FOR TYPE STRING Minimum length of the Type.STRING + "minProperties": "A String", # Optional. Minimum number of the properties for Type.OBJECT. + "minimum": 3.14, # Optional. 
SCHEMA FIELDS FOR TYPE INTEGER and NUMBER Minimum value of the Type.INTEGER and Type.NUMBER + "nullable": True or False, # Optional. Indicates if the value may be null. + "pattern": "A String", # Optional. Pattern of the Type.STRING to restrict a string to a regular expression. + "properties": { # Optional. SCHEMA FIELDS FOR TYPE OBJECT Properties of Type.OBJECT. + "a_key": # Object with schema name: GoogleCloudAiplatformV1beta1Schema + }, + "required": [ # Optional. Required properties of Type.OBJECT. + "A String", + ], + "title": "A String", # Optional. The title of the Schema. + "type": "A String", # Optional. The type of the data. + }, }, "operationId": "A String", # Operation ID that uniquely identifies the operations among the extension. See: "Operation Object" in https://swagger.io/specification/. This field is parsed from the OpenAPI spec. For HTTP extensions, if it does not exist in the spec, we will generate one from the HTTP method and path. }, @@ -825,8 +965,8 @@

Method Details

"a_key": "", # Properties of the object. }, }, - "inlineData": { # Raw media bytes. Text should not be sent as raw bytes, use the 'text' field. # Optional. Inlined bytes data. - "data": "A String", # Required. Raw bytes for media formats. + "inlineData": { # Content blob. It's preferred to send as text directly rather than raw bytes. # Optional. Inlined bytes data. + "data": "A String", # Required. Raw bytes. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). @@ -839,10 +979,6 @@

Method Details

"role": "A String", # Optional. The producer of the content. Must be either 'user' or 'model'. Useful to set for multi-turn conversations, otherwise can be left blank or unset. }, ], - "query": { # User provided query message. # Required. User provided input query message. - "query": "A String", # Required. The query from user. - }, - "useFunctionCall": True or False, # Optional. Experiment control on whether to use function call. } x__xgafv: string, V1 error format. @@ -855,40 +991,6 @@

Method Details

{ # Response message for ExtensionExecutionService.QueryExtension. "failureMessage": "A String", # Failure message if any. - "metadata": { # Metadata for response # Metadata related to the query execution. - "checkpoint": { # Placeholder for all checkpoint related data. Any data needed to restore a request and more go/vertex-extension-query-operation # Optional. Checkpoint to restore a request - "content": "A String", # Required. encoded checkpoint - }, - "executionPlan": { # Execution plan for a request. # Optional. Execution plan for the request. - "steps": [ # Required. Sequence of steps to execute a request. - { # Single step in query execution plan. - "extensionExecution": { # Extension execution step. # Extension execution step. - "extension": "A String", # Required. extension resource name - "operationId": "A String", # Required. the operation id - }, - "respondToUser": { # Respond to user step. # Respond to user step. - }, - }, - ], - }, - "flowOutputs": { # To surface the v2 flow output. - "a_key": "", # Properties of the object. - }, - }, - "queryResponseMetadata": { - "steps": [ # ReAgent execution steps. - { # ReAgent execution steps. - "error": "A String", # Error messages from the extension or during response parsing. - "extensionInstruction": "A String", # Planner's instruction to the extension. - "extensionInvoked": "A String", # Planner's choice of extension to invoke. - "response": "A String", # Response of the extension. - "success": True or False, # When set to False, either the extension fails to execute or the response cannot be summarized. - "thought": "A String", # Planner's thought. - }, - ], - "useCreativity": True or False, # Whether the reasoning agent used creativity (instead of extensions provided) to build the response. - }, - "response": "A String", # Response to the user's query. "steps": [ # Steps of extension or LLM interaction, can contain function call, function response, or text response. 
The last step contains the final response to the query. { # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. @@ -909,8 +1011,8 @@

Method Details

"a_key": "", # Properties of the object. }, }, - "inlineData": { # Raw media bytes. Text should not be sent as raw bytes, use the 'text' field. # Optional. Inlined bytes data. - "data": "A String", # Required. Raw bytes for media formats. + "inlineData": { # Content blob. It's preferred to send as text directly rather than raw bytes. # Optional. Inlined bytes data. + "data": "A String", # Required. Raw bytes. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.featureOnlineStores.featureViews.html b/docs/dyn/aiplatform_v1beta1.projects.locations.featureOnlineStores.featureViews.html index 9f525353a9..d6d62813e8 100644 --- a/docs/dyn/aiplatform_v1beta1.projects.locations.featureOnlineStores.featureViews.html +++ b/docs/dyn/aiplatform_v1beta1.projects.locations.featureOnlineStores.featureViews.html @@ -161,6 +161,20 @@

Method Details

], "projectNumber": "A String", # Optional. The project number of the parent project of the Feature Groups. }, + "indexConfig": { # Configuration for vector indexing. # Optional. Configuration for index preparation for vector search. It contains the required configurations to create an index from source data, so that approximate nearest neighbor (a.k.a ANN) algorithms search can be performed during online serving. + "bruteForceConfig": { # Configuration options for using brute force search. # Optional. Configuration options for using brute force search, which simply implements the standard linear search in the database for each query. It is primarily meant for benchmarking and to generate the ground truth for approximate search. + }, + "crowdingColumn": "A String", # Optional. Column of crowding. This column contains crowding attribute which is a constraint on a neighbor list produced by FeatureOnlineStoreService.SearchNearestEntities to diversify search results. If NearestNeighborQuery.per_crowding_attribute_neighbor_count is set to K in SearchNearestEntitiesRequest, it's guaranteed that no more than K entities of the same crowding attribute are returned in the response. + "distanceMeasureType": "A String", # Optional. The distance measure used in nearest neighbor search. + "embeddingColumn": "A String", # Optional. Column of embedding. This column contains the source data to create index for vector search. embedding_column must be set when using vector search. + "embeddingDimension": 42, # Optional. The number of dimensions of the input embedding. + "filterColumns": [ # Optional. Columns of features that're used to filter vector search results. + "A String", + ], + "treeAhConfig": { # Configuration options for the tree-AH algorithm. # Optional. Configuration options for the tree-AH algorithm (Shallow tree + Asymmetric Hashing). Please refer to this paper for more details: https://arxiv.org/abs/1908.10396 + "leafNodeEmbeddingCount": "A String", # Optional. 
Number of embeddings on each leaf node. The default value is 1000 if not set. + }, + }, "labels": { # Optional. The labels with user-defined metadata to organize your FeatureViews. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information on and examples of labels. No more than 64 user labels can be associated with one FeatureOnlineStore(System labels are excluded)." System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. "a_key": "A String", }, @@ -370,6 +384,20 @@

Method Details

], "projectNumber": "A String", # Optional. The project number of the parent project of the Feature Groups. }, + "indexConfig": { # Configuration for vector indexing. # Optional. Configuration for index preparation for vector search. It contains the required configurations to create an index from source data, so that approximate nearest neighbor (a.k.a ANN) algorithms search can be performed during online serving. + "bruteForceConfig": { # Configuration options for using brute force search. # Optional. Configuration options for using brute force search, which simply implements the standard linear search in the database for each query. It is primarily meant for benchmarking and to generate the ground truth for approximate search. + }, + "crowdingColumn": "A String", # Optional. Column of crowding. This column contains crowding attribute which is a constraint on a neighbor list produced by FeatureOnlineStoreService.SearchNearestEntities to diversify search results. If NearestNeighborQuery.per_crowding_attribute_neighbor_count is set to K in SearchNearestEntitiesRequest, it's guaranteed that no more than K entities of the same crowding attribute are returned in the response. + "distanceMeasureType": "A String", # Optional. The distance measure used in nearest neighbor search. + "embeddingColumn": "A String", # Optional. Column of embedding. This column contains the source data to create index for vector search. embedding_column must be set when using vector search. + "embeddingDimension": 42, # Optional. The number of dimensions of the input embedding. + "filterColumns": [ # Optional. Columns of features that're used to filter vector search results. + "A String", + ], + "treeAhConfig": { # Configuration options for the tree-AH algorithm. # Optional. Configuration options for the tree-AH algorithm (Shallow tree + Asymmetric Hashing). Please refer to this paper for more details: https://arxiv.org/abs/1908.10396 + "leafNodeEmbeddingCount": "A String", # Optional. 
Number of embeddings on each leaf node. The default value is 1000 if not set. + }, + }, "labels": { # Optional. The labels with user-defined metadata to organize your FeatureViews. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information on and examples of labels. No more than 64 user labels can be associated with one FeatureOnlineStore(System labels are excluded)." System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. "a_key": "A String", }, @@ -472,6 +500,20 @@

Method Details

], "projectNumber": "A String", # Optional. The project number of the parent project of the Feature Groups. }, + "indexConfig": { # Configuration for vector indexing. # Optional. Configuration for index preparation for vector search. It contains the required configurations to create an index from source data, so that approximate nearest neighbor (a.k.a ANN) algorithms search can be performed during online serving. + "bruteForceConfig": { # Configuration options for using brute force search. # Optional. Configuration options for using brute force search, which simply implements the standard linear search in the database for each query. It is primarily meant for benchmarking and to generate the ground truth for approximate search. + }, + "crowdingColumn": "A String", # Optional. Column of crowding. This column contains crowding attribute which is a constraint on a neighbor list produced by FeatureOnlineStoreService.SearchNearestEntities to diversify search results. If NearestNeighborQuery.per_crowding_attribute_neighbor_count is set to K in SearchNearestEntitiesRequest, it's guaranteed that no more than K entities of the same crowding attribute are returned in the response. + "distanceMeasureType": "A String", # Optional. The distance measure used in nearest neighbor search. + "embeddingColumn": "A String", # Optional. Column of embedding. This column contains the source data to create index for vector search. embedding_column must be set when using vector search. + "embeddingDimension": 42, # Optional. The number of dimensions of the input embedding. + "filterColumns": [ # Optional. Columns of features that're used to filter vector search results. + "A String", + ], + "treeAhConfig": { # Configuration options for the tree-AH algorithm. # Optional. Configuration options for the tree-AH algorithm (Shallow tree + Asymmetric Hashing). Please refer to this paper for more details: https://arxiv.org/abs/1908.10396 + "leafNodeEmbeddingCount": "A String", # Optional. 
Number of embeddings on each leaf node. The default value is 1000 if not set. + }, + }, "labels": { # Optional. The labels with user-defined metadata to organize your FeatureViews. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information on and examples of labels. No more than 64 user labels can be associated with one FeatureOnlineStore(System labels are excluded)." System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. "a_key": "A String", }, @@ -545,6 +587,20 @@

Method Details

], "projectNumber": "A String", # Optional. The project number of the parent project of the Feature Groups. }, + "indexConfig": { # Configuration for vector indexing. # Optional. Configuration for index preparation for vector search. It contains the required configurations to create an index from source data, so that approximate nearest neighbor (a.k.a ANN) algorithms search can be performed during online serving. + "bruteForceConfig": { # Configuration options for using brute force search. # Optional. Configuration options for using brute force search, which simply implements the standard linear search in the database for each query. It is primarily meant for benchmarking and to generate the ground truth for approximate search. + }, + "crowdingColumn": "A String", # Optional. Column of crowding. This column contains crowding attribute which is a constraint on a neighbor list produced by FeatureOnlineStoreService.SearchNearestEntities to diversify search results. If NearestNeighborQuery.per_crowding_attribute_neighbor_count is set to K in SearchNearestEntitiesRequest, it's guaranteed that no more than K entities of the same crowding attribute are returned in the response. + "distanceMeasureType": "A String", # Optional. The distance measure used in nearest neighbor search. + "embeddingColumn": "A String", # Optional. Column of embedding. This column contains the source data to create index for vector search. embedding_column must be set when using vector search. + "embeddingDimension": 42, # Optional. The number of dimensions of the input embedding. + "filterColumns": [ # Optional. Columns of features that're used to filter vector search results. + "A String", + ], + "treeAhConfig": { # Configuration options for the tree-AH algorithm. # Optional. Configuration options for the tree-AH algorithm (Shallow tree + Asymmetric Hashing). Please refer to this paper for more details: https://arxiv.org/abs/1908.10396 + "leafNodeEmbeddingCount": "A String", # Optional. 
Number of embeddings on each leaf node. The default value is 1000 if not set. + }, + }, "labels": { # Optional. The labels with user-defined metadata to organize your FeatureViews. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information on and examples of labels. No more than 64 user labels can be associated with one FeatureOnlineStore(System labels are excluded)." System reserved label keys are prefixed with "aiplatform.googleapis.com/" and are immutable. "a_key": "A String", }, diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.featureOnlineStores.html b/docs/dyn/aiplatform_v1beta1.projects.locations.featureOnlineStores.html index b8afa15a74..9e4508cf5e 100644 --- a/docs/dyn/aiplatform_v1beta1.projects.locations.featureOnlineStores.html +++ b/docs/dyn/aiplatform_v1beta1.projects.locations.featureOnlineStores.html @@ -138,7 +138,7 @@

Method Details

}, }, "createTime": "A String", # Output only. Timestamp when this FeatureOnlineStore was created. - "dedicatedServingEndpoint": { # The dedicated serving endpoint for this FeatureOnlineStore. Only need to set when you choose Optimized storage type or enable EmbeddingManagement. Will use public endpoint by default. Note, for EmbeddingManagement use case, only [DedicatedServingEndpoint.public_endpoint_domain_name] is available now. # Optional. The dedicated serving endpoint for this FeatureOnlineStore, which is different from common Vertex service endpoint. + "dedicatedServingEndpoint": { # The dedicated serving endpoint for this FeatureOnlineStore. Only need to set when you choose Optimized storage type. Public endpoint is provisioned by default. # Optional. The dedicated serving endpoint for this FeatureOnlineStore, which is different from common Vertex service endpoint. "privateServiceConnectConfig": { # Represents configuration for private service connect. # Optional. Private service connect config. The private service connection is available only for Optimized storage type, not for embedding management now. If PrivateServiceConnectConfig.enable_private_service_connect set to true, customers will use private service connection to send request. Otherwise, the connection will set to public endpoint. "enablePrivateServiceConnect": True or False, # Required. If true, expose the IndexEndpoint via private service connect. "projectAllowlist": [ # A list of Projects from which the forwarding rule will target the service attachment. @@ -148,7 +148,7 @@

Method Details

"publicEndpointDomainName": "A String", # Output only. This field will be populated with the domain name to use for this FeatureOnlineStore "serviceAttachment": "A String", # Output only. The name of the service attachment resource. Populated if private service connect is enabled and after FeatureViewSync is created. }, - "embeddingManagement": { # Contains settings for embedding management. # Optional. The settings for embedding management in FeatureOnlineStore. + "embeddingManagement": { # Deprecated: This sub message is no longer needed anymore and embedding management is automatically enabled when specifying Optimized storage type. Contains settings for embedding management. # Optional. Deprecated: This field is no longer needed anymore and embedding management is automatically enabled when specifying Optimized storage type. "enabled": True or False, # Optional. Immutable. Whether to enable embedding management in this FeatureOnlineStore. It's immutable after creation to ensure the FeatureOnlineStore availability. }, "etag": "A String", # Optional. Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. @@ -251,7 +251,7 @@

Method Details

}, }, "createTime": "A String", # Output only. Timestamp when this FeatureOnlineStore was created. - "dedicatedServingEndpoint": { # The dedicated serving endpoint for this FeatureOnlineStore. Only need to set when you choose Optimized storage type or enable EmbeddingManagement. Will use public endpoint by default. Note, for EmbeddingManagement use case, only [DedicatedServingEndpoint.public_endpoint_domain_name] is available now. # Optional. The dedicated serving endpoint for this FeatureOnlineStore, which is different from common Vertex service endpoint. + "dedicatedServingEndpoint": { # The dedicated serving endpoint for this FeatureOnlineStore. Only need to set when you choose Optimized storage type. Public endpoint is provisioned by default. # Optional. The dedicated serving endpoint for this FeatureOnlineStore, which is different from common Vertex service endpoint. "privateServiceConnectConfig": { # Represents configuration for private service connect. # Optional. Private service connect config. The private service connection is available only for Optimized storage type, not for embedding management now. If PrivateServiceConnectConfig.enable_private_service_connect set to true, customers will use private service connection to send request. Otherwise, the connection will set to public endpoint. "enablePrivateServiceConnect": True or False, # Required. If true, expose the IndexEndpoint via private service connect. "projectAllowlist": [ # A list of Projects from which the forwarding rule will target the service attachment. @@ -261,7 +261,7 @@

Method Details

"publicEndpointDomainName": "A String", # Output only. This field will be populated with the domain name to use for this FeatureOnlineStore "serviceAttachment": "A String", # Output only. The name of the service attachment resource. Populated if private service connect is enabled and after FeatureViewSync is created. }, - "embeddingManagement": { # Contains settings for embedding management. # Optional. The settings for embedding management in FeatureOnlineStore. + "embeddingManagement": { # Deprecated: This sub message is no longer needed anymore and embedding management is automatically enabled when specifying Optimized storage type. Contains settings for embedding management. # Optional. Deprecated: This field is no longer needed anymore and embedding management is automatically enabled when specifying Optimized storage type. "enabled": True or False, # Optional. Immutable. Whether to enable embedding management in this FeatureOnlineStore. It's immutable after creation to ensure the FeatureOnlineStore availability. }, "etag": "A String", # Optional. Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. @@ -340,7 +340,7 @@

Method Details

}, }, "createTime": "A String", # Output only. Timestamp when this FeatureOnlineStore was created. - "dedicatedServingEndpoint": { # The dedicated serving endpoint for this FeatureOnlineStore. Only need to set when you choose Optimized storage type or enable EmbeddingManagement. Will use public endpoint by default. Note, for EmbeddingManagement use case, only [DedicatedServingEndpoint.public_endpoint_domain_name] is available now. # Optional. The dedicated serving endpoint for this FeatureOnlineStore, which is different from common Vertex service endpoint. + "dedicatedServingEndpoint": { # The dedicated serving endpoint for this FeatureOnlineStore. Only need to set when you choose Optimized storage type. Public endpoint is provisioned by default. # Optional. The dedicated serving endpoint for this FeatureOnlineStore, which is different from common Vertex service endpoint. "privateServiceConnectConfig": { # Represents configuration for private service connect. # Optional. Private service connect config. The private service connection is available only for Optimized storage type, not for embedding management now. If PrivateServiceConnectConfig.enable_private_service_connect set to true, customers will use private service connection to send request. Otherwise, the connection will set to public endpoint. "enablePrivateServiceConnect": True or False, # Required. If true, expose the IndexEndpoint via private service connect. "projectAllowlist": [ # A list of Projects from which the forwarding rule will target the service attachment. @@ -350,7 +350,7 @@

Method Details

"publicEndpointDomainName": "A String", # Output only. This field will be populated with the domain name to use for this FeatureOnlineStore "serviceAttachment": "A String", # Output only. The name of the service attachment resource. Populated if private service connect is enabled and after FeatureViewSync is created. }, - "embeddingManagement": { # Contains settings for embedding management. # Optional. The settings for embedding management in FeatureOnlineStore. + "embeddingManagement": { # Deprecated: This sub message is no longer needed anymore and embedding management is automatically enabled when specifying Optimized storage type. Contains settings for embedding management. # Optional. Deprecated: This field is no longer needed anymore and embedding management is automatically enabled when specifying Optimized storage type. "enabled": True or False, # Optional. Immutable. Whether to enable embedding management in this FeatureOnlineStore. It's immutable after creation to ensure the FeatureOnlineStore availability. }, "etag": "A String", # Optional. Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. @@ -400,7 +400,7 @@

Method Details

}, }, "createTime": "A String", # Output only. Timestamp when this FeatureOnlineStore was created. - "dedicatedServingEndpoint": { # The dedicated serving endpoint for this FeatureOnlineStore. Only need to set when you choose Optimized storage type or enable EmbeddingManagement. Will use public endpoint by default. Note, for EmbeddingManagement use case, only [DedicatedServingEndpoint.public_endpoint_domain_name] is available now. # Optional. The dedicated serving endpoint for this FeatureOnlineStore, which is different from common Vertex service endpoint. + "dedicatedServingEndpoint": { # The dedicated serving endpoint for this FeatureOnlineStore. Only need to set when you choose Optimized storage type. Public endpoint is provisioned by default. # Optional. The dedicated serving endpoint for this FeatureOnlineStore, which is different from common Vertex service endpoint. "privateServiceConnectConfig": { # Represents configuration for private service connect. # Optional. Private service connect config. The private service connection is available only for Optimized storage type, not for embedding management now. If PrivateServiceConnectConfig.enable_private_service_connect set to true, customers will use private service connection to send request. Otherwise, the connection will set to public endpoint. "enablePrivateServiceConnect": True or False, # Required. If true, expose the IndexEndpoint via private service connect. "projectAllowlist": [ # A list of Projects from which the forwarding rule will target the service attachment. @@ -410,7 +410,7 @@

Method Details

"publicEndpointDomainName": "A String", # Output only. This field will be populated with the domain name to use for this FeatureOnlineStore "serviceAttachment": "A String", # Output only. The name of the service attachment resource. Populated if private service connect is enabled and after FeatureViewSync is created. }, - "embeddingManagement": { # Contains settings for embedding management. # Optional. The settings for embedding management in FeatureOnlineStore. + "embeddingManagement": { # Deprecated: This sub message is no longer needed anymore and embedding management is automatically enabled when specifying Optimized storage type. Contains settings for embedding management. # Optional. Deprecated: This field is no longer needed anymore and embedding management is automatically enabled when specifying Optimized storage type. "enabled": True or False, # Optional. Immutable. Whether to enable embedding management in this FeatureOnlineStore. It's immutable after creation to ensure the FeatureOnlineStore availability. }, "etag": "A String", # Optional. Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.html b/docs/dyn/aiplatform_v1beta1.projects.locations.html index ca70554bbb..44de52670c 100644 --- a/docs/dyn/aiplatform_v1beta1.projects.locations.html +++ b/docs/dyn/aiplatform_v1beta1.projects.locations.html @@ -267,6 +267,9 @@

Instance Methods

close()

Close httplib2 connections.

+

+ evaluateInstances(location, body=None, x__xgafv=None)

+

Evaluates instances based on a given metric.

get(name, x__xgafv=None)

Gets information about a location.

@@ -276,12 +279,412 @@

Instance Methods

list_next()

Retrieves the next page of results.

+

+ retrieveContexts(parent, body=None, x__xgafv=None)

+

Retrieves relevant contexts for a query.

Method Details

close()
Close httplib2 connections.
+
+ evaluateInstances(location, body=None, x__xgafv=None) +
Evaluates instances based on a given metric.
+
+Args:
+  location: string, Required. The resource name of the Location to evaluate the instances. Format: `projects/{project}/locations/{location}` (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request message for EvaluationService.EvaluateInstances.
+  "bleuInput": { # Input for bleu metric. # Instances and metric spec for bleu metric.
+    "instances": [ # Required. Repeated bleu instances.
+      { # Spec for bleu instance.
+        "prediction": "A String", # Required. Output of the evaluated model.
+        "reference": "A String", # Required. Ground truth used to compare against the prediction.
+      },
+    ],
+    "metricSpec": { # Spec for bleu score metric - calculates the precision of n-grams in the prediction as compared to reference - returns a score ranging between 0 to 1. # Required. Spec for bleu score metric.
+    },
+  },
+  "coherenceInput": { # Input for coherence metric. # Input for coherence metric.
+    "instance": { # Spec for coherence instance. # Required. Coherence instance.
+      "prediction": "A String", # Required. Output of the evaluated model.
+    },
+    "metricSpec": { # Spec for coherence score metric. # Required. Spec for coherence score metric.
+      "version": 42, # Optional. Which version to use for evaluation.
+    },
+  },
+  "exactMatchInput": { # Input for exact match metric. # Auto metric instances. Instances and metric spec for exact match metric.
+    "instances": [ # Required. Repeated exact match instances.
+      { # Spec for exact match instance.
+        "prediction": "A String", # Required. Output of the evaluated model.
+        "reference": "A String", # Required. Ground truth used to compare against the prediction.
+      },
+    ],
+    "metricSpec": { # Spec for exact match metric - returns 1 if prediction and reference exactly matches, otherwise 0. # Required. Spec for exact match metric.
+    },
+  },
+  "fluencyInput": { # Input for fluency metric. # LLM-based metric instance. General text generation metrics, applicable to other categories. Input for fluency metric.
+    "instance": { # Spec for fluency instance. # Required. Fluency instance.
+      "prediction": "A String", # Required. Output of the evaluated model.
+    },
+    "metricSpec": { # Spec for fluency score metric. # Required. Spec for fluency score metric.
+      "version": 42, # Optional. Which version to use for evaluation.
+    },
+  },
+  "fulfillmentInput": { # Input for fulfillment metric. # Input for fulfillment metric.
+    "instance": { # Spec for fulfillment instance. # Required. Fulfillment instance.
+      "instruction": "A String", # Required. Inference instruction prompt to compare prediction with.
+      "prediction": "A String", # Required. Output of the evaluated model.
+    },
+    "metricSpec": { # Spec for fulfillment metric. # Required. Spec for fulfillment score metric.
+      "version": 42, # Optional. Which version to use for evaluation.
+    },
+  },
+  "groundednessInput": { # Input for groundedness metric. # Input for groundedness metric.
+    "instance": { # Spec for groundedness instance. # Required. Groundedness instance.
+      "context": "A String", # Required. Background information provided in context used to compare against the prediction.
+      "prediction": "A String", # Required. Output of the evaluated model.
+    },
+    "metricSpec": { # Spec for groundedness metric. # Required. Spec for groundedness metric.
+      "version": 42, # Optional. Which version to use for evaluation.
+    },
+  },
+  "pairwiseQuestionAnsweringQualityInput": { # Input for pairwise question answering quality metric. # Input for pairwise question answering quality metric.
+    "instance": { # Spec for pairwise question answering quality instance. # Required. Pairwise question answering quality instance.
+      "baselinePrediction": "A String", # Required. Output of the baseline model.
+      "context": "A String", # Optional. Text to answer the question.
+      "instruction": "A String", # Required. Question Answering prompt for LLM.
+      "prediction": "A String", # Required. Output of the candidate model.
+      "reference": "A String", # Optional. Ground truth used to compare against the prediction.
+    },
+    "metricSpec": { # Spec for pairwise question answering quality score metric. # Required. Spec for pairwise question answering quality score metric.
+      "useReference": True or False, # Optional. Whether to use instance.reference to compute question answering quality.
+      "version": 42, # Optional. Which version to use for evaluation.
+    },
+  },
+  "pairwiseSummarizationQualityInput": { # Input for pairwise summarization quality metric. # Input for pairwise summarization quality metric.
+    "instance": { # Spec for pairwise summarization quality instance. # Required. Pairwise summarization quality instance.
+      "baselinePrediction": "A String", # Required. Output of the baseline model.
+      "context": "A String", # Required. Text to be summarized.
+      "instruction": "A String", # Required. Summarization prompt for LLM.
+      "prediction": "A String", # Required. Output of the candidate model.
+      "reference": "A String", # Optional. Ground truth used to compare against the prediction.
+    },
+    "metricSpec": { # Spec for pairwise summarization quality score metric. # Required. Spec for pairwise summarization quality score metric.
+      "useReference": True or False, # Optional. Whether to use instance.reference to compute pairwise summarization quality.
+      "version": 42, # Optional. Which version to use for evaluation.
+    },
+  },
+  "questionAnsweringCorrectnessInput": { # Input for question answering correctness metric. # Input for question answering correctness metric.
+    "instance": { # Spec for question answering correctness instance. # Required. Question answering correctness instance.
+      "context": "A String", # Optional. Text provided as context to answer the question.
+      "instruction": "A String", # Required. The question asked and other instruction in the inference prompt.
+      "prediction": "A String", # Required. Output of the evaluated model.
+      "reference": "A String", # Optional. Ground truth used to compare against the prediction.
+    },
+    "metricSpec": { # Spec for question answering correctness metric. # Required. Spec for question answering correctness score metric.
+      "useReference": True or False, # Optional. Whether to use instance.reference to compute question answering correctness.
+      "version": 42, # Optional. Which version to use for evaluation.
+    },
+  },
+  "questionAnsweringHelpfulnessInput": { # Input for question answering helpfulness metric. # Input for question answering helpfulness metric.
+    "instance": { # Spec for question answering helpfulness instance. # Required. Question answering helpfulness instance.
+      "context": "A String", # Optional. Text provided as context to answer the question.
+      "instruction": "A String", # Required. The question asked and other instruction in the inference prompt.
+      "prediction": "A String", # Required. Output of the evaluated model.
+      "reference": "A String", # Optional. Ground truth used to compare against the prediction.
+    },
+    "metricSpec": { # Spec for question answering helpfulness metric. # Required. Spec for question answering helpfulness score metric.
+      "useReference": True or False, # Optional. Whether to use instance.reference to compute question answering helpfulness.
+      "version": 42, # Optional. Which version to use for evaluation.
+    },
+  },
+  "questionAnsweringQualityInput": { # Input for question answering quality metric. # Input for question answering quality metric.
+    "instance": { # Spec for question answering quality instance. # Required. Question answering quality instance.
+      "context": "A String", # Optional. Text to answer the question.
+      "instruction": "A String", # Required. Question Answering prompt for LLM.
+      "prediction": "A String", # Required. Output of the evaluated model.
+      "reference": "A String", # Optional. Ground truth used to compare against the prediction.
+    },
+    "metricSpec": { # Spec for question answering quality score metric. # Required. Spec for question answering quality score metric.
+      "useReference": True or False, # Optional. Whether to use instance.reference to compute question answering quality.
+      "version": 42, # Optional. Which version to use for evaluation.
+    },
+  },
+  "questionAnsweringRelevanceInput": { # Input for question answering relevance metric. # Input for question answering relevance metric.
+    "instance": { # Spec for question answering relevance instance. # Required. Question answering relevance instance.
+      "context": "A String", # Optional. Text provided as context to answer the question.
+      "instruction": "A String", # Required. The question asked and other instruction in the inference prompt.
+      "prediction": "A String", # Required. Output of the evaluated model.
+      "reference": "A String", # Optional. Ground truth used to compare against the prediction.
+    },
+    "metricSpec": { # Spec for question answering relevance metric. # Required. Spec for question answering relevance score metric.
+      "useReference": True or False, # Optional. Whether to use instance.reference to compute question answering relevance.
+      "version": 42, # Optional. Which version to use for evaluation.
+    },
+  },
+  "ragContextRecallInput": { # Input for rag context recall metric. # Input for rag context recall metric.
+    "instance": { # Spec for rag context recall instance. # Required. Rag context recall instance.
+      "context": "A String", # Required. Retrieved facts from RAG pipeline as context to be evaluated.
+      "reference": "A String", # Required. Ground truth used to compare against the context.
+    },
+    "metricSpec": { # Spec for rag context recall metric. # Required. Spec for rag context recall metric.
+      "version": 42, # Optional. Which version to use for evaluation.
+    },
+  },
+  "responseRecallInput": { # Input for response recall metric. # Input for response recall metric.
+    "instance": { # Spec for response recall instance. # Required. Response recall instance.
+      "prediction": "A String", # Required. Output of the evaluated model.
+      "reference": "A String", # Required. Ground truth used to compare against the prediction.
+    },
+    "metricSpec": { # Spec for response recall metric. # Required. Spec for response recall score metric.
+      "version": 42, # Optional. Which version to use for evaluation.
+    },
+  },
+  "rougeInput": { # Input for rouge metric. # Instances and metric spec for rouge metric.
+    "instances": [ # Required. Repeated rouge instances.
+      { # Spec for rouge instance.
+        "prediction": "A String", # Required. Output of the evaluated model.
+        "reference": "A String", # Required. Ground truth used to compare against the prediction.
+      },
+    ],
+    "metricSpec": { # Spec for rouge score metric - calculates the recall of n-grams in prediction as compared to reference - returns a score ranging between 0 and 1. # Required. Spec for rouge score metric.
+      "rougeType": "A String", # Optional. Supported rouge types are rougen[1-9], rougeL and rougeLsum.
+      "splitSummaries": True or False, # Optional. Whether to split summaries while using rougeLsum.
+      "useStemmer": True or False, # Optional. Whether to use stemmer to compute rouge score.
+    },
+  },
+  "safetyInput": { # Input for safety metric. # Input for safety metric.
+    "instance": { # Spec for safety instance. # Required. Safety instance.
+      "prediction": "A String", # Required. Output of the evaluated model.
+    },
+    "metricSpec": { # Spec for safety metric. # Required. Spec for safety metric.
+      "version": 42, # Optional. Which version to use for evaluation.
+    },
+  },
+  "summarizationHelpfulnessInput": { # Input for summarization helpfulness metric. # Input for summarization helpfulness metric.
+    "instance": { # Spec for summarization helpfulness instance. # Required. Summarization helpfulness instance.
+      "context": "A String", # Required. Text to be summarized.
+      "instruction": "A String", # Optional. Summarization prompt for LLM.
+      "prediction": "A String", # Required. Output of the evaluated model.
+      "reference": "A String", # Optional. Ground truth used to compare against the prediction.
+    },
+    "metricSpec": { # Spec for summarization helpfulness score metric. # Required. Spec for summarization helpfulness score metric.
+      "useReference": True or False, # Optional. Whether to use instance.reference to compute summarization helpfulness.
+      "version": 42, # Optional. Which version to use for evaluation.
+    },
+  },
+  "summarizationQualityInput": { # Input for summarization quality metric. # Input for summarization quality metric.
+    "instance": { # Spec for summarization quality instance. # Required. Summarization quality instance.
+      "context": "A String", # Required. Text to be summarized.
+      "instruction": "A String", # Required. Summarization prompt for LLM.
+      "prediction": "A String", # Required. Output of the evaluated model.
+      "reference": "A String", # Optional. Ground truth used to compare against the prediction.
+    },
+    "metricSpec": { # Spec for summarization quality score metric. # Required. Spec for summarization quality score metric.
+      "useReference": True or False, # Optional. Whether to use instance.reference to compute summarization quality.
+      "version": 42, # Optional. Which version to use for evaluation.
+    },
+  },
+  "summarizationVerbosityInput": { # Input for summarization verbosity metric. # Input for summarization verbosity metric.
+    "instance": { # Spec for summarization verbosity instance. # Required. Summarization verbosity instance.
+      "context": "A String", # Required. Text to be summarized.
+      "instruction": "A String", # Optional. Summarization prompt for LLM.
+      "prediction": "A String", # Required. Output of the evaluated model.
+      "reference": "A String", # Optional. Ground truth used to compare against the prediction.
+    },
+    "metricSpec": { # Spec for summarization verbosity score metric. # Required. Spec for summarization verbosity score metric.
+      "useReference": True or False, # Optional. Whether to use instance.reference to compute summarization verbosity.
+      "version": 42, # Optional. Which version to use for evaluation.
+    },
+  },
+  "toolCallValidInput": { # Input for tool call valid metric. # Tool call metric instances. Input for tool call valid metric.
+    "instances": [ # Required. Repeated tool call valid instances.
+      { # Spec for tool call valid instance.
+        "prediction": "A String", # Required. Output of the evaluated model.
+        "reference": "A String", # Required. Ground truth used to compare against the prediction.
+      },
+    ],
+    "metricSpec": { # Spec for tool call valid metric. # Required. Spec for tool call valid metric.
+    },
+  },
+  "toolNameMatchInput": { # Input for tool name match metric. # Input for tool name match metric.
+    "instances": [ # Required. Repeated tool name match instances.
+      { # Spec for tool name match instance.
+        "prediction": "A String", # Required. Output of the evaluated model.
+        "reference": "A String", # Required. Ground truth used to compare against the prediction.
+      },
+    ],
+    "metricSpec": { # Spec for tool name match metric. # Required. Spec for tool name match metric.
+    },
+  },
+  "toolParameterKeyMatchInput": { # Input for tool parameter key match metric. # Input for tool parameter key match metric.
+    "instances": [ # Required. Repeated tool parameter key match instances.
+      { # Spec for tool parameter key match instance.
+        "prediction": "A String", # Required. Output of the evaluated model.
+        "reference": "A String", # Required. Ground truth used to compare against the prediction.
+      },
+    ],
+    "metricSpec": { # Spec for tool parameter key match metric. # Required. Spec for tool parameter key match metric.
+    },
+  },
+  "toolParameterKvMatchInput": { # Input for tool parameter key value match metric. # Input for tool parameter key value match metric.
+    "instances": [ # Required. Repeated tool parameter key value match instances.
+      { # Spec for tool parameter key value match instance.
+        "prediction": "A String", # Required. Output of the evaluated model.
+        "reference": "A String", # Required. Ground truth used to compare against the prediction.
+      },
+    ],
+    "metricSpec": { # Spec for tool parameter key value match metric. # Required. Spec for tool parameter key value match metric.
+      "useStrictStringMatch": True or False, # Optional. Whether to use STRICT string match on parameter values.
+    },
+  },
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response message for EvaluationService.EvaluateInstances.
+  "bleuResults": { # Results for bleu metric. # Results for bleu metric.
+    "bleuMetricValues": [ # Output only. Bleu metric values.
+      { # Bleu metric value for an instance.
+        "score": 3.14, # Output only. Bleu score.
+      },
+    ],
+  },
+  "coherenceResult": { # Spec for coherence result. # Result for coherence metric.
+    "confidence": 3.14, # Output only. Confidence for coherence score.
+    "explanation": "A String", # Output only. Explanation for coherence score.
+    "score": 3.14, # Output only. Coherence score.
+  },
+  "exactMatchResults": { # Results for exact match metric. # Auto metric evaluation results. Results for exact match metric.
+    "exactMatchMetricValues": [ # Output only. Exact match metric values.
+      { # Exact match metric value for an instance.
+        "score": 3.14, # Output only. Exact match score.
+      },
+    ],
+  },
+  "fluencyResult": { # Spec for fluency result. # LLM-based metric evaluation result. General text generation metrics, applicable to other categories. Result for fluency metric.
+    "confidence": 3.14, # Output only. Confidence for fluency score.
+    "explanation": "A String", # Output only. Explanation for fluency score.
+    "score": 3.14, # Output only. Fluency score.
+  },
+  "fulfillmentResult": { # Spec for fulfillment result. # Result for fulfillment metric.
+    "confidence": 3.14, # Output only. Confidence for fulfillment score.
+    "explanation": "A String", # Output only. Explanation for fulfillment score.
+    "score": 3.14, # Output only. Fulfillment score.
+  },
+  "groundednessResult": { # Spec for groundedness result. # Result for groundedness metric.
+    "confidence": 3.14, # Output only. Confidence for groundedness score.
+    "explanation": "A String", # Output only. Explanation for groundedness score.
+    "score": 3.14, # Output only. Groundedness score.
+  },
+  "pairwiseQuestionAnsweringQualityResult": { # Spec for pairwise question answering quality result. # Result for pairwise question answering quality metric.
+    "confidence": 3.14, # Output only. Confidence for question answering quality score.
+    "explanation": "A String", # Output only. Explanation for question answering quality score.
+    "pairwiseChoice": "A String", # Output only. Pairwise question answering prediction choice.
+  },
+  "pairwiseSummarizationQualityResult": { # Spec for pairwise summarization quality result. # Result for pairwise summarization quality metric.
+    "confidence": 3.14, # Output only. Confidence for summarization quality score.
+    "explanation": "A String", # Output only. Explanation for summarization quality score.
+    "pairwiseChoice": "A String", # Output only. Pairwise summarization prediction choice.
+  },
+  "questionAnsweringCorrectnessResult": { # Spec for question answering correctness result. # Result for question answering correctness metric.
+    "confidence": 3.14, # Output only. Confidence for question answering correctness score.
+    "explanation": "A String", # Output only. Explanation for question answering correctness score.
+    "score": 3.14, # Output only. Question Answering Correctness score.
+  },
+  "questionAnsweringHelpfulnessResult": { # Spec for question answering helpfulness result. # Result for question answering helpfulness metric.
+    "confidence": 3.14, # Output only. Confidence for question answering helpfulness score.
+    "explanation": "A String", # Output only. Explanation for question answering helpfulness score.
+    "score": 3.14, # Output only. Question Answering Helpfulness score.
+  },
+  "questionAnsweringQualityResult": { # Spec for question answering quality result. # Question answering only metrics. Result for question answering quality metric.
+    "confidence": 3.14, # Output only. Confidence for question answering quality score.
+    "explanation": "A String", # Output only. Explanation for question answering quality score.
+    "score": 3.14, # Output only. Question Answering Quality score.
+  },
+  "questionAnsweringRelevanceResult": { # Spec for question answering relevance result. # Result for question answering relevance metric.
+    "confidence": 3.14, # Output only. Confidence for question answering relevance score.
+    "explanation": "A String", # Output only. Explanation for question answering relevance score.
+    "score": 3.14, # Output only. Question Answering Relevance score.
+  },
+  "ragContextRecallResult": { # Spec for rag context recall result. # RAG only metrics. Result for context recall metric.
+    "confidence": 3.14, # Output only. Confidence for rag context recall score.
+    "explanation": "A String", # Output only. Explanation for rag context recall score.
+    "score": 3.14, # Output only. RagContextRecall score.
+  },
+  "responseRecallResult": { # Spec for response recall result. # Result for response recall metric.
+    "confidence": 3.14, # Output only. Confidence for response recall score.
+    "explanation": "A String", # Output only. Explanation for response recall score.
+    "score": 3.14, # Output only. ResponseRecall score.
+  },
+  "rougeResults": { # Results for rouge metric. # Results for rouge metric.
+    "rougeMetricValues": [ # Output only. Rouge metric values.
+      { # Rouge metric value for an instance.
+        "score": 3.14, # Output only. Rouge score.
+      },
+    ],
+  },
+  "safetyResult": { # Spec for safety result. # Result for safety metric.
+    "confidence": 3.14, # Output only. Confidence for safety score.
+    "explanation": "A String", # Output only. Explanation for safety score.
+    "score": 3.14, # Output only. Safety score.
+  },
+  "summarizationHelpfulnessResult": { # Spec for summarization helpfulness result. # Result for summarization helpfulness metric.
+    "confidence": 3.14, # Output only. Confidence for summarization helpfulness score.
+    "explanation": "A String", # Output only. Explanation for summarization helpfulness score.
+    "score": 3.14, # Output only. Summarization Helpfulness score.
+  },
+  "summarizationQualityResult": { # Spec for summarization quality result. # Summarization only metrics. Result for summarization quality metric.
+    "confidence": 3.14, # Output only. Confidence for summarization quality score.
+    "explanation": "A String", # Output only. Explanation for summarization quality score.
+    "score": 3.14, # Output only. Summarization Quality score.
+  },
+  "summarizationVerbosityResult": { # Spec for summarization verbosity result. # Result for summarization verbosity metric.
+    "confidence": 3.14, # Output only. Confidence for summarization verbosity score.
+    "explanation": "A String", # Output only. Explanation for summarization verbosity score.
+    "score": 3.14, # Output only. Summarization Verbosity score.
+  },
+  "toolCallValidResults": { # Results for tool call valid metric. # Tool call metrics. Results for tool call valid metric.
+    "toolCallValidMetricValues": [ # Output only. Tool call valid metric values.
+      { # Tool call valid metric value for an instance.
+        "score": 3.14, # Output only. Tool call valid score.
+      },
+    ],
+  },
+  "toolNameMatchResults": { # Results for tool name match metric. # Results for tool name match metric.
+    "toolNameMatchMetricValues": [ # Output only. Tool name match metric values.
+      { # Tool name match metric value for an instance.
+        "score": 3.14, # Output only. Tool name match score.
+      },
+    ],
+  },
+  "toolParameterKeyMatchResults": { # Results for tool parameter key match metric. # Results for tool parameter key match metric.
+    "toolParameterKeyMatchMetricValues": [ # Output only. Tool parameter key match metric values.
+      { # Tool parameter key match metric value for an instance.
+        "score": 3.14, # Output only. Tool parameter key match score.
+      },
+    ],
+  },
+  "toolParameterKvMatchResults": { # Results for tool parameter key value match metric. # Results for tool parameter key value match metric.
+    "toolParameterKvMatchMetricValues": [ # Output only. Tool parameter key value match metric values.
+      { # Tool parameter key value match metric value for an instance.
+        "score": 3.14, # Output only. Tool parameter key value match score.
+      },
+    ],
+  },
+}
+
+
get(name, x__xgafv=None)
Gets information about a location.
@@ -358,4 +761,46 @@ 

Method Details

+
+ retrieveContexts(parent, body=None, x__xgafv=None) +
Retrieves relevant contexts for a query.
+
+Args:
+  parent: string, Required. The resource name of the Location from which to retrieve RagContexts. The user must have permission to make a call in the project. Format: `projects/{project}/locations/{location}`. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request message for VertexRagService.RetrieveContexts.
+  "query": { # A query to retrieve relevant contexts. # Required. Single RAG retrieve query.
+    "similarityTopK": 42, # Optional. The number of contexts to retrieve.
+    "text": "A String", # Optional. The query in text format to get relevant contexts.
+  },
+  "vertexRagStore": { # The data source for Vertex RagStore. # The data source for Vertex RagStore.
+    "ragCorpora": [ # Required. RagCorpora resource name. Format: `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}` Currently only one corpus is allowed. In the future we may open up multiple corpora support. However, they should be from the same project and location.
+      "A String",
+    ],
+  },
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response message for VertexRagService.RetrieveContexts.
+  "contexts": { # Relevant contexts for one query. # The contexts of the query.
+    "contexts": [ # All its contexts.
+      { # A context of the query.
+        "distance": 3.14, # The distance between the query vector and the context text vector.
+        "sourceUri": "A String", # For vertex RagStore, if the file is imported from Cloud Storage or Google Drive, source_uri will be original file URI in Cloud Storage or Google Drive; if file is uploaded, source_uri will be file display name.
+        "text": "A String", # The text chunk.
+      },
+    ],
+  },
+}
+
+ \ No newline at end of file diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.publishers.models.html b/docs/dyn/aiplatform_v1beta1.projects.locations.publishers.models.html index eaf2899f93..a938dde960 100644 --- a/docs/dyn/aiplatform_v1beta1.projects.locations.publishers.models.html +++ b/docs/dyn/aiplatform_v1beta1.projects.locations.publishers.models.html @@ -174,8 +174,8 @@

Method Details

"a_key": "", # Properties of the object. }, }, - "inlineData": { # Raw media bytes. Text should not be sent as raw bytes, use the 'text' field. # Optional. Inlined bytes data. - "data": "A String", # Required. Raw bytes for media formats. + "inlineData": { # Content blob. It's preferred to send as text directly rather than raw bytes. # Optional. Inlined bytes data. + "data": "A String", # Required. Raw bytes. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). @@ -238,8 +238,8 @@

Method Details

"a_key": "", # Properties of the object. }, }, - "inlineData": { # Raw media bytes. Text should not be sent as raw bytes, use the 'text' field. # Optional. Inlined bytes data. - "data": "A String", # Required. Raw bytes for media formats. + "inlineData": { # Content blob. It's preferred to send as text directly rather than raw bytes. # Optional. Inlined bytes data. + "data": "A String", # Required. Raw bytes. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). @@ -254,7 +254,9 @@

Method Details

], "generationConfig": { # Generation config. # Optional. Generation config. "candidateCount": 42, # Optional. Number of candidates to generate. + "frequencyPenalty": 3.14, # Optional. Frequency penalties. "maxOutputTokens": 42, # Optional. The maximum number of output tokens to generate per message. + "presencePenalty": 3.14, # Optional. Positive penalties. "stopSequences": [ # Optional. Stop sequences. "A String", ], @@ -269,6 +271,46 @@

Method Details

"threshold": "A String", # Required. The harm block threshold. }, ], + "systemInstruction": { # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. # Optional. The user provided system instructions for the model. Note: only text should be used in parts and content in each part will be in a separate paragraph. + "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. + { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "fileData": { # URI based data. # Optional. URI based data. + "fileUri": "A String", # Required. URI. + "mimeType": "A String", # Required. The IANA standard MIME type of the source data. + }, + "functionCall": { # A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing the parameters and their values. # Optional. A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] with the parameters and their values. + "args": { # Optional. Required. The function parameters and values in JSON object format. See [FunctionDeclaration.parameters] for parameter details. + "a_key": "", # Properties of the object. + }, + "name": "A String", # Required. The name of the function to call. Matches [FunctionDeclaration.name]. 
+ }, + "functionResponse": { # The result output from a [FunctionCall] that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing any output from the function is used as context to the model. This should contain the result of a [FunctionCall] made based on model prediction. # Optional. The result output of a [FunctionCall] that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing any output from the function call. It is used as context to the model. + "name": "A String", # Required. The name of the function to call. Matches [FunctionDeclaration.name] and [FunctionCall.name]. + "response": { # Required. The function response in JSON object format. + "a_key": "", # Properties of the object. + }, + }, + "inlineData": { # Content blob. It's preferred to send as text directly rather than raw bytes. # Optional. Inlined bytes data. + "data": "A String", # Required. Raw bytes. + "mimeType": "A String", # Required. The IANA standard MIME type of the source data. + }, + "text": "A String", # Optional. Text part (can be code). + "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. + "endOffset": "A String", # Optional. The end offset of the video. + "startOffset": "A String", # Optional. The start offset of the video. + }, + }, + ], + "role": "A String", # Optional. The producer of the content. Must be either 'user' or 'model'. Useful to set for multi-turn conversations, otherwise can be left blank or unset. + }, + "toolConfig": { # Tool config. This config is shared for all tools provided in the request. # Optional. Tool config. This config is shared for all tools provided in the request. + "functionCallingConfig": { # Function calling config. # Optional. Function calling config. + "allowedFunctionNames": [ # Optional. 
Function names to call. Only set when the Mode is ANY. Function names should match [FunctionDeclaration.name]. With mode set to ANY, model will predict a function call from the set of function names provided. + "A String", + ], + "mode": "A String", # Optional. Function calling mode. + }, + }, "tools": [ # Optional. A list of `Tools` the model may use to generate the next response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. { # Tool details that the model may use to generate response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. A Tool object should contain exactly one type of Tool (e.g FunctionDeclaration, Retrieval or GoogleSearchRetrieval). "functionDeclarations": [ # Optional. Function tool type. One or more function declarations to be passed to the model along with the current user query. Model may decide to call a subset of these functions by populating FunctionCall in the response. User should provide a FunctionResponse for each function call in the next turn. Based on the function responses, Model will generate the final response back to the user. Maximum 64 function declarations can be provided. @@ -303,6 +345,34 @@

Method Details

"title": "A String", # Optional. The title of the Schema. "type": "A String", # Optional. The type of the data. }, + "response": { # Schema is used to define the format of input/output data. Represents a select subset of an [OpenAPI 3.0 schema object](https://spec.openapis.org/oas/v3.0.3#schema). More fields may be added in the future as needed. # Optional. Describes the output from this function in JSON Schema format. Reflects the Open API 3.03 Response Object. The Schema defines the type used for the response value of the function. + "default": "", # Optional. Default value of the data. + "description": "A String", # Optional. The description of the data. + "enum": [ # Optional. Possible values of the element of Type.STRING with enum format. For example we can define an Enum Direction as : {type:STRING, format:enum, enum:["EAST", NORTH", "SOUTH", "WEST"]} + "A String", + ], + "example": "", # Optional. Example of the object. Will only populated when the object is the root. + "format": "A String", # Optional. The format of the data. Supported formats: for NUMBER type: "float", "double" for INTEGER type: "int32", "int64" for STRING type: "email", "byte", etc + "items": # Object with schema name: GoogleCloudAiplatformV1beta1Schema # Optional. SCHEMA FIELDS FOR TYPE ARRAY Schema of the elements of Type.ARRAY. + "maxItems": "A String", # Optional. Maximum number of the elements for Type.ARRAY. + "maxLength": "A String", # Optional. Maximum length of the Type.STRING + "maxProperties": "A String", # Optional. Maximum number of the properties for Type.OBJECT. + "maximum": 3.14, # Optional. Maximum value of the Type.INTEGER and Type.NUMBER + "minItems": "A String", # Optional. Minimum number of the elements for Type.ARRAY. + "minLength": "A String", # Optional. SCHEMA FIELDS FOR TYPE STRING Minimum length of the Type.STRING + "minProperties": "A String", # Optional. Minimum number of the properties for Type.OBJECT. + "minimum": 3.14, # Optional. 
SCHEMA FIELDS FOR TYPE INTEGER and NUMBER Minimum value of the Type.INTEGER and Type.NUMBER + "nullable": True or False, # Optional. Indicates if the value may be null. + "pattern": "A String", # Optional. Pattern of the Type.STRING to restrict a string to a regular expression. + "properties": { # Optional. SCHEMA FIELDS FOR TYPE OBJECT Properties of Type.OBJECT. + "a_key": # Object with schema name: GoogleCloudAiplatformV1beta1Schema + }, + "required": [ # Optional. Required properties of Type.OBJECT. + "A String", + ], + "title": "A String", # Optional. The title of the Schema. + "type": "A String", # Optional. The type of the data. + }, }, ], "googleSearchRetrieval": { # Tool to retrieve public web data for grounding, powered by Google. # Optional. GoogleSearchRetrieval tool type. Specialized retrieval tool that is powered by Google search. @@ -313,6 +383,12 @@

Method Details

"vertexAiSearch": { # Retrieve from Vertex AI Search datastore for grounding. See https://cloud.google.com/vertex-ai-search-and-conversation # Set to use data source powered by Vertex AI Search. "datastore": "A String", # Required. Fully-qualified Vertex AI Search's datastore resource ID. Format: projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore} }, + "vertexRagStore": { # Retrieve from Vertex RAG Store for grounding. # Set to use data source powered by Vertex RAG store. User data is uploaded via the VertexRagDataService. + "ragCorpora": [ # Required. Vertex RAG Store corpus resource name: projects/{project}/locations/{location}/ragCorpora/{ragCorpus} Currently only one corpus is allowed. In the future we may open up multiple corpora support. However, they should be from the same project and location. + "A String", + ], + "similarityTopK": 42, # Optional. Number of top k results to return from the selected corpora. + }, }, }, ], @@ -364,8 +440,8 @@

Method Details

"a_key": "", # Properties of the object. }, }, - "inlineData": { # Raw media bytes. Text should not be sent as raw bytes, use the 'text' field. # Optional. Inlined bytes data. - "data": "A String", # Required. Raw bytes for media formats. + "inlineData": { # Content blob. It's preferred to send as text directly rather than raw bytes. # Optional. Inlined bytes data. + "data": "A String", # Required. Raw bytes. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). @@ -383,6 +459,10 @@

Method Details

"groundingAttributions": [ # Optional. List of grounding attributions. { # Grounding attribution. "confidenceScore": 3.14, # Optional. Output only. Confidence score of the attribution. Ranges from 0 to 1. 1 is the most confident. + "retrievedContext": { # Attribution from context retrieved by the retrieval tools. # Optional. Attribution from context retrieved by the retrieval tools. + "title": "A String", # Output only. Title of the attribution. + "uri": "A String", # Output only. URI reference of the attribution. + }, "segment": { # Segment of the content. # Output only. Segment of the content this attribution belongs to. "endIndex": 42, # Output only. End index in the given Part, measured in bytes. Offset from the start of the Part, exclusive, starting at zero. "partIndex": 42, # Output only. The index of a Part object within its parent Content object. @@ -394,6 +474,9 @@

Method Details

}, }, ], + "retrievalQueries": [ # Optional. Queries executed by the retrieval tools. + "A String", + ], "webSearchQueries": [ # Optional. Web search queries for the following-up web search. "A String", ], @@ -762,8 +845,8 @@

Method Details

"a_key": "", # Properties of the object. }, }, - "inlineData": { # Raw media bytes. Text should not be sent as raw bytes, use the 'text' field. # Optional. Inlined bytes data. - "data": "A String", # Required. Raw bytes for media formats. + "inlineData": { # Content blob. It's preferred to send as text directly rather than raw bytes. # Optional. Inlined bytes data. + "data": "A String", # Required. Raw bytes. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). @@ -778,7 +861,9 @@

Method Details

], "generationConfig": { # Generation config. # Optional. Generation config. "candidateCount": 42, # Optional. Number of candidates to generate. + "frequencyPenalty": 3.14, # Optional. Frequency penalties. "maxOutputTokens": 42, # Optional. The maximum number of output tokens to generate per message. + "presencePenalty": 3.14, # Optional. Positive penalties. "stopSequences": [ # Optional. Stop sequences. "A String", ], @@ -793,6 +878,46 @@

Method Details

"threshold": "A String", # Required. The harm block threshold. }, ], + "systemInstruction": { # The base structured datatype containing multi-part content of a message. A `Content` includes a `role` field designating the producer of the `Content` and a `parts` field containing multi-part data that contains the content of the message turn. # Optional. The user provided system instructions for the model. Note: only text should be used in parts and content in each part will be in a separate paragraph. + "parts": [ # Required. Ordered `Parts` that constitute a single message. Parts may have different IANA MIME types. + { # A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes. + "fileData": { # URI based data. # Optional. URI based data. + "fileUri": "A String", # Required. URI. + "mimeType": "A String", # Required. The IANA standard MIME type of the source data. + }, + "functionCall": { # A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing the parameters and their values. # Optional. A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] with the parameters and their values. + "args": { # Optional. Required. The function parameters and values in JSON object format. See [FunctionDeclaration.parameters] for parameter details. + "a_key": "", # Properties of the object. + }, + "name": "A String", # Required. The name of the function to call. Matches [FunctionDeclaration.name]. 
+ }, + "functionResponse": { # The result output from a [FunctionCall] that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing any output from the function is used as context to the model. This should contain the result of a [FunctionCall] made based on model prediction. # Optional. The result output of a [FunctionCall] that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing any output from the function call. It is used as context to the model. + "name": "A String", # Required. The name of the function to call. Matches [FunctionDeclaration.name] and [FunctionCall.name]. + "response": { # Required. The function response in JSON object format. + "a_key": "", # Properties of the object. + }, + }, + "inlineData": { # Content blob. It's preferred to send as text directly rather than raw bytes. # Optional. Inlined bytes data. + "data": "A String", # Required. Raw bytes. + "mimeType": "A String", # Required. The IANA standard MIME type of the source data. + }, + "text": "A String", # Optional. Text part (can be code). + "videoMetadata": { # Metadata describes the input video content. # Optional. Video metadata. The metadata should only be specified while the video data is presented in inline_data or file_data. + "endOffset": "A String", # Optional. The end offset of the video. + "startOffset": "A String", # Optional. The start offset of the video. + }, + }, + ], + "role": "A String", # Optional. The producer of the content. Must be either 'user' or 'model'. Useful to set for multi-turn conversations, otherwise can be left blank or unset. + }, + "toolConfig": { # Tool config. This config is shared for all tools provided in the request. # Optional. Tool config. This config is shared for all tools provided in the request. + "functionCallingConfig": { # Function calling config. # Optional. Function calling config. + "allowedFunctionNames": [ # Optional. 
Function names to call. Only set when the Mode is ANY. Function names should match [FunctionDeclaration.name]. With mode set to ANY, model will predict a function call from the set of function names provided. + "A String", + ], + "mode": "A String", # Optional. Function calling mode. + }, + }, "tools": [ # Optional. A list of `Tools` the model may use to generate the next response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. { # Tool details that the model may use to generate response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. A Tool object should contain exactly one type of Tool (e.g FunctionDeclaration, Retrieval or GoogleSearchRetrieval). "functionDeclarations": [ # Optional. Function tool type. One or more function declarations to be passed to the model along with the current user query. Model may decide to call a subset of these functions by populating FunctionCall in the response. User should provide a FunctionResponse for each function call in the next turn. Based on the function responses, Model will generate the final response back to the user. Maximum 64 function declarations can be provided. @@ -827,6 +952,34 @@

Method Details

"title": "A String", # Optional. The title of the Schema. "type": "A String", # Optional. The type of the data. }, + "response": { # Schema is used to define the format of input/output data. Represents a select subset of an [OpenAPI 3.0 schema object](https://spec.openapis.org/oas/v3.0.3#schema). More fields may be added in the future as needed. # Optional. Describes the output from this function in JSON Schema format. Reflects the Open API 3.03 Response Object. The Schema defines the type used for the response value of the function. + "default": "", # Optional. Default value of the data. + "description": "A String", # Optional. The description of the data. + "enum": [ # Optional. Possible values of the element of Type.STRING with enum format. For example we can define an Enum Direction as : {type:STRING, format:enum, enum:["EAST", NORTH", "SOUTH", "WEST"]} + "A String", + ], + "example": "", # Optional. Example of the object. Will only populated when the object is the root. + "format": "A String", # Optional. The format of the data. Supported formats: for NUMBER type: "float", "double" for INTEGER type: "int32", "int64" for STRING type: "email", "byte", etc + "items": # Object with schema name: GoogleCloudAiplatformV1beta1Schema # Optional. SCHEMA FIELDS FOR TYPE ARRAY Schema of the elements of Type.ARRAY. + "maxItems": "A String", # Optional. Maximum number of the elements for Type.ARRAY. + "maxLength": "A String", # Optional. Maximum length of the Type.STRING + "maxProperties": "A String", # Optional. Maximum number of the properties for Type.OBJECT. + "maximum": 3.14, # Optional. Maximum value of the Type.INTEGER and Type.NUMBER + "minItems": "A String", # Optional. Minimum number of the elements for Type.ARRAY. + "minLength": "A String", # Optional. SCHEMA FIELDS FOR TYPE STRING Minimum length of the Type.STRING + "minProperties": "A String", # Optional. Minimum number of the properties for Type.OBJECT. + "minimum": 3.14, # Optional. 
SCHEMA FIELDS FOR TYPE INTEGER and NUMBER Minimum value of the Type.INTEGER and Type.NUMBER + "nullable": True or False, # Optional. Indicates if the value may be null. + "pattern": "A String", # Optional. Pattern of the Type.STRING to restrict a string to a regular expression. + "properties": { # Optional. SCHEMA FIELDS FOR TYPE OBJECT Properties of Type.OBJECT. + "a_key": # Object with schema name: GoogleCloudAiplatformV1beta1Schema + }, + "required": [ # Optional. Required properties of Type.OBJECT. + "A String", + ], + "title": "A String", # Optional. The title of the Schema. + "type": "A String", # Optional. The type of the data. + }, }, ], "googleSearchRetrieval": { # Tool to retrieve public web data for grounding, powered by Google. # Optional. GoogleSearchRetrieval tool type. Specialized retrieval tool that is powered by Google search. @@ -837,6 +990,12 @@

Method Details

"vertexAiSearch": { # Retrieve from Vertex AI Search datastore for grounding. See https://cloud.google.com/vertex-ai-search-and-conversation # Set to use data source powered by Vertex AI Search. "datastore": "A String", # Required. Fully-qualified Vertex AI Search's datastore resource ID. Format: projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore} }, + "vertexRagStore": { # Retrieve from Vertex RAG Store for grounding. # Set to use data source powered by Vertex RAG store. User data is uploaded via the VertexRagDataService. + "ragCorpora": [ # Required. Vertex RAG Store corpus resource name: projects/{project}/locations/{location}/ragCorpora/{ragCorpus} Currently only one corpus is allowed. In the future we may open up multiple corpora support. However, they should be from the same project and location. + "A String", + ], + "similarityTopK": 42, # Optional. Number of top k results to return from the selected corpora. + }, }, }, ], @@ -888,8 +1047,8 @@

Method Details

"a_key": "", # Properties of the object. }, }, - "inlineData": { # Raw media bytes. Text should not be sent as raw bytes, use the 'text' field. # Optional. Inlined bytes data. - "data": "A String", # Required. Raw bytes for media formats. + "inlineData": { # Content blob. It's preferred to send as text directly rather than raw bytes. # Optional. Inlined bytes data. + "data": "A String", # Required. Raw bytes. "mimeType": "A String", # Required. The IANA standard MIME type of the source data. }, "text": "A String", # Optional. Text part (can be code). @@ -907,6 +1066,10 @@

Method Details

"groundingAttributions": [ # Optional. List of grounding attributions. { # Grounding attribution. "confidenceScore": 3.14, # Optional. Output only. Confidence score of the attribution. Ranges from 0 to 1. 1 is the most confident. + "retrievedContext": { # Attribution from context retrieved by the retrieval tools. # Optional. Attribution from context retrieved by the retrieval tools. + "title": "A String", # Output only. Title of the attribution. + "uri": "A String", # Output only. URI reference of the attribution. + }, "segment": { # Segment of the content. # Output only. Segment of the content this attribution belongs to. "endIndex": 42, # Output only. End index in the given Part, measured in bytes. Offset from the start of the Part, exclusive, starting at zero. "partIndex": 42, # Output only. The index of a Part object within its parent Content object. @@ -918,6 +1081,9 @@

Method Details

}, }, ], + "retrievalQueries": [ # Optional. Queries executed by the retrieval tools. + "A String", + ], "webSearchQueries": [ # Optional. Web search queries for the following-up web search. "A String", ], diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.ragCorpora.html b/docs/dyn/aiplatform_v1beta1.projects.locations.ragCorpora.html index 98616af8ad..e255cbab28 100644 --- a/docs/dyn/aiplatform_v1beta1.projects.locations.ragCorpora.html +++ b/docs/dyn/aiplatform_v1beta1.projects.locations.ragCorpora.html @@ -87,10 +87,174 @@

Instance Methods

close()

Close httplib2 connections.

+

+ create(parent, body=None, x__xgafv=None)

+

Creates a RagCorpus.

+

+ delete(name, force=None, x__xgafv=None)

+

Deletes a RagCorpus.

+

+ get(name, x__xgafv=None)

+

Gets a RagCorpus.

+

+ list(parent, pageSize=None, pageToken=None, x__xgafv=None)

+

Lists RagCorpora in a Location.

+

+ list_next()

+

Retrieves the next page of results.

Method Details

close()
Close httplib2 connections.
+
+ create(parent, body=None, x__xgafv=None) +
Creates a RagCorpus.
+
+Args:
+  parent: string, Required. The resource name of the Location to create the RagCorpus in. Format: `projects/{project}/locations/{location}` (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # A RagCorpus is a RagFile container and a project can have multiple RagCorpora.
+  "createTime": "A String", # Output only. Timestamp when this RagCorpus was created.
+  "description": "A String", # Optional. The description of the RagCorpus.
+  "displayName": "A String", # Required. The display name of the RagCorpus. The name can be up to 128 characters long and can consist of any UTF-8 characters.
+  "name": "A String", # Output only. The resource name of the RagCorpus.
+  "updateTime": "A String", # Output only. Timestamp when this RagCorpus was last updated.
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
+ +
+ delete(name, force=None, x__xgafv=None) +
Deletes a RagCorpus.
+
+Args:
+  name: string, Required. The name of the RagCorpus resource to be deleted. Format: `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}` (required)
+  force: boolean, Optional. If set to true, any RagFiles in this RagCorpus will also be deleted. Otherwise, the request will only work if the RagCorpus has no RagFiles.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
+ +
+ get(name, x__xgafv=None) +
Gets a RagCorpus.
+
+Args:
+  name: string, Required. The name of the RagCorpus resource. Format: `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}` (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A RagCorpus is a RagFile container and a project can have multiple RagCorpora.
+  "createTime": "A String", # Output only. Timestamp when this RagCorpus was created.
+  "description": "A String", # Optional. The description of the RagCorpus.
+  "displayName": "A String", # Required. The display name of the RagCorpus. The name can be up to 128 characters long and can consist of any UTF-8 characters.
+  "name": "A String", # Output only. The resource name of the RagCorpus.
+  "updateTime": "A String", # Output only. Timestamp when this RagCorpus was last updated.
+}
+
+ +
+ list(parent, pageSize=None, pageToken=None, x__xgafv=None) +
Lists RagCorpora in a Location.
+
+Args:
+  parent: string, Required. The resource name of the Location from which to list the RagCorpora. Format: `projects/{project}/locations/{location}` (required)
+  pageSize: integer, Optional. The standard list page size.
+  pageToken: string, Optional. The standard list page token. Typically obtained via ListRagCorporaResponse.next_page_token of the previous VertexRagDataService.ListRagCorpora call.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response message for VertexRagDataService.ListRagCorpora.
+  "nextPageToken": "A String", # A token to retrieve the next page of results. Pass to ListRagCorporaRequest.page_token to obtain that page.
+  "ragCorpora": [ # List of RagCorpora in the requested page.
+    { # A RagCorpus is a RagFile container and a project can have multiple RagCorpora.
+      "createTime": "A String", # Output only. Timestamp when this RagCorpus was created.
+      "description": "A String", # Optional. The description of the RagCorpus.
+      "displayName": "A String", # Required. The display name of the RagCorpus. The name can be up to 128 characters long and can consist of any UTF-8 characters.
+      "name": "A String", # Output only. The resource name of the RagCorpus.
+      "updateTime": "A String", # Output only. Timestamp when this RagCorpus was last updated.
+    },
+  ],
+}
+
+ +
+ list_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+ \ No newline at end of file diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.ragCorpora.ragFiles.html b/docs/dyn/aiplatform_v1beta1.projects.locations.ragCorpora.ragFiles.html index 91817d8cb9..ff8322bde4 100644 --- a/docs/dyn/aiplatform_v1beta1.projects.locations.ragCorpora.ragFiles.html +++ b/docs/dyn/aiplatform_v1beta1.projects.locations.ragCorpora.ragFiles.html @@ -82,10 +82,221 @@

Instance Methods

close()

Close httplib2 connections.

+

+ delete(name, x__xgafv=None)

+

Deletes a RagFile.

+

+ get(name, x__xgafv=None)

+

Gets a RagFile.

+

+ import_(parent, body=None, x__xgafv=None)

+

Imports files from Google Cloud Storage or Google Drive into a RagCorpus.</p>

+

+ list(parent, pageSize=None, pageToken=None, x__xgafv=None)

+

Lists RagFiles in a RagCorpus.

+

+ list_next()

+

Retrieves the next page of results.

Method Details

close()
Close httplib2 connections.
+
+ delete(name, x__xgafv=None) +
Deletes a RagFile.
+
+Args:
+  name: string, Required. The name of the RagFile resource to be deleted. Format: `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}/ragFiles/{rag_file}` (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
+ +
+ get(name, x__xgafv=None) +
Gets a RagFile.
+
+Args:
+  name: string, Required. The name of the RagFile resource. Format: `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}/ragFiles/{rag_file}` (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A RagFile contains user data for chunking, embedding and indexing.
+  "createTime": "A String", # Output only. Timestamp when this RagFile was created.
+  "description": "A String", # Optional. The description of the RagFile.
+  "directUploadSource": { # The input content is encapsulated and uploaded in the request. # Output only. The RagFile is encapsulated and uploaded in the UploadRagFile request.
+  },
+  "displayName": "A String", # Required. The display name of the RagFile. The name can be up to 128 characters long and can consist of any UTF-8 characters.
+  "gcsSource": { # The Google Cloud Storage location for the input content. # Output only. Google Cloud Storage location of the RagFile. It does not support wildcards in the GCS uri for now.
+    "uris": [ # Required. Google Cloud Storage URI(-s) to the input file(s). May contain wildcards. For more information on wildcards, see https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames.
+      "A String",
+    ],
+  },
+  "googleDriveSource": { # The Google Drive location for the input content. # Output only. Google Drive location. Supports importing individual files as well as Google Drive folders.
+    "resourceIds": [ # Required. Google Drive resource IDs.
+      { # The type and ID of the Google Drive resource.
+        "resourceId": "A String", # Required. The ID of the Google Drive resource.
+        "resourceType": "A String", # Required. The type of the Google Drive resource.
+      },
+    ],
+  },
+  "name": "A String", # Output only. The resource name of the RagFile.
+  "ragFileType": "A String", # Output only. The type of the RagFile.
+  "sizeBytes": "A String", # Output only. The size of the RagFile in bytes.
+  "updateTime": "A String", # Output only. Timestamp when this RagFile was last updated.
+}
+
+ +
+ import_(parent, body=None, x__xgafv=None) +
<pre>Imports files from Google Cloud Storage or Google Drive into a RagCorpus.
+
+Args:
+  parent: string, Required. The name of the RagCorpus resource into which to import files. Format: `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}` (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request message for VertexRagDataService.ImportRagFiles.
+    &quot;importRagFilesConfig&quot;: { # Config for importing RagFiles. # Required. The config for the RagFiles to be synced and imported into the RagCorpus via VertexRagDataService.ImportRagFiles.
+    "gcsSource": { # The Google Cloud Storage location for the input content. # Google Cloud Storage location. Supports importing individual files as well as entire Google Cloud Storage directories. Sample formats: * "gs://bucket_name/my_directory/object_name/my_file.txt". * "gs://bucket_name/my_directory"
+      "uris": [ # Required. Google Cloud Storage URI(-s) to the input file(s). May contain wildcards. For more information on wildcards, see https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames.
+        "A String",
+      ],
+    },
+    "googleDriveSource": { # The Google Drive location for the input content. # Google Drive location. Supports importing individual files as well as Google Drive folders.
+      "resourceIds": [ # Required. Google Drive resource IDs.
+        { # The type and ID of the Google Drive resource.
+          "resourceId": "A String", # Required. The ID of the Google Drive resource.
+          "resourceType": "A String", # Required. The type of the Google Drive resource.
+        },
+      ],
+    },
+    "ragFileChunkingConfig": { # Specifies the size and overlap of chunks for RagFiles. # Specifies the size and overlap of chunks after importing RagFiles.
+      "chunkOverlap": 42, # The overlap between chunks.
+      "chunkSize": 42, # The size of the chunks.
+    },
+  },
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
+ +
+ list(parent, pageSize=None, pageToken=None, x__xgafv=None) +
Lists RagFiles in a RagCorpus.
+
+Args:
+  parent: string, Required. The resource name of the RagCorpus from which to list the RagFiles. Format: `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}` (required)
+  pageSize: integer, Optional. The standard list page size.
+  pageToken: string, Optional. The standard list page token. Typically obtained via ListRagFilesResponse.next_page_token of the previous VertexRagDataService.ListRagFiles call.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response message for VertexRagDataService.ListRagFiles.
+  "nextPageToken": "A String", # A token to retrieve the next page of results. Pass to ListRagFilesRequest.page_token to obtain that page.
+  "ragFiles": [ # List of RagFiles in the requested page.
+    { # A RagFile contains user data for chunking, embedding and indexing.
+      "createTime": "A String", # Output only. Timestamp when this RagFile was created.
+      "description": "A String", # Optional. The description of the RagFile.
+      "directUploadSource": { # The input content is encapsulated and uploaded in the request. # Output only. The RagFile is encapsulated and uploaded in the UploadRagFile request.
+      },
+      "displayName": "A String", # Required. The display name of the RagFile. The name can be up to 128 characters long and can consist of any UTF-8 characters.
+      "gcsSource": { # The Google Cloud Storage location for the input content. # Output only. Google Cloud Storage location of the RagFile. It does not support wildcards in the GCS uri for now.
+        "uris": [ # Required. Google Cloud Storage URI(-s) to the input file(s). May contain wildcards. For more information on wildcards, see https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames.
+          "A String",
+        ],
+      },
+      "googleDriveSource": { # The Google Drive location for the input content. # Output only. Google Drive location. Supports importing individual files as well as Google Drive folders.
+        "resourceIds": [ # Required. Google Drive resource IDs.
+          { # The type and ID of the Google Drive resource.
+            "resourceId": "A String", # Required. The ID of the Google Drive resource.
+            "resourceType": "A String", # Required. The type of the Google Drive resource.
+          },
+        ],
+      },
+      "name": "A String", # Output only. The resource name of the RagFile.
+      "ragFileType": "A String", # Output only. The type of the RagFile.
+      "sizeBytes": "A String", # Output only. The size of the RagFile in bytes.
+      "updateTime": "A String", # Output only. Timestamp when this RagFile was last updated.
+    },
+  ],
+}
+
+ +
+ list_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+ \ No newline at end of file diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.reasoningEngines.html b/docs/dyn/aiplatform_v1beta1.projects.locations.reasoningEngines.html index 722b8002e1..b7501e9650 100644 --- a/docs/dyn/aiplatform_v1beta1.projects.locations.reasoningEngines.html +++ b/docs/dyn/aiplatform_v1beta1.projects.locations.reasoningEngines.html @@ -82,10 +82,247 @@

Instance Methods

close()

Close httplib2 connections.

+

+ create(parent, body=None, x__xgafv=None)

+

Creates a reasoning engine.

+

+ delete(name, x__xgafv=None)

+

Deletes a reasoning engine.

+

+ get(name, x__xgafv=None)

+

Gets a reasoning engine.

+

+ list(parent, filter=None, pageSize=None, pageToken=None, x__xgafv=None)

+

Lists reasoning engines in a location.

+

+ list_next()

+

Retrieves the next page of results.

+

+ query(name, body=None, x__xgafv=None)

+

Queries using a reasoning engine.

Method Details

close()
Close httplib2 connections.
+
+ create(parent, body=None, x__xgafv=None) +
Creates a reasoning engine.
+
+Args:
+  parent: string, Required. The resource name of the Location to create the ReasoningEngine in. Format: `projects/{project}/locations/{location}` (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # ReasoningEngine provides a customizable runtime for models to determine which actions to take and in which order.
+  "createTime": "A String", # Output only. Timestamp when this ReasoningEngine was created.
+  "description": "A String", # Optional. The description of the ReasoningEngine.
+  "displayName": "A String", # Required. The display name of the ReasoningEngine.
+  "etag": "A String", # Optional. Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens.
+  "name": "A String", # Identifier. The resource name of the ReasoningEngine.
+  &quot;spec&quot;: { # ReasoningEngine configurations. # Required. Configurations of the ReasoningEngine.
+    "classMethods": [ # Optional. Declarations for object class methods.
+      {
+        "a_key": "", # Properties of the object.
+      },
+    ],
+    "packageSpec": { # User provided package spec like pickled object and package requirements. # Required. User provided package spec of the ReasoningEngine.
+      "dependencyFilesGcsUri": "A String", # Optional. The Cloud Storage URI of the dependency files in tar.gz format.
+      "pickleObjectGcsUri": "A String", # Optional. The Cloud Storage URI of the pickled python object.
+      "pythonVersion": "A String", # Optional. The Python version. Currently support 3.8, 3.9, 3.10, 3.11. If not specified, default value is 3.10.
+      &quot;requirementsGcsUri&quot;: &quot;A String&quot;, # Optional. The Cloud Storage URI of the `requirements.txt` file.
+    },
+  },
+  "updateTime": "A String", # Output only. Timestamp when this ReasoningEngine was most recently updated.
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
+ +
+ delete(name, x__xgafv=None) +
Deletes a reasoning engine.
+
+Args:
+  name: string, Required. The name of the ReasoningEngine resource to be deleted. Format: `projects/{project}/locations/{location}/reasoningEngines/{reasoning_engine}` (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
+ +
+ get(name, x__xgafv=None) +
Gets a reasoning engine.
+
+Args:
+  name: string, Required. The name of the ReasoningEngine resource. Format: `projects/{project}/locations/{location}/reasoningEngines/{reasoning_engine}` (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # ReasoningEngine provides a customizable runtime for models to determine which actions to take and in which order.
+  "createTime": "A String", # Output only. Timestamp when this ReasoningEngine was created.
+  "description": "A String", # Optional. The description of the ReasoningEngine.
+  "displayName": "A String", # Required. The display name of the ReasoningEngine.
+  "etag": "A String", # Optional. Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens.
+  "name": "A String", # Identifier. The resource name of the ReasoningEngine.
+  "spec": { # ReasoningEngine configurations # Required. Configurations of the ReasoningEngine
+    "classMethods": [ # Optional. Declarations for object class methods.
+      {
+        "a_key": "", # Properties of the object.
+      },
+    ],
+    "packageSpec": { # User provided package spec like pickled object and package requirements. # Required. User provided package spec of the ReasoningEngine.
+      "dependencyFilesGcsUri": "A String", # Optional. The Cloud Storage URI of the dependency files in tar.gz format.
+      "pickleObjectGcsUri": "A String", # Optional. The Cloud Storage URI of the pickled python object.
+      "pythonVersion": "A String", # Optional. The Python version. Currently support 3.8, 3.9, 3.10, 3.11. If not specified, default value is 3.10.
+      "requirementsGcsUri": "A String", # Optional. The Cloud Storage URI of the `requirements.txt` file
+    },
+  },
+  "updateTime": "A String", # Output only. Timestamp when this ReasoningEngine was most recently updated.
+}
+
+ +
+ list(parent, filter=None, pageSize=None, pageToken=None, x__xgafv=None) +
Lists reasoning engines in a location.
+
+Args:
+  parent: string, Required. The resource name of the Location to list the ReasoningEngines from. Format: `projects/{project}/locations/{location}` (required)
+  filter: string, Optional. The standard list filter. More detail in [AIP-160](https://google.aip.dev/160).
+  pageSize: integer, Optional. The standard list page size.
+  pageToken: string, Optional. The standard list page token.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response message for ReasoningEngineService.ListReasoningEngines
+  "nextPageToken": "A String", # A token to retrieve the next page of results. Pass to ListReasoningEnginesRequest.page_token to obtain that page.
+  "reasoningEngines": [ # List of ReasoningEngines in the requested page.
+    { # ReasoningEngine provides a customizable runtime for models to determine which actions to take and in which order.
+      "createTime": "A String", # Output only. Timestamp when this ReasoningEngine was created.
+      "description": "A String", # Optional. The description of the ReasoningEngine.
+      "displayName": "A String", # Required. The display name of the ReasoningEngine.
+      "etag": "A String", # Optional. Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens.
+      "name": "A String", # Identifier. The resource name of the ReasoningEngine.
+      "spec": { # ReasoningEngine configurations # Required. Configurations of the ReasoningEngine
+        "classMethods": [ # Optional. Declarations for object class methods.
+          {
+            "a_key": "", # Properties of the object.
+          },
+        ],
+        "packageSpec": { # User provided package spec like pickled object and package requirements. # Required. User provided package spec of the ReasoningEngine.
+          "dependencyFilesGcsUri": "A String", # Optional. The Cloud Storage URI of the dependency files in tar.gz format.
+          "pickleObjectGcsUri": "A String", # Optional. The Cloud Storage URI of the pickled python object.
+          "pythonVersion": "A String", # Optional. The Python version. Currently support 3.8, 3.9, 3.10, 3.11. If not specified, default value is 3.10.
+          "requirementsGcsUri": "A String", # Optional. The Cloud Storage URI of the `requirements.txt` file
+        },
+      },
+      "updateTime": "A String", # Output only. Timestamp when this ReasoningEngine was most recently updated.
+    },
+  ],
+}
+
+ +
+ list_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+ +
+ query(name, body=None, x__xgafv=None) +
Queries using a reasoning engine.
+
+Args:
+  name: string, Required. The name of the ReasoningEngine resource to use. Format: `projects/{project}/locations/{location}/reasoningEngines/{reasoning_engine}` (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request message for ReasoningEngineExecutionService.Query.
+  "input": { # Optional. Input content provided by users in JSON object format. Examples include text query, function calling parameters, media bytes, etc.
+    "a_key": "", # Properties of the object.
+  },
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response message for ReasoningEngineExecutionService.Query
+  "output": "", # Response provided by users in JSON object format.
+}
+
+ \ No newline at end of file diff --git a/googleapiclient/discovery_cache/documents/aiplatform.v1.json b/googleapiclient/discovery_cache/documents/aiplatform.v1.json index 51dfb4ce9d..45bf9add9b 100644 --- a/googleapiclient/discovery_cache/documents/aiplatform.v1.json +++ b/googleapiclient/discovery_cache/documents/aiplatform.v1.json @@ -15974,6 +15974,47 @@ "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] +}, +"list": { +"description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/tuningJobs/{tuningJobsId}/operations", +"httpMethod": "GET", +"id": "aiplatform.projects.locations.tuningJobs.operations.list", +"parameterOrder": [ +"name" +], +"parameters": { +"filter": { +"description": "The standard list filter.", +"location": "query", +"type": "string" +}, +"name": { +"description": "The name of the operation's parent resource.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/tuningJobs/[^/]+$", +"required": true, +"type": "string" +}, +"pageSize": { +"description": "The standard list page size.", +"format": "int32", +"location": "query", +"type": "integer" +}, +"pageToken": { +"description": "The standard list page token.", +"location": "query", +"type": "string" +} +}, +"path": "v1/{+name}/operations", +"response": { +"$ref": "GoogleLongrunningListOperationsResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] } } } @@ -16039,7 +16080,7 @@ } } }, -"revision": "20240320", +"revision": "20240328", "rootUrl": "https://aiplatform.googleapis.com/", "schemas": { "CloudAiLargeModelsVisionEmbedVideoResponse": { @@ -16678,6 +16719,10 @@ "$ref": "CloudAiNlLlmProtoServicePartBlob", "description": "Inline bytes data" }, +"lmRootMetadata": { +"$ref": "CloudAiNlLlmProtoServicePartLMRootMetadata", +"description": "Metadata provides extra info for building the LM 
Root request. Note: High enough tag number for internal only fields." +}, "text": { "description": "Text input.", "type": "string" @@ -16740,6 +16785,17 @@ }, "type": "object" }, +"CloudAiNlLlmProtoServicePartLMRootMetadata": { +"description": "Metadata provides extra info for building the LM Root request.", +"id": "CloudAiNlLlmProtoServicePartLMRootMetadata", +"properties": { +"chunkId": { +"description": "Chunk id that will be used when mapping the part to the LM Root's chunk.", +"type": "string" +} +}, +"type": "object" +}, "CloudAiNlLlmProtoServicePartVideoMetadata": { "description": "Metadata describes the input video content.", "id": "CloudAiNlLlmProtoServicePartVideoMetadata", @@ -18200,11 +18256,11 @@ "type": "object" }, "GoogleCloudAiplatformV1Blob": { -"description": "Raw media bytes. Text should not be sent as raw bytes, use the 'text' field.", +"description": "Content blob. It's preferred to send as text directly rather than raw bytes.", "id": "GoogleCloudAiplatformV1Blob", "properties": { "data": { -"description": "Required. Raw bytes for media formats.", +"description": "Required. Raw bytes.", "format": "byte", "type": "string" }, @@ -19911,6 +19967,10 @@ "description": "For custom-trained Models and AutoML Tabular Models, the container of the DeployedModel instances will send `stderr` and `stdout` streams to Cloud Logging by default. Please note that the logs incur cost, which are subject to [Cloud Logging pricing](https://cloud.google.com/logging/pricing). User can disable container logging by setting this flag to true.", "type": "boolean" }, +"disableExplanations": { +"description": "If true, deploy the model without explainable feature, regardless the existence of Model.explanation_spec or explanation_spec.", +"type": "boolean" +}, "displayName": { "description": "The display name of the DeployedModel. 
If not provided upon creation, the Model's display_name is used.", "type": "string" @@ -21591,6 +21651,10 @@ "readOnly": true, "type": "string" }, +"dedicatedServingEndpoint": { +"$ref": "GoogleCloudAiplatformV1FeatureOnlineStoreDedicatedServingEndpoint", +"description": "Optional. The dedicated serving endpoint for this FeatureOnlineStore, which is different from common Vertex service endpoint." +}, "etag": { "description": "Optional. Used to perform consistent read-modify-write updates. If not set, a blind \"overwrite\" update happens.", "type": "string" @@ -21606,6 +21670,10 @@ "description": "Identifier. Name of the FeatureOnlineStore. Format: `projects/{project}/locations/{location}/featureOnlineStores/{featureOnlineStore}`", "type": "string" }, +"optimized": { +"$ref": "GoogleCloudAiplatformV1FeatureOnlineStoreOptimized", +"description": "Contains settings for the Optimized store that will be created to serve featureValues for all FeatureViews under this FeatureOnlineStore. When choose Optimized storage type, need to set PrivateServiceConnectConfig.enable_private_service_connect to use private endpoint. Otherwise will use public endpoint by default." +}, "state": { "description": "Output only. State of the featureOnlineStore.", "enum": [ @@ -21661,6 +21729,24 @@ }, "type": "object" }, +"GoogleCloudAiplatformV1FeatureOnlineStoreDedicatedServingEndpoint": { +"description": "The dedicated serving endpoint for this FeatureOnlineStore. Only need to set when you choose Optimized storage type. Public endpoint is provisioned by default.", +"id": "GoogleCloudAiplatformV1FeatureOnlineStoreDedicatedServingEndpoint", +"properties": { +"publicEndpointDomainName": { +"description": "Output only. 
This field will be populated with the domain name to use for this FeatureOnlineStore", +"readOnly": true, +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1FeatureOnlineStoreOptimized": { +"description": "Optimized storage type", +"id": "GoogleCloudAiplatformV1FeatureOnlineStoreOptimized", +"properties": {}, +"type": "object" +}, "GoogleCloudAiplatformV1FeatureSelector": { "description": "Selector for Features of an EntityType.", "id": "GoogleCloudAiplatformV1FeatureSelector", @@ -21829,6 +21915,10 @@ "$ref": "GoogleCloudAiplatformV1FeatureViewFeatureRegistrySource", "description": "Optional. Configures the features from a Feature Registry source that need to be loaded onto the FeatureOnlineStore." }, +"indexConfig": { +"$ref": "GoogleCloudAiplatformV1FeatureViewIndexConfig", +"description": "Optional. Configuration for index preparation for vector search. It contains the required configurations to create an index from source data, so that approximate nearest neighbor (a.k.a ANN) algorithms search can be performed during online serving." +}, "labels": { "additionalProperties": { "type": "string" @@ -21936,6 +22026,75 @@ }, "type": "object" }, +"GoogleCloudAiplatformV1FeatureViewIndexConfig": { +"description": "Configuration for vector indexing.", +"id": "GoogleCloudAiplatformV1FeatureViewIndexConfig", +"properties": { +"bruteForceConfig": { +"$ref": "GoogleCloudAiplatformV1FeatureViewIndexConfigBruteForceConfig", +"description": "Optional. Configuration options for using brute force search, which simply implements the standard linear search in the database for each query. It is primarily meant for benchmarking and to generate the ground truth for approximate search." +}, +"crowdingColumn": { +"description": "Optional. Column of crowding. This column contains crowding attribute which is a constraint on a neighbor list produced by FeatureOnlineStoreService.SearchNearestEntities to diversify search results. 
If NearestNeighborQuery.per_crowding_attribute_neighbor_count is set to K in SearchNearestEntitiesRequest, it's guaranteed that no more than K entities of the same crowding attribute are returned in the response.", +"type": "string" +}, +"distanceMeasureType": { +"description": "Optional. The distance measure used in nearest neighbor search.", +"enum": [ +"DISTANCE_MEASURE_TYPE_UNSPECIFIED", +"SQUARED_L2_DISTANCE", +"COSINE_DISTANCE", +"DOT_PRODUCT_DISTANCE" +], +"enumDescriptions": [ +"Should not be set.", +"Euclidean (L_2) Distance.", +"Cosine Distance. Defined as 1 - cosine similarity. We strongly suggest using DOT_PRODUCT_DISTANCE + UNIT_L2_NORM instead of COSINE distance. Our algorithms have been more optimized for DOT_PRODUCT distance which, when combined with UNIT_L2_NORM, is mathematically equivalent to COSINE distance and results in the same ranking.", +"Dot Product Distance. Defined as a negative of the dot product." +], +"type": "string" +}, +"embeddingColumn": { +"description": "Optional. Column of embedding. This column contains the source data to create index for vector search. embedding_column must be set when using vector search.", +"type": "string" +}, +"embeddingDimension": { +"description": "Optional. The number of dimensions of the input embedding.", +"format": "int32", +"type": "integer" +}, +"filterColumns": { +"description": "Optional. Columns of features that're used to filter vector search results.", +"items": { +"type": "string" +}, +"type": "array" +}, +"treeAhConfig": { +"$ref": "GoogleCloudAiplatformV1FeatureViewIndexConfigTreeAHConfig", +"description": "Optional. Configuration options for the tree-AH algorithm (Shallow tree + Asymmetric Hashing). 
Please refer to this paper for more details: https://arxiv.org/abs/1908.10396" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1FeatureViewIndexConfigBruteForceConfig": { +"description": "Configuration options for using brute force search.", +"id": "GoogleCloudAiplatformV1FeatureViewIndexConfigBruteForceConfig", +"properties": {}, +"type": "object" +}, +"GoogleCloudAiplatformV1FeatureViewIndexConfigTreeAHConfig": { +"description": "Configuration options for the tree-AH algorithm.", +"id": "GoogleCloudAiplatformV1FeatureViewIndexConfigTreeAHConfig", +"properties": { +"leafNodeEmbeddingCount": { +"description": "Optional. Number of embeddings on each leaf node. The default value is 1000 if not set.", +"format": "int64", +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudAiplatformV1FeatureViewSync": { "description": "FeatureViewSync is a representation of sync operation which copies data from data source to Feature View in Online Store.", "id": "GoogleCloudAiplatformV1FeatureViewSync", @@ -22532,6 +22691,10 @@ }, "type": "array" }, +"systemInstruction": { +"$ref": "GoogleCloudAiplatformV1Content", +"description": "Optional. The user provided system instructions for the model. Note: only text should be used in parts and content in each part will be in a separate paragraph." +}, "tools": { "description": "Optional. A list of `Tools` the model may use to generate the next response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model.", "items": { @@ -22635,11 +22798,21 @@ "format": "int32", "type": "integer" }, +"frequencyPenalty": { +"description": "Optional. Frequency penalties.", +"format": "float", +"type": "number" +}, "maxOutputTokens": { "description": "Optional. The maximum number of output tokens to generate per message.", "format": "int32", "type": "integer" }, +"presencePenalty": { +"description": "Optional. 
Positive penalties.", +"format": "float", +"type": "number" +}, "stopSequences": { "description": "Optional. Stop sequences.", "items": { @@ -22764,6 +22937,13 @@ }, "type": "array" }, +"retrievalQueries": { +"description": "Optional. Queries executed by the retrieval tools.", +"items": { +"type": "string" +}, +"type": "array" +}, "webSearchQueries": { "description": "Optional. Web search queries for the following-up web search.", "items": { @@ -27127,6 +27307,7 @@ "RUNNING", "STOPPING", "ERROR", +"REBOOTING", "UPDATING" ], "enumDescriptions": [ @@ -27135,6 +27316,7 @@ "The RUNNING state indicates the persistent resource is healthy and fully usable.", "The STOPPING state indicates the persistent resource is being deleted.", "The ERROR state indicates the persistent resource may be unusable. Details can be found in the `error` field.", +"The REBOOTING state indicates the persistent resource is being rebooted (PR is not available right now but is expected to be ready again later).", "The UPDATING state indicates the persistent resource is being updated." ], "readOnly": true, @@ -39132,7 +39314,8 @@ false "STEP_SCORE_THRESHOLDING", "STEP_MODEL_CONFIG_STOP_SEQUENCE_TRUNCATION", "STEP_CUSTOM_STOP_SEQUENCE_TRUNCATION", -"STEP_EXPECTED_SAMPLE_SIZE" +"STEP_EXPECTED_SAMPLE_SIZE", +"STEP_TREE_TRIM_TRUNCATION" ], "enumDeprecated": [ false, @@ -39150,6 +39333,7 @@ false, false, false, false, +false, false ], "enumDescriptions": [ @@ -39168,7 +39352,8 @@ false "Thresholding samples based on a minimum score.", "StopSequencePostProcessor.", "StopSequencePostProcessor.", -"Drop extra number of samples that exceeds expected_samples." +"Drop extra number of samples that exceeds expected_samples.", +"Truncated by highest end token score." 
], "type": "string" } @@ -39297,10 +39482,12 @@ false "FILTER_REASON_RAI_IMAGE_PORN", "FILTER_REASON_RAI_IMAGE_CSAM", "FILTER_REASON_RAI_IMAGE_PEDO", +"FILTER_REASON_RAI_IMAGE_CHILD", "FILTER_REASON_RAI_VIDEO_FRAME_VIOLENCE", "FILTER_REASON_RAI_VIDEO_FRAME_PORN", "FILTER_REASON_RAI_VIDEO_FRAME_CSAM", "FILTER_REASON_RAI_VIDEO_FRAME_PEDO", +"FILTER_REASON_RAI_VIDEO_FRAME_CHILD", "FILTER_REASON_RAI_CONTEXTUAL_DANGEROUS", "FILTER_REASON_RAI_GRAIL_TEXT", "FILTER_REASON_RAI_GRAIL_IMAGE", @@ -39310,7 +39497,8 @@ false "FILTER_REASON_ATLAS_BILLING", "FILTER_REASON_ATLAS_NON_ENGLISH_QUESTION", "FILTER_REASON_ATLAS_NOT_RELATED_TO_GCP", -"FILTER_REASON_ATLAS_AWS_AZURE_RELATED" +"FILTER_REASON_ATLAS_AWS_AZURE_RELATED", +"FILTER_REASON_XAI" ], "enumDescriptions": [ "Unknown filter reason.", @@ -39343,6 +39531,8 @@ false "RAI Filter", "RAI Filter", "RAI Filter", +"RAI Filter", +"RAI Filter", "Grail Text", "Grail Image", "SafetyCat.", @@ -39351,7 +39541,8 @@ false "Atlas specific topic filter for billing questions.", "Atlas specific topic filter for non english questions.", "Atlas specific topic filter for non GCP questions.", -"Atlas specific topic filter aws/azure related questions." +"Atlas specific topic filter aws/azure related questions.", +"Right now we don't do any filtering for XAI. 
Adding this just want to differentiatiat the XAI output metadata from other SafetyCat RAI output metadata" ], "type": "string" }, @@ -39465,6 +39656,9 @@ false "grailTextHarmType": { "$ref": "LearningGenaiRootHarmGrailTextHarmType" }, +"imageChild": { +"type": "boolean" +}, "imageCsam": { "type": "boolean" }, @@ -39492,6 +39686,9 @@ false "format": "double", "type": "number" }, +"videoFrameChild": { +"type": "boolean" +}, "videoFrameCsam": { "type": "boolean" }, @@ -39849,6 +40046,14 @@ false }, "modelId": { "type": "string" +}, +"pickedAsFallback": { +"description": "If true, the model was selected as a fallback, since no model met requirements.", +"type": "boolean" +}, +"selected": { +"description": "If true, the model was selected since it met the requriements.", +"type": "boolean" } }, "type": "object" diff --git a/googleapiclient/discovery_cache/documents/aiplatform.v1beta1.json b/googleapiclient/discovery_cache/documents/aiplatform.v1beta1.json index c1f9316330..94dceeecaa 100644 --- a/googleapiclient/discovery_cache/documents/aiplatform.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/aiplatform.v1beta1.json @@ -305,10 +305,82 @@ }, "protocol": "rest", "resources": { +"media": { +"methods": { +"upload": { +"description": "Upload a file into a RagCorpus.", +"flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/ragCorpora/{ragCorporaId}/ragFiles:upload", +"httpMethod": "POST", +"id": "aiplatform.media.upload", +"mediaUpload": { +"accept": [ +"*/*" +], +"protocols": { +"simple": { +"multipart": true, +"path": "/upload/v1beta1/{+parent}/ragFiles:upload" +} +} +}, +"parameterOrder": [ +"parent" +], +"parameters": { +"parent": { +"description": "Required. The name of the RagCorpus resource into which to upload the file. 
Format: `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}`", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/ragCorpora/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1beta1/{+parent}/ragFiles:upload", +"request": { +"$ref": "GoogleCloudAiplatformV1beta1UploadRagFileRequest" +}, +"response": { +"$ref": "GoogleCloudAiplatformV1beta1UploadRagFileResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +], +"supportsMediaUpload": true +} +} +}, "projects": { "resources": { "locations": { "methods": { +"evaluateInstances": { +"description": "Evaluates instances based on a given metric.", +"flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}:evaluateInstances", +"httpMethod": "POST", +"id": "aiplatform.projects.locations.evaluateInstances", +"parameterOrder": [ +"location" +], +"parameters": { +"location": { +"description": "Required. The resource name of the Location to evaluate the instances. Format: `projects/{project}/locations/{location}`", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1beta1/{+location}:evaluateInstances", +"request": { +"$ref": "GoogleCloudAiplatformV1beta1EvaluateInstancesRequest" +}, +"response": { +"$ref": "GoogleCloudAiplatformV1beta1EvaluateInstancesResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, "get": { "description": "Gets information about a location.", "flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}", @@ -374,6 +446,34 @@ "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] +}, +"retrieveContexts": { +"description": "Retrieves relevant contexts for a query.", +"flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}:retrieveContexts", +"httpMethod": "POST", +"id": "aiplatform.projects.locations.retrieveContexts", +"parameterOrder": [ +"parent" +], +"parameters": { +"parent": { +"description": "Required. 
The resource name of the Location from which to retrieve RagContexts. The users must have permission to make a call in the project. Format: `projects/{project}/locations/{location}`.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1beta1/{+parent}:retrieveContexts", +"request": { +"$ref": "GoogleCloudAiplatformV1beta1RetrieveContextsRequest" +}, +"response": { +"$ref": "GoogleCloudAiplatformV1beta1RetrieveContextsResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] } }, "resources": { @@ -14239,6 +14339,127 @@ } }, "ragCorpora": { +"methods": { +"create": { +"description": "Creates a RagCorpus.", +"flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/ragCorpora", +"httpMethod": "POST", +"id": "aiplatform.projects.locations.ragCorpora.create", +"parameterOrder": [ +"parent" +], +"parameters": { +"parent": { +"description": "Required. The resource name of the Location to create the RagCorpus in. Format: `projects/{project}/locations/{location}`", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1beta1/{+parent}/ragCorpora", +"request": { +"$ref": "GoogleCloudAiplatformV1beta1RagCorpus" +}, +"response": { +"$ref": "GoogleLongrunningOperation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"delete": { +"description": "Deletes a RagCorpus.", +"flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/ragCorpora/{ragCorporaId}", +"httpMethod": "DELETE", +"id": "aiplatform.projects.locations.ragCorpora.delete", +"parameterOrder": [ +"name" +], +"parameters": { +"force": { +"description": "Optional. If set to true, any RagFiles in this RagCorpus will also be deleted. Otherwise, the request will only work if the RagCorpus has no RagFiles.", +"location": "query", +"type": "boolean" +}, +"name": { +"description": "Required. 
The name of the RagCorpus resource to be deleted. Format: `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}`", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/ragCorpora/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1beta1/{+name}", +"response": { +"$ref": "GoogleLongrunningOperation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"get": { +"description": "Gets a RagCorpus.", +"flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/ragCorpora/{ragCorporaId}", +"httpMethod": "GET", +"id": "aiplatform.projects.locations.ragCorpora.get", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Required. The name of the RagCorpus resource. Format: `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}`", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/ragCorpora/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1beta1/{+name}", +"response": { +"$ref": "GoogleCloudAiplatformV1beta1RagCorpus" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"list": { +"description": "Lists RagCorpora in a Location.", +"flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/ragCorpora", +"httpMethod": "GET", +"id": "aiplatform.projects.locations.ragCorpora.list", +"parameterOrder": [ +"parent" +], +"parameters": { +"pageSize": { +"description": "Optional. The standard list page size.", +"format": "int32", +"location": "query", +"type": "integer" +}, +"pageToken": { +"description": "Optional. The standard list page token. Typically obtained via ListRagCorporaResponse.next_page_token of the previous VertexRagDataService.ListRagCorpora call.", +"location": "query", +"type": "string" +}, +"parent": { +"description": "Required. The resource name of the Location from which to list the RagCorpora. 
Format: `projects/{project}/locations/{location}`", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1beta1/{+parent}/ragCorpora", +"response": { +"$ref": "GoogleCloudAiplatformV1beta1ListRagCorporaResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +} +}, "resources": { "operations": { "methods": { @@ -14392,6 +14613,122 @@ } }, "ragFiles": { +"methods": { +"delete": { +"description": "Deletes a RagFile.", +"flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/ragCorpora/{ragCorporaId}/ragFiles/{ragFilesId}", +"httpMethod": "DELETE", +"id": "aiplatform.projects.locations.ragCorpora.ragFiles.delete", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Required. The name of the RagFile resource to be deleted. Format: `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}/ragFiles/{rag_file}`", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/ragCorpora/[^/]+/ragFiles/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1beta1/{+name}", +"response": { +"$ref": "GoogleLongrunningOperation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"get": { +"description": "Gets a RagFile.", +"flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/ragCorpora/{ragCorporaId}/ragFiles/{ragFilesId}", +"httpMethod": "GET", +"id": "aiplatform.projects.locations.ragCorpora.ragFiles.get", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Required. The name of the RagFile resource. 
Format: `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}/ragFiles/{rag_file}`", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/ragCorpora/[^/]+/ragFiles/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1beta1/{+name}", +"response": { +"$ref": "GoogleCloudAiplatformV1beta1RagFile" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"import": { +"description": "Import files from Google Cloud Storage or Google Drive into a RagCorpus.", +"flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/ragCorpora/{ragCorporaId}/ragFiles:import", +"httpMethod": "POST", +"id": "aiplatform.projects.locations.ragCorpora.ragFiles.import", +"parameterOrder": [ +"parent" +], +"parameters": { +"parent": { +"description": "Required. The name of the RagCorpus resource into which to import files. Format: `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}`", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/ragCorpora/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1beta1/{+parent}/ragFiles:import", +"request": { +"$ref": "GoogleCloudAiplatformV1beta1ImportRagFilesRequest" +}, +"response": { +"$ref": "GoogleLongrunningOperation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"list": { +"description": "Lists RagFiles in a RagCorpus.", +"flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/ragCorpora/{ragCorporaId}/ragFiles", +"httpMethod": "GET", +"id": "aiplatform.projects.locations.ragCorpora.ragFiles.list", +"parameterOrder": [ +"parent" +], +"parameters": { +"pageSize": { +"description": "Optional. The standard list page size.", +"format": "int32", +"location": "query", +"type": "integer" +}, +"pageToken": { +"description": "Optional. The standard list page token. 
Typically obtained via ListRagFilesResponse.next_page_token of the previous VertexRagDataService.ListRagFiles call.", +"location": "query", +"type": "string" +}, +"parent": { +"description": "Required. The resource name of the RagCorpus from which to list the RagFiles. Format: `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}`", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/ragCorpora/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1beta1/{+parent}/ragFiles", +"response": { +"$ref": "GoogleCloudAiplatformV1beta1ListRagFilesResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +} +}, "resources": { "operations": { "methods": { @@ -14549,6 +14886,155 @@ } }, "reasoningEngines": { +"methods": { +"create": { +"description": "Creates a reasoning engine.", +"flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/reasoningEngines", +"httpMethod": "POST", +"id": "aiplatform.projects.locations.reasoningEngines.create", +"parameterOrder": [ +"parent" +], +"parameters": { +"parent": { +"description": "Required. The resource name of the Location to create the ReasoningEngine in. Format: `projects/{project}/locations/{location}`", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1beta1/{+parent}/reasoningEngines", +"request": { +"$ref": "GoogleCloudAiplatformV1beta1ReasoningEngine" +}, +"response": { +"$ref": "GoogleLongrunningOperation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"delete": { +"description": "Deletes a reasoning engine.", +"flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/reasoningEngines/{reasoningEnginesId}", +"httpMethod": "DELETE", +"id": "aiplatform.projects.locations.reasoningEngines.delete", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Required. The name of the ReasoningEngine resource to be deleted. 
Format: `projects/{project}/locations/{location}/reasoningEngines/{reasoning_engine}`", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/reasoningEngines/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1beta1/{+name}", +"response": { +"$ref": "GoogleLongrunningOperation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"get": { +"description": "Gets a reasoning engine.", +"flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/reasoningEngines/{reasoningEnginesId}", +"httpMethod": "GET", +"id": "aiplatform.projects.locations.reasoningEngines.get", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Required. The name of the ReasoningEngine resource. Format: `projects/{project}/locations/{location}/reasoningEngines/{reasoning_engine}`", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/reasoningEngines/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1beta1/{+name}", +"response": { +"$ref": "GoogleCloudAiplatformV1beta1ReasoningEngine" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"list": { +"description": "Lists reasoning engines in a location.", +"flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/reasoningEngines", +"httpMethod": "GET", +"id": "aiplatform.projects.locations.reasoningEngines.list", +"parameterOrder": [ +"parent" +], +"parameters": { +"filter": { +"description": "Optional. The standard list filter. More detail in [AIP-160](https://google.aip.dev/160).", +"location": "query", +"type": "string" +}, +"pageSize": { +"description": "Optional. The standard list page size.", +"format": "int32", +"location": "query", +"type": "integer" +}, +"pageToken": { +"description": "Optional. The standard list page token.", +"location": "query", +"type": "string" +}, +"parent": { +"description": "Required. The resource name of the Location to list the ReasoningEngines from. 
Format: `projects/{project}/locations/{location}`", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1beta1/{+parent}/reasoningEngines", +"response": { +"$ref": "GoogleCloudAiplatformV1beta1ListReasoningEnginesResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"query": { +"description": "Queries using a reasoning engine.", +"flatPath": "v1beta1/projects/{projectsId}/locations/{locationsId}/reasoningEngines/{reasoningEnginesId}:query", +"httpMethod": "POST", +"id": "aiplatform.projects.locations.reasoningEngines.query", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Required. The name of the ReasoningEngine resource to use. Format: `projects/{project}/locations/{location}/reasoningEngines/{reasoning_engine}`", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/reasoningEngines/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1beta1/{+name}:query", +"request": { +"$ref": "GoogleCloudAiplatformV1beta1QueryReasoningEngineRequest" +}, +"response": { +"$ref": "GoogleCloudAiplatformV1beta1QueryReasoningEngineResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +} +}, "resources": { "operations": { "methods": { @@ -18252,7 +18738,7 @@ } } }, -"revision": "20240320", +"revision": "20240328", "rootUrl": "https://aiplatform.googleapis.com/", "schemas": { "CloudAiLargeModelsVisionEmbedVideoResponse": { @@ -18891,6 +19377,10 @@ "$ref": "CloudAiNlLlmProtoServicePartBlob", "description": "Inline bytes data" }, +"lmRootMetadata": { +"$ref": "CloudAiNlLlmProtoServicePartLMRootMetadata", +"description": "Metadata provides extra info for building the LM Root request. Note: High enough tag number for internal only fields." 
+}, "text": { "description": "Text input.", "type": "string" @@ -18953,6 +19443,17 @@ }, "type": "object" }, +"CloudAiNlLlmProtoServicePartLMRootMetadata": { +"description": "Metadata provides extra info for building the LM Root request.", +"id": "CloudAiNlLlmProtoServicePartLMRootMetadata", +"properties": { +"chunkId": { +"description": "Chunk id that will be used when mapping the part to the LM Root's chunk.", +"type": "string" +} +}, +"type": "object" +}, "CloudAiNlLlmProtoServicePartVideoMetadata": { "description": "Metadata describes the input video content.", "id": "CloudAiNlLlmProtoServicePartVideoMetadata", @@ -20603,12 +21104,79 @@ }, "type": "object" }, +"GoogleCloudAiplatformV1beta1BleuInput": { +"description": "Input for bleu metric.", +"id": "GoogleCloudAiplatformV1beta1BleuInput", +"properties": { +"instances": { +"description": "Required. Repeated bleu instances.", +"items": { +"$ref": "GoogleCloudAiplatformV1beta1BleuInstance" +}, +"type": "array" +}, +"metricSpec": { +"$ref": "GoogleCloudAiplatformV1beta1BleuSpec", +"description": "Required. Spec for bleu score metric." +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1BleuInstance": { +"description": "Spec for bleu instance.", +"id": "GoogleCloudAiplatformV1beta1BleuInstance", +"properties": { +"prediction": { +"description": "Required. Output of the evaluated model.", +"type": "string" +}, +"reference": { +"description": "Required. Ground truth used to compare against the prediction.", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1BleuMetricValue": { +"description": "Bleu metric value for an instance.", +"id": "GoogleCloudAiplatformV1beta1BleuMetricValue", +"properties": { +"score": { +"description": "Output only. 
Bleu score.", +"format": "float", +"readOnly": true, +"type": "number" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1BleuResults": { +"description": "Results for bleu metric.", +"id": "GoogleCloudAiplatformV1beta1BleuResults", +"properties": { +"bleuMetricValues": { +"description": "Output only. Bleu metric values.", +"items": { +"$ref": "GoogleCloudAiplatformV1beta1BleuMetricValue" +}, +"readOnly": true, +"type": "array" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1BleuSpec": { +"description": "Spec for bleu score metric - calculates the precision of n-grams in the prediction as compared to reference - returns a score ranging between 0 to 1.", +"id": "GoogleCloudAiplatformV1beta1BleuSpec", +"properties": {}, +"type": "object" +}, "GoogleCloudAiplatformV1beta1Blob": { -"description": "Raw media bytes. Text should not be sent as raw bytes, use the 'text' field.", +"description": "Content blob. It's preferred to send as text directly rather than raw bytes.", "id": "GoogleCloudAiplatformV1beta1Blob", "properties": { "data": { -"description": "Required. Raw bytes for media formats.", +"description": "Required. Raw bytes.", "format": "byte", "type": "string" }, @@ -20755,17 +21323,6 @@ }, "type": "object" }, -"GoogleCloudAiplatformV1beta1CheckPoint": { -"description": "Placeholder for all checkpoint related data. Any data needed to restore a request and more go/vertex-extension-query-operation", -"id": "GoogleCloudAiplatformV1beta1CheckPoint", -"properties": { -"content": { -"description": "Required. 
encoded checkpoint", -"type": "string" -} -}, -"type": "object" -}, "GoogleCloudAiplatformV1beta1CheckTrialEarlyStoppingStateMetatdata": { "description": "This message will be placed in the metadata field of a google.longrunning.Operation associated with a CheckTrialEarlyStoppingState request.", "id": "GoogleCloudAiplatformV1beta1CheckTrialEarlyStoppingStateMetatdata", @@ -20856,6 +21413,68 @@ }, "type": "object" }, +"GoogleCloudAiplatformV1beta1CoherenceInput": { +"description": "Input for coherence metric.", +"id": "GoogleCloudAiplatformV1beta1CoherenceInput", +"properties": { +"instance": { +"$ref": "GoogleCloudAiplatformV1beta1CoherenceInstance", +"description": "Required. Coherence instance." +}, +"metricSpec": { +"$ref": "GoogleCloudAiplatformV1beta1CoherenceSpec", +"description": "Required. Spec for coherence score metric." +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1CoherenceInstance": { +"description": "Spec for coherence instance.", +"id": "GoogleCloudAiplatformV1beta1CoherenceInstance", +"properties": { +"prediction": { +"description": "Required. Output of the evaluated model.", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1CoherenceResult": { +"description": "Spec for coherence result.", +"id": "GoogleCloudAiplatformV1beta1CoherenceResult", +"properties": { +"confidence": { +"description": "Output only. Confidence for coherence score.", +"format": "float", +"readOnly": true, +"type": "number" +}, +"explanation": { +"description": "Output only. Explanation for coherence score.", +"readOnly": true, +"type": "string" +}, +"score": { +"description": "Output only. Coherence score.", +"format": "float", +"readOnly": true, +"type": "number" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1CoherenceSpec": { +"description": "Spec for coherence score metric.", +"id": "GoogleCloudAiplatformV1beta1CoherenceSpec", +"properties": { +"version": { +"description": "Optional. 
Which version to use for evaluation.", +"format": "int32", +"type": "integer" +} +}, +"type": "object" +}, "GoogleCloudAiplatformV1beta1CompleteTrialRequest": { "description": "Request message for VizierService.CompleteTrial.", "id": "GoogleCloudAiplatformV1beta1CompleteTrialRequest", @@ -22518,6 +23137,12 @@ }, "type": "object" }, +"GoogleCloudAiplatformV1beta1DirectUploadSource": { +"description": "The input content is encapsulated and uploaded in the request.", +"id": "GoogleCloudAiplatformV1beta1DirectUploadSource", +"properties": {}, +"type": "object" +}, "GoogleCloudAiplatformV1beta1DiskSpec": { "description": "Represents the spec of disk options.", "id": "GoogleCloudAiplatformV1beta1DiskSpec", @@ -22774,6 +23399,204 @@ }, "type": "object" }, +"GoogleCloudAiplatformV1beta1EvaluateInstancesRequest": { +"description": "Request message for EvaluationService.EvaluateInstances.", +"id": "GoogleCloudAiplatformV1beta1EvaluateInstancesRequest", +"properties": { +"bleuInput": { +"$ref": "GoogleCloudAiplatformV1beta1BleuInput", +"description": "Instances and metric spec for bleu metric." +}, +"coherenceInput": { +"$ref": "GoogleCloudAiplatformV1beta1CoherenceInput", +"description": "Input for coherence metric." +}, +"exactMatchInput": { +"$ref": "GoogleCloudAiplatformV1beta1ExactMatchInput", +"description": "Auto metric instances. Instances and metric spec for exact match metric." +}, +"fluencyInput": { +"$ref": "GoogleCloudAiplatformV1beta1FluencyInput", +"description": "LLM-based metric instance. General text generation metrics, applicable to other categories. Input for fluency metric." +}, +"fulfillmentInput": { +"$ref": "GoogleCloudAiplatformV1beta1FulfillmentInput", +"description": "Input for fulfillment metric." +}, +"groundednessInput": { +"$ref": "GoogleCloudAiplatformV1beta1GroundednessInput", +"description": "Input for groundedness metric." 
+}, +"pairwiseQuestionAnsweringQualityInput": { +"$ref": "GoogleCloudAiplatformV1beta1PairwiseQuestionAnsweringQualityInput", +"description": "Input for pairwise question answering quality metric." +}, +"pairwiseSummarizationQualityInput": { +"$ref": "GoogleCloudAiplatformV1beta1PairwiseSummarizationQualityInput", +"description": "Input for pairwise summarization quality metric." +}, +"questionAnsweringCorrectnessInput": { +"$ref": "GoogleCloudAiplatformV1beta1QuestionAnsweringCorrectnessInput", +"description": "Input for question answering correctness metric." +}, +"questionAnsweringHelpfulnessInput": { +"$ref": "GoogleCloudAiplatformV1beta1QuestionAnsweringHelpfulnessInput", +"description": "Input for question answering helpfulness metric." +}, +"questionAnsweringQualityInput": { +"$ref": "GoogleCloudAiplatformV1beta1QuestionAnsweringQualityInput", +"description": "Input for question answering quality metric." +}, +"questionAnsweringRelevanceInput": { +"$ref": "GoogleCloudAiplatformV1beta1QuestionAnsweringRelevanceInput", +"description": "Input for question answering relevance metric." +}, +"ragContextRecallInput": { +"$ref": "GoogleCloudAiplatformV1beta1RagContextRecallInput", +"description": "Input for rag context recall metric." +}, +"responseRecallInput": { +"$ref": "GoogleCloudAiplatformV1beta1ResponseRecallInput", +"description": "Input for response recall metric." +}, +"rougeInput": { +"$ref": "GoogleCloudAiplatformV1beta1RougeInput", +"description": "Instances and metric spec for rouge metric." +}, +"safetyInput": { +"$ref": "GoogleCloudAiplatformV1beta1SafetyInput", +"description": "Input for safety metric." +}, +"summarizationHelpfulnessInput": { +"$ref": "GoogleCloudAiplatformV1beta1SummarizationHelpfulnessInput", +"description": "Input for summarization helpfulness metric." +}, +"summarizationQualityInput": { +"$ref": "GoogleCloudAiplatformV1beta1SummarizationQualityInput", +"description": "Input for summarization quality metric." 
+}, +"summarizationVerbosityInput": { +"$ref": "GoogleCloudAiplatformV1beta1SummarizationVerbosityInput", +"description": "Input for summarization verbosity metric." +}, +"toolCallValidInput": { +"$ref": "GoogleCloudAiplatformV1beta1ToolCallValidInput", +"description": "Tool call metric instances. Input for tool call valid metric." +}, +"toolNameMatchInput": { +"$ref": "GoogleCloudAiplatformV1beta1ToolNameMatchInput", +"description": "Input for tool name match metric." +}, +"toolParameterKeyMatchInput": { +"$ref": "GoogleCloudAiplatformV1beta1ToolParameterKeyMatchInput", +"description": "Input for tool parameter key match metric." +}, +"toolParameterKvMatchInput": { +"$ref": "GoogleCloudAiplatformV1beta1ToolParameterKVMatchInput", +"description": "Input for tool parameter key value match metric." +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1EvaluateInstancesResponse": { +"description": "Response message for EvaluationService.EvaluateInstances.", +"id": "GoogleCloudAiplatformV1beta1EvaluateInstancesResponse", +"properties": { +"bleuResults": { +"$ref": "GoogleCloudAiplatformV1beta1BleuResults", +"description": "Results for bleu metric." +}, +"coherenceResult": { +"$ref": "GoogleCloudAiplatformV1beta1CoherenceResult", +"description": "Result for coherence metric." +}, +"exactMatchResults": { +"$ref": "GoogleCloudAiplatformV1beta1ExactMatchResults", +"description": "Auto metric evaluation results. Results for exact match metric." +}, +"fluencyResult": { +"$ref": "GoogleCloudAiplatformV1beta1FluencyResult", +"description": "LLM-based metric evaluation result. General text generation metrics, applicable to other categories. Result for fluency metric." +}, +"fulfillmentResult": { +"$ref": "GoogleCloudAiplatformV1beta1FulfillmentResult", +"description": "Result for fulfillment metric." +}, +"groundednessResult": { +"$ref": "GoogleCloudAiplatformV1beta1GroundednessResult", +"description": "Result for groundedness metric." 
+}, +"pairwiseQuestionAnsweringQualityResult": { +"$ref": "GoogleCloudAiplatformV1beta1PairwiseQuestionAnsweringQualityResult", +"description": "Result for pairwise question answering quality metric." +}, +"pairwiseSummarizationQualityResult": { +"$ref": "GoogleCloudAiplatformV1beta1PairwiseSummarizationQualityResult", +"description": "Result for pairwise summarization quality metric." +}, +"questionAnsweringCorrectnessResult": { +"$ref": "GoogleCloudAiplatformV1beta1QuestionAnsweringCorrectnessResult", +"description": "Result for question answering correctness metric." +}, +"questionAnsweringHelpfulnessResult": { +"$ref": "GoogleCloudAiplatformV1beta1QuestionAnsweringHelpfulnessResult", +"description": "Result for question answering helpfulness metric." +}, +"questionAnsweringQualityResult": { +"$ref": "GoogleCloudAiplatformV1beta1QuestionAnsweringQualityResult", +"description": "Question answering only metrics. Result for question answering quality metric." +}, +"questionAnsweringRelevanceResult": { +"$ref": "GoogleCloudAiplatformV1beta1QuestionAnsweringRelevanceResult", +"description": "Result for question answering relevance metric." +}, +"ragContextRecallResult": { +"$ref": "GoogleCloudAiplatformV1beta1RagContextRecallResult", +"description": "RAG only metrics. Result for context recall metric." +}, +"responseRecallResult": { +"$ref": "GoogleCloudAiplatformV1beta1ResponseRecallResult", +"description": "Result for response recall metric." +}, +"rougeResults": { +"$ref": "GoogleCloudAiplatformV1beta1RougeResults", +"description": "Results for rouge metric." +}, +"safetyResult": { +"$ref": "GoogleCloudAiplatformV1beta1SafetyResult", +"description": "Result for safety metric." +}, +"summarizationHelpfulnessResult": { +"$ref": "GoogleCloudAiplatformV1beta1SummarizationHelpfulnessResult", +"description": "Result for summarization helpfulness metric." 
+}, +"summarizationQualityResult": { +"$ref": "GoogleCloudAiplatformV1beta1SummarizationQualityResult", +"description": "Summarization only metrics. Result for summarization quality metric." +}, +"summarizationVerbosityResult": { +"$ref": "GoogleCloudAiplatformV1beta1SummarizationVerbosityResult", +"description": "Result for summarization verbosity metric." +}, +"toolCallValidResults": { +"$ref": "GoogleCloudAiplatformV1beta1ToolCallValidResults", +"description": "Tool call metrics. Results for tool call valid metric." +}, +"toolNameMatchResults": { +"$ref": "GoogleCloudAiplatformV1beta1ToolNameMatchResults", +"description": "Results for tool name match metric." +}, +"toolParameterKeyMatchResults": { +"$ref": "GoogleCloudAiplatformV1beta1ToolParameterKeyMatchResults", +"description": "Results for tool parameter key match metric." +}, +"toolParameterKvMatchResults": { +"$ref": "GoogleCloudAiplatformV1beta1ToolParameterKVMatchResults", +"description": "Results for tool parameter key value match metric." +} +}, +"type": "object" +}, "GoogleCloudAiplatformV1beta1EvaluatedAnnotation": { "description": "True positive, false positive, or false negative. EvaluatedAnnotation is only available under ModelEvaluationSlice with slice of `annotationSpec` dimension.", "id": "GoogleCloudAiplatformV1beta1EvaluatedAnnotation", @@ -22896,6 +23719,73 @@ }, "type": "object" }, +"GoogleCloudAiplatformV1beta1ExactMatchInput": { +"description": "Input for exact match metric.", +"id": "GoogleCloudAiplatformV1beta1ExactMatchInput", +"properties": { +"instances": { +"description": "Required. Repeated exact match instances.", +"items": { +"$ref": "GoogleCloudAiplatformV1beta1ExactMatchInstance" +}, +"type": "array" +}, +"metricSpec": { +"$ref": "GoogleCloudAiplatformV1beta1ExactMatchSpec", +"description": "Required. Spec for exact match metric." 
+} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1ExactMatchInstance": { +"description": "Spec for exact match instance.", +"id": "GoogleCloudAiplatformV1beta1ExactMatchInstance", +"properties": { +"prediction": { +"description": "Required. Output of the evaluated model.", +"type": "string" +}, +"reference": { +"description": "Required. Ground truth used to compare against the prediction.", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1ExactMatchMetricValue": { +"description": "Exact match metric value for an instance.", +"id": "GoogleCloudAiplatformV1beta1ExactMatchMetricValue", +"properties": { +"score": { +"description": "Output only. Exact match score.", +"format": "float", +"readOnly": true, +"type": "number" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1ExactMatchResults": { +"description": "Results for exact match metric.", +"id": "GoogleCloudAiplatformV1beta1ExactMatchResults", +"properties": { +"exactMatchMetricValues": { +"description": "Output only. Exact match metric values.", +"items": { +"$ref": "GoogleCloudAiplatformV1beta1ExactMatchMetricValue" +}, +"readOnly": true, +"type": "array" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1ExactMatchSpec": { +"description": "Spec for exact match metric - returns 1 if prediction and reference exactly matches, otherwise 0.", +"id": "GoogleCloudAiplatformV1beta1ExactMatchSpec", +"properties": {}, +"type": "object" +}, "GoogleCloudAiplatformV1beta1Examples": { "description": "Example-based explainability that returns the nearest neighbors from the provided dataset.", "id": "GoogleCloudAiplatformV1beta1Examples", @@ -23129,56 +24019,6 @@ }, "type": "object" }, -"GoogleCloudAiplatformV1beta1ExecutionPlan": { -"description": "Execution plan for a request.", -"id": "GoogleCloudAiplatformV1beta1ExecutionPlan", -"properties": { -"steps": { -"description": "Required. 
Sequence of steps to execute a request.", -"items": { -"$ref": "GoogleCloudAiplatformV1beta1ExecutionPlanStep" -}, -"type": "array" -} -}, -"type": "object" -}, -"GoogleCloudAiplatformV1beta1ExecutionPlanStep": { -"description": "Single step in query execution plan.", -"id": "GoogleCloudAiplatformV1beta1ExecutionPlanStep", -"properties": { -"extensionExecution": { -"$ref": "GoogleCloudAiplatformV1beta1ExecutionPlanStepExtensionExecution", -"description": "Extension execution step." -}, -"respondToUser": { -"$ref": "GoogleCloudAiplatformV1beta1ExecutionPlanStepRespondToUser", -"description": "Respond to user step." -} -}, -"type": "object" -}, -"GoogleCloudAiplatformV1beta1ExecutionPlanStepExtensionExecution": { -"description": "Extension execution step.", -"id": "GoogleCloudAiplatformV1beta1ExecutionPlanStepExtensionExecution", -"properties": { -"extension": { -"description": "Required. extension resource name", -"type": "string" -}, -"operationId": { -"description": "Required. the operation id", -"type": "string" -} -}, -"type": "object" -}, -"GoogleCloudAiplatformV1beta1ExecutionPlanStepRespondToUser": { -"description": "Respond to user step.", -"id": "GoogleCloudAiplatformV1beta1ExecutionPlanStepRespondToUser", -"properties": {}, -"type": "object" -}, "GoogleCloudAiplatformV1beta1ExplainRequest": { "description": "Request message for PredictionService.Explain.", "id": "GoogleCloudAiplatformV1beta1ExplainRequest", @@ -24239,7 +25079,8 @@ }, "embeddingManagement": { "$ref": "GoogleCloudAiplatformV1beta1FeatureOnlineStoreEmbeddingManagement", -"description": "Optional. The settings for embedding management in FeatureOnlineStore." +"deprecated": true, +"description": "Optional. Deprecated: This field is no longer needed anymore and embedding management is automatically enabled when specifying Optimized storage type." }, "etag": { "description": "Optional. Used to perform consistent read-modify-write updates. 
If not set, a blind \"overwrite\" update happens.", @@ -24316,7 +25157,7 @@ "type": "object" }, "GoogleCloudAiplatformV1beta1FeatureOnlineStoreDedicatedServingEndpoint": { -"description": "The dedicated serving endpoint for this FeatureOnlineStore. Only need to set when you choose Optimized storage type or enable EmbeddingManagement. Will use public endpoint by default. Note, for EmbeddingManagement use case, only [DedicatedServingEndpoint.public_endpoint_domain_name] is available now.", +"description": "The dedicated serving endpoint for this FeatureOnlineStore. Only need to set when you choose Optimized storage type. Public endpoint is provisioned by default.", "id": "GoogleCloudAiplatformV1beta1FeatureOnlineStoreDedicatedServingEndpoint", "properties": { "privateServiceConnectConfig": { @@ -24337,7 +25178,8 @@ "type": "object" }, "GoogleCloudAiplatformV1beta1FeatureOnlineStoreEmbeddingManagement": { -"description": "Contains settings for embedding management.", +"deprecated": true, +"description": "Deprecated: This sub message is no longer needed anymore and embedding management is automatically enabled when specifying Optimized storage type. Contains settings for embedding management.", "id": "GoogleCloudAiplatformV1beta1FeatureOnlineStoreEmbeddingManagement", "properties": { "enabled": { @@ -24521,6 +25363,10 @@ "$ref": "GoogleCloudAiplatformV1beta1FeatureViewFeatureRegistrySource", "description": "Optional. Configures the features from a Feature Registry source that need to be loaded onto the FeatureOnlineStore." }, +"indexConfig": { +"$ref": "GoogleCloudAiplatformV1beta1FeatureViewIndexConfig", +"description": "Optional. Configuration for index preparation for vector search. It contains the required configurations to create an index from source data, so that approximate nearest neighbor (a.k.a ANN) algorithms search can be performed during online serving." 
+}, "labels": { "additionalProperties": { "type": "string" @@ -24652,6 +25498,75 @@ }, "type": "object" }, +"GoogleCloudAiplatformV1beta1FeatureViewIndexConfig": { +"description": "Configuration for vector indexing.", +"id": "GoogleCloudAiplatformV1beta1FeatureViewIndexConfig", +"properties": { +"bruteForceConfig": { +"$ref": "GoogleCloudAiplatformV1beta1FeatureViewIndexConfigBruteForceConfig", +"description": "Optional. Configuration options for using brute force search, which simply implements the standard linear search in the database for each query. It is primarily meant for benchmarking and to generate the ground truth for approximate search." +}, +"crowdingColumn": { +"description": "Optional. Column of crowding. This column contains crowding attribute which is a constraint on a neighbor list produced by FeatureOnlineStoreService.SearchNearestEntities to diversify search results. If NearestNeighborQuery.per_crowding_attribute_neighbor_count is set to K in SearchNearestEntitiesRequest, it's guaranteed that no more than K entities of the same crowding attribute are returned in the response.", +"type": "string" +}, +"distanceMeasureType": { +"description": "Optional. The distance measure used in nearest neighbor search.", +"enum": [ +"DISTANCE_MEASURE_TYPE_UNSPECIFIED", +"SQUARED_L2_DISTANCE", +"COSINE_DISTANCE", +"DOT_PRODUCT_DISTANCE" +], +"enumDescriptions": [ +"Should not be set.", +"Euclidean (L_2) Distance.", +"Cosine Distance. Defined as 1 - cosine similarity. We strongly suggest using DOT_PRODUCT_DISTANCE + UNIT_L2_NORM instead of COSINE distance. Our algorithms have been more optimized for DOT_PRODUCT distance which, when combined with UNIT_L2_NORM, is mathematically equivalent to COSINE distance and results in the same ranking.", +"Dot Product Distance. Defined as a negative of the dot product." +], +"type": "string" +}, +"embeddingColumn": { +"description": "Optional. Column of embedding. 
This column contains the source data to create index for vector search. embedding_column must be set when using vector search.", +"type": "string" +}, +"embeddingDimension": { +"description": "Optional. The number of dimensions of the input embedding.", +"format": "int32", +"type": "integer" +}, +"filterColumns": { +"description": "Optional. Columns of features that're used to filter vector search results.", +"items": { +"type": "string" +}, +"type": "array" +}, +"treeAhConfig": { +"$ref": "GoogleCloudAiplatformV1beta1FeatureViewIndexConfigTreeAHConfig", +"description": "Optional. Configuration options for the tree-AH algorithm (Shallow tree + Asymmetric Hashing). Please refer to this paper for more details: https://arxiv.org/abs/1908.10396" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1FeatureViewIndexConfigBruteForceConfig": { +"description": "Configuration options for using brute force search.", +"id": "GoogleCloudAiplatformV1beta1FeatureViewIndexConfigBruteForceConfig", +"properties": {}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1FeatureViewIndexConfigTreeAHConfig": { +"description": "Configuration options for the tree-AH algorithm.", +"id": "GoogleCloudAiplatformV1beta1FeatureViewIndexConfigTreeAHConfig", +"properties": { +"leafNodeEmbeddingCount": { +"description": "Optional. Number of embeddings on each leaf node. 
The default value is 1000 if not set.", +"format": "int64", +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudAiplatformV1beta1FeatureViewSync": { "description": "FeatureViewSync is a representation of sync operation which copies data from data source to Feature View in Online Store.", "id": "GoogleCloudAiplatformV1beta1FeatureViewSync", @@ -25216,6 +26131,68 @@ }, "type": "object" }, +"GoogleCloudAiplatformV1beta1FluencyInput": { +"description": "Input for fluency metric.", +"id": "GoogleCloudAiplatformV1beta1FluencyInput", +"properties": { +"instance": { +"$ref": "GoogleCloudAiplatformV1beta1FluencyInstance", +"description": "Required. Fluency instance." +}, +"metricSpec": { +"$ref": "GoogleCloudAiplatformV1beta1FluencySpec", +"description": "Required. Spec for fluency score metric." +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1FluencyInstance": { +"description": "Spec for fluency instance.", +"id": "GoogleCloudAiplatformV1beta1FluencyInstance", +"properties": { +"prediction": { +"description": "Required. Output of the evaluated model.", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1FluencyResult": { +"description": "Spec for fluency result.", +"id": "GoogleCloudAiplatformV1beta1FluencyResult", +"properties": { +"confidence": { +"description": "Output only. Confidence for fluency score.", +"format": "float", +"readOnly": true, +"type": "number" +}, +"explanation": { +"description": "Output only. Explanation for fluency score.", +"readOnly": true, +"type": "string" +}, +"score": { +"description": "Output only. Fluency score.", +"format": "float", +"readOnly": true, +"type": "number" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1FluencySpec": { +"description": "Spec for fluency score metric.", +"id": "GoogleCloudAiplatformV1beta1FluencySpec", +"properties": { +"version": { +"description": "Optional. 
Which version to use for evaluation.", +"format": "int32", +"type": "integer" +} +}, +"type": "object" +}, "GoogleCloudAiplatformV1beta1FractionSplit": { "description": "Assigns the input data to training, validation, and test sets as per the given fractions. Any of `training_fraction`, `validation_fraction` and `test_fraction` may optionally be provided, they must sum to up to 1. If the provided ones sum to less than 1, the remainder is assigned to sets as decided by Vertex AI. If none of the fractions are set, by default roughly 80% of data is used for training, 10% for validation, and 10% for test.", "id": "GoogleCloudAiplatformV1beta1FractionSplit", @@ -25238,6 +26215,72 @@ }, "type": "object" }, +"GoogleCloudAiplatformV1beta1FulfillmentInput": { +"description": "Input for fulfillment metric.", +"id": "GoogleCloudAiplatformV1beta1FulfillmentInput", +"properties": { +"instance": { +"$ref": "GoogleCloudAiplatformV1beta1FulfillmentInstance", +"description": "Required. Fulfillment instance." +}, +"metricSpec": { +"$ref": "GoogleCloudAiplatformV1beta1FulfillmentSpec", +"description": "Required. Spec for fulfillment score metric." +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1FulfillmentInstance": { +"description": "Spec for fulfillment instance.", +"id": "GoogleCloudAiplatformV1beta1FulfillmentInstance", +"properties": { +"instruction": { +"description": "Required. Inference instruction prompt to compare prediction with.", +"type": "string" +}, +"prediction": { +"description": "Required. Output of the evaluated model.", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1FulfillmentResult": { +"description": "Spec for fulfillment result.", +"id": "GoogleCloudAiplatformV1beta1FulfillmentResult", +"properties": { +"confidence": { +"description": "Output only. Confidence for fulfillment score.", +"format": "float", +"readOnly": true, +"type": "number" +}, +"explanation": { +"description": "Output only. 
Explanation for fulfillment score.", +"readOnly": true, +"type": "string" +}, +"score": { +"description": "Output only. Fulfillment score.", +"format": "float", +"readOnly": true, +"type": "number" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1FulfillmentSpec": { +"description": "Spec for fulfillment metric.", +"id": "GoogleCloudAiplatformV1beta1FulfillmentSpec", +"properties": { +"version": { +"description": "Optional. Which version to use for evaluation.", +"format": "int32", +"type": "integer" +} +}, +"type": "object" +}, "GoogleCloudAiplatformV1beta1FunctionCall": { "description": "A predicted [FunctionCall] returned from the model that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing the parameters and their values.", "id": "GoogleCloudAiplatformV1beta1FunctionCall", @@ -25257,6 +26300,36 @@ }, "type": "object" }, +"GoogleCloudAiplatformV1beta1FunctionCallingConfig": { +"description": "Function calling config.", +"id": "GoogleCloudAiplatformV1beta1FunctionCallingConfig", +"properties": { +"allowedFunctionNames": { +"description": "Optional. Function names to call. Only set when the Mode is ANY. Function names should match [FunctionDeclaration.name]. With mode set to ANY, model will predict a function call from the set of function names provided.", +"items": { +"type": "string" +}, +"type": "array" +}, +"mode": { +"description": "Optional. Function calling mode.", +"enum": [ +"MODE_UNSPECIFIED", +"AUTO", +"ANY", +"NONE" +], +"enumDescriptions": [ +"Unspecified function calling mode. This value should not be used.", +"Default model behavior, model decides to predict either a function call or a natural language response.", +"Model is constrained to always predicting a function call only. 
If \"allowed_function_names\" are set, the predicted function call will be limited to any one of \"allowed_function_names\", else the predicted function call will be any one of the provided \"function_declarations\".", +"Model will not predict any function call. Model behavior is same as when not passing any function declarations." +], +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudAiplatformV1beta1FunctionDeclaration": { "description": "Structured representation of a function declaration as defined by the [OpenAPI 3.0 specification](https://spec.openapis.org/oas/v3.0.3). Included in this declaration are the function name and parameters. This FunctionDeclaration is a representation of a block of code that can be used as a `Tool` by the model and executed by the client.", "id": "GoogleCloudAiplatformV1beta1FunctionDeclaration", @@ -25272,6 +26345,10 @@ "parameters": { "$ref": "GoogleCloudAiplatformV1beta1Schema", "description": "Optional. Describes the parameters to this function in JSON Schema Object format. Reflects the Open API 3.03 Parameter Object. string Key: the name of the parameter. Parameter names are case sensitive. Schema Value: the Schema defining the type used for the parameter. For function with no parameters, this can be left unset. Parameter names must start with a letter or an underscore and must only contain chars a-z, A-Z, 0-9, or underscores with a maximum length of 64. Example with 1 required and 1 optional parameter: type: OBJECT properties: param1: type: STRING param2: type: INTEGER required: - param1" +}, +"response": { +"$ref": "GoogleCloudAiplatformV1beta1Schema", +"description": "Optional. Describes the output from this function in JSON Schema format. Reflects the Open API 3.03 Response Object. The Schema defines the type used for the response value of the function." 
} }, "type": "object" @@ -25377,6 +26454,14 @@ }, "type": "array" }, +"systemInstruction": { +"$ref": "GoogleCloudAiplatformV1beta1Content", +"description": "Optional. The user provided system instructions for the model. Note: only text should be used in parts and content in each part will be in a separate paragraph." +}, +"toolConfig": { +"$ref": "GoogleCloudAiplatformV1beta1ToolConfig", +"description": "Optional. Tool config. This config is shared for all tools provided in the request." +}, "tools": { "description": "Optional. A list of `Tools` the model may use to generate the next response. A `Tool` is a piece of code that enables the system to interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model.", "items": { @@ -25480,11 +26565,21 @@ "format": "int32", "type": "integer" }, +"frequencyPenalty": { +"description": "Optional. Frequency penalties.", +"format": "float", +"type": "number" +}, "maxOutputTokens": { "description": "Optional. The maximum number of output tokens to generate per message.", "format": "int32", "type": "integer" }, +"presencePenalty": { +"description": "Optional. Positive penalties.", +"format": "float", +"type": "number" +}, "stopSequences": { "description": "Optional. Stop sequences.", "items": { @@ -25548,6 +26643,45 @@ }, "type": "object" }, +"GoogleCloudAiplatformV1beta1GoogleDriveSource": { +"description": "The Google Drive location for the input content.", +"id": "GoogleCloudAiplatformV1beta1GoogleDriveSource", +"properties": { +"resourceIds": { +"description": "Required. 
Google Drive resource IDs.", +"items": { +"$ref": "GoogleCloudAiplatformV1beta1GoogleDriveSourceResourceId" +}, +"type": "array" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1GoogleDriveSourceResourceId": { +"description": "The type and ID of the Google Drive resource.", +"id": "GoogleCloudAiplatformV1beta1GoogleDriveSourceResourceId", +"properties": { +"resourceId": { +"description": "Required. The ID of the Google Drive resource.", +"type": "string" +}, +"resourceType": { +"description": "Required. The type of the Google Drive resource.", +"enum": [ +"RESOURCE_TYPE_UNSPECIFIED", +"RESOURCE_TYPE_FILE", +"RESOURCE_TYPE_FOLDER" +], +"enumDescriptions": [ +"Unspecified resource type.", +"File resource type.", +"Folder resource type." +], +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudAiplatformV1beta1GoogleSearchRetrieval": { "description": "Tool to retrieve public web data for grounding, powered by Google.", "id": "GoogleCloudAiplatformV1beta1GoogleSearchRetrieval", @@ -25559,6 +26693,72 @@ }, "type": "object" }, +"GoogleCloudAiplatformV1beta1GroundednessInput": { +"description": "Input for groundedness metric.", +"id": "GoogleCloudAiplatformV1beta1GroundednessInput", +"properties": { +"instance": { +"$ref": "GoogleCloudAiplatformV1beta1GroundednessInstance", +"description": "Required. Groundedness instance." +}, +"metricSpec": { +"$ref": "GoogleCloudAiplatformV1beta1GroundednessSpec", +"description": "Required. Spec for groundedness metric." +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1GroundednessInstance": { +"description": "Spec for groundedness instance.", +"id": "GoogleCloudAiplatformV1beta1GroundednessInstance", +"properties": { +"context": { +"description": "Required. Background information provided in context used to compare against the prediction.", +"type": "string" +}, +"prediction": { +"description": "Required. 
Output of the evaluated model.", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1GroundednessResult": { +"description": "Spec for groundedness result.", +"id": "GoogleCloudAiplatformV1beta1GroundednessResult", +"properties": { +"confidence": { +"description": "Output only. Confidence for groundedness score.", +"format": "float", +"readOnly": true, +"type": "number" +}, +"explanation": { +"description": "Output only. Explanation for groundedness score.", +"readOnly": true, +"type": "string" +}, +"score": { +"description": "Output only. Groundedness score.", +"format": "float", +"readOnly": true, +"type": "number" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1GroundednessSpec": { +"description": "Spec for groundedness metric.", +"id": "GoogleCloudAiplatformV1beta1GroundednessSpec", +"properties": { +"version": { +"description": "Optional. Which version to use for evaluation.", +"format": "int32", +"type": "integer" +} +}, +"type": "object" +}, "GoogleCloudAiplatformV1beta1GroundingAttribution": { "description": "Grounding attribution.", "id": "GoogleCloudAiplatformV1beta1GroundingAttribution", @@ -25569,6 +26769,10 @@ "readOnly": true, "type": "number" }, +"retrievedContext": { +"$ref": "GoogleCloudAiplatformV1beta1GroundingAttributionRetrievedContext", +"description": "Optional. Attribution from context retrieved by the retrieval tools." +}, "segment": { "$ref": "GoogleCloudAiplatformV1beta1Segment", "description": "Output only. Segment of the content this attribution belongs to.", @@ -25581,6 +26785,23 @@ }, "type": "object" }, +"GoogleCloudAiplatformV1beta1GroundingAttributionRetrievedContext": { +"description": "Attribution from context retrieved by the retrieval tools.", +"id": "GoogleCloudAiplatformV1beta1GroundingAttributionRetrievedContext", +"properties": { +"title": { +"description": "Output only. Title of the attribution.", +"readOnly": true, +"type": "string" +}, +"uri": { +"description": "Output only. 
URI reference of the attribution.", +"readOnly": true, +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudAiplatformV1beta1GroundingAttributionWeb": { "description": "Attribution from the web.", "id": "GoogleCloudAiplatformV1beta1GroundingAttributionWeb", @@ -25609,6 +26830,13 @@ }, "type": "array" }, +"retrievalQueries": { +"description": "Optional. Queries executed by the retrieval tools.", +"items": { +"type": "string" +}, +"type": "array" +}, "webSearchQueries": { "description": "Optional. Web search queries for the following-up web search.", "items": { @@ -25972,6 +27200,36 @@ }, "type": "object" }, +"GoogleCloudAiplatformV1beta1ImportRagFilesConfig": { +"description": "Config for importing RagFiles.", +"id": "GoogleCloudAiplatformV1beta1ImportRagFilesConfig", +"properties": { +"gcsSource": { +"$ref": "GoogleCloudAiplatformV1beta1GcsSource", +"description": "Google Cloud Storage location. Supports importing individual files as well as entire Google Cloud Storage directories. Sample formats: * \"gs://bucket_name/my_directory/object_name/my_file.txt\". * \"gs://bucket_name/my_directory\"" +}, +"googleDriveSource": { +"$ref": "GoogleCloudAiplatformV1beta1GoogleDriveSource", +"description": "Google Drive location. Supports importing individual files as well as Google Drive folders." +}, +"ragFileChunkingConfig": { +"$ref": "GoogleCloudAiplatformV1beta1RagFileChunkingConfig", +"description": "Specifies the size and overlap of chunks after importing RagFiles." +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1ImportRagFilesRequest": { +"description": "Request message for VertexRagDataService.ImportRagFiles.", +"id": "GoogleCloudAiplatformV1beta1ImportRagFilesRequest", +"properties": { +"importRagFilesConfig": { +"$ref": "GoogleCloudAiplatformV1beta1ImportRagFilesConfig", +"description": "Required. The config for the RagFiles to be synced and imported into the RagCorpus. VertexRagDataService.ImportRagFiles." 
+} +}, +"type": "object" +}, "GoogleCloudAiplatformV1beta1Index": { "description": "A representation of a collection of database items organized in a way that allows for approximate nearest neighbor (a.k.a ANN) algorithms search.", "id": "GoogleCloudAiplatformV1beta1Index", @@ -27159,6 +28417,60 @@ }, "type": "object" }, +"GoogleCloudAiplatformV1beta1ListRagCorporaResponse": { +"description": "Response message for VertexRagDataService.ListRagCorpora.", +"id": "GoogleCloudAiplatformV1beta1ListRagCorporaResponse", +"properties": { +"nextPageToken": { +"description": "A token to retrieve the next page of results. Pass to ListRagCorporaRequest.page_token to obtain that page.", +"type": "string" +}, +"ragCorpora": { +"description": "List of RagCorpora in the requested page.", +"items": { +"$ref": "GoogleCloudAiplatformV1beta1RagCorpus" +}, +"type": "array" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1ListRagFilesResponse": { +"description": "Response message for VertexRagDataService.ListRagFiles.", +"id": "GoogleCloudAiplatformV1beta1ListRagFilesResponse", +"properties": { +"nextPageToken": { +"description": "A token to retrieve the next page of results. Pass to ListRagFilesRequest.page_token to obtain that page.", +"type": "string" +}, +"ragFiles": { +"description": "List of RagFiles in the requested page.", +"items": { +"$ref": "GoogleCloudAiplatformV1beta1RagFile" +}, +"type": "array" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1ListReasoningEnginesResponse": { +"description": "Response message for ReasoningEngineService.ListReasoningEngines", +"id": "GoogleCloudAiplatformV1beta1ListReasoningEnginesResponse", +"properties": { +"nextPageToken": { +"description": "A token to retrieve the next page of results. 
Pass to ListReasoningEnginesRequest.page_token to obtain that page.", +"type": "string" +}, +"reasoningEngines": { +"description": "List of ReasoningEngines in the requested page.", +"items": { +"$ref": "GoogleCloudAiplatformV1beta1ReasoningEngine" +}, +"type": "array" +} +}, +"type": "object" +}, "GoogleCloudAiplatformV1beta1ListSavedQueriesResponse": { "description": "Response message for DatasetService.ListSavedQueries.", "id": "GoogleCloudAiplatformV1beta1ListSavedQueriesResponse", @@ -29931,6 +31243,192 @@ }, "type": "object" }, +"GoogleCloudAiplatformV1beta1PairwiseQuestionAnsweringQualityInput": { +"description": "Input for pairwise question answering quality metric.", +"id": "GoogleCloudAiplatformV1beta1PairwiseQuestionAnsweringQualityInput", +"properties": { +"instance": { +"$ref": "GoogleCloudAiplatformV1beta1PairwiseQuestionAnsweringQualityInstance", +"description": "Required. Pairwise question answering quality instance." +}, +"metricSpec": { +"$ref": "GoogleCloudAiplatformV1beta1PairwiseQuestionAnsweringQualitySpec", +"description": "Required. Spec for pairwise question answering quality score metric." +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1PairwiseQuestionAnsweringQualityInstance": { +"description": "Spec for pairwise question answering quality instance.", +"id": "GoogleCloudAiplatformV1beta1PairwiseQuestionAnsweringQualityInstance", +"properties": { +"baselinePrediction": { +"description": "Required. Output of the baseline model.", +"type": "string" +}, +"context": { +"description": "Optional. Text to answer the question.", +"type": "string" +}, +"instruction": { +"description": "Required. Question Answering prompt for LLM.", +"type": "string" +}, +"prediction": { +"description": "Required. Output of the candidate model.", +"type": "string" +}, +"reference": { +"description": "Optional. 
Ground truth used to compare against the prediction.", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1PairwiseQuestionAnsweringQualityResult": { +"description": "Spec for pairwise question answering quality result.", +"id": "GoogleCloudAiplatformV1beta1PairwiseQuestionAnsweringQualityResult", +"properties": { +"confidence": { +"description": "Output only. Confidence for question answering quality score.", +"format": "float", +"readOnly": true, +"type": "number" +}, +"explanation": { +"description": "Output only. Explanation for question answering quality score.", +"readOnly": true, +"type": "string" +}, +"pairwiseChoice": { +"description": "Output only. Pairwise question answering prediction choice.", +"enum": [ +"PAIRWISE_CHOICE_UNSPECIFIED", +"BASELINE", +"CANDIDATE", +"TIE" +], +"enumDescriptions": [ +"Unspecified prediction choice.", +"Baseline prediction wins", +"Candidate prediction wins", +"Winner cannot be determined" +], +"readOnly": true, +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1PairwiseQuestionAnsweringQualitySpec": { +"description": "Spec for pairwise question answering quality score metric.", +"id": "GoogleCloudAiplatformV1beta1PairwiseQuestionAnsweringQualitySpec", +"properties": { +"useReference": { +"description": "Optional. Whether to use instance.reference to compute question answering quality.", +"type": "boolean" +}, +"version": { +"description": "Optional. Which version to use for evaluation.", +"format": "int32", +"type": "integer" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1PairwiseSummarizationQualityInput": { +"description": "Input for pairwise summarization quality metric.", +"id": "GoogleCloudAiplatformV1beta1PairwiseSummarizationQualityInput", +"properties": { +"instance": { +"$ref": "GoogleCloudAiplatformV1beta1PairwiseSummarizationQualityInstance", +"description": "Required. Pairwise summarization quality instance." 
+}, +"metricSpec": { +"$ref": "GoogleCloudAiplatformV1beta1PairwiseSummarizationQualitySpec", +"description": "Required. Spec for pairwise summarization quality score metric." +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1PairwiseSummarizationQualityInstance": { +"description": "Spec for pairwise summarization quality instance.", +"id": "GoogleCloudAiplatformV1beta1PairwiseSummarizationQualityInstance", +"properties": { +"baselinePrediction": { +"description": "Required. Output of the baseline model.", +"type": "string" +}, +"context": { +"description": "Required. Text to be summarized.", +"type": "string" +}, +"instruction": { +"description": "Required. Summarization prompt for LLM.", +"type": "string" +}, +"prediction": { +"description": "Required. Output of the candidate model.", +"type": "string" +}, +"reference": { +"description": "Optional. Ground truth used to compare against the prediction.", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1PairwiseSummarizationQualityResult": { +"description": "Spec for pairwise summarization quality result.", +"id": "GoogleCloudAiplatformV1beta1PairwiseSummarizationQualityResult", +"properties": { +"confidence": { +"description": "Output only. Confidence for summarization quality score.", +"format": "float", +"readOnly": true, +"type": "number" +}, +"explanation": { +"description": "Output only. Explanation for summarization quality score.", +"readOnly": true, +"type": "string" +}, +"pairwiseChoice": { +"description": "Output only. 
Pairwise summarization prediction choice.", +"enum": [ +"PAIRWISE_CHOICE_UNSPECIFIED", +"BASELINE", +"CANDIDATE", +"TIE" +], +"enumDescriptions": [ +"Unspecified prediction choice.", +"Baseline prediction wins", +"Candidate prediction wins", +"Winner cannot be determined" +], +"readOnly": true, +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1PairwiseSummarizationQualitySpec": { +"description": "Spec for pairwise summarization quality score metric.", +"id": "GoogleCloudAiplatformV1beta1PairwiseSummarizationQualitySpec", +"properties": { +"useReference": { +"description": "Optional. Whether to use instance.reference to compute pairwise summarization quality.", +"type": "boolean" +}, +"version": { +"description": "Optional. Which version to use for evaluation.", +"format": "int32", +"type": "integer" +} +}, +"type": "object" +}, "GoogleCloudAiplatformV1beta1Part": { "description": "A datatype containing media that is part of a multi-part `Content` message. A `Part` consists of data which has an associated datatype. A `Part` can only contain one of the accepted types in `Part.data`. A `Part` must have a fixed IANA MIME type identifying the type and subtype of the media if `inline_data` or `file_data` field is filled with raw bytes.", "id": "GoogleCloudAiplatformV1beta1Part", @@ -30065,6 +31563,7 @@ "RUNNING", "STOPPING", "ERROR", +"REBOOTING", "UPDATING" ], "enumDescriptions": [ @@ -30073,6 +31572,7 @@ "The RUNNING state indicates the persistent resource is healthy and fully usable.", "The STOPPING state indicates the persistent resource is being deleted.", "The ERROR state indicates the persistent resource may be unusable. Details can be found in the `error` field.", +"The REBOOTING state indicates the persistent resource is being rebooted (PR is not available right now but is expected to be ready again later).", "The UPDATING state indicates the persistent resource is being updated." 
], "readOnly": true, @@ -31383,15 +32883,6 @@ "$ref": "GoogleCloudAiplatformV1beta1Content" }, "type": "array" -}, -"query": { -"$ref": "GoogleCloudAiplatformV1beta1QueryRequestQuery", -"deprecated": true, -"description": "Required. User provided input query message." -}, -"useFunctionCall": { -"description": "Optional. Experiment control on whether to use function call.", -"type": "boolean" } }, "type": "object" @@ -31404,20 +32895,6 @@ "description": "Failure message if any.", "type": "string" }, -"metadata": { -"$ref": "GoogleCloudAiplatformV1beta1QueryResponseResponseMetadata", -"deprecated": true, -"description": "Metadata related to the query execution." -}, -"queryResponseMetadata": { -"$ref": "GoogleCloudAiplatformV1beta1QueryResponseQueryResponseMetadata", -"deprecated": true -}, -"response": { -"deprecated": true, -"description": "Response to the user's query.", -"type": "string" -}, "steps": { "description": "Steps of extension or LLM interaction, can contain function call, function response, or text response. The last step contains the final response to the query.", "items": { @@ -31428,84 +32905,573 @@ }, "type": "object" }, -"GoogleCloudAiplatformV1beta1QueryRequestQuery": { -"description": "User provided query message.", -"id": "GoogleCloudAiplatformV1beta1QueryRequestQuery", +"GoogleCloudAiplatformV1beta1QueryReasoningEngineRequest": { +"description": "Request message for ReasoningEngineExecutionService.Query.", +"id": "GoogleCloudAiplatformV1beta1QueryReasoningEngineRequest", "properties": { -"query": { -"description": "Required. The query from user.", +"input": { +"additionalProperties": { +"description": "Properties of the object.", +"type": "any" +}, +"description": "Optional. Input content provided by users in JSON object format. 
Examples include text query, function calling parameters, media bytes, etc.", +"type": "object" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1QueryReasoningEngineResponse": { +"description": "Response message for ReasoningEngineExecutionService.Query", +"id": "GoogleCloudAiplatformV1beta1QueryReasoningEngineResponse", +"properties": { +"output": { +"description": "Response provided by users in JSON object format.", +"type": "any" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1QuestionAnsweringCorrectnessInput": { +"description": "Input for question answering correctness metric.", +"id": "GoogleCloudAiplatformV1beta1QuestionAnsweringCorrectnessInput", +"properties": { +"instance": { +"$ref": "GoogleCloudAiplatformV1beta1QuestionAnsweringCorrectnessInstance", +"description": "Required. Question answering correctness instance." +}, +"metricSpec": { +"$ref": "GoogleCloudAiplatformV1beta1QuestionAnsweringCorrectnessSpec", +"description": "Required. Spec for question answering correctness score metric." +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1QuestionAnsweringCorrectnessInstance": { +"description": "Spec for question answering correctness instance.", +"id": "GoogleCloudAiplatformV1beta1QuestionAnsweringCorrectnessInstance", +"properties": { +"context": { +"description": "Optional. Text provided as context to answer the question.", +"type": "string" +}, +"instruction": { +"description": "Required. The question asked and other instruction in the inference prompt.", +"type": "string" +}, +"prediction": { +"description": "Required. Output of the evaluated model.", +"type": "string" +}, +"reference": { +"description": "Optional. 
Ground truth used to compare against the prediction.", "type": "string" } }, "type": "object" }, -"GoogleCloudAiplatformV1beta1QueryResponseQueryResponseMetadata": { -"id": "GoogleCloudAiplatformV1beta1QueryResponseQueryResponseMetadata", +"GoogleCloudAiplatformV1beta1QuestionAnsweringCorrectnessResult": { +"description": "Spec for question answering correctness result.", +"id": "GoogleCloudAiplatformV1beta1QuestionAnsweringCorrectnessResult", "properties": { -"steps": { -"description": "ReAgent execution steps.", -"items": { -"$ref": "GoogleCloudAiplatformV1beta1QueryResponseQueryResponseMetadataReAgentSteps" +"confidence": { +"description": "Output only. Confidence for question answering correctness score.", +"format": "float", +"readOnly": true, +"type": "number" }, -"type": "array" +"explanation": { +"description": "Output only. Explanation for question answering correctness score.", +"readOnly": true, +"type": "string" +}, +"score": { +"description": "Output only. Question Answering Correctness score.", +"format": "float", +"readOnly": true, +"type": "number" +} }, -"useCreativity": { -"description": "Whether the reasoning agent used creativity (instead of extensions provided) to build the response.", +"type": "object" +}, +"GoogleCloudAiplatformV1beta1QuestionAnsweringCorrectnessSpec": { +"description": "Spec for question answering correctness metric.", +"id": "GoogleCloudAiplatformV1beta1QuestionAnsweringCorrectnessSpec", +"properties": { +"useReference": { +"description": "Optional. Whether to use instance.reference to compute question answering correctness.", "type": "boolean" +}, +"version": { +"description": "Optional. 
Which version to use for evaluation.", +"format": "int32", +"type": "integer" } }, "type": "object" }, -"GoogleCloudAiplatformV1beta1QueryResponseQueryResponseMetadataReAgentSteps": { -"description": "ReAgent execution steps.", -"id": "GoogleCloudAiplatformV1beta1QueryResponseQueryResponseMetadataReAgentSteps", +"GoogleCloudAiplatformV1beta1QuestionAnsweringHelpfulnessInput": { +"description": "Input for question answering helpfulness metric.", +"id": "GoogleCloudAiplatformV1beta1QuestionAnsweringHelpfulnessInput", "properties": { -"error": { -"description": "Error messages from the extension or during response parsing.", +"instance": { +"$ref": "GoogleCloudAiplatformV1beta1QuestionAnsweringHelpfulnessInstance", +"description": "Required. Question answering helpfulness instance." +}, +"metricSpec": { +"$ref": "GoogleCloudAiplatformV1beta1QuestionAnsweringHelpfulnessSpec", +"description": "Required. Spec for question answering helpfulness score metric." +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1QuestionAnsweringHelpfulnessInstance": { +"description": "Spec for question answering helpfulness instance.", +"id": "GoogleCloudAiplatformV1beta1QuestionAnsweringHelpfulnessInstance", +"properties": { +"context": { +"description": "Optional. Text provided as context to answer the question.", "type": "string" }, -"extensionInstruction": { -"description": "Planner's instruction to the extension.", +"instruction": { +"description": "Required. The question asked and other instruction in the inference prompt.", "type": "string" }, -"extensionInvoked": { -"description": "Planner's choice of extension to invoke.", +"prediction": { +"description": "Required. Output of the evaluated model.", "type": "string" }, -"response": { -"description": "Response of the extension.", +"reference": { +"description": "Optional. 
Ground truth used to compare against the prediction.", "type": "string" +} }, -"success": { -"description": "When set to False, either the extension fails to execute or the response cannot be summarized.", +"type": "object" +}, +"GoogleCloudAiplatformV1beta1QuestionAnsweringHelpfulnessResult": { +"description": "Spec for question answering helpfulness result.", +"id": "GoogleCloudAiplatformV1beta1QuestionAnsweringHelpfulnessResult", +"properties": { +"confidence": { +"description": "Output only. Confidence for question answering helpfulness score.", +"format": "float", +"readOnly": true, +"type": "number" +}, +"explanation": { +"description": "Output only. Explanation for question answering helpfulness score.", +"readOnly": true, +"type": "string" +}, +"score": { +"description": "Output only. Question Answering Helpfulness score.", +"format": "float", +"readOnly": true, +"type": "number" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1QuestionAnsweringHelpfulnessSpec": { +"description": "Spec for question answering helpfulness metric.", +"id": "GoogleCloudAiplatformV1beta1QuestionAnsweringHelpfulnessSpec", +"properties": { +"useReference": { +"description": "Optional. Whether to use instance.reference to compute question answering helpfulness.", "type": "boolean" }, -"thought": { -"description": "Planner's thought.", +"version": { +"description": "Optional. Which version to use for evaluation.", +"format": "int32", +"type": "integer" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1QuestionAnsweringQualityInput": { +"description": "Input for question answering quality metric.", +"id": "GoogleCloudAiplatformV1beta1QuestionAnsweringQualityInput", +"properties": { +"instance": { +"$ref": "GoogleCloudAiplatformV1beta1QuestionAnsweringQualityInstance", +"description": "Required. Question answering quality instance." +}, +"metricSpec": { +"$ref": "GoogleCloudAiplatformV1beta1QuestionAnsweringQualitySpec", +"description": "Required. 
Spec for question answering quality score metric." +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1QuestionAnsweringQualityInstance": { +"description": "Spec for question answering quality instance.", +"id": "GoogleCloudAiplatformV1beta1QuestionAnsweringQualityInstance", +"properties": { +"context": { +"description": "Optional. Text to answer the question.", +"type": "string" +}, +"instruction": { +"description": "Required. Question Answering prompt for LLM.", +"type": "string" +}, +"prediction": { +"description": "Required. Output of the evaluated model.", +"type": "string" +}, +"reference": { +"description": "Optional. Ground truth used to compare against the prediction.", "type": "string" } }, "type": "object" }, -"GoogleCloudAiplatformV1beta1QueryResponseResponseMetadata": { -"description": "Metadata for response", -"id": "GoogleCloudAiplatformV1beta1QueryResponseResponseMetadata", +"GoogleCloudAiplatformV1beta1QuestionAnsweringQualityResult": { +"description": "Spec for question answering quality result.", +"id": "GoogleCloudAiplatformV1beta1QuestionAnsweringQualityResult", "properties": { -"checkpoint": { -"$ref": "GoogleCloudAiplatformV1beta1CheckPoint", -"description": "Optional. Checkpoint to restore a request" +"confidence": { +"description": "Output only. Confidence for question answering quality score.", +"format": "float", +"readOnly": true, +"type": "number" }, -"executionPlan": { -"$ref": "GoogleCloudAiplatformV1beta1ExecutionPlan", -"description": "Optional. Execution plan for the request." +"explanation": { +"description": "Output only. Explanation for question answering quality score.", +"readOnly": true, +"type": "string" }, -"flowOutputs": { -"additionalProperties": { -"description": "Properties of the object.", -"type": "any" +"score": { +"description": "Output only. 
Question Answering Quality score.", +"format": "float", +"readOnly": true, +"type": "number" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1QuestionAnsweringQualitySpec": { +"description": "Spec for question answering quality score metric.", +"id": "GoogleCloudAiplatformV1beta1QuestionAnsweringQualitySpec", +"properties": { +"useReference": { +"description": "Optional. Whether to use instance.reference to compute question answering quality.", +"type": "boolean" +}, +"version": { +"description": "Optional. Which version to use for evaluation.", +"format": "int32", +"type": "integer" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1QuestionAnsweringRelevanceInput": { +"description": "Input for question answering relevance metric.", +"id": "GoogleCloudAiplatformV1beta1QuestionAnsweringRelevanceInput", +"properties": { +"instance": { +"$ref": "GoogleCloudAiplatformV1beta1QuestionAnsweringRelevanceInstance", +"description": "Required. Question answering relevance instance." +}, +"metricSpec": { +"$ref": "GoogleCloudAiplatformV1beta1QuestionAnsweringRelevanceSpec", +"description": "Required. Spec for question answering relevance score metric." +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1QuestionAnsweringRelevanceInstance": { +"description": "Spec for question answering relevance instance.", +"id": "GoogleCloudAiplatformV1beta1QuestionAnsweringRelevanceInstance", +"properties": { +"context": { +"description": "Optional. Text provided as context to answer the question.", +"type": "string" +}, +"instruction": { +"description": "Required. The question asked and other instruction in the inference prompt.", +"type": "string" +}, +"prediction": { +"description": "Required. Output of the evaluated model.", +"type": "string" +}, +"reference": { +"description": "Optional. 
Ground truth used to compare against the prediction.", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1QuestionAnsweringRelevanceResult": { +"description": "Spec for question answering relevance result.", +"id": "GoogleCloudAiplatformV1beta1QuestionAnsweringRelevanceResult", +"properties": { +"confidence": { +"description": "Output only. Confidence for question answering relevance score.", +"format": "float", +"readOnly": true, +"type": "number" +}, +"explanation": { +"description": "Output only. Explanation for question answering relevance score.", +"readOnly": true, +"type": "string" +}, +"score": { +"description": "Output only. Question Answering Relevance score.", +"format": "float", +"readOnly": true, +"type": "number" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1QuestionAnsweringRelevanceSpec": { +"description": "Spec for question answering relevance metric.", +"id": "GoogleCloudAiplatformV1beta1QuestionAnsweringRelevanceSpec", +"properties": { +"useReference": { +"description": "Optional. Whether to use instance.reference to compute question answering relevance.", +"type": "boolean" +}, +"version": { +"description": "Optional. Which version to use for evaluation.", +"format": "int32", +"type": "integer" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1RagContextRecallInput": { +"description": "Input for rag context recall metric.", +"id": "GoogleCloudAiplatformV1beta1RagContextRecallInput", +"properties": { +"instance": { +"$ref": "GoogleCloudAiplatformV1beta1RagContextRecallInstance", +"description": "Required. Rag context recall instance." +}, +"metricSpec": { +"$ref": "GoogleCloudAiplatformV1beta1RagContextRecallSpec", +"description": "Required. Spec for rag context recall metric." 
+} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1RagContextRecallInstance": { +"description": "Spec for rag context recall instance.", +"id": "GoogleCloudAiplatformV1beta1RagContextRecallInstance", +"properties": { +"context": { +"description": "Required. Retrieved facts from RAG pipeline as context to be evaluated.", +"type": "string" +}, +"reference": { +"description": "Required. Ground truth used to compare against the context.", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1RagContextRecallResult": { +"description": "Spec for rag context recall result.", +"id": "GoogleCloudAiplatformV1beta1RagContextRecallResult", +"properties": { +"confidence": { +"description": "Output only. Confidence for rag context recall score.", +"format": "float", +"readOnly": true, +"type": "number" +}, +"explanation": { +"description": "Output only. Explanation for rag context recall score.", +"readOnly": true, +"type": "string" +}, +"score": { +"description": "Output only. RagContextRecall score.", +"format": "float", +"readOnly": true, +"type": "number" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1RagContextRecallSpec": { +"description": "Spec for rag context recall metric.", +"id": "GoogleCloudAiplatformV1beta1RagContextRecallSpec", +"properties": { +"version": { +"description": "Optional. 
Which version to use for evaluation.", +"format": "int32", +"type": "integer" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1RagContexts": { +"description": "Relevant contexts for one query.", +"id": "GoogleCloudAiplatformV1beta1RagContexts", +"properties": { +"contexts": { +"description": "All its contexts.", +"items": { +"$ref": "GoogleCloudAiplatformV1beta1RagContextsContext" +}, +"type": "array" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1RagContextsContext": { +"description": "A context of the query.", +"id": "GoogleCloudAiplatformV1beta1RagContextsContext", +"properties": { +"distance": { +"description": "The distance between the query vector and the context text vector.", +"format": "double", +"type": "number" +}, +"sourceUri": { +"description": "For vertex RagStore, if the file is imported from Cloud Storage or Google Drive, source_uri will be original file URI in Cloud Storage or Google Drive; if file is uploaded, source_uri will be file display name.", +"type": "string" +}, +"text": { +"description": "The text chunk.", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1RagCorpus": { +"description": "A RagCorpus is a RagFile container and a project can have multiple RagCorpora.", +"id": "GoogleCloudAiplatformV1beta1RagCorpus", +"properties": { +"createTime": { +"description": "Output only. Timestamp when this RagCorpus was created.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, +"description": { +"description": "Optional. The description of the RagCorpus.", +"type": "string" +}, +"displayName": { +"description": "Required. The display name of the RagCorpus. The name can be up to 128 characters long and can consist of any UTF-8 characters.", +"type": "string" +}, +"name": { +"description": "Output only. The resource name of the RagCorpus.", +"readOnly": true, +"type": "string" +}, +"updateTime": { +"description": "Output only. 
Timestamp when this RagCorpus was last updated.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +} }, -"description": "To surface the v2 flow output.", "type": "object" +}, +"GoogleCloudAiplatformV1beta1RagFile": { +"description": "A RagFile contains user data for chunking, embedding and indexing.", +"id": "GoogleCloudAiplatformV1beta1RagFile", +"properties": { +"createTime": { +"description": "Output only. Timestamp when this RagFile was created.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, +"description": { +"description": "Optional. The description of the RagFile.", +"type": "string" +}, +"directUploadSource": { +"$ref": "GoogleCloudAiplatformV1beta1DirectUploadSource", +"description": "Output only. The RagFile is encapsulated and uploaded in the UploadRagFile request.", +"readOnly": true +}, +"displayName": { +"description": "Required. The display name of the RagFile. The name can be up to 128 characters long and can consist of any UTF-8 characters.", +"type": "string" +}, +"gcsSource": { +"$ref": "GoogleCloudAiplatformV1beta1GcsSource", +"description": "Output only. Google Cloud Storage location of the RagFile. It does not support wildcards in the GCS uri for now.", +"readOnly": true +}, +"googleDriveSource": { +"$ref": "GoogleCloudAiplatformV1beta1GoogleDriveSource", +"description": "Output only. Google Drive location. Supports importing individual files as well as Google Drive folders.", +"readOnly": true +}, +"name": { +"description": "Output only. The resource name of the RagFile.", +"readOnly": true, +"type": "string" +}, +"ragFileType": { +"description": "Output only. The type of the RagFile.", +"enum": [ +"RAG_FILE_TYPE_UNSPECIFIED", +"RAG_FILE_TYPE_TXT", +"RAG_FILE_TYPE_PDF" +], +"enumDescriptions": [ +"RagFile type is unspecified.", +"RagFile type is TXT.", +"RagFile type is PDF." +], +"readOnly": true, +"type": "string" +}, +"sizeBytes": { +"description": "Output only. 
The size of the RagFile in bytes.", +"format": "int64", +"readOnly": true, +"type": "string" +}, +"updateTime": { +"description": "Output only. Timestamp when this RagFile was last updated.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1RagFileChunkingConfig": { +"description": "Specifies the size and overlap of chunks for RagFiles.", +"id": "GoogleCloudAiplatformV1beta1RagFileChunkingConfig", +"properties": { +"chunkOverlap": { +"description": "The overlap between chunks.", +"format": "int32", +"type": "integer" +}, +"chunkSize": { +"description": "The size of the chunks.", +"format": "int32", +"type": "integer" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1RagQuery": { +"description": "A query to retrieve relevant contexts.", +"id": "GoogleCloudAiplatformV1beta1RagQuery", +"properties": { +"similarityTopK": { +"description": "Optional. The number of contexts to retrieve.", +"format": "int32", +"type": "integer" +}, +"text": { +"description": "Optional. The query in text format to get relevant contexts.", +"type": "string" } }, "type": "object" @@ -31763,6 +33729,90 @@ }, "type": "object" }, +"GoogleCloudAiplatformV1beta1ReasoningEngine": { +"description": "ReasoningEngine provides a customizable runtime for models to determine which actions to take and in which order.", +"id": "GoogleCloudAiplatformV1beta1ReasoningEngine", +"properties": { +"createTime": { +"description": "Output only. Timestamp when this ReasoningEngine was created.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, +"description": { +"description": "Optional. The description of the ReasoningEngine.", +"type": "string" +}, +"displayName": { +"description": "Required. The display name of the ReasoningEngine.", +"type": "string" +}, +"etag": { +"description": "Optional. Used to perform consistent read-modify-write updates. 
If not set, a blind \"overwrite\" update happens.", +"type": "string" +}, +"name": { +"description": "Identifier. The resource name of the ReasoningEngine.", +"type": "string" +}, +"spec": { +"$ref": "GoogleCloudAiplatformV1beta1ReasoningEngineSpec", +"description": "Required. Configurations of the ReasoningEngine" +}, +"updateTime": { +"description": "Output only. Timestamp when this ReasoningEngine was most recently updated.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1ReasoningEngineSpec": { +"description": "ReasoningEngine configurations", +"id": "GoogleCloudAiplatformV1beta1ReasoningEngineSpec", +"properties": { +"classMethods": { +"description": "Optional. Declarations for object class methods.", +"items": { +"additionalProperties": { +"description": "Properties of the object.", +"type": "any" +}, +"type": "object" +}, +"type": "array" +}, +"packageSpec": { +"$ref": "GoogleCloudAiplatformV1beta1ReasoningEngineSpecPackageSpec", +"description": "Required. User provided package spec of the ReasoningEngine." +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1ReasoningEngineSpecPackageSpec": { +"description": "User provided package spec like pickled object and package requirements.", +"id": "GoogleCloudAiplatformV1beta1ReasoningEngineSpecPackageSpec", +"properties": { +"dependencyFilesGcsUri": { +"description": "Optional. The Cloud Storage URI of the dependency files in tar.gz format.", +"type": "string" +}, +"pickleObjectGcsUri": { +"description": "Optional. The Cloud Storage URI of the pickled python object.", +"type": "string" +}, +"pythonVersion": { +"description": "Optional. The Python version. Currently support 3.8, 3.9, 3.10, 3.11. If not specified, default value is 3.10.", +"type": "string" +}, +"requirementsGcsUri": { +"description": "Optional. 
The Cloud Storage URI of the `requirements.txt` file", +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudAiplatformV1beta1RebootPersistentResourceOperationMetadata": { "description": "Details of operations that perform reboot PersistentResource.", "id": "GoogleCloudAiplatformV1beta1RebootPersistentResourceOperationMetadata", @@ -32018,6 +34068,72 @@ }, "type": "object" }, +"GoogleCloudAiplatformV1beta1ResponseRecallInput": { +"description": "Input for response recall metric.", +"id": "GoogleCloudAiplatformV1beta1ResponseRecallInput", +"properties": { +"instance": { +"$ref": "GoogleCloudAiplatformV1beta1ResponseRecallInstance", +"description": "Required. Response recall instance." +}, +"metricSpec": { +"$ref": "GoogleCloudAiplatformV1beta1ResponseRecallSpec", +"description": "Required. Spec for response recall score metric." +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1ResponseRecallInstance": { +"description": "Spec for response recall instance.", +"id": "GoogleCloudAiplatformV1beta1ResponseRecallInstance", +"properties": { +"prediction": { +"description": "Required. Output of the evaluated model.", +"type": "string" +}, +"reference": { +"description": "Required. Ground truth used to compare against the prediction.", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1ResponseRecallResult": { +"description": "Spec for response recall result.", +"id": "GoogleCloudAiplatformV1beta1ResponseRecallResult", +"properties": { +"confidence": { +"description": "Output only. Confidence for response recall score.", +"format": "float", +"readOnly": true, +"type": "number" +}, +"explanation": { +"description": "Output only. Explanation for response recall score.", +"readOnly": true, +"type": "string" +}, +"score": { +"description": "Output only. 
ResponseRecall score.", +"format": "float", +"readOnly": true, +"type": "number" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1ResponseRecallSpec": { +"description": "Spec for response recall metric.", +"id": "GoogleCloudAiplatformV1beta1ResponseRecallSpec", +"properties": { +"version": { +"description": "Optional. Which version to use for evaluation.", +"format": "int32", +"type": "integer" +} +}, +"type": "object" +}, "GoogleCloudAiplatformV1beta1RestoreDatasetVersionOperationMetadata": { "description": "Runtime operation information for DatasetService.RestoreDatasetVersion.", "id": "GoogleCloudAiplatformV1beta1RestoreDatasetVersionOperationMetadata", @@ -32057,6 +34173,156 @@ "vertexAiSearch": { "$ref": "GoogleCloudAiplatformV1beta1VertexAISearch", "description": "Set to use data source powered by Vertex AI Search." +}, +"vertexRagStore": { +"$ref": "GoogleCloudAiplatformV1beta1VertexRagStore", +"description": "Set to use data source powered by Vertex RAG store. User data is uploaded via the VertexRagDataService." +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1RetrieveContextsRequest": { +"description": "Request message for VertexRagService.RetrieveContexts.", +"id": "GoogleCloudAiplatformV1beta1RetrieveContextsRequest", +"properties": { +"query": { +"$ref": "GoogleCloudAiplatformV1beta1RagQuery", +"description": "Required. Single RAG retrieve query." +}, +"vertexRagStore": { +"$ref": "GoogleCloudAiplatformV1beta1RetrieveContextsRequestVertexRagStore", +"description": "The data source for Vertex RagStore." +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1RetrieveContextsRequestVertexRagStore": { +"description": "The data source for Vertex RagStore.", +"id": "GoogleCloudAiplatformV1beta1RetrieveContextsRequestVertexRagStore", +"properties": { +"ragCorpora": { +"description": "Required. RagCorpora resource name. Format: `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}` Currently only one corpus is allowed. 
In the future we may open up multiple corpora support. However, they should be from the same project and location.", +"items": { +"type": "string" +}, +"type": "array" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1RetrieveContextsResponse": { +"description": "Response message for VertexRagService.RetrieveContexts.", +"id": "GoogleCloudAiplatformV1beta1RetrieveContextsResponse", +"properties": { +"contexts": { +"$ref": "GoogleCloudAiplatformV1beta1RagContexts", +"description": "The contexts of the query." +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1RougeInput": { +"description": "Input for rouge metric.", +"id": "GoogleCloudAiplatformV1beta1RougeInput", +"properties": { +"instances": { +"description": "Required. Repeated rouge instances.", +"items": { +"$ref": "GoogleCloudAiplatformV1beta1RougeInstance" +}, +"type": "array" +}, +"metricSpec": { +"$ref": "GoogleCloudAiplatformV1beta1RougeSpec", +"description": "Required. Spec for rouge score metric." +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1RougeInstance": { +"description": "Spec for rouge instance.", +"id": "GoogleCloudAiplatformV1beta1RougeInstance", +"properties": { +"prediction": { +"description": "Required. Output of the evaluated model.", +"type": "string" +}, +"reference": { +"description": "Required. Ground truth used to compare against the prediction.", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1RougeMetricValue": { +"description": "Rouge metric value for an instance.", +"id": "GoogleCloudAiplatformV1beta1RougeMetricValue", +"properties": { +"score": { +"description": "Output only. Rouge score.", +"format": "float", +"readOnly": true, +"type": "number" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1RougeResults": { +"description": "Results for rouge metric.", +"id": "GoogleCloudAiplatformV1beta1RougeResults", +"properties": { +"rougeMetricValues": { +"description": "Output only. 
Rouge metric values.", +"items": { +"$ref": "GoogleCloudAiplatformV1beta1RougeMetricValue" +}, +"readOnly": true, +"type": "array" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1RougeSpec": { +"description": "Spec for rouge score metric - calculates the recall of n-grams in prediction as compared to reference - returns a score ranging between 0 and 1.", +"id": "GoogleCloudAiplatformV1beta1RougeSpec", +"properties": { +"rougeType": { +"description": "Optional. Supported rouge types are rougen[1-9], rougeL and rougeLsum.", +"type": "string" +}, +"splitSummaries": { +"description": "Optional. Whether to split summaries while using rougeLsum.", +"type": "boolean" +}, +"useStemmer": { +"description": "Optional. Whether to use stemmer to compute rouge score.", +"type": "boolean" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1SafetyInput": { +"description": "Input for safety metric.", +"id": "GoogleCloudAiplatformV1beta1SafetyInput", +"properties": { +"instance": { +"$ref": "GoogleCloudAiplatformV1beta1SafetyInstance", +"description": "Required. Safety instance." +}, +"metricSpec": { +"$ref": "GoogleCloudAiplatformV1beta1SafetySpec", +"description": "Required. Spec for safety metric." +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1SafetyInstance": { +"description": "Spec for safety instance.", +"id": "GoogleCloudAiplatformV1beta1SafetyInstance", +"properties": { +"prediction": { +"description": "Required. Output of the evaluated model.", +"type": "string" } }, "type": "object" @@ -32142,6 +34408,30 @@ }, "type": "object" }, +"GoogleCloudAiplatformV1beta1SafetyResult": { +"description": "Spec for safety result.", +"id": "GoogleCloudAiplatformV1beta1SafetyResult", +"properties": { +"confidence": { +"description": "Output only. Confidence for safety score.", +"format": "float", +"readOnly": true, +"type": "number" +}, +"explanation": { +"description": "Output only. 
Explanation for safety score.", +"readOnly": true, +"type": "string" +}, +"score": { +"description": "Output only. Safety score.", +"format": "float", +"readOnly": true, +"type": "number" +} +}, +"type": "object" +}, "GoogleCloudAiplatformV1beta1SafetySetting": { "description": "Safety settings.", "id": "GoogleCloudAiplatformV1beta1SafetySetting", @@ -32199,6 +34489,18 @@ }, "type": "object" }, +"GoogleCloudAiplatformV1beta1SafetySpec": { +"description": "Spec for safety metric.", +"id": "GoogleCloudAiplatformV1beta1SafetySpec", +"properties": { +"version": { +"description": "Optional. Which version to use for evaluation.", +"format": "int32", +"type": "integer" +} +}, +"type": "object" +}, "GoogleCloudAiplatformV1beta1SampleConfig": { "description": "Active learning data sampling config. For every active learning labeling iteration, it will select a batch of data based on the sampling strategy.", "id": "GoogleCloudAiplatformV1beta1SampleConfig", @@ -37263,6 +39565,240 @@ }, "type": "object" }, +"GoogleCloudAiplatformV1beta1SummarizationHelpfulnessInput": { +"description": "Input for summarization helpfulness metric.", +"id": "GoogleCloudAiplatformV1beta1SummarizationHelpfulnessInput", +"properties": { +"instance": { +"$ref": "GoogleCloudAiplatformV1beta1SummarizationHelpfulnessInstance", +"description": "Required. Summarization helpfulness instance." +}, +"metricSpec": { +"$ref": "GoogleCloudAiplatformV1beta1SummarizationHelpfulnessSpec", +"description": "Required. Spec for summarization helpfulness score metric." +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1SummarizationHelpfulnessInstance": { +"description": "Spec for summarization helpfulness instance.", +"id": "GoogleCloudAiplatformV1beta1SummarizationHelpfulnessInstance", +"properties": { +"context": { +"description": "Required. Text to be summarized.", +"type": "string" +}, +"instruction": { +"description": "Optional. 
Summarization prompt for LLM.", +"type": "string" +}, +"prediction": { +"description": "Required. Output of the evaluated model.", +"type": "string" +}, +"reference": { +"description": "Optional. Ground truth used to compare against the prediction.", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1SummarizationHelpfulnessResult": { +"description": "Spec for summarization helpfulness result.", +"id": "GoogleCloudAiplatformV1beta1SummarizationHelpfulnessResult", +"properties": { +"confidence": { +"description": "Output only. Confidence for summarization helpfulness score.", +"format": "float", +"readOnly": true, +"type": "number" +}, +"explanation": { +"description": "Output only. Explanation for summarization helpfulness score.", +"readOnly": true, +"type": "string" +}, +"score": { +"description": "Output only. Summarization Helpfulness score.", +"format": "float", +"readOnly": true, +"type": "number" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1SummarizationHelpfulnessSpec": { +"description": "Spec for summarization helpfulness score metric.", +"id": "GoogleCloudAiplatformV1beta1SummarizationHelpfulnessSpec", +"properties": { +"useReference": { +"description": "Optional. Whether to use instance.reference to compute summarization helpfulness.", +"type": "boolean" +}, +"version": { +"description": "Optional. Which version to use for evaluation.", +"format": "int32", +"type": "integer" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1SummarizationQualityInput": { +"description": "Input for summarization quality metric.", +"id": "GoogleCloudAiplatformV1beta1SummarizationQualityInput", +"properties": { +"instance": { +"$ref": "GoogleCloudAiplatformV1beta1SummarizationQualityInstance", +"description": "Required. Summarization quality instance." +}, +"metricSpec": { +"$ref": "GoogleCloudAiplatformV1beta1SummarizationQualitySpec", +"description": "Required. Spec for summarization quality score metric." 
+} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1SummarizationQualityInstance": { +"description": "Spec for summarization quality instance.", +"id": "GoogleCloudAiplatformV1beta1SummarizationQualityInstance", +"properties": { +"context": { +"description": "Required. Text to be summarized.", +"type": "string" +}, +"instruction": { +"description": "Required. Summarization prompt for LLM.", +"type": "string" +}, +"prediction": { +"description": "Required. Output of the evaluated model.", +"type": "string" +}, +"reference": { +"description": "Optional. Ground truth used to compare against the prediction.", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1SummarizationQualityResult": { +"description": "Spec for summarization quality result.", +"id": "GoogleCloudAiplatformV1beta1SummarizationQualityResult", +"properties": { +"confidence": { +"description": "Output only. Confidence for summarization quality score.", +"format": "float", +"readOnly": true, +"type": "number" +}, +"explanation": { +"description": "Output only. Explanation for summarization quality score.", +"readOnly": true, +"type": "string" +}, +"score": { +"description": "Output only. Summarization Quality score.", +"format": "float", +"readOnly": true, +"type": "number" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1SummarizationQualitySpec": { +"description": "Spec for summarization quality score metric.", +"id": "GoogleCloudAiplatformV1beta1SummarizationQualitySpec", +"properties": { +"useReference": { +"description": "Optional. Whether to use instance.reference to compute summarization quality.", +"type": "boolean" +}, +"version": { +"description": "Optional. 
Which version to use for evaluation.", +"format": "int32", +"type": "integer" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1SummarizationVerbosityInput": { +"description": "Input for summarization verbosity metric.", +"id": "GoogleCloudAiplatformV1beta1SummarizationVerbosityInput", +"properties": { +"instance": { +"$ref": "GoogleCloudAiplatformV1beta1SummarizationVerbosityInstance", +"description": "Required. Summarization verbosity instance." +}, +"metricSpec": { +"$ref": "GoogleCloudAiplatformV1beta1SummarizationVerbositySpec", +"description": "Required. Spec for summarization verbosity score metric." +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1SummarizationVerbosityInstance": { +"description": "Spec for summarization verbosity instance.", +"id": "GoogleCloudAiplatformV1beta1SummarizationVerbosityInstance", +"properties": { +"context": { +"description": "Required. Text to be summarized.", +"type": "string" +}, +"instruction": { +"description": "Optional. Summarization prompt for LLM.", +"type": "string" +}, +"prediction": { +"description": "Required. Output of the evaluated model.", +"type": "string" +}, +"reference": { +"description": "Optional. Ground truth used to compare against the prediction.", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1SummarizationVerbosityResult": { +"description": "Spec for summarization verbosity result.", +"id": "GoogleCloudAiplatformV1beta1SummarizationVerbosityResult", +"properties": { +"confidence": { +"description": "Output only. Confidence for summarization verbosity score.", +"format": "float", +"readOnly": true, +"type": "number" +}, +"explanation": { +"description": "Output only. Explanation for summarization verbosity score.", +"readOnly": true, +"type": "string" +}, +"score": { +"description": "Output only. 
Summarization Verbosity score.", +"format": "float", +"readOnly": true, +"type": "number" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1SummarizationVerbositySpec": { +"description": "Spec for summarization verbosity score metric.", +"id": "GoogleCloudAiplatformV1beta1SummarizationVerbositySpec", +"properties": { +"useReference": { +"description": "Optional. Whether to use instance.reference to compute summarization verbosity.", +"type": "boolean" +}, +"version": { +"description": "Optional. Which version to use for evaluation.", +"format": "int32", +"type": "integer" +} +}, +"type": "object" +}, "GoogleCloudAiplatformV1beta1SyncFeatureViewRequest": { "description": "Request message for FeatureOnlineStoreAdminService.SyncFeatureView.", "id": "GoogleCloudAiplatformV1beta1SyncFeatureViewRequest", @@ -37866,6 +40402,290 @@ }, "type": "object" }, +"GoogleCloudAiplatformV1beta1ToolCallValidInput": { +"description": "Input for tool call valid metric.", +"id": "GoogleCloudAiplatformV1beta1ToolCallValidInput", +"properties": { +"instances": { +"description": "Required. Repeated tool call valid instances.", +"items": { +"$ref": "GoogleCloudAiplatformV1beta1ToolCallValidInstance" +}, +"type": "array" +}, +"metricSpec": { +"$ref": "GoogleCloudAiplatformV1beta1ToolCallValidSpec", +"description": "Required. Spec for tool call valid metric." +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1ToolCallValidInstance": { +"description": "Spec for tool call valid instance.", +"id": "GoogleCloudAiplatformV1beta1ToolCallValidInstance", +"properties": { +"prediction": { +"description": "Required. Output of the evaluated model.", +"type": "string" +}, +"reference": { +"description": "Required. 
Ground truth used to compare against the prediction.", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1ToolCallValidMetricValue": { +"description": "Tool call valid metric value for an instance.", +"id": "GoogleCloudAiplatformV1beta1ToolCallValidMetricValue", +"properties": { +"score": { +"description": "Output only. Tool call valid score.", +"format": "float", +"readOnly": true, +"type": "number" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1ToolCallValidResults": { +"description": "Results for tool call valid metric.", +"id": "GoogleCloudAiplatformV1beta1ToolCallValidResults", +"properties": { +"toolCallValidMetricValues": { +"description": "Output only. Tool call valid metric values.", +"items": { +"$ref": "GoogleCloudAiplatformV1beta1ToolCallValidMetricValue" +}, +"readOnly": true, +"type": "array" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1ToolCallValidSpec": { +"description": "Spec for tool call valid metric.", +"id": "GoogleCloudAiplatformV1beta1ToolCallValidSpec", +"properties": {}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1ToolConfig": { +"description": "Tool config. This config is shared for all tools provided in the request.", +"id": "GoogleCloudAiplatformV1beta1ToolConfig", +"properties": { +"functionCallingConfig": { +"$ref": "GoogleCloudAiplatformV1beta1FunctionCallingConfig", +"description": "Optional. Function calling config." +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1ToolNameMatchInput": { +"description": "Input for tool name match metric.", +"id": "GoogleCloudAiplatformV1beta1ToolNameMatchInput", +"properties": { +"instances": { +"description": "Required. Repeated tool name match instances.", +"items": { +"$ref": "GoogleCloudAiplatformV1beta1ToolNameMatchInstance" +}, +"type": "array" +}, +"metricSpec": { +"$ref": "GoogleCloudAiplatformV1beta1ToolNameMatchSpec", +"description": "Required. Spec for tool name match metric." 
+} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1ToolNameMatchInstance": { +"description": "Spec for tool name match instance.", +"id": "GoogleCloudAiplatformV1beta1ToolNameMatchInstance", +"properties": { +"prediction": { +"description": "Required. Output of the evaluated model.", +"type": "string" +}, +"reference": { +"description": "Required. Ground truth used to compare against the prediction.", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1ToolNameMatchMetricValue": { +"description": "Tool name match metric value for an instance.", +"id": "GoogleCloudAiplatformV1beta1ToolNameMatchMetricValue", +"properties": { +"score": { +"description": "Output only. Tool name match score.", +"format": "float", +"readOnly": true, +"type": "number" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1ToolNameMatchResults": { +"description": "Results for tool name match metric.", +"id": "GoogleCloudAiplatformV1beta1ToolNameMatchResults", +"properties": { +"toolNameMatchMetricValues": { +"description": "Output only. Tool name match metric values.", +"items": { +"$ref": "GoogleCloudAiplatformV1beta1ToolNameMatchMetricValue" +}, +"readOnly": true, +"type": "array" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1ToolNameMatchSpec": { +"description": "Spec for tool name match metric.", +"id": "GoogleCloudAiplatformV1beta1ToolNameMatchSpec", +"properties": {}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1ToolParameterKVMatchInput": { +"description": "Input for tool parameter key value match metric.", +"id": "GoogleCloudAiplatformV1beta1ToolParameterKVMatchInput", +"properties": { +"instances": { +"description": "Required. Repeated tool parameter key value match instances.", +"items": { +"$ref": "GoogleCloudAiplatformV1beta1ToolParameterKVMatchInstance" +}, +"type": "array" +}, +"metricSpec": { +"$ref": "GoogleCloudAiplatformV1beta1ToolParameterKVMatchSpec", +"description": "Required. 
Spec for tool parameter key value match metric." +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1ToolParameterKVMatchInstance": { +"description": "Spec for tool parameter key value match instance.", +"id": "GoogleCloudAiplatformV1beta1ToolParameterKVMatchInstance", +"properties": { +"prediction": { +"description": "Required. Output of the evaluated model.", +"type": "string" +}, +"reference": { +"description": "Required. Ground truth used to compare against the prediction.", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1ToolParameterKVMatchMetricValue": { +"description": "Tool parameter key value match metric value for an instance.", +"id": "GoogleCloudAiplatformV1beta1ToolParameterKVMatchMetricValue", +"properties": { +"score": { +"description": "Output only. Tool parameter key value match score.", +"format": "float", +"readOnly": true, +"type": "number" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1ToolParameterKVMatchResults": { +"description": "Results for tool parameter key value match metric.", +"id": "GoogleCloudAiplatformV1beta1ToolParameterKVMatchResults", +"properties": { +"toolParameterKvMatchMetricValues": { +"description": "Output only. Tool parameter key value match metric values.", +"items": { +"$ref": "GoogleCloudAiplatformV1beta1ToolParameterKVMatchMetricValue" +}, +"readOnly": true, +"type": "array" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1ToolParameterKVMatchSpec": { +"description": "Spec for tool parameter key value match metric.", +"id": "GoogleCloudAiplatformV1beta1ToolParameterKVMatchSpec", +"properties": { +"useStrictStringMatch": { +"description": "Optional. 
Whether to use STRICT string match on parameter values.",
+"type": "boolean"
+}
+},
+"type": "object"
+},
+"GoogleCloudAiplatformV1beta1ToolParameterKeyMatchInput": {
+"description": "Input for tool parameter key match metric.",
+"id": "GoogleCloudAiplatformV1beta1ToolParameterKeyMatchInput",
+"properties": {
+"instances": {
+"description": "Required. Repeated tool parameter key match instances.",
+"items": {
+"$ref": "GoogleCloudAiplatformV1beta1ToolParameterKeyMatchInstance"
+},
+"type": "array"
+},
+"metricSpec": {
+"$ref": "GoogleCloudAiplatformV1beta1ToolParameterKeyMatchSpec",
+"description": "Required. Spec for tool parameter key match metric."
+}
+},
+"type": "object"
+},
+"GoogleCloudAiplatformV1beta1ToolParameterKeyMatchInstance": {
+"description": "Spec for tool parameter key match instance.",
+"id": "GoogleCloudAiplatformV1beta1ToolParameterKeyMatchInstance",
+"properties": {
+"prediction": {
+"description": "Required. Output of the evaluated model.",
+"type": "string"
+},
+"reference": {
+"description": "Required. Ground truth used to compare against the prediction.",
+"type": "string"
+}
+},
+"type": "object"
+},
+"GoogleCloudAiplatformV1beta1ToolParameterKeyMatchMetricValue": {
+"description": "Tool parameter key match metric value for an instance.",
+"id": "GoogleCloudAiplatformV1beta1ToolParameterKeyMatchMetricValue",
+"properties": {
+"score": {
+"description": "Output only. Tool parameter key match score.",
+"format": "float",
+"readOnly": true,
+"type": "number"
+}
+},
+"type": "object"
+},
+"GoogleCloudAiplatformV1beta1ToolParameterKeyMatchResults": {
+"description": "Results for tool parameter key match metric.",
+"id": "GoogleCloudAiplatformV1beta1ToolParameterKeyMatchResults",
+"properties": {
+"toolParameterKeyMatchMetricValues": {
+"description": "Output only. 
Tool parameter key match metric values.", +"items": { +"$ref": "GoogleCloudAiplatformV1beta1ToolParameterKeyMatchMetricValue" +}, +"readOnly": true, +"type": "array" +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1ToolParameterKeyMatchSpec": { +"description": "Spec for tool parameter key match metric.", +"id": "GoogleCloudAiplatformV1beta1ToolParameterKeyMatchSpec", +"properties": {}, +"type": "object" +}, "GoogleCloudAiplatformV1beta1ToolUseExample": { "description": "A single example of the tool usage.", "id": "GoogleCloudAiplatformV1beta1ToolUseExample", @@ -38504,6 +41324,47 @@ }, "type": "object" }, +"GoogleCloudAiplatformV1beta1UploadRagFileConfig": { +"description": "Config for uploading RagFile.", +"id": "GoogleCloudAiplatformV1beta1UploadRagFileConfig", +"properties": { +"ragFileChunkingConfig": { +"$ref": "GoogleCloudAiplatformV1beta1RagFileChunkingConfig", +"description": "Specifies the size and overlap of chunks after uploading RagFile." +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1UploadRagFileRequest": { +"description": "Request message for VertexRagDataService.UploadRagFile.", +"id": "GoogleCloudAiplatformV1beta1UploadRagFileRequest", +"properties": { +"ragFile": { +"$ref": "GoogleCloudAiplatformV1beta1RagFile", +"description": "Required. The RagFile to upload." +}, +"uploadRagFileConfig": { +"$ref": "GoogleCloudAiplatformV1beta1UploadRagFileConfig", +"description": "Required. The config for the RagFiles to be uploaded into the RagCorpus. VertexRagDataService.UploadRagFile." +} +}, +"type": "object" +}, +"GoogleCloudAiplatformV1beta1UploadRagFileResponse": { +"description": "Response message for VertexRagDataService.UploadRagFile.", +"id": "GoogleCloudAiplatformV1beta1UploadRagFileResponse", +"properties": { +"error": { +"$ref": "GoogleRpcStatus", +"description": "The error that occurred while processing the RagFile." 
+}, +"ragFile": { +"$ref": "GoogleCloudAiplatformV1beta1RagFile", +"description": "The RagFile that had been uploaded into the RagCorpus." +} +}, +"type": "object" +}, "GoogleCloudAiplatformV1beta1UpsertDatapointsRequest": { "description": "Request message for IndexService.UpsertDatapoints", "id": "GoogleCloudAiplatformV1beta1UpsertDatapointsRequest", @@ -38580,6 +41441,25 @@ }, "type": "object" }, +"GoogleCloudAiplatformV1beta1VertexRagStore": { +"description": "Retrieve from Vertex RAG Store for grounding.", +"id": "GoogleCloudAiplatformV1beta1VertexRagStore", +"properties": { +"ragCorpora": { +"description": "Required. Vertex RAG Store corpus resource name: projects/{project}/locations/{location}/ragCorpora/{ragCorpus} Currently only one corpus is allowed. In the future we may open up multiple corpora support. However, they should be from the same project and location.", +"items": { +"type": "string" +}, +"type": "array" +}, +"similarityTopK": { +"description": "Optional. Number of top k results to return from the selected corpora.", +"format": "int32", +"type": "integer" +} +}, +"type": "object" +}, "GoogleCloudAiplatformV1beta1VideoMetadata": { "description": "Metadata describes the input video content.", "id": "GoogleCloudAiplatformV1beta1VideoMetadata", @@ -42407,7 +45287,8 @@ "STEP_SCORE_THRESHOLDING", "STEP_MODEL_CONFIG_STOP_SEQUENCE_TRUNCATION", "STEP_CUSTOM_STOP_SEQUENCE_TRUNCATION", -"STEP_EXPECTED_SAMPLE_SIZE" +"STEP_EXPECTED_SAMPLE_SIZE", +"STEP_TREE_TRIM_TRUNCATION" ], "enumDeprecated": [ false, @@ -42425,6 +45306,7 @@ false, false, false, false, +false, false ], "enumDescriptions": [ @@ -42443,7 +45325,8 @@ false "Thresholding samples based on a minimum score.", "StopSequencePostProcessor.", "StopSequencePostProcessor.", -"Drop extra number of samples that exceeds expected_samples." +"Drop extra number of samples that exceeds expected_samples.", +"Truncated by highest end token score." 
], "type": "string" } @@ -42572,10 +45455,12 @@ false "FILTER_REASON_RAI_IMAGE_PORN", "FILTER_REASON_RAI_IMAGE_CSAM", "FILTER_REASON_RAI_IMAGE_PEDO", +"FILTER_REASON_RAI_IMAGE_CHILD", "FILTER_REASON_RAI_VIDEO_FRAME_VIOLENCE", "FILTER_REASON_RAI_VIDEO_FRAME_PORN", "FILTER_REASON_RAI_VIDEO_FRAME_CSAM", "FILTER_REASON_RAI_VIDEO_FRAME_PEDO", +"FILTER_REASON_RAI_VIDEO_FRAME_CHILD", "FILTER_REASON_RAI_CONTEXTUAL_DANGEROUS", "FILTER_REASON_RAI_GRAIL_TEXT", "FILTER_REASON_RAI_GRAIL_IMAGE", @@ -42585,7 +45470,8 @@ false "FILTER_REASON_ATLAS_BILLING", "FILTER_REASON_ATLAS_NON_ENGLISH_QUESTION", "FILTER_REASON_ATLAS_NOT_RELATED_TO_GCP", -"FILTER_REASON_ATLAS_AWS_AZURE_RELATED" +"FILTER_REASON_ATLAS_AWS_AZURE_RELATED", +"FILTER_REASON_XAI" ], "enumDescriptions": [ "Unknown filter reason.", @@ -42618,6 +45504,8 @@ false "RAI Filter", "RAI Filter", "RAI Filter", +"RAI Filter", +"RAI Filter", "Grail Text", "Grail Image", "SafetyCat.", @@ -42626,7 +45514,8 @@ false "Atlas specific topic filter for billing questions.", "Atlas specific topic filter for non english questions.", "Atlas specific topic filter for non GCP questions.", -"Atlas specific topic filter aws/azure related questions." +"Atlas specific topic filter aws/azure related questions.", +"Right now we don't do any filtering for XAI. 
Adding this just to differentiate the XAI output metadata from other SafetyCat RAI output metadata"
],
"type": "string"
},
@@ -42740,6 +45629,9 @@ false
"grailTextHarmType": {
"$ref": "LearningGenaiRootHarmGrailTextHarmType"
},
+"imageChild": {
+"type": "boolean"
+},
"imageCsam": {
"type": "boolean"
},
@@ -42767,6 +45659,9 @@ false
"format": "double",
"type": "number"
},
+"videoFrameChild": {
+"type": "boolean"
+},
"videoFrameCsam": {
"type": "boolean"
},
@@ -43124,6 +46019,14 @@ false
},
"modelId": {
"type": "string"
+},
+"pickedAsFallback": {
+"description": "If true, the model was selected as a fallback, since no model met requirements.",
+"type": "boolean"
+},
+"selected": {
+"description": "If true, the model was selected since it met the requirements.",
+"type": "boolean"
+}
},
"type": "object"