Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .kokoro/test-samples-impl.sh
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ env | grep KOKORO

# Install nox
# `virtualenv==20.26.6` is added for Python 3.7 compatibility
python3 -m pip install --upgrade --quiet nox uv virtualenv==20.26.6
python3 -m pip install --upgrade --quiet nox virtualenv==20.26.6

# Use secrets accessor service account to get secrets
if [[ -f "${KOKORO_GFILE_DIR}/secrets_viewer_service_account.json" ]]; then
Expand Down
26 changes: 26 additions & 0 deletions google/cloud/aiplatform_v1/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -123,15 +123,21 @@
from .types.content import LogprobsResult
from .types.content import ModalityTokenCount
from .types.content import ModelArmorConfig
from .types.content import MultiSpeakerVoiceConfig
from .types.content import Part
from .types.content import PrebuiltVoiceConfig
from .types.content import ReplicatedVoiceConfig
from .types.content import RetrievalMetadata
from .types.content import SafetyRating
from .types.content import SafetySetting
from .types.content import SearchEntryPoint
from .types.content import Segment
from .types.content import SpeakerVoiceConfig
from .types.content import SpeechConfig
from .types.content import UrlContextMetadata
from .types.content import UrlMetadata
from .types.content import VideoMetadata
from .types.content import VoiceConfig
from .types.content import HarmCategory
from .types.content import Modality
from .types.context import Context
Expand Down Expand Up @@ -837,6 +843,8 @@
from .types.prediction_service import DirectPredictResponse
from .types.prediction_service import DirectRawPredictRequest
from .types.prediction_service import DirectRawPredictResponse
from .types.prediction_service import EmbedContentRequest
from .types.prediction_service import EmbedContentResponse
from .types.prediction_service import ExplainRequest
from .types.prediction_service import ExplainResponse
from .types.prediction_service import GenerateContentRequest
Expand Down Expand Up @@ -963,6 +971,9 @@
from .types.tool import FunctionCallingConfig
from .types.tool import FunctionDeclaration
from .types.tool import FunctionResponse
from .types.tool import FunctionResponseBlob
from .types.tool import FunctionResponseFileData
from .types.tool import FunctionResponsePart
from .types.tool import GoogleMaps
from .types.tool import GoogleSearchRetrieval
from .types.tool import RagRetrievalConfig
Expand All @@ -980,6 +991,7 @@
from .types.training_pipeline import StratifiedSplit
from .types.training_pipeline import TimestampSplit
from .types.training_pipeline import TrainingPipeline
from .types.tuning_job import PreTunedModel
from .types.tuning_job import SupervisedHyperParameters
from .types.tuning_job import SupervisedTuningDatasetDistribution
from .types.tuning_job import SupervisedTuningDataStats
Expand All @@ -995,6 +1007,7 @@
from .types.types import StringArray
from .types.types import Tensor
from .types.unmanaged_container_model import UnmanagedContainerModel
from .types.usage_metadata import UsageMetadata
from .types.user_action_reference import UserActionReference
from .types.value import Value
from .types.vertex_rag_data import CorpusStatus
Expand Down Expand Up @@ -1348,6 +1361,8 @@
"DnsPeeringConfig",
"DoubleArray",
"DynamicRetrievalConfig",
"EmbedContentRequest",
"EmbedContentResponse",
"EncryptionSpec",
"Endpoint",
"EndpointServiceClient",
Expand Down Expand Up @@ -1438,6 +1453,9 @@
"FunctionCallingConfig",
"FunctionDeclaration",
"FunctionResponse",
"FunctionResponseBlob",
"FunctionResponseFileData",
"FunctionResponsePart",
"GcsDestination",
"GcsSource",
"GenAiAdvancedFeaturesConfig",
Expand Down Expand Up @@ -1689,6 +1707,7 @@
"ModelServiceClient",
"ModelSourceInfo",
"ModelVersionCheckpoint",
"MultiSpeakerVoiceConfig",
"MutateDeployedIndexOperationMetadata",
"MutateDeployedIndexRequest",
"MutateDeployedIndexResponse",
Expand Down Expand Up @@ -1752,6 +1771,8 @@
"PointwiseMetricSpec",
"Port",
"PostStartupScriptConfig",
"PreTunedModel",
"PrebuiltVoiceConfig",
"PredefinedSplit",
"PredictRequest",
"PredictRequestResponseLoggingConfig",
Expand Down Expand Up @@ -1840,6 +1861,7 @@
"RemoveContextChildrenResponse",
"RemoveDatapointsRequest",
"RemoveDatapointsResponse",
"ReplicatedVoiceConfig",
"ReservationAffinity",
"ResourcePool",
"ResourceRuntime",
Expand Down Expand Up @@ -1893,9 +1915,11 @@
"ShieldedVmConfig",
"SlackSource",
"SmoothGradConfig",
"SpeakerVoiceConfig",
"SpecialistPool",
"SpecialistPoolServiceClient",
"SpeculativeDecodingSpec",
"SpeechConfig",
"StartNotebookRuntimeOperationMetadata",
"StartNotebookRuntimeRequest",
"StartNotebookRuntimeResponse",
Expand Down Expand Up @@ -2061,6 +2085,7 @@
"UrlContext",
"UrlContextMetadata",
"UrlMetadata",
"UsageMetadata",
"UserActionReference",
"Value",
"VertexAISearch",
Expand All @@ -2070,6 +2095,7 @@
"VertexRagStore",
"VideoMetadata",
"VizierServiceClient",
"VoiceConfig",
"WorkerPoolSpec",
"WriteFeatureValuesPayload",
"WriteFeatureValuesRequest",
Expand Down
15 changes: 15 additions & 0 deletions google/cloud/aiplatform_v1/gapic_metadata.json
Original file line number Diff line number Diff line change
Expand Up @@ -4091,6 +4091,11 @@
"direct_raw_predict"
]
},
"EmbedContent": {
"methods": [
"embed_content"
]
},
"Explain": {
"methods": [
"explain"
Expand Down Expand Up @@ -4161,6 +4166,11 @@
"direct_raw_predict"
]
},
"EmbedContent": {
"methods": [
"embed_content"
]
},
"Explain": {
"methods": [
"explain"
Expand Down Expand Up @@ -4231,6 +4241,11 @@
"direct_raw_predict"
]
},
"EmbedContent": {
"methods": [
"embed_content"
]
},
"Explain": {
"methods": [
"explain"
Expand Down
123 changes: 123 additions & 0 deletions google/cloud/aiplatform_v1/services/prediction_service/async_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -50,9 +50,11 @@

from google.api import httpbody_pb2 # type: ignore
from google.cloud.aiplatform_v1.types import content
from google.cloud.aiplatform_v1.types import content as gca_content
from google.cloud.aiplatform_v1.types import explanation
from google.cloud.aiplatform_v1.types import prediction_service
from google.cloud.aiplatform_v1.types import types
from google.cloud.aiplatform_v1.types import usage_metadata
from google.cloud.location import locations_pb2 # type: ignore
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
Expand Down Expand Up @@ -1931,6 +1933,127 @@ async def sample_stream_generate_content():
# Done; return the response.
return response

async def embed_content(
    self,
    request: Optional[Union[prediction_service.EmbedContentRequest, dict]] = None,
    *,
    model: Optional[str] = None,
    content: Optional[gca_content.Content] = None,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: Union[float, object] = gapic_v1.method.DEFAULT,
    metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
) -> prediction_service.EmbedContentResponse:
    r"""Embed content with multimodal inputs.

    .. code-block:: python

        # This snippet has been automatically generated and should be regarded as a
        # code template only.
        # It will require modifications to work:
        # - It may require correct/in-range values for request initialization.
        # - It may require specifying regional endpoints when creating the service
        #   client as shown in:
        #   https://googleapis.dev/python/google-api-core/latest/client_options.html
        from google.cloud import aiplatform_v1

        async def sample_embed_content():
            # Create a client
            client = aiplatform_v1.PredictionServiceAsyncClient()

            # Initialize request argument(s)
            request = aiplatform_v1.EmbedContentRequest(
            )

            # Make the request
            response = await client.embed_content(request=request)

            # Handle the response
            print(response)

    Args:
        request (Optional[Union[google.cloud.aiplatform_v1.types.EmbedContentRequest, dict]]):
            The request object. Request message for
            [PredictionService.EmbedContent][google.cloud.aiplatform.v1.PredictionService.EmbedContent].
        model (:class:`str`):
            Required. The name of the publisher model requested to
            serve the prediction. Format:
            ``projects/{project}/locations/{location}/publishers/*/models/*``

            This corresponds to the ``model`` field
            on the ``request`` instance; if ``request`` is provided, this
            should not be set.
        content (:class:`google.cloud.aiplatform_v1.types.Content`):
            Required. Input content to be embedded.

            This corresponds to the ``content`` field
            on the ``request`` instance; if ``request`` is provided, this
            should not be set.
        retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
            sent along with the request as metadata. Normally, each value must be of type `str`,
            but for metadata keys ending with the suffix `-bin`, the corresponding values must
            be of type `bytes`.

    Returns:
        google.cloud.aiplatform_v1.types.EmbedContentResponse:
            Response message for
            [PredictionService.EmbedContent][google.cloud.aiplatform.v1.PredictionService.EmbedContent].

    Raises:
        ValueError: If ``request`` is supplied together with any of the
            flattened field arguments (``model``, ``content``).
    """
    # Create or coerce a protobuf request object.
    # - Quick check: a full `request` object and the flattened keyword
    #   arguments that map onto it are mutually exclusive.
    has_flattened_params = any(
        param is not None for param in (model, content)
    )
    if request is not None and has_flattened_params:
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )

    # - Use the request object if provided (there's no risk of modifying the input as
    #   there are no flattened fields), or create one.
    if not isinstance(request, prediction_service.EmbedContentRequest):
        request = prediction_service.EmbedContentRequest(request)

    # If we have keyword arguments corresponding to fields on the
    # request, apply these.
    if model is not None:
        request.model = model
    if content is not None:
        request.content = content

    # Wrap the RPC method; this adds retry and timeout information,
    # and friendly error handling.
    rpc = self._client._transport._wrapped_methods[
        self._client._transport.embed_content
    ]

    # Certain fields should be provided within the metadata header;
    # add these here so the backend can route the request by model.
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata((("model", request.model),)),
    )

    # Validate the universe domain.
    self._client._validate_universe_domain()

    # Send the request.
    response = await rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )

    # Done; return the response.
    return response

async def list_operations(
self,
request: Optional[operations_pb2.ListOperationsRequest] = None,
Expand Down
Loading