Skip to content

Commit

Permalink
Standardize the location parameter (#11745)
Browse files Browse the repository at this point in the history
* Adds the location fixes

* Updated variable name REGION to LOCATION

* Fixed issues in comments

* Fixed linter issues

* Update gemini_tuning.py

convert LOCATION to location

* Update generative_ai/multimodal_embedding_video.py

---------

Co-authored-by: rohith-egen <rohith.alla@egen.ai>
Co-authored-by: nileshspringml <nilesh.mahajan@egen.ai>
Co-authored-by: Holt Skinner <13262395+holtskinner@users.noreply.github.com>
  • Loading branch information
4 people committed May 11, 2024
1 parent 9f412c8 commit b4ae6e7
Show file tree
Hide file tree
Showing 13 changed files with 24 additions and 32 deletions.
2 changes: 2 additions & 0 deletions generative_ai/gemini_tuning.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@ def gemini_tuning_basic(project_id: str) -> sft.SupervisedTuningJob:
# [START generativeaionvertexai_tuning_basic]

import time

import vertexai
from vertexai.preview.tuning import sft

Expand Down Expand Up @@ -51,6 +52,7 @@ def gemini_tuning_advanced(project_id: str) -> sft.SupervisedTuningJob:
# [START generativeaionvertexai_tuning_advanced]

import time

import vertexai
from vertexai.preview.tuning import sft

Expand Down
7 changes: 4 additions & 3 deletions generative_ai/gemini_tuning_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@
import gemini_tuning

PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT")
REGION = "us-central1"
LOCATION = "us-central1"
MODEL_ID = "gemini-1.5-pro-preview-0409"
TUNING_JOB_ID = "4982013113894174720"

Expand All @@ -34,7 +34,8 @@ def test_gemini_tuning() -> None:


def test_get_tuning_job() -> None:
response = gemini_tuning.get_tuning_job(PROJECT_ID, REGION, TUNING_JOB_ID)
response = gemini_tuning.get_tuning_job(
PROJECT_ID, LOCATION, TUNING_JOB_ID)
assert response


Expand All @@ -45,4 +46,4 @@ def test_list_tuning_jobs() -> None:

@pytest.mark.skip(reason="Skip due to tuning taking a long time.")
def test_cancel_tuning_job() -> None:
gemini_tuning.cancel_tuning_job(PROJECT_ID, REGION, TUNING_JOB_ID)
gemini_tuning.cancel_tuning_job(PROJECT_ID, LOCATION, TUNING_JOB_ID)
9 changes: 4 additions & 5 deletions generative_ai/inference/inference_api_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,29 +21,28 @@


PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT")
REGION = "us-central1"
MODEL_ID = "gemini-1.5-pro-preview-0409"


def test_non_stream_text_basic() -> None:
response = non_stream_text_basic.generate_content(PROJECT_ID, REGION, MODEL_ID)
response = non_stream_text_basic.generate_content(PROJECT_ID, MODEL_ID)
assert response


def test_non_stream_multi_modality_basic() -> None:
response = non_stream_multimodality_basic.generate_content(
PROJECT_ID, REGION, MODEL_ID
PROJECT_ID, MODEL_ID
)
assert response


def test_stream_text_basic() -> None:
responses = stream_text_basic.generate_content(PROJECT_ID, REGION, MODEL_ID)
responses = stream_text_basic.generate_content(PROJECT_ID, MODEL_ID)
assert responses


def test_stream_multi_modality_basic() -> None:
responses = stream_multimodality_basic.generate_content(
PROJECT_ID, REGION, MODEL_ID
PROJECT_ID, MODEL_ID
)
assert responses
4 changes: 2 additions & 2 deletions generative_ai/inference/non_stream_multimodality_basic.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,13 +13,13 @@
# limitations under the License.


def generate_content(PROJECT_ID: str, REGION: str, MODEL_ID: str) -> object:
def generate_content(PROJECT_ID: str, MODEL_ID: str) -> object:
# [START generativeaionvertexai_non_stream_multimodality_basic]
import vertexai

from vertexai.generative_models import GenerativeModel, Part

vertexai.init(project=PROJECT_ID, location=REGION)
vertexai.init(project=PROJECT_ID, location="us-central1")

model = GenerativeModel(MODEL_ID)
response = model.generate_content(
Expand Down
4 changes: 2 additions & 2 deletions generative_ai/inference/non_stream_text_basic.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,13 +13,13 @@
# limitations under the License.


def generate_content(PROJECT_ID: str, REGION: str, MODEL_ID: str) -> object:
def generate_content(PROJECT_ID: str, MODEL_ID: str) -> object:
# [START generativeaionvertexai_non_stream_text_basic]
import vertexai

from vertexai.generative_models import GenerativeModel

vertexai.init(project=PROJECT_ID, location=REGION)
vertexai.init(project=PROJECT_ID, location="us-central1")

model = GenerativeModel(MODEL_ID)
response = model.generate_content("Write a story about a magic backpack.")
Expand Down
4 changes: 2 additions & 2 deletions generative_ai/inference/stream_multimodality_basic.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,13 +13,13 @@
# limitations under the License.


def generate_content(PROJECT_ID: str, REGION: str, MODEL_ID: str) -> object:
def generate_content(PROJECT_ID: str, MODEL_ID: str) -> object:
# [START generativeaionvertexai_stream_multimodality_basic]
import vertexai

from vertexai.generative_models import GenerativeModel, Part

vertexai.init(project=PROJECT_ID, location=REGION)
vertexai.init(project=PROJECT_ID, location="us-central1")

model = GenerativeModel(MODEL_ID)
responses = model.generate_content(
Expand Down
4 changes: 2 additions & 2 deletions generative_ai/inference/stream_text_basic.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,13 +13,13 @@
# limitations under the License.


def generate_content(PROJECT_ID: str, REGION: str, MODEL_ID: str) -> object:
def generate_content(PROJECT_ID: str, MODEL_ID: str) -> object:
# [START generativeaionvertexai_stream_text_basic]
import vertexai

from vertexai.generative_models import GenerativeModel

vertexai.init(project=PROJECT_ID, location=REGION)
vertexai.init(project=PROJECT_ID, location="us-central1")

model = GenerativeModel(MODEL_ID)
responses = model.generate_content(
Expand Down
6 changes: 2 additions & 4 deletions generative_ai/multimodal_embedding_image.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,24 +19,22 @@

def get_image_embeddings(
project_id: str,
location: str,
image_path: str,
contextual_text: Optional[str] = None,
) -> MultiModalEmbeddingResponse:
"""Example of how to generate multimodal embeddings from image and text.
Args:
project_id: Google Cloud Project ID, used to initialize vertexai
location: Google Cloud Region, used to initialize vertexai
image_path: Path to image (local or Google Cloud Storage) to generate embeddings for.
contextual_text: Text to generate embeddings for.
"""
# [START aiplatform_sdk_multimodal_embedding_image]
import vertexai
from vertexai.vision_models import Image, MultiModalEmbeddingModel

# TODO(developer): Update values for project_id, location, image_path & contextual_text
vertexai.init(project=project_id, location=location)
# TODO(developer): Update values for project_id, image_path & contextual_text
vertexai.init(project=project_id, location="us-central1")

model = MultiModalEmbeddingModel.from_pretrained("multimodalembedding")
image = Image.load_from_file(image_path)
Expand Down
2 changes: 0 additions & 2 deletions generative_ai/multimodal_embedding_image_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,14 +19,12 @@
import multimodal_embedding_image

_PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT")
_LOCATION = "us-central1"


@backoff.on_exception(backoff.expo, ResourceExhausted, max_time=10)
def test_multimodal_embedding_image() -> None:
embeddings = multimodal_embedding_image.get_image_embeddings(
project_id=_PROJECT_ID,
location=_LOCATION,
image_path="gs://cloud-samples-data/vertex-ai/llm/prompts/landmark1.png",
contextual_text="Colosseum",
)
Expand Down
5 changes: 2 additions & 3 deletions generative_ai/multimodal_embedding_image_video_text.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,6 @@

def get_image_video_text_embeddings(
project_id: str,
location: str,
image_path: str,
video_path: str,
contextual_text: Optional[str] = None,
Expand All @@ -44,9 +43,9 @@ def get_image_video_text_embeddings(

from vertexai.vision_models import Image, MultiModalEmbeddingModel, Video

# TODO(developer): Update values for project_id, location,
# TODO(developer): Update values for project_id,
# image_path, video_path, contextual_text, video_segment_config
vertexai.init(project=project_id, location=location)
vertexai.init(project=project_id, location="us-central1")

model = MultiModalEmbeddingModel.from_pretrained("multimodalembedding")
image = Image.load_from_file(image_path)
Expand Down
2 changes: 0 additions & 2 deletions generative_ai/multimodal_embedding_image_video_text_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,14 +19,12 @@
import multimodal_embedding_image_video_text

_PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT")
_LOCATION = "us-central1"


@backoff.on_exception(backoff.expo, ResourceExhausted, max_time=10)
def test_multimodal_embedding_image_video_text() -> None:
embeddings = multimodal_embedding_image_video_text.get_image_video_text_embeddings(
project_id=_PROJECT_ID,
location=_LOCATION,
image_path="gs://cloud-samples-data/vertex-ai/llm/prompts/landmark1.png",
video_path="gs://cloud-samples-data/vertex-ai-vision/highway_vehicles.mp4",
contextual_text="Cars on Highway",
Expand Down
5 changes: 2 additions & 3 deletions generative_ai/multimodal_embedding_video.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,6 @@

def get_video_embeddings(
project_id: str,
location: str,
video_path: str,
contextual_text: Optional[str] = None,
dimension: Optional[int] = 1408,
Expand All @@ -42,9 +41,9 @@ def get_video_embeddings(

from vertexai.vision_models import MultiModalEmbeddingModel, Video

# TODO(developer): Update values for project_id, location,
# TODO(developer): Update values for project_id,
# video_path, contextual_text, dimension, video_segment_config
vertexai.init(project=project_id, location=location)
vertexai.init(project=project_id, location="us-central1")

model = MultiModalEmbeddingModel.from_pretrained("multimodalembedding")
video = Video.load_from_file(video_path)
Expand Down
2 changes: 0 additions & 2 deletions generative_ai/multimodal_embedding_video_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,14 +19,12 @@
import multimodal_embedding_video

_PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT")
_LOCATION = "us-central1"


@backoff.on_exception(backoff.expo, ResourceExhausted, max_time=10)
def test_multimodal_embedding_video() -> None:
embeddings = multimodal_embedding_video.get_video_embeddings(
project_id=_PROJECT_ID,
location=_LOCATION,
video_path="gs://cloud-samples-data/vertex-ai-vision/highway_vehicles.mp4",
contextual_text="Cars on Highway",
)
Expand Down

0 comments on commit b4ae6e7

Please sign in to comment.