automl: add natural language sentiment analysis ga samples [(#2677)](GoogleCloudPlatform/python-docs-samples#2677)

* automl: add natural language sentiment analysis ga samples

* Add links to documentation

* Update tests to use centralized project

* Fix environment variable, make translate test less flaky
nnegrey committed Jan 7, 2020
1 parent e529d69 commit 6c3db43
Showing 25 changed files with 296 additions and 21 deletions.
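The tests touched by this commit now read their project and resource IDs from environment variables instead of hard-coded values. A minimal pre-flight check one could run before pytest, not part of the commit (the variable names come from the diff below; the check itself is illustrative):

# Illustrative pre-flight check (not part of this commit): verify that the
# environment variables the updated tests read are set before running pytest.
import os

required = [
    "AUTOML_PROJECT_ID",
    "ENTITY_EXTRACTION_MODEL_ID",
    "SENTIMENT_ANALYSIS_DATASET_ID",
    "SENTIMENT_ANALYSIS_MODEL_ID",
    "TRANSLATION_DATASET_ID",
    "TRANSLATION_MODEL_ID",
]
missing = [name for name in required if name not in os.environ]
if missing:
    raise RuntimeError("Missing environment variables: " + ", ".join(missing))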
2 changes: 1 addition & 1 deletion samples/snippets/delete_model_test.py
@@ -16,7 +16,7 @@

import delete_model

PROJECT_ID = os.environ["GCLOUD_PROJECT"]
PROJECT_ID = os.environ["AUTOML_PROJECT_ID"]


def test_delete_model(capsys):
2 changes: 1 addition & 1 deletion samples/snippets/deploy_model_test.py
@@ -18,7 +18,7 @@

import deploy_model

PROJECT_ID = os.environ["GCLOUD_PROJECT"]
PROJECT_ID = os.environ["AUTOML_PROJECT_ID"]
MODEL_ID = "TRL0000000000000000000"


4 changes: 2 additions & 2 deletions samples/snippets/get_model_evaluation_test.py
@@ -19,8 +19,8 @@

import get_model_evaluation

PROJECT_ID = os.environ["GCLOUD_PROJECT"]
MODEL_ID = "TEN1499896588007374848"
PROJECT_ID = os.environ["AUTOML_PROJECT_ID"]
MODEL_ID = os.environ["ENTITY_EXTRACTION_MODEL_ID"]


@pytest.fixture(scope="function")
4 changes: 2 additions & 2 deletions samples/snippets/get_model_test.py
@@ -16,8 +16,8 @@

import get_model

PROJECT_ID = os.environ["GCLOUD_PROJECT"]
MODEL_ID = "TEN1499896588007374848"
PROJECT_ID = os.environ["AUTOML_PROJECT_ID"]
MODEL_ID = os.environ["ENTITY_EXTRACTION_MODEL_ID"]


def test_get_model(capsys):
50 changes: 50 additions & 0 deletions samples/snippets/language_sentiment_analysis_create_dataset.py
@@ -0,0 +1,50 @@
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


def create_dataset(project_id, display_name):
"""Create a dataset."""
# [START automl_language_sentiment_analysis_create_dataset]
from google.cloud import automl

# TODO(developer): Uncomment and set the following variables
# project_id = "YOUR_PROJECT_ID"
# display_name = "YOUR_DATASET_NAME"

client = automl.AutoMlClient()

# A resource that represents Google Cloud Platform location.
project_location = client.location_path(project_id, "us-central1")

# Each dataset requires a sentiment score with a defined sentiment_max
# value, for more information on TextSentimentDatasetMetadata, see:
# https://cloud.google.com/natural-language/automl/docs/prepare#sentiment-analysis
# https://cloud.google.com/automl/docs/reference/rpc/google.cloud.automl.v1#textsentimentdatasetmetadata
metadata = automl.types.TextSentimentDatasetMetadata(
sentiment_max=4
) # Possible max sentiment score: 1-10

dataset = automl.types.Dataset(
display_name=display_name, text_sentiment_dataset_metadata=metadata
)

# Create a dataset with the dataset metadata in the region.
response = client.create_dataset(project_location, dataset)

created_dataset = response.result()

# Display the dataset information
print("Dataset name: {}".format(created_dataset.name))
print("Dataset id: {}".format(created_dataset.name.split("/")[-1]))
# [END automl_language_sentiment_analysis_create_dataset]
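A minimal usage sketch for the sample above (not part of the commit; the project ID and dataset display name are placeholders):

# Illustrative call to the sample above; "my-project-id" and the display
# name are placeholders. create_dataset blocks on the long-running
# operation and then prints the dataset name and ID.
import language_sentiment_analysis_create_dataset

language_sentiment_analysis_create_dataset.create_dataset(
    "my-project-id", "my_sentiment_dataset"
)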
41 changes: 41 additions & 0 deletions samples/snippets/language_sentiment_analysis_create_dataset_test.py
@@ -0,0 +1,41 @@
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import datetime
import os

from google.cloud import automl

import language_sentiment_analysis_create_dataset


PROJECT_ID = os.environ["AUTOML_PROJECT_ID"]


def test_sentiment_analysis_create_dataset(capsys):
dataset_name = "test_" + datetime.datetime.now().strftime("%Y%m%d%H%M%S")
language_sentiment_analysis_create_dataset.create_dataset(
PROJECT_ID, dataset_name
)
out, _ = capsys.readouterr()
assert "Dataset id: " in out

# Delete the created dataset
dataset_id = out.splitlines()[1].split()[2]
client = automl.AutoMlClient()
dataset_full_id = client.dataset_path(
PROJECT_ID, "us-central1", dataset_id
)
response = client.delete_dataset(dataset_full_id)
response.result()
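The test above recovers the dataset ID from the second line of the captured output; a small illustration of that parsing (the resource IDs are made up):

# Made-up output in the format printed by create_dataset; the second line
# is split on whitespace and its third token is the dataset ID.
out = (
    "Dataset name: projects/123/locations/us-central1/datasets/TST0000000000000000000\n"
    "Dataset id: TST0000000000000000000\n"
)
dataset_id = out.splitlines()[1].split()[2]
print(dataset_id)  # TST0000000000000000000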
43 changes: 43 additions & 0 deletions samples/snippets/language_sentiment_analysis_create_model.py
@@ -0,0 +1,43 @@
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


def create_model(project_id, dataset_id, display_name):
"""Create a model."""
# [START automl_language_sentiment_analysis_create_model]
from google.cloud import automl

# TODO(developer): Uncomment and set the following variables
# project_id = "YOUR_PROJECT_ID"
# dataset_id = "YOUR_DATASET_ID"
# display_name = "YOUR_MODEL_NAME"

client = automl.AutoMlClient()

# A resource that represents Google Cloud Platform location.
project_location = client.location_path(project_id, "us-central1")
# Leave model unset to use the default base model provided by Google
metadata = automl.types.TextSentimentModelMetadata()
model = automl.types.Model(
display_name=display_name,
dataset_id=dataset_id,
text_sentiment_model_metadata=metadata,
)

# Create a model with the model metadata in the region.
response = client.create_model(project_location, model)

print("Training operation name: {}".format(response.operation.name))
print("Training started...")
# [END automl_language_sentiment_analysis_create_model]
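The sample returns as soon as training starts. A hedged sketch, continuing from the response operation created above, of how a caller could instead block until training finishes (training a sentiment model can take hours):

# Sketch only, continuing from the sample's response long-running
# operation: wait for training to complete and print the resulting model.
trained_model = response.result()
print("Model name: {}".format(trained_model.name))
print("Model id: {}".format(trained_model.name.split("/")[-1]))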
37 changes: 37 additions & 0 deletions samples/snippets/language_sentiment_analysis_create_model_test.py
@@ -0,0 +1,37 @@
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os

from google.cloud import automl
import pytest

import language_sentiment_analysis_create_model

PROJECT_ID = os.environ["AUTOML_PROJECT_ID"]
DATASET_ID = os.environ["SENTIMENT_ANALYSIS_DATASET_ID"]


@pytest.mark.slow
def test_sentiment_analysis_create_model(capsys):
language_sentiment_analysis_create_model.create_model(
PROJECT_ID, DATASET_ID, "object_test_create_model"
)
out, _ = capsys.readouterr()
assert "Training started" in out

# Cancel the operation
operation_id = out.split("Training operation name: ")[1].split("\n")[0]
client = automl.AutoMlClient()
client.transport._operations_client.cancel_operation(operation_id)
51 changes: 51 additions & 0 deletions samples/snippets/language_sentiment_analysis_predict.py
@@ -0,0 +1,51 @@
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


def predict(project_id, model_id, content):
"""Predict."""
# [START automl_language_sentiment_analysis_predict]
from google.cloud import automl

# TODO(developer): Uncomment and set the following variables
# project_id = "YOUR_PROJECT_ID"
# model_id = "YOUR_MODEL_ID"
# content = "text to predict"

prediction_client = automl.PredictionServiceClient()

# Get the full path of the model.
model_full_id = prediction_client.model_path(
project_id, "us-central1", model_id
)

# Supported mime_types: 'text/plain', 'text/html'
# https://cloud.google.com/automl/docs/reference/rpc/google.cloud.automl.v1#textsnippet
text_snippet = automl.types.TextSnippet(
content=content, mime_type="text/plain"
)
payload = automl.types.ExamplePayload(text_snippet=text_snippet)

response = prediction_client.predict(model_full_id, payload)

for annotation_payload in response.payload:
print(
"Predicted class name: {}".format(annotation_payload.display_name)
)
print(
"Predicted sentiment score: {}".format(
annotation_payload.text_sentiment.sentiment
)
)
# [END automl_language_sentiment_analysis_predict]
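A minimal usage sketch for the prediction sample (not part of the commit; the project and model IDs are placeholders, and the printed sentiment score is an integer between 0 and the dataset's sentiment_max):

# Illustrative call to the sample above; both IDs are placeholders. The
# input text matches the one used by the test below.
import language_sentiment_analysis_predict

language_sentiment_analysis_predict.predict(
    "my-project-id",
    "TST0000000000000000000",
    "Hopefully this Claritin kicks in soon",
)
# Prints lines such as:
#   Predicted sentiment score: 3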
43 changes: 43 additions & 0 deletions samples/snippets/language_sentiment_analysis_predict_test.py
@@ -0,0 +1,43 @@
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os

from google.cloud import automl
import pytest

import language_sentiment_analysis_predict

PROJECT_ID = os.environ["AUTOML_PROJECT_ID"]
MODEL_ID = os.environ["SENTIMENT_ANALYSIS_MODEL_ID"]


@pytest.fixture(scope="function")
def verify_model_state():
client = automl.AutoMlClient()
model_full_id = client.model_path(PROJECT_ID, "us-central1", MODEL_ID)

model = client.get_model(model_full_id)
if model.deployment_state == automl.enums.Model.DeploymentState.UNDEPLOYED:
# Deploy model if it is not deployed
response = client.deploy_model(model_full_id)
response.result()


def test_predict(capsys, verify_model_state):
verify_model_state
text = "Hopefully this Claritin kicks in soon"
language_sentiment_analysis_predict.predict(PROJECT_ID, MODEL_ID, text)
out, _ = capsys.readouterr()
assert "Predicted sentiment score: " in out
4 changes: 2 additions & 2 deletions samples/snippets/list_model_evaluations_test.py
@@ -17,8 +17,8 @@
import list_model_evaluations


PROJECT_ID = os.environ["GCLOUD_PROJECT"]
MODEL_ID = "TEN1499896588007374848"
PROJECT_ID = os.environ["AUTOML_PROJECT_ID"]
MODEL_ID = os.environ["ENTITY_EXTRACTION_MODEL_ID"]


def test_list_get_eval_model(capsys):
2 changes: 1 addition & 1 deletion samples/snippets/list_models_test.py
@@ -16,7 +16,7 @@

import list_models

PROJECT_ID = os.environ["GCLOUD_PROJECT"]
PROJECT_ID = os.environ["AUTOML_PROJECT_ID"]


def test_list_get_eval_model(capsys):
2 changes: 2 additions & 0 deletions samples/snippets/translate_create_dataset.py
@@ -26,6 +26,8 @@ def create_dataset(project_id, display_name):

# A resource that represents Google Cloud Platform location.
project_location = client.location_path(project_id, "us-central1")
# For a list of supported languages, see:
# https://cloud.google.com/translate/automl/docs/languages
dataset_metadata = automl.types.TranslationDatasetMetadata(
source_language_code="en", target_language_code="ja"
)
2 changes: 1 addition & 1 deletion samples/snippets/translate_create_dataset_test.py
@@ -20,7 +20,7 @@
import translate_create_dataset


PROJECT_ID = os.environ["GCLOUD_PROJECT"]
PROJECT_ID = os.environ["AUTOML_PROJECT_ID"]


def test_translate_create_dataset(capsys):
4 changes: 2 additions & 2 deletions samples/snippets/translate_create_model_test.py
@@ -18,8 +18,8 @@

import translate_create_model

PROJECT_ID = os.environ["GCLOUD_PROJECT"]
DATASET_ID = "TRL3876092572857648864"
PROJECT_ID = os.environ["AUTOML_PROJECT_ID"]
DATASET_ID = os.environ["TRANSLATION_DATASET_ID"]


def test_translate_create_model(capsys):
4 changes: 2 additions & 2 deletions samples/snippets/translate_predict_test.py
@@ -19,8 +19,8 @@

import translate_predict

PROJECT_ID = os.environ["GCLOUD_PROJECT"]
MODEL_ID = "TRL3128559826197068699"
PROJECT_ID = os.environ["AUTOML_PROJECT_ID"]
MODEL_ID = os.environ["TRANSLATION_MODEL_ID"]


@pytest.fixture(scope="function")
2 changes: 1 addition & 1 deletion samples/snippets/undeploy_model_test.py
@@ -18,7 +18,7 @@

import undeploy_model

PROJECT_ID = os.environ["GCLOUD_PROJECT"]
PROJECT_ID = os.environ["AUTOML_PROJECT_ID"]
MODEL_ID = "TRL0000000000000000000"


1 change: 1 addition & 0 deletions samples/snippets/vision_classification_create_dataset.py
@@ -30,6 +30,7 @@ def create_dataset(project_id, display_name):
# Types:
# MultiLabel: Multiple labels are allowed for one example.
# MultiClass: At most one label is allowed per example.
# https://cloud.google.com/automl/docs/reference/rpc/google.cloud.automl.v1#classificationtype
metadata = automl.types.ImageClassificationDatasetMetadata(
classification_type=automl.enums.ClassificationType.MULTILABEL
)
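For comparison (not part of this commit), the same dataset metadata with at most one label per example would use the MULTICLASS value of the classification type enum documented at the link above:

# Illustrative alternative to the MULTILABEL setting above.
metadata = automl.types.ImageClassificationDatasetMetadata(
    classification_type=automl.enums.ClassificationType.MULTICLASS
)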
2 changes: 1 addition & 1 deletion samples/snippets/vision_classification_create_dataset_test.py
@@ -21,7 +21,7 @@
import vision_classification_create_dataset


PROJECT_ID = os.environ["GCLOUD_PROJECT"]
PROJECT_ID = os.environ["AUTOML_PROJECT_ID"]


@pytest.mark.slow