diff --git a/.kokoro/tests/run_tests.sh b/.kokoro/tests/run_tests.sh
index 953eb744bb61..6609b58d2f20 100755
--- a/.kokoro/tests/run_tests.sh
+++ b/.kokoro/tests/run_tests.sh
@@ -39,6 +39,7 @@ SECRETS_PASSWORD=$(cat "${KOKORO_GFILE_DIR}/secrets-password.txt")
 source ./testing/test-env.sh
 export GOOGLE_APPLICATION_CREDENTIALS=$(pwd)/testing/service-account.json
 export GOOGLE_CLIENT_SECRETS=$(pwd)/testing/client-secrets.json
+source "${KOKORO_GFILE_DIR}/automl_secrets.txt"
 
 # For Datalabeling samples to hit the testing endpoint
 export DATALABELING_ENDPOINT="test-datalabeling.sandbox.googleapis.com:443"
diff --git a/automl/cloud-client/delete_model_test.py b/automl/cloud-client/delete_model_test.py
index 4b68a647557e..8721e14b44bc 100644
--- a/automl/cloud-client/delete_model_test.py
+++ b/automl/cloud-client/delete_model_test.py
@@ -16,7 +16,7 @@
 
 import delete_model
 
-PROJECT_ID = os.environ["GCLOUD_PROJECT"]
+PROJECT_ID = os.environ["AUTOML_PROJECT_ID"]
 
 
 def test_delete_model(capsys):
diff --git a/automl/cloud-client/deploy_model_test.py b/automl/cloud-client/deploy_model_test.py
index 8a2026188b8b..d8a72f1eee08 100644
--- a/automl/cloud-client/deploy_model_test.py
+++ b/automl/cloud-client/deploy_model_test.py
@@ -18,7 +18,7 @@
 
 import deploy_model
 
-PROJECT_ID = os.environ["GCLOUD_PROJECT"]
+PROJECT_ID = os.environ["AUTOML_PROJECT_ID"]
 MODEL_ID = "TRL0000000000000000000"
 
 
diff --git a/automl/cloud-client/get_model_evaluation_test.py b/automl/cloud-client/get_model_evaluation_test.py
index d467332db135..40a88a82e850 100644
--- a/automl/cloud-client/get_model_evaluation_test.py
+++ b/automl/cloud-client/get_model_evaluation_test.py
@@ -19,8 +19,8 @@
 
 import get_model_evaluation
 
-PROJECT_ID = os.environ["GCLOUD_PROJECT"]
-MODEL_ID = "TEN1499896588007374848"
+PROJECT_ID = os.environ["AUTOML_PROJECT_ID"]
+MODEL_ID = os.environ["ENTITY_EXTRACTION_MODEL_ID"]
 
 
 @pytest.fixture(scope="function")
diff --git a/automl/cloud-client/get_model_test.py b/automl/cloud-client/get_model_test.py
index 176161bf6fee..c146e18cbfd8 100644
--- a/automl/cloud-client/get_model_test.py
+++ b/automl/cloud-client/get_model_test.py
@@ -16,8 +16,8 @@
 
 import get_model
 
-PROJECT_ID = os.environ["GCLOUD_PROJECT"]
-MODEL_ID = "TEN1499896588007374848"
+PROJECT_ID = os.environ["AUTOML_PROJECT_ID"]
+MODEL_ID = os.environ["ENTITY_EXTRACTION_MODEL_ID"]
 
 
 def test_get_model(capsys):
diff --git a/automl/cloud-client/language_sentiment_analysis_create_dataset.py b/automl/cloud-client/language_sentiment_analysis_create_dataset.py
new file mode 100644
index 000000000000..2caae0656bbe
--- /dev/null
+++ b/automl/cloud-client/language_sentiment_analysis_create_dataset.py
@@ -0,0 +1,50 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+def create_dataset(project_id, display_name):
+    """Create a dataset."""
+    # [START automl_language_sentiment_analysis_create_dataset]
+    from google.cloud import automl
+
+    # TODO(developer): Uncomment and set the following variables
+    # project_id = "YOUR_PROJECT_ID"
+    # display_name = "YOUR_DATASET_NAME"
+
+    client = automl.AutoMlClient()
+
+    # A resource that represents Google Cloud Platform location.
+    project_location = client.location_path(project_id, "us-central1")
+
+    # Each dataset requires a sentiment score with a defined sentiment_max
+    # value, for more information on TextSentimentDatasetMetadata, see:
+    # https://cloud.google.com/natural-language/automl/docs/prepare#sentiment-analysis
+    # https://cloud.google.com/automl/docs/reference/rpc/google.cloud.automl.v1#textsentimentdatasetmetadata
+    metadata = automl.types.TextSentimentDatasetMetadata(
+        sentiment_max=4
+    )  # Possible max sentiment score: 1-10
+
+    dataset = automl.types.Dataset(
+        display_name=display_name, text_sentiment_dataset_metadata=metadata
+    )
+
+    # Create a dataset with the dataset metadata in the region.
+    response = client.create_dataset(project_location, dataset)
+
+    created_dataset = response.result()
+
+    # Display the dataset information
+    print("Dataset name: {}".format(created_dataset.name))
+    print("Dataset id: {}".format(created_dataset.name.split("/")[-1]))
+    # [END automl_language_sentiment_analysis_create_dataset]
diff --git a/automl/cloud-client/language_sentiment_analysis_create_dataset_test.py b/automl/cloud-client/language_sentiment_analysis_create_dataset_test.py
new file mode 100644
index 000000000000..239a154f84ab
--- /dev/null
+++ b/automl/cloud-client/language_sentiment_analysis_create_dataset_test.py
@@ -0,0 +1,41 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+import os
+
+from google.cloud import automl
+
+import language_sentiment_analysis_create_dataset
+
+
+PROJECT_ID = os.environ["AUTOML_PROJECT_ID"]
+
+
+def test_sentiment_analysis_create_dataset(capsys):
+    dataset_name = "test_" + datetime.datetime.now().strftime("%Y%m%d%H%M%S")
+    language_sentiment_analysis_create_dataset.create_dataset(
+        PROJECT_ID, dataset_name
+    )
+    out, _ = capsys.readouterr()
+    assert "Dataset id: " in out
+
+    # Delete the created dataset
+    dataset_id = out.splitlines()[1].split()[2]
+    client = automl.AutoMlClient()
+    dataset_full_id = client.dataset_path(
+        PROJECT_ID, "us-central1", dataset_id
+    )
+    response = client.delete_dataset(dataset_full_id)
+    response.result()
diff --git a/automl/cloud-client/language_sentiment_analysis_create_model.py b/automl/cloud-client/language_sentiment_analysis_create_model.py
new file mode 100644
index 000000000000..6eca50a7c551
--- /dev/null
+++ b/automl/cloud-client/language_sentiment_analysis_create_model.py
@@ -0,0 +1,43 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+def create_model(project_id, dataset_id, display_name):
+    """Create a model."""
+    # [START automl_language_sentiment_analysis_create_model]
+    from google.cloud import automl
+
+    # TODO(developer): Uncomment and set the following variables
+    # project_id = "YOUR_PROJECT_ID"
+    # dataset_id = "YOUR_DATASET_ID"
+    # display_name = "YOUR_MODEL_NAME"
+
+    client = automl.AutoMlClient()
+
+    # A resource that represents Google Cloud Platform location.
+    project_location = client.location_path(project_id, "us-central1")
+    # Leave model unset to use the default base model provided by Google
+    metadata = automl.types.TextSentimentModelMetadata()
+    model = automl.types.Model(
+        display_name=display_name,
+        dataset_id=dataset_id,
+        text_sentiment_model_metadata=metadata,
+    )
+
+    # Create a model with the model metadata in the region.
+    response = client.create_model(project_location, model)
+
+    print("Training operation name: {}".format(response.operation.name))
+    print("Training started...")
+    # [END automl_language_sentiment_analysis_create_model]
diff --git a/automl/cloud-client/language_sentiment_analysis_create_model_test.py b/automl/cloud-client/language_sentiment_analysis_create_model_test.py
new file mode 100644
index 000000000000..cbb79533efc0
--- /dev/null
+++ b/automl/cloud-client/language_sentiment_analysis_create_model_test.py
@@ -0,0 +1,37 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+from google.cloud import automl
+import pytest
+
+import language_sentiment_analysis_create_model
+
+PROJECT_ID = os.environ["AUTOML_PROJECT_ID"]
+DATASET_ID = os.environ["SENTIMENT_ANALYSIS_DATASET_ID"]
+
+
+@pytest.mark.slow
+def test_sentiment_analysis_create_model(capsys):
+    language_sentiment_analysis_create_model.create_model(
+        PROJECT_ID, DATASET_ID, "object_test_create_model"
+    )
+    out, _ = capsys.readouterr()
+    assert "Training started" in out
+
+    # Cancel the operation
+    operation_id = out.split("Training operation name: ")[1].split("\n")[0]
+    client = automl.AutoMlClient()
+    client.transport._operations_client.cancel_operation(operation_id)
diff --git a/automl/cloud-client/language_sentiment_analysis_predict.py b/automl/cloud-client/language_sentiment_analysis_predict.py
new file mode 100644
index 000000000000..cf459142ff12
--- /dev/null
+++ b/automl/cloud-client/language_sentiment_analysis_predict.py
@@ -0,0 +1,51 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+def predict(project_id, model_id, content):
+    """Predict."""
+    # [START automl_language_sentiment_analysis_predict]
+    from google.cloud import automl
+
+    # TODO(developer): Uncomment and set the following variables
+    # project_id = "YOUR_PROJECT_ID"
+    # model_id = "YOUR_MODEL_ID"
+    # content = "text to predict"
+
+    prediction_client = automl.PredictionServiceClient()
+
+    # Get the full path of the model.
+    model_full_id = prediction_client.model_path(
+        project_id, "us-central1", model_id
+    )
+
+    # Supported mime_types: 'text/plain', 'text/html'
+    # https://cloud.google.com/automl/docs/reference/rpc/google.cloud.automl.v1#textsnippet
+    text_snippet = automl.types.TextSnippet(
+        content=content, mime_type="text/plain"
+    )
+    payload = automl.types.ExamplePayload(text_snippet=text_snippet)
+
+    response = prediction_client.predict(model_full_id, payload)
+
+    for annotation_payload in response.payload:
+        print(
+            "Predicted class name: {}".format(annotation_payload.display_name)
+        )
+        print(
+            "Predicted sentiment score: {}".format(
+                annotation_payload.text_sentiment.sentiment
+            )
+        )
+    # [END automl_language_sentiment_analysis_predict]
diff --git a/automl/cloud-client/language_sentiment_analysis_predict_test.py b/automl/cloud-client/language_sentiment_analysis_predict_test.py
new file mode 100644
index 000000000000..d4fffc7f0864
--- /dev/null
+++ b/automl/cloud-client/language_sentiment_analysis_predict_test.py
@@ -0,0 +1,43 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+from google.cloud import automl
+import pytest
+
+import language_sentiment_analysis_predict
+
+PROJECT_ID = os.environ["AUTOML_PROJECT_ID"]
+MODEL_ID = os.environ["SENTIMENT_ANALYSIS_MODEL_ID"]
+
+
+@pytest.fixture(scope="function")
+def verify_model_state():
+    client = automl.AutoMlClient()
+    model_full_id = client.model_path(PROJECT_ID, "us-central1", MODEL_ID)
+
+    model = client.get_model(model_full_id)
+    if model.deployment_state == automl.enums.Model.DeploymentState.UNDEPLOYED:
+        # Deploy model if it is not deployed
+        response = client.deploy_model(model_full_id)
+        response.result()
+
+
+def test_predict(capsys, verify_model_state):
+    verify_model_state
+    text = "Hopefully this Claritin kicks in soon"
+    language_sentiment_analysis_predict.predict(PROJECT_ID, MODEL_ID, text)
+    out, _ = capsys.readouterr()
+    assert "Predicted sentiment score: " in out
diff --git a/automl/cloud-client/list_model_evaluations_test.py b/automl/cloud-client/list_model_evaluations_test.py
index 2acad83ed1da..839540bd30ab 100644
--- a/automl/cloud-client/list_model_evaluations_test.py
+++ b/automl/cloud-client/list_model_evaluations_test.py
@@ -17,8 +17,8 @@
 
 import list_model_evaluations
 
-PROJECT_ID = os.environ["GCLOUD_PROJECT"]
-MODEL_ID = "TEN1499896588007374848"
+PROJECT_ID = os.environ["AUTOML_PROJECT_ID"]
+MODEL_ID = os.environ["ENTITY_EXTRACTION_MODEL_ID"]
 
 
 def test_list_get_eval_model(capsys):
diff --git a/automl/cloud-client/list_models_test.py b/automl/cloud-client/list_models_test.py
index adb641909874..10d7c8da85e7 100644
--- a/automl/cloud-client/list_models_test.py
+++ b/automl/cloud-client/list_models_test.py
@@ -16,7 +16,7 @@
 
 import list_models
 
-PROJECT_ID = os.environ["GCLOUD_PROJECT"]
+PROJECT_ID = os.environ["AUTOML_PROJECT_ID"]
 
 
 def test_list_get_eval_model(capsys):
diff --git a/automl/cloud-client/translate_create_dataset.py b/automl/cloud-client/translate_create_dataset.py
index 7afe64ac88a1..8f468679f925 100644
--- a/automl/cloud-client/translate_create_dataset.py
+++ b/automl/cloud-client/translate_create_dataset.py
@@ -26,6 +26,8 @@ def create_dataset(project_id, display_name):
 
     # A resource that represents Google Cloud Platform location.
     project_location = client.location_path(project_id, "us-central1")
+    # For a list of supported languages, see:
+    # https://cloud.google.com/translate/automl/docs/languages
     dataset_metadata = automl.types.TranslationDatasetMetadata(
         source_language_code="en", target_language_code="ja"
     )
diff --git a/automl/cloud-client/translate_create_dataset_test.py b/automl/cloud-client/translate_create_dataset_test.py
index 1129babac7f3..9011da976ec0 100644
--- a/automl/cloud-client/translate_create_dataset_test.py
+++ b/automl/cloud-client/translate_create_dataset_test.py
@@ -20,7 +20,7 @@
 
 import translate_create_dataset
 
-PROJECT_ID = os.environ["GCLOUD_PROJECT"]
+PROJECT_ID = os.environ["AUTOML_PROJECT_ID"]
 
 
 def test_translate_create_dataset(capsys):
diff --git a/automl/cloud-client/translate_create_model_test.py b/automl/cloud-client/translate_create_model_test.py
index e055d66806c7..b564d7e07019 100644
--- a/automl/cloud-client/translate_create_model_test.py
+++ b/automl/cloud-client/translate_create_model_test.py
@@ -18,8 +18,8 @@
 
 import translate_create_model
 
-PROJECT_ID = os.environ["GCLOUD_PROJECT"]
-DATASET_ID = "TRL3876092572857648864"
+PROJECT_ID = os.environ["AUTOML_PROJECT_ID"]
+DATASET_ID = os.environ["TRANSLATION_DATASET_ID"]
 
 
 def test_translate_create_model(capsys):
diff --git a/automl/cloud-client/translate_predict_test.py b/automl/cloud-client/translate_predict_test.py
index 32134ef1cfb5..aabfd05b61a5 100644
--- a/automl/cloud-client/translate_predict_test.py
+++ b/automl/cloud-client/translate_predict_test.py
@@ -19,8 +19,8 @@
 
 import translate_predict
 
-PROJECT_ID = os.environ["GCLOUD_PROJECT"]
-MODEL_ID = "TRL3128559826197068699"
+PROJECT_ID = os.environ["AUTOML_PROJECT_ID"]
+MODEL_ID = os.environ["TRANSLATION_MODEL_ID"]
 
 
 @pytest.fixture(scope="function")
diff --git a/automl/cloud-client/undeploy_model_test.py b/automl/cloud-client/undeploy_model_test.py
index caaa78f07fc5..49e9b1305e32 100644
--- a/automl/cloud-client/undeploy_model_test.py
+++ b/automl/cloud-client/undeploy_model_test.py
@@ -18,7 +18,7 @@
 
 import undeploy_model
 
-PROJECT_ID = os.environ["GCLOUD_PROJECT"]
+PROJECT_ID = os.environ["AUTOML_PROJECT_ID"]
 MODEL_ID = "TRL0000000000000000000"
 
 
diff --git a/automl/cloud-client/vision_classification_create_dataset.py b/automl/cloud-client/vision_classification_create_dataset.py
index c41d67cc39ef..8981a795679d 100644
--- a/automl/cloud-client/vision_classification_create_dataset.py
+++ b/automl/cloud-client/vision_classification_create_dataset.py
@@ -30,6 +30,7 @@ def create_dataset(project_id, display_name):
     # Types:
     # MultiLabel: Multiple labels are allowed for one example.
     # MultiClass: At most one label is allowed per example.
+    # https://cloud.google.com/automl/docs/reference/rpc/google.cloud.automl.v1#classificationtype
     metadata = automl.types.ImageClassificationDatasetMetadata(
         classification_type=automl.enums.ClassificationType.MULTILABEL
     )
diff --git a/automl/cloud-client/vision_classification_create_dataset_test.py b/automl/cloud-client/vision_classification_create_dataset_test.py
index af4a5636aa63..763eef825cb6 100644
--- a/automl/cloud-client/vision_classification_create_dataset_test.py
+++ b/automl/cloud-client/vision_classification_create_dataset_test.py
@@ -21,7 +21,7 @@
 
 import vision_classification_create_dataset
 
-PROJECT_ID = os.environ["GCLOUD_PROJECT"]
+PROJECT_ID = os.environ["AUTOML_PROJECT_ID"]
 
 
 @pytest.mark.slow
diff --git a/automl/cloud-client/vision_classification_create_model.py b/automl/cloud-client/vision_classification_create_model.py
index 60e7af52517b..30505614e9eb 100644
--- a/automl/cloud-client/vision_classification_create_model.py
+++ b/automl/cloud-client/vision_classification_create_model.py
@@ -28,6 +28,9 @@ def create_model(project_id, dataset_id, display_name):
     # A resource that represents Google Cloud Platform location.
     project_location = client.location_path(project_id, "us-central1")
     # Leave model unset to use the default base model provided by Google
+    # train_budget_milli_node_hours: The actual train_cost will be equal or
+    # less than this value.
+    # https://cloud.google.com/automl/docs/reference/rpc/google.cloud.automl.v1#imageclassificationmodelmetadata
     metadata = automl.types.ImageClassificationModelMetadata(
         train_budget_milli_node_hours=24000
     )
diff --git a/automl/cloud-client/vision_classification_create_model_test.py b/automl/cloud-client/vision_classification_create_model_test.py
index 6e3ee8b0d011..aea9926ab176 100644
--- a/automl/cloud-client/vision_classification_create_model_test.py
+++ b/automl/cloud-client/vision_classification_create_model_test.py
@@ -19,8 +19,8 @@
 
 import vision_classification_create_model
 
-PROJECT_ID = os.environ["GCLOUD_PROJECT"]
-DATASET_ID = "ICN3946265060617537378"
+PROJECT_ID = os.environ["AUTOML_PROJECT_ID"]
+DATASET_ID = os.environ["VISION_CLASSIFICATION_DATASET_ID"]
 
 
 @pytest.mark.slow
diff --git a/automl/cloud-client/vision_classification_deploy_model_node_count.py b/automl/cloud-client/vision_classification_deploy_model_node_count.py
index fae5bae40d72..b89cec73ee81 100644
--- a/automl/cloud-client/vision_classification_deploy_model_node_count.py
+++ b/automl/cloud-client/vision_classification_deploy_model_node_count.py
@@ -25,6 +25,9 @@ def deploy_model(project_id, model_id):
     client = automl.AutoMlClient()
     # Get the full path of the model.
     model_full_id = client.model_path(project_id, "us-central1", model_id)
+
+    # node count determines the number of nodes to deploy the model on.
+    # https://cloud.google.com/automl/docs/reference/rpc/google.cloud.automl.v1#imageclassificationmodeldeploymentmetadata
     metadata = automl.types.ImageClassificationModelDeploymentMetadata(
         node_count=2
     )
diff --git a/automl/cloud-client/vision_classification_deploy_model_node_count_test.py b/automl/cloud-client/vision_classification_deploy_model_node_count_test.py
index 46570928dbbf..3f6ff430a4d2 100644
--- a/automl/cloud-client/vision_classification_deploy_model_node_count_test.py
+++ b/automl/cloud-client/vision_classification_deploy_model_node_count_test.py
@@ -18,7 +18,7 @@
 
 import vision_classification_deploy_model_node_count
 
-PROJECT_ID = os.environ["GCLOUD_PROJECT"]
+PROJECT_ID = os.environ["AUTOML_PROJECT_ID"]
 MODEL_ID = "ICN0000000000000000000"
 
 
diff --git a/automl/cloud-client/vision_classification_predict.py b/automl/cloud-client/vision_classification_predict.py
index 98cfc9f87a76..c42606ccece6 100644
--- a/automl/cloud-client/vision_classification_predict.py
+++ b/automl/cloud-client/vision_classification_predict.py
@@ -39,6 +39,7 @@ def predict(project_id, model_id, file_path):
 
     # params is additional domain-specific parameters.
     # score_threshold is used to filter the result
+    # https://cloud.google.com/automl/docs/reference/rpc/google.cloud.automl.v1#predictrequest
     params = {"score_threshold": "0.8"}
 
     response = prediction_client.predict(model_full_id, payload, params)
diff --git a/automl/cloud-client/vision_classification_predict_test.py b/automl/cloud-client/vision_classification_predict_test.py
index b9aabccdb461..9df9c91161c4 100644
--- a/automl/cloud-client/vision_classification_predict_test.py
+++ b/automl/cloud-client/vision_classification_predict_test.py
@@ -19,8 +19,8 @@
 
 import vision_classification_predict
 
-PROJECT_ID = os.environ["GCLOUD_PROJECT"]
-MODEL_ID = "ICN7383667271543079510"
+PROJECT_ID = os.environ["AUTOML_PROJECT_ID"]
+MODEL_ID = os.environ["VISION_CLASSIFICATION_MODEL_ID"]
 
 
 @pytest.fixture(scope="function")
diff --git a/translate/cloud-client/beta_snippets_test.py b/translate/cloud-client/beta_snippets_test.py
index 10a30c800733..4d65c3c2cd6a 100644
--- a/translate/cloud-client/beta_snippets_test.py
+++ b/translate/cloud-client/beta_snippets_test.py
@@ -64,7 +64,7 @@ def unique_glossary_id():
 def test_translate_text(capsys):
     beta_snippets.translate_text(PROJECT_ID, 'Hello world')
     out, _ = capsys.readouterr()
-    assert 'Zdravo svijete' in out or 'Pozdrav svijetu' in out
+    assert 'Translated Text:' in out
 
 
 def test_batch_translate_text(capsys, bucket):