diff --git a/appengine/flexible/tasks/snippets.py b/appengine/flexible/tasks/snippets.py index 0c96f96d87d..eaf34280511 100644 --- a/appengine/flexible/tasks/snippets.py +++ b/appengine/flexible/tasks/snippets.py @@ -176,26 +176,44 @@ def create_task_with_name(project, location, queue, task_name): def delete_task(project, location, queue): - # [START taskqueues_setup] + # [START taskqueues_deleting_tasks] client = tasks.CloudTasksClient() # TODO(developer): Uncomment these lines and replace with your values. # project = 'my-project-id' # location = 'us- central1' # queue = 'queue1' - # [START taskqueues_setup] - # [START taskqueues_deleting_tasks] task_path = client.task_path(project, location, queue, 'foo') response = client.delete_task(task_path) # [END taskqueues_deleting_tasks] + return response + +def purge_queue(project, location, queue): # [START taskqueues_purging_tasks] + client = tasks.CloudTasksClient() + + # TODO(developer): Uncomment these lines and replace with your values. + # project = 'my-project-id' + # location = 'us- central1' + # queue = 'queue1' + queue_path = client.queue_path(project, location, queue) response = client.purge_queue(queue_path) # [END taskqueues_purging_tasks] + return response + +def pause_queue(project, location, queue): # [START taskqueues_pause_queue] + client = tasks.CloudTasksClient() + + # TODO(developer): Uncomment these lines and replace with your values. + # project = 'my-project-id' + # location = 'us- central1' + # queue = 'queue1' + queue_path = client.queue_path(project, location, queue) response = client.pause_queue(queue_path) # [END taskqueues_pause_queues] @@ -203,6 +221,7 @@ def delete_task(project, location, queue): def delete_queue(project, location, queue): + # [START taskqueues_deleting_queues] client = tasks.CloudTasksClient() # TODO(developer): Uncomment these lines and replace with your values. 
@@ -210,7 +229,6 @@ def delete_queue(project, location, queue): # location = 'us- central1' # queue = 'queue1' - # [START taskqueues_deleting_queues] queue_path = client.queue_path(project, location, queue) response = client.delete_queue(queue_path) # [END taskqueues_deleting_queues] diff --git a/appengine/flexible/tasks/snippets_test.py b/appengine/flexible/tasks/snippets_test.py index 8923c670ea6..73ff52bdc88 100644 --- a/appengine/flexible/tasks/snippets_test.py +++ b/appengine/flexible/tasks/snippets_test.py @@ -71,14 +71,30 @@ def test_create_task_with_name(): @pytest.mark.order6 def test_delete_task(): + result = snippets.delete_task( + TEST_PROJECT_ID, TEST_LOCATION, QUEUE_NAME_1) + assert result is None + + +@pytest.mark.order7 +def test_purge_queue(): name = "projects/{}/locations/{}/queues/{}".format( TEST_PROJECT_ID, TEST_LOCATION, QUEUE_NAME_1) - result = snippets.delete_task( + result = snippets.purge_queue( TEST_PROJECT_ID, TEST_LOCATION, QUEUE_NAME_1) assert name in result.name -@pytest.mark.order7 +@pytest.mark.order8 +def test_pause_queue(): + name = "projects/{}/locations/{}/queues/{}".format( + TEST_PROJECT_ID, TEST_LOCATION, QUEUE_NAME_1) + result = snippets.pause_queue( + TEST_PROJECT_ID, TEST_LOCATION, QUEUE_NAME_1) + assert name in result.name + + +@pytest.mark.order9 def test_delete_queue(): result = snippets.delete_queue( TEST_PROJECT_ID, TEST_LOCATION, QUEUE_NAME_1) @@ -89,7 +105,7 @@ def test_delete_queue(): assert result is None -@pytest.mark.order8 +@pytest.mark.order10 def test_retry_task(): QUEUE_SIZE = 3 QUEUE_NAME = [] diff --git a/appengine/standard/i18n/i18n_utils.py b/appengine/standard/i18n/i18n_utils.py index 12eb5e59179..18ebf35ae80 100644 --- a/appengine/standard/i18n/i18n_utils.py +++ b/appengine/standard/i18n/i18n_utils.py @@ -19,7 +19,7 @@ The idea of this example, especially for how to translate strings in Javascript is originally from an implementation of Django i18n. """ - +from __future__ import print_function import gettext import json @@ -31,6 +31,11 @@ from webob import Request +try: + basestring +except NameError: + basestring = str + def _get_plural_forms(js_translations): """Extracts the parameters for what constitutes a plural. @@ -49,7 +54,7 @@ def _get_plural_forms(js_translations): for l in js_translations._catalog[''].split('\n'): if l.startswith('Plural-Forms:'): plural = l.split(':', 1)[1].strip() - print "plural is %s" % plural + print('plural is {}'.format(plural)) if plural is not None: for raw_element in plural.split(';'): element = raw_element.strip() @@ -57,7 +62,7 @@ def _get_plural_forms(js_translations): n_plural = int(element.split('=', 1)[1]) elif element.startswith('plural='): plural = element.split('=', 1)[1] - print "plural is now %s" % plural + print('plural is now {}'.format(plural)) else: n_plural = 2 plural = '(n == 1) ? 
0 : 1' @@ -83,9 +88,9 @@ def convert_translations_to_dict(js_translations): for key, value in js_translations._catalog.items(): if key == '': continue - if type(key) in (str, unicode): + if isinstance(key, basestring): translations_dict['catalog'][key] = value - elif type(key) == tuple: + elif isinstance(key, tuple): if key[0] not in translations_dict['catalog']: translations_dict['catalog'][key[0]] = [''] * n_plural translations_dict['catalog'][key[0]][int(key[1])] = value diff --git a/appengine/standard/mail/header.py b/appengine/standard/mail/header.py index 4a84edeac23..7f994600bf2 100644 --- a/appengine/standard/mail/header.py +++ b/appengine/standard/mail/header.py @@ -1,3 +1,4 @@ +from __future__ import print_function # Copyright 2016 Google Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -40,7 +41,7 @@ def get(self): """) def post(self): - print repr(self.request.POST) + print(repr(self.request.POST)) id = self.request.POST['thread_id'] send_example_mail('{}@appspot.gserviceaccount.com'.format( app_identity.get_application_id()), id) diff --git a/appengine/standard/ndb/properties/snippets.py b/appengine/standard/ndb/properties/snippets.py index 9c98ac6701d..fecdbd3c65e 100644 --- a/appengine/standard/ndb/properties/snippets.py +++ b/appengine/standard/ndb/properties/snippets.py @@ -1,3 +1,4 @@ +from __future__ import print_function # Copyright 2016 Google Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -147,4 +148,4 @@ class Part(ndb.Model): def print_part(): p1 = Part(name='foo', color=Color.RED) - print p1.color # prints "RED" + print(p1.color) # prints "RED" diff --git a/appengine/standard/ndb/queries/snippets_test.py b/appengine/standard/ndb/queries/snippets_test.py index d8e6ddef79c..7000fc055e7 100644 --- a/appengine/standard/ndb/queries/snippets_test.py +++ b/appengine/standard/ndb/queries/snippets_test.py @@ -1,3 +1,4 @@ +from __future__ import print_function # Copyright 2016 Google Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -343,7 +344,7 @@ def test_fetch_message_accounts_inefficient(testbed): assert len(message_account_pairs) == 5 - print repr(message_account_pairs) + print(repr(message_account_pairs)) for i in range(1, 6): message, account = message_account_pairs[i - 1] assert message.content == 'Message %s' % i diff --git a/appengine/standard_python37/django/requirements.txt b/appengine/standard_python37/django/requirements.txt index 9ad3c8e50a5..7874a18eebc 100644 --- a/appengine/standard_python37/django/requirements.txt +++ b/appengine/standard_python37/django/requirements.txt @@ -1,2 +1,2 @@ -Django==2.1.10 +Django==2.1.11 PyMySQL==0.9.3 diff --git a/asset/cloud-client/quickstart_createfeed.py b/asset/cloud-client/quickstart_createfeed.py new file mode 100644 index 00000000000..a36d8bd094f --- /dev/null +++ b/asset/cloud-client/quickstart_createfeed.py @@ -0,0 +1,51 @@ +#!/usr/bin/env python + +# Copyright 2019 Google LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + + +import argparse + + +def create_feed(project_id, feed_id, asset_names, topic): + # [START asset_quickstart_create_feed] + from google.cloud import asset_v1p2beta1 + from google.cloud.asset_v1p2beta1.proto import asset_service_pb2 + + # TODO project_id = 'Your Google Cloud Project ID' + # TODO feed_id = 'Feed ID you want to create' + # TODO asset_names = 'List of asset names the feed listen to' + # TODO topic = "Topic name of the feed" + + client = asset_v1p2beta1.AssetServiceClient() + parent = "projects/{}".format(project_id) + feed = asset_service_pb2.Feed() + feed.asset_names.extend(asset_names) + feed.feed_output_config.pubsub_destination.topic = topic + response = client.create_feed(parent, feed_id, feed) + print('feed: {}'.format(response)) + # [END asset_quickstart_create_feed] + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter) + parser.add_argument('project_id', help='Your Google Cloud project ID') + parser.add_argument('feed_id', help='Feed ID you want to create') + parser.add_argument('asset_names', + help='List of asset names the feed listen to') + parser.add_argument('topic', help='Topic name of the feed') + args = parser.parse_args() + create_feed(args.project_id, args.feed_id, args.asset_names, args.topic) diff --git a/asset/cloud-client/quickstart_createfeed_test.py b/asset/cloud-client/quickstart_createfeed_test.py new file mode 100644 index 00000000000..c566fe7c73b --- /dev/null +++ b/asset/cloud-client/quickstart_createfeed_test.py @@ -0,0 +1,44 @@ +#!/usr/bin/env python + +# Copyright 2018 Google LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +import os +import time + +import quickstart_createfeed +import quickstart_deletefeed +from google.cloud import resource_manager + +json_data = open(os.environ["GOOGLE_APPLICATION_CREDENTIALS"]).read() +data = json.loads(json_data) +PROJECT = data['project_id'] +ASSET_NAME = 'assets-{}'.format(int(time.time())) +FEED_ID = 'feed-{}'.format(int(time.time())) +TOPIC = 'topic-{}'.format(int(time.time())) + + +def test_create_feed(capsys): + client = resource_manager.Client() + project_number = client.fetch_project(PROJECT).number + full_topic_name = "projects/{}/topics/{}".format(PROJECT, TOPIC) + quickstart_createfeed.create_feed( + PROJECT, FEED_ID, [ASSET_NAME, ], full_topic_name) + out, _ = capsys.readouterr() + assert "feed" in out + + # Clean up, delete the feed + feed_name = "projects/{}/feeds/{}".format(project_number, FEED_ID) + quickstart_deletefeed.delete_feed(feed_name) diff --git a/asset/cloud-client/quickstart_deletefeed.py b/asset/cloud-client/quickstart_deletefeed.py new file mode 100644 index 00000000000..81a54b4ff5d --- /dev/null +++ b/asset/cloud-client/quickstart_deletefeed.py @@ -0,0 +1,39 @@ +#!/usr/bin/env python + +# Copyright 2018 Google LLC. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import argparse + + +def delete_feed(feed_name): + # [START asset_quickstart_delete_feed] + from google.cloud import asset_v1p2beta1 + + # TODO feed_name = 'Feed name you want to delete' + + client = asset_v1p2beta1.AssetServiceClient() + client.delete_feed(feed_name) + print('deleted_feed') + # [END asset_quickstart_delete_feed] + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter) + parser.add_argument('feed_name', help='Feed name you want to delete') + args = parser.parse_args() + delete_feed(args.feed_name) diff --git a/asset/cloud-client/quickstart_deletefeed_test.py b/asset/cloud-client/quickstart_deletefeed_test.py new file mode 100644 index 00000000000..e4aa8abd651 --- /dev/null +++ b/asset/cloud-client/quickstart_deletefeed_test.py @@ -0,0 +1,42 @@ +#!/usr/bin/env python + +# Copyright 2018 Google LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import time + +import quickstart_createfeed +import quickstart_deletefeed +from google.cloud import resource_manager + +PROJECT = os.environ['GCLOUD_PROJECT'] +ASSET_NAME = 'assets-{}'.format(int(time.time())) +FEED_ID = 'feed-{}'.format(int(time.time())) +TOPIC = 'topic-{}'.format(int(time.time())) + + +def test_delete_feed(capsys): + client = resource_manager.Client() + project_number = client.fetch_project(PROJECT).number + # First create the feed, which will be deleted later + full_topic_name = "projects/{}/topics/{}".format(PROJECT, TOPIC) + quickstart_createfeed.create_feed( + PROJECT, FEED_ID, [ASSET_NAME, ], full_topic_name) + + feed_name = "projects/{}/feeds/{}".format(project_number, FEED_ID) + quickstart_deletefeed.delete_feed(feed_name) + + out, _ = capsys.readouterr() + assert "deleted_feed" in out diff --git a/asset/cloud-client/quickstart_getfeed.py b/asset/cloud-client/quickstart_getfeed.py new file mode 100644 index 00000000000..aab32d242e2 --- /dev/null +++ b/asset/cloud-client/quickstart_getfeed.py @@ -0,0 +1,39 @@ +#!/usr/bin/env python + +# Copyright 2018 Google LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import argparse + + +def get_feed(feed_name): + # [START asset_quickstart_get_feed] + from google.cloud import asset_v1p2beta1 + + # TODO feed_name = 'Feed Name you want to get' + + client = asset_v1p2beta1.AssetServiceClient() + response = client.get_feed(feed_name) + print('gotten_feed: {}'.format(response)) + # [START asset_quickstart_get_feed] + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter) + parser.add_argument('feed_name', help='Feed Name you want to get') + args = parser.parse_args() + get_feed(args.feed_name) diff --git a/asset/cloud-client/quickstart_getfeed_test.py b/asset/cloud-client/quickstart_getfeed_test.py new file mode 100644 index 00000000000..7104db47f3c --- /dev/null +++ b/asset/cloud-client/quickstart_getfeed_test.py @@ -0,0 +1,45 @@ +#!/usr/bin/env python + +# Copyright 2018 Google LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import time + +import quickstart_createfeed +import quickstart_deletefeed +import quickstart_getfeed +from google.cloud import resource_manager + +PROJECT = os.environ['GCLOUD_PROJECT'] +ASSET_NAME = 'assets-{}'.format(int(time.time())) +FEED_ID = 'feed-{}'.format(int(time.time())) +TOPIC = 'topic-{}'.format(int(time.time())) + + +def test_get_feed(capsys): + client = resource_manager.Client() + project_number = client.fetch_project(PROJECT).number + # First create the feed, which will be gotten later + full_topic_name = "projects/{}/topics/{}".format(PROJECT, TOPIC) + quickstart_createfeed.create_feed( + PROJECT, FEED_ID, [ASSET_NAME, ], full_topic_name) + + feed_name = "projects/{}/feeds/{}".format(project_number, FEED_ID) + quickstart_getfeed.get_feed(feed_name) + out, _ = capsys.readouterr() + + assert "gotten_feed" in out + # Clean up and delete the feed + quickstart_deletefeed.delete_feed(feed_name) diff --git a/asset/cloud-client/quickstart_listfeeds.py b/asset/cloud-client/quickstart_listfeeds.py new file mode 100644 index 00000000000..ebcdd46b895 --- /dev/null +++ b/asset/cloud-client/quickstart_listfeeds.py @@ -0,0 +1,41 @@ +#!/usr/bin/env python + +# Copyright 2018 Google LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import argparse + + +def list_feeds(parent_resource): + # [START asset_quickstart_list_feeds] + from google.cloud import asset_v1p2beta1 + + # TODO parent_resource = 'Parent resource you want to list all feeds' + + client = asset_v1p2beta1.AssetServiceClient() + response = client.list_feeds(parent_resource) + print('feeds: {}'.format(response.feeds)) + # [END asset_quickstart_list_feeds] + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter) + parser.add_argument( + 'parent_resource', + help='Parent resource you want to list all feeds') + args = parser.parse_args() + list_feeds(args.parent_resource) diff --git a/asset/cloud-client/quickstart_listfeeds_test.py b/asset/cloud-client/quickstart_listfeeds_test.py new file mode 100644 index 00000000000..87678f5d233 --- /dev/null +++ b/asset/cloud-client/quickstart_listfeeds_test.py @@ -0,0 +1,28 @@ +#!/usr/bin/env python + +# Copyright 2018 Google LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os + +import quickstart_listfeeds + +PROJECT = os.environ['GCLOUD_PROJECT'] + + +def test_list_feeds(capsys): + parent_resource = "projects/{}".format(PROJECT) + quickstart_listfeeds.list_feeds(parent_resource) + out, _ = capsys.readouterr() + assert "feeds" in out diff --git a/asset/cloud-client/quickstart_updatefeed.py b/asset/cloud-client/quickstart_updatefeed.py new file mode 100644 index 00000000000..5f82b11540a --- /dev/null +++ b/asset/cloud-client/quickstart_updatefeed.py @@ -0,0 +1,49 @@ +#!/usr/bin/env python + +# Copyright 2018 Google LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +import argparse + + +def update_feed(feed_name, topic): + # [START asset_quickstart_update_feed] + from google.cloud import asset_v1p2beta1 + from google.cloud.asset_v1p2beta1.proto import asset_service_pb2 + from google.protobuf import field_mask_pb2 + + # TODO feed_name = 'Feed Name you want to update' + # TODO topic = "Topic name you want to update with" + + client = asset_v1p2beta1.AssetServiceClient() + feed = asset_service_pb2.Feed() + feed.name = feed_name + feed.feed_output_config.pubsub_destination.topic = topic + update_mask = field_mask_pb2.FieldMask() + # In this example, we update topic of the feed + update_mask.paths.append("feed_output_config.pubsub_destination.topic") + response = client.update_feed(feed, update_mask) + print('updated_feed: {}'.format(response)) + # [END asset_quickstart_update_feed] + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter) + parser.add_argument('feed_name', help='Feed Name you want to update') + parser.add_argument('topic', help='Topic name you want to update with') + args = parser.parse_args() + update_feed(args.feed_name, args.topic) diff --git a/asset/cloud-client/quickstart_updatefeed_test.py b/asset/cloud-client/quickstart_updatefeed_test.py new file mode 100644 index 00000000000..a47c2777a3e --- /dev/null +++ b/asset/cloud-client/quickstart_updatefeed_test.py @@ -0,0 +1,47 @@ +#!/usr/bin/env python + +# Copyright 2018 Google LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+import os
+import time
+
+import quickstart_createfeed
+import quickstart_deletefeed
+import quickstart_updatefeed
+from google.cloud import resource_manager
+
+PROJECT = os.environ['GCLOUD_PROJECT']
+ASSET_NAME = 'assets-{}'.format(int(time.time()))
+FEED_ID = 'feed-{}'.format(int(time.time()))
+TOPIC = 'topic-{}'.format(int(time.time()))
+NEW_TOPIC = 'new-topic-{}'.format(int(time.time()))
+
+
+def test_update_feed(capsys):
+    client = resource_manager.Client()
+    project_number = client.fetch_project(PROJECT).number
+    # First create the feed, which will be updated later
+    full_topic_name = "projects/{}/topics/{}".format(PROJECT, TOPIC)
+    quickstart_createfeed.create_feed(
+        PROJECT, FEED_ID, [ASSET_NAME, ], full_topic_name)
+
+    feed_name = "projects/{}/feeds/{}".format(project_number, FEED_ID)
+    new_full_topic_name = "projects/" + PROJECT + "/topics/" + NEW_TOPIC
+    quickstart_updatefeed.update_feed(feed_name, new_full_topic_name)
+    out, _ = capsys.readouterr()
+
+    assert "updated_feed" in out
+    # Clean up and delete the feed
+    quickstart_deletefeed.delete_feed(feed_name)
diff --git a/asset/cloud-client/requirements.txt b/asset/cloud-client/requirements.txt
index 0e87ed672ee..46cfaa017f7 100644
--- a/asset/cloud-client/requirements.txt
+++ b/asset/cloud-client/requirements.txt
@@ -1,2 +1,3 @@
-google-cloud-storage==1.13.2
-google-cloud-asset==0.2.0
+google-cloud-storage==1.18.0
+google-cloud-asset==0.4.1
+google-cloud-resource-manager==0.29.2
\ No newline at end of file
diff --git a/cloud-sql/mysql/sqlalchemy/main.py b/cloud-sql/mysql/sqlalchemy/main.py
index b44c62820ab..57a5e6fbd4a 100644
--- a/cloud-sql/mysql/sqlalchemy/main.py
+++ b/cloud-sql/mysql/sqlalchemy/main.py
@@ -43,7 +43,7 @@
         password=db_pass,
         database=db_name,
         query={
-            'unix_socket': '/cloudsql/{}/'.format(cloud_sql_connection_name)
+            'unix_socket': '/cloudsql/{}'.format(cloud_sql_connection_name)
         }
     ),
     # ... Specify additional properties here.
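For context on the Cloud SQL hunk above: the only change is dropping the trailing slash from the unix_socket value, since the Cloud SQL proxy exposes the MySQL socket at /cloudsql/<connection name> with no trailing separator. Below is a minimal sketch of how such a connection URL is typically assembled with SQLAlchemy. The variable names (db_user, db_pass, db_name, cloud_sql_connection_name), the environment-variable lookups, and the pool settings are illustrative assumptions rather than part of this patch, and the SQLAlchemy 1.3-style URL call is an assumption about the surrounding code that the hunk does not show.

# Sketch only: assumes SQLAlchemy 1.3 with the PyMySQL driver.
import os

import sqlalchemy

# Illustrative environment variables, not taken from the patch.
db_user = os.environ.get('DB_USER')
db_pass = os.environ.get('DB_PASS')
db_name = os.environ.get('DB_NAME')
cloud_sql_connection_name = os.environ.get('CLOUD_SQL_CONNECTION_NAME')

db = sqlalchemy.create_engine(
    sqlalchemy.engine.url.URL(
        drivername='mysql+pymysql',
        username=db_user,
        password=db_pass,
        database=db_name,
        query={
            # No trailing slash after the connection name.
            'unix_socket': '/cloudsql/{}'.format(cloud_sql_connection_name)
        }
    ),
    pool_size=5,
    max_overflow=2,
)

Connections are then drawn from the engine as usual, for example with db.connect().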
diff --git a/dlp/inspect_content_test.py b/dlp/inspect_content_test.py index d5dd84a6f2f..8860cf9e7a8 100644 --- a/dlp/inspect_content_test.py +++ b/dlp/inspect_content_test.py @@ -294,7 +294,8 @@ def test_inspect_gcs_file(bucket, topic_id, subscription_id, capsys): 'test.txt', topic_id, subscription_id, - ['FIRST_NAME', 'EMAIL_ADDRESS', 'PHONE_NUMBER']) + ['FIRST_NAME', 'EMAIL_ADDRESS', 'PHONE_NUMBER'], + timeout=420) out, _ = capsys.readouterr() assert 'Info type: EMAIL_ADDRESS' in out @@ -314,7 +315,8 @@ def test_inspect_gcs_file_with_custom_info_types(bucket, topic_id, subscription_id, [], custom_dictionaries=dictionaries, - custom_regexes=regexes) + custom_regexes=regexes, + timeout=420) out, _ = capsys.readouterr() assert 'Info type: CUSTOM_DICTIONARY_0' in out @@ -330,7 +332,8 @@ def test_inspect_gcs_file_no_results( 'harmless.txt', topic_id, subscription_id, - ['FIRST_NAME', 'EMAIL_ADDRESS', 'PHONE_NUMBER']) + ['FIRST_NAME', 'EMAIL_ADDRESS', 'PHONE_NUMBER'], + timeout=420) out, _ = capsys.readouterr() assert 'No findings' in out diff --git a/firestore/cloud-client/requirements.txt b/firestore/cloud-client/requirements.txt index dd21b3e16df..85829b74e17 100644 --- a/firestore/cloud-client/requirements.txt +++ b/firestore/cloud-client/requirements.txt @@ -1 +1 @@ -google-cloud-firestore==1.1.0 +google-cloud-firestore==1.4.0 diff --git a/firestore/cloud-client/snippets.py b/firestore/cloud-client/snippets.py index c1146b9aabb..d750512bbb0 100644 --- a/firestore/cloud-client/snippets.py +++ b/firestore/cloud-client/snippets.py @@ -875,3 +875,11 @@ def collection_group_query(db): print(u'{} => {}'.format(doc.id, doc.to_dict())) # [END fs_collection_group_query] return docs + + +def update_document_increment(db): + # [START fs_update_document_increment] + washington_ref = db.collection(u'cities').document(u'DC') + + washington_ref.update("population", firestore.Increment(50)) + # [END fs_update_document_increment] diff --git a/healthcare/api-client/dicom/dicomweb.py b/healthcare/api-client/dicom/dicomweb.py index 3173e3cebce..4a0224722b5 100644 --- a/healthcare/api-client/dicom/dicomweb.py +++ b/healthcare/api-client/dicom/dicomweb.py @@ -131,20 +131,113 @@ def dicomweb_retrieve_study( dicomweb_path = '{}/datasets/{}/dicomStores/{}/dicomWeb/studies/{}'.format( url, dataset_id, dicom_store_id, study_uid) + # When specifying the output file, use an extension like ".multipart." + # Then, parse the downloaded multipart file to get each individual + # DICOM file. 
+ file_name = 'study.multipart' + + # Make an authenticated API request + session = get_session(service_account_json) + + response = session.get(dicomweb_path) + + response.raise_for_status() + + with open(file_name, 'wb') as f: + f.write(response.content) + print('Retrieved study and saved to file ' + + '{} in current directory'.format(file_name)) + + return response +# [END healthcare_dicomweb_retrieve_study] + + +# [START healthcare_dicomweb_retrieve_instance] +def dicomweb_retrieve_instance( + service_account_json, + base_url, + project_id, + cloud_region, + dataset_id, + dicom_store_id, + study_uid, + series_uid, + instance_uid): + """Handles the GET requests specified in the DICOMweb standard.""" + url = '{}/projects/{}/locations/{}'.format(base_url, + project_id, cloud_region) + + dicom_store_path = '{}/datasets/{}/dicomStores/{}'.format( + url, dataset_id, dicom_store_id) + + dicomweb_path = '{}/dicomWeb/studies/{}/series/{}/instances/{}'.format( + dicom_store_path, study_uid, series_uid, instance_uid) + + file_name = 'instance.dcm' + # Make an authenticated API request session = get_session(service_account_json) headers = { - 'Content-Type': 'application/dicom+json; charset=utf-8' + 'Accept': 'application/dicom; transfer-syntax=*' } response = session.get(dicomweb_path, headers=headers) + response.raise_for_status() - print('Retrieved study with UID: {}'.format(study_uid)) + with open(file_name, 'wb') as f: + f.write(response.content) + print('Retrieved DICOM instance and saved to file ' + + '{} in current directory'.format(file_name)) return response -# [END healthcare_dicomweb_retrieve_study] +# [END healthcare_dicomweb_retrieve_instance] + + +# [START healthcare_dicomweb_retrieve_rendered] +def dicomweb_retrieve_rendered( + service_account_json, + base_url, + project_id, + cloud_region, + dataset_id, + dicom_store_id, + study_uid, + series_uid, + instance_uid): + """Handles the GET requests specified in the DICOMweb standard.""" + url = '{}/projects/{}/locations/{}'.format(base_url, + project_id, cloud_region) + + dicom_store_path = '{}/datasets/{}/dicomStores/{}'.format( + url, dataset_id, dicom_store_id) + + instance_path = '{}/dicomWeb/studies/{}/series/{}/instances/{}'.format( + dicom_store_path, study_uid, series_uid, instance_uid) + + dicomweb_path = '{}/rendered'.format(instance_path) + + file_name = 'rendered_image.png' + + # Make an authenticated API request + session = get_session(service_account_json) + + headers = { + 'Accept': 'image/png' + } + + response = session.get(dicomweb_path, headers=headers) + + response.raise_for_status() + + with open(file_name, 'wb') as f: + f.write(response.content) + print('Retrieved rendered image and saved to file ' + + '{} in current directory'.format(file_name)) + + return response +# [END healthcare_dicomweb_retrieve_rendered] # [START healthcare_dicomweb_delete_study] @@ -228,6 +321,16 @@ def parse_command_line_args(): default=None, help='Unique identifier for a study.') + parser.add_argument( + '--series_uid', + default=None, + help='Unique identifier for a series.') + + parser.add_argument( + '--instance_uid', + default=None, + help='Unique identifier for an instance.') + command = parser.add_subparsers(dest='command') command.add_parser( @@ -239,6 +342,12 @@ def parse_command_line_args(): command.add_parser( 'dicomweb-retrieve-study', help=dicomweb_retrieve_study.__doc__) + command.add_parser( + 'dicomweb-retrieve-instance', + help=dicomweb_retrieve_instance.__doc__) + command.add_parser( + 'dicomweb-retrieve-rendered', + 
help=dicomweb_retrieve_rendered.__doc__) command.add_parser( 'dicomweb-delete-study', help=dicomweb_delete_study.__doc__) @@ -282,6 +391,30 @@ def run_command(args): args.dicom_store_id, args.study_uid) + elif args.command == 'dicomweb-retrieve-instance': + dicomweb_retrieve_instance( + args.service_account_json, + args.base_url, + args.project_id, + args.cloud_region, + args.dataset_id, + args.dicom_store_id, + args.study_uid, + args.series_uid, + args.instance_uid) + + elif args.command == 'dicomweb-retrieve-rendered': + dicomweb_retrieve_rendered( + args.service_account_json, + args.base_url, + args.project_id, + args.cloud_region, + args.dataset_id, + args.dicom_store_id, + args.study_uid, + args.series_uid, + args.instance_uid) + elif args.command == 'dicomweb-delete-study': dicomweb_delete_study( args.service_account_json, diff --git a/healthcare/api-client/dicom/dicomweb_test.py b/healthcare/api-client/dicom/dicomweb_test.py index a3bb6489a73..1f7d99f9e2e 100644 --- a/healthcare/api-client/dicom/dicomweb_test.py +++ b/healthcare/api-client/dicom/dicomweb_test.py @@ -34,9 +34,13 @@ RESOURCES = os.path.join(os.path.dirname(__file__), 'resources') dcm_file_name = 'dicom_00000001_000.dcm' dcm_file = os.path.join(RESOURCES, dcm_file_name) -# The study_uid is not assigned by the server and is part of the -# metadata of dcm_file +# The study_uid, series_uid, and instance_uid are not assigned by the +# server and are part of the metadata of dcm_file study_uid = '1.3.6.1.4.1.11129.5.5.111396399361969898205364400549799252857604' +series_uid = '1.3.6.1.4.1.11129.5.5.195628213694300498946760767481291263511724' +instance_uid = '{}.{}'.format( + '1.3.6.1.4.1.11129.5.5', + '153751009835107614666834563294684339746480') @pytest.fixture(scope='module') @@ -154,10 +158,100 @@ def test_dicomweb_retrieve_study(test_dataset, test_dicom_store, capsys): dicom_store_id, study_uid) + # Assert study was downloaded + assert os.path.isfile('study.multipart') + out, _ = capsys.readouterr() - # Check that store instance worked - assert 'Retrieved study with UID:' in out + # Check that retrieve study worked + assert 'Retrieved study' in out + + # Delete downloaded study + os.remove('study.multipart') + + dicomweb.dicomweb_delete_study( + service_account_json, + base_url, + project_id, + cloud_region, + dataset_id, + dicom_store_id, + study_uid) + + +def test_dicomweb_retrieve_instance(test_dataset, test_dicom_store, capsys): + dicomweb.dicomweb_store_instance( + service_account_json, + base_url, + project_id, + cloud_region, + dataset_id, + dicom_store_id, + dcm_file) + + dicomweb.dicomweb_retrieve_instance( + service_account_json, + base_url, + project_id, + cloud_region, + dataset_id, + dicom_store_id, + study_uid, + series_uid, + instance_uid) + + # Assert instance was downloaded + assert os.path.isfile('instance.dcm') + + out, _ = capsys.readouterr() + + # Check that retrieve instance worked + assert 'Retrieved DICOM instance' in out + + # Delete downloaded instance + os.remove('instance.dcm') + + dicomweb.dicomweb_delete_study( + service_account_json, + base_url, + project_id, + cloud_region, + dataset_id, + dicom_store_id, + study_uid) + + +def test_dicomweb_retrieve_rendered(test_dataset, test_dicom_store, capsys): + dicomweb.dicomweb_store_instance( + service_account_json, + base_url, + project_id, + cloud_region, + dataset_id, + dicom_store_id, + dcm_file) + + dicomweb.dicomweb_retrieve_rendered( + service_account_json, + base_url, + project_id, + cloud_region, + dataset_id, + dicom_store_id, + study_uid, 
+ series_uid, + instance_uid) + + # Assert rendered image was downloaded + assert os.path.isfile('rendered_image.png') + + out, _ = capsys.readouterr() + + # Check that retrieve rendered image worked + assert 'Retrieved rendered image' in out + + # Delete downloaded rendered image + os.remove('rendered_image.png') dicomweb.dicomweb_delete_study( service_account_json, diff --git a/iam/api-client/access.py b/iam/api-client/access.py index 3c112742e2b..337ae80ab7a 100644 --- a/iam/api-client/access.py +++ b/iam/api-client/access.py @@ -69,6 +69,17 @@ def modify_policy_add_role(policy, role, member): # [END iam_modify_policy_add_role] +# [START iam_modify_policy_remove_member] +def modify_policy_remove_member(policy, role, member): + """Removes a member from a role binding.""" + binding = next(b for b in policy['bindings'] if b['role'] == role) + if 'members' in binding and member in binding['members']: + binding['members'].remove(member) + print(binding) + return policy +# [END iam_modify_policy_remove_member] + + # [START iam_set_policy] def set_policy(project_id, policy): """Sets IAM policy for a project.""" @@ -116,6 +127,13 @@ def main(): modify_role_parser.add_argument('role') modify_role_parser.add_argument('member') + # Modify: remove member + modify_member_parser = subparsers.add_parser( + 'modify_member', help=get_policy.__doc__) + modify_member_parser.add_argument('project_id') + modify_member_parser.add_argument('role') + modify_member_parser.add_argument('member') + # Set set_parser = subparsers.add_parser( 'set', help=set_policy.__doc__) @@ -130,6 +148,8 @@ def main(): set_policy(args.project_id, args.policy) elif args.command == 'add_member': modify_policy_add_member(args.policy, args.role, args.member) + elif args.command == 'remove_member': + modify_policy_remove_member(args.policy, args.role, args.member) elif args.command == 'add_binding': modify_policy_add_role(args.policy, args.role, args.member) diff --git a/iam/api-client/access_test.py b/iam/api-client/access_test.py index eb95f9398ea..fc73474c87b 100644 --- a/iam/api-client/access_test.py +++ b/iam/api-client/access_test.py @@ -13,17 +13,43 @@ # limitations under the License. import os +import random import access +import service_accounts def test_access(capsys): - project = os.environ['GCLOUD_PROJECT'] + # Setting up variables for testing + project_id = os.environ['GCLOUD_PROJECT'] - policy = access.get_policy(project) + # specifying a sample role to be assigned + gcp_role = 'roles/owner' + + # section to create service account to test policy updates. 
+ rand = str(random.randint(0, 1000)) + name = 'python-test-' + rand + email = name + '@' + project_id + '.iam.gserviceaccount.com' + member = 'serviceAccount:' + email + service_accounts.create_service_account( + project_id, name, 'Py Test Account') + + policy = access.get_policy(project_id) + out, _ = capsys.readouterr() + assert u'etag' in out + + policy = access.modify_policy_add_role(policy, gcp_role, member) out, _ = capsys.readouterr() - assert 'etag' in out + assert u'etag' in out - policy = access.set_policy(project, policy) + policy = access.modify_policy_remove_member(policy, gcp_role, member) out, _ = capsys.readouterr() - assert 'etag' in out + assert 'iam.gserviceaccount.com' in out + + policy = access.set_policy(project_id, policy) + out, _ = capsys.readouterr() + assert u'etag' in out + + # deleting the service account created above + service_accounts.delete_service_account( + email) diff --git a/iam/api-client/grantable_roles.py b/iam/api-client/grantable_roles.py index e5986e7e1e7..ec8e87706b9 100644 --- a/iam/api-client/grantable_roles.py +++ b/iam/api-client/grantable_roles.py @@ -34,7 +34,8 @@ def view_grantable_roles(full_resource_name): }).execute() for role in roles['roles']: - print('Title: ' + role['title']) + if 'title' in role: + print('Title: ' + role['title']) print('Name: ' + role['name']) print('Description: ' + role['description']) print(' ') diff --git a/iam/api-client/quickstart.py b/iam/api-client/quickstart.py index 932e6832010..d38c601dee0 100644 --- a/iam/api-client/quickstart.py +++ b/iam/api-client/quickstart.py @@ -41,7 +41,8 @@ def quickstart(): for role in roles: print('Title: ' + role['title']) print('Name: ' + role['name']) - print('Description: ' + role['description']) + if 'description' in role: + print('Description: ' + role['description']) print('') # [END iam_quickstart] diff --git a/iot/api-client/manager/manager.py b/iot/api-client/manager/manager.py index 72ed7aa44f0..dc58b86c414 100644 --- a/iot/api-client/manager/manager.py +++ b/iot/api-client/manager/manager.py @@ -704,17 +704,8 @@ def list_devices_for_gateway( device.get('numId'), device.get('id'))) - if devices.get('deviceNumIds') is not None: - for device_id in devices.get('deviceNumIds'): - device_name = '{}/devices/{}'.format( - registry_name, device_id) - device = client.projects().locations().registries().devices().get( - name=device_name).execute() - print('Id: {}\n\tName: {}\n\traw: {}'.format( - device_id, device.get('id'), device)) - else: - if not found: - print('No devices bound to gateway {}'.format(gateway_id)) + if not found: + print('No devices bound to gateway {}'.format(gateway_id)) # [END iot_list_devices_for_gateway] @@ -755,7 +746,6 @@ def parse_command_line_args(): help='Path to public ES256 key file.') parser.add_argument( '--gateway_id', - required=True, help='Gateway identifier.') parser.add_argument( '--member', diff --git a/pubsub/cloud-client/subscriber.py b/pubsub/cloud-client/subscriber.py index 64e93951505..dbaa396cddd 100644 --- a/pubsub/cloud-client/subscriber.py +++ b/pubsub/cloud-client/subscriber.py @@ -198,6 +198,7 @@ def callback(message): def receive_messages_with_custom_attributes(project_id, subscription_name): """Receives messages from a pull subscription.""" # [START pubsub_subscriber_sync_pull_custom_attributes] + # [START pubsub_subscriber_async_pull_custom_attributes] import time from google.cloud import pubsub_v1 @@ -225,6 +226,7 @@ def callback(message): print('Listening for messages on {}'.format(subscription_path)) while True: 
time.sleep(60) + # [END pubsub_subscriber_async_pull_custom_attributes] # [END pubsub_subscriber_sync_pull_custom_attributes] @@ -284,7 +286,8 @@ def synchronous_pull(project_id, subscription_name): # Acknowledges the received messages so they will not be sent again. subscriber.acknowledge(subscription_path, ack_ids) - print("Received and acknowledged {} messages. Done.".format(NUM_MESSAGES)) + print('Received and acknowledged {} messages. Done.'.format( + len(response.received_messages))) # [END pubsub_subscriber_sync_pull] @@ -357,7 +360,8 @@ def worker(msg): if processes: time.sleep(SLEEP_TIME) - print("Received and acknowledged {} messages. Done.".format(NUM_MESSAGES)) + print('Received and acknowledged {} messages. Done.'.format( + len(response.received_messages))) # [END pubsub_subscriber_sync_pull_with_lease] diff --git a/texttospeech/ssml_addresses/resources/expected_example.mp3 b/texttospeech/ssml_addresses/resources/expected_example.mp3 index 407b85f7f5d..efa38a1428e 100644 Binary files a/texttospeech/ssml_addresses/resources/expected_example.mp3 and b/texttospeech/ssml_addresses/resources/expected_example.mp3 differ diff --git a/texttospeech/ssml_addresses/tts.py b/texttospeech/ssml_addresses/tts.py index 0482b8f0b7e..b30cfce004f 100644 --- a/texttospeech/ssml_addresses/tts.py +++ b/texttospeech/ssml_addresses/tts.py @@ -16,9 +16,7 @@ # [START tts_ssml_address_imports] from google.cloud import texttospeech -# For Python 3, instead use: -# import html -import cgi +import html # [END tts_ssml_address_imports] @@ -89,9 +87,7 @@ def text_to_ssml(inputfile): # SSML commands # For example, '<' --> '<' and '&' --> '&' - # For Python 3, instead use: - # escaped_lines = html.escape(raw_lines) - escaped_lines = cgi.escape(raw_lines) + escaped_lines = html.escape(raw_lines) # Convert plaintext to SSML # Wait two seconds between each address diff --git a/translate/cloud-client/beta_snippets.py b/translate/cloud-client/beta_snippets.py index 7cd94aed59a..0f8a423c39e 100644 --- a/translate/cloud-client/beta_snippets.py +++ b/translate/cloud-client/beta_snippets.py @@ -269,7 +269,7 @@ def translate_text_with_glossary(project_id, glossary_id, text): target_language_code='es', glossary_config=glossary_config) - for translation in result.translations: + for translation in result.glossary_translations: print(translation) # [END translate_translate_text_with_glossary_beta] diff --git a/translate/cloud-client/beta_snippets_test.py b/translate/cloud-client/beta_snippets_test.py index f7099a27cae..265fb986710 100644 --- a/translate/cloud-client/beta_snippets_test.py +++ b/translate/cloud-client/beta_snippets_test.py @@ -63,7 +63,7 @@ def unique_glossary_id(): def test_translate_text(capsys): beta_snippets.translate_text(PROJECT_ID, 'Hello world') out, _ = capsys.readouterr() - assert 'Zdravo svet' in out + assert 'Zdravo svet' in out or 'Pozdrav svijetu' in out def test_batch_translate_text(capsys, bucket): @@ -120,9 +120,9 @@ def test_list_glossary(capsys, glossary): def test_translate_text_with_glossary(capsys, glossary): beta_snippets.translate_text_with_glossary( - PROJECT_ID, glossary, 'directions') + PROJECT_ID, glossary, 'account') out, _ = capsys.readouterr() - assert 'direcciones' in out + assert 'cuenta' in out def test_delete_glossary(capsys, unique_glossary_id): diff --git a/translate/cloud-client/hybrid_glossaries/README.rst b/translate/cloud-client/hybrid_glossaries/README.rst new file mode 100644 index 00000000000..2c9866545dc --- /dev/null +++ 
b/translate/cloud-client/hybrid_glossaries/README.rst @@ -0,0 +1,97 @@ +.. This file is automatically generated. Do not edit this file directly. + +Google Translation API Python Samples +=============================================================================== + +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=/README.rst + + +This directory contains samples for Google Translation API. With `Google Translation API`, you can dynamically translate text between thousands of language pairs. + + + + +.. _Google Translation API: https://cloud.google.com/translate/docs + +Setup +------------------------------------------------------------------------------- + + +Authentication +++++++++++++++ + +This sample requires you to have authentication setup. Refer to the +`Authentication Getting Started Guide`_ for instructions on setting up +credentials for applications. + +.. _Authentication Getting Started Guide: + https://cloud.google.com/docs/authentication/getting-started + +Install Dependencies +++++++++++++++++++++ + +#. Clone python-docs-samples and change directory to the sample directory you want to use. + + .. code-block:: bash + + $ git clone https://github.com/GoogleCloudPlatform/python-docs-samples.git + +#. Install `pip`_ and `virtualenv`_ if you do not already have them. You may want to refer to the `Python Development Environment Setup Guide`_ for Google Cloud Platform for instructions. + + .. _Python Development Environment Setup Guide: + https://cloud.google.com/python/setup + +#. Create a virtualenv. Samples are compatible with Python 2.7 and 3.4+. + + .. code-block:: bash + + $ virtualenv env + $ source env/bin/activate + +#. Install the dependencies needed to run the samples. + + .. code-block:: bash + + $ pip install -r requirements.txt + +.. _pip: https://pip.pypa.io/ +.. _virtualenv: https://virtualenv.pypa.io/ + +Samples +------------------------------------------------------------------------------- + +Using glossaries with vision and text-to-speech ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=/hybrid_tutorial.py,/README.rst + + + + +To run this sample: + +.. code-block:: bash + + $ python hybrid_tutorial.py + + + + +The client library +------------------------------------------------------------------------------- + +This sample uses the `Google Cloud Client Library for Python`_. +You can read the documentation for more details on API usage and use GitHub +to `browse the source`_ and `report issues`_. + +.. _Google Cloud Client Library for Python: + https://googlecloudplatform.github.io/google-cloud-python/ +.. _browse the source: + https://github.com/GoogleCloudPlatform/google-cloud-python +.. _report issues: + https://github.com/GoogleCloudPlatform/google-cloud-python/issues + + +.. 
_Google Cloud SDK: https://cloud.google.com/sdk/ \ No newline at end of file diff --git a/translate/cloud-client/hybrid_glossaries/README.rst.in b/translate/cloud-client/hybrid_glossaries/README.rst.in new file mode 100644 index 00000000000..882f3666fc9 --- /dev/null +++ b/translate/cloud-client/hybrid_glossaries/README.rst.in @@ -0,0 +1,22 @@ + + +# This file is used to generate README.rst + +product: + name: Google Translation API + short_name: Translation API + url: https://cloud.google.com/translate/docs + description: > + With `Google Translation API`, you can dynamically translate text between + thousands of language pairs. + +setup: +- auth +- install_deps + +samples: +- name: Using glossaries with vision and text-to-speech + file: hybrid_tutorial.py + +cloud_client_library: true + diff --git a/translate/cloud-client/hybrid_glossaries/hybrid_tutorial.py b/translate/cloud-client/hybrid_glossaries/hybrid_tutorial.py new file mode 100644 index 00000000000..be73960f64f --- /dev/null +++ b/translate/cloud-client/hybrid_glossaries/hybrid_tutorial.py @@ -0,0 +1,249 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# [START translate_hybrid_imports] +import io +import os +import html + +# Imports the Google Cloud client libraries +from google.api_core.exceptions import AlreadyExists +from google.cloud import translate_v3beta1 as translate +from google.cloud import vision +from google.cloud import texttospeech +# [END translate_hybrid_imports] + + +# [START translate_hybrid_project_id] +# extract GCP project id +PROJECT_ID = os.environ['GCLOUD_PROJECT'] +# [END translate_hybrid_project_id] + + +# [START translate_hybrid_vision] +def pic_to_text(infile): + """Detects text in an image file + + ARGS + infile: path to image file + + RETURNS + String of text detected in image + """ + + # Instantiates a client + client = vision.ImageAnnotatorClient() + + # Opens the input image file + with io.open(infile, 'rb') as image_file: + content = image_file.read() + + image = vision.types.Image(content=content) + + # For dense text, use document_text_detection + # For less dense text, use text_detection + response = client.document_text_detection(image=image) + text = response.full_text_annotation.text + + return text + # [END translate_hybrid_vision] + + +# [START translate_hybrid_create_glossary] +def create_glossary(languages, project_id, glossary_name, glossary_uri): + """Creates a GCP glossary resource + Assumes you've already manually uploaded a glossary to Cloud Storage + + ARGS + languages: list of languages in the glossary + project_id: GCP project id + glossary_name: name you want to give this glossary resource + glossary_uri: the uri of the glossary you uploaded to Cloud Storage + + RETURNS + nothing + """ + + # Instantiates a client + client = translate.TranslationServiceClient() + + # Designates the data center location that you want to use + location = 'us-central1' + + # Set glossary resource name + name = client.glossary_path( + project_id, + location, + 
glossary_name) + + # Set language codes + language_codes_set = translate.types.Glossary.LanguageCodesSet( + language_codes=languages) + + gcs_source = translate.types.GcsSource( + input_uri=glossary_uri) + + input_config = translate.types.GlossaryInputConfig( + gcs_source=gcs_source) + + # Set glossary resource information + glossary = translate.types.Glossary( + name=name, + language_codes_set=language_codes_set, + input_config=input_config) + + parent = client.location_path(project_id, location) + + # Create glossary resource + # Handle exception for case in which a glossary + # with glossary_name already exists + try: + operation = client.create_glossary(parent=parent, glossary=glossary) + operation.result(timeout=90) + print('Created glossary ' + glossary_name + '.') + except AlreadyExists: + print('The glossary ' + glossary_name + + ' already exists. No new glossary was created.') + # [END translate_hybrid_create_glossary] + + +# [START translate_hybrid_translate] +def translate_text(text, source_language_code, target_language_code, + project_id, glossary_name): + """Translates text to a given language using a glossary + + ARGS + text: String of text to translate + prev_lang: language of input text + new_lang: language of output text + project_id: GCP project id + glossary_name: name you gave your project's glossary + resource when you created it + + RETURNS + String of translated text + """ + + # Instantiates a client + client = translate.TranslationServiceClient() + + # Designates the data center location that you want to use + location = 'us-central1' + + glossary = client.glossary_path( + project_id, + location, + glossary_name) + + glossary_config = translate.types.TranslateTextGlossaryConfig( + glossary=glossary) + + parent = client.location_path(project_id, location) + + result = client.translate_text( + parent=parent, + contents=[text], + mime_type='text/plain', # mime types: text/plain, text/html + source_language_code=source_language_code, + target_language_code=target_language_code, + glossary_config=glossary_config) + + # Extract translated text from API response + return result.glossary_translations[0].translated_text + # [END translate_hybrid_translate] + + +# [START translate_hybrid_tts] +def text_to_speech(text, outfile): + """Converts plaintext to SSML and + generates synthetic audio from SSML + + ARGS + text: text to synthesize + outfile: filename to use to store synthetic audio + + RETURNS + nothing + """ + + # Replace special characters with HTML Ampersand Character Codes + # These Codes prevent the API from confusing text with + # SSML commands + # For example, '<' --> '<' and '&' --> '&' + escaped_lines = html.escape(text) + + # Convert plaintext to SSML in order to wait two seconds + # between each line in synthetic speech + ssml = '{}'.format( + escaped_lines.replace('\n', '\n')) + + # Instantiates a client + client = texttospeech.TextToSpeechClient() + + # Sets the text input to be synthesized + synthesis_input = texttospeech.types.SynthesisInput(ssml=ssml) + + # Builds the voice request, selects the language code ("en-US") and + # the SSML voice gender ("MALE") + voice = texttospeech.types.VoiceSelectionParams( + language_code='en-US', + ssml_gender=texttospeech.enums.SsmlVoiceGender.MALE) + + # Selects the type of audio file to return + audio_config = texttospeech.types.AudioConfig( + audio_encoding=texttospeech.enums.AudioEncoding.MP3) + + # Performs the text-to-speech request on the text input with the selected + # voice parameters and audio file type + 
response = client.synthesize_speech(synthesis_input, voice, audio_config) + + # Writes the synthetic audio to the output file. + with open(outfile, 'wb') as out: + out.write(response.audio_content) + print('Audio content written to file ' + outfile) + # [END translate_hybrid_tts] + + +# [START translate_hybrid_integration] +def main(): + + # Photo from which to extract text + infile = 'resources/example.png' + # Name of file that will hold synthetic speech + outfile = 'resources/example.mp3' + + # Defines the languages in the glossary + # This list must match the languages in the glossary + # Here, the glossary includes French and English + glossary_langs = ['fr', 'en'] + # Name that will be assigned to your project's glossary resource + glossary_name = 'bistro-glossary' + # uri of .csv file uploaded to Cloud Storage + glossary_uri = 'gs://cloud-samples-data/translation/bistro_glossary.csv' + + create_glossary(glossary_langs, PROJECT_ID, glossary_name, glossary_uri) + + # photo -> detected text + text_to_translate = pic_to_text(infile) + # detected text -> translated text + text_to_speak = translate_text(text_to_translate, 'fr', 'en', + PROJECT_ID, glossary_name) + # translated text -> synthetic audio + text_to_speech(text_to_speak, outfile) + # [END transalte_hybrid_integration] + + +if __name__ == '__main__': + main() diff --git a/translate/cloud-client/hybrid_glossaries/hybrid_tutorial_tests.py b/translate/cloud-client/hybrid_glossaries/hybrid_tutorial_tests.py new file mode 100644 index 00000000000..bdd8251ba86 --- /dev/null +++ b/translate/cloud-client/hybrid_glossaries/hybrid_tutorial_tests.py @@ -0,0 +1,122 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+import filecmp
+import os
+import sys
+
+from hybrid_tutorial import pic_to_text
+from hybrid_tutorial import create_glossary
+from hybrid_tutorial import translate_text
+from hybrid_tutorial import text_to_speech
+
+
+PROJECT_ID = os.environ['GCLOUD_PROJECT']
+
+
+# VISION TESTS
+
+
+def test_vision_standard_format():
+
+    expected_text = 'This is\na test!\n'
+    alt_expected_text = 'This\nis\na test!\n'
+
+    # Generate text using Vision API
+    text = pic_to_text('resources/standard_format.jpeg')
+
+    assert (text == expected_text) or (text == alt_expected_text)
+
+
+def test_vision_non_standard_format():
+
+    # Generate text
+    text = pic_to_text('resources/non_standard_format.png')
+
+    # Read expected text
+    with open('resources/non_standard_format.txt') as f:
+        expected_text = f.read()
+
+    assert text == expected_text
+
+
+# TRANSLATE TESTS
+
+
+def test_create_and_delete_glossary():
+    sys.path.insert(1, '../')
+    from beta_snippets import delete_glossary
+
+    languages = ['fr', 'en']
+    glossary_name = 'test-glossary'
+    glossary_uri = 'gs://cloud-samples-data/translation/bistro_glossary.csv'
+
+    # create_glossary will raise an exception if creation fails
+    create_glossary(languages, PROJECT_ID, glossary_name,
+                    glossary_uri)
+
+    # Delete glossary so that future tests will pass
+    # delete_glossary will raise an exception if deletion fails
+    delete_glossary(PROJECT_ID, glossary_name)
+
+
+def test_translate_standard():
+
+    expected_text = 'Hello'
+
+    text = translate_text('Bonjour', 'fr', 'en', PROJECT_ID,
+                          'bistro-glossary')
+
+    assert text == expected_text
+
+
+def test_translate_glossary():
+
+    expected_text = 'I eat goat cheese'
+    input_text = 'Je mange du chevre'
+
+    text = translate_text(input_text, 'fr', 'en', PROJECT_ID,
+                          'bistro-glossary')
+
+    assert text == expected_text
+
+
+# TEXT-TO-SPEECH TESTS
+
+
+def test_tts_standard(capsys):
+    outfile = 'resources/test_standard_text.mp3'
+    expected_outfile = 'resources/expected_standard_text.mp3'
+    textfile = 'resources/standard_format.txt'
+
+    with open(textfile, 'r') as f:
+        text = f.read()
+
+    text_to_speech(text, outfile)
+
+    # Assert audio file generated
+    assert os.path.isfile(outfile)
+
+    # Assert audio file generated correctly
+    assert filecmp.cmp(outfile,
+                       expected_outfile,
+                       shallow=True)
+
+    out, err = capsys.readouterr()
+
+    # Assert success message printed
+    assert 'Audio content written to file ' + outfile in out
+
+    # Delete test file
+    os.remove(outfile)
diff --git a/translate/cloud-client/hybrid_glossaries/requirements.txt b/translate/cloud-client/hybrid_glossaries/requirements.txt
new file mode 100644
index 00000000000..29c4261a7cf
--- /dev/null
+++ b/translate/cloud-client/hybrid_glossaries/requirements.txt
@@ -0,0 +1,3 @@
+google-cloud-translate==1.4.0
+google-cloud-vision==0.35.2
+google-cloud-texttospeech==0.4.0
\ No newline at end of file
diff --git a/translate/cloud-client/hybrid_glossaries/resources/bistro_glossary.csv b/translate/cloud-client/hybrid_glossaries/resources/bistro_glossary.csv
new file mode 100644
index 00000000000..99b45144552
--- /dev/null
+++ b/translate/cloud-client/hybrid_glossaries/resources/bistro_glossary.csv
@@ -0,0 +1,13 @@
+fr,en,
+chevre,goat cheese,
+Chevre,Goat cheese,
+chèvre,goat cheese,
+Chèvre,Goat cheese,
+crème brulée,crème brulée,
+Crème brulée,Crème brulée,
+Crème Brulée,Crème Brulée,
+bouillabaisse,fish stew,
+Bouillabaisse,Fish stew,
+steak frites,steak with french fries,
+Steak frites,Steak with french fries,
+Steak Frites,Steak with French Fries,
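For a quick local look at the term pairs in this glossary file, a small standard-library sketch; the relative path assumes it is run from the hybrid_glossaries sample directory.

# Illustrative only: print each glossary row as a {language: term} mapping.
import csv

with open('resources/bistro_glossary.csv') as f:
    rows = list(csv.reader(f))

languages = [code for code in rows[0] if code]  # ['fr', 'en']
for row in rows[1:]:
    print(dict(zip(languages, row)))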
diff --git a/translate/cloud-client/hybrid_glossaries/resources/example.png b/translate/cloud-client/hybrid_glossaries/resources/example.png
new file mode 100644
index 00000000000..a3ac25ab053
Binary files /dev/null and b/translate/cloud-client/hybrid_glossaries/resources/example.png differ
diff --git a/translate/cloud-client/hybrid_glossaries/resources/expected_standard_text.mp3 b/translate/cloud-client/hybrid_glossaries/resources/expected_standard_text.mp3
new file mode 100644
index 00000000000..c731fbd787d
Binary files /dev/null and b/translate/cloud-client/hybrid_glossaries/resources/expected_standard_text.mp3 differ
diff --git a/translate/cloud-client/hybrid_glossaries/resources/non_standard_format.png b/translate/cloud-client/hybrid_glossaries/resources/non_standard_format.png
new file mode 100644
index 00000000000..eeee9c7f6a6
Binary files /dev/null and b/translate/cloud-client/hybrid_glossaries/resources/non_standard_format.png differ
diff --git a/translate/cloud-client/hybrid_glossaries/resources/non_standard_format.txt b/translate/cloud-client/hybrid_glossaries/resources/non_standard_format.txt
new file mode 100644
index 00000000000..8a6e3c113d1
--- /dev/null
+++ b/translate/cloud-client/hybrid_glossaries/resources/non_standard_format.txt
@@ -0,0 +1,30 @@
+MENU
+Google Cloud Bistro
+SALADS
+SANDWICHES
+GCP Green Salad
+Fresh Greens
+$5
+Kubernetes Sandwich
+ham and cheese sandwich
+$10
+Cloud Caprese
+Mozzarella, tomatoes, basil,
+balsamic reduction
+$8
+Dialogflow Panini
+chicken, pesto, and
+mozzarella panini
+$10
+Firebase Fruit Salad
+watermelon, honeydew melon,
+and pineapple
+Compute Engine Burger
+quarter-pound burger with
+cheddar cheese
+$10
+$6
+BigQuery BLT
+bacon, lettuce, and tomato
+sandwich
+$10
diff --git a/translate/cloud-client/hybrid_glossaries/resources/standard_format.jpeg b/translate/cloud-client/hybrid_glossaries/resources/standard_format.jpeg
new file mode 100644
index 00000000000..f985a540dad
Binary files /dev/null and b/translate/cloud-client/hybrid_glossaries/resources/standard_format.jpeg differ
diff --git a/translate/cloud-client/hybrid_glossaries/resources/standard_format.txt b/translate/cloud-client/hybrid_glossaries/resources/standard_format.txt
new file mode 100644
index 00000000000..d91d49e8680
--- /dev/null
+++ b/translate/cloud-client/hybrid_glossaries/resources/standard_format.txt
@@ -0,0 +1,2 @@
+This is
+a test!
diff --git a/translate/cloud-client/snippets_test.py b/translate/cloud-client/snippets_test.py
index 5123576698a..6d63759daee 100644
--- a/translate/cloud-client/snippets_test.py
+++ b/translate/cloud-client/snippets_test.py
@@ -46,4 +46,4 @@ def test_translate_utf8(capsys):
     text = u'나는 파인애플을 좋아한다.'
     snippets.translate_text('en', text)
     out, _ = capsys.readouterr()
-    assert u'I like pineapples.' in out
+    assert u'I like pineapple' in out
diff --git a/vision/automl/edge_container_predict/automl_vision_edge_container_predict_test.py b/vision/automl/edge_container_predict/automl_vision_edge_container_predict_test.py
index e9fedbb28e8..24f7b8f8dee 100644
--- a/vision/automl/edge_container_predict/automl_vision_edge_container_predict_test.py
+++ b/vision/automl/edge_container_predict/automl_vision_edge_container_predict_test.py
@@ -37,14 +37,17 @@
 # The absolute path of the current file. This will locate the model_path when
 # run docker containers.
-ROOT_DIR = os.environ.get('KOKORO_ROOT', os.path.abspath(os.path.dirname(__file__)))
+ROOT_DIR = os.environ.get(
+    'KOKORO_ROOT', os.path.abspath(os.path.dirname(__file__)))
 MODEL_PATH = os.path.join(ROOT_DIR, 'model_path')
 IMAGE_FILE_PATH = os.path.join(os.path.dirname(__file__), 'test.jpg')
 # The cpu docker gcs path is from 'Edge container tutorial'.
-CPU_DOCKER_GCS_PATH = 'gcr.io/automl-vision-ondevice/gcloud-container-1.12.0:latest'
+CPU_DOCKER_GCS_PATH = '{}'.format(
+    'gcr.io/automl-vision-ondevice/gcloud-container-1.12.0:latest')
 # The path of a sample saved model.
-SAMPLE_SAVED_MODEL = 'gs://cloud-samples-data/vision/edge_container_predict/saved_model.pb'
+SAMPLE_SAVED_MODEL = '{}'.format(
+    'gs://cloud-samples-data/vision/edge_container_predict/saved_model.pb')
 # Container Name.
 NAME = 'AutomlVisionEdgeContainerPredictTest'
 # Port Number.
diff --git a/vision/cloud-client/quickstart/quickstart.py b/vision/cloud-client/quickstart/quickstart.py
index 8bb674eb118..a8c7a5f23ef 100644
--- a/vision/cloud-client/quickstart/quickstart.py
+++ b/vision/cloud-client/quickstart/quickstart.py
@@ -32,9 +32,7 @@ def run_quickstart():
     # [END vision_python_migration_client]

     # The name of the image file to annotate
-    file_name = os.path.join(
-        os.path.dirname(__file__),
-        'resources/wakeupcat.jpg')
+    file_name = os.path.abspath('resources/wakeupcat.jpg')

     # Loads the image into memory
     with io.open(file_name, 'rb') as image_file:
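The quickstart change above swaps a path anchored to the script's directory for one anchored to the current working directory, so the sample now expects to be run from its own folder. A small sketch of the difference, using an illustrative file name:

# Compare the two path strategies (stdlib only; the file need not exist).
import os

relative = 'resources/wakeupcat.jpg'

# Resolved against the directory that contains this script:
script_relative = os.path.join(
    os.path.dirname(os.path.abspath(__file__)), relative)

# Resolved against the current working directory of the process:
cwd_relative = os.path.abspath(relative)

print(script_relative)
print(cwd_relative)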