diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 07d9416e13c9..b8ca6cb4be0d 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -69,5 +69,6 @@ /trace/**/* @ymotongpoo @GoogleCloudPlatform/python-samples-reviewers /translate/**/* @nicain @GoogleCloudPlatform/python-samples-reviewers /talent/**/* @GoogleCloudPlatform/python-samples-reviewers +/vision/**/* @GoogleCloudPlatform/python-samples-reviewers /workflows/**/* @GoogleCloudPlatform/python-samples-reviewers /kms/**/** @GoogleCloudPlatform/dee-infra @GoogleCloudPlatform/python-samples-reviewers diff --git a/.github/blunderbuss.yml b/.github/blunderbuss.yml index 2eb298809bb4..5a26676163b6 100644 --- a/.github/blunderbuss.yml +++ b/.github/blunderbuss.yml @@ -131,6 +131,10 @@ assign_issues_by: - 'api: cloudkms' to: - GoogleCloudPlatform/dee-infra +- labels: + - 'api: vision' + to: + - GoogleCloudPlatform/python-samples-reviewers assign_prs_by: - labels: diff --git a/vision/AUTHORING_GUIDE.md b/vision/AUTHORING_GUIDE.md new file mode 100644 index 000000000000..8249522ffc2d --- /dev/null +++ b/vision/AUTHORING_GUIDE.md @@ -0,0 +1 @@ +See https://github.com/GoogleCloudPlatform/python-docs-samples/blob/main/AUTHORING_GUIDE.md \ No newline at end of file diff --git a/vision/CONTRIBUTING.md b/vision/CONTRIBUTING.md new file mode 100644 index 000000000000..f5fe2e6baf13 --- /dev/null +++ b/vision/CONTRIBUTING.md @@ -0,0 +1 @@ +See https://github.com/GoogleCloudPlatform/python-docs-samples/blob/main/CONTRIBUTING.md \ No newline at end of file diff --git a/vision/snippets/crop_hints/.gitignore b/vision/snippets/crop_hints/.gitignore new file mode 100644 index 000000000000..69e003866fb2 --- /dev/null +++ b/vision/snippets/crop_hints/.gitignore @@ -0,0 +1,2 @@ +output-crop.jpg +output-hint.jpg diff --git a/vision/snippets/crop_hints/README.rst b/vision/snippets/crop_hints/README.rst new file mode 100644 index 000000000000..4ca8652f5a2d --- /dev/null +++ b/vision/snippets/crop_hints/README.rst @@ -0,0 +1,111 @@ +.. This file is automatically generated. Do not edit this file directly. + +Google Cloud Vision API Python Samples +=============================================================================== + +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=vision/cloud-client/crop_hints/README.rst + + +This directory contains samples for Google Cloud Vision API. `Google Cloud Vision API`_ allows developers to easily integrate vision detection features within applications, including image labeling, face and landmark detection, optical character recognition (OCR), and tagging of explicit content. + +- See the `migration guide`_ for information about migrating to Python client library v0.25.1. + +.. _migration guide: https://cloud.google.com/vision/docs/python-client-migration + + + + +.. _Google Cloud Vision API: https://cloud.google.com/vision/docs + +Setup +------------------------------------------------------------------------------- + + +Authentication +++++++++++++++ + +This sample requires you to have authentication setup. Refer to the +`Authentication Getting Started Guide`_ for instructions on setting up +credentials for applications. + +.. _Authentication Getting Started Guide: + https://cloud.google.com/docs/authentication/getting-started + +Install Dependencies +++++++++++++++++++++ + +#. 
Clone python-docs-samples and change directory to the sample directory you want to use. + + .. code-block:: bash + + $ git clone https://github.com/GoogleCloudPlatform/python-docs-samples.git + +#. Install `pip`_ and `virtualenv`_ if you do not already have them. You may want to refer to the `Python Development Environment Setup Guide`_ for Google Cloud Platform for instructions. + + .. _Python Development Environment Setup Guide: + https://cloud.google.com/python/setup + +#. Create a virtualenv. Samples are compatible with Python 2.7 and 3.4+. + + .. code-block:: bash + + $ virtualenv env + $ source env/bin/activate + +#. Install the dependencies needed to run the samples. + + .. code-block:: bash + + $ pip install -r requirements.txt + +.. _pip: https://pip.pypa.io/ +.. _virtualenv: https://virtualenv.pypa.io/ + +Samples +------------------------------------------------------------------------------- + +Crop Hints Tutorial ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=vision/cloud-client/crop_hints/crop_hints.py,vision/cloud-client/crop_hints/README.rst + + + + +To run this sample: + +.. code-block:: bash + + $ python crop_hints.py + + usage: crop_hints.py [-h] image_file mode + + positional arguments: + image_file The image you'd like to crop. + mode Set to "crop" or "draw". + + optional arguments: + -h, --help show this help message and exit + + + + + +The client library +------------------------------------------------------------------------------- + +This sample uses the `Google Cloud Client Library for Python`_. +You can read the documentation for more details on API usage and use GitHub +to `browse the source`_ and `report issues`_. + +.. _Google Cloud Client Library for Python: + https://googlecloudplatform.github.io/google-cloud-python/ +.. _browse the source: + https://github.com/GoogleCloudPlatform/google-cloud-python +.. _report issues: + https://github.com/GoogleCloudPlatform/google-cloud-python/issues + + +.. _Google Cloud SDK: https://cloud.google.com/sdk/ \ No newline at end of file diff --git a/vision/snippets/crop_hints/README.rst.in b/vision/snippets/crop_hints/README.rst.in new file mode 100644 index 000000000000..113d27710441 --- /dev/null +++ b/vision/snippets/crop_hints/README.rst.in @@ -0,0 +1,30 @@ +# This file is used to generate README.rst + +product: + name: Google Cloud Vision API + short_name: Cloud Vision API + url: https://cloud.google.com/vision/docs + description: > + `Google Cloud Vision API`_ allows developers to easily integrate vision + detection features within applications, including image labeling, face and + landmark detection, optical character recognition (OCR), and tagging of + explicit content. + + + - See the `migration guide`_ for information about migrating to Python client library v0.25.1. + + + .. 
_migration guide: https://cloud.google.com/vision/docs/python-client-migration + +setup: +- auth +- install_deps + +samples: +- name: Crop Hints Tutorial + file: crop_hints.py + show_help: True + +cloud_client_library: true + +folder: vision/cloud-client/crop_hints \ No newline at end of file diff --git a/vision/snippets/crop_hints/crop_hints.py b/vision/snippets/crop_hints/crop_hints.py new file mode 100644 index 000000000000..76efad621b39 --- /dev/null +++ b/vision/snippets/crop_hints/crop_hints.py @@ -0,0 +1,98 @@ +#!/usr/bin/env python + +# Copyright 2017 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Outputs a cropped image or an image highlighting crop regions on an image. + +Examples: + python crop_hints.py resources/cropme.jpg draw + python crop_hints.py resources/cropme.jpg crop +""" +# [START vision_crop_hints_tutorial] +# [START vision_crop_hints_tutorial_imports] +import argparse +import io + +from google.cloud import vision +from PIL import Image, ImageDraw +# [END vision_crop_hints_tutorial_imports] + + +def get_crop_hint(path): + # [START vision_crop_hints_tutorial_get_crop_hints] + """Detect crop hints on a single image and return the first result.""" + client = vision.ImageAnnotatorClient() + + with io.open(path, 'rb') as image_file: + content = image_file.read() + + image = vision.Image(content=content) + + crop_hints_params = vision.CropHintsParams(aspect_ratios=[1.77]) + image_context = vision.ImageContext(crop_hints_params=crop_hints_params) + + response = client.crop_hints(image=image, image_context=image_context) + hints = response.crop_hints_annotation.crop_hints + + # Get bounds for the first crop hint using an aspect ratio of 1.77. 
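+    # The bounding_poly vertices are absolute pixel coordinates; the draw
+    # and crop helpers below assume vertices[0] is the top-left corner and
+    # vertices[2] the opposite (bottom-right) corner of the suggested crop.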
+ vertices = hints[0].bounding_poly.vertices + # [END vision_crop_hints_tutorial_get_crop_hints] + + return vertices + + +def draw_hint(image_file): + """Draw a border around the image using the hints in the vector list.""" + # [START vision_crop_hints_tutorial_draw_crop_hints] + vects = get_crop_hint(image_file) + + im = Image.open(image_file) + draw = ImageDraw.Draw(im) + draw.polygon([ + vects[0].x, vects[0].y, + vects[1].x, vects[1].y, + vects[2].x, vects[2].y, + vects[3].x, vects[3].y], None, 'red') + im.save('output-hint.jpg', 'JPEG') + print('Saved new image to output-hint.jpg') + # [END vision_crop_hints_tutorial_draw_crop_hints] + + +def crop_to_hint(image_file): + """Crop the image using the hints in the vector list.""" + # [START vision_crop_hints_tutorial_crop_to_hints] + vects = get_crop_hint(image_file) + + im = Image.open(image_file) + im2 = im.crop([vects[0].x, vects[0].y, + vects[2].x - 1, vects[2].y - 1]) + im2.save('output-crop.jpg', 'JPEG') + print('Saved new image to output-crop.jpg') + # [END vision_crop_hints_tutorial_crop_to_hints] + + +if __name__ == '__main__': + # [START vision_crop_hints_tutorial_run_application] + parser = argparse.ArgumentParser() + parser.add_argument('image_file', help='The image you\'d like to crop.') + parser.add_argument('mode', help='Set to "crop" or "draw".') + args = parser.parse_args() + + if args.mode == 'crop': + crop_to_hint(args.image_file) + elif args.mode == 'draw': + draw_hint(args.image_file) + # [END vision_crop_hints_tutorial_run_application] +# [END vision_crop_hints_tutorial] diff --git a/vision/snippets/crop_hints/crop_hints_test.py b/vision/snippets/crop_hints/crop_hints_test.py new file mode 100644 index 000000000000..7268cf4fbfd0 --- /dev/null +++ b/vision/snippets/crop_hints/crop_hints_test.py @@ -0,0 +1,37 @@ +# Copyright 2017 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os + +import crop_hints + + +def test_crop(capsys): + """Checks the output image for cropping the image is created.""" + file_name = os.path.join( + os.path.dirname(__file__), + 'resources/cropme.jpg') + crop_hints.crop_to_hint(file_name) + out, _ = capsys.readouterr() + assert os.path.isfile('output-crop.jpg') + + +def test_draw(capsys): + """Checks the output image for drawing the crop hint is created.""" + file_name = os.path.join( + os.path.dirname(__file__), + 'resources/cropme.jpg') + crop_hints.draw_hint(file_name) + out, _ = capsys.readouterr() + assert os.path.isfile('output-hint.jpg') diff --git a/vision/snippets/crop_hints/noxfile_config.py b/vision/snippets/crop_hints/noxfile_config.py new file mode 100644 index 000000000000..e0014e50418e --- /dev/null +++ b/vision/snippets/crop_hints/noxfile_config.py @@ -0,0 +1,27 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Default TEST_CONFIG_OVERRIDE for python repos. + +# You can copy this file into your directory, then it will be inported from +# the noxfile.py. + +# The source of truth: +# https://github.com/GoogleCloudPlatform/python-docs-samples/blob/master/noxfile_config.py + +TEST_CONFIG_OVERRIDE = { + # You can opt out from the test for specific Python versions. + # Pillow 9.0.0 does not support python 3.6 + "ignored_versions": ["2.7", "3.6"], +} diff --git a/vision/snippets/crop_hints/requirements-test.txt b/vision/snippets/crop_hints/requirements-test.txt new file mode 100644 index 000000000000..49780e035690 --- /dev/null +++ b/vision/snippets/crop_hints/requirements-test.txt @@ -0,0 +1 @@ +pytest==7.2.0 diff --git a/vision/snippets/crop_hints/requirements.txt b/vision/snippets/crop_hints/requirements.txt new file mode 100644 index 000000000000..9c9ade3be0e8 --- /dev/null +++ b/vision/snippets/crop_hints/requirements.txt @@ -0,0 +1,2 @@ +google-cloud-vision==3.1.4 +pillow==9.3.0 diff --git a/vision/snippets/crop_hints/resources/cropme.jpg b/vision/snippets/crop_hints/resources/cropme.jpg new file mode 100644 index 000000000000..501458958639 Binary files /dev/null and b/vision/snippets/crop_hints/resources/cropme.jpg differ diff --git a/vision/snippets/detect/README.rst b/vision/snippets/detect/README.rst new file mode 100644 index 000000000000..a06e286b95b9 --- /dev/null +++ b/vision/snippets/detect/README.rst @@ -0,0 +1,261 @@ + +.. This file is automatically generated. Do not edit this file directly. + +Google Cloud Vision API Python Samples +=============================================================================== + +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=vision/cloud-client/detect/README.rst + + +This directory contains samples for Google Cloud Vision API. `Google Cloud Vision API`_ allows developers to easily integrate vision detection features within applications, including image labeling, face and landmark detection, optical character recognition (OCR), and tagging of explicit content. + +- See the `migration guide`_ for information about migrating to Python client library v0.25.1. + +.. _migration guide: https://cloud.google.com/vision/docs/python-client-migration + + + + +.. _Google Cloud Vision API: https://cloud.google.com/vision/docs + + +Setup +------------------------------------------------------------------------------- + + + +Authentication +++++++++++++++ + +This sample requires you to have authentication setup. Refer to the +`Authentication Getting Started Guide`_ for instructions on setting up +credentials for applications. + +.. _Authentication Getting Started Guide: + https://cloud.google.com/docs/authentication/getting-started + + + + +Install Dependencies +++++++++++++++++++++ + +#. Clone python-docs-samples and change directory to the sample directory you want to use. + + .. code-block:: bash + + $ git clone https://github.com/GoogleCloudPlatform/python-docs-samples.git + +#. 
Install `pip`_ and `virtualenv`_ if you do not already have them. You may want to refer to the `Python Development Environment Setup Guide`_ for Google Cloud Platform for instructions. + + .. _Python Development Environment Setup Guide: + https://cloud.google.com/python/setup + +#. Create a virtualenv. Samples are compatible with Python 3.6+. + + .. code-block:: bash + + $ virtualenv env + $ source env/bin/activate + +#. Install the dependencies needed to run the samples. + + .. code-block:: bash + + $ pip install -r requirements.txt + +.. _pip: https://pip.pypa.io/ +.. _virtualenv: https://virtualenv.pypa.io/ + + + + + + +Samples +------------------------------------------------------------------------------- + + +Detect ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=vision/cloud-client/detect/detect.py,vision/cloud-client/detect/README.rst + + + + +To run this sample: + +.. code-block:: bash + + $ python detect.py + + + usage: detect.py [-h] + {faces,faces-uri,labels,labels-uri,landmarks,landmarks-uri,text,text-uri,logos,logos-uri,safe-search,safe-search-uri,properties,properties-uri,web,web-uri,web-geo,web-geo-uri,crophints,crophints-uri,document,document-uri,ocr-uri,object-localization,object-localization-uri} + ... + + This application demonstrates how to perform basic operations with the + Google Cloud Vision API. + + Example Usage: + python detect.py text ./resources/wakeupcat.jpg + python detect.py labels ./resources/landmark.jpg + python detect.py web ./resources/landmark.jpg + python detect.py web-uri http://wheresgus.com/dog.JPG + python detect.py web-geo ./resources/city.jpg + python detect.py faces-uri gs://your-bucket/file.jpg + python detect.py ocr-uri gs://python-docs-samples-tests/HodgeConj.pdf gs://BUCKET_NAME/PREFIX/ + python detect.py object-localization ./resources/puppies.jpg + python detect.py object-localization-uri gs://... + + For more information, the documentation at + https://cloud.google.com/vision/docs. + + positional arguments: + {faces,faces-uri,labels,labels-uri,landmarks,landmarks-uri,text,text-uri,logos,logos-uri,safe-search,safe-search-uri,properties,properties-uri,web,web-uri,web-geo,web-geo-uri,crophints,crophints-uri,document,document-uri,ocr-uri,object-localization,object-localization-uri} + faces Detects faces in an image. + faces-uri Detects faces in the file located in Google Cloud + Storage or the web. + labels Detects labels in the file. + labels-uri Detects labels in the file located in Google Cloud + Storage or on the Web. + landmarks Detects landmarks in the file. + landmarks-uri Detects landmarks in the file located in Google Cloud + Storage or on the Web. + text Detects text in the file. + text-uri Detects text in the file located in Google Cloud + Storage or on the Web. + logos Detects logos in the file. + logos-uri Detects logos in the file located in Google Cloud + Storage or on the Web. + safe-search Detects unsafe features in the file. + safe-search-uri Detects unsafe features in the file located in Google + Cloud Storage or on the Web. + properties Detects image properties in the file. + properties-uri Detects image properties in the file located in Google + Cloud Storage or on the Web. + web Detects web annotations given an image. 
+ web-uri Detects web annotations in the file located in Google + Cloud Storage. + web-geo Detects web annotations given an image, using the + geotag metadata in the image to detect web entities. + web-geo-uri Detects web annotations given an image in the file + located in Google Cloud Storage., using the geotag + metadata in the image to detect web entities. + crophints Detects crop hints in an image. + crophints-uri Detects crop hints in the file located in Google Cloud + Storage. + document Detects document features in an image. + document-uri Detects document features in the file located in + Google Cloud Storage. + ocr-uri OCR with PDF/TIFF as source files on GCS + object-localization + OCR with PDF/TIFF as source files on GCS + object-localization-uri + OCR with PDF/TIFF as source files on GCS + + optional arguments: + -h, --help show this help message and exit + + + + + +Beta Detect ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=vision/cloud-client/detect/beta_snippets.py,vision/cloud-client/detect/README.rst + + + + +To run this sample: + +.. code-block:: bash + + $ python beta_snippets.py + + + usage: beta_snippets.py [-h] + {object-localization,object-localization-uri,handwritten-ocr,handwritten-ocr-uri,batch-annotate-files,batch-annotate-files-uri,batch-annotate-images-uri} + ... + + Google Cloud Vision API Python Beta Snippets + + Example Usage: + python beta_snippets.py -h + python beta_snippets.py object-localization INPUT_IMAGE + python beta_snippets.py object-localization-uri gs://... + python beta_snippets.py handwritten-ocr INPUT_IMAGE + python beta_snippets.py handwritten-ocr-uri gs://... + python beta_snippets.py batch-annotate-files INPUT_PDF + python beta_snippets.py batch-annotate-files-uri gs://... + python beta_snippets.py batch-annotate-images-uri gs://... gs://... + + For more information, the documentation at + https://cloud.google.com/vision/docs. + + positional arguments: + {object-localization,object-localization-uri,handwritten-ocr,handwritten-ocr-uri,batch-annotate-files,batch-annotate-files-uri,batch-annotate-images-uri} + object-localization + Localize objects in the local image. Args: path: The + path to the local file. + object-localization-uri + Localize objects in the image on Google Cloud Storage + Args: uri: The path to the file in Google Cloud + Storage (gs://...) + handwritten-ocr Detects handwritten characters in a local image. Args: + path: The path to the local file. + handwritten-ocr-uri + Detects handwritten characters in the file located in + Google Cloud Storage. Args: uri: The path to the file + in Google Cloud Storage (gs://...) + batch-annotate-files + Detects document features in a PDF/TIFF/GIF file. + While your PDF file may have several pages, this API + can process up to 5 pages only. Args: path: The path + to the local file. + batch-annotate-files-uri + Detects document features in a PDF/TIFF/GIF file. + While your PDF file may have several pages, this API + can process up to 5 pages only. Args: uri: The path to + the file in Google Cloud Storage (gs://...) + batch-annotate-images-uri + Batch annotation of images on Google Cloud Storage + asynchronously. Args: input_image_uri: The path to the + image in Google Cloud Storage (gs://...) 
output_uri: + The path to the output path in Google Cloud Storage + (gs://...) + + optional arguments: + -h, --help show this help message and exit + + + + + + + + + +The client library +------------------------------------------------------------------------------- + +This sample uses the `Google Cloud Client Library for Python`_. +You can read the documentation for more details on API usage and use GitHub +to `browse the source`_ and `report issues`_. + +.. _Google Cloud Client Library for Python: + https://googlecloudplatform.github.io/google-cloud-python/ +.. _browse the source: + https://github.com/GoogleCloudPlatform/google-cloud-python +.. _report issues: + https://github.com/GoogleCloudPlatform/google-cloud-python/issues + + + +.. _Google Cloud SDK: https://cloud.google.com/sdk/ diff --git a/vision/snippets/detect/README.rst.in b/vision/snippets/detect/README.rst.in new file mode 100644 index 000000000000..0d105411cff6 --- /dev/null +++ b/vision/snippets/detect/README.rst.in @@ -0,0 +1,33 @@ +# This file is used to generate README.rst + +product: + name: Google Cloud Vision API + short_name: Cloud Vision API + url: https://cloud.google.com/vision/docs + description: > + `Google Cloud Vision API`_ allows developers to easily integrate vision + detection features within applications, including image labeling, face and + landmark detection, optical character recognition (OCR), and tagging of + explicit content. + + + - See the `migration guide`_ for information about migrating to Python client library v0.25.1. + + + .. _migration guide: https://cloud.google.com/vision/docs/python-client-migration + +setup: +- auth +- install_deps + +samples: +- name: Detect + file: detect.py + show_help: True +- name: Beta Detect + file: beta_snippets.py + show_help: True + +cloud_client_library: true + +folder: vision/cloud-client/detect \ No newline at end of file diff --git a/vision/snippets/detect/beta_snippets.py b/vision/snippets/detect/beta_snippets.py new file mode 100644 index 000000000000..03dba60ea217 --- /dev/null +++ b/vision/snippets/detect/beta_snippets.py @@ -0,0 +1,412 @@ +#!/usr/bin/env python + +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Google Cloud Vision API Python Beta Snippets + +Example Usage: +python beta_snippets.py -h +python beta_snippets.py object-localization INPUT_IMAGE +python beta_snippets.py object-localization-uri gs://... +python beta_snippets.py handwritten-ocr INPUT_IMAGE +python beta_snippets.py handwritten-ocr-uri gs://... +python beta_snippets.py batch-annotate-files INPUT_PDF +python beta_snippets.py batch-annotate-files-uri gs://... +python beta_snippets.py batch-annotate-images-uri gs://... gs://... + + +For more information, the documentation at +https://cloud.google.com/vision/docs. +""" + +import argparse +import io + + +# [START vision_localize_objects_beta] +def localize_objects(path): + """Localize objects in the local image. + + Args: + path: The path to the local file. 
+ """ + from google.cloud import vision_v1p3beta1 as vision + client = vision.ImageAnnotatorClient() + + with open(path, 'rb') as image_file: + content = image_file.read() + image = vision.Image(content=content) + + objects = client.object_localization( + image=image).localized_object_annotations + + print('Number of objects found: {}'.format(len(objects))) + for object_ in objects: + print('\n{} (confidence: {})'.format(object_.name, object_.score)) + print('Normalized bounding polygon vertices: ') + for vertex in object_.bounding_poly.normalized_vertices: + print(' - ({}, {})'.format(vertex.x, vertex.y)) +# [END vision_localize_objects_beta] + + +# [START vision_localize_objects_gcs_beta] +def localize_objects_uri(uri): + """Localize objects in the image on Google Cloud Storage + + Args: + uri: The path to the file in Google Cloud Storage (gs://...) + """ + from google.cloud import vision_v1p3beta1 as vision + client = vision.ImageAnnotatorClient() + + image = vision.Image() + image.source.image_uri = uri + + objects = client.object_localization( + image=image).localized_object_annotations + + print('Number of objects found: {}'.format(len(objects))) + for object_ in objects: + print('\n{} (confidence: {})'.format(object_.name, object_.score)) + print('Normalized bounding polygon vertices: ') + for vertex in object_.bounding_poly.normalized_vertices: + print(' - ({}, {})'.format(vertex.x, vertex.y)) +# [END vision_localize_objects_gcs_beta] + + +# [START vision_handwritten_ocr_beta] +def detect_handwritten_ocr(path): + """Detects handwritten characters in a local image. + + Args: + path: The path to the local file. + """ + from google.cloud import vision_v1p3beta1 as vision + client = vision.ImageAnnotatorClient() + + with io.open(path, 'rb') as image_file: + content = image_file.read() + + image = vision.Image(content=content) + + # Language hint codes for handwritten OCR: + # en-t-i0-handwrit, mul-Latn-t-i0-handwrit + # Note: Use only one language hint code per request for handwritten OCR. + image_context = vision.ImageContext( + language_hints=['en-t-i0-handwrit']) + + response = client.document_text_detection(image=image, + image_context=image_context) + + print('Full Text: {}'.format(response.full_text_annotation.text)) + for page in response.full_text_annotation.pages: + for block in page.blocks: + print('\nBlock confidence: {}\n'.format(block.confidence)) + + for paragraph in block.paragraphs: + print('Paragraph confidence: {}'.format( + paragraph.confidence)) + + for word in paragraph.words: + word_text = ''.join([ + symbol.text for symbol in word.symbols + ]) + print('Word text: {} (confidence: {})'.format( + word_text, word.confidence)) + + for symbol in word.symbols: + print('\tSymbol: {} (confidence: {})'.format( + symbol.text, symbol.confidence)) + + if response.error.message: + raise Exception( + '{}\nFor more info on error messages, check: ' + 'https://cloud.google.com/apis/design/errors'.format( + response.error.message)) +# [END vision_handwritten_ocr_beta] + + +# [START vision_handwritten_ocr_gcs_beta] +def detect_handwritten_ocr_uri(uri): + """Detects handwritten characters in the file located in Google Cloud + Storage. + + Args: + uri: The path to the file in Google Cloud Storage (gs://...) 
+ """ + from google.cloud import vision_v1p3beta1 as vision + client = vision.ImageAnnotatorClient() + image = vision.Image() + image.source.image_uri = uri + + # Language hint codes for handwritten OCR: + # en-t-i0-handwrit, mul-Latn-t-i0-handwrit + # Note: Use only one language hint code per request for handwritten OCR. + image_context = vision.ImageContext( + language_hints=['en-t-i0-handwrit']) + + response = client.document_text_detection(image=image, + image_context=image_context) + + print('Full Text: {}'.format(response.full_text_annotation.text)) + for page in response.full_text_annotation.pages: + for block in page.blocks: + print('\nBlock confidence: {}\n'.format(block.confidence)) + + for paragraph in block.paragraphs: + print('Paragraph confidence: {}'.format( + paragraph.confidence)) + + for word in paragraph.words: + word_text = ''.join([ + symbol.text for symbol in word.symbols + ]) + print('Word text: {} (confidence: {})'.format( + word_text, word.confidence)) + + for symbol in word.symbols: + print('\tSymbol: {} (confidence: {})'.format( + symbol.text, symbol.confidence)) + + if response.error.message: + raise Exception( + '{}\nFor more info on error messages, check: ' + 'https://cloud.google.com/apis/design/errors'.format( + response.error.message)) +# [END vision_handwritten_ocr_gcs_beta] + + +# [START vision_batch_annotate_files_beta] +def detect_batch_annotate_files(path): + """Detects document features in a PDF/TIFF/GIF file. + + While your PDF file may have several pages, + this API can process up to 5 pages only. + + Args: + path: The path to the local file. + """ + from google.cloud import vision_v1p4beta1 as vision + client = vision.ImageAnnotatorClient() + + with open(path, 'rb') as pdf_file: + content = pdf_file.read() + + # Other supported mime_types: image/tiff' or 'image/gif' + mime_type = 'application/pdf' + input_config = vision.InputConfig( + content=content, mime_type=mime_type) + + feature = vision.Feature( + type_=vision.Feature.Type.DOCUMENT_TEXT_DETECTION) + # Annotate the first two pages and the last one (max 5 pages) + # First page starts at 1, and not 0. Last page is -1. + pages = [1, 2, -1] + + request = vision.AnnotateFileRequest( + input_config=input_config, + features=[feature], + pages=pages) + + response = client.batch_annotate_files(requests=[request]) + + for image_response in response.responses[0].responses: + for page in image_response.full_text_annotation.pages: + for block in page.blocks: + print(u'\nBlock confidence: {}\n'.format(block.confidence)) + for par in block.paragraphs: + print(u'\tParagraph confidence: {}'.format(par.confidence)) + for word in par.words: + symbol_texts = [symbol.text for symbol in word.symbols] + word_text = ''.join(symbol_texts) + print(u'\t\tWord text: {} (confidence: {})'.format( + word_text, word.confidence)) + for symbol in word.symbols: + print(u'\t\t\tSymbol: {} (confidence: {})'.format( + symbol.text, symbol.confidence)) +# [END vision_batch_annotate_files_beta] + + +# [START vision_batch_annotate_files_gcs_beta] +def detect_batch_annotate_files_uri(gcs_uri): + """Detects document features in a PDF/TIFF/GIF file. + + While your PDF file may have several pages, + this API can process up to 5 pages only. + + Args: + uri: The path to the file in Google Cloud Storage (gs://...) 
+ """ + from google.cloud import vision_v1p4beta1 as vision + client = vision.ImageAnnotatorClient() + + # Other supported mime_types: image/tiff' or 'image/gif' + mime_type = 'application/pdf' + input_config = vision.InputConfig( + gcs_source=vision.GcsSource(uri=gcs_uri), mime_type=mime_type) + + feature = vision.Feature( + type_=vision.Feature.Type.DOCUMENT_TEXT_DETECTION) + # Annotate the first two pages and the last one (max 5 pages) + # First page starts at 1, and not 0. Last page is -1. + pages = [1, 2, -1] + + request = vision.AnnotateFileRequest( + input_config=input_config, + features=[feature], + pages=pages) + + response = client.batch_annotate_files(requests=[request]) + + for image_response in response.responses[0].responses: + for page in image_response.full_text_annotation.pages: + for block in page.blocks: + print(u'\nBlock confidence: {}\n'.format(block.confidence)) + for par in block.paragraphs: + print(u'\tParagraph confidence: {}'.format(par.confidence)) + for word in par.words: + symbol_texts = [symbol.text for symbol in word.symbols] + word_text = ''.join(symbol_texts) + print(u'\t\tWord text: {} (confidence: {})'.format( + word_text, word.confidence)) + for symbol in word.symbols: + print(u'\t\t\tSymbol: {} (confidence: {})'.format( + symbol.text, symbol.confidence)) +# [END vision_batch_annotate_files_gcs_beta] + + +# [START vision_async_batch_annotate_images_beta] +def async_batch_annotate_images_uri(input_image_uri, output_uri): + """Batch annotation of images on Google Cloud Storage asynchronously. + + Args: + input_image_uri: The path to the image in Google Cloud Storage (gs://...) + output_uri: The path to the output path in Google Cloud Storage (gs://...) + """ + import re + + from google.cloud import storage + + from google.cloud import vision_v1p4beta1 as vision + client = vision.ImageAnnotatorClient() + + # Construct the request for the image(s) to be annotated: + image_source = vision.ImageSource(image_uri=input_image_uri) + image = vision.Image(source=image_source) + features = [ + vision.Feature(type_=vision.Feature.Type.LABEL_DETECTION), + vision.Feature(type_=vision.Feature.Type.TEXT_DETECTION), + vision.Feature(type_=vision.Feature.Type.IMAGE_PROPERTIES), + ] + requests = [ + vision.AnnotateImageRequest(image=image, features=features), + ] + + gcs_destination = vision.GcsDestination(uri=output_uri) + output_config = vision.OutputConfig( + gcs_destination=gcs_destination, batch_size=2) + + operation = client.async_batch_annotate_images( + requests=requests, output_config=output_config) + + print('Waiting for the operation to finish.') + operation.result(timeout=10000) + + # Once the request has completed and the output has been + # written to Google Cloud Storage, we can list all the output files. + storage_client = storage.Client() + + match = re.match(r'gs://([^/]+)/(.+)', output_uri) + bucket_name = match.group(1) + prefix = match.group(2) + + bucket = storage_client.get_bucket(bucket_name) + + # Lists objects with the given prefix. + blob_list = list(bucket.list_blobs(prefix=prefix)) + print('Output files:') + for blob in blob_list: + print(blob.name) + + # Processes the first output file from Google Cloud Storage. + # Since we specified batch_size=2, the first response contains + # annotations for the first two annotate image requests. + output = blob_list[0] + + json_string = output.download_as_string() + response = vision.BatchAnnotateImagesResponse.from_json(json_string) + + # Prints the actual response for the first annotate image request. 
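+    # Each entry in response.responses is an AnnotateImageResponse carrying
+    # the label_annotations, text_annotations and image_properties_annotation
+    # produced for the matching image request above.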
+ print(u'The annotation response for the first request: {}'.format( + response.responses[0])) +# [END vision_async_batch_annotate_images_beta] + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter) + subparsers = parser.add_subparsers(dest='command') + + object_parser = subparsers.add_parser( + 'object-localization', help=localize_objects.__doc__) + object_parser.add_argument('path') + + object_uri_parser = subparsers.add_parser( + 'object-localization-uri', help=localize_objects_uri.__doc__) + object_uri_parser.add_argument('uri') + + handwritten_parser = subparsers.add_parser( + 'handwritten-ocr', help=detect_handwritten_ocr.__doc__) + handwritten_parser.add_argument('path') + + handwritten_uri_parser = subparsers.add_parser( + 'handwritten-ocr-uri', help=detect_handwritten_ocr_uri.__doc__) + handwritten_uri_parser.add_argument('uri') + + batch_annotate_parser = subparsers.add_parser( + 'batch-annotate-files', help=detect_batch_annotate_files.__doc__) + batch_annotate_parser.add_argument('path') + + batch_annotate_uri_parser = subparsers.add_parser( + 'batch-annotate-files-uri', + help=detect_batch_annotate_files_uri.__doc__) + batch_annotate_uri_parser.add_argument('uri') + + batch_annotate__image_uri_parser = subparsers.add_parser( + 'batch-annotate-images-uri', + help=async_batch_annotate_images_uri.__doc__) + batch_annotate__image_uri_parser.add_argument('uri') + batch_annotate__image_uri_parser.add_argument('output') + + args = parser.parse_args() + + if 'uri' in args.command: + if 'object-localization-uri' in args.command: + localize_objects_uri(args.uri) + elif 'handwritten-ocr-uri' in args.command: + detect_handwritten_ocr_uri(args.uri) + elif 'batch-annotate-files-uri' in args.command: + detect_batch_annotate_files_uri(args.uri) + elif 'batch-annotate-images-uri' in args.command: + async_batch_annotate_images_uri(args.uri, args.output) + else: + if 'object-localization' in args.command: + localize_objects(args.path) + elif 'handwritten-ocr' in args.command: + detect_handwritten_ocr(args.path) + elif 'batch-annotate-files' in args.command: + detect_batch_annotate_files(args.path) diff --git a/vision/snippets/detect/beta_snippets_test.py b/vision/snippets/detect/beta_snippets_test.py new file mode 100644 index 000000000000..3ba90d119642 --- /dev/null +++ b/vision/snippets/detect/beta_snippets_test.py @@ -0,0 +1,91 @@ +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
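+
+# Note: these tests call the live Vision and Cloud Storage APIs. They assume
+# application default credentials are available and that the
+# CLOUD_STORAGE_BUCKET environment variable (read below) names a bucket the
+# async batch-annotate test can write its output files to.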
+ +import os +import uuid + +import beta_snippets + +RESOURCES = os.path.join(os.path.dirname(__file__), 'resources') +GCS_ROOT = 'gs://cloud-samples-data/vision/' + +BUCKET = os.environ['CLOUD_STORAGE_BUCKET'] +OUTPUT_PREFIX = 'TEST_OUTPUT_{}'.format(uuid.uuid4()) +GCS_DESTINATION_URI = 'gs://{}/{}/'.format(BUCKET, OUTPUT_PREFIX) + + +def test_localize_objects(capsys): + path = os.path.join(RESOURCES, 'puppies.jpg') + + beta_snippets.localize_objects(path) + + out, _ = capsys.readouterr() + assert 'Dog' in out + + +def test_localize_objects_uri(capsys): + uri = GCS_ROOT + 'puppies.jpg' + + beta_snippets.localize_objects_uri(uri) + + out, _ = capsys.readouterr() + assert 'Dog' in out + + +def test_handwritten_ocr(capsys): + path = os.path.join(RESOURCES, 'handwritten.jpg') + + beta_snippets.detect_handwritten_ocr(path) + + out, _ = capsys.readouterr() + assert 'Cloud Vision API' in out + + +def test_handwritten_ocr_uri(capsys): + uri = GCS_ROOT + 'handwritten.jpg' + + beta_snippets.detect_handwritten_ocr_uri(uri) + + out, _ = capsys.readouterr() + assert 'Cloud Vision API' in out + + +def test_detect_batch_annotate_files(capsys): + file_name = os.path.join(RESOURCES, 'kafka.pdf') + beta_snippets.detect_batch_annotate_files(file_name) + out, _ = capsys.readouterr() + assert 'Symbol: a' in out + assert 'Word text: evenings' in out + + +def test_detect_batch_annotate_files_uri(capsys): + gcs_uri = GCS_ROOT + 'document_understanding/kafka.pdf' + beta_snippets.detect_batch_annotate_files_uri(gcs_uri) + out, _ = capsys.readouterr() + assert 'Symbol' in out + assert 'Word text' in out + + +def test_async_batch_annotate_images(capsys): + gcs_uri = GCS_ROOT + 'landmark/eiffel_tower.jpg' + beta_snippets.async_batch_annotate_images_uri(gcs_uri, GCS_DESTINATION_URI) + out, _ = capsys.readouterr() + assert 'description: "Tower"' in out + + from google.cloud import storage + storage_client = storage.Client() + bucket = storage_client.get_bucket(BUCKET) + if len(list(bucket.list_blobs(prefix=OUTPUT_PREFIX))) > 0: + for blob in bucket.list_blobs(prefix=OUTPUT_PREFIX): + blob.delete() diff --git a/vision/snippets/detect/detect.py b/vision/snippets/detect/detect.py new file mode 100644 index 000000000000..8ec5bcf04d79 --- /dev/null +++ b/vision/snippets/detect/detect.py @@ -0,0 +1,1104 @@ +#!/usr/bin/env python + +# Copyright 2017 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""This application demonstrates how to perform basic operations with the +Google Cloud Vision API. + +Example Usage: +python detect.py text ./resources/wakeupcat.jpg +python detect.py labels ./resources/landmark.jpg +python detect.py web ./resources/landmark.jpg +python detect.py web-uri http://wheresgus.com/dog.JPG +python detect.py web-geo ./resources/city.jpg +python detect.py faces-uri gs://your-bucket/file.jpg +python detect.py ocr-uri gs://python-docs-samples-tests/HodgeConj.pdf \ +gs://BUCKET_NAME/PREFIX/ +python detect.py object-localization ./resources/puppies.jpg +python detect.py object-localization-uri gs://... 
+ +For more information, the documentation at +https://cloud.google.com/vision/docs. +""" + +import argparse + + +# [START vision_face_detection] +def detect_faces(path): + """Detects faces in an image.""" + from google.cloud import vision + import io + client = vision.ImageAnnotatorClient() + + # [START vision_python_migration_face_detection] + # [START vision_python_migration_image_file] + with io.open(path, 'rb') as image_file: + content = image_file.read() + + image = vision.Image(content=content) + # [END vision_python_migration_image_file] + + response = client.face_detection(image=image) + faces = response.face_annotations + + # Names of likelihood from google.cloud.vision.enums + likelihood_name = ('UNKNOWN', 'VERY_UNLIKELY', 'UNLIKELY', 'POSSIBLE', + 'LIKELY', 'VERY_LIKELY') + print('Faces:') + + for face in faces: + print('anger: {}'.format(likelihood_name[face.anger_likelihood])) + print('joy: {}'.format(likelihood_name[face.joy_likelihood])) + print('surprise: {}'.format(likelihood_name[face.surprise_likelihood])) + + vertices = (['({},{})'.format(vertex.x, vertex.y) + for vertex in face.bounding_poly.vertices]) + + print('face bounds: {}'.format(','.join(vertices))) + + if response.error.message: + raise Exception( + '{}\nFor more info on error messages, check: ' + 'https://cloud.google.com/apis/design/errors'.format( + response.error.message)) + # [END vision_python_migration_face_detection] +# [END vision_face_detection] + + +# [START vision_face_detection_gcs] +def detect_faces_uri(uri): + """Detects faces in the file located in Google Cloud Storage or the web.""" + from google.cloud import vision + client = vision.ImageAnnotatorClient() + # [START vision_python_migration_image_uri] + image = vision.Image() + image.source.image_uri = uri + # [END vision_python_migration_image_uri] + + response = client.face_detection(image=image) + faces = response.face_annotations + + # Names of likelihood from google.cloud.vision.enums + likelihood_name = ('UNKNOWN', 'VERY_UNLIKELY', 'UNLIKELY', 'POSSIBLE', + 'LIKELY', 'VERY_LIKELY') + print('Faces:') + + for face in faces: + print('anger: {}'.format(likelihood_name[face.anger_likelihood])) + print('joy: {}'.format(likelihood_name[face.joy_likelihood])) + print('surprise: {}'.format(likelihood_name[face.surprise_likelihood])) + + vertices = (['({},{})'.format(vertex.x, vertex.y) + for vertex in face.bounding_poly.vertices]) + + print('face bounds: {}'.format(','.join(vertices))) + + if response.error.message: + raise Exception( + '{}\nFor more info on error messages, check: ' + 'https://cloud.google.com/apis/design/errors'.format( + response.error.message)) +# [END vision_face_detection_gcs] + + +# [START vision_label_detection] +def detect_labels(path): + """Detects labels in the file.""" + from google.cloud import vision + import io + client = vision.ImageAnnotatorClient() + + # [START vision_python_migration_label_detection] + with io.open(path, 'rb') as image_file: + content = image_file.read() + + image = vision.Image(content=content) + + response = client.label_detection(image=image) + labels = response.label_annotations + print('Labels:') + + for label in labels: + print(label.description) + + if response.error.message: + raise Exception( + '{}\nFor more info on error messages, check: ' + 'https://cloud.google.com/apis/design/errors'.format( + response.error.message)) + # [END vision_python_migration_label_detection] +# [END vision_label_detection] + + +# [START vision_label_detection_gcs] +def detect_labels_uri(uri): + """Detects 
labels in the file located in Google Cloud Storage or on the + Web.""" + from google.cloud import vision + client = vision.ImageAnnotatorClient() + image = vision.Image() + image.source.image_uri = uri + + response = client.label_detection(image=image) + labels = response.label_annotations + print('Labels:') + + for label in labels: + print(label.description) + + if response.error.message: + raise Exception( + '{}\nFor more info on error messages, check: ' + 'https://cloud.google.com/apis/design/errors'.format( + response.error.message)) +# [END vision_label_detection_gcs] + + +# [START vision_landmark_detection] +def detect_landmarks(path): + """Detects landmarks in the file.""" + from google.cloud import vision + import io + client = vision.ImageAnnotatorClient() + + # [START vision_python_migration_landmark_detection] + with io.open(path, 'rb') as image_file: + content = image_file.read() + + image = vision.Image(content=content) + + response = client.landmark_detection(image=image) + landmarks = response.landmark_annotations + print('Landmarks:') + + for landmark in landmarks: + print(landmark.description) + for location in landmark.locations: + lat_lng = location.lat_lng + print('Latitude {}'.format(lat_lng.latitude)) + print('Longitude {}'.format(lat_lng.longitude)) + + if response.error.message: + raise Exception( + '{}\nFor more info on error messages, check: ' + 'https://cloud.google.com/apis/design/errors'.format( + response.error.message)) + # [END vision_python_migration_landmark_detection] +# [END vision_landmark_detection] + + +# [START vision_landmark_detection_gcs] +def detect_landmarks_uri(uri): + """Detects landmarks in the file located in Google Cloud Storage or on the + Web.""" + from google.cloud import vision + client = vision.ImageAnnotatorClient() + image = vision.Image() + image.source.image_uri = uri + + response = client.landmark_detection(image=image) + landmarks = response.landmark_annotations + print('Landmarks:') + + for landmark in landmarks: + print(landmark.description) + + if response.error.message: + raise Exception( + '{}\nFor more info on error messages, check: ' + 'https://cloud.google.com/apis/design/errors'.format( + response.error.message)) +# [END vision_landmark_detection_gcs] + + +# [START vision_logo_detection] +def detect_logos(path): + """Detects logos in the file.""" + from google.cloud import vision + import io + client = vision.ImageAnnotatorClient() + + # [START vision_python_migration_logo_detection] + with io.open(path, 'rb') as image_file: + content = image_file.read() + + image = vision.Image(content=content) + + response = client.logo_detection(image=image) + logos = response.logo_annotations + print('Logos:') + + for logo in logos: + print(logo.description) + + if response.error.message: + raise Exception( + '{}\nFor more info on error messages, check: ' + 'https://cloud.google.com/apis/design/errors'.format( + response.error.message)) + # [END vision_python_migration_logo_detection] +# [END vision_logo_detection] + + +# [START vision_logo_detection_gcs] +def detect_logos_uri(uri): + """Detects logos in the file located in Google Cloud Storage or on the Web. 
+ """ + from google.cloud import vision + client = vision.ImageAnnotatorClient() + image = vision.Image() + image.source.image_uri = uri + + response = client.logo_detection(image=image) + logos = response.logo_annotations + print('Logos:') + + for logo in logos: + print(logo.description) + + if response.error.message: + raise Exception( + '{}\nFor more info on error messages, check: ' + 'https://cloud.google.com/apis/design/errors'.format( + response.error.message)) +# [END vision_logo_detection_gcs] + + +# [START vision_safe_search_detection] +def detect_safe_search(path): + """Detects unsafe features in the file.""" + from google.cloud import vision + import io + client = vision.ImageAnnotatorClient() + + # [START vision_python_migration_safe_search_detection] + with io.open(path, 'rb') as image_file: + content = image_file.read() + + image = vision.Image(content=content) + + response = client.safe_search_detection(image=image) + safe = response.safe_search_annotation + + # Names of likelihood from google.cloud.vision.enums + likelihood_name = ('UNKNOWN', 'VERY_UNLIKELY', 'UNLIKELY', 'POSSIBLE', + 'LIKELY', 'VERY_LIKELY') + print('Safe search:') + + print('adult: {}'.format(likelihood_name[safe.adult])) + print('medical: {}'.format(likelihood_name[safe.medical])) + print('spoofed: {}'.format(likelihood_name[safe.spoof])) + print('violence: {}'.format(likelihood_name[safe.violence])) + print('racy: {}'.format(likelihood_name[safe.racy])) + + if response.error.message: + raise Exception( + '{}\nFor more info on error messages, check: ' + 'https://cloud.google.com/apis/design/errors'.format( + response.error.message)) + # [END vision_python_migration_safe_search_detection] +# [END vision_safe_search_detection] + + +# [START vision_safe_search_detection_gcs] +def detect_safe_search_uri(uri): + """Detects unsafe features in the file located in Google Cloud Storage or + on the Web.""" + from google.cloud import vision + client = vision.ImageAnnotatorClient() + image = vision.Image() + image.source.image_uri = uri + + response = client.safe_search_detection(image=image) + safe = response.safe_search_annotation + + # Names of likelihood from google.cloud.vision.enums + likelihood_name = ('UNKNOWN', 'VERY_UNLIKELY', 'UNLIKELY', 'POSSIBLE', + 'LIKELY', 'VERY_LIKELY') + print('Safe search:') + + print('adult: {}'.format(likelihood_name[safe.adult])) + print('medical: {}'.format(likelihood_name[safe.medical])) + print('spoofed: {}'.format(likelihood_name[safe.spoof])) + print('violence: {}'.format(likelihood_name[safe.violence])) + print('racy: {}'.format(likelihood_name[safe.racy])) + + if response.error.message: + raise Exception( + '{}\nFor more info on error messages, check: ' + 'https://cloud.google.com/apis/design/errors'.format( + response.error.message)) +# [END vision_safe_search_detection_gcs] + + +# [START vision_text_detection] +def detect_text(path): + """Detects text in the file.""" + from google.cloud import vision + import io + client = vision.ImageAnnotatorClient() + + # [START vision_python_migration_text_detection] + with io.open(path, 'rb') as image_file: + content = image_file.read() + + image = vision.Image(content=content) + + response = client.text_detection(image=image) + texts = response.text_annotations + print('Texts:') + + for text in texts: + print('\n"{}"'.format(text.description)) + + vertices = (['({},{})'.format(vertex.x, vertex.y) + for vertex in text.bounding_poly.vertices]) + + print('bounds: {}'.format(','.join(vertices))) + + if response.error.message: + raise 
Exception( + '{}\nFor more info on error messages, check: ' + 'https://cloud.google.com/apis/design/errors'.format( + response.error.message)) + # [END vision_python_migration_text_detection] +# [END vision_text_detection] + + +# [START vision_text_detection_gcs] +def detect_text_uri(uri): + """Detects text in the file located in Google Cloud Storage or on the Web. + """ + from google.cloud import vision + client = vision.ImageAnnotatorClient() + image = vision.Image() + image.source.image_uri = uri + + response = client.text_detection(image=image) + texts = response.text_annotations + print('Texts:') + + for text in texts: + print('\n"{}"'.format(text.description)) + + vertices = (['({},{})'.format(vertex.x, vertex.y) + for vertex in text.bounding_poly.vertices]) + + print('bounds: {}'.format(','.join(vertices))) + + if response.error.message: + raise Exception( + '{}\nFor more info on error messages, check: ' + 'https://cloud.google.com/apis/design/errors'.format( + response.error.message)) +# [END vision_text_detection_gcs] + + +# [START vision_image_property_detection] +def detect_properties(path): + """Detects image properties in the file.""" + from google.cloud import vision + import io + client = vision.ImageAnnotatorClient() + + # [START vision_python_migration_image_properties] + with io.open(path, 'rb') as image_file: + content = image_file.read() + + image = vision.Image(content=content) + + response = client.image_properties(image=image) + props = response.image_properties_annotation + print('Properties:') + + for color in props.dominant_colors.colors: + print('fraction: {}'.format(color.pixel_fraction)) + print('\tr: {}'.format(color.color.red)) + print('\tg: {}'.format(color.color.green)) + print('\tb: {}'.format(color.color.blue)) + print('\ta: {}'.format(color.color.alpha)) + + if response.error.message: + raise Exception( + '{}\nFor more info on error messages, check: ' + 'https://cloud.google.com/apis/design/errors'.format( + response.error.message)) + # [END vision_python_migration_image_properties] +# [END vision_image_property_detection] + + +# [START vision_image_property_detection_gcs] +def detect_properties_uri(uri): + """Detects image properties in the file located in Google Cloud Storage or + on the Web.""" + from google.cloud import vision + client = vision.ImageAnnotatorClient() + image = vision.Image() + image.source.image_uri = uri + + response = client.image_properties(image=image) + props = response.image_properties_annotation + print('Properties:') + + for color in props.dominant_colors.colors: + print('frac: {}'.format(color.pixel_fraction)) + print('\tr: {}'.format(color.color.red)) + print('\tg: {}'.format(color.color.green)) + print('\tb: {}'.format(color.color.blue)) + print('\ta: {}'.format(color.color.alpha)) + + if response.error.message: + raise Exception( + '{}\nFor more info on error messages, check: ' + 'https://cloud.google.com/apis/design/errors'.format( + response.error.message)) +# [END vision_image_property_detection_gcs] + + +# [START vision_web_detection] +def detect_web(path): + """Detects web annotations given an image.""" + from google.cloud import vision + import io + client = vision.ImageAnnotatorClient() + + # [START vision_python_migration_web_detection] + with io.open(path, 'rb') as image_file: + content = image_file.read() + + image = vision.Image(content=content) + + response = client.web_detection(image=image) + annotations = response.web_detection + + if annotations.best_guess_labels: + for label in 
annotations.best_guess_labels: + print('\nBest guess label: {}'.format(label.label)) + + if annotations.pages_with_matching_images: + print('\n{} Pages with matching images found:'.format( + len(annotations.pages_with_matching_images))) + + for page in annotations.pages_with_matching_images: + print('\n\tPage url : {}'.format(page.url)) + + if page.full_matching_images: + print('\t{} Full Matches found: '.format( + len(page.full_matching_images))) + + for image in page.full_matching_images: + print('\t\tImage url : {}'.format(image.url)) + + if page.partial_matching_images: + print('\t{} Partial Matches found: '.format( + len(page.partial_matching_images))) + + for image in page.partial_matching_images: + print('\t\tImage url : {}'.format(image.url)) + + if annotations.web_entities: + print('\n{} Web entities found: '.format( + len(annotations.web_entities))) + + for entity in annotations.web_entities: + print('\n\tScore : {}'.format(entity.score)) + print(u'\tDescription: {}'.format(entity.description)) + + if annotations.visually_similar_images: + print('\n{} visually similar images found:\n'.format( + len(annotations.visually_similar_images))) + + for image in annotations.visually_similar_images: + print('\tImage url : {}'.format(image.url)) + + if response.error.message: + raise Exception( + '{}\nFor more info on error messages, check: ' + 'https://cloud.google.com/apis/design/errors'.format( + response.error.message)) + # [END vision_python_migration_web_detection] +# [END vision_web_detection] + + +# [START vision_web_detection_gcs] +def detect_web_uri(uri): + """Detects web annotations in the file located in Google Cloud Storage.""" + from google.cloud import vision + client = vision.ImageAnnotatorClient() + image = vision.Image() + image.source.image_uri = uri + + response = client.web_detection(image=image) + annotations = response.web_detection + + if annotations.best_guess_labels: + for label in annotations.best_guess_labels: + print('\nBest guess label: {}'.format(label.label)) + + if annotations.pages_with_matching_images: + print('\n{} Pages with matching images found:'.format( + len(annotations.pages_with_matching_images))) + + for page in annotations.pages_with_matching_images: + print('\n\tPage url : {}'.format(page.url)) + + if page.full_matching_images: + print('\t{} Full Matches found: '.format( + len(page.full_matching_images))) + + for image in page.full_matching_images: + print('\t\tImage url : {}'.format(image.url)) + + if page.partial_matching_images: + print('\t{} Partial Matches found: '.format( + len(page.partial_matching_images))) + + for image in page.partial_matching_images: + print('\t\tImage url : {}'.format(image.url)) + + if annotations.web_entities: + print('\n{} Web entities found: '.format( + len(annotations.web_entities))) + + for entity in annotations.web_entities: + print('\n\tScore : {}'.format(entity.score)) + print(u'\tDescription: {}'.format(entity.description)) + + if annotations.visually_similar_images: + print('\n{} visually similar images found:\n'.format( + len(annotations.visually_similar_images))) + + for image in annotations.visually_similar_images: + print('\tImage url : {}'.format(image.url)) + + if response.error.message: + raise Exception( + '{}\nFor more info on error messages, check: ' + 'https://cloud.google.com/apis/design/errors'.format( + response.error.message)) +# [END vision_web_detection_gcs] + + +# [START vision_web_detection_include_geo] +def web_entities_include_geo_results(path): + """Detects web annotations given an 
image, using the geotag metadata + in the image to detect web entities.""" + from google.cloud import vision + import io + client = vision.ImageAnnotatorClient() + + with io.open(path, 'rb') as image_file: + content = image_file.read() + + image = vision.Image(content=content) + + web_detection_params = vision.WebDetectionParams( + include_geo_results=True) + image_context = vision.ImageContext( + web_detection_params=web_detection_params) + + response = client.web_detection(image=image, image_context=image_context) + + for entity in response.web_detection.web_entities: + print('\n\tScore : {}'.format(entity.score)) + print(u'\tDescription: {}'.format(entity.description)) + + if response.error.message: + raise Exception( + '{}\nFor more info on error messages, check: ' + 'https://cloud.google.com/apis/design/errors'.format( + response.error.message)) +# [END vision_web_detection_include_geo] + + +# [START vision_web_detection_include_geo_gcs] +def web_entities_include_geo_results_uri(uri): + """Detects web annotations given an image in the file located in + Google Cloud Storage., using the geotag metadata in the image to + detect web entities.""" + from google.cloud import vision + client = vision.ImageAnnotatorClient() + + image = vision.Image() + image.source.image_uri = uri + + web_detection_params = vision.WebDetectionParams( + include_geo_results=True) + image_context = vision.ImageContext( + web_detection_params=web_detection_params) + + response = client.web_detection(image=image, image_context=image_context) + + for entity in response.web_detection.web_entities: + print('\n\tScore : {}'.format(entity.score)) + print(u'\tDescription: {}'.format(entity.description)) + + if response.error.message: + raise Exception( + '{}\nFor more info on error messages, check: ' + 'https://cloud.google.com/apis/design/errors'.format( + response.error.message)) +# [END vision_web_detection_include_geo_gcs] + + +# [START vision_crop_hint_detection] +def detect_crop_hints(path): + """Detects crop hints in an image.""" + from google.cloud import vision + import io + client = vision.ImageAnnotatorClient() + + # [START vision_python_migration_crop_hints] + with io.open(path, 'rb') as image_file: + content = image_file.read() + image = vision.Image(content=content) + + crop_hints_params = vision.CropHintsParams(aspect_ratios=[1.77]) + image_context = vision.ImageContext( + crop_hints_params=crop_hints_params) + + response = client.crop_hints(image=image, image_context=image_context) + hints = response.crop_hints_annotation.crop_hints + + for n, hint in enumerate(hints): + print('\nCrop Hint: {}'.format(n)) + + vertices = (['({},{})'.format(vertex.x, vertex.y) + for vertex in hint.bounding_poly.vertices]) + + print('bounds: {}'.format(','.join(vertices))) + + if response.error.message: + raise Exception( + '{}\nFor more info on error messages, check: ' + 'https://cloud.google.com/apis/design/errors'.format( + response.error.message)) + # [END vision_python_migration_crop_hints] +# [END vision_crop_hint_detection] + + +# [START vision_crop_hint_detection_gcs] +def detect_crop_hints_uri(uri): + """Detects crop hints in the file located in Google Cloud Storage.""" + from google.cloud import vision + client = vision.ImageAnnotatorClient() + image = vision.Image() + image.source.image_uri = uri + + crop_hints_params = vision.CropHintsParams(aspect_ratios=[1.77]) + image_context = vision.ImageContext( + crop_hints_params=crop_hints_params) + + response = client.crop_hints(image=image, image_context=image_context) 
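+    # Each crop hint's bounding_poly is expressed in absolute pixel
+    # coordinates of the source image; its vertices are printed below
+    # as (x,y) pairs.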
+ hints = response.crop_hints_annotation.crop_hints + + for n, hint in enumerate(hints): + print('\nCrop Hint: {}'.format(n)) + + vertices = (['({},{})'.format(vertex.x, vertex.y) + for vertex in hint.bounding_poly.vertices]) + + print('bounds: {}'.format(','.join(vertices))) + + if response.error.message: + raise Exception( + '{}\nFor more info on error messages, check: ' + 'https://cloud.google.com/apis/design/errors'.format( + response.error.message)) +# [END vision_crop_hint_detection_gcs] + + +# [START vision_fulltext_detection] +def detect_document(path): + """Detects document features in an image.""" + from google.cloud import vision + import io + client = vision.ImageAnnotatorClient() + + # [START vision_python_migration_document_text_detection] + with io.open(path, 'rb') as image_file: + content = image_file.read() + + image = vision.Image(content=content) + + response = client.document_text_detection(image=image) + + for page in response.full_text_annotation.pages: + for block in page.blocks: + print('\nBlock confidence: {}\n'.format(block.confidence)) + + for paragraph in block.paragraphs: + print('Paragraph confidence: {}'.format( + paragraph.confidence)) + + for word in paragraph.words: + word_text = ''.join([ + symbol.text for symbol in word.symbols + ]) + print('Word text: {} (confidence: {})'.format( + word_text, word.confidence)) + + for symbol in word.symbols: + print('\tSymbol: {} (confidence: {})'.format( + symbol.text, symbol.confidence)) + + if response.error.message: + raise Exception( + '{}\nFor more info on error messages, check: ' + 'https://cloud.google.com/apis/design/errors'.format( + response.error.message)) + # [END vision_python_migration_document_text_detection] +# [END vision_fulltext_detection] + + +# [START vision_fulltext_detection_gcs] +def detect_document_uri(uri): + """Detects document features in the file located in Google Cloud + Storage.""" + from google.cloud import vision + client = vision.ImageAnnotatorClient() + image = vision.Image() + image.source.image_uri = uri + + response = client.document_text_detection(image=image) + + for page in response.full_text_annotation.pages: + for block in page.blocks: + print('\nBlock confidence: {}\n'.format(block.confidence)) + + for paragraph in block.paragraphs: + print('Paragraph confidence: {}'.format( + paragraph.confidence)) + + for word in paragraph.words: + word_text = ''.join([ + symbol.text for symbol in word.symbols + ]) + print('Word text: {} (confidence: {})'.format( + word_text, word.confidence)) + + for symbol in word.symbols: + print('\tSymbol: {} (confidence: {})'.format( + symbol.text, symbol.confidence)) + + if response.error.message: + raise Exception( + '{}\nFor more info on error messages, check: ' + 'https://cloud.google.com/apis/design/errors'.format( + response.error.message)) +# [END vision_fulltext_detection_gcs] + + +# [START vision_text_detection_pdf_gcs] +def async_detect_document(gcs_source_uri, gcs_destination_uri): + """OCR with PDF/TIFF as source files on GCS""" + import json + import re + from google.cloud import vision + from google.cloud import storage + + # Supported mime_types are: 'application/pdf' and 'image/tiff' + mime_type = 'application/pdf' + + # How many pages should be grouped into each json output file. 
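+    # With batch_size = 2 a five-page PDF, for example, yields three
+    # output files covering pages 1-2, 3-4 and 5.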
+ batch_size = 2 + + client = vision.ImageAnnotatorClient() + + feature = vision.Feature( + type_=vision.Feature.Type.DOCUMENT_TEXT_DETECTION) + + gcs_source = vision.GcsSource(uri=gcs_source_uri) + input_config = vision.InputConfig( + gcs_source=gcs_source, mime_type=mime_type) + + gcs_destination = vision.GcsDestination(uri=gcs_destination_uri) + output_config = vision.OutputConfig( + gcs_destination=gcs_destination, batch_size=batch_size) + + async_request = vision.AsyncAnnotateFileRequest( + features=[feature], input_config=input_config, + output_config=output_config) + + operation = client.async_batch_annotate_files( + requests=[async_request]) + + print('Waiting for the operation to finish.') + operation.result(timeout=420) + + # Once the request has completed and the output has been + # written to GCS, we can list all the output files. + storage_client = storage.Client() + + match = re.match(r'gs://([^/]+)/(.+)', gcs_destination_uri) + bucket_name = match.group(1) + prefix = match.group(2) + + bucket = storage_client.get_bucket(bucket_name) + + # List objects with the given prefix, filtering out folders. + blob_list = [blob for blob in list(bucket.list_blobs( + prefix=prefix)) if not blob.name.endswith('/')] + print('Output files:') + for blob in blob_list: + print(blob.name) + + # Process the first output file from GCS. + # Since we specified batch_size=2, the first response contains + # the first two pages of the input file. + output = blob_list[0] + + json_string = output.download_as_string() + response = json.loads(json_string) + + # The actual response for the first page of the input file. + first_page_response = response['responses'][0] + annotation = first_page_response['fullTextAnnotation'] + + # Here we print the full text from the first page. + # The response contains more information: + # annotation/pages/blocks/paragraphs/words/symbols + # including confidence scores and bounding boxes + print('Full text:\n') + print(annotation['text']) +# [END vision_text_detection_pdf_gcs] + + +# [START vision_localize_objects] +def localize_objects(path): + """Localize objects in the local image. + + Args: + path: The path to the local file. + """ + from google.cloud import vision + client = vision.ImageAnnotatorClient() + + with open(path, 'rb') as image_file: + content = image_file.read() + image = vision.Image(content=content) + + objects = client.object_localization( + image=image).localized_object_annotations + + print('Number of objects found: {}'.format(len(objects))) + for object_ in objects: + print('\n{} (confidence: {})'.format(object_.name, object_.score)) + print('Normalized bounding polygon vertices: ') + for vertex in object_.bounding_poly.normalized_vertices: + print(' - ({}, {})'.format(vertex.x, vertex.y)) +# [END vision_localize_objects] + + +# [START vision_localize_objects_gcs] +def localize_objects_uri(uri): + """Localize objects in the image on Google Cloud Storage + + Args: + uri: The path to the file in Google Cloud Storage (gs://...) 
+ """ + from google.cloud import vision + client = vision.ImageAnnotatorClient() + + image = vision.Image() + image.source.image_uri = uri + + objects = client.object_localization( + image=image).localized_object_annotations + + print('Number of objects found: {}'.format(len(objects))) + for object_ in objects: + print('\n{} (confidence: {})'.format(object_.name, object_.score)) + print('Normalized bounding polygon vertices: ') + for vertex in object_.bounding_poly.normalized_vertices: + print(' - ({}, {})'.format(vertex.x, vertex.y)) +# [END vision_localize_objects_gcs] + + +def run_local(args): + if args.command == 'faces': + detect_faces(args.path) + elif args.command == 'labels': + detect_labels(args.path) + elif args.command == 'landmarks': + detect_landmarks(args.path) + elif args.command == 'text': + detect_text(args.path) + elif args.command == 'logos': + detect_logos(args.path) + elif args.command == 'safe-search': + detect_safe_search(args.path) + elif args.command == 'properties': + detect_properties(args.path) + elif args.command == 'web': + detect_web(args.path) + elif args.command == 'crophints': + detect_crop_hints(args.path) + elif args.command == 'document': + detect_document(args.path) + elif args.command == 'web-geo': + web_entities_include_geo_results(args.path) + elif args.command == 'object-localization': + localize_objects(args.path) + + +def run_uri(args): + if args.command == 'text-uri': + detect_text_uri(args.uri) + elif args.command == 'faces-uri': + detect_faces_uri(args.uri) + elif args.command == 'labels-uri': + detect_labels_uri(args.uri) + elif args.command == 'landmarks-uri': + detect_landmarks_uri(args.uri) + elif args.command == 'logos-uri': + detect_logos_uri(args.uri) + elif args.command == 'safe-search-uri': + detect_safe_search_uri(args.uri) + elif args.command == 'properties-uri': + detect_properties_uri(args.uri) + elif args.command == 'web-uri': + detect_web_uri(args.uri) + elif args.command == 'crophints-uri': + detect_crop_hints_uri(args.uri) + elif args.command == 'document-uri': + detect_document_uri(args.uri) + elif args.command == 'web-geo-uri': + web_entities_include_geo_results_uri(args.uri) + elif args.command == 'ocr-uri': + async_detect_document(args.uri, args.destination_uri) + elif args.command == 'object-localization-uri': + localize_objects_uri(args.uri) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter) + subparsers = parser.add_subparsers(dest='command') + + detect_faces_parser = subparsers.add_parser( + 'faces', help=detect_faces.__doc__) + detect_faces_parser.add_argument('path') + + faces_file_parser = subparsers.add_parser( + 'faces-uri', help=detect_faces_uri.__doc__) + faces_file_parser.add_argument('uri') + + detect_labels_parser = subparsers.add_parser( + 'labels', help=detect_labels.__doc__) + detect_labels_parser.add_argument('path') + + labels_file_parser = subparsers.add_parser( + 'labels-uri', help=detect_labels_uri.__doc__) + labels_file_parser.add_argument('uri') + + detect_landmarks_parser = subparsers.add_parser( + 'landmarks', help=detect_landmarks.__doc__) + detect_landmarks_parser.add_argument('path') + + landmark_file_parser = subparsers.add_parser( + 'landmarks-uri', help=detect_landmarks_uri.__doc__) + landmark_file_parser.add_argument('uri') + + detect_text_parser = subparsers.add_parser( + 'text', help=detect_text.__doc__) + detect_text_parser.add_argument('path') + + text_file_parser = subparsers.add_parser( + 
'text-uri', help=detect_text_uri.__doc__) + text_file_parser.add_argument('uri') + + detect_logos_parser = subparsers.add_parser( + 'logos', help=detect_logos.__doc__) + detect_logos_parser.add_argument('path') + + logos_file_parser = subparsers.add_parser( + 'logos-uri', help=detect_logos_uri.__doc__) + logos_file_parser.add_argument('uri') + + safe_search_parser = subparsers.add_parser( + 'safe-search', help=detect_safe_search.__doc__) + safe_search_parser.add_argument('path') + + safe_search_file_parser = subparsers.add_parser( + 'safe-search-uri', + help=detect_safe_search_uri.__doc__) + safe_search_file_parser.add_argument('uri') + + properties_parser = subparsers.add_parser( + 'properties', help=detect_properties.__doc__) + properties_parser.add_argument('path') + + properties_file_parser = subparsers.add_parser( + 'properties-uri', + help=detect_properties_uri.__doc__) + properties_file_parser.add_argument('uri') + + # 1.1 Vision features + web_parser = subparsers.add_parser( + 'web', help=detect_web.__doc__) + web_parser.add_argument('path') + + web_uri_parser = subparsers.add_parser( + 'web-uri', + help=detect_web_uri.__doc__) + web_uri_parser.add_argument('uri') + + web_geo_parser = subparsers.add_parser( + 'web-geo', help=web_entities_include_geo_results.__doc__) + web_geo_parser.add_argument('path') + + web_geo_uri_parser = subparsers.add_parser( + 'web-geo-uri', + help=web_entities_include_geo_results_uri.__doc__) + web_geo_uri_parser.add_argument('uri') + + crop_hints_parser = subparsers.add_parser( + 'crophints', help=detect_crop_hints.__doc__) + crop_hints_parser.add_argument('path') + + crop_hints_uri_parser = subparsers.add_parser( + 'crophints-uri', help=detect_crop_hints_uri.__doc__) + crop_hints_uri_parser.add_argument('uri') + + document_parser = subparsers.add_parser( + 'document', help=detect_document.__doc__) + document_parser.add_argument('path') + + document_uri_parser = subparsers.add_parser( + 'document-uri', help=detect_document_uri.__doc__) + document_uri_parser.add_argument('uri') + + ocr_uri_parser = subparsers.add_parser( + 'ocr-uri', help=async_detect_document.__doc__) + ocr_uri_parser.add_argument('uri') + ocr_uri_parser.add_argument('destination_uri') + + object_localization_parser = subparsers.add_parser( + 'object-localization', help=localize_objects.__doc__) + object_localization_parser.add_argument('path') + + object_localization_uri_parser = subparsers.add_parser( + 'object-localization-uri', help=localize_objects_uri.__doc__) + object_localization_uri_parser.add_argument('uri') + + args = parser.parse_args() + + if 'uri' in args.command: + run_uri(args) + else: + run_local(args) diff --git a/vision/snippets/detect/detect_test.py b/vision/snippets/detect/detect_test.py new file mode 100644 index 000000000000..ab5db829bee8 --- /dev/null +++ b/vision/snippets/detect/detect_test.py @@ -0,0 +1,285 @@ +# Copyright 2017 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
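+# These tests call the live Vision API. A minimal local run might look like
+# this (assuming application default credentials and a writable bucket of
+# your own for the async OCR output):
+#
+#   export CLOUD_STORAGE_BUCKET=<your-bucket>
+#   pip install -r requirements.txt -r requirements-test.txt
+#   pytest detect_test.py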
+ +import os +import uuid + +import backoff +from google.cloud import storage +import pytest + +import detect + +ASSET_BUCKET = "cloud-samples-data" + +BUCKET = os.environ['CLOUD_STORAGE_BUCKET'] +OUTPUT_PREFIX = 'TEST_OUTPUT_{}'.format(uuid.uuid4()) +GCS_SOURCE_URI = 'gs://{}/HodgeConj.pdf'.format(BUCKET) +GCS_DESTINATION_URI = 'gs://{}/{}/'.format(BUCKET, OUTPUT_PREFIX) + + +def test_labels(capsys): + file_name = os.path.join( + os.path.dirname(__file__), + 'resources/wakeupcat.jpg') + detect.detect_labels(file_name) + out, _ = capsys.readouterr() + assert 'Labels' in out + + +def test_labels_uri(capsys): + file_name = 'gs://{}/vision/label/wakeupcat.jpg'.format(ASSET_BUCKET) + detect.detect_labels_uri(file_name) + out, _ = capsys.readouterr() + assert 'Labels' in out + + +def test_landmarks(capsys): + file_name = os.path.join( + os.path.dirname(__file__), + 'resources/landmark.jpg') + detect.detect_landmarks(file_name) + out, _ = capsys.readouterr() + assert 'palace' in out.lower() + + +def test_landmarks_uri(capsys): + file_name = 'gs://{}/vision/landmark/pofa.jpg'.format(ASSET_BUCKET) + detect.detect_landmarks_uri(file_name) + out, _ = capsys.readouterr() + assert 'palace' in out.lower() + + +def test_faces(capsys): + file_name = os.path.join( + os.path.dirname(__file__), + 'resources/face_no_surprise.jpg') + detect.detect_faces(file_name) + out, _ = capsys.readouterr() + assert 'face bound' in out + + +def test_faces_uri(capsys): + file_name = 'gs://{}/vision/face/face_no_surprise.jpg'.format(ASSET_BUCKET) + detect.detect_faces_uri(file_name) + out, _ = capsys.readouterr() + assert 'face bound' in out + + +def test_logos(capsys): + file_name = os.path.join( + os.path.dirname(__file__), + 'resources/logos.png') + detect.detect_logos(file_name) + out, _ = capsys.readouterr() + assert 'google' in out.lower() + + +def test_logos_uri(capsys): + file_name = 'gs://{}/vision/logo/logo_google.png'.format(ASSET_BUCKET) + detect.detect_logos_uri(file_name) + out, _ = capsys.readouterr() + assert 'google' in out.lower() + + +def test_safe_search(capsys): + file_name = os.path.join( + os.path.dirname(__file__), + 'resources/wakeupcat.jpg') + detect.detect_safe_search(file_name) + out, _ = capsys.readouterr() + assert 'VERY_LIKELY' in out + assert 'racy: ' in out + + +def test_safe_search_uri(capsys): + file_name = 'gs://{}/vision/label/wakeupcat.jpg'.format(ASSET_BUCKET) + detect.detect_safe_search_uri(file_name) + out, _ = capsys.readouterr() + assert 'VERY_LIKELY' in out + assert 'racy: ' in out + + +def test_detect_text(capsys): + file_name = os.path.join( + os.path.dirname(__file__), + 'resources/text.jpg') + detect.detect_text(file_name) + out, _ = capsys.readouterr() + assert '37%' in out + + +def test_detect_text_uri(capsys): + file_name = 'gs://{}/vision/text/screen.jpg'.format(ASSET_BUCKET) + detect.detect_text_uri(file_name) + out, _ = capsys.readouterr() + assert '37%' in out + + +def test_detect_properties(capsys): + file_name = os.path.join( + os.path.dirname(__file__), + 'resources/landmark.jpg') + detect.detect_properties(file_name) + out, _ = capsys.readouterr() + assert 'frac' in out + + +def test_detect_properties_uri(capsys): + file_name = 'gs://{}/vision/landmark/pofa.jpg'.format(ASSET_BUCKET) + detect.detect_properties_uri(file_name) + out, _ = capsys.readouterr() + assert 'frac' in out + + +def only_sample_error(e): + """A callback for giving up upon Exceptions. + + Giving up upon any Exceptions other than the ones that sample code + throws at the end of the function. 
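+
+    Exceptions raised by the detect samples embed the API errors guide URL,
+    so backoff retries those and gives up immediately on anything else.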
+ """ + return 'https://cloud.google.com/apis/design/errors' not in str(e) + + +# Vision 1.1 tests +def test_detect_web(capsys): + file_name = os.path.join( + os.path.dirname(__file__), + 'resources/landmark.jpg') + + @backoff.on_exception( + backoff.expo, Exception, max_time=60, giveup=only_sample_error) + def run_sample(): + detect.detect_web(file_name) + + run_sample() + out, _ = capsys.readouterr() + assert 'best guess label: palace of fine arts' in out.lower() + + +def test_detect_web_uri(capsys): + file_name = 'gs://{}/vision/landmark/pofa.jpg'.format(ASSET_BUCKET) + + @backoff.on_exception( + backoff.expo, Exception, max_time=60, giveup=only_sample_error) + def run_sample(): + detect.detect_web_uri(file_name) + + run_sample() + out, _ = capsys.readouterr() + assert 'best guess label: palace of fine arts' in out.lower() + + +def test_detect_web_with_geo(capsys): + file_name = os.path.join( + os.path.dirname(__file__), + 'resources/city.jpg') + + @backoff.on_exception( + backoff.expo, Exception, max_time=60, giveup=only_sample_error) + def run_sample(): + detect.web_entities_include_geo_results(file_name) + + run_sample() + out, _ = capsys.readouterr() + out = out.lower() + assert 'description' in out + + +def test_detect_web_with_geo_uri(capsys): + file_name = 'gs://{}/vision/web/city.jpg'.format(ASSET_BUCKET) + + @backoff.on_exception( + backoff.expo, Exception, max_time=60, giveup=only_sample_error) + def run_sample(): + detect.web_entities_include_geo_results_uri(file_name) + + run_sample() + out, _ = capsys.readouterr() + out = out.lower() + assert 'description' in out + + +def test_detect_document(capsys): + file_name = os.path.join( + os.path.dirname(__file__), + 'resources/text.jpg') + detect.detect_document(file_name) + out, _ = capsys.readouterr() + assert 'class' in out + + +def test_detect_document_uri(capsys): + file_name = 'gs://{}/vision/text/screen.jpg'.format(ASSET_BUCKET) + detect.detect_document_uri(file_name) + out, _ = capsys.readouterr() + assert 'class' in out + + +def test_detect_crop_hints(capsys): + file_name = os.path.join( + os.path.dirname(__file__), + 'resources/wakeupcat.jpg') + detect.detect_crop_hints(file_name) + out, _ = capsys.readouterr() + assert 'bounds: ' in out + + +def test_detect_crop_hints_uri(capsys): + file_name = 'gs://{}/vision/label/wakeupcat.jpg'.format(ASSET_BUCKET) + detect.detect_crop_hints_uri(file_name) + out, _ = capsys.readouterr() + assert 'bounds: ' in out + + +@pytest.mark.flaky(max_runs=3, min_passes=1) +def test_async_detect_document(capsys): + storage_client = storage.Client() + bucket = storage_client.get_bucket(BUCKET) + if len(list(bucket.list_blobs(prefix=OUTPUT_PREFIX))) > 0: + for blob in bucket.list_blobs(prefix=OUTPUT_PREFIX): + blob.delete() + + assert len(list(bucket.list_blobs(prefix=OUTPUT_PREFIX))) == 0 + + uri = 'gs://{}/vision/document/custom_0773375000_title_only.pdf'.format( + ASSET_BUCKET) + detect.async_detect_document( + gcs_source_uri=uri, + gcs_destination_uri=GCS_DESTINATION_URI) + out, _ = capsys.readouterr() + + assert 'OIL, GAS AND MINERAL LEASE' in out + assert len(list(bucket.list_blobs(prefix=OUTPUT_PREFIX))) > 0 + + for blob in bucket.list_blobs(prefix=OUTPUT_PREFIX): + blob.delete() + + assert len(list(bucket.list_blobs(prefix=OUTPUT_PREFIX))) == 0 + + +def test_localize_objects(capsys): + detect.localize_objects('resources/puppies.jpg') + + out, _ = capsys.readouterr() + assert 'dog' in out.lower() + + +def test_localize_objects_uri(capsys): + uri = 
'gs://cloud-samples-data/vision/puppies.jpg' + + detect.localize_objects_uri(uri) + + out, _ = capsys.readouterr() + assert 'dog' in out.lower() diff --git a/vision/snippets/detect/requirements-test.txt b/vision/snippets/detect/requirements-test.txt new file mode 100644 index 000000000000..ac4968a16f56 --- /dev/null +++ b/vision/snippets/detect/requirements-test.txt @@ -0,0 +1,3 @@ +backoff==2.2.1 +pytest==7.2.0 +flaky==3.7.0 diff --git a/vision/snippets/detect/requirements.txt b/vision/snippets/detect/requirements.txt new file mode 100644 index 000000000000..b4bac43e7127 --- /dev/null +++ b/vision/snippets/detect/requirements.txt @@ -0,0 +1,2 @@ +google-cloud-vision==3.1.4 +google-cloud-storage==2.5.0 diff --git a/vision/snippets/detect/resources/city.jpg b/vision/snippets/detect/resources/city.jpg new file mode 100644 index 000000000000..b14282e75393 Binary files /dev/null and b/vision/snippets/detect/resources/city.jpg differ diff --git a/vision/snippets/detect/resources/cloud-samples-data/vision/document/custom_0773375000_title_only.pdf b/vision/snippets/detect/resources/cloud-samples-data/vision/document/custom_0773375000_title_only.pdf new file mode 100644 index 000000000000..514ca6b1bf9a Binary files /dev/null and b/vision/snippets/detect/resources/cloud-samples-data/vision/document/custom_0773375000_title_only.pdf differ diff --git a/vision/snippets/detect/resources/duck_and_truck.jpg b/vision/snippets/detect/resources/duck_and_truck.jpg new file mode 100644 index 000000000000..5c560fe774f5 Binary files /dev/null and b/vision/snippets/detect/resources/duck_and_truck.jpg differ diff --git a/vision/snippets/detect/resources/face_no_surprise.jpg b/vision/snippets/detect/resources/face_no_surprise.jpg new file mode 100644 index 000000000000..0e2894adb833 Binary files /dev/null and b/vision/snippets/detect/resources/face_no_surprise.jpg differ diff --git a/vision/snippets/detect/resources/handwritten.jpg b/vision/snippets/detect/resources/handwritten.jpg new file mode 100644 index 000000000000..50a9575b5add Binary files /dev/null and b/vision/snippets/detect/resources/handwritten.jpg differ diff --git a/vision/snippets/detect/resources/kafka.pdf b/vision/snippets/detect/resources/kafka.pdf new file mode 100644 index 000000000000..ffa2e2fac2f4 Binary files /dev/null and b/vision/snippets/detect/resources/kafka.pdf differ diff --git a/vision/snippets/detect/resources/landmark.jpg b/vision/snippets/detect/resources/landmark.jpg new file mode 100644 index 000000000000..41c3d0fc9356 Binary files /dev/null and b/vision/snippets/detect/resources/landmark.jpg differ diff --git a/vision/snippets/detect/resources/logos.png b/vision/snippets/detect/resources/logos.png new file mode 100644 index 000000000000..5538eaed2bdf Binary files /dev/null and b/vision/snippets/detect/resources/logos.png differ diff --git a/vision/snippets/detect/resources/puppies.jpg b/vision/snippets/detect/resources/puppies.jpg new file mode 100644 index 000000000000..1bfbbc9c5e41 Binary files /dev/null and b/vision/snippets/detect/resources/puppies.jpg differ diff --git a/vision/snippets/detect/resources/text.jpg b/vision/snippets/detect/resources/text.jpg new file mode 100644 index 000000000000..3b17d55de0eb Binary files /dev/null and b/vision/snippets/detect/resources/text.jpg differ diff --git a/vision/snippets/detect/resources/wakeupcat.jpg b/vision/snippets/detect/resources/wakeupcat.jpg new file mode 100644 index 000000000000..139cf461ecae Binary files /dev/null and b/vision/snippets/detect/resources/wakeupcat.jpg 
differ diff --git a/vision/snippets/detect/set_endpoint.py b/vision/snippets/detect/set_endpoint.py new file mode 100644 index 000000000000..1261b024187e --- /dev/null +++ b/vision/snippets/detect/set_endpoint.py @@ -0,0 +1,48 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +def set_endpoint(): + """Change your endpoint""" + # [START vision_set_endpoint] + from google.cloud import vision + + client_options = {'api_endpoint': 'eu-vision.googleapis.com'} + + client = vision.ImageAnnotatorClient(client_options=client_options) + # [END vision_set_endpoint] + image_source = vision.ImageSource( + image_uri='gs://cloud-samples-data/vision/text/screen.jpg') + image = vision.Image(source=image_source) + + response = client.text_detection(image=image) + + print('Texts:') + for text in response.text_annotations: + print('{}'.format(text.description)) + + vertices = ['({},{})'.format(vertex.x, vertex.y) + for vertex in text.bounding_poly.vertices] + + print('bounds: {}\n'.format(','.join(vertices))) + + if response.error.message: + raise Exception( + '{}\nFor more info on error messages, check: ' + 'https://cloud.google.com/apis/design/errors'.format( + response.error.message)) + + +if __name__ == '__main__': + set_endpoint() diff --git a/vision/snippets/detect/set_endpoint_test.py b/vision/snippets/detect/set_endpoint_test.py new file mode 100644 index 000000000000..37bd590ca9a3 --- /dev/null +++ b/vision/snippets/detect/set_endpoint_test.py @@ -0,0 +1,23 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import set_endpoint + + +def test_set_endpoint(capsys): + set_endpoint.set_endpoint() + + out, _ = capsys.readouterr() + assert 'System' in out + assert 'bounds:' in out diff --git a/vision/snippets/detect/vision_async_batch_annotate_images.py b/vision/snippets/detect/vision_async_batch_annotate_images.py new file mode 100644 index 000000000000..68d4a29837ed --- /dev/null +++ b/vision/snippets/detect/vision_async_batch_annotate_images.py @@ -0,0 +1,55 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +# [START vision_async_batch_annotate_images] + +from google.cloud import vision_v1 + + +def sample_async_batch_annotate_images( + input_image_uri="gs://cloud-samples-data/vision/label/wakeupcat.jpg", + output_uri="gs://your-bucket/prefix/", +): + """Perform async batch image annotation.""" + client = vision_v1.ImageAnnotatorClient() + + source = {"image_uri": input_image_uri} + image = {"source": source} + features = [ + {"type_": vision_v1.Feature.Type.LABEL_DETECTION}, + {"type_": vision_v1.Feature.Type.IMAGE_PROPERTIES}, + ] + + # Each requests element corresponds to a single image. To annotate more + # images, create a request element for each image and add it to + # the array of requests + requests = [{"image": image, "features": features}] + gcs_destination = {"uri": output_uri} + + # The max number of responses to output in each JSON file + batch_size = 2 + output_config = {"gcs_destination": gcs_destination, + "batch_size": batch_size} + + operation = client.async_batch_annotate_images(requests=requests, output_config=output_config) + + print("Waiting for operation to complete...") + response = operation.result(90) + + # The output is written to GCS with the provided output_uri as prefix + gcs_output_uri = response.output_config.gcs_destination.uri + print("Output written to GCS with prefix: {}".format(gcs_output_uri)) + + +# [END vision_async_batch_annotate_images] diff --git a/vision/snippets/detect/vision_async_batch_annotate_images_test.py b/vision/snippets/detect/vision_async_batch_annotate_images_test.py new file mode 100644 index 000000000000..b73d826bcc4b --- /dev/null +++ b/vision/snippets/detect/vision_async_batch_annotate_images_test.py @@ -0,0 +1,63 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
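+# This test needs CLOUD_STORAGE_BUCKET to name a bucket the test runner can
+# write to; results land under a unique TEST_OUTPUT_<uuid> prefix and the
+# bucket fixture deletes them both before and after the run.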
+ +import os +import uuid + +from google.cloud import storage +import pytest + +import vision_async_batch_annotate_images + +RESOURCES = os.path.join(os.path.dirname(__file__), "resources") +GCS_ROOT = "gs://cloud-samples-data/vision/" + +BUCKET = os.environ["CLOUD_STORAGE_BUCKET"] +OUTPUT_PREFIX = "TEST_OUTPUT_{}".format(uuid.uuid4()) +GCS_DESTINATION_URI = "gs://{}/{}/".format(BUCKET, OUTPUT_PREFIX) + + +@pytest.fixture() +def storage_client(): + yield storage.Client() + + +@pytest.fixture() +def bucket(storage_client): + bucket = storage_client.get_bucket(BUCKET) + + try: + for blob in bucket.list_blobs(prefix=OUTPUT_PREFIX): + blob.delete() + except Exception: + pass + + yield bucket + + for blob in bucket.list_blobs(prefix=OUTPUT_PREFIX): + blob.delete() + + +@pytest.mark.flaky(max_runs=3, min_passes=1) +def test_sample_asyn_batch_annotate_images(storage_client, bucket, capsys): + input_image_uri = os.path.join(GCS_ROOT, "label/wakeupcat.jpg") + + vision_async_batch_annotate_images.sample_async_batch_annotate_images( + input_image_uri=input_image_uri, output_uri=GCS_DESTINATION_URI + ) + + out, _ = capsys.readouterr() + + assert "Output written to GCS" in out + assert len(list(bucket.list_blobs(prefix=OUTPUT_PREFIX))) > 0 diff --git a/vision/snippets/detect/vision_batch_annotate_files.py b/vision/snippets/detect/vision_batch_annotate_files.py new file mode 100644 index 000000000000..9834584058d1 --- /dev/null +++ b/vision/snippets/detect/vision_batch_annotate_files.py @@ -0,0 +1,56 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# [START vision_batch_annotate_files] + +import io + +from google.cloud import vision_v1 + + +def sample_batch_annotate_files(file_path="path/to/your/document.pdf"): + """Perform batch file annotation.""" + client = vision_v1.ImageAnnotatorClient() + + # Supported mime_type: application/pdf, image/tiff, image/gif + mime_type = "application/pdf" + with io.open(file_path, "rb") as f: + content = f.read() + input_config = {"mime_type": mime_type, "content": content} + features = [{"type_": vision_v1.Feature.Type.DOCUMENT_TEXT_DETECTION}] + + # The service can process up to 5 pages per document file. Here we specify + # the first, second, and last page of the document to be processed. 
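+    # A negative page number counts from the end of the file, so -1 always
+    # refers to the last page.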
+ pages = [1, 2, -1] + requests = [{"input_config": input_config, "features": features, "pages": pages}] + + response = client.batch_annotate_files(requests=requests) + for image_response in response.responses[0].responses: + print(u"Full text: {}".format(image_response.full_text_annotation.text)) + for page in image_response.full_text_annotation.pages: + for block in page.blocks: + print(u"\nBlock confidence: {}".format(block.confidence)) + for par in block.paragraphs: + print(u"\tParagraph confidence: {}".format(par.confidence)) + for word in par.words: + print(u"\t\tWord confidence: {}".format(word.confidence)) + for symbol in word.symbols: + print( + u"\t\t\tSymbol: {}, (confidence: {})".format( + symbol.text, symbol.confidence + ) + ) + + +# [END vision_batch_annotate_files] diff --git a/vision/snippets/detect/vision_batch_annotate_files_gcs.py b/vision/snippets/detect/vision_batch_annotate_files_gcs.py new file mode 100644 index 000000000000..0cd0c204e6cd --- /dev/null +++ b/vision/snippets/detect/vision_batch_annotate_files_gcs.py @@ -0,0 +1,56 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# [START vision_batch_annotate_files_gcs] + +from google.cloud import vision_v1 + + +def sample_batch_annotate_files( + storage_uri="gs://cloud-samples-data/vision/document_understanding/kafka.pdf", +): + """Perform batch file annotation.""" + mime_type = "application/pdf" + + client = vision_v1.ImageAnnotatorClient() + + gcs_source = {"uri": storage_uri} + input_config = {"gcs_source": gcs_source, "mime_type": mime_type} + features = [{"type_": vision_v1.Feature.Type.DOCUMENT_TEXT_DETECTION}] + + # The service can process up to 5 pages per document file. + # Here we specify the first, second, and last page of the document to be + # processed. 
+ pages = [1, 2, -1] + requests = [{"input_config": input_config, "features": features, "pages": pages}] + + response = client.batch_annotate_files(requests=requests) + for image_response in response.responses[0].responses: + print(u"Full text: {}".format(image_response.full_text_annotation.text)) + for page in image_response.full_text_annotation.pages: + for block in page.blocks: + print(u"\nBlock confidence: {}".format(block.confidence)) + for par in block.paragraphs: + print(u"\tParagraph confidence: {}".format(par.confidence)) + for word in par.words: + print(u"\t\tWord confidence: {}".format(word.confidence)) + for symbol in word.symbols: + print( + u"\t\t\tSymbol: {}, (confidence: {})".format( + symbol.text, symbol.confidence + ) + ) + + +# [END vision_batch_annotate_files_gcs] diff --git a/vision/snippets/detect/vision_batch_annotate_files_gcs_test.py b/vision/snippets/detect/vision_batch_annotate_files_gcs_test.py new file mode 100644 index 000000000000..0f7190137b07 --- /dev/null +++ b/vision/snippets/detect/vision_batch_annotate_files_gcs_test.py @@ -0,0 +1,30 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os + +import vision_batch_annotate_files_gcs + +GCS_ROOT = "gs://cloud-samples-data/vision/" + + +def test_sample_batch_annotate_files_gcs(capsys): + storage_uri = os.path.join(GCS_ROOT, "document_understanding/kafka.pdf") + + vision_batch_annotate_files_gcs.sample_batch_annotate_files(storage_uri=storage_uri) + + out, _ = capsys.readouterr() + + assert "Full text" in out + assert "Block confidence" in out diff --git a/vision/snippets/detect/vision_batch_annotate_files_test.py b/vision/snippets/detect/vision_batch_annotate_files_test.py new file mode 100644 index 000000000000..f8dbe7329165 --- /dev/null +++ b/vision/snippets/detect/vision_batch_annotate_files_test.py @@ -0,0 +1,30 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
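+# Exercises the local-file variant of batch file annotation against the
+# bundled resources/kafka.pdf and checks that the full text and block
+# confidences are printed.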
+ +import os + +import vision_batch_annotate_files + +RESOURCES = os.path.join(os.path.dirname(__file__), "resources") + + +def test_sample_batch_annotate_files(capsys): + file_path = os.path.join(RESOURCES, "kafka.pdf") + + vision_batch_annotate_files.sample_batch_annotate_files(file_path=file_path) + + out, _ = capsys.readouterr() + + assert "Full text" in out + assert "Block confidence" in out diff --git a/vision/snippets/document_text/.gitignore b/vision/snippets/document_text/.gitignore new file mode 100644 index 000000000000..a4c44706caf8 --- /dev/null +++ b/vision/snippets/document_text/.gitignore @@ -0,0 +1 @@ +output-text.jpg diff --git a/vision/snippets/document_text/README.rst b/vision/snippets/document_text/README.rst new file mode 100644 index 000000000000..a38564a27d01 --- /dev/null +++ b/vision/snippets/document_text/README.rst @@ -0,0 +1,111 @@ +.. This file is automatically generated. Do not edit this file directly. + +Google Cloud Vision API Python Samples +=============================================================================== + +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=vision/cloud-client/document_text/README.rst + + +This directory contains samples for Google Cloud Vision API. `Google Cloud Vision API`_ allows developers to easily integrate vision detection features within applications, including image labeling, face and landmark detection, optical character recognition (OCR), and tagging of explicit content. + +- See the `migration guide`_ for information about migrating to Python client library v0.25.1. + +.. _migration guide: https://cloud.google.com/vision/docs/python-client-migration + + + + +.. _Google Cloud Vision API: https://cloud.google.com/vision/docs + +Setup +------------------------------------------------------------------------------- + + +Authentication +++++++++++++++ + +This sample requires you to have authentication setup. Refer to the +`Authentication Getting Started Guide`_ for instructions on setting up +credentials for applications. + +.. _Authentication Getting Started Guide: + https://cloud.google.com/docs/authentication/getting-started + +Install Dependencies +++++++++++++++++++++ + +#. Clone python-docs-samples and change directory to the sample directory you want to use. + + .. code-block:: bash + + $ git clone https://github.com/GoogleCloudPlatform/python-docs-samples.git + +#. Install `pip`_ and `virtualenv`_ if you do not already have them. You may want to refer to the `Python Development Environment Setup Guide`_ for Google Cloud Platform for instructions. + + .. _Python Development Environment Setup Guide: + https://cloud.google.com/python/setup + +#. Create a virtualenv. Samples are compatible with Python 2.7 and 3.4+. + + .. code-block:: bash + + $ virtualenv env + $ source env/bin/activate + +#. Install the dependencies needed to run the samples. + + .. code-block:: bash + + $ pip install -r requirements.txt + +.. _pip: https://pip.pypa.io/ +.. _virtualenv: https://virtualenv.pypa.io/ + +Samples +------------------------------------------------------------------------------- + +Document Text tutorial ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +.. 
image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=vision/cloud-client/document_text/doctext.py,vision/cloud-client/document_text/README.rst + + + + +To run this sample: + +.. code-block:: bash + + $ python doctext.py + + usage: doctext.py [-h] [-out_file OUT_FILE] detect_file + + positional arguments: + detect_file The image for text detection. + + optional arguments: + -h, --help show this help message and exit + -out_file OUT_FILE Optional output file + + + + + +The client library +------------------------------------------------------------------------------- + +This sample uses the `Google Cloud Client Library for Python`_. +You can read the documentation for more details on API usage and use GitHub +to `browse the source`_ and `report issues`_. + +.. _Google Cloud Client Library for Python: + https://googlecloudplatform.github.io/google-cloud-python/ +.. _browse the source: + https://github.com/GoogleCloudPlatform/google-cloud-python +.. _report issues: + https://github.com/GoogleCloudPlatform/google-cloud-python/issues + + +.. _Google Cloud SDK: https://cloud.google.com/sdk/ \ No newline at end of file diff --git a/vision/snippets/document_text/README.rst.in b/vision/snippets/document_text/README.rst.in new file mode 100644 index 000000000000..4746e327eca7 --- /dev/null +++ b/vision/snippets/document_text/README.rst.in @@ -0,0 +1,30 @@ +# This file is used to generate README.rst + +product: + name: Google Cloud Vision API + short_name: Cloud Vision API + url: https://cloud.google.com/vision/docs + description: > + `Google Cloud Vision API`_ allows developers to easily integrate vision + detection features within applications, including image labeling, face and + landmark detection, optical character recognition (OCR), and tagging of + explicit content. + + + - See the `migration guide`_ for information about migrating to Python client library v0.25.1. + + + .. _migration guide: https://cloud.google.com/vision/docs/python-client-migration + +setup: +- auth +- install_deps + +samples: +- name: Document Text tutorial + file: doctext.py + show_help: True + +cloud_client_library: true + +folder: vision/cloud-client/document_text \ No newline at end of file diff --git a/vision/snippets/document_text/doctext.py b/vision/snippets/document_text/doctext.py new file mode 100644 index 000000000000..d2f01f497e7d --- /dev/null +++ b/vision/snippets/document_text/doctext.py @@ -0,0 +1,125 @@ +#!/usr/bin/env python + +# Copyright 2017 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Outlines document text given an image. 
+ +Example: + python doctext.py resources/text_menu.jpg +""" +# [START vision_document_text_tutorial] +# [START vision_document_text_tutorial_imports] +import argparse +from enum import Enum +import io + +from google.cloud import vision +from PIL import Image, ImageDraw +# [END vision_document_text_tutorial_imports] + + +class FeatureType(Enum): + PAGE = 1 + BLOCK = 2 + PARA = 3 + WORD = 4 + SYMBOL = 5 + + +def draw_boxes(image, bounds, color): + """Draw a border around the image using the hints in the vector list.""" + draw = ImageDraw.Draw(image) + + for bound in bounds: + draw.polygon( + [ + bound.vertices[0].x, + bound.vertices[0].y, + bound.vertices[1].x, + bound.vertices[1].y, + bound.vertices[2].x, + bound.vertices[2].y, + bound.vertices[3].x, + bound.vertices[3].y, + ], + None, + color, + ) + return image + + +# [START vision_document_text_tutorial_detect_bounds] +def get_document_bounds(image_file, feature): + """Returns document bounds given an image.""" + client = vision.ImageAnnotatorClient() + + bounds = [] + + with io.open(image_file, "rb") as image_file: + content = image_file.read() + + image = vision.Image(content=content) + + response = client.document_text_detection(image=image) + document = response.full_text_annotation + + # Collect specified feature bounds by enumerating all document features + for page in document.pages: + for block in page.blocks: + for paragraph in block.paragraphs: + for word in paragraph.words: + for symbol in word.symbols: + if feature == FeatureType.SYMBOL: + bounds.append(symbol.bounding_box) + + if feature == FeatureType.WORD: + bounds.append(word.bounding_box) + + if feature == FeatureType.PARA: + bounds.append(paragraph.bounding_box) + + if feature == FeatureType.BLOCK: + bounds.append(block.bounding_box) + + # The list `bounds` contains the coordinates of the bounding boxes. + return bounds +# [END vision_document_text_tutorial_detect_bounds] + + +def render_doc_text(filein, fileout): + image = Image.open(filein) + bounds = get_document_bounds(filein, FeatureType.BLOCK) + draw_boxes(image, bounds, "blue") + bounds = get_document_bounds(filein, FeatureType.PARA) + draw_boxes(image, bounds, "red") + bounds = get_document_bounds(filein, FeatureType.WORD) + draw_boxes(image, bounds, "yellow") + + if fileout != 0: + image.save(fileout) + else: + image.show() + + +if __name__ == "__main__": + # [START vision_document_text_tutorial_run_application] + parser = argparse.ArgumentParser() + parser.add_argument("detect_file", help="The image for text detection.") + parser.add_argument("-out_file", help="Optional output file", default=0) + args = parser.parse_args() + + render_doc_text(args.detect_file, args.out_file) + # [END vision_document_text_tutorial_run_application] +# [END vision_document_text_tutorial] diff --git a/vision/snippets/document_text/doctext_test.py b/vision/snippets/document_text/doctext_test.py new file mode 100644 index 000000000000..0083abd64393 --- /dev/null +++ b/vision/snippets/document_text/doctext_test.py @@ -0,0 +1,24 @@ +# Copyright 2017 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import os + +import doctext + + +def test_text(capsys): + """Checks that the annotated output image is created.""" + doctext.render_doc_text('resources/text_menu.jpg', 'output-text.jpg') + out, _ = capsys.readouterr() + assert os.path.isfile('output-text.jpg') diff --git a/vision/snippets/document_text/noxfile_config.py b/vision/snippets/document_text/noxfile_config.py new file mode 100644 index 000000000000..e0014e50418e --- /dev/null +++ b/vision/snippets/document_text/noxfile_config.py @@ -0,0 +1,27 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Default TEST_CONFIG_OVERRIDE for python repos. + +# You can copy this file into your directory, then it will be imported from +# the noxfile.py. + +# The source of truth: +# https://github.com/GoogleCloudPlatform/python-docs-samples/blob/master/noxfile_config.py + +TEST_CONFIG_OVERRIDE = { + # You can opt out from the test for specific Python versions. + # Pillow 9.0.0 does not support python 3.6 + "ignored_versions": ["2.7", "3.6"], +} diff --git a/vision/snippets/document_text/requirements-test.txt b/vision/snippets/document_text/requirements-test.txt new file mode 100644 index 000000000000..49780e035690 --- /dev/null +++ b/vision/snippets/document_text/requirements-test.txt @@ -0,0 +1 @@ +pytest==7.2.0 diff --git a/vision/snippets/document_text/requirements.txt b/vision/snippets/document_text/requirements.txt new file mode 100644 index 000000000000..9c9ade3be0e8 --- /dev/null +++ b/vision/snippets/document_text/requirements.txt @@ -0,0 +1,2 @@ +google-cloud-vision==3.1.4 +pillow==9.3.0 diff --git a/vision/snippets/document_text/resources/text_menu.jpg b/vision/snippets/document_text/resources/text_menu.jpg new file mode 100644 index 000000000000..caa678b3e7ca Binary files /dev/null and b/vision/snippets/document_text/resources/text_menu.jpg differ diff --git a/vision/snippets/face_detection/.gitignore b/vision/snippets/face_detection/.gitignore new file mode 100644 index 000000000000..01f02dff9a77 --- /dev/null +++ b/vision/snippets/face_detection/.gitignore @@ -0,0 +1 @@ +out.jpg diff --git a/vision/snippets/face_detection/README.rst b/vision/snippets/face_detection/README.rst new file mode 100644 index 000000000000..b04a344ecfba --- /dev/null +++ b/vision/snippets/face_detection/README.rst @@ -0,0 +1,101 @@ +.. This file is automatically generated. Do not edit this file directly. + +Google Cloud Vision API Python Samples +=============================================================================== + +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=vision/cloud-client/face_detection/README.rst + + +This directory contains samples for Google Cloud Vision API. 
`Google Cloud Vision API`_ allows developers to easily integrate vision detection features within applications, including image labeling, face and landmark detection, optical character recognition (OCR), and tagging of explicit content. + +- See the `migration guide`_ for information about migrating to Python client library v0.25.1. + +.. _migration guide: https://cloud.google.com/vision/docs/python-client-migration + + +This sample demonstrates how to use the Cloud Vision API to do face detection. + + +.. _Google Cloud Vision API: https://cloud.google.com/vision/docs + +Setup +------------------------------------------------------------------------------- + + +Authentication +++++++++++++++ + +This sample requires you to have authentication setup. Refer to the +`Authentication Getting Started Guide`_ for instructions on setting up +credentials for applications. + +.. _Authentication Getting Started Guide: + https://cloud.google.com/docs/authentication/getting-started + +Install Dependencies +++++++++++++++++++++ + +#. Clone python-docs-samples and change directory to the sample directory you want to use. + + .. code-block:: bash + + $ git clone https://github.com/GoogleCloudPlatform/python-docs-samples.git + +#. Install `pip`_ and `virtualenv`_ if you do not already have them. You may want to refer to the `Python Development Environment Setup Guide`_ for Google Cloud Platform for instructions. + + .. _Python Development Environment Setup Guide: + https://cloud.google.com/python/setup + +#. Create a virtualenv. Samples are compatible with Python 2.7 and 3.4+. + + .. code-block:: bash + + $ virtualenv env + $ source env/bin/activate + +#. Install the dependencies needed to run the samples. + + .. code-block:: bash + + $ pip install -r requirements.txt + +.. _pip: https://pip.pypa.io/ +.. _virtualenv: https://virtualenv.pypa.io/ + +Samples +------------------------------------------------------------------------------- + +Face detection ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=vision/cloud-client/face_detection/faces.py,vision/cloud-client/face_detection/README.rst + + + + +To run this sample: + +.. code-block:: bash + + $ python faces.py + + usage: faces.py [-h] [--out OUTPUT] [--max-results MAX_RESULTS] input_image + + Detects faces in the given image. + + positional arguments: + input_image the image you'd like to detect faces in. + + optional arguments: + -h, --help show this help message and exit + --out OUTPUT the name of the output file. + --max-results MAX_RESULTS + the max results of face detection. + + + + + +.. 
_Google Cloud SDK: https://cloud.google.com/sdk/ \ No newline at end of file diff --git a/vision/snippets/face_detection/README.rst.in b/vision/snippets/face_detection/README.rst.in new file mode 100644 index 000000000000..422cec1d11da --- /dev/null +++ b/vision/snippets/face_detection/README.rst.in @@ -0,0 +1,31 @@ +# This file is used to generate README.rst + +product: + name: Google Cloud Vision API + short_name: Cloud Vision API + url: https://cloud.google.com/vision/docs + description: > + `Google Cloud Vision API`_ allows developers to easily integrate vision + detection features within applications, including image labeling, face and + landmark detection, optical character recognition (OCR), and tagging of + explicit content. + + + - See the `migration guide`_ for information about migrating to Python client library v0.25.1. + + + .. _migration guide: https://cloud.google.com/vision/docs/python-client-migration + +description: > + This sample demonstrates how to use the Cloud Vision API to do face detection. + +setup: +- auth +- install_deps + +samples: +- name: Face detection + file: faces.py + show_help: true + +folder: vision/cloud-client/face_detection \ No newline at end of file diff --git a/vision/snippets/face_detection/faces.py b/vision/snippets/face_detection/faces.py new file mode 100755 index 000000000000..25014048a790 --- /dev/null +++ b/vision/snippets/face_detection/faces.py @@ -0,0 +1,104 @@ +#!/usr/bin/env python + +# Copyright 2015 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Draws squares around detected faces in the given image.""" + +import argparse + +# [START vision_face_detection_tutorial_imports] +from google.cloud import vision +from PIL import Image, ImageDraw +# [END vision_face_detection_tutorial_imports] + + +# [START vision_face_detection_tutorial_send_request] +def detect_face(face_file, max_results=4): + """Uses the Vision API to detect faces in the given file. + + Args: + face_file: A file-like object containing an image with faces. + + Returns: + An array of Face objects with information about the picture. + """ + # [START vision_face_detection_tutorial_client] + client = vision.ImageAnnotatorClient() + # [END vision_face_detection_tutorial_client] + + content = face_file.read() + image = vision.Image(content=content) + + return client.face_detection( + image=image, max_results=max_results).face_annotations +# [END vision_face_detection_tutorial_send_request] + + +# [START vision_face_detection_tutorial_process_response] +def highlight_faces(image, faces, output_filename): + """Draws a polygon around the faces, then saves to output_filename. + + Args: + image: a file containing the image with the faces. + faces: a list of faces found in the file. This should be in the format + returned by the Vision API. + output_filename: the name of the image file to be created, where the + faces have polygons drawn around them. 
+    """
+    im = Image.open(image)
+    draw = ImageDraw.Draw(im)
+    # Draw a bounding box around each detected face.
+    for face in faces:
+        box = [(vertex.x, vertex.y)
+               for vertex in face.bounding_poly.vertices]
+        draw.line(box + [box[0]], width=5, fill='#00ff00')
+        # Place the confidence score of the detected face above the
+        # detection box in the output image.
+        draw.text(((face.bounding_poly.vertices)[0].x,
+                   (face.bounding_poly.vertices)[0].y - 30),
+                  '{:.1%}'.format(face.detection_confidence),
+                  fill='#FF0000')
+    im.save(output_filename)
+# [END vision_face_detection_tutorial_process_response]
+
+
+# [START vision_face_detection_tutorial_run_application]
+def main(input_filename, output_filename, max_results):
+    with open(input_filename, 'rb') as image:
+        faces = detect_face(image, max_results)
+        print('Found {} face{}'.format(
+            len(faces), '' if len(faces) == 1 else 's'))
+
+        print('Writing to file {}'.format(output_filename))
+        # Reset the file pointer, so we can read the file again.
+        image.seek(0)
+        highlight_faces(image, faces, output_filename)
+# [END vision_face_detection_tutorial_run_application]
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(
+        description='Detects faces in the given image.')
+    parser.add_argument(
+        'input_image', help='the image you\'d like to detect faces in.')
+    parser.add_argument(
+        '--out', dest='output', default='out.jpg',
+        help='the name of the output file.')
+    parser.add_argument(
+        '--max-results', dest='max_results', type=int, default=4,
+        help='the max results of face detection.')
+    args = parser.parse_args()
+
+    main(args.input_image, args.output, args.max_results)
diff --git a/vision/snippets/face_detection/faces_test.py b/vision/snippets/face_detection/faces_test.py
new file mode 100644
index 000000000000..4d967fd418bd
--- /dev/null
+++ b/vision/snippets/face_detection/faces_test.py
@@ -0,0 +1,39 @@
+# Copyright 2016 Google LLC
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
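For orientation before the test that follows: the two tutorial functions in faces.py can also be driven directly from Python rather than through the command line. A minimal sketch, assuming the bundled resources/face-input.jpg image and default application credentials (the file names and max_results value are illustrative only):

.. code-block:: python

    from faces import detect_face, highlight_faces

    # Ask the Vision API for up to four face annotations, then draw the
    # bounding boxes onto a copy of the input image.
    with open('resources/face-input.jpg', 'rb') as image:
        faces = detect_face(image, max_results=4)
        image.seek(0)  # rewind so highlight_faces can re-read the bytes
        highlight_faces(image, faces, 'out.jpg')

    for face in faces:
        print('Detection confidence: {:.1%}'.format(face.detection_confidence))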
+ +import os + +from PIL import Image + +from faces import main + +RESOURCES = os.path.join(os.path.dirname(__file__), 'resources') + + +def test_main(tmpdir): + out_file = os.path.join(tmpdir.dirname, 'face-output.jpg') + in_file = os.path.join(RESOURCES, 'face-input.jpg') + + # Make sure there isn't already a green box + im = Image.open(in_file) + pixels = im.getdata() + greens = sum(1 for (r, g, b) in pixels if r == 0 and g == 255 and b == 0) + assert greens < 1 + + main(in_file, out_file, 10) + + # Make sure there now is some green drawn + im = Image.open(out_file) + pixels = im.getdata() + greens = sum(1 for (r, g, b) in pixels if r == 0 and g == 255 and b == 0) + assert greens > 10 diff --git a/vision/snippets/face_detection/noxfile_config.py b/vision/snippets/face_detection/noxfile_config.py new file mode 100644 index 000000000000..e0014e50418e --- /dev/null +++ b/vision/snippets/face_detection/noxfile_config.py @@ -0,0 +1,27 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Default TEST_CONFIG_OVERRIDE for python repos. + +# You can copy this file into your directory, then it will be inported from +# the noxfile.py. + +# The source of truth: +# https://github.com/GoogleCloudPlatform/python-docs-samples/blob/master/noxfile_config.py + +TEST_CONFIG_OVERRIDE = { + # You can opt out from the test for specific Python versions. + # Pillow 9.0.0 does not support python 3.6 + "ignored_versions": ["2.7", "3.6"], +} diff --git a/vision/snippets/face_detection/requirements-test.txt b/vision/snippets/face_detection/requirements-test.txt new file mode 100644 index 000000000000..49780e035690 --- /dev/null +++ b/vision/snippets/face_detection/requirements-test.txt @@ -0,0 +1 @@ +pytest==7.2.0 diff --git a/vision/snippets/face_detection/requirements.txt b/vision/snippets/face_detection/requirements.txt new file mode 100644 index 000000000000..9c9ade3be0e8 --- /dev/null +++ b/vision/snippets/face_detection/requirements.txt @@ -0,0 +1,2 @@ +google-cloud-vision==3.1.4 +pillow==9.3.0 diff --git a/vision/snippets/face_detection/resources/face-input.jpg b/vision/snippets/face_detection/resources/face-input.jpg new file mode 100644 index 000000000000..c0ee5580b374 Binary files /dev/null and b/vision/snippets/face_detection/resources/face-input.jpg differ diff --git a/vision/snippets/product_search/create_product_set_test.py b/vision/snippets/product_search/create_product_set_test.py new file mode 100644 index 000000000000..5dc5190c3500 --- /dev/null +++ b/vision/snippets/product_search/create_product_set_test.py @@ -0,0 +1,45 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import uuid + +import pytest + +from product_set_management import ( + create_product_set, delete_product_set, list_product_sets) + + +PROJECT_ID = os.getenv('GOOGLE_CLOUD_PROJECT') +LOCATION = 'us-west1' + +PRODUCT_SET_DISPLAY_NAME = 'fake_product_set_display_name_for_testing' +PRODUCT_SET_ID = 'test_{}'.format(uuid.uuid4()) + + +@pytest.fixture(scope="function", autouse=True) +def teardown(): + yield + + # tear down + delete_product_set(PROJECT_ID, LOCATION, PRODUCT_SET_ID) + + +def test_create_product_set(capsys): + create_product_set( + PROJECT_ID, LOCATION, PRODUCT_SET_ID, + PRODUCT_SET_DISPLAY_NAME) + list_product_sets(PROJECT_ID, LOCATION) + out, _ = capsys.readouterr() + assert PRODUCT_SET_ID in out diff --git a/vision/snippets/product_search/create_product_test.py b/vision/snippets/product_search/create_product_test.py new file mode 100644 index 000000000000..a4e55f14ee4e --- /dev/null +++ b/vision/snippets/product_search/create_product_test.py @@ -0,0 +1,45 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import uuid + +import pytest + +from product_management import create_product, delete_product, list_products + + +PROJECT_ID = os.getenv('GOOGLE_CLOUD_PROJECT') +LOCATION = 'us-west1' + +PRODUCT_DISPLAY_NAME = 'fake_product_display_name_for_testing' +PRODUCT_CATEGORY = 'homegoods' +PRODUCT_ID = 'test_{}'.format(uuid.uuid4()) + + +@pytest.fixture(scope="function", autouse=True) +def teardown(): + yield + + # tear down + delete_product(PROJECT_ID, LOCATION, PRODUCT_ID) + + +def test_create_product(capsys): + create_product( + PROJECT_ID, LOCATION, PRODUCT_ID, + PRODUCT_DISPLAY_NAME, PRODUCT_CATEGORY) + list_products(PROJECT_ID, LOCATION) + out, _ = capsys.readouterr() + assert PRODUCT_ID in out diff --git a/vision/snippets/product_search/import_product_sets.py b/vision/snippets/product_search/import_product_sets.py new file mode 100755 index 000000000000..fe8a880129f6 --- /dev/null +++ b/vision/snippets/product_search/import_product_sets.py @@ -0,0 +1,94 @@ +#!/usr/bin/env python + +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""This application demonstrates how to perform import product sets operations +on Product set in Cloud Vision Product Search. + +For more information, see the tutorial page at +https://cloud.google.com/vision/product-search/docs/ +""" + +import argparse + +# [START vision_product_search_tutorial_import] +from google.cloud import vision +# [END vision_product_search_tutorial_import] + + +# [START vision_product_search_import_product_images] +def import_product_sets(project_id, location, gcs_uri): + """Import images of different products in the product set. + Args: + project_id: Id of the project. + location: A compute region name. + gcs_uri: Google Cloud Storage URI. + Target files must be in Product Search CSV format. + """ + client = vision.ProductSearchClient() + + # A resource that represents Google Cloud Platform location. + location_path = f"projects/{project_id}/locations/{location}" + + # Set the input configuration along with Google Cloud Storage URI + gcs_source = vision.ImportProductSetsGcsSource( + csv_file_uri=gcs_uri) + input_config = vision.ImportProductSetsInputConfig( + gcs_source=gcs_source) + + # Import the product sets from the input URI. + response = client.import_product_sets( + parent=location_path, input_config=input_config) + + print('Processing operation name: {}'.format(response.operation.name)) + # synchronous check of operation status + result = response.result() + print('Processing done.') + + for i, status in enumerate(result.statuses): + print('Status of processing line {} of the csv: {}'.format( + i, status)) + # Check the status of reference image + # `0` is the code for OK in google.rpc.Code. + if status.code == 0: + reference_image = result.reference_images[i] + print(reference_image) + else: + print('Status code not OK: {}'.format(status.message)) +# [END vision_product_search_import_product_images] + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter) + subparsers = parser.add_subparsers(dest='command') + parser.add_argument( + '--project_id', + help='Project id. Required', + required=True) + parser.add_argument( + '--location', + help='Compute region name', + default='us-west1') + + import_product_sets_parser = subparsers.add_parser( + 'import_product_sets', help=import_product_sets.__doc__) + import_product_sets_parser.add_argument('gcs_uri') + + args = parser.parse_args() + + if args.command == 'import_product_sets': + import_product_sets(args.project_id, args.location, args.gcs_uri) diff --git a/vision/snippets/product_search/import_product_sets_test.py b/vision/snippets/product_search/import_product_sets_test.py new file mode 100644 index 000000000000..d9d27ff794d3 --- /dev/null +++ b/vision/snippets/product_search/import_product_sets_test.py @@ -0,0 +1,79 @@ +# Copyright 2016 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
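The import_product_sets sample above expects gcs_uri to point at a CSV file in the Product Search bulk-import format (image URI, image ID, product set ID, product ID, category, display name, labels, bounding polygon), which is the same format the test below builds on the fly. A hedged sketch of preparing such a file and running the import, where my-project, my-bucket, and the product and set ids are placeholders, and the image is the public sample used elsewhere in these tests:

.. code-block:: python

    from google.cloud import storage

    from import_product_sets import import_product_sets

    PROJECT_ID = 'my-project'   # placeholder project id
    BUCKET = 'my-bucket'        # placeholder bucket you own

    # One CSV row describing a single reference image for one product.
    csv_row = (
        '"gs://cloud-samples-data/vision/product_search/shoes_1.jpg",'
        '"shoes_1.jpg","my_product_set","my_product_1","apparel",,'
        '"style=womens","0.1,0.1,0.9,0.1,0.9,0.9,0.1,0.9"'
    )

    # Upload the CSV to Cloud Storage, then kick off the bulk import.
    bucket = storage.Client(project=PROJECT_ID).get_bucket(BUCKET)
    bucket.blob('vision/import.csv').upload_from_string(csv_row)
    import_product_sets(
        PROJECT_ID, 'us-west1', 'gs://{}/vision/import.csv'.format(BUCKET))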
+ +import os +import uuid + +from google.cloud import storage +import pytest + +from import_product_sets import import_product_sets +from product_in_product_set_management import list_products_in_product_set +from product_management import delete_product, list_products +from product_set_management import delete_product_set, list_product_sets +from reference_image_management import list_reference_images + + +PROJECT_ID = os.getenv('GOOGLE_CLOUD_PROJECT') +LOCATION = 'us-west1' + +FILENAME = uuid.uuid4() +GCS_URI = 'gs://{}/vision/{}.csv'.format(PROJECT_ID, FILENAME) +PRODUCT_SET_DISPLAY_NAME = 'fake_product_set_display_name_for_testing' +PRODUCT_SET_ID = 'test_{}'.format(uuid.uuid4()) +PRODUCT_ID_1 = 'test_{}'.format(uuid.uuid4()) +IMAGE_URI_1 = 'shoes_1.jpg' + + +@pytest.fixture(scope="function", autouse=True) +def setup_teardown(): + # Create the product set csv file locally and upload it to GCS + # This is so that there is a unique product set ID for all python version + # tests. + client = storage.Client(project=PROJECT_ID) + bucket = client.get_bucket(PROJECT_ID) + blob = storage.Blob("vision/{}.csv".format(FILENAME), bucket) + blob.upload_from_string( + '"gs://cloud-samples-data/vision/product_search/shoes_1.jpg",' + + '"{}",'.format(IMAGE_URI_1) + + '"{}",'.format(PRODUCT_SET_ID) + + '"{}",'.format(PRODUCT_ID_1) + + '"apparel",,"style=womens","0.1,0.1,0.9,0.1,0.9,0.9,0.1,0.9"') + + yield + + delete_product(PROJECT_ID, LOCATION, PRODUCT_ID_1) + delete_product_set(PROJECT_ID, LOCATION, PRODUCT_SET_ID) + # Delete the created file + blob.delete(client) + + +def test_import_product_sets(capsys): + import_product_sets(PROJECT_ID, LOCATION, GCS_URI) + + list_product_sets(PROJECT_ID, LOCATION) + out, _ = capsys.readouterr() + assert PRODUCT_SET_ID in out + + list_products(PROJECT_ID, LOCATION) + out, _ = capsys.readouterr() + assert PRODUCT_ID_1 in out + + list_products_in_product_set(PROJECT_ID, LOCATION, PRODUCT_SET_ID) + out, _ = capsys.readouterr() + assert PRODUCT_ID_1 in out + + list_reference_images(PROJECT_ID, LOCATION, PRODUCT_ID_1) + out, _ = capsys.readouterr() + assert IMAGE_URI_1 in out diff --git a/vision/snippets/product_search/product_in_product_set_management.py b/vision/snippets/product_search/product_in_product_set_management.py new file mode 100755 index 000000000000..e620b35e3447 --- /dev/null +++ b/vision/snippets/product_search/product_in_product_set_management.py @@ -0,0 +1,208 @@ +#!/usr/bin/env python + +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""This application demonstrates how to perform create operations +on Product set in Cloud Vision Product Search. 
+ +For more information, see the tutorial page at +https://cloud.google.com/vision/product-search/docs/ +""" + +import argparse + +# [START vision_product_search_add_product_to_product_set] +# [START vision_product_search_remove_product_from_product_set] +# [START vision_product_search_purge_products_in_product_set] +from google.cloud import vision + +# [END vision_product_search_add_product_to_product_set] +# [END vision_product_search_remove_product_from_product_set] +# [END vision_product_search_purge_products_in_product_set] + + +# [START vision_product_search_add_product_to_product_set] +def add_product_to_product_set( + project_id, location, product_id, product_set_id): + """Add a product to a product set. + Args: + project_id: Id of the project. + location: A compute region name. + product_id: Id of the product. + product_set_id: Id of the product set. + """ + client = vision.ProductSearchClient() + + # Get the full path of the product set. + product_set_path = client.product_set_path( + project=project_id, location=location, + product_set=product_set_id) + + # Get the full path of the product. + product_path = client.product_path( + project=project_id, location=location, product=product_id) + + # Add the product to the product set. + client.add_product_to_product_set( + name=product_set_path, product=product_path) + print('Product added to product set.') +# [END vision_product_search_add_product_to_product_set] + + +# [START vision_product_search_list_products_in_product_set] +def list_products_in_product_set( + project_id, location, product_set_id): + """List all products in a product set. + Args: + project_id: Id of the project. + location: A compute region name. + product_set_id: Id of the product set. + """ + client = vision.ProductSearchClient() + + # Get the full path of the product set. + product_set_path = client.product_set_path( + project=project_id, location=location, + product_set=product_set_id) + + # List all the products available in the product set. + products = client.list_products_in_product_set(name=product_set_path) + + # Display the product information. + for product in products: + print('Product name: {}'.format(product.name)) + print('Product id: {}'.format(product.name.split('/')[-1])) + print('Product display name: {}'.format(product.display_name)) + print('Product description: {}'.format(product.description)) + print('Product category: {}'.format(product.product_category)) + print('Product labels: {}'.format(product.product_labels)) +# [END vision_product_search_list_products_in_product_set] + + +# [START vision_product_search_remove_product_from_product_set] +def remove_product_from_product_set( + project_id, location, product_id, product_set_id): + """Remove a product from a product set. + Args: + project_id: Id of the project. + location: A compute region name. + product_id: Id of the product. + product_set_id: Id of the product set. + """ + client = vision.ProductSearchClient() + + # Get the full path of the product set. + product_set_path = client.product_set_path( + project=project_id, location=location, + product_set=product_set_id) + + # Get the full path of the product. + product_path = client.product_path( + project=project_id, location=location, product=product_id) + + # Remove the product from the product set. 
+ client.remove_product_from_product_set( + name=product_set_path, product=product_path) + print('Product removed from product set.') +# [END vision_product_search_remove_product_from_product_set] + + +# [START vision_product_search_purge_products_in_product_set] +def purge_products_in_product_set( + project_id, location, product_set_id, force): + """Delete all products in a product set. + Args: + project_id: Id of the project. + location: A compute region name. + product_set_id: Id of the product set. + force: Perform the purge only when force is set to True. + """ + client = vision.ProductSearchClient() + + parent = f"projects/{project_id}/locations/{location}" + + product_set_purge_config = vision.ProductSetPurgeConfig( + product_set_id=product_set_id) + + # The purge operation is async. + operation = client.purge_products(request={ + "parent": parent, + "product_set_purge_config": product_set_purge_config, + # The operation is irreversible and removes multiple products. + # The user is required to pass in force=True to actually perform the + # purge. + # If force is not set to True, the service raises an exception. + "force": force + }) + + operation.result(timeout=500) + + print('Deleted products in product set.') +# [END vision_product_search_purge_products_in_product_set] + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter) + subparsers = parser.add_subparsers(dest='command') + parser.add_argument( + '--project_id', + help='Project id. Required', + required=True) + parser.add_argument( + '--location', + help='Compute region name', + default='us-west1') + + add_product_to_product_set_parser = subparsers.add_parser( + 'add_product_to_product_set', help=add_product_to_product_set.__doc__) + add_product_to_product_set_parser.add_argument('product_id') + add_product_to_product_set_parser.add_argument('product_set_id') + + list_products_in_product_set_parser = subparsers.add_parser( + 'list_products_in_product_set', + help=list_products_in_product_set.__doc__) + list_products_in_product_set_parser.add_argument('product_set_id') + + remove_product_from_product_set_parser = subparsers.add_parser( + 'remove_product_from_product_set', + help=remove_product_from_product_set.__doc__) + remove_product_from_product_set_parser.add_argument('product_id') + remove_product_from_product_set_parser.add_argument('product_set_id') + + purge_products_in_product_set_parser = subparsers.add_parser( + 'purge_products_in_product_set', + help=purge_products_in_product_set.__doc__) + purge_products_in_product_set_parser.add_argument('product_set_id') + purge_products_in_product_set_parser.add_argument( + '--force', action='store_true') + + args = parser.parse_args() + + if args.command == 'add_product_to_product_set': + add_product_to_product_set( + args.project_id, args.location, args.product_id, + args.product_set_id) + elif args.command == 'list_products_in_product_set': + list_products_in_product_set( + args.project_id, args.location, args.product_set_id) + elif args.command == 'remove_product_from_product_set': + remove_product_from_product_set( + args.project_id, args.location, args.product_id, + args.product_set_id) + elif args.command == 'purge_products_in_product_set': + purge_products_in_product_set( + args.project_id, args.location, args.product_set_id, args.force) diff --git a/vision/snippets/product_search/product_in_product_set_management_test.py 
b/vision/snippets/product_search/product_in_product_set_management_test.py new file mode 100644 index 000000000000..653defb9f45f --- /dev/null +++ b/vision/snippets/product_search/product_in_product_set_management_test.py @@ -0,0 +1,92 @@ +# Copyright 2016 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import uuid + +import pytest + +from product_in_product_set_management import ( + add_product_to_product_set, list_products_in_product_set, + purge_products_in_product_set, remove_product_from_product_set) +from product_management import create_product, delete_product, list_products +from product_set_management import ( + create_product_set, delete_product_set) + + +PROJECT_ID = os.getenv('GOOGLE_CLOUD_PROJECT') +LOCATION = 'us-west1' + +PRODUCT_SET_DISPLAY_NAME = 'fake_product_set_display_name_for_testing' + +PRODUCT_DISPLAY_NAME = 'fake_product_display_name_for_testing' +PRODUCT_CATEGORY = 'homegoods' + + +@pytest.fixture(scope="function") +def test_resources(): + # set up + product_set_id = f'test_set_{uuid.uuid4()}' + product_id = f'test_product_{uuid.uuid4()}' + create_product_set( + PROJECT_ID, LOCATION, product_set_id, PRODUCT_SET_DISPLAY_NAME) + create_product( + PROJECT_ID, LOCATION, product_id, + PRODUCT_DISPLAY_NAME, PRODUCT_CATEGORY) + + yield product_set_id, product_id + + # tear down + delete_product(PROJECT_ID, LOCATION, product_id) + delete_product_set(PROJECT_ID, LOCATION, product_set_id) + + +def test_add_product_to_product_set(capsys, test_resources): + product_set_id, product_id = test_resources + add_product_to_product_set( + PROJECT_ID, LOCATION, product_id, product_set_id) + list_products_in_product_set(PROJECT_ID, LOCATION, product_set_id) + out, _ = capsys.readouterr() + assert 'Product id: {}'.format(product_id) in out + + +def test_remove_product_from_product_set(capsys, test_resources): + product_set_id, product_id = test_resources + add_product_to_product_set( + PROJECT_ID, LOCATION, product_id, product_set_id) + list_products_in_product_set(PROJECT_ID, LOCATION, product_set_id) + out, _ = capsys.readouterr() + assert 'Product id: {}'.format(product_id) in out + + remove_product_from_product_set( + PROJECT_ID, LOCATION, product_id, product_set_id) + list_products_in_product_set(PROJECT_ID, LOCATION, product_set_id) + out, _ = capsys.readouterr() + assert 'Product id: {}'.format(product_id) not in out + + +def test_purge_products_in_product_set(capsys, test_resources): + product_set_id, product_id = test_resources + add_product_to_product_set( + PROJECT_ID, LOCATION, product_id, product_set_id) + list_products(PROJECT_ID, LOCATION) + out, _ = capsys.readouterr() + assert 'Product id: {}'.format(product_id) in out + + purge_products_in_product_set( + PROJECT_ID, LOCATION, product_set_id, force=True) + + list_products(PROJECT_ID, LOCATION) + out, _ = capsys.readouterr() + assert 'Product id: {}'.format(product_id) not in out diff --git a/vision/snippets/product_search/product_management.py b/vision/snippets/product_search/product_management.py 
new file mode 100755 index 000000000000..f28a90bcd9cb --- /dev/null +++ b/vision/snippets/product_search/product_management.py @@ -0,0 +1,274 @@ +#!/usr/bin/env python + +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""This application demonstrates how to perform basic operations on Product +in Cloud Vision Product Search. + +For more information, see the tutorial page at +https://cloud.google.com/vision/product-search/docs/ +""" + +import argparse + +# [START vision_product_search_create_product] +# [START vision_product_search_delete_product] +# [START vision_product_search_list_products] +# [START vision_product_search_get_product] +# [START vision_product_search_update_product_labels] +# [START vision_product_search_purge_orphan_products] +from google.cloud import vision +from google.protobuf import field_mask_pb2 as field_mask + +# [END vision_product_search_create_product] +# [END vision_product_search_delete_product] +# [END vision_product_search_list_products] +# [END vision_product_search_get_product] +# [END vision_product_search_update_product_labels] +# [END vision_product_search_purge_orphan_products] + + +# [START vision_product_search_create_product] +def create_product( + project_id, location, product_id, product_display_name, + product_category): + """Create one product. + Args: + project_id: Id of the project. + location: A compute region name. + product_id: Id of the product. + product_display_name: Display name of the product. + product_category: Category of the product. + """ + client = vision.ProductSearchClient() + + # A resource that represents Google Cloud Platform location. + location_path = f"projects/{project_id}/locations/{location}" + + # Create a product with the product specification in the region. + # Set product display name and product category. + product = vision.Product( + display_name=product_display_name, + product_category=product_category) + + # The response is the product with the `name` field populated. + response = client.create_product( + parent=location_path, + product=product, + product_id=product_id) + + # Display the product information. + print('Product name: {}'.format(response.name)) +# [END vision_product_search_create_product] + + +# [START vision_product_search_list_products] +def list_products(project_id, location): + """List all products. + Args: + project_id: Id of the project. + location: A compute region name. + """ + client = vision.ProductSearchClient() + + # A resource that represents Google Cloud Platform location. + location_path = f"projects/{project_id}/locations/{location}" + + # List all the products available in the region. + products = client.list_products(parent=location_path) + + # Display the product information. 
+ for product in products: + print('Product name: {}'.format(product.name)) + print('Product id: {}'.format(product.name.split('/')[-1])) + print('Product display name: {}'.format(product.display_name)) + print('Product description: {}'.format(product.description)) + print('Product category: {}'.format(product.product_category)) + print('Product labels: {}\n'.format(product.product_labels)) +# [END vision_product_search_list_products] + + +# [START vision_product_search_get_product] +def get_product(project_id, location, product_id): + """Get information about a product. + Args: + project_id: Id of the project. + location: A compute region name. + product_id: Id of the product. + """ + client = vision.ProductSearchClient() + + # Get the full path of the product. + product_path = client.product_path( + project=project_id, location=location, product=product_id) + + # Get complete detail of the product. + product = client.get_product(name=product_path) + + # Display the product information. + print('Product name: {}'.format(product.name)) + print('Product id: {}'.format(product.name.split('/')[-1])) + print('Product display name: {}'.format(product.display_name)) + print('Product description: {}'.format(product.description)) + print('Product category: {}'.format(product.product_category)) + print('Product labels: {}'.format(product.product_labels)) +# [END vision_product_search_get_product] + + +# [START vision_product_search_update_product_labels] +def update_product_labels( + project_id, location, product_id, key, value): + """Update the product labels. + Args: + project_id: Id of the project. + location: A compute region name. + product_id: Id of the product. + key: The key of the label. + value: The value of the label. + """ + client = vision.ProductSearchClient() + + # Get the name of the product. + product_path = client.product_path( + project=project_id, location=location, product=product_id) + + # Set product name, product label and product display name. + # Multiple labels are also supported. + key_value = vision.Product.KeyValue(key=key, value=value) + product = vision.Product( + name=product_path, + product_labels=[key_value]) + + # Updating only the product_labels field here. + update_mask = field_mask.FieldMask(paths=['product_labels']) + + # This overwrites the product_labels. + updated_product = client.update_product( + product=product, update_mask=update_mask) + + # Display the updated product information. + print('Product name: {}'.format(updated_product.name)) + print('Updated product labels: {}'.format(product.product_labels)) +# [END vision_product_search_update_product_labels] + + +# [START vision_product_search_delete_product] +def delete_product(project_id, location, product_id): + """Delete the product and all its reference images. + Args: + project_id: Id of the project. + location: A compute region name. + product_id: Id of the product. + """ + client = vision.ProductSearchClient() + + # Get the full path of the product. + product_path = client.product_path( + project=project_id, location=location, product=product_id) + + # Delete a product. + client.delete_product(name=product_path) + print('Product deleted.') +# [END vision_product_search_delete_product] + + +# [START vision_product_search_purge_orphan_products] +def purge_orphan_products(project_id, location, force): + """Delete all products not in any product sets. + Args: + project_id: Id of the project. + location: A compute region name. 
+ """ + client = vision.ProductSearchClient() + + parent = f"projects/{project_id}/locations/{location}" + + # The purge operation is async. + operation = client.purge_products(request={ + "parent": parent, + "delete_orphan_products": True, + # The operation is irreversible and removes multiple products. + # The user is required to pass in force=True to actually perform the + # purge. + # If force is not set to True, the service raises an exception. + "force": force + }) + + operation.result(timeout=500) + + print('Orphan products deleted.') +# [END vision_product_search_purge_orphan_products] + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter) + parser.add_argument( + '--project_id', + help='Project id. Required', + required=True) + parser.add_argument( + '--location', + help='Compute region name', + default='us-west1') + + subparsers = parser.add_subparsers(dest='command') + + create_product_parser = subparsers.add_parser( + 'create_product', help=create_product.__doc__) + create_product_parser.add_argument('product_id') + create_product_parser.add_argument('product_display_name') + create_product_parser.add_argument('product_category') + + list_products_parser = subparsers.add_parser( + 'list_products', help=list_products.__doc__) + + get_product_parser = subparsers.add_parser( + 'get_product', help=get_product.__doc__) + get_product_parser.add_argument('product_id') + + update_product_labels_parser = subparsers.add_parser( + 'update_product_labels', help=update_product_labels.__doc__) + update_product_labels_parser.add_argument('product_id') + update_product_labels_parser.add_argument('key') + update_product_labels_parser.add_argument('value') + + delete_product_parser = subparsers.add_parser( + 'delete_product', help=delete_product.__doc__) + delete_product_parser.add_argument('product_id') + + purge_orphan_products_parser = subparsers.add_parser( + 'purge_orphan_products', help=purge_orphan_products.__doc__) + purge_orphan_products_parser.add_argument('--force', action='store_true') + + args = parser.parse_args() + + if args.command == 'create_product': + create_product( + args.project_id, args.location, args.product_id, + args.product_display_name, args.product_category) + elif args.command == 'list_products': + list_products(args.project_id, args.location) + elif args.command == 'get_product': + get_product(args.project_id, args.location, args.product_id) + elif args.command == 'update_product_labels': + update_product_labels( + args.project_id, args.location, args.product_id, + args.key, args.value) + elif args.command == 'delete_product': + delete_product(args.project_id, args.location, args.product_id) + elif args.command == 'purge_orphan_products': + purge_orphan_products(args.project_id, args.location, args.force) diff --git a/vision/snippets/product_search/product_management_test.py b/vision/snippets/product_search/product_management_test.py new file mode 100644 index 000000000000..22fa20a2e573 --- /dev/null +++ b/vision/snippets/product_search/product_management_test.py @@ -0,0 +1,76 @@ +# Copyright 2016 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import uuid + +import pytest + +from product_management import ( + create_product, delete_product, list_products, + purge_orphan_products, update_product_labels) + + +PROJECT_ID = os.getenv('GOOGLE_CLOUD_PROJECT') +LOCATION = 'us-west1' + +PRODUCT_DISPLAY_NAME = 'fake_product_display_name_for_testing' +PRODUCT_CATEGORY = 'homegoods' +PRODUCT_ID = 'test_{}'.format(uuid.uuid4()) +KEY = 'fake_key_for_testing' +VALUE = 'fake_value_for_testing' + + +@pytest.fixture(scope="function", autouse=True) +def setup_teardown(): + # set up + create_product( + PROJECT_ID, LOCATION, PRODUCT_ID, + PRODUCT_DISPLAY_NAME, PRODUCT_CATEGORY) + + yield None + + # tear down + delete_product(PROJECT_ID, LOCATION, PRODUCT_ID) + + +def test_delete_product(capsys): + list_products(PROJECT_ID, LOCATION) + out, _ = capsys.readouterr() + assert PRODUCT_ID in out + + delete_product(PROJECT_ID, LOCATION, PRODUCT_ID) + + list_products(PROJECT_ID, LOCATION) + out, _ = capsys.readouterr() + assert PRODUCT_ID not in out + + +def test_update_product_labels(capsys): + update_product_labels(PROJECT_ID, LOCATION, PRODUCT_ID, KEY, VALUE) + out, _ = capsys.readouterr() + assert KEY in out + assert VALUE in out + + +def test_purge_orphan_products(capsys): + list_products(PROJECT_ID, LOCATION) + out, _ = capsys.readouterr() + assert PRODUCT_ID in out + + purge_orphan_products(PROJECT_ID, LOCATION, force=True) + + list_products(PROJECT_ID, LOCATION) + out, _ = capsys.readouterr() + assert PRODUCT_ID not in out diff --git a/vision/snippets/product_search/product_search.py b/vision/snippets/product_search/product_search.py new file mode 100755 index 000000000000..576aa264ebf3 --- /dev/null +++ b/vision/snippets/product_search/product_search.py @@ -0,0 +1,205 @@ +#!/usr/bin/env python + +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""This tutorial demonstrates how users query the product set with their +own images and find the products similer to the image using the Cloud +Vision Product Search API. 
+ +For more information, see the tutorial page at +https://cloud.google.com/vision/product-search/docs/ +""" + +import argparse + +# [START vision_product_search_get_similar_products] +# [START vision_product_search_get_similar_products_gcs] +from google.cloud import vision + +# [END vision_product_search_get_similar_products] +# [END vision_product_search_get_similar_products_gcs] + + +# [START vision_product_search_get_similar_products] +def get_similar_products_file( + project_id, + location, + product_set_id, + product_category, + file_path, + filter, + max_results +): + """Search similar products to image. + Args: + project_id: Id of the project. + location: A compute region name. + product_set_id: Id of the product set. + product_category: Category of the product. + file_path: Local file path of the image to be searched. + filter: Condition to be applied on the labels. + Example for filter: (color = red OR color = blue) AND style = kids + It will search on all products with the following labels: + color:red AND style:kids + color:blue AND style:kids + max_results: The maximum number of results (matches) to return. If omitted, all results are returned. + """ + # product_search_client is needed only for its helper methods. + product_search_client = vision.ProductSearchClient() + image_annotator_client = vision.ImageAnnotatorClient() + + # Read the image as a stream of bytes. + with open(file_path, 'rb') as image_file: + content = image_file.read() + + # Create annotate image request along with product search feature. + image = vision.Image(content=content) + + # product search specific parameters + product_set_path = product_search_client.product_set_path( + project=project_id, location=location, + product_set=product_set_id) + product_search_params = vision.ProductSearchParams( + product_set=product_set_path, + product_categories=[product_category], + filter=filter) + image_context = vision.ImageContext( + product_search_params=product_search_params) + + # Search products similar to the image. + response = image_annotator_client.product_search( + image, + image_context=image_context, + max_results=max_results + ) + + index_time = response.product_search_results.index_time + print('Product set index time: ') + print(index_time) + + results = response.product_search_results.results + + print('Search results:') + for result in results: + product = result.product + + print('Score(Confidence): {}'.format(result.score)) + print('Image name: {}'.format(result.image)) + + print('Product name: {}'.format(product.name)) + print('Product display name: {}'.format( + product.display_name)) + print('Product description: {}\n'.format(product.description)) + print('Product labels: {}\n'.format(product.product_labels)) +# [END vision_product_search_get_similar_products] + + +# [START vision_product_search_get_similar_products_gcs] +def get_similar_products_uri( + project_id, location, product_set_id, product_category, + image_uri, filter): + """Search similar products to image. + Args: + project_id: Id of the project. + location: A compute region name. + product_set_id: Id of the product set. + product_category: Category of the product. + image_uri: Cloud Storage location of image to be searched. + filter: Condition to be applied on the labels. + Example for filter: (color = red OR color = blue) AND style = kids + It will search on all products with the following labels: + color:red AND style:kids + color:blue AND style:kids + """ + # product_search_client is needed only for its helper methods. 
+    product_search_client = vision.ProductSearchClient()
+    image_annotator_client = vision.ImageAnnotatorClient()
+
+    # Create annotate image request along with product search feature.
+    image_source = vision.ImageSource(image_uri=image_uri)
+    image = vision.Image(source=image_source)
+
+    # product search specific parameters
+    product_set_path = product_search_client.product_set_path(
+        project=project_id, location=location,
+        product_set=product_set_id)
+    product_search_params = vision.ProductSearchParams(
+        product_set=product_set_path,
+        product_categories=[product_category],
+        filter=filter)
+    image_context = vision.ImageContext(
+        product_search_params=product_search_params)
+
+    # Search products similar to the image.
+    response = image_annotator_client.product_search(
+        image, image_context=image_context)
+
+    index_time = response.product_search_results.index_time
+    print('Product set index time: ')
+    print(index_time)
+
+    results = response.product_search_results.results
+
+    print('Search results:')
+    for result in results:
+        product = result.product
+
+        print('Score(Confidence): {}'.format(result.score))
+        print('Image name: {}'.format(result.image))
+
+        print('Product name: {}'.format(product.name))
+        print('Product display name: {}'.format(
+            product.display_name))
+        print('Product description: {}\n'.format(product.description))
+        print('Product labels: {}\n'.format(product.product_labels))
+# [END vision_product_search_get_similar_products_gcs]
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(
+        description=__doc__,
+        formatter_class=argparse.RawDescriptionHelpFormatter)
+    subparsers = parser.add_subparsers(dest='command')
+    parser.add_argument(
+        '--project_id',
+        help='Project id. Required',
+        required=True)
+    parser.add_argument(
+        '--location',
+        help='Compute region name',
+        default='us-west1')
+    parser.add_argument('--product_set_id')
+    parser.add_argument('--product_category')
+    parser.add_argument('--filter', default='')
+    parser.add_argument('--max_results', default='')
+
+    get_similar_products_file_parser = subparsers.add_parser(
+        'get_similar_products_file', help=get_similar_products_file.__doc__)
+    get_similar_products_file_parser.add_argument('--file_path')
+
+    get_similar_products_uri_parser = subparsers.add_parser(
+        'get_similar_products_uri', help=get_similar_products_uri.__doc__)
+    get_similar_products_uri_parser.add_argument('--image_uri')
+
+    args = parser.parse_args()
+
+    if args.command == 'get_similar_products_file':
+        get_similar_products_file(
+            args.project_id, args.location, args.product_set_id,
+            args.product_category, args.file_path, args.filter,
+            args.max_results)
+    elif args.command == 'get_similar_products_uri':
+        get_similar_products_uri(
+            args.project_id, args.location, args.product_set_id,
+            args.product_category, args.image_uri, args.filter)
diff --git a/vision/snippets/product_search/product_search_test.py b/vision/snippets/product_search/product_search_test.py
new file mode 100644
index 000000000000..1f8eb72b6d93
--- /dev/null
+++ b/vision/snippets/product_search/product_search_test.py
@@ -0,0 +1,70 @@
+# Copyright 2016 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os + +import pytest + +from product_search import get_similar_products_file, get_similar_products_uri + + +PROJECT_ID = os.getenv('GOOGLE_CLOUD_PROJECT') +LOCATION = 'us-west1' + +PRODUCT_SET_ID = 'indexed_product_set_id_for_testing' +PRODUCT_CATEGORY = 'apparel' +PRODUCT_ID_1 = 'indexed_product_id_for_testing_1' +PRODUCT_ID_2 = 'indexed_product_id_for_testing_2' + +FILE_PATH_1 = 'resources/shoes_1.jpg' +IMAGE_URI_1 = 'gs://cloud-samples-data/vision/product_search/shoes_1.jpg' +FILTER = 'style=womens' +MAX_RESULTS = 6 + + +@pytest.mark.flaky(max_runs=5, min_passes=1) +def test_get_similar_products_file(capsys): + get_similar_products_file( + PROJECT_ID, LOCATION, PRODUCT_SET_ID, PRODUCT_CATEGORY, FILE_PATH_1, + '', MAX_RESULTS) + out, _ = capsys.readouterr() + assert PRODUCT_ID_1 in out + assert PRODUCT_ID_2 in out + + +def test_get_similar_products_uri(capsys): + get_similar_products_uri( + PROJECT_ID, LOCATION, PRODUCT_SET_ID, PRODUCT_CATEGORY, IMAGE_URI_1, + '') + out, _ = capsys.readouterr() + assert PRODUCT_ID_1 in out + assert PRODUCT_ID_2 in out + + +def test_get_similar_products_file_with_filter(capsys): + get_similar_products_file( + PROJECT_ID, LOCATION, PRODUCT_SET_ID, PRODUCT_CATEGORY, FILE_PATH_1, + FILTER, MAX_RESULTS) + out, _ = capsys.readouterr() + assert PRODUCT_ID_1 in out + assert PRODUCT_ID_2 not in out + + +def test_get_similar_products_uri_with_filter(capsys): + get_similar_products_uri( + PROJECT_ID, LOCATION, PRODUCT_SET_ID, PRODUCT_CATEGORY, IMAGE_URI_1, + FILTER) + out, _ = capsys.readouterr() + assert PRODUCT_ID_1 in out + assert PRODUCT_ID_2 not in out diff --git a/vision/snippets/product_search/product_set_management.py b/vision/snippets/product_search/product_set_management.py new file mode 100755 index 000000000000..bafe7a515395 --- /dev/null +++ b/vision/snippets/product_search/product_set_management.py @@ -0,0 +1,183 @@ +#!/usr/bin/env python + +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""This application demonstrates how to perform operations +on Product set in Cloud Vision Product Search. 
+ +For more information, see the tutorial page at +https://cloud.google.com/vision/product-search/docs/ +""" + +import argparse + +# [START vision_product_search_delete_product_set] +# [START vision_product_search_list_product_sets] +# [START vision_product_search_get_product_set] +# [START vision_product_search_create_product_set] +from google.cloud import vision + +# [END vision_product_search_delete_product_set] +# [END vision_product_search_list_product_sets] +# [END vision_product_search_get_product_set] +# [END vision_product_search_create_product_set] + + +# [START vision_product_search_create_product_set] +def create_product_set( + project_id, location, product_set_id, product_set_display_name): + """Create a product set. + Args: + project_id: Id of the project. + location: A compute region name. + product_set_id: Id of the product set. + product_set_display_name: Display name of the product set. + """ + client = vision.ProductSearchClient() + + # A resource that represents Google Cloud Platform location. + location_path = f"projects/{project_id}/locations/{location}" + + # Create a product set with the product set specification in the region. + product_set = vision.ProductSet( + display_name=product_set_display_name) + + # The response is the product set with `name` populated. + response = client.create_product_set( + parent=location_path, + product_set=product_set, + product_set_id=product_set_id) + + # Display the product set information. + print('Product set name: {}'.format(response.name)) +# [END vision_product_search_create_product_set] + + +# [START vision_product_search_list_product_sets] +def list_product_sets(project_id, location): + """List all product sets. + Args: + project_id: Id of the project. + location: A compute region name. + """ + client = vision.ProductSearchClient() + + # A resource that represents Google Cloud Platform location. + location_path = f"projects/{project_id}/locations/{location}" + + # List all the product sets available in the region. + product_sets = client.list_product_sets(parent=location_path) + + # Display the product set information. + for product_set in product_sets: + print('Product set name: {}'.format(product_set.name)) + print('Product set id: {}'.format(product_set.name.split('/')[-1])) + print('Product set display name: {}'.format(product_set.display_name)) + print('Product set index time: ') + print(product_set.index_time) +# [END vision_product_search_list_product_sets] + + +# [START vision_product_search_get_product_set] +def get_product_set(project_id, location, product_set_id): + """Get info about the product set. + Args: + project_id: Id of the project. + location: A compute region name. + product_set_id: Id of the product set. + """ + client = vision.ProductSearchClient() + + # Get the full path of the product set. + product_set_path = client.product_set_path( + project=project_id, location=location, + product_set=product_set_id) + + # Get complete detail of the product set. + product_set = client.get_product_set(name=product_set_path) + + # Display the product set information. 
+ print('Product set name: {}'.format(product_set.name)) + print('Product set id: {}'.format(product_set.name.split('/')[-1])) + print('Product set display name: {}'.format(product_set.display_name)) + print('Product set index time: ') + print(product_set.index_time) +# [END vision_product_search_get_product_set] + + +# [START vision_product_search_delete_product_set] +def delete_product_set(project_id, location, product_set_id): + """Delete a product set. + Args: + project_id: Id of the project. + location: A compute region name. + product_set_id: Id of the product set. + """ + client = vision.ProductSearchClient() + + # Get the full path of the product set. + product_set_path = client.product_set_path( + project=project_id, location=location, + product_set=product_set_id) + + # Delete the product set. + client.delete_product_set(name=product_set_path) + print('Product set deleted.') +# [END vision_product_search_delete_product_set] + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter) + subparsers = parser.add_subparsers(dest='command') + parser.add_argument( + '--project_id', + help='Project id. Required', + required=True) + parser.add_argument( + '--location', + help='Compute region name', + default='us-west1') + + create_product_set_parser = subparsers.add_parser( + 'create_product_set', help=create_product_set.__doc__) + create_product_set_parser.add_argument('product_set_id') + create_product_set_parser.add_argument('product_set_display_name') + + list_product_sets_parser = subparsers.add_parser( + 'list_product_sets', help=list_product_sets.__doc__) + + get_product_set_parser = subparsers.add_parser( + 'get_product_set', help=get_product_set.__doc__) + get_product_set_parser.add_argument('product_set_id') + + delete_product_set_parser = subparsers.add_parser( + 'delete_product_set', help=delete_product_set.__doc__) + delete_product_set_parser.add_argument('product_set_id') + + args = parser.parse_args() + + if args.command == 'create_product_set': + create_product_set( + args.project_id, args.location, args.product_set_id, + args.product_set_display_name) + elif args.command == 'list_product_sets': + list_product_sets(args.project_id, args.location) + elif args.command == 'get_product_set': + get_product_set(args.project_id, args.location, args.product_set_id) + elif args.command == 'delete_product_set': + delete_product_set( + args.project_id, args.location, args.product_set_id) diff --git a/vision/snippets/product_search/product_set_management_test.py b/vision/snippets/product_search/product_set_management_test.py new file mode 100644 index 000000000000..1e2c146b5397 --- /dev/null +++ b/vision/snippets/product_search/product_set_management_test.py @@ -0,0 +1,47 @@ +# Copyright 2016 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
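Before the product set test below, a brief orientation: the management modules in this directory all follow the same pattern, so the functions can be composed directly from Python as well as through their argparse CLIs. A minimal sketch using placeholder ids ('my-project', 'my_set') and the default us-west1 region; it assumes the Vision API is enabled and default credentials are available:

.. code-block:: python

    from product_set_management import (
        create_product_set, delete_product_set, get_product_set,
        list_product_sets)

    PROJECT_ID = 'my-project'   # placeholder project id
    LOCATION = 'us-west1'

    # Create an empty product set, inspect it, then clean it up again.
    create_product_set(PROJECT_ID, LOCATION, 'my_set', 'My product set')
    get_product_set(PROJECT_ID, LOCATION, 'my_set')
    list_product_sets(PROJECT_ID, LOCATION)
    delete_product_set(PROJECT_ID, LOCATION, 'my_set')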
+ +import os +import uuid + +import pytest + +from product_set_management import ( + create_product_set, delete_product_set, list_product_sets) + + +PROJECT_ID = os.getenv('GOOGLE_CLOUD_PROJECT') +LOCATION = 'us-west1' + +PRODUCT_SET_DISPLAY_NAME = 'fake_product_set_display_name_for_testing' +PRODUCT_SET_ID = 'test_{}'.format(uuid.uuid4()) + + +@pytest.fixture(scope="function", autouse=True) +def setup(): + # set up + create_product_set( + PROJECT_ID, LOCATION, PRODUCT_SET_ID, PRODUCT_SET_DISPLAY_NAME) + + +def test_delete_product_set(capsys): + list_product_sets(PROJECT_ID, LOCATION) + out, _ = capsys.readouterr() + assert PRODUCT_SET_ID in out + + delete_product_set(PROJECT_ID, LOCATION, PRODUCT_SET_ID) + + list_product_sets(PROJECT_ID, LOCATION) + out, _ = capsys.readouterr() + assert PRODUCT_SET_ID not in out diff --git a/vision/snippets/product_search/reference_image_management.py b/vision/snippets/product_search/reference_image_management.py new file mode 100755 index 000000000000..f1b4962b5095 --- /dev/null +++ b/vision/snippets/product_search/reference_image_management.py @@ -0,0 +1,200 @@ +#!/usr/bin/env python + +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""This application demonstrates how to perform basic operations on reference +images in Cloud Vision Product Search. + +For more information, see the tutorial page at +https://cloud.google.com/vision/product-search/docs/ +""" + +import argparse + +# [START vision_product_search_create_reference_image] +# [START vision_product_search_delete_reference_image] +# [START vision_product_search_list_reference_images] +# [START vision_product_search_get_reference_image] +from google.cloud import vision + +# [END vision_product_search_create_reference_image] +# [END vision_product_search_delete_reference_image] +# [END vision_product_search_list_reference_images] +# [END vision_product_search_get_reference_image] + + +# [START vision_product_search_create_reference_image] +def create_reference_image( + project_id, location, product_id, reference_image_id, gcs_uri): + """Create a reference image. + Args: + project_id: Id of the project. + location: A compute region name. + product_id: Id of the product. + reference_image_id: Id of the reference image. + gcs_uri: Google Cloud Storage path of the input image. + """ + client = vision.ProductSearchClient() + + # Get the full path of the product. + product_path = client.product_path( + project=project_id, location=location, product=product_id) + + # Create a reference image. + reference_image = vision.ReferenceImage(uri=gcs_uri) + + # The response is the reference image with `name` populated. + image = client.create_reference_image( + parent=product_path, + reference_image=reference_image, + reference_image_id=reference_image_id) + + # Display the reference image information. 
+ print('Reference image name: {}'.format(image.name)) + print('Reference image uri: {}'.format(image.uri)) +# [END vision_product_search_create_reference_image] + + +# [START vision_product_search_list_reference_images] +def list_reference_images( + project_id, location, product_id): + """List all images in a product. + Args: + project_id: Id of the project. + location: A compute region name. + product_id: Id of the product. + """ + client = vision.ProductSearchClient() + + # Get the full path of the product. + product_path = client.product_path( + project=project_id, location=location, product=product_id) + + # List all the reference images available in the product. + reference_images = client.list_reference_images(parent=product_path) + + # Display the reference image information. + for image in reference_images: + print('Reference image name: {}'.format(image.name)) + print('Reference image id: {}'.format(image.name.split('/')[-1])) + print('Reference image uri: {}'.format(image.uri)) + print('Reference image bounding polygons: {}'.format( + image.bounding_polys)) +# [END vision_product_search_list_reference_images] + + +# [START vision_product_search_get_reference_image] +def get_reference_image( + project_id, location, product_id, reference_image_id): + """Get info about a reference image. + Args: + project_id: Id of the project. + location: A compute region name. + product_id: Id of the product. + reference_image_id: Id of the reference image. + """ + client = vision.ProductSearchClient() + + # Get the full path of the reference image. + reference_image_path = client.reference_image_path( + project=project_id, location=location, product=product_id, + reference_image=reference_image_id) + + # Get complete detail of the reference image. + image = client.get_reference_image(name=reference_image_path) + + # Display the reference image information. + print('Reference image name: {}'.format(image.name)) + print('Reference image id: {}'.format(image.name.split('/')[-1])) + print('Reference image uri: {}'.format(image.uri)) + print('Reference image bounding polygons: {}'.format(image.bounding_polys)) +# [END vision_product_search_get_reference_image] + + +# [START vision_product_search_delete_reference_image] +def delete_reference_image( + project_id, location, product_id, reference_image_id): + """Delete a reference image. + Args: + project_id: Id of the project. + location: A compute region name. + product_id: Id of the product. + reference_image_id: Id of the reference image. + """ + client = vision.ProductSearchClient() + + # Get the full path of the reference image. + reference_image_path = client.reference_image_path( + project=project_id, location=location, product=product_id, + reference_image=reference_image_id) + + # Delete the reference image. + client.delete_reference_image(name=reference_image_path) + print('Reference image deleted from product.') +# [END vision_product_search_delete_reference_image] + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter) + subparsers = parser.add_subparsers(dest='command') + parser.add_argument( + '--project_id', + help='Project id. 
Required', + required=True) + parser.add_argument( + '--location', + help='Compute region name', + default='us-west1') + + create_reference_image_parser = subparsers.add_parser( + 'create_reference_image', help=create_reference_image.__doc__) + create_reference_image_parser.add_argument('product_id') + create_reference_image_parser.add_argument('reference_image_id') + create_reference_image_parser.add_argument('gcs_uri') + + list_reference_images_parser = subparsers.add_parser( + 'list_reference_images', + help=list_reference_images.__doc__) + list_reference_images_parser.add_argument('product_id') + + get_reference_image_parser = subparsers.add_parser( + 'get_reference_image', help=get_reference_image.__doc__) + get_reference_image_parser.add_argument('product_id') + get_reference_image_parser.add_argument('reference_image_id') + + delete_reference_image_parser = subparsers.add_parser( + 'delete_reference_image', help=delete_reference_image.__doc__) + delete_reference_image_parser.add_argument('product_id') + delete_reference_image_parser.add_argument('reference_image_id') + + args = parser.parse_args() + + if args.command == 'create_reference_image': + create_reference_image( + args.project_id, args.location, args.product_id, + args.reference_image_id, args.gcs_uri) + elif args.command == 'list_reference_images': + list_reference_images( + args.project_id, args.location, args.product_id) + elif args.command == 'get_reference_image': + get_reference_image( + args.project_id, args.location, args.product_id, + args.reference_image_id) + elif args.command == 'delete_reference_image': + delete_reference_image( + args.project_id, args.location, args.product_id, + args.reference_image_id) diff --git a/vision/snippets/product_search/reference_image_management_test.py b/vision/snippets/product_search/reference_image_management_test.py new file mode 100644 index 000000000000..5337132acde9 --- /dev/null +++ b/vision/snippets/product_search/reference_image_management_test.py @@ -0,0 +1,70 @@ +# Copyright 2016 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
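+
+"""Tests for reference_image_management.py.
+
+A minimal sketch of how the reference image helpers fit together outside
+of the tests; the project, region, ids and GCS path below are placeholders
+and assume an existing product in that region:
+
+    from reference_image_management import (
+        create_reference_image, get_reference_image,
+        list_reference_images, delete_reference_image)
+
+    create_reference_image(
+        'my-project', 'us-west1', 'my-product', 'image-1',
+        'gs://my-bucket/shoes_1.jpg')
+    get_reference_image('my-project', 'us-west1', 'my-product', 'image-1')
+    list_reference_images('my-project', 'us-west1', 'my-product')
+    delete_reference_image('my-project', 'us-west1', 'my-product', 'image-1')
+"""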
+ +import os +import uuid + +import pytest + +from product_management import create_product, delete_product +from reference_image_management import ( + create_reference_image, delete_reference_image, list_reference_images) + + +PROJECT_ID = os.getenv('GOOGLE_CLOUD_PROJECT') +LOCATION = 'us-west1' + +PRODUCT_DISPLAY_NAME = 'fake_product_display_name_for_testing' +PRODUCT_CATEGORY = 'homegoods' +PRODUCT_ID = 'test_{}'.format(uuid.uuid4()) + +REFERENCE_IMAGE_ID = 'fake_reference_image_id_for_testing' +GCS_URI = 'gs://cloud-samples-data/vision/product_search/shoes_1.jpg' + + +@pytest.fixture(scope="function", autouse=True) +def setup_teardown(): + # set up + create_product( + PROJECT_ID, LOCATION, PRODUCT_ID, + PRODUCT_DISPLAY_NAME, PRODUCT_CATEGORY) + + yield None + + # tear down + delete_product(PROJECT_ID, LOCATION, PRODUCT_ID) + + +def test_create_reference_image(capsys): + create_reference_image( + PROJECT_ID, LOCATION, PRODUCT_ID, REFERENCE_IMAGE_ID, + GCS_URI) + list_reference_images(PROJECT_ID, LOCATION, PRODUCT_ID) + out, _ = capsys.readouterr() + assert REFERENCE_IMAGE_ID in out + + +def test_delete_reference_image(capsys): + create_reference_image( + PROJECT_ID, LOCATION, PRODUCT_ID, REFERENCE_IMAGE_ID, + GCS_URI) + list_reference_images(PROJECT_ID, LOCATION, PRODUCT_ID) + out, _ = capsys.readouterr() + assert REFERENCE_IMAGE_ID in out + + delete_reference_image( + PROJECT_ID, LOCATION, PRODUCT_ID, REFERENCE_IMAGE_ID) + list_reference_images(PROJECT_ID, LOCATION, PRODUCT_ID) + out, _ = capsys.readouterr() + assert REFERENCE_IMAGE_ID not in out diff --git a/vision/snippets/product_search/requirements-test.txt b/vision/snippets/product_search/requirements-test.txt new file mode 100644 index 000000000000..d4c76f02e6d3 --- /dev/null +++ b/vision/snippets/product_search/requirements-test.txt @@ -0,0 +1,2 @@ +pytest==7.2.0 +flaky==3.7.0 \ No newline at end of file diff --git a/vision/snippets/product_search/requirements.txt b/vision/snippets/product_search/requirements.txt new file mode 100644 index 000000000000..b4bac43e7127 --- /dev/null +++ b/vision/snippets/product_search/requirements.txt @@ -0,0 +1,2 @@ +google-cloud-vision==3.1.4 +google-cloud-storage==2.5.0 diff --git a/vision/snippets/product_search/resources/indexed_product_sets.csv b/vision/snippets/product_search/resources/indexed_product_sets.csv new file mode 100644 index 000000000000..329ac2167c7e --- /dev/null +++ b/vision/snippets/product_search/resources/indexed_product_sets.csv @@ -0,0 +1,2 @@ +"gs://cloud-samples-data/vision/product_search/shoes_1.jpg","indexed_product_set_id_for_testing","indexed_product_id_for_testing_1","apparel","style=womens","0.1,0.1,0.9,0.1,0.9,0.9,0.1,0.9" +"gs://cloud-samples-data/vision/product_search/shoes_2.jpg","indexed_product_set_id_for_testing","indexed_product_id_for_testing_2","apparel",, \ No newline at end of file diff --git a/vision/snippets/product_search/resources/product_sets.csv b/vision/snippets/product_search/resources/product_sets.csv new file mode 100644 index 000000000000..68657eed631c --- /dev/null +++ b/vision/snippets/product_search/resources/product_sets.csv @@ -0,0 +1,2 @@ +"gs://cloud-samples-data/vision/product_search/shoes_1.jpg","fake_product_set_id_for_testing","fake_product_id_for_testing_1","apparel","style=womens","0.1,0.1,0.9,0.1,0.9,0.9,0.1,0.9" +"gs://cloud-samples-data/vision/product_search/shoes_2.jpg","fake_product_set_id_for_testing","fake_product_id_for_testing_2","apparel",, \ No newline at end of file diff --git 
a/vision/snippets/product_search/resources/shoes_1.jpg b/vision/snippets/product_search/resources/shoes_1.jpg new file mode 100644 index 000000000000..78318eeff667 Binary files /dev/null and b/vision/snippets/product_search/resources/shoes_1.jpg differ diff --git a/vision/snippets/product_search/resources/shoes_2.jpg b/vision/snippets/product_search/resources/shoes_2.jpg new file mode 100644 index 000000000000..cdfa80dd8991 Binary files /dev/null and b/vision/snippets/product_search/resources/shoes_2.jpg differ diff --git a/vision/snippets/quickstart/README.rst b/vision/snippets/quickstart/README.rst new file mode 100644 index 000000000000..aa4be034e4ca --- /dev/null +++ b/vision/snippets/quickstart/README.rst @@ -0,0 +1,101 @@ +.. This file is automatically generated. Do not edit this file directly. + +Google Cloud Vision API Python Samples +=============================================================================== + +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=vision/cloud-client/quickstart/README.rst + + +This directory contains samples for Google Cloud Vision API. `Google Cloud Vision API`_ allows developers to easily integrate vision detection features within applications, including image labeling, face and landmark detection, optical character recognition (OCR), and tagging of explicit content. + +- See the `migration guide`_ for information about migrating to Python client library v0.25.1. + +.. _migration guide: https://cloud.google.com/vision/docs/python-client-migration + + + + +.. _Google Cloud Vision API: https://cloud.google.com/vision/docs + +Setup +------------------------------------------------------------------------------- + + +Authentication +++++++++++++++ + +This sample requires you to have authentication setup. Refer to the +`Authentication Getting Started Guide`_ for instructions on setting up +credentials for applications. + +.. _Authentication Getting Started Guide: + https://cloud.google.com/docs/authentication/getting-started + +Install Dependencies +++++++++++++++++++++ + +#. Clone python-docs-samples and change directory to the sample directory you want to use. + + .. code-block:: bash + + $ git clone https://github.com/GoogleCloudPlatform/python-docs-samples.git + +#. Install `pip`_ and `virtualenv`_ if you do not already have them. You may want to refer to the `Python Development Environment Setup Guide`_ for Google Cloud Platform for instructions. + + .. _Python Development Environment Setup Guide: + https://cloud.google.com/python/setup + +#. Create a virtualenv. Samples are compatible with Python 2.7 and 3.4+. + + .. code-block:: bash + + $ virtualenv env + $ source env/bin/activate + +#. Install the dependencies needed to run the samples. + + .. code-block:: bash + + $ pip install -r requirements.txt + +.. _pip: https://pip.pypa.io/ +.. _virtualenv: https://virtualenv.pypa.io/ + +Samples +------------------------------------------------------------------------------- + +Quickstart ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +.. 
image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=vision/cloud-client/quickstart/quickstart.py,vision/cloud-client/quickstart/README.rst + + + + +To run this sample: + +.. code-block:: bash + + $ python quickstart.py + + + + +The client library +------------------------------------------------------------------------------- + +This sample uses the `Google Cloud Client Library for Python`_. +You can read the documentation for more details on API usage and use GitHub +to `browse the source`_ and `report issues`_. + +.. _Google Cloud Client Library for Python: + https://googlecloudplatform.github.io/google-cloud-python/ +.. _browse the source: + https://github.com/GoogleCloudPlatform/google-cloud-python +.. _report issues: + https://github.com/GoogleCloudPlatform/google-cloud-python/issues + + +.. _Google Cloud SDK: https://cloud.google.com/sdk/ \ No newline at end of file diff --git a/vision/snippets/quickstart/README.rst.in b/vision/snippets/quickstart/README.rst.in new file mode 100644 index 000000000000..bd650a6cb6f0 --- /dev/null +++ b/vision/snippets/quickstart/README.rst.in @@ -0,0 +1,29 @@ +# This file is used to generate README.rst + +product: + name: Google Cloud Vision API + short_name: Cloud Vision API + url: https://cloud.google.com/vision/docs + description: > + `Google Cloud Vision API`_ allows developers to easily integrate vision + detection features within applications, including image labeling, face and + landmark detection, optical character recognition (OCR), and tagging of + explicit content. + + + - See the `migration guide`_ for information about migrating to Python client library v0.25.1. + + + .. _migration guide: https://cloud.google.com/vision/docs/python-client-migration + +setup: +- auth +- install_deps + +samples: +- name: Quickstart + file: quickstart.py + +cloud_client_library: true + +folder: vision/cloud-client/quickstart \ No newline at end of file diff --git a/vision/snippets/quickstart/quickstart.py b/vision/snippets/quickstart/quickstart.py new file mode 100644 index 000000000000..ce330fa7fe80 --- /dev/null +++ b/vision/snippets/quickstart/quickstart.py @@ -0,0 +1,53 @@ +#!/usr/bin/env python + +# Copyright 2016 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
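+
+"""Google Cloud Vision API Python quickstart.
+
+Labels the bundled local image (resources/wakeupcat.jpg). The same label
+detection call also accepts a remote image; a rough variant, where the GCS
+URI is a placeholder for an image the caller can read:
+
+    from google.cloud import vision
+
+    client = vision.ImageAnnotatorClient()
+    image = vision.Image()
+    image.source.image_uri = 'gs://my-bucket/wakeupcat.jpg'
+    response = client.label_detection(image=image)
+    for label in response.label_annotations:
+        print(label.description)
+"""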
+ + +def run_quickstart(): + # [START vision_quickstart] + import io + import os + + # Imports the Google Cloud client library + # [START vision_python_migration_import] + from google.cloud import vision + # [END vision_python_migration_import] + + # Instantiates a client + # [START vision_python_migration_client] + client = vision.ImageAnnotatorClient() + # [END vision_python_migration_client] + + # The name of the image file to annotate + file_name = os.path.abspath('resources/wakeupcat.jpg') + + # Loads the image into memory + with io.open(file_name, 'rb') as image_file: + content = image_file.read() + + image = vision.Image(content=content) + + # Performs label detection on the image file + response = client.label_detection(image=image) + labels = response.label_annotations + + print('Labels:') + for label in labels: + print(label.description) + # [END vision_quickstart] + + +if __name__ == '__main__': + run_quickstart() diff --git a/vision/snippets/quickstart/quickstart_test.py b/vision/snippets/quickstart/quickstart_test.py new file mode 100644 index 000000000000..fde81685dd3c --- /dev/null +++ b/vision/snippets/quickstart/quickstart_test.py @@ -0,0 +1,21 @@ +# Copyright 2016 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import quickstart + + +def test_quickstart(capsys): + quickstart.run_quickstart() + out, _ = capsys.readouterr() + assert 'Labels' in out diff --git a/vision/snippets/quickstart/requirements-test.txt b/vision/snippets/quickstart/requirements-test.txt new file mode 100644 index 000000000000..49780e035690 --- /dev/null +++ b/vision/snippets/quickstart/requirements-test.txt @@ -0,0 +1 @@ +pytest==7.2.0 diff --git a/vision/snippets/quickstart/requirements.txt b/vision/snippets/quickstart/requirements.txt new file mode 100644 index 000000000000..04e5e9f19008 --- /dev/null +++ b/vision/snippets/quickstart/requirements.txt @@ -0,0 +1 @@ +google-cloud-vision==3.1.4 diff --git a/vision/snippets/quickstart/resources/wakeupcat.jpg b/vision/snippets/quickstart/resources/wakeupcat.jpg new file mode 100644 index 000000000000..139cf461ecae Binary files /dev/null and b/vision/snippets/quickstart/resources/wakeupcat.jpg differ diff --git a/vision/snippets/web/README.rst b/vision/snippets/web/README.rst new file mode 100644 index 000000000000..fe3e900b41e4 --- /dev/null +++ b/vision/snippets/web/README.rst @@ -0,0 +1,136 @@ + +.. This file is automatically generated. Do not edit this file directly. + +Google Cloud Vision API Python Samples +=============================================================================== + +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=vision/cloud-client/web/README.rst + + +This directory contains samples for Google Cloud Vision API. 
`Google Cloud Vision API`_ allows developers to easily integrate vision detection features within applications, including image labeling, face and landmark detection, optical character recognition (OCR), and tagging of explicit content. + +- See the `migration guide`_ for information about migrating to Python client library v0.25.1. + +.. _migration guide: https://cloud.google.com/vision/docs/python-client-migration + + + + +.. _Google Cloud Vision API: https://cloud.google.com/vision/docs + + +Setup +------------------------------------------------------------------------------- + + + +Authentication +++++++++++++++ + +This sample requires you to have authentication setup. Refer to the +`Authentication Getting Started Guide`_ for instructions on setting up +credentials for applications. + +.. _Authentication Getting Started Guide: + https://cloud.google.com/docs/authentication/getting-started + + + + +Install Dependencies +++++++++++++++++++++ + +#. Clone python-docs-samples and change directory to the sample directory you want to use. + + .. code-block:: bash + + $ git clone https://github.com/GoogleCloudPlatform/python-docs-samples.git + +#. Install `pip`_ and `virtualenv`_ if you do not already have them. You may want to refer to the `Python Development Environment Setup Guide`_ for Google Cloud Platform for instructions. + + .. _Python Development Environment Setup Guide: + https://cloud.google.com/python/setup + +#. Create a virtualenv. Samples are compatible with Python 3.6+. + + .. code-block:: bash + + $ virtualenv env + $ source env/bin/activate + +#. Install the dependencies needed to run the samples. + + .. code-block:: bash + + $ pip install -r requirements.txt + +.. _pip: https://pip.pypa.io/ +.. _virtualenv: https://virtualenv.pypa.io/ + + + + + + +Samples +------------------------------------------------------------------------------- + + +Web ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=vision/cloud-client/web/web_detect.py,vision/cloud-client/web/README.rst + + + + +To run this sample: + +.. code-block:: bash + + $ python web_detect.py + + + usage: web_detect.py [-h] image_url + + Demonstrates web detection using the Google Cloud Vision API. + + Example usage: + python web_detect.py https://goo.gl/X4qcB6 + python web_detect.py ../detect/resources/landmark.jpg + python web_detect.py gs://your-bucket/image.png + + positional arguments: + image_url The image to detect, can be web URI, Google Cloud Storage, or + path to local file. + + optional arguments: + -h, --help show this help message and exit + + + + + + + + + +The client library +------------------------------------------------------------------------------- + +This sample uses the `Google Cloud Client Library for Python`_. +You can read the documentation for more details on API usage and use GitHub +to `browse the source`_ and `report issues`_. + +.. _Google Cloud Client Library for Python: + https://googlecloudplatform.github.io/google-cloud-python/ +.. _browse the source: + https://github.com/GoogleCloudPlatform/google-cloud-python +.. _report issues: + https://github.com/GoogleCloudPlatform/google-cloud-python/issues + + + +.. 
_Google Cloud SDK: https://cloud.google.com/sdk/ diff --git a/vision/snippets/web/README.rst.in b/vision/snippets/web/README.rst.in new file mode 100644 index 000000000000..8b8533b52614 --- /dev/null +++ b/vision/snippets/web/README.rst.in @@ -0,0 +1,30 @@ +# This file is used to generate README.rst + +product: + name: Google Cloud Vision API + short_name: Cloud Vision API + url: https://cloud.google.com/vision/docs + description: > + `Google Cloud Vision API`_ allows developers to easily integrate vision + detection features within applications, including image labeling, face and + landmark detection, optical character recognition (OCR), and tagging of + explicit content. + + + - See the `migration guide`_ for information about migrating to Python client library v0.25.1. + + + .. _migration guide: https://cloud.google.com/vision/docs/python-client-migration + +setup: +- auth +- install_deps + +samples: +- name: Web + file: web_detect.py + show_help: True + +cloud_client_library: true + +folder: vision/cloud-client/web \ No newline at end of file diff --git a/vision/snippets/web/requirements-test.txt b/vision/snippets/web/requirements-test.txt new file mode 100644 index 000000000000..03fbf94a92b3 --- /dev/null +++ b/vision/snippets/web/requirements-test.txt @@ -0,0 +1,2 @@ +flaky==3.7.0 +pytest==7.2.0 diff --git a/vision/snippets/web/requirements.txt b/vision/snippets/web/requirements.txt new file mode 100644 index 000000000000..04e5e9f19008 --- /dev/null +++ b/vision/snippets/web/requirements.txt @@ -0,0 +1 @@ +google-cloud-vision==3.1.4 diff --git a/vision/snippets/web/web_detect.py b/vision/snippets/web/web_detect.py new file mode 100644 index 000000000000..e7993d62a672 --- /dev/null +++ b/vision/snippets/web/web_detect.py @@ -0,0 +1,100 @@ +#!/usr/bin/env python + +# Copyright 2017 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Demonstrates web detection using the Google Cloud Vision API. 
+ +Example usage: + python web_detect.py https://goo.gl/X4qcB6 + python web_detect.py ../detect/resources/landmark.jpg + python web_detect.py gs://your-bucket/image.png +""" +# [START vision_web_detection_tutorial] +# [START vision_web_detection_tutorial_imports] +import argparse +import io + +from google.cloud import vision +# [END vision_web_detection_tutorial_imports] + + +def annotate(path): + """Returns web annotations given the path to an image.""" + # [START vision_web_detection_tutorial_annotate] + client = vision.ImageAnnotatorClient() + + if path.startswith('http') or path.startswith('gs:'): + image = vision.Image() + image.source.image_uri = path + + else: + with io.open(path, 'rb') as image_file: + content = image_file.read() + + image = vision.Image(content=content) + + web_detection = client.web_detection(image=image).web_detection + # [END vision_web_detection_tutorial_annotate] + + return web_detection + + +def report(annotations): + """Prints detected features in the provided web annotations.""" + # [START vision_web_detection_tutorial_print_annotations] + if annotations.pages_with_matching_images: + print('\n{} Pages with matching images retrieved'.format( + len(annotations.pages_with_matching_images))) + + for page in annotations.pages_with_matching_images: + print('Url : {}'.format(page.url)) + + if annotations.full_matching_images: + print('\n{} Full Matches found: '.format( + len(annotations.full_matching_images))) + + for image in annotations.full_matching_images: + print('Url : {}'.format(image.url)) + + if annotations.partial_matching_images: + print('\n{} Partial Matches found: '.format( + len(annotations.partial_matching_images))) + + for image in annotations.partial_matching_images: + print('Url : {}'.format(image.url)) + + if annotations.web_entities: + print('\n{} Web entities found: '.format( + len(annotations.web_entities))) + + for entity in annotations.web_entities: + print('Score : {}'.format(entity.score)) + print('Description: {}'.format(entity.description)) + # [END vision_web_detection_tutorial_print_annotations] + + +if __name__ == '__main__': + # [START vision_web_detection_tutorial_run_application] + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter) + path_help = str('The image to detect, can be web URI, ' + 'Google Cloud Storage, or path to local file.') + parser.add_argument('image_url', help=path_help) + args = parser.parse_args() + + report(annotate(args.image_url)) + # [END vision_web_detection_tutorial_run_application] +# [END vision_web_detection_tutorial] diff --git a/vision/snippets/web/web_detect_test.py b/vision/snippets/web/web_detect_test.py new file mode 100644 index 000000000000..3f57cc637e81 --- /dev/null +++ b/vision/snippets/web/web_detect_test.py @@ -0,0 +1,35 @@ +# Copyright 2017 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
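+
+"""Tests for web_detect.py.
+
+The module under test can also be driven directly; a small sketch, where
+the URL is a placeholder for any publicly readable image:
+
+    import web_detect
+
+    annotations = web_detect.annotate('https://example.com/some-image.jpg')
+    web_detect.report(annotations)
+"""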
+ +import pytest + +import web_detect + +ASSET_BUCKET = "cloud-samples-data" + + +def test_detect_file(capsys): + file_name = ('../detect/resources/landmark.jpg') + web_detect.report(web_detect.annotate(file_name)) + out, _ = capsys.readouterr() + assert 'description' in out.lower() + + +@pytest.mark.flaky(max_runs=3, min_passes=1) +def test_detect_web_gsuri(capsys): + file_name = ('gs://{}/vision/landmark/pofa.jpg'.format( + ASSET_BUCKET)) + web_detect.report(web_detect.annotate(file_name)) + out, _ = capsys.readouterr() + assert 'description:' in out.lower()
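
Taken together, the product search helpers added in this change compose into a
small end-to-end flow. A rough sketch, not part of the samples themselves; the
project id, region, ids and GCS path are placeholders, and it assumes the
Vision Product Search API is enabled for the project:

    from product_set_management import create_product_set, delete_product_set
    from product_management import create_product, delete_product
    from reference_image_management import (
        create_reference_image, delete_reference_image)

    project, region = 'my-project', 'us-west1'

    create_product_set(project, region, 'demo-set', 'Demo product set')
    create_product(project, region, 'demo-product', 'Demo product', 'apparel')
    create_reference_image(
        project, region, 'demo-product', 'demo-image',
        'gs://my-bucket/shoes_1.jpg')

    # Clean up in reverse order once finished experimenting.
    delete_reference_image(project, region, 'demo-product', 'demo-image')
    delete_product(project, region, 'demo-product')
    delete_product_set(project, region, 'demo-set')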