diff --git a/packages/google-cloud-vision/.appveyor.yml b/packages/google-cloud-vision/.appveyor.yml new file mode 100644 index 00000000000..24082152655 --- /dev/null +++ b/packages/google-cloud-vision/.appveyor.yml @@ -0,0 +1,20 @@ +environment: + matrix: + - nodejs_version: 8 + +install: + - ps: Install-Product node $env:nodejs_version + - npm install -g npm # Force using the latest npm to get dedupe during install + - set PATH=%APPDATA%\npm;%PATH% + - npm install --force --ignore-scripts + +test_script: + - node --version + - npm --version + - npm rebuild + - npm test + +build: off + +matrix: + fast_finish: true diff --git a/packages/google-cloud-vision/.circleci/config.yml b/packages/google-cloud-vision/.circleci/config.yml new file mode 100644 index 00000000000..7d8903ac2c9 --- /dev/null +++ b/packages/google-cloud-vision/.circleci/config.yml @@ -0,0 +1,225 @@ +--- +# "Include" for unit tests definition. +unit_tests: &unit_tests + steps: + - checkout + - run: + name: Install modules and dependencies. + command: npm install + - run: + name: Run unit tests. + command: npm test + - run: + name: Submit coverage data to codecov. + command: node_modules/.bin/codecov + when: always + +version: 2.0 +workflows: + version: 2 + tests: + jobs: + - node4: + filters: + tags: + only: /.*/ + - node6: + filters: + tags: + only: /.*/ + - node7: + filters: + tags: + only: /.*/ + - node8: + filters: + tags: + only: /.*/ + - node9: + filters: + tags: + only: /.*/ + - lint: + requires: + - node4 + - node6 + - node7 + - node8 + - node9 + filters: + tags: + only: /.*/ + - docs: + requires: + - node4 + - node6 + - node7 + - node8 + - node9 + filters: + tags: + only: /.*/ + - system_tests: + requires: + - lint + - docs + filters: + branches: + only: master + tags: + only: /^v[\d.]+$/ + - sample_tests: + requires: + - lint + - docs + filters: + branches: + only: master + tags: + only: /^v[\d.]+$/ + - publish_npm: + requires: + - system_tests + - sample_tests + filters: + branches: + ignore: /.*/ + tags: + only: /^v[\d.]+$/ + +jobs: + node4: + docker: + - image: node:4 + steps: + - checkout + - run: + name: Install modules and dependencies. + command: npm install --unsafe-perm + - run: + name: Run unit tests. + command: npm test + - run: + name: Submit coverage data to codecov. + command: node_modules/.bin/codecov + when: always + node6: + docker: + - image: node:6 + <<: *unit_tests + node7: + docker: + - image: node:7 + <<: *unit_tests + node8: + docker: + - image: node:8 + <<: *unit_tests + node9: + docker: + - image: node:9 + <<: *unit_tests + + lint: + docker: + - image: node:8 + steps: + - checkout + - run: + name: Install modules and dependencies. + command: | + npm install + npm link + - run: + name: Link the module being tested to the samples. + command: | + cd samples/ + npm link @google-cloud/vision + npm install + cd .. + - run: + name: Run linting. + command: npm run lint + + docs: + docker: + - image: node:8 + steps: + - checkout + - run: + name: Install modules and dependencies. + command: npm install + - run: + name: Build documentation. + command: npm run docs + + sample_tests: + docker: + - image: node:8 + steps: + - checkout + - run: + name: Decrypt credentials. + command: | + openssl aes-256-cbc -d -in .circleci/key.json.enc \ + -out .circleci/key.json \ + -k "${SYSTEM_TESTS_ENCRYPTION_KEY}" + - run: + name: Install and link the module. + command: | + npm install + npm link + - run: + name: Link the module being tested to the samples. 
+ command: | + cd samples/ + npm link @google-cloud/vision + npm install + cd .. + - run: + name: Run sample tests. + command: npm run samples-test + environment: + GCLOUD_PROJECT: long-door-651 + GOOGLE_APPLICATION_CREDENTIALS: /var/vision/.circleci/key.json + - run: + name: Remove unencrypted key. + command: rm .circleci/key.json + when: always + working_directory: /var/vision/ + + system_tests: + docker: + - image: node:8 + steps: + - checkout + - run: + name: Decrypt credentials. + command: | + openssl aes-256-cbc -d -in .circleci/key.json.enc \ + -out .circleci/key.json \ + -k "${SYSTEM_TESTS_ENCRYPTION_KEY}" + - run: + name: Install modules and dependencies. + command: npm install + - run: + name: Run system tests. + command: npm run system-test + environment: + GOOGLE_APPLICATION_CREDENTIALS: .circleci/key.json + - run: + name: Remove unencrypted key. + command: rm .circleci/key.json + when: always + + publish_npm: + docker: + - image: node:8 + steps: + - checkout + - run: + name: Set NPM authentication. + command: echo "//registry.npmjs.org/:_authToken=${NPM_TOKEN}" > ~/.npmrc + - run: + name: Publish the module to npm. + command: npm publish diff --git a/packages/google-cloud-vision/.circleci/key.json.enc b/packages/google-cloud-vision/.circleci/key.json.enc new file mode 100644 index 00000000000..40235801133 Binary files /dev/null and b/packages/google-cloud-vision/.circleci/key.json.enc differ diff --git a/packages/google-cloud-vision/.cloud-repo-tools.json b/packages/google-cloud-vision/.cloud-repo-tools.json new file mode 100644 index 00000000000..354f8b8dacb --- /dev/null +++ b/packages/google-cloud-vision/.cloud-repo-tools.json @@ -0,0 +1,16 @@ +{ + "requiresKeyFile": true, + "requiresProjectId": true, + "product": "vision", + "client_reference_url": "https://cloud.google.com/nodejs/docs/reference/vision/latest/", + "release_quality": "beta", + "samples": [ + { + "id": "detect", + "name": "Detection samples", + "file": "detect.js", + "docs_link": "https://cloud.google.com/vision/docs", + "usage": "node detect.js --help" + } + ] +} diff --git a/packages/google-cloud-vision/.eslintignore b/packages/google-cloud-vision/.eslintignore new file mode 100644 index 00000000000..f6fac98b0a8 --- /dev/null +++ b/packages/google-cloud-vision/.eslintignore @@ -0,0 +1,3 @@ +node_modules/* +samples/node_modules/* +src/**/doc/* diff --git a/packages/google-cloud-vision/.eslintrc.yml b/packages/google-cloud-vision/.eslintrc.yml new file mode 100644 index 00000000000..bed57fbc42c --- /dev/null +++ b/packages/google-cloud-vision/.eslintrc.yml @@ -0,0 +1,13 @@ +--- +extends: + - 'eslint:recommended' + - 'plugin:node/recommended' + - prettier +plugins: + - node + - prettier +rules: + prettier/prettier: error + block-scoped-var: error + eqeqeq: error + no-warning-comments: warn diff --git a/packages/google-cloud-vision/.gitignore b/packages/google-cloud-vision/.gitignore index 0aedda85e3c..6b80718f261 100644 --- a/packages/google-cloud-vision/.gitignore +++ b/packages/google-cloud-vision/.gitignore @@ -1,2 +1,10 @@ -out.png -out.* +**/*.log +**/node_modules +.coverage +.nyc_output +docs/ +out/ +system-test/secrets.js +system-test/*key.json +*.lock +*-lock.js* diff --git a/packages/google-cloud-vision/.jsdoc.js b/packages/google-cloud-vision/.jsdoc.js new file mode 100644 index 00000000000..dce8e9ffbbb --- /dev/null +++ b/packages/google-cloud-vision/.jsdoc.js @@ -0,0 +1,45 @@ +/*! + * Copyright 2017 Google Inc. All Rights Reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +'use strict'; + +module.exports = { + opts: { + readme: './README.md', + package: './package.json', + template: './node_modules/ink-docstrap/template', + recurse: true, + verbose: true, + destination: './docs/' + }, + plugins: [ + 'plugins/markdown' + ], + source: { + excludePattern: '(^|\\/|\\\\)[._]', + include: [ + 'src' + ], + includePattern: '\\.js$' + }, + templates: { + copyright: 'Copyright 2017 Google, Inc.', + includeDate: false, + sourceFiles: false, + systemName: '@google-cloud/vision', + theme: 'lumen' + } +}; diff --git a/packages/google-cloud-vision/.mailmap b/packages/google-cloud-vision/.mailmap new file mode 100644 index 00000000000..bac1d09b6e6 --- /dev/null +++ b/packages/google-cloud-vision/.mailmap @@ -0,0 +1,6 @@ +Jason Dobry Jason Dobry +Jason Dobry Jason Dobry +Jun Mukai Jun Mukai +Luke Sneeringer Luke Sneeringer +Stephen Sawchuk Stephen Sawchuk +Stephen Sawchuk Stephen Sawchuk diff --git a/packages/google-cloud-vision/.nycrc b/packages/google-cloud-vision/.nycrc new file mode 100644 index 00000000000..a1a8e6920ce --- /dev/null +++ b/packages/google-cloud-vision/.nycrc @@ -0,0 +1,26 @@ +{ + "report-dir": "./.coverage", + "exclude": [ + "src/*{/*,/**/*}.js", + "src/*/v*/*.js", + "test/**/*.js" + ], + "watermarks": { + "branches": [ + 95, + 100 + ], + "functions": [ + 95, + 100 + ], + "lines": [ + 95, + 100 + ], + "statements": [ + 95, + 100 + ] + } +} diff --git a/packages/google-cloud-vision/.prettierignore b/packages/google-cloud-vision/.prettierignore new file mode 100644 index 00000000000..f6fac98b0a8 --- /dev/null +++ b/packages/google-cloud-vision/.prettierignore @@ -0,0 +1,3 @@ +node_modules/* +samples/node_modules/* +src/**/doc/* diff --git a/packages/google-cloud-vision/.prettierrc b/packages/google-cloud-vision/.prettierrc new file mode 100644 index 00000000000..df6eac07446 --- /dev/null +++ b/packages/google-cloud-vision/.prettierrc @@ -0,0 +1,8 @@ +--- +bracketSpacing: false +printWidth: 80 +semi: true +singleQuote: true +tabWidth: 2 +trailingComma: es5 +useTabs: false diff --git a/packages/google-cloud-vision/CODE_OF_CONDUCT.md b/packages/google-cloud-vision/CODE_OF_CONDUCT.md new file mode 100644 index 00000000000..46b2a08ea6d --- /dev/null +++ b/packages/google-cloud-vision/CODE_OF_CONDUCT.md @@ -0,0 +1,43 @@ +# Contributor Code of Conduct + +As contributors and maintainers of this project, +and in the interest of fostering an open and welcoming community, +we pledge to respect all people who contribute through reporting issues, +posting feature requests, updating documentation, +submitting pull requests or patches, and other activities. + +We are committed to making participation in this project +a harassment-free experience for everyone, +regardless of level of experience, gender, gender identity and expression, +sexual orientation, disability, personal appearance, +body size, race, ethnicity, age, religion, or nationality. 
+ +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery +* Personal attacks +* Trolling or insulting/derogatory comments +* Public or private harassment +* Publishing other's private information, +such as physical or electronic +addresses, without explicit permission +* Other unethical or unprofessional conduct. + +Project maintainers have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct. +By adopting this Code of Conduct, +project maintainers commit themselves to fairly and consistently +applying these principles to every aspect of managing this project. +Project maintainers who do not follow or enforce the Code of Conduct +may be permanently removed from the project team. + +This code of conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. + +Instances of abusive, harassing, or otherwise unacceptable behavior +may be reported by opening an issue +or contacting one or more of the project maintainers. + +This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0, +available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/) diff --git a/packages/google-cloud-vision/CONTRIBUTORS b/packages/google-cloud-vision/CONTRIBUTORS new file mode 100644 index 00000000000..caefac6e083 --- /dev/null +++ b/packages/google-cloud-vision/CONTRIBUTORS @@ -0,0 +1,18 @@ +# The names of individuals who have contributed to this project. +# +# Names are formatted as: +# name +# +Ace Nassri +Ali Ijaz Sheikh +Dave Gramlich +Ernest Landrito +Gus Class +Jason Dobry +Jun Mukai +Luke Sneeringer +Song Wang +Stephen Sawchuk +Tim Swast +calibr +rtw diff --git a/packages/google-cloud-vision/LICENSE b/packages/google-cloud-vision/LICENSE new file mode 100644 index 00000000000..7a4a3ea2424 --- /dev/null +++ b/packages/google-cloud-vision/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/packages/google-cloud-vision/README.md b/packages/google-cloud-vision/README.md index f99e475ccd5..9646eba0bf2 100644 --- a/packages/google-cloud-vision/README.md +++ b/packages/google-cloud-vision/README.md @@ -1,60 +1,135 @@ -# Node.js Client for Google Cloud Vision API ([Beta](https://github.com/GoogleCloudPlatform/google-cloud-node#versioning)) +Google Cloud Platform logo -[Google Cloud Vision API][Product Documentation]: Integrates Google Vision features, including image labeling, face, logo, and landmark detection, optical character recognition (OCR), and detection of explicit content, into applications. 
-- [Client Library Documentation][] -- [Product Documentation][] +# [Google Cloud Vision API: Node.js Client](https://github.com/googleapis/nodejs-vision) -## Quick Start -In order to use this library, you first need to go through the following steps: +[![release level](https://img.shields.io/badge/release%20level-beta-yellow.svg?style=flat)](https://cloud.google.com/terms/launch-stages) +[![CircleCI](https://img.shields.io/circleci/project/github/googleapis/nodejs-vision.svg?style=flat)](https://circleci.com/gh/googleapis/nodejs-vision) +[![AppVeyor](https://ci.appveyor.com/api/projects/status/github/googleapis/nodejs-vision?branch=master&svg=true)](https://ci.appveyor.com/project/googleapis/nodejs-vision) +[![codecov](https://img.shields.io/codecov/c/github/googleapis/nodejs-vision/master.svg?style=flat)](https://codecov.io/gh/googleapis/nodejs-vision) -1. [Select or create a Cloud Platform project.](https://console.cloud.google.com/project) -2. [Enable the Google Cloud Vision API.](https://console.cloud.google.com/apis/api/vision) -3. [Setup Authentication.](https://googlecloudplatform.github.io/google-cloud-node/#/docs/google-cloud/master/guides/authentication) +> Node.js idiomatic client for [Vision API][product-docs]. -### Installation -``` -$ npm install --save @google-cloud/vision -``` +The [Cloud Vision API](https://cloud.google.com/vision/docs) allows developers to easily integrate vision detection features within applications, including image labeling, face and landmark detection, optical character recognition (OCR), and tagging of explicit content. + + +* [Vision API Node.js Client API Reference][client-docs] +* [github.com/googleapis/nodejs-vision](https://github.com/googleapis/nodejs-vision) +* [Vision API Documentation][product-docs] + +Read more about the client libraries for Cloud APIs, including the older +Google APIs Client Libraries, in [Client Libraries Explained][explained]. + +[explained]: https://cloud.google.com/apis/docs/client-libraries-explained + +**Table of contents:** + +* [Quickstart](#quickstart) + * [Before you begin](#before-you-begin) + * [Installing the client library](#installing-the-client-library) + * [Using the client library](#using-the-client-library) +* [Samples](#samples) +* [Versioning](#versioning) +* [Contributing](#contributing) +* [License](#license) + +## Quickstart + +### Before you begin + +1. Select or create a Cloud Platform project. + + [Go to the projects page][projects] + +1. Enable billing for your project. + + [Enable billing][billing] + +1. Enable the Google Cloud Vision API API. + + [Enable the API][enable_api] + +1. [Set up authentication with a service account][auth] so you can access the + API from your local workstation. + +[projects]: https://console.cloud.google.com/project +[billing]: https://support.google.com/cloud/answer/6293499#enable-billing +[enable_api]: https://console.cloud.google.com/flows/enableapi?apiid=vision.googleapis.com +[auth]: https://cloud.google.com/docs/authentication/getting-started + +### Installing the client library + + npm install --save @google-cloud/vision -### Preview -#### ImageAnnotatorClient -```js - var vision = require('@google-cloud/vision'); - - var client = vision({ - // optional auth parameters. 
- }); - - var gcsImageUri = 'gs://gapic-toolkit/President_Barack_Obama.jpg'; - var source = { - gcsImageUri : gcsImageUri - }; - var image = { - source : source - }; - var type = vision.v1.types.Feature.Type.FACE_DETECTION; - var featuresElement = { - type : type - }; - var features = [featuresElement]; - var requestsElement = { - image : image, - features : features - }; - var requests = [requestsElement]; - client.batchAnnotateImages({requests: requests}).then(function(responses) { - var response = responses[0]; - // doThingsWith(response) - }) - .catch(function(err) { - console.error(err); - }); +### Using the client library + +```javascript +// Imports the Google Cloud client library +const vision = require('@google-cloud/vision'); + +// Creates a client +const client = new vision.ImageAnnotatorClient(); + +// The name of the image file to annotate +const fileName = './resources/wakeupcat.jpg'; + +// Prepare the request object +const request = { + image: { + source: { + filename: fileName, + }, + }, +}; + +// Performs label detection on the image file +client + .labelDetection(request) + .then(results => { + const labels = results[0].labelAnnotations; + + console.log('Labels:'); + labels.forEach(label => console.log(label.description)); + }) + .catch(err => { + console.error('ERROR:', err); + }); ``` -### Next Steps -- Read the [Client Library Documentation][] for Google Cloud Vision API to see other available methods on the client. -- Read the [Google Cloud Vision API Product documentation][Product Documentation] to learn more about the product and see How-to Guides. -- View this [repository's main README](https://github.com/GoogleCloudPlatform/google-cloud-node/blob/master/README.md) to see the full list of Cloud APIs that we cover. +## Samples + +Samples are in the [`samples/`](https://github.com/googleapis/nodejs-vision/tree/master/samples) directory. The samples' `README.md` +has instructions for running the samples. + +| Sample | Source Code | Try it | +| --------------------------- | --------------------------------- | ------ | +| Detection samples | [source code](https://github.com/googleapis/nodejs-vision/blob/master/samples/detect.js) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-vision&page=editor&open_in_editor=samples/detect.js,samples/README.md) | + +The [Vision API Node.js Client API Reference][client-docs] documentation +also contains samples. + +## Versioning + +This library follows [Semantic Versioning](http://semver.org/). + +This library is considered to be in **beta**. This means it is expected to be +mostly stable while we work toward a general availability release; however, +complete stability is not guaranteed. We will address issues and requests +against beta libraries with a high priority. + +More Information: [Google Cloud Platform Launch Stages][launch_stages] + +[launch_stages]: https://cloud.google.com/terms/launch-stages + +## Contributing + +Contributions welcome! See the [Contributing Guide](https://github.com/googleapis/nodejs-vision/blob/master/.github/CONTRIBUTING.md). 
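
As a companion to the label-detection quickstart above, the same `ImageAnnotatorClient` can read images directly from Cloud Storage and exposes single-feature helpers alongside `labelDetection`, such as `faceDetection`. Below is a minimal sketch; the bucket and object names are hypothetical, and the response fields mirror the `FaceAnnotation` and `BoundingPoly` messages defined in the protos added later in this change.

```javascript
// Minimal sketch only; the bucket and object names below are hypothetical.
const vision = require('@google-cloud/vision');

// Creates a client
const client = new vision.ImageAnnotatorClient();

// Point the request at a Cloud Storage object instead of a local file
const request = {
  image: {
    source: {
      imageUri: 'gs://my-bucket/team-photo.jpg',
    },
  },
};

// Performs face detection on the image
client
  .faceDetection(request)
  .then(results => {
    const faces = results[0].faceAnnotations;

    console.log(`Found ${faces.length} face(s)`);
    faces.forEach(face => {
      console.log('  Joy:', face.joyLikelihood);
      console.log('  Bounds:', JSON.stringify(face.boundingPoly.vertices));
    });
  })
  .catch(err => {
    console.error('ERROR:', err);
  });
```

The shape of `results[0]` follows the `AnnotateImageResponse` message in `image_annotator.proto` further down in this change.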
+ +## License + +Apache Version 2.0 + +See [LICENSE](https://github.com/googleapis/nodejs-vision/blob/master/LICENSE) -[Client Library Documentation]: https://googlecloudplatform.github.io/google-cloud-node/#/docs/vision -[Product Documentation]: https://cloud.google.com/vision \ No newline at end of file +[client-docs]: https://cloud.google.com/nodejs/docs/reference/vision/latest/ +[product-docs]: https://cloud.google.com/vision/docs +[shell_img]: http://gstatic.com/cloudssh/images/open-btn.png diff --git a/packages/google-cloud-vision/package.json b/packages/google-cloud-vision/package.json index 572732ea2d7..65a2fb60c66 100644 --- a/packages/google-cloud-vision/package.json +++ b/packages/google-cloud-vision/package.json @@ -1,37 +1,16 @@ { - "repository": "GoogleCloudPlatform/google-cloud-node", "name": "@google-cloud/vision", + "description": "Google Cloud Vision API client for Node.js", "version": "0.12.0", + "license": "Apache-2.0", "author": "Google Inc", - "description": "Google Cloud Vision API client for Node.js", - "contributors": [ - { - "name": "Burcu Dogan", - "email": "jbd@google.com" - }, - { - "name": "Johan Euphrosine", - "email": "proppy@google.com" - }, - { - "name": "Patrick Costello", - "email": "pcostell@google.com" - }, - { - "name": "Ryan Seys", - "email": "ryan@ryanseys.com" - }, - { - "name": "Silvano Luciani", - "email": "silvano@google.com" - }, - { - "name": "Stephen Sawchuk", - "email": "sawchuk@gmail.com" - } - ], + "engines": { + "node": ">=4.0.0" + }, + "repository": "googleapis/nodejs-vision", "main": "src/index.js", "files": [ + "protos", "src", "AUTHORS", "LICENSE" @@ -49,32 +28,58 @@ "vision", "Google Cloud Vision API" ], + "contributors": [ + "Ace Nassri ", + "Ali Ijaz Sheikh ", + "Dave Gramlich ", + "Ernest Landrito ", + "Gus Class ", + "Jason Dobry ", + "Jun Mukai ", + "Luke Sneeringer ", + "Song Wang ", + "Stephen Sawchuk ", + "Tim Swast ", + "calibr ", + "rtw " + ], + "scripts": { + "cover": "nyc --reporter=lcov mocha --require intelli-espower-loader test/*.js && nyc report", + "docs": "repo-tools exec -- jsdoc -c .jsdoc.js", + "generate-scaffolding": "repo-tools generate all && repo-tools generate lib_samples_readme -l samples/ --config ../.cloud-repo-tools.json", + "lint": "repo-tools lint --cmd eslint -- src/ samples/ system-test/ test/", + "prettier": "repo-tools exec -- prettier --write src/*.js src/*/*.js samples/*.js samples/*/*.js test/*.js test/*/*.js system-test/*.js system-test/*/*.js", + "samples-test": "cd samples/ && npm link ../ && npm test && cd ../", + "system-test": "repo-tools test run --cmd mocha -- system-test/*.js --no-timeouts", + "test-no-cover": "repo-tools test run --cmd mocha -- test/*.js --no-timeouts", + "test": "repo-tools test run --cmd npm -- run cover" + }, "dependencies": { "@google-cloud/common": "^0.13.0", "async": "^2.0.1", "extend": "^3.0.0", "google-gax": "^0.14.2", "google-proto-files": "^0.13.1", - "is": "^3.0.1" + "is": "^3.0.1", + "lodash.merge": "^4.6.0" }, "devDependencies": { + "@google-cloud/nodejs-repo-tools": "^2.1.1", "@google-cloud/storage": "*", + "codecov": "^3.0.0", + "eslint": "^4.10.0", + "eslint-config-prettier": "^2.7.0", + "eslint-plugin-node": "^5.2.1", + "eslint-plugin-prettier": "^2.3.1", + "ink-docstrap": "^1.3.0", "intelli-espower-loader": "^1.0.1", - "mocha": "^3.0.1", + "jsdoc": "^3.5.5", + "mocha": "^3.5.3", "node-uuid": "^1.4.7", - "nyc": "^10.3.0", - "power-assert": "^1.4.2", + "nyc": "^10.3.2", + "power-assert": "^1.4.4", + "prettier": "^1.7.4", + "safe-buffer": "^5.1.1", 
"sinon": "^2.2.0" - }, - "scripts": { - "cover": "nyc --reporter=lcov --reporter=html mocha --no-timeouts --require intelli-espower-loader test/*.js && nyc report", - "publish-module": "node ../../scripts/publish.js vision", - "test": "mocha --require intelli-espower-loader test/*.js", - "smoke-test": "mocha smoke-test/*.js --timeout 5000", - "system-test": "mocha system-test/*.js --require intelli-espower-loader --no-timeouts --bail" - }, - "license": "Apache-2.0", - "engines": { - "node": ">=4.0.0" } } diff --git a/packages/google-cloud-vision/protos/google/cloud/vision/v1/geometry.proto b/packages/google-cloud-vision/protos/google/cloud/vision/v1/geometry.proto new file mode 100644 index 00000000000..5586c2eb3ad --- /dev/null +++ b/packages/google-cloud-vision/protos/google/cloud/vision/v1/geometry.proto @@ -0,0 +1,54 @@ +// Copyright 2016 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.vision.v1; + +option cc_enable_arenas = true; +option go_package = "google.golang.org/genproto/googleapis/cloud/vision/v1;vision"; +option java_multiple_files = true; +option java_outer_classname = "GeometryProto"; +option java_package = "com.google.cloud.vision.v1"; + + +// A vertex represents a 2D point in the image. +// NOTE: the vertex coordinates are in the same scale as the original image. +message Vertex { + // X coordinate. + int32 x = 1; + + // Y coordinate. + int32 y = 2; +} + +// A bounding polygon for the detected image annotation. +message BoundingPoly { + // The bounding polygon vertices. + repeated Vertex vertices = 1; +} + +// A 3D position in the image, used primarily for Face detection landmarks. +// A valid Position must have both x and y coordinates. +// The position coordinates are in the same scale as the original image. +message Position { + // X coordinate. + float x = 1; + + // Y coordinate. + float y = 2; + + // Z coordinate (or depth). + float z = 3; +} diff --git a/packages/google-cloud-vision/protos/google/cloud/vision/v1/image_annotator.proto b/packages/google-cloud-vision/protos/google/cloud/vision/v1/image_annotator.proto new file mode 100644 index 00000000000..c17f8aeb6fe --- /dev/null +++ b/packages/google-cloud-vision/protos/google/cloud/vision/v1/image_annotator.proto @@ -0,0 +1,569 @@ +// Copyright 2016 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package google.cloud.vision.v1; + +import "google/api/annotations.proto"; +import "google/cloud/vision/v1/geometry.proto"; +import "google/cloud/vision/v1/text_annotation.proto"; +import "google/cloud/vision/v1/web_detection.proto"; +import "google/rpc/status.proto"; +import "google/type/color.proto"; +import "google/type/latlng.proto"; + +option cc_enable_arenas = true; +option go_package = "google.golang.org/genproto/googleapis/cloud/vision/v1;vision"; +option java_multiple_files = true; +option java_outer_classname = "ImageAnnotatorProto"; +option java_package = "com.google.cloud.vision.v1"; + + +// Service that performs Google Cloud Vision API detection tasks over client +// images, such as face, landmark, logo, label, and text detection. The +// ImageAnnotator service returns detected entities from the images. +service ImageAnnotator { + // Run image detection and annotation for a batch of images. + rpc BatchAnnotateImages(BatchAnnotateImagesRequest) returns (BatchAnnotateImagesResponse) { + option (google.api.http) = { post: "/v1/images:annotate" body: "*" }; + } +} + +// Users describe the type of Google Cloud Vision API tasks to perform over +// images by using *Feature*s. Each Feature indicates a type of image +// detection task to perform. Features encode the Cloud Vision API +// vertical to operate on and the number of top-scoring results to return. +message Feature { + // Type of image feature. + enum Type { + // Unspecified feature type. + TYPE_UNSPECIFIED = 0; + + // Run face detection. + FACE_DETECTION = 1; + + // Run landmark detection. + LANDMARK_DETECTION = 2; + + // Run logo detection. + LOGO_DETECTION = 3; + + // Run label detection. + LABEL_DETECTION = 4; + + // Run OCR. + TEXT_DETECTION = 5; + + // Run dense text document OCR. Takes precedence when both + // DOCUMENT_TEXT_DETECTION and TEXT_DETECTION are present. + DOCUMENT_TEXT_DETECTION = 11; + + // Run computer vision models to compute image safe-search properties. + SAFE_SEARCH_DETECTION = 6; + + // Compute a set of image properties, such as the image's dominant colors. + IMAGE_PROPERTIES = 7; + + // Run crop hints. + CROP_HINTS = 9; + + // Run web detection. + WEB_DETECTION = 10; + } + + // The feature type. + Type type = 1; + + // Maximum number of results of this type. + int32 max_results = 2; +} + +// External image source (Google Cloud Storage image location). +message ImageSource { + // NOTE: For new code `image_uri` below is preferred. + // Google Cloud Storage image URI, which must be in the following form: + // `gs://bucket_name/object_name` (for details, see + // [Google Cloud Storage Request + // URIs](https://cloud.google.com/storage/docs/reference-uris)). + // NOTE: Cloud Storage object versioning is not supported. + string gcs_image_uri = 1; + + // Image URI which supports: + // 1) Google Cloud Storage image URI, which must be in the following form: + // `gs://bucket_name/object_name` (for details, see + // [Google Cloud Storage Request + // URIs](https://cloud.google.com/storage/docs/reference-uris)). + // NOTE: Cloud Storage object versioning is not supported. + // 2) Publicly accessible image HTTP/HTTPS URL. + // This is preferred over the legacy `gcs_image_uri` above. When both + // `gcs_image_uri` and `image_uri` are specified, `image_uri` takes + // precedence. + string image_uri = 2; +} + +// Client image to perform Google Cloud Vision API tasks over. +message Image { + // Image content, represented as a stream of bytes. 
+ // Note: as with all `bytes` fields, protobuffers use a pure binary + // representation, whereas JSON representations use base64. + bytes content = 1; + + // Google Cloud Storage image location. If both `content` and `source` + // are provided for an image, `content` takes precedence and is + // used to perform the image annotation request. + ImageSource source = 2; +} + +// A face annotation object contains the results of face detection. +message FaceAnnotation { + // A face-specific landmark (for example, a face feature). + // Landmark positions may fall outside the bounds of the image + // if the face is near one or more edges of the image. + // Therefore it is NOT guaranteed that `0 <= x < width` or + // `0 <= y < height`. + message Landmark { + // Face landmark (feature) type. + // Left and right are defined from the vantage of the viewer of the image + // without considering mirror projections typical of photos. So, `LEFT_EYE`, + // typically, is the person's right eye. + enum Type { + // Unknown face landmark detected. Should not be filled. + UNKNOWN_LANDMARK = 0; + + // Left eye. + LEFT_EYE = 1; + + // Right eye. + RIGHT_EYE = 2; + + // Left of left eyebrow. + LEFT_OF_LEFT_EYEBROW = 3; + + // Right of left eyebrow. + RIGHT_OF_LEFT_EYEBROW = 4; + + // Left of right eyebrow. + LEFT_OF_RIGHT_EYEBROW = 5; + + // Right of right eyebrow. + RIGHT_OF_RIGHT_EYEBROW = 6; + + // Midpoint between eyes. + MIDPOINT_BETWEEN_EYES = 7; + + // Nose tip. + NOSE_TIP = 8; + + // Upper lip. + UPPER_LIP = 9; + + // Lower lip. + LOWER_LIP = 10; + + // Mouth left. + MOUTH_LEFT = 11; + + // Mouth right. + MOUTH_RIGHT = 12; + + // Mouth center. + MOUTH_CENTER = 13; + + // Nose, bottom right. + NOSE_BOTTOM_RIGHT = 14; + + // Nose, bottom left. + NOSE_BOTTOM_LEFT = 15; + + // Nose, bottom center. + NOSE_BOTTOM_CENTER = 16; + + // Left eye, top boundary. + LEFT_EYE_TOP_BOUNDARY = 17; + + // Left eye, right corner. + LEFT_EYE_RIGHT_CORNER = 18; + + // Left eye, bottom boundary. + LEFT_EYE_BOTTOM_BOUNDARY = 19; + + // Left eye, left corner. + LEFT_EYE_LEFT_CORNER = 20; + + // Right eye, top boundary. + RIGHT_EYE_TOP_BOUNDARY = 21; + + // Right eye, right corner. + RIGHT_EYE_RIGHT_CORNER = 22; + + // Right eye, bottom boundary. + RIGHT_EYE_BOTTOM_BOUNDARY = 23; + + // Right eye, left corner. + RIGHT_EYE_LEFT_CORNER = 24; + + // Left eyebrow, upper midpoint. + LEFT_EYEBROW_UPPER_MIDPOINT = 25; + + // Right eyebrow, upper midpoint. + RIGHT_EYEBROW_UPPER_MIDPOINT = 26; + + // Left ear tragion. + LEFT_EAR_TRAGION = 27; + + // Right ear tragion. + RIGHT_EAR_TRAGION = 28; + + // Left eye pupil. + LEFT_EYE_PUPIL = 29; + + // Right eye pupil. + RIGHT_EYE_PUPIL = 30; + + // Forehead glabella. + FOREHEAD_GLABELLA = 31; + + // Chin gnathion. + CHIN_GNATHION = 32; + + // Chin left gonion. + CHIN_LEFT_GONION = 33; + + // Chin right gonion. + CHIN_RIGHT_GONION = 34; + } + + // Face landmark type. + Type type = 3; + + // Face landmark position. + Position position = 4; + } + + // The bounding polygon around the face. The coordinates of the bounding box + // are in the original image's scale, as returned in `ImageParams`. + // The bounding box is computed to "frame" the face in accordance with human + // expectations. It is based on the landmarker results. + // Note that one or more x and/or y coordinates may not be generated in the + // `BoundingPoly` (the polygon will be unbounded) if only a partial face + // appears in the image to be annotated. 
+ BoundingPoly bounding_poly = 1; + + // The `fd_bounding_poly` bounding polygon is tighter than the + // `boundingPoly`, and encloses only the skin part of the face. Typically, it + // is used to eliminate the face from any image analysis that detects the + // "amount of skin" visible in an image. It is not based on the + // landmarker results, only on the initial face detection, hence + // the fd (face detection) prefix. + BoundingPoly fd_bounding_poly = 2; + + // Detected face landmarks. + repeated Landmark landmarks = 3; + + // Roll angle, which indicates the amount of clockwise/anti-clockwise rotation + // of the face relative to the image vertical about the axis perpendicular to + // the face. Range [-180,180]. + float roll_angle = 4; + + // Yaw angle, which indicates the leftward/rightward angle that the face is + // pointing relative to the vertical plane perpendicular to the image. Range + // [-180,180]. + float pan_angle = 5; + + // Pitch angle, which indicates the upwards/downwards angle that the face is + // pointing relative to the image's horizontal plane. Range [-180,180]. + float tilt_angle = 6; + + // Detection confidence. Range [0, 1]. + float detection_confidence = 7; + + // Face landmarking confidence. Range [0, 1]. + float landmarking_confidence = 8; + + // Joy likelihood. + Likelihood joy_likelihood = 9; + + // Sorrow likelihood. + Likelihood sorrow_likelihood = 10; + + // Anger likelihood. + Likelihood anger_likelihood = 11; + + // Surprise likelihood. + Likelihood surprise_likelihood = 12; + + // Under-exposed likelihood. + Likelihood under_exposed_likelihood = 13; + + // Blurred likelihood. + Likelihood blurred_likelihood = 14; + + // Headwear likelihood. + Likelihood headwear_likelihood = 15; +} + +// Detected entity location information. +message LocationInfo { + // lat/long location coordinates. + google.type.LatLng lat_lng = 1; +} + +// A `Property` consists of a user-supplied name/value pair. +message Property { + // Name of the property. + string name = 1; + + // Value of the property. + string value = 2; +} + +// Set of detected entity features. +message EntityAnnotation { + // Opaque entity ID. Some IDs may be available in + // [Google Knowledge Graph Search API](https://developers.google.com/knowledge-graph/). + string mid = 1; + + // The language code for the locale in which the entity textual + // `description` is expressed. + string locale = 2; + + // Entity textual description, expressed in its `locale` language. + string description = 3; + + // Overall score of the result. Range [0, 1]. + float score = 4; + + // The accuracy of the entity detection in an image. + // For example, for an image in which the "Eiffel Tower" entity is detected, + // this field represents the confidence that there is a tower in the query + // image. Range [0, 1]. + float confidence = 5; + + // The relevancy of the ICA (Image Content Annotation) label to the + // image. For example, the relevancy of "tower" is likely higher to an image + // containing the detected "Eiffel Tower" than to an image containing a + // detected distant towering building, even though the confidence that + // there is a tower in each image may be the same. Range [0, 1]. + float topicality = 6; + + // Image region to which this entity belongs. Currently not produced + // for `LABEL_DETECTION` features. For `TEXT_DETECTION` (OCR), `boundingPoly`s + // are produced for the entire text detected in an image region, followed by + // `boundingPoly`s for each word within the detected text. 
+ BoundingPoly bounding_poly = 7; + + // The location information for the detected entity. Multiple + // `LocationInfo` elements can be present because one location may + // indicate the location of the scene in the image, and another location + // may indicate the location of the place where the image was taken. + // Location information is usually present for landmarks. + repeated LocationInfo locations = 8; + + // Some entities may have optional user-supplied `Property` (name/value) + // fields, such a score or string that qualifies the entity. + repeated Property properties = 9; +} + +// Set of features pertaining to the image, computed by computer vision +// methods over safe-search verticals (for example, adult, spoof, medical, +// violence). +message SafeSearchAnnotation { + // Represents the adult content likelihood for the image. + Likelihood adult = 1; + + // Spoof likelihood. The likelihood that an modification + // was made to the image's canonical version to make it appear + // funny or offensive. + Likelihood spoof = 2; + + // Likelihood that this is a medical image. + Likelihood medical = 3; + + // Violence likelihood. + Likelihood violence = 4; +} + +// Rectangle determined by min and max `LatLng` pairs. +message LatLongRect { + // Min lat/long pair. + google.type.LatLng min_lat_lng = 1; + + // Max lat/long pair. + google.type.LatLng max_lat_lng = 2; +} + +// Color information consists of RGB channels, score, and the fraction of +// the image that the color occupies in the image. +message ColorInfo { + // RGB components of the color. + google.type.Color color = 1; + + // Image-specific score for this color. Value in range [0, 1]. + float score = 2; + + // The fraction of pixels the color occupies in the image. + // Value in range [0, 1]. + float pixel_fraction = 3; +} + +// Set of dominant colors and their corresponding scores. +message DominantColorsAnnotation { + // RGB color values with their score and pixel fraction. + repeated ColorInfo colors = 1; +} + +// Stores image properties, such as dominant colors. +message ImageProperties { + // If present, dominant colors completed successfully. + DominantColorsAnnotation dominant_colors = 1; +} + +// Single crop hint that is used to generate a new crop when serving an image. +message CropHint { + // The bounding polygon for the crop region. The coordinates of the bounding + // box are in the original image's scale, as returned in `ImageParams`. + BoundingPoly bounding_poly = 1; + + // Confidence of this being a salient region. Range [0, 1]. + float confidence = 2; + + // Fraction of importance of this salient region with respect to the original + // image. + float importance_fraction = 3; +} + +// Set of crop hints that are used to generate new crops when serving images. +message CropHintsAnnotation { + repeated CropHint crop_hints = 1; +} + +// Parameters for crop hints annotation request. +message CropHintsParams { + // Aspect ratios in floats, representing the ratio of the width to the height + // of the image. For example, if the desired aspect ratio is 4/3, the + // corresponding float value should be 1.33333. If not specified, the + // best possible crop is returned. The number of provided aspect ratios is + // limited to a maximum of 16; any aspect ratios provided after the 16th are + // ignored. + repeated float aspect_ratios = 1; +} + +// Image context and/or feature-specific parameters. +message ImageContext { + // lat/long rectangle that specifies the location of the image. 
+ LatLongRect lat_long_rect = 1; + + // List of languages to use for TEXT_DETECTION. In most cases, an empty value + // yields the best results since it enables automatic language detection. For + // languages based on the Latin alphabet, setting `language_hints` is not + // needed. In rare cases, when the language of the text in the image is known, + // setting a hint will help get better results (although it will be a + // significant hindrance if the hint is wrong). Text detection returns an + // error if one or more of the specified languages is not one of the + // [supported languages](/vision/docs/languages). + repeated string language_hints = 2; + + // Parameters for crop hints annotation request. + CropHintsParams crop_hints_params = 4; +} + +// Request for performing Google Cloud Vision API tasks over a user-provided +// image, with user-requested features. +message AnnotateImageRequest { + // The image to be processed. + Image image = 1; + + // Requested features. + repeated Feature features = 2; + + // Additional context that may accompany the image. + ImageContext image_context = 3; +} + +// Response to an image annotation request. +message AnnotateImageResponse { + // If present, face detection has completed successfully. + repeated FaceAnnotation face_annotations = 1; + + // If present, landmark detection has completed successfully. + repeated EntityAnnotation landmark_annotations = 2; + + // If present, logo detection has completed successfully. + repeated EntityAnnotation logo_annotations = 3; + + // If present, label detection has completed successfully. + repeated EntityAnnotation label_annotations = 4; + + // If present, text (OCR) detection or document (OCR) text detection has + // completed successfully. + repeated EntityAnnotation text_annotations = 5; + + // If present, text (OCR) detection or document (OCR) text detection has + // completed successfully. + // This annotation provides the structural hierarchy for the OCR detected + // text. + TextAnnotation full_text_annotation = 12; + + // If present, safe-search annotation has completed successfully. + SafeSearchAnnotation safe_search_annotation = 6; + + // If present, image properties were extracted successfully. + ImageProperties image_properties_annotation = 8; + + // If present, crop hints have completed successfully. + CropHintsAnnotation crop_hints_annotation = 11; + + // If present, web detection has completed successfully. + WebDetection web_detection = 13; + + // If set, represents the error message for the operation. + // Note that filled-in image annotations are guaranteed to be + // correct, even when `error` is set. + google.rpc.Status error = 9; +} + +// Multiple image annotation requests are batched into a single service call. +message BatchAnnotateImagesRequest { + // Individual image annotation requests for this batch. + repeated AnnotateImageRequest requests = 1; +} + +// Response to a batch image annotation request. +message BatchAnnotateImagesResponse { + // Individual responses to image annotation requests within the batch. + repeated AnnotateImageResponse responses = 1; +} + +// A bucketized representation of likelihood, which is intended to give clients +// highly stable results across model upgrades. +enum Likelihood { + // Unknown likelihood. + UNKNOWN = 0; + + // It is very unlikely that the image belongs to the specified vertical. + VERY_UNLIKELY = 1; + + // It is unlikely that the image belongs to the specified vertical. 
+ UNLIKELY = 2; + + // It is possible that the image belongs to the specified vertical. + POSSIBLE = 3; + + // It is likely that the image belongs to the specified vertical. + LIKELY = 4; + + // It is very likely that the image belongs to the specified vertical. + VERY_LIKELY = 5; +} diff --git a/packages/google-cloud-vision/protos/google/cloud/vision/v1/text_annotation.proto b/packages/google-cloud-vision/protos/google/cloud/vision/v1/text_annotation.proto new file mode 100644 index 00000000000..938820a3a00 --- /dev/null +++ b/packages/google-cloud-vision/protos/google/cloud/vision/v1/text_annotation.proto @@ -0,0 +1,237 @@ +// Copyright 2016 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.vision.v1; + +import "google/api/annotations.proto"; +import "google/cloud/vision/v1/geometry.proto"; + +option cc_enable_arenas = true; +option go_package = "google.golang.org/genproto/googleapis/cloud/vision/v1;vision"; +option java_multiple_files = true; +option java_outer_classname = "TextAnnotationProto"; +option java_package = "com.google.cloud.vision.v1"; + + +// TextAnnotation contains a structured representation of OCR extracted text. +// The hierarchy of an OCR extracted text structure is like this: +// TextAnnotation -> Page -> Block -> Paragraph -> Word -> Symbol +// Each structural component, starting from Page, may further have their own +// properties. Properties describe detected languages, breaks etc.. Please +// refer to the [google.cloud.vision.v1.TextAnnotation.TextProperty][google.cloud.vision.v1.TextAnnotation.TextProperty] message +// definition below for more detail. +message TextAnnotation { + // Detected language for a structural component. + message DetectedLanguage { + // The BCP-47 language code, such as "en-US" or "sr-Latn". For more + // information, see + // http://www.unicode.org/reports/tr35/#Unicode_locale_identifier. + string language_code = 1; + + // Confidence of detected language. Range [0, 1]. + float confidence = 2; + } + + // Detected start or end of a structural component. + message DetectedBreak { + // Enum to denote the type of break found. New line, space etc. + enum BreakType { + // Unknown break label type. + UNKNOWN = 0; + + // Regular space. + SPACE = 1; + + // Sure space (very wide). + SURE_SPACE = 2; + + // Line-wrapping break. + EOL_SURE_SPACE = 3; + + // End-line hyphen that is not present in text; does + HYPHEN = 4; + + // not co-occur with SPACE, LEADER_SPACE, or + // LINE_BREAK. + // Line break that ends a paragraph. + LINE_BREAK = 5; + } + + BreakType type = 1; + + // True if break prepends the element. + bool is_prefix = 2; + } + + // Additional information detected on the structural component. + message TextProperty { + // A list of detected languages together with confidence. + repeated DetectedLanguage detected_languages = 1; + + // Detected start or end of a text segment. + DetectedBreak detected_break = 2; + } + + // List of pages detected by OCR. 
+ repeated Page pages = 1; + + // UTF-8 text detected on the pages. + string text = 2; +} + +// Detected page from OCR. +message Page { + // Additional information detected on the page. + TextAnnotation.TextProperty property = 1; + + // Page width in pixels. + int32 width = 2; + + // Page height in pixels. + int32 height = 3; + + // List of blocks of text, images etc on this page. + repeated Block blocks = 4; +} + +// Logical element on the page. +message Block { + // Type of a block (text, image etc) as identified by OCR. + enum BlockType { + // Unknown block type. + UNKNOWN = 0; + + // Regular text block. + TEXT = 1; + + // Table block. + TABLE = 2; + + // Image block. + PICTURE = 3; + + // Horizontal/vertical line box. + RULER = 4; + + // Barcode block. + BARCODE = 5; + } + + // Additional information detected for the block. + TextAnnotation.TextProperty property = 1; + + // The bounding box for the block. + // The vertices are in the order of top-left, top-right, bottom-right, + // bottom-left. When a rotation of the bounding box is detected the rotation + // is represented as around the top-left corner as defined when the text is + // read in the 'natural' orientation. + // For example: + // * when the text is horizontal it might look like: + // 0----1 + // | | + // 3----2 + // * when it's rotated 180 degrees around the top-left corner it becomes: + // 2----3 + // | | + // 1----0 + // and the vertice order will still be (0, 1, 2, 3). + BoundingPoly bounding_box = 2; + + // List of paragraphs in this block (if this blocks is of type text). + repeated Paragraph paragraphs = 3; + + // Detected block type (text, image etc) for this block. + BlockType block_type = 4; +} + +// Structural unit of text representing a number of words in certain order. +message Paragraph { + // Additional information detected for the paragraph. + TextAnnotation.TextProperty property = 1; + + // The bounding box for the paragraph. + // The vertices are in the order of top-left, top-right, bottom-right, + // bottom-left. When a rotation of the bounding box is detected the rotation + // is represented as around the top-left corner as defined when the text is + // read in the 'natural' orientation. + // For example: + // * when the text is horizontal it might look like: + // 0----1 + // | | + // 3----2 + // * when it's rotated 180 degrees around the top-left corner it becomes: + // 2----3 + // | | + // 1----0 + // and the vertice order will still be (0, 1, 2, 3). + BoundingPoly bounding_box = 2; + + // List of words in this paragraph. + repeated Word words = 3; +} + +// A word representation. +message Word { + // Additional information detected for the word. + TextAnnotation.TextProperty property = 1; + + // The bounding box for the word. + // The vertices are in the order of top-left, top-right, bottom-right, + // bottom-left. When a rotation of the bounding box is detected the rotation + // is represented as around the top-left corner as defined when the text is + // read in the 'natural' orientation. + // For example: + // * when the text is horizontal it might look like: + // 0----1 + // | | + // 3----2 + // * when it's rotated 180 degrees around the top-left corner it becomes: + // 2----3 + // | | + // 1----0 + // and the vertice order will still be (0, 1, 2, 3). + BoundingPoly bounding_box = 2; + + // List of symbols in the word. + // The order of the symbols follows the natural reading order. + repeated Symbol symbols = 3; +} + +// A single symbol representation. 
+message Symbol { + // Additional information detected for the symbol. + TextAnnotation.TextProperty property = 1; + + // The bounding box for the symbol. + // The vertices are in the order of top-left, top-right, bottom-right, + // bottom-left. When a rotation of the bounding box is detected the rotation + // is represented as around the top-left corner as defined when the text is + // read in the 'natural' orientation. + // For example: + // * when the text is horizontal it might look like: + // 0----1 + // | | + // 3----2 + // * when it's rotated 180 degrees around the top-left corner it becomes: + // 2----3 + // | | + // 1----0 + // and the vertice order will still be (0, 1, 2, 3). + BoundingPoly bounding_box = 2; + + // The actual UTF-8 representation of the symbol. + string text = 3; +} diff --git a/packages/google-cloud-vision/protos/google/cloud/vision/v1/web_detection.proto b/packages/google-cloud-vision/protos/google/cloud/vision/v1/web_detection.proto new file mode 100644 index 00000000000..6da89756ee3 --- /dev/null +++ b/packages/google-cloud-vision/protos/google/cloud/vision/v1/web_detection.proto @@ -0,0 +1,78 @@ +// Copyright 2016 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.vision.v1; + +import "google/api/annotations.proto"; + +option cc_enable_arenas = true; +option go_package = "google.golang.org/genproto/googleapis/cloud/vision/v1;vision"; +option java_multiple_files = true; +option java_outer_classname = "WebDetectionProto"; +option java_package = "com.google.cloud.vision.v1"; + + +// Relevant information for the image from the Internet. +message WebDetection { + // Entity deduced from similar images on the Internet. + message WebEntity { + // Opaque entity ID. + string entity_id = 1; + + // Overall relevancy score for the entity. + // Not normalized and not comparable across different image queries. + float score = 2; + + // Canonical description of the entity, in English. + string description = 3; + } + + // Metadata for online images. + message WebImage { + // The result image URL. + string url = 1; + + // Overall relevancy score for the image. + // Not normalized and not comparable across different image queries. + float score = 2; + } + + // Metadata for web pages. + message WebPage { + // The result web page URL. + string url = 1; + + // Overall relevancy score for the web page. + // Not normalized and not comparable across different image queries. + float score = 2; + } + + // Deduced entities from similar images on the Internet. + repeated WebEntity web_entities = 1; + + // Fully matching images from the Internet. + // They're definite neardups and most often a copy of the query image with + // merely a size change. + repeated WebImage full_matching_images = 2; + + // Partial matching images from the Internet. + // Those images are similar enough to share some key-point features. For + // example an original image will likely have partial matching for its crops. 
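On the Node.js side these fields come back (in camelCase) under `webDetection` on the annotate response. A short sketch using the `webDetection` helper added elsewhere in this change — the `gs://` URI is hypothetical:

```js
// Sketch: web detection against a (hypothetical) image in Cloud Storage.
const vision = require('@google-cloud/vision');
const client = new vision.ImageAnnotatorClient();

client
  .webDetection({image: {source: {imageUri: 'gs://my-bucket/city.jpg'}}})
  .then(([result]) => {
    const web = result.webDetection;
    (web.webEntities || []).forEach(entity => {
      console.log(`Entity: ${entity.description} (score: ${entity.score})`);
    });
    (web.fullMatchingImages || []).forEach(image => {
      console.log(`Full match: ${image.url}`);
    });
  })
  .catch(err => console.error('ERROR:', err));
```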
+ repeated WebImage partial_matching_images = 3; + + // Web pages containing the matching images from the Internet. + repeated WebPage pages_with_matching_images = 4; +} diff --git a/packages/google-cloud-vision/samples/.eslintrc.yml b/packages/google-cloud-vision/samples/.eslintrc.yml new file mode 100644 index 00000000000..282535f55f6 --- /dev/null +++ b/packages/google-cloud-vision/samples/.eslintrc.yml @@ -0,0 +1,3 @@ +--- +rules: + no-console: off diff --git a/packages/google-cloud-vision/samples/README.md b/packages/google-cloud-vision/samples/README.md index 4b5688b3562..8d78307fed0 100644 --- a/packages/google-cloud-vision/samples/README.md +++ b/packages/google-cloud-vision/samples/README.md @@ -1,65 +1,62 @@ Google Cloud Platform logo -# Google Cloud Vision API Node.js Samples +# Google Cloud Vision API: Node.js Samples -The [Cloud Vision API][vision_docs] allows developers to easily integrate vision -detection features within applications, including image labeling, face and -landmark detection, optical character recognition (OCR), and tagging of explicit -content. +[![Open in Cloud Shell][shell_img]][shell_link] -[vision_docs]: https://cloud.google.com/vision/docs/ +The [Cloud Vision API](https://cloud.google.com/vision/docs) allows developers to easily integrate vision detection features within applications, including image labeling, face and landmark detection, optical character recognition (OCR), and tagging of explicit content. ## Table of Contents -* [Setup](#setup) +* [Before you begin](#before-you-begin) * [Samples](#samples) - * [Detection snippets](#detection-snippets) - * [Face detection](#face-detection) - * [Text detection](#text-detection) + * [Detection samples](#detection-samples) -## Setup +## Before you begin -1. Read [Prerequisites][prereq] and [How to run a sample][run] first. -1. Install dependencies: - - npm install - -[prereq]: ../README.md#prerequisities -[run]: ../README.md#how-to-run-a-sample +Before running the samples, make sure you've followed the steps in the +[Before you begin section](../README.md#before-you-begin) of the client +library's README. ## Samples -### Detection snippets +### Detection samples + +View the [source code][detect_0_code]. -View the [documentation][detect_docs] or the [source code][detect_code]. +[![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-vision&page=editor&open_in_editor=samples/detect.js,samples/README.md) __Usage:__ `node detect.js --help` ``` +detect.js + Commands: - faces Detects faces in a local image file. - faces-gcs Detects faces in an image in Google Cloud Storage. - labels Detects labels in a local image file. - labels-gcs Detects labels in an image in Google Cloud Storage. - landmarks Detects landmarks in a local image file. - landmarks-gcs Detects landmarks in an image in Google Cloud Storage. - text Detects text in a local image file. - text-gcs Detects text in an image in Google Cloud Storage. - logos Detects logos in a local image file. - logos-gcs Detects logos in an image in Google Cloud Storage. - properties Detects image properties in a local image file. - properties-gcs Detects image properties in an image in Google Cloud Storage. - safe-search Detects safe search properties in a local image file. - safe-search-gcs Detects safe search properties in an image in Google Cloud Storage. - crops Detects crop hints in a local image file. - crops-gcs Detects crop hints in an image in Google Cloud Storage. 
- web Finds similar photos on the web for a local image file. - web-gcs Finds similar photos on the web for an image in Google Cloud Storage. - fulltext Extracts full text from a local image file. - fulltext-gcs Extracts full text from an image in Google Cloud Storage. + detect.js faces Detects faces in a local image file. + detect.js faces-gcs Detects faces in an image in Google Cloud Storage. + detect.js labels Detects labels in a local image file. + detect.js labels-gcs Detects labels in an image in Google Cloud Storage. + detect.js landmarks Detects landmarks in a local image file. + detect.js landmarks-gcs Detects landmarks in an image in Google Cloud Storage. + detect.js text Detects text in a local image file. + detect.js text-gcs Detects text in an image in Google Cloud Storage. + detect.js logos Detects logos in a local image file. + detect.js logos-gcs Detects logos in an image in Google Cloud Storage. + detect.js properties Detects image properties in a local image file. + detect.js properties-gcs Detects image properties in an image in Google Cloud Storage. + detect.js safe-search Detects safe search properties in a local image file. + detect.js safe-search-gcs Detects safe search properties in an image in Google Cloud Storage. + detect.js crops Detects crop hints in a local image file. + detect.js crops-gcs Detects crop hints in an image in Google Cloud Storage. + detect.js web Finds similar photos on the web for a local image file. + detect.js web-gcs Finds similar photos on the web for an image in Google Cloud + Storage. + detect.js fulltext Extracts full text from a local image file. + detect.js fulltext-gcs Extracts full text from an image in Google Cloud Storage. Options: - --help Show help [boolean] + --version Show version number [boolean] + --help Show help [boolean] Examples: node detect.js faces ./resources/face_no_surprise.jpg @@ -86,45 +83,8 @@ Examples: For more information, see https://cloud.google.com/vision/docs ``` -[detect_docs]: https://cloud.google.com/vision/docs -[detect_code]: detect.js - -### Face detection tutorial - -View the [documentation][face_docs] or the [source code][face_code]. - -This sample uses [node-canvas](https://github.com/Automattic/node-canvas) to -draw an output image. node-canvas depends on Cairo, which may require separate -installation. See the node-canvas [installation section][canvas-install] for -details. - -[canvas-install]: https://github.com/Automattic/node-canvas#installation - -__Run the sample:__ - -Usage: `node faceDetection ` - -Example: - - node faceDetection "./resources/face.png" - -[face_docs]: https://cloud.google.com/vision/docs/face-tutorial -[face_code]: faceDetection.js - -### Text detection tutorial - -View the [source code][text_code]. 
- -__Run the sample:__ - -Usage: `node textDetection [args]...` - -Analyze images: - - node textDetection analyze "./resources/" - -Look up words: - - node textDetection lookup the sunbeams in +[detect_0_docs]: https://cloud.google.com/vision/docs +[detect_0_code]: detect.js -[text_code]: textDetection.js +[shell_img]: http://gstatic.com/cloudssh/images/open-btn.png +[shell_link]: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/nodejs-vision&page=editor&open_in_editor=samples/README.md diff --git a/packages/google-cloud-vision/samples/package.json b/packages/google-cloud-vision/samples/package.json index be92f874ebe..b61683f51e8 100644 --- a/packages/google-cloud-vision/samples/package.json +++ b/packages/google-cloud-vision/samples/package.json @@ -4,37 +4,27 @@ "private": true, "license": "Apache-2.0", "author": "Google Inc.", - "repository": { - "type": "git", - "url": "https://github.com/GoogleCloudPlatform/nodejs-docs-samples.git" - }, + "repository": "googleapis/nodejs-vision", "engines": { - "node": ">=4.3.2" + "node": ">=4.0.0" }, "scripts": { - "lint": "samples lint", - "pretest": "npm run lint", - "test": "samples test run --cmd ava -- -T 1m --verbose system-test/*.test.js" + "test": "repo-tools test run --cmd ava -- -T 1m --verbose system-test/*.test.js" }, "dependencies": { - "@google-cloud/storage": "1.1.0", - "@google-cloud/vision": "^0.12.0", - "async": "2.3.0", - "natural": "0.5.1", - "redis": "2.7.1", - "yargs": "7.1.0" + "@google-cloud/storage": "1.4.0", + "@google-cloud/vision": "0.12.0", + "async": "2.5.0", + "natural": "0.5.4", + "redis": "2.8.0", + "yargs": "10.0.3" }, "devDependencies": { - "@google-cloud/nodejs-repo-tools": "1.4.14", - "ava": "0.19.1", - "proxyquire": "1.7.11", - "sinon": "2.1.0" + "@google-cloud/nodejs-repo-tools": "2.1.1", + "ava": "0.23.0", + "uuid": "3.1.0" }, "optionalDependencies": { "canvas": "1.6.5" - }, - "cloud-repo-tools": { - "requiresKeyFile": true, - "requiresProjectId": true } } diff --git a/packages/google-cloud-vision/samples/quickstart.js b/packages/google-cloud-vision/samples/quickstart.js index 60d9ea4ce86..79e265f0965 100644 --- a/packages/google-cloud-vision/samples/quickstart.js +++ b/packages/google-cloud-vision/samples/quickstart.js @@ -17,30 +17,33 @@ // [START vision_quickstart] // Imports the Google Cloud client library -const Vision = require('@google-cloud/vision'); +const vision = require('@google-cloud/vision'); // Creates a client -const vision = new Vision(); +const client = new vision.ImageAnnotatorClient(); // The name of the image file to annotate const fileName = './resources/wakeupcat.jpg'; // Prepare the request object const request = { - source: { - filename: fileName - } + image: { + source: { + filename: fileName, + }, + }, }; // Performs label detection on the image file -vision.labelDetection(request) - .then((results) => { +client + .labelDetection(request) + .then(results => { const labels = results[0].labelAnnotations; console.log('Labels:'); - labels.forEach((label) => console.log(label.description)); + labels.forEach(label => console.log(label.description)); }) - .catch((err) => { + .catch(err => { console.error('ERROR:', err); }); // [END vision_quickstart] diff --git a/packages/google-cloud-vision/smoke-test/image_annotator_smoke_test.js b/packages/google-cloud-vision/smoke-test/image_annotator_smoke_test.js deleted file mode 100644 index 6ee7a38e231..00000000000 --- a/packages/google-cloud-vision/smoke-test/image_annotator_smoke_test.js +++ /dev/null @@ 
-1,51 +0,0 @@ -/* - * Copyright 2017, Google Inc. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -'use strict'; - -describe('ImageAnnotatorSmokeTest', function() { - - it('successfully makes a call to the service', function(done) { - var vision = require('../src'); - - var client = vision.v1({ - // optional auth parameters. - }); - - var gcsImageUri = 'gs://gapic-toolkit/President_Barack_Obama.jpg'; - var source = { - gcsImageUri : gcsImageUri - }; - var image = { - source : source - }; - var type = vision.v1.types.Feature.Type.FACE_DETECTION; - var featuresElement = { - type : type - }; - var features = [featuresElement]; - var requestsElement = { - image : image, - features : features - }; - var requests = [requestsElement]; - client.batchAnnotateImages({requests: requests}).then(function(responses) { - var response = responses[0]; - console.log(response); - }) - .then(done) - .catch(done); - }); -}); \ No newline at end of file diff --git a/packages/google-cloud-vision/src/helpers.js b/packages/google-cloud-vision/src/helpers.js index 2ec01d4cf0f..4af31eb8740 100644 --- a/packages/google-cloud-vision/src/helpers.js +++ b/packages/google-cloud-vision/src/helpers.js @@ -14,33 +14,28 @@ * limitations under the License. */ -/*! - * @module vision/helpers - */ - 'use strict'; -var fs = require('fs'); -var is = require('is'); - -var promisify = require('@google-cloud/common').util.promisify; -var gax = require('google-gax'); -var protoFiles = require('google-proto-files'); +const fs = require('fs'); +const is = require('is'); +const path = require('path'); +const promisify = require('@google-cloud/common').util.promisify; +const gax = require('google-gax'); /*! * Find a given image and fire a callback with the appropriate image structure. * - * @param {Object} image - An object representing what is known about the + * @param {object} image An object representing what is known about the * image. - * @param {Function} callback - The callback to run. + * @param {function} callback The callback to run. */ var coerceImage = (image, callback) => { // If this is a buffer, read it and send the object // that the Vision API expects. if (Buffer.isBuffer(image)) { callback(null, { - content: image.toString('base64') + content: image.toString('base64'), }); return; } @@ -62,29 +57,30 @@ var coerceImage = (image, callback) => { return; }; - /*! * * Return a method that calls annotateImage asking for a single feature. * - * @param {Number} featureValue - The feature being requested. This is taken + * @param {number} featureValue The feature being requested. This is taken * from the Feature.Type enum, and will be an integer. * - * @return {Function} - The function that, when called, will call annotateImage + * @returns {function} The function that, when called, will call annotateImage * asking for the single feature annotation. 
*/ var _createSingleFeatureMethod = featureValue => { return function(annotateImageRequest, callOptions) { - annotateImageRequest.features = annotateImageRequest.features || [{ - type: featureValue, - }]; + annotateImageRequest.features = annotateImageRequest.features || [ + { + type: featureValue, + }, + ]; // If the user submitted explicit features that do not line up with // the precise method called, throw an exception. for (let feature of annotateImageRequest.features) { if (feature.type !== featureValue) { throw new Error( 'Setting explicit features is not supported on this method. ' + - 'Use the #annotateImage method instead.' + 'Use the #annotateImage method instead.' ); } } @@ -93,14 +89,13 @@ var _createSingleFeatureMethod = featureValue => { }; }; - /*! * Return a dictionary-like object with helpers to augment the Vision * GAPIC. * - * @param {string} apiVersion - The API version (e.g. "v1") + * @param {string} apiVersion The API version (e.g. "v1") * - * @return {Object} - An object with keys and functions which are placed + * @returns {object} An object with keys and functions which are placed * onto the pure GAPIC. */ module.exports = apiVersion => { @@ -109,48 +104,56 @@ module.exports = apiVersion => { /** * Annotate a single image with the requested features. * - * @param {Object} request - * A representation of the request being sent to the Vision API. - * This is an [AnnotateImageRequest]{@link AnnotateImageRequest}. - * @param {Object} request.image - * A dictionary-like object representing the image. This should have a - * single key (`source`, `content`). - * - * If the key is `source`, the value should be another object containing - * `imageUri` or `filename` as a key and a string as a value. - * - * If the key is `content`, the value should be a Buffer. - * @param {Array} request.features - * An array of the specific annotation features being requested. - * This should take a form such as: - * [{type: vision.types.Feature.Type.FACE_DETECTION}, - * {type: vision.types.Feature.Type.WEB_DETECTION}] - * @param {Object=} callOptions - * Optional parameters. You can override the default settings for this - * call, e.g, timeout, retries, paginations, etc. See - * [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} - * for the details. - * @param {function(?Error, ?Object)=} callback - * The function which will be called with the result of the API call. - * - * The second parameter to the callback is an object representing - * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. - * @return {Promise} - The promise which resolves to an array. - * The first element of the array is an object representing - * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. - * The promise has a method named "cancel" which cancels the ongoing - * API call. + * @see v1.ImageAnnotatorClient#batchAnnotateImages + * @see google.cloud.vision.v1.AnnotateImageRequest + * + * @method v1.ImageAnnotatorClient#annotateImage + * @param {object} request A representation of the request being sent to the + * Vision API. This is an {@link google.cloud.vision.v1.AnnotateImageRequest AnnotateImageRequest}. + * @param {object} request.image A dictionary-like object representing the + * image. This should have a single key (`source`, `content`). + * + * If the key is `source`, the value should be another object containing + * `imageUri` or `filename` as a key and a string as a value. + * + * If the key is `content`, the value should be a Buffer. 
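Both documented `image` shapes work with any of the helper methods. A brief sketch using `labelDetection` — the local paths are illustrative; the helper accepts a local `source.filename` (as in the quickstart) and raw bytes as a Buffer under `content`:

```js
// Sketch: the two documented `image` shapes, shown with labelDetection.
const fs = require('fs');
const vision = require('@google-cloud/vision');
const client = new vision.ImageAnnotatorClient();

// 1. Reference a file by path (use `imageUri` instead for a gs:// or http URL).
client
  .labelDetection({image: {source: {filename: './resources/wakeupcat.jpg'}}})
  .then(([result]) => {
    (result.labelAnnotations || []).forEach(label => console.log(label.description));
  })
  .catch(err => console.error('ERROR:', err));

// 2. Pass the raw bytes yourself as a Buffer under `content`.
const buffer = fs.readFileSync('./resources/wakeupcat.jpg');
client
  .labelDetection({image: {content: buffer}})
  .then(([result]) => console.log((result.labelAnnotations || []).length))
  .catch(err => console.error('ERROR:', err));
```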
+ * @param {object[]} request.features An array of the specific annotation + * features being requested. This should take a form such as: + * + * [{type: vision.types.Feature.Type.FACE_DETECTION}, + * {type: vision.types.Feature.Type.WEB_DETECTION}] + * + * @param {object} [callOptions] Optional parameters. You can override the + * default settings for this call, e.g, timeout, retries, paginations, + * etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} + * for the details. + * @param {function(?Error, ?object)} [callback] The function which will be + * called with the result of the API call. + * + * The second parameter to the callback is an object representing + * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. + * @returns {Promise} The promise which resolves to an array. The first + * element of the array is an object representing + * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. + * The promise has a method named "cancel" which cancels the ongoing API + * call. * * @example - * var request = { + * const vision = require('@google-cloud/vision'); + * const client = new vision.ImageAnnotatorClient(); + * + * const request = { * image: {source: {imageUri: 'gs://path/to/image.jpg'}}, * features: [], * }; - * vision.annotateImage(request).then(response => { - * // doThingsWith(response); - * }).catch(err => { - * console.error(err); - * }); + * client + * .annotateImage(request) + * .then(response => { + * // doThingsWith(response); + * }) + * .catch(err => { + * console.error(err); + * }); */ methods.annotateImage = promisify(function(request, callOptions, callback) { // If a callback was provided and options were skipped, normalize @@ -174,7 +177,8 @@ module.exports = apiVersion => { request.image = image; // Call the GAPIC batch annotation function. - return this.batchAnnotateImages([request], callOptions, (err, r) => { + let requests = {requests: [request]}; + return this.batchAnnotateImages(requests, callOptions, (err, r) => { // If there is an error, handle it. if (err) { return callback(err); @@ -194,126 +198,151 @@ module.exports = apiVersion => { // them and create single-feature methods for each dynamically, for // documentation purpose, we manually list all the single-feature methods // below. - const features = gax.grpc().load([{ - root: protoFiles('..'), - file: `google/cloud/vision/${apiVersion}/image_annotator.proto`, - }]).google.cloud.vision[apiVersion].Feature.Type; + const features = gax + .grpc() + .loadProto( + path.join(__dirname, '..', 'protos'), + `google/cloud/vision/${apiVersion}/image_annotator.proto` + ).google.cloud.vision[apiVersion].Feature.Type.values; /** * Annotate a single image with face detection. * - * @param {Object} request - * A representation of the request being sent to the Vision API. - * This is an [AnnotateImageRequest]{@link AnnotateImageRequest}. - * @param {Object} request.image - * A dictionary-like object representing the image. This should have a - * single key (`source`, `content`). - * - * If the key is `source`, the value should be another object containing - * `imageUri` or `filename` as a key and a string as a value. - * - * If the key is `content`, the value should be a Buffer. - * @param {Object=} callOptions - * Optional parameters. You can override the default settings for this - * call, e.g, timeout, retries, paginations, etc. See - * [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} - * for the details. 
- * @param {function(?Error, ?Object)=} callback - * The function which will be called with the result of the API call. - * - * The second parameter to the callback is an object representing - * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. - * @return {Promise} - The promise which resolves to an array. - * The first element of the array is an object representing - * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. - * The promise has a method named "cancel" which cancels the ongoing - * API call. + * @see v1.ImageAnnotatorClient#batchAnnotateImages + * @see google.cloud.vision.v1.AnnotateImageRequest + * + * @method v1.ImageAnnotatorClient#faceDetection + * @param {object} request A representation of the request being sent to the + * Vision API. This is an {@link google.cloud.vision.v1.AnnotateImageRequest AnnotateImageRequest}. + * @param {object} request.image A dictionary-like object representing the + * image. This should have a single key (`source`, `content`). + * + * If the key is `source`, the value should be another object containing + * `imageUri` or `filename` as a key and a string as a value. + * + * If the key is `content`, the value should be a Buffer. + * @param {object} [callOptions] Optional parameters. You can override the + * default settings for this call, e.g, timeout, retries, paginations, + * etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} + * for the details. + * @param {function(?Error, ?object)} [callback] The function which will be + * called with the result of the API call. + * + * The second parameter to the callback is an object representing + * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. + * @returns {Promise} The promise which resolves to an array. The first + * element of the array is an object representing + * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. + * The promise has a method named "cancel" which cancels the ongoing API + * call. * * @example - * var image = { - * source: {imageUri: 'gs://path/to/image.jpg'} + * const vision = require('@google-cloud/vision'); + * const client = new vision.ImageAnnotatorClient(); + * + * const request = { + * image: { + * source: {imageUri: 'gs://path/to/image.jpg'} + * } * }; - * vision.faceDetection(image).then(response => { - * // doThingsWith(response); - * }).catch(err => { - * console.error(err); - * }); + * + * client + * .faceDetection(request) + * .then(response => { + * // doThingsWith(response); + * }) + * .catch(err => { + * console.error(err); + * }); */ - methods.faceDetection = - promisify(_createSingleFeatureMethod(features.FACE_DETECTION)); + methods.faceDetection = promisify( + _createSingleFeatureMethod(features.FACE_DETECTION) + ); /** * Annotate a single image with landmark detection. * - * @param {Object} request - * A representation of the request being sent to the Vision API. - * This is an [AnnotateImageRequest]{@link AnnotateImageRequest}. - * @param {Object} request.image - * A dictionary-like object representing the image. This should have a - * single key (`source`, `content`). - * - * If the key is `source`, the value should be another object containing - * `imageUri` or `filename` as a key and a string as a value. - * - * If the key is `content`, the value should be a Buffer. - * @param {Object=} callOptions - * Optional parameters. You can override the default settings for this - * call, e.g, timeout, retries, paginations, etc. 
See - * [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} - * for the details. - * @param {function(?Error, ?Object)=} callback - * The function which will be called with the result of the API call. - * - * The second parameter to the callback is an object representing - * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. - * @return {Promise} - The promise which resolves to an array. - * The first element of the array is an object representing - * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. - * The promise has a method named "cancel" which cancels the ongoing - * API call. + * @see v1.ImageAnnotatorClient#batchAnnotateImages + * @see google.cloud.vision.v1.AnnotateImageRequest + * + * @method v1.ImageAnnotatorClient#landmarkDetection + * @param {object} request A representation of the request being sent to the + * Vision API. This is an {@link google.cloud.vision.v1.AnnotateImageRequest AnnotateImageRequest}. + * @param {object} request.image A dictionary-like object representing the + * image. This should have a single key (`source`, `content`). + * + * If the key is `source`, the value should be another object containing + * `imageUri` or `filename` as a key and a string as a value. + * + * If the key is `content`, the value should be a Buffer. + * @param {object} [callOptions] Optional parameters. You can override the + * default settings for this call, e.g, timeout, retries, paginations, + * etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} + * for the details. + * @param {function(?Error, ?object)} [callback] The function which will be + * called with the result of the API call. + * + * The second parameter to the callback is an object representing + * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. + * @returns {Promise} The promise which resolves to an array. The first + * element of the array is an object representing + * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. + * The promise has a method named "cancel" which cancels the ongoing API + * call. * * @example - * var image = { - * source: {imageUri: 'gs://path/to/image.jpg'} + * const vision = require('@google-cloud/vision'); + * const client = new vision.ImageAnnotatorClient(); + * + * const request = { + * image: { + * source: {imageUri: 'gs://path/to/image.jpg'} + * } * }; - * vision.landmarkDetection(image).then(response => { - * // doThingsWith(response); - * }).catch(err => { - * console.error(err); - * }); + * + * client + * .landmarkDetection(request) + * .then(response => { + * // doThingsWith(response); + * }) + * .catch(err => { + * console.error(err); + * }); */ - methods.landmarkDetection = - promisify(_createSingleFeatureMethod(features.LANDMARK_DETECTION)); + methods.landmarkDetection = promisify( + _createSingleFeatureMethod(features.LANDMARK_DETECTION) + ); /** * Annotate a single image with logo detection. * - * @param {Object} request - * A representation of the request being sent to the Vision API. - * This is an [AnnotateImageRequest]{@link AnnotateImageRequest}. - * @param {Object} request.image - * A dictionary-like object representing the image. This should have a - * single key (`source`, `content`). - * - * If the key is `source`, the value should be another object containing - * `imageUri` or `filename` as a key and a string as a value. - * - * If the key is `content`, the value should be a Buffer. 
- * @param {Object=} callOptions - * Optional parameters. You can override the default settings for this - * call, e.g, timeout, retries, paginations, etc. See - * [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} - * for the details. - * @param {function(?Error, ?Object)=} callback - * The function which will be called with the result of the API call. - * - * The second parameter to the callback is an object representing - * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. - * @return {Promise} - The promise which resolves to an array. - * The first element of the array is an object representing - * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. - * The promise has a method named "cancel" which cancels the ongoing - * API call. + * @see v1.ImageAnnotatorClient#batchAnnotateImages + * @see google.cloud.vision.v1.AnnotateImageRequest + * + * @method v1.ImageAnnotatorClient#logoDetection + * @param {object} request A representation of the request being sent to the + * Vision API. This is an {@link google.cloud.vision.v1.AnnotateImageRequest AnnotateImageRequest}. + * @param {object} request.image A dictionary-like object representing the + * image. This should have a single key (`source`, `content`). + * + * If the key is `source`, the value should be another object containing + * `imageUri` or `filename` as a key and a string as a value. + * + * If the key is `content`, the value should be a Buffer. + * @param {object} [callOptions] Optional parameters. You can override the + * default settings for this call, e.g, timeout, retries, paginations, + * etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} + * for the details. + * @param {function(?Error, ?object)} [callback] The function which will be + * called with the result of the API call. + * + * The second parameter to the callback is an object representing + * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. + * @returns {Promise} The promise which resolves to an array. The first + * element of the array is an object representing + * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. + * The promise has a method named "cancel" which cancels the ongoing API + * call. * * @example * var image = { @@ -325,38 +354,40 @@ module.exports = apiVersion => { * console.error(err); * }); */ - methods.logoDetection = - promisify(_createSingleFeatureMethod(features.LOGO_DETECTION)); + methods.logoDetection = promisify( + _createSingleFeatureMethod(features.LOGO_DETECTION) + ); /** * Annotate a single image with label detection. * - * @param {Object} request - * A representation of the request being sent to the Vision API. - * This is an [AnnotateImageRequest]{@link AnnotateImageRequest}. - * @param {Object} request.image - * A dictionary-like object representing the image. This should have a - * single key (`source`, `content`). - * - * If the key is `source`, the value should be another object containing - * `imageUri` or `filename` as a key and a string as a value. - * - * If the key is `content`, the value should be a Buffer. - * @param {Object=} callOptions - * Optional parameters. You can override the default settings for this - * call, e.g, timeout, retries, paginations, etc. See - * [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} - * for the details. 
- * @param {function(?Error, ?Object)=} callback - * The function which will be called with the result of the API call. - * - * The second parameter to the callback is an object representing - * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. - * @return {Promise} - The promise which resolves to an array. - * The first element of the array is an object representing - * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. - * The promise has a method named "cancel" which cancels the ongoing - * API call. + * @see v1.ImageAnnotatorClient#batchAnnotateImages + * @see google.cloud.vision.v1.AnnotateImageRequest + * + * @method v1.ImageAnnotatorClient#labelDetection + * @param {object} request A representation of the request being sent to the + * Vision API. This is an {@link google.cloud.vision.v1.AnnotateImageRequest AnnotateImageRequest}. + * @param {object} request.image A dictionary-like object representing the + * image. This should have a single key (`source`, `content`). + * + * If the key is `source`, the value should be another object containing + * `imageUri` or `filename` as a key and a string as a value. + * + * If the key is `content`, the value should be a Buffer. + * @param {object} [callOptions] Optional parameters. You can override the + * default settings for this call, e.g, timeout, retries, paginations, + * etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} + * for the details. + * @param {function(?Error, ?object)} [callback] The function which will be + * called with the result of the API call. + * + * The second parameter to the callback is an object representing + * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. + * @returns {Promise} The promise which resolves to an array. The first + * element of the array is an object representing + * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. + * The promise has a method named "cancel" which cancels the ongoing API + * call. * * @example * var image = { @@ -368,38 +399,40 @@ module.exports = apiVersion => { * console.error(err); * }); */ - methods.labelDetection = - promisify(_createSingleFeatureMethod(features.LABEL_DETECTION)); + methods.labelDetection = promisify( + _createSingleFeatureMethod(features.LABEL_DETECTION) + ); /** * Annotate a single image with text detection. * - * @param {Object} request - * A representation of the request being sent to the Vision API. - * This is an [AnnotateImageRequest]{@link AnnotateImageRequest}. - * @param {Object} request.image - * A dictionary-like object representing the image. This should have a - * single key (`source`, `content`). - * - * If the key is `source`, the value should be another object containing - * `imageUri` or `filename` as a key and a string as a value. - * - * If the key is `content`, the value should be a Buffer. - * @param {Object=} callOptions - * Optional parameters. You can override the default settings for this - * call, e.g, timeout, retries, paginations, etc. See - * [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} - * for the details. - * @param {function(?Error, ?Object)=} callback - * The function which will be called with the result of the API call. - * - * The second parameter to the callback is an object representing - * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. - * @return {Promise} - The promise which resolves to an array. 
- * The first element of the array is an object representing - * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. - * The promise has a method named "cancel" which cancels the ongoing - * API call. + * @see v1.ImageAnnotatorClient#batchAnnotateImages + * @see google.cloud.vision.v1.AnnotateImageRequest + * + * @method v1.ImageAnnotatorClient#textDetection + * @param {object} request A representation of the request being sent to the + * Vision API. This is an {@link google.cloud.vision.v1.AnnotateImageRequest AnnotateImageRequest}. + * @param {object} request.image A dictionary-like object representing the + * image. This should have a single key (`source`, `content`). + * + * If the key is `source`, the value should be another object containing + * `imageUri` or `filename` as a key and a string as a value. + * + * If the key is `content`, the value should be a Buffer. + * @param {object} [callOptions] Optional parameters. You can override the + * default settings for this call, e.g, timeout, retries, paginations, + * etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} + * for the details. + * @param {function(?Error, ?object)} [callback] The function which will be + * called with the result of the API call. + * + * The second parameter to the callback is an object representing + * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. + * @returns {Promise} The promise which resolves to an array. The first + * element of the array is an object representing + * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. + * The promise has a method named "cancel" which cancels the ongoing API + * call. * * @example * var image = { @@ -411,38 +444,40 @@ module.exports = apiVersion => { * console.error(err); * }); */ - methods.textDetection = - promisify(_createSingleFeatureMethod(features.TEXT_DETECTION)); + methods.textDetection = promisify( + _createSingleFeatureMethod(features.TEXT_DETECTION) + ); /** * Annotate a single image with document text detection. * - * @param {Object} request - * A representation of the request being sent to the Vision API. - * This is an [AnnotateImageRequest]{@link AnnotateImageRequest}. - * @param {Object} request.image - * A dictionary-like object representing the image. This should have a - * single key (`source`, `content`). - * - * If the key is `source`, the value should be another object containing - * `imageUri` or `filename` as a key and a string as a value. - * - * If the key is `content`, the value should be a Buffer. - * @param {Object=} callOptions - * Optional parameters. You can override the default settings for this - * call, e.g, timeout, retries, paginations, etc. See - * [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} - * for the details. - * @param {function(?Error, ?Object)=} callback - * The function which will be called with the result of the API call. - * - * The second parameter to the callback is an object representing - * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. - * @return {Promise} - The promise which resolves to an array. - * The first element of the array is an object representing - * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. - * The promise has a method named "cancel" which cancels the ongoing - * API call. 
+ * @see v1.ImageAnnotatorClient#batchAnnotateImages + * @see google.cloud.vision.v1.AnnotateImageRequest + * + * @method v1.ImageAnnotatorClient#documentTextDetection + * @param {object} request A representation of the request being sent to the + * Vision API. This is an {@link google.cloud.vision.v1.AnnotateImageRequest AnnotateImageRequest}. + * @param {object} request.image A dictionary-like object representing the + * image. This should have a single key (`source`, `content`). + * + * If the key is `source`, the value should be another object containing + * `imageUri` or `filename` as a key and a string as a value. + * + * If the key is `content`, the value should be a Buffer. + * @param {object} [callOptions] Optional parameters. You can override the + * default settings for this call, e.g, timeout, retries, paginations, + * etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} + * for the details. + * @param {function(?Error, ?object)} [callback] The function which will be + * called with the result of the API call. + * + * The second parameter to the callback is an object representing + * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. + * @returns {Promise} The promise which resolves to an array. The first + * element of the array is an object representing + * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. + * The promise has a method named "cancel" which cancels the ongoing API + * call. * * @example * var image = { @@ -454,38 +489,40 @@ module.exports = apiVersion => { * console.error(err); * }); */ - methods.documentTextDetection = - promisify(_createSingleFeatureMethod(features.DOCUMENT_TEXT_DETECTION)); + methods.documentTextDetection = promisify( + _createSingleFeatureMethod(features.DOCUMENT_TEXT_DETECTION) + ); /** * Annotate a single image with safe search detection. * - * @param {Object} request - * A representation of the request being sent to the Vision API. - * This is an [AnnotateImageRequest]{@link AnnotateImageRequest}. - * @param {Object} request.image - * A dictionary-like object representing the image. This should have a - * single key (`source`, `content`). - * - * If the key is `source`, the value should be another object containing - * `imageUri` or `filename` as a key and a string as a value. - * - * If the key is `content`, the value should be a Buffer. - * @param {Object=} callOptions - * Optional parameters. You can override the default settings for this - * call, e.g, timeout, retries, paginations, etc. See - * [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} - * for the details. - * @param {function(?Error, ?Object)=} callback - * The function which will be called with the result of the API call. - * - * The second parameter to the callback is an object representing - * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. - * @return {Promise} - The promise which resolves to an array. - * The first element of the array is an object representing - * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. - * The promise has a method named "cancel" which cancels the ongoing - * API call. + * @see v1.ImageAnnotatorClient#batchAnnotateImages + * @see google.cloud.vision.v1.AnnotateImageRequest + * + * @method v1.ImageAnnotatorClient#safeSearchDetection + * @param {object} request A representation of the request being sent to the + * Vision API. 
This is an {@link google.cloud.vision.v1.AnnotateImageRequest AnnotateImageRequest}. + * @param {object} request.image A dictionary-like object representing the + * image. This should have a single key (`source`, `content`). + * + * If the key is `source`, the value should be another object containing + * `imageUri` or `filename` as a key and a string as a value. + * + * If the key is `content`, the value should be a Buffer. + * @param {object} [callOptions] Optional parameters. You can override the + * default settings for this call, e.g, timeout, retries, paginations, + * etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} + * for the details. + * @param {function(?Error, ?object)} [callback] The function which will be + * called with the result of the API call. + * + * The second parameter to the callback is an object representing + * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. + * @returns {Promise} The promise which resolves to an array. The first + * element of the array is an object representing + * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. + * The promise has a method named "cancel" which cancels the ongoing API + * call. * * @example * var image = { @@ -497,38 +534,40 @@ module.exports = apiVersion => { * console.error(err); * }); */ - methods.safeSearchDetection = - promisify(_createSingleFeatureMethod(features.SAFE_SEARCH_DETECTION)); + methods.safeSearchDetection = promisify( + _createSingleFeatureMethod(features.SAFE_SEARCH_DETECTION) + ); /** * Annotate a single image with image properties. * - * @param {Object} request - * A representation of the request being sent to the Vision API. - * This is an [AnnotateImageRequest]{@link AnnotateImageRequest}. - * @param {Object} request.image - * A dictionary-like object representing the image. This should have a - * single key (`source`, `content`). - * - * If the key is `source`, the value should be another object containing - * `imageUri` or `filename` as a key and a string as a value. - * - * If the key is `content`, the value should be a Buffer. - * @param {Object=} callOptions - * Optional parameters. You can override the default settings for this - * call, e.g, timeout, retries, paginations, etc. See - * [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} - * for the details. - * @param {function(?Error, ?Object)=} callback - * The function which will be called with the result of the API call. - * - * The second parameter to the callback is an object representing - * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. - * @return {Promise} - The promise which resolves to an array. - * The first element of the array is an object representing - * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. - * The promise has a method named "cancel" which cancels the ongoing - * API call. + * @see v1.ImageAnnotatorClient#batchAnnotateImages + * @see google.cloud.vision.v1.AnnotateImageRequest + * + * @method v1.ImageAnnotatorClient#imageProperties + * @param {object} request A representation of the request being sent to the + * Vision API. This is an {@link google.cloud.vision.v1.AnnotateImageRequest AnnotateImageRequest}. + * @param {object} request.image A dictionary-like object representing the + * image. This should have a single key (`source`, `content`). 
+ * + * If the key is `source`, the value should be another object containing + * `imageUri` or `filename` as a key and a string as a value. + * + * If the key is `content`, the value should be a Buffer. + * @param {object} [callOptions] Optional parameters. You can override the + * default settings for this call, e.g, timeout, retries, paginations, + * etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} + * for the details. + * @param {function(?Error, ?object)} [callback] The function which will be + * called with the result of the API call. + * + * The second parameter to the callback is an object representing + * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. + * @returns {Promise} The promise which resolves to an array. The first + * element of the array is an object representing + * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. + * The promise has a method named "cancel" which cancels the ongoing API + * call. * * @example * var image = { @@ -540,38 +579,40 @@ module.exports = apiVersion => { * console.error(err); * }); */ - methods.imageProperties = - promisify(_createSingleFeatureMethod(features.IMAGE_PROPERTIES)); + methods.imageProperties = promisify( + _createSingleFeatureMethod(features.IMAGE_PROPERTIES) + ); /** * Annotate a single image with crop hints. * - * @param {Object} request - * A representation of the request being sent to the Vision API. - * This is an [AnnotateImageRequest]{@link AnnotateImageRequest}. - * @param {Object} request.image - * A dictionary-like object representing the image. This should have a - * single key (`source`, `content`). - * - * If the key is `source`, the value should be another object containing - * `imageUri` or `filename` as a key and a string as a value. - * - * If the key is `content`, the value should be a Buffer. - * @param {Object=} callOptions - * Optional parameters. You can override the default settings for this - * call, e.g, timeout, retries, paginations, etc. See - * [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} - * for the details. - * @param {function(?Error, ?Object)=} callback - * The function which will be called with the result of the API call. - * - * The second parameter to the callback is an object representing - * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. - * @return {Promise} - The promise which resolves to an array. - * The first element of the array is an object representing - * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. - * The promise has a method named "cancel" which cancels the ongoing - * API call. + * @see v1.ImageAnnotatorClient#batchAnnotateImages + * @see google.cloud.vision.v1.AnnotateImageRequest + * + * @method v1.ImageAnnotatorClient#cropHints + * @param {object} request A representation of the request being sent to the + * Vision API. This is an {@link google.cloud.vision.v1.AnnotateImageRequest AnnotateImageRequest}. + * @param {object} request.image A dictionary-like object representing the + * image. This should have a single key (`source`, `content`). + * + * If the key is `source`, the value should be another object containing + * `imageUri` or `filename` as a key and a string as a value. + * + * If the key is `content`, the value should be a Buffer. + * @param {object} [callOptions] Optional parameters. You can override the + * default settings for this call, e.g, timeout, retries, paginations, + * etc. 
See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} + * for the details. + * @param {function(?Error, ?object)} [callback] The function which will be + * called with the result of the API call. + * + * The second parameter to the callback is an object representing + * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. + * @returns {Promise} The promise which resolves to an array. The first + * element of the array is an object representing + * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. + * The promise has a method named "cancel" which cancels the ongoing API + * call. * * @example * var image = { @@ -583,38 +624,40 @@ module.exports = apiVersion => { * console.error(err); * }); */ - methods.cropHints = - promisify(_createSingleFeatureMethod(features.CROP_HINTS)); + methods.cropHints = promisify( + _createSingleFeatureMethod(features.CROP_HINTS) + ); /** * Annotate a single image with web detection. * - * @param {Object} request - * A representation of the request being sent to the Vision API. - * This is an [AnnotateImageRequest]{@link AnnotateImageRequest}. - * @param {Object} request.image - * A dictionary-like object representing the image. This should have a - * single key (`source`, `content`). - * - * If the key is `source`, the value should be another object containing - * `imageUri` or `filename` as a key and a string as a value. - * - * If the key is `content`, the value should be a Buffer. - * @param {Object=} callOptions - * Optional parameters. You can override the default settings for this - * call, e.g, timeout, retries, paginations, etc. See - * [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} - * for the details. - * @param {function(?Error, ?Object)=} callback - * The function which will be called with the result of the API call. - * - * The second parameter to the callback is an object representing - * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. - * @return {Promise} - The promise which resolves to an array. - * The first element of the array is an object representing - * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. - * The promise has a method named "cancel" which cancels the ongoing - * API call. + * @see v1.ImageAnnotatorClient#batchAnnotateImages + * @see google.cloud.vision.v1.AnnotateImageRequest + * + * @method v1.ImageAnnotatorClient#webDetection + * @param {object} request A representation of the request being sent to the + * Vision API. This is an {@link google.cloud.vision.v1.AnnotateImageRequest AnnotateImageRequest}. + * @param {object} request.image A dictionary-like object representing the + * image. This should have a single key (`source`, `content`). + * + * If the key is `source`, the value should be another object containing + * `imageUri` or `filename` as a key and a string as a value. + * + * If the key is `content`, the value should be a Buffer. + * @param {object} [callOptions] Optional parameters. You can override the + * default settings for this call, e.g, timeout, retries, paginations, + * etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} + * for the details. + * @param {function(?Error, ?object)} [callback] The function which will be + * called with the result of the API call. + * + * The second parameter to the callback is an object representing + * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. 
+ * @returns {Promise} The promise which resolves to an array. The first + * element of the array is an object representing + * [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. + * The promise has a method named "cancel" which cancels the ongoing API + * call. * * @example * var image = { @@ -626,8 +669,9 @@ module.exports = apiVersion => { * console.error(err); * }); */ - methods.webDetection = - promisify(_createSingleFeatureMethod(features.WEB_DETECTION)); + methods.webDetection = promisify( + _createSingleFeatureMethod(features.WEB_DETECTION) + ); return methods; }; diff --git a/packages/google-cloud-vision/src/index.js b/packages/google-cloud-vision/src/index.js index a001f886c94..c0f800792f7 100644 --- a/packages/google-cloud-vision/src/index.js +++ b/packages/google-cloud-vision/src/index.js @@ -1,80 +1,88 @@ -/*! - * Copyright 2015 Google Inc. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. -/*! - * @module vision - * @name Vision +/** + * @namespace google + */ +/** + * @namespace google.cloud + */ +/** + * @namespace google.cloud.vision + */ +/** + * @namespace google.cloud.vision.v1 + */ +/** + * @namespace google.protobuf + */ +/** + * @namespace google.rpc + */ +/** + * @namespace google.type */ 'use strict'; -var is = require('is'); -var extend = require('extend'); +const helpers = require('./helpers'); -var gapic = { +// Import the clients for each version supported by this package. +const gapic = Object.freeze({ v1: require('./v1'), -}; -var gaxGrpc = require('google-gax').grpc(); - -var helpers = require('./helpers'); - -const VERSION = require('../package.json').version; +}); +// Augment the SpeechClient objects with the helpers. +for (let gapicVersion of Object.keys(gapic)) { + let clientProto = gapic[gapicVersion].ImageAnnotatorClient.prototype; + Object.assign(clientProto, helpers(gapicVersion)); +} /** - * Create an imageAnnotatorClient with additional helpers for common - * tasks. + * The `@google-cloud/vision` package has the following named exports: + * + * - `ImageAnnotatorClient` - Reference to {@link v1.ImageAnnotatorClient}. + * - `v1` - This is used for selecting or pinning a particular backend service + * version. It exports: + * - `ImageAnnotatorClient` - Reference to {@link v1.ImageAnnotatorClient}. 
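
As a rough usage sketch for the promisified single-feature helpers documented above and the client exported here: the file path is hypothetical, and the response handling only assumes what the JSDoc states, namely that the promise resolves to an array whose first element has the BatchAnnotateImagesResponse shape.

const vision = require('@google-cloud/vision');

const client = new vision.ImageAnnotatorClient();

// Per the helper JSDoc above, the request carries a single `image` key whose
// value is either {source: {filename: ...}} / {source: {imageUri: ...}} or
// {content: <Buffer>}.
const request = {image: {source: {filename: './face.jpg'}}};

client
  .webDetection(request)
  .then(results => {
    // The promise resolves to an array whose first element is described
    // above as a BatchAnnotateImagesResponse-shaped object.
    console.log(JSON.stringify(results[0], null, 2));
  })
  .catch(err => {
    console.error(err);
  });
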
+ * + * @module {object} @google-cloud/vision + * @alias nodejs-vision * - * @constructor - * @alias module:vision - * @mixes module:vision/helpers + * @example Install the client library with npm: + * npm install --save @google-cloud/vision * - * @param {Object=} opts - The optional parameters. - * @param {String=} opts.servicePath - * The domain name of the API remote host. - * @param {number=} opts.port - * The port on which to connect to the remote host. - * @param {grpc.ClientCredentials=} opts.sslCreds - * A ClientCredentials for use with an SSL-enabled channel. - * @param {Object=} opts.clientConfig - * The customized config to build the call settings. See - * {@link gax.constructSettings} for the format. + * @example Import the client library: + * const vision = require('@google-cloud/vision'); + * + * @example Create a client that uses Application Default Credentials (ADC): + * const client = new vision.ImageAnnotatorClient(); + * + * @example Create a client with explicit credentials: + * const client = new vision.ImageAnnotatorClient({ + * projectId: 'your-project-id', + * keyFilename: '/path/to/keyfile.json', + * }); */ -function visionV1(opts) { - // Define the header options. - opts = opts || {}; - opts.libName = 'gccl'; - opts.libVersion = VERSION; - - // Create the image annotator client with the provided options. - var client = gapic.v1(opts).imageAnnotatorClient(opts); - if (is.undefined(client.annotateImage)) { - Object.assign(client.constructor.prototype, helpers('v1')); - } - return client; -} +module.exports = gapic.v1; -var v1Protos = {}; - -extend(v1Protos, gaxGrpc.load([{ - root: require('google-proto-files')('..'), - file: 'google/cloud/vision/v1/image_annotator.proto' -}]).google.cloud.vision.v1); +/** + * @type {object} + * @property {constructor} ImageAnnotatorClient + * Reference to {@link v1.ImageAnnotatorClient} + */ +module.exports.v1 = gapic.v1; -module.exports = visionV1; -module.exports.types = v1Protos; -module.exports.v1 = visionV1; -module.exports.v1.types = v1Protos; +// Alias `module.exports` as `module.exports.default`, for future-proofing. +module.exports.default = Object.assign({}, module.exports); diff --git a/packages/google-cloud-vision/src/v1/doc/doc_geometry.js b/packages/google-cloud-vision/src/v1/doc/google/cloud/vision/v1/doc_geometry.js similarity index 65% rename from packages/google-cloud-vision/src/v1/doc/doc_geometry.js rename to packages/google-cloud-vision/src/v1/doc/google/cloud/vision/v1/doc_geometry.js index cfff72bd26c..c8b9a1a8ff7 100644 --- a/packages/google-cloud-vision/src/v1/doc/doc_geometry.js +++ b/packages/google-cloud-vision/src/v1/doc/google/cloud/vision/v1/doc_geometry.js @@ -1,23 +1,19 @@ -/* - * Copyright 2017, Google Inc. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. -/* - * Note: this file is purely for documentation. Any contents are not expected - * to be loaded as the JS file. - */ +// Note: this file is purely for documentation. Any contents are not expected +// to be loaded as the JS file. /** * A vertex represents a 2D point in the image. @@ -29,7 +25,8 @@ * @property {number} y * Y coordinate. * - * @class + * @typedef Vertex + * @memberof google.cloud.vision.v1 * @see [google.cloud.vision.v1.Vertex definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/geometry.proto} */ var Vertex = { @@ -42,9 +39,10 @@ var Vertex = { * @property {Object[]} vertices * The bounding polygon vertices. * - * This object should have the same structure as [Vertex]{@link Vertex} + * This object should have the same structure as [Vertex]{@link google.cloud.vision.v1.Vertex} * - * @class + * @typedef BoundingPoly + * @memberof google.cloud.vision.v1 * @see [google.cloud.vision.v1.BoundingPoly definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/geometry.proto} */ var BoundingPoly = { @@ -65,7 +63,8 @@ var BoundingPoly = { * @property {number} z * Z coordinate (or depth). * - * @class + * @typedef Position + * @memberof google.cloud.vision.v1 * @see [google.cloud.vision.v1.Position definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/geometry.proto} */ var Position = { diff --git a/packages/google-cloud-vision/src/v1/doc/doc_image_annotator.js b/packages/google-cloud-vision/src/v1/doc/google/cloud/vision/v1/doc_image_annotator.js similarity index 85% rename from packages/google-cloud-vision/src/v1/doc/doc_image_annotator.js rename to packages/google-cloud-vision/src/v1/doc/google/cloud/vision/v1/doc_image_annotator.js index e7845a3df3c..f2b24a27c67 100644 --- a/packages/google-cloud-vision/src/v1/doc/doc_image_annotator.js +++ b/packages/google-cloud-vision/src/v1/doc/google/cloud/vision/v1/doc_image_annotator.js @@ -1,23 +1,19 @@ -/* - * Copyright 2017, Google Inc. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Note: this file is purely for documentation. Any contents are not expected - * to be loaded as the JS file. - */ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Note: this file is purely for documentation. Any contents are not expected +// to be loaded as the JS file. /** * Users describe the type of Google Cloud Vision API tasks to perform over @@ -28,12 +24,13 @@ * @property {number} type * The feature type. * - * The number should be among the values of [Type]{@link Type} + * The number should be among the values of [Type]{@link google.cloud.vision.v1.Type} * * @property {number} maxResults * Maximum number of results of this type. * - * @class + * @typedef Feature + * @memberof google.cloud.vision.v1 * @see [google.cloud.vision.v1.Feature definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/image_annotator.proto} */ var Feature = { @@ -43,6 +40,7 @@ var Feature = { * Type of image feature. * * @enum {number} + * @memberof google.cloud.vision.v1 */ Type: { @@ -127,7 +125,8 @@ var Feature = { * `gcs_image_uri` and `image_uri` are specified, `image_uri` takes * precedence. * - * @class + * @typedef ImageSource + * @memberof google.cloud.vision.v1 * @see [google.cloud.vision.v1.ImageSource definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/image_annotator.proto} */ var ImageSource = { @@ -147,9 +146,10 @@ var ImageSource = { * are provided for an image, `content` takes precedence and is * used to perform the image annotation request. * - * This object should have the same structure as [ImageSource]{@link ImageSource} + * This object should have the same structure as [ImageSource]{@link google.cloud.vision.v1.ImageSource} * - * @class + * @typedef Image + * @memberof google.cloud.vision.v1 * @see [google.cloud.vision.v1.Image definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/image_annotator.proto} */ var Image = { @@ -168,7 +168,7 @@ var Image = { * `BoundingPoly` (the polygon will be unbounded) if only a partial face * appears in the image to be annotated. * - * This object should have the same structure as [BoundingPoly]{@link BoundingPoly} + * This object should have the same structure as [BoundingPoly]{@link google.cloud.vision.v1.BoundingPoly} * * @property {Object} fdBoundingPoly * The `fd_bounding_poly` bounding polygon is tighter than the @@ -178,12 +178,12 @@ var Image = { * landmarker results, only on the initial face detection, hence * the fd (face detection) prefix. * - * This object should have the same structure as [BoundingPoly]{@link BoundingPoly} + * This object should have the same structure as [BoundingPoly]{@link google.cloud.vision.v1.BoundingPoly} * * @property {Object[]} landmarks * Detected face landmarks. * - * This object should have the same structure as [Landmark]{@link Landmark} + * This object should have the same structure as [Landmark]{@link google.cloud.vision.v1.Landmark} * * @property {number} rollAngle * Roll angle, which indicates the amount of clockwise/anti-clockwise rotation @@ -208,39 +208,40 @@ var Image = { * @property {number} joyLikelihood * Joy likelihood. 
* - * The number should be among the values of [Likelihood]{@link Likelihood} + * The number should be among the values of [Likelihood]{@link google.cloud.vision.v1.Likelihood} * * @property {number} sorrowLikelihood * Sorrow likelihood. * - * The number should be among the values of [Likelihood]{@link Likelihood} + * The number should be among the values of [Likelihood]{@link google.cloud.vision.v1.Likelihood} * * @property {number} angerLikelihood * Anger likelihood. * - * The number should be among the values of [Likelihood]{@link Likelihood} + * The number should be among the values of [Likelihood]{@link google.cloud.vision.v1.Likelihood} * * @property {number} surpriseLikelihood * Surprise likelihood. * - * The number should be among the values of [Likelihood]{@link Likelihood} + * The number should be among the values of [Likelihood]{@link google.cloud.vision.v1.Likelihood} * * @property {number} underExposedLikelihood * Under-exposed likelihood. * - * The number should be among the values of [Likelihood]{@link Likelihood} + * The number should be among the values of [Likelihood]{@link google.cloud.vision.v1.Likelihood} * * @property {number} blurredLikelihood * Blurred likelihood. * - * The number should be among the values of [Likelihood]{@link Likelihood} + * The number should be among the values of [Likelihood]{@link google.cloud.vision.v1.Likelihood} * * @property {number} headwearLikelihood * Headwear likelihood. * - * The number should be among the values of [Likelihood]{@link Likelihood} + * The number should be among the values of [Likelihood]{@link google.cloud.vision.v1.Likelihood} * - * @class + * @typedef FaceAnnotation + * @memberof google.cloud.vision.v1 * @see [google.cloud.vision.v1.FaceAnnotation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/image_annotator.proto} */ var FaceAnnotation = { @@ -256,14 +257,15 @@ var FaceAnnotation = { * @property {number} type * Face landmark type. * - * The number should be among the values of [Type]{@link Type} + * The number should be among the values of [Type]{@link google.cloud.vision.v1.Type} * * @property {Object} position * Face landmark position. * - * This object should have the same structure as [Position]{@link Position} + * This object should have the same structure as [Position]{@link google.cloud.vision.v1.Position} * - * @class + * @typedef Landmark + * @memberof google.cloud.vision.v1 * @see [google.cloud.vision.v1.FaceAnnotation.Landmark definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/image_annotator.proto} */ Landmark: { @@ -276,6 +278,7 @@ var FaceAnnotation = { * typically, is the person's right eye. * * @enum {number} + * @memberof google.cloud.vision.v1 */ Type: { @@ -463,9 +466,10 @@ var FaceAnnotation = { * @property {Object} latLng * lat/long location coordinates. * - * This object should have the same structure as [google.type.LatLng]{@link external:"google.type.LatLng"} + * This object should have the same structure as [LatLng]{@link google.type.LatLng} * - * @class + * @typedef LocationInfo + * @memberof google.cloud.vision.v1 * @see [google.cloud.vision.v1.LocationInfo definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/image_annotator.proto} */ var LocationInfo = { @@ -481,7 +485,8 @@ var LocationInfo = { * @property {string} value * Value of the property. 
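
A hypothetical helper (the name and logging format are mine) showing how the FaceAnnotation fields described above, the per-emotion likelihoods and the landmark positions, might be read from a response's faceAnnotations array:

// Log a few likelihood fields and each detected landmark's 3D position
// for every face in a `faceAnnotations` array.
function logFaceAnnotations(faceAnnotations) {
  (faceAnnotations || []).forEach((face, index) => {
    console.log(
      `face #${index}: joy=${face.joyLikelihood}, sorrow=${face.sorrowLikelihood}, ` +
        `anger=${face.angerLikelihood}, headwear=${face.headwearLikelihood}`
    );
    for (const landmark of face.landmarks || []) {
      const position = landmark.position || {};
      console.log(
        `  landmark ${landmark.type}: (${position.x}, ${position.y}, ${position.z})`
      );
    }
  });
}
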
* - * @class + * @typedef Property + * @memberof google.cloud.vision.v1 * @see [google.cloud.vision.v1.Property definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/image_annotator.proto} */ var Property = { @@ -524,7 +529,7 @@ var Property = { * are produced for the entire text detected in an image region, followed by * `boundingPoly`s for each word within the detected text. * - * This object should have the same structure as [BoundingPoly]{@link BoundingPoly} + * This object should have the same structure as [BoundingPoly]{@link google.cloud.vision.v1.BoundingPoly} * * @property {Object[]} locations * The location information for the detected entity. Multiple @@ -533,15 +538,16 @@ var Property = { * may indicate the location of the place where the image was taken. * Location information is usually present for landmarks. * - * This object should have the same structure as [LocationInfo]{@link LocationInfo} + * This object should have the same structure as [LocationInfo]{@link google.cloud.vision.v1.LocationInfo} * * @property {Object[]} properties * Some entities may have optional user-supplied `Property` (name/value) * fields, such a score or string that qualifies the entity. * - * This object should have the same structure as [Property]{@link Property} + * This object should have the same structure as [Property]{@link google.cloud.vision.v1.Property} * - * @class + * @typedef EntityAnnotation + * @memberof google.cloud.vision.v1 * @see [google.cloud.vision.v1.EntityAnnotation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/image_annotator.proto} */ var EntityAnnotation = { @@ -556,26 +562,27 @@ var EntityAnnotation = { * @property {number} adult * Represents the adult content likelihood for the image. * - * The number should be among the values of [Likelihood]{@link Likelihood} + * The number should be among the values of [Likelihood]{@link google.cloud.vision.v1.Likelihood} * * @property {number} spoof * Spoof likelihood. The likelihood that an modification * was made to the image's canonical version to make it appear * funny or offensive. * - * The number should be among the values of [Likelihood]{@link Likelihood} + * The number should be among the values of [Likelihood]{@link google.cloud.vision.v1.Likelihood} * * @property {number} medical * Likelihood that this is a medical image. * - * The number should be among the values of [Likelihood]{@link Likelihood} + * The number should be among the values of [Likelihood]{@link google.cloud.vision.v1.Likelihood} * * @property {number} violence * Violence likelihood. * - * The number should be among the values of [Likelihood]{@link Likelihood} + * The number should be among the values of [Likelihood]{@link google.cloud.vision.v1.Likelihood} * - * @class + * @typedef SafeSearchAnnotation + * @memberof google.cloud.vision.v1 * @see [google.cloud.vision.v1.SafeSearchAnnotation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/image_annotator.proto} */ var SafeSearchAnnotation = { @@ -588,14 +595,15 @@ var SafeSearchAnnotation = { * @property {Object} minLatLng * Min lat/long pair. * - * This object should have the same structure as [google.type.LatLng]{@link external:"google.type.LatLng"} + * This object should have the same structure as [LatLng]{@link google.type.LatLng} * * @property {Object} maxLatLng * Max lat/long pair. 
* - * This object should have the same structure as [google.type.LatLng]{@link external:"google.type.LatLng"} + * This object should have the same structure as [LatLng]{@link google.type.LatLng} * - * @class + * @typedef LatLongRect + * @memberof google.cloud.vision.v1 * @see [google.cloud.vision.v1.LatLongRect definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/image_annotator.proto} */ var LatLongRect = { @@ -609,7 +617,7 @@ var LatLongRect = { * @property {Object} color * RGB components of the color. * - * This object should have the same structure as [google.type.Color]{@link external:"google.type.Color"} + * This object should have the same structure as [Color]{@link google.type.Color} * * @property {number} score * Image-specific score for this color. Value in range [0, 1]. @@ -618,7 +626,8 @@ var LatLongRect = { * The fraction of pixels the color occupies in the image. * Value in range [0, 1]. * - * @class + * @typedef ColorInfo + * @memberof google.cloud.vision.v1 * @see [google.cloud.vision.v1.ColorInfo definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/image_annotator.proto} */ var ColorInfo = { @@ -631,9 +640,10 @@ var ColorInfo = { * @property {Object[]} colors * RGB color values with their score and pixel fraction. * - * This object should have the same structure as [ColorInfo]{@link ColorInfo} + * This object should have the same structure as [ColorInfo]{@link google.cloud.vision.v1.ColorInfo} * - * @class + * @typedef DominantColorsAnnotation + * @memberof google.cloud.vision.v1 * @see [google.cloud.vision.v1.DominantColorsAnnotation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/image_annotator.proto} */ var DominantColorsAnnotation = { @@ -646,9 +656,10 @@ var DominantColorsAnnotation = { * @property {Object} dominantColors * If present, dominant colors completed successfully. * - * This object should have the same structure as [DominantColorsAnnotation]{@link DominantColorsAnnotation} + * This object should have the same structure as [DominantColorsAnnotation]{@link google.cloud.vision.v1.DominantColorsAnnotation} * - * @class + * @typedef ImageProperties + * @memberof google.cloud.vision.v1 * @see [google.cloud.vision.v1.ImageProperties definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/image_annotator.proto} */ var ImageProperties = { @@ -662,7 +673,7 @@ var ImageProperties = { * The bounding polygon for the crop region. The coordinates of the bounding * box are in the original image's scale, as returned in `ImageParams`. * - * This object should have the same structure as [BoundingPoly]{@link BoundingPoly} + * This object should have the same structure as [BoundingPoly]{@link google.cloud.vision.v1.BoundingPoly} * * @property {number} confidence * Confidence of this being a salient region. Range [0, 1]. @@ -671,7 +682,8 @@ var ImageProperties = { * Fraction of importance of this salient region with respect to the original * image. * - * @class + * @typedef CropHint + * @memberof google.cloud.vision.v1 * @see [google.cloud.vision.v1.CropHint definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/image_annotator.proto} */ var CropHint = { @@ -682,9 +694,10 @@ var CropHint = { * Set of crop hints that are used to generate new crops when serving images. 
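
A hedged sketch of reading the CropHint fields described above (boundingPoly, confidence, importanceFraction) out of a CropHintsAnnotation-shaped object; the helper name and the defensive defaults are assumptions, not library code:

// Print each crop hint's confidence, importance fraction, and polygon
// vertices from a CropHintsAnnotation-shaped object.
function logCropHints(cropHintsAnnotation) {
  const hints = (cropHintsAnnotation && cropHintsAnnotation.cropHints) || [];
  for (const hint of hints) {
    const vertices = ((hint.boundingPoly && hint.boundingPoly.vertices) || [])
      .map(vertex => `(${vertex.x}, ${vertex.y})`)
      .join(' ');
    console.log(
      `confidence=${hint.confidence} ` +
        `importance=${hint.importanceFraction} vertices=${vertices}`
    );
  }
}
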
* * @property {Object[]} cropHints - * This object should have the same structure as [CropHint]{@link CropHint} + * This object should have the same structure as [CropHint]{@link google.cloud.vision.v1.CropHint} * - * @class + * @typedef CropHintsAnnotation + * @memberof google.cloud.vision.v1 * @see [google.cloud.vision.v1.CropHintsAnnotation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/image_annotator.proto} */ var CropHintsAnnotation = { @@ -702,7 +715,8 @@ var CropHintsAnnotation = { * limited to a maximum of 16; any aspect ratios provided after the 16th are * ignored. * - * @class + * @typedef CropHintsParams + * @memberof google.cloud.vision.v1 * @see [google.cloud.vision.v1.CropHintsParams definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/image_annotator.proto} */ var CropHintsParams = { @@ -715,7 +729,7 @@ var CropHintsParams = { * @property {Object} latLongRect * lat/long rectangle that specifies the location of the image. * - * This object should have the same structure as [LatLongRect]{@link LatLongRect} + * This object should have the same structure as [LatLongRect]{@link google.cloud.vision.v1.LatLongRect} * * @property {string[]} languageHints * List of languages to use for TEXT_DETECTION. In most cases, an empty value @@ -730,9 +744,10 @@ var CropHintsParams = { * @property {Object} cropHintsParams * Parameters for crop hints annotation request. * - * This object should have the same structure as [CropHintsParams]{@link CropHintsParams} + * This object should have the same structure as [CropHintsParams]{@link google.cloud.vision.v1.CropHintsParams} * - * @class + * @typedef ImageContext + * @memberof google.cloud.vision.v1 * @see [google.cloud.vision.v1.ImageContext definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/image_annotator.proto} */ var ImageContext = { @@ -746,19 +761,20 @@ var ImageContext = { * @property {Object} image * The image to be processed. * - * This object should have the same structure as [Image]{@link Image} + * This object should have the same structure as [Image]{@link google.cloud.vision.v1.Image} * * @property {Object[]} features * Requested features. * - * This object should have the same structure as [Feature]{@link Feature} + * This object should have the same structure as [Feature]{@link google.cloud.vision.v1.Feature} * * @property {Object} imageContext * Additional context that may accompany the image. * - * This object should have the same structure as [ImageContext]{@link ImageContext} + * This object should have the same structure as [ImageContext]{@link google.cloud.vision.v1.ImageContext} * - * @class + * @typedef AnnotateImageRequest + * @memberof google.cloud.vision.v1 * @see [google.cloud.vision.v1.AnnotateImageRequest definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/image_annotator.proto} */ var AnnotateImageRequest = { @@ -771,28 +787,28 @@ var AnnotateImageRequest = { * @property {Object[]} faceAnnotations * If present, face detection has completed successfully. * - * This object should have the same structure as [FaceAnnotation]{@link FaceAnnotation} + * This object should have the same structure as [FaceAnnotation]{@link google.cloud.vision.v1.FaceAnnotation} * * @property {Object[]} landmarkAnnotations * If present, landmark detection has completed successfully. 
* - * This object should have the same structure as [EntityAnnotation]{@link EntityAnnotation} + * This object should have the same structure as [EntityAnnotation]{@link google.cloud.vision.v1.EntityAnnotation} * * @property {Object[]} logoAnnotations * If present, logo detection has completed successfully. * - * This object should have the same structure as [EntityAnnotation]{@link EntityAnnotation} + * This object should have the same structure as [EntityAnnotation]{@link google.cloud.vision.v1.EntityAnnotation} * * @property {Object[]} labelAnnotations * If present, label detection has completed successfully. * - * This object should have the same structure as [EntityAnnotation]{@link EntityAnnotation} + * This object should have the same structure as [EntityAnnotation]{@link google.cloud.vision.v1.EntityAnnotation} * * @property {Object[]} textAnnotations * If present, text (OCR) detection or document (OCR) text detection has * completed successfully. * - * This object should have the same structure as [EntityAnnotation]{@link EntityAnnotation} + * This object should have the same structure as [EntityAnnotation]{@link google.cloud.vision.v1.EntityAnnotation} * * @property {Object} fullTextAnnotation * If present, text (OCR) detection or document (OCR) text detection has @@ -800,36 +816,37 @@ var AnnotateImageRequest = { * This annotation provides the structural hierarchy for the OCR detected * text. * - * This object should have the same structure as [TextAnnotation]{@link TextAnnotation} + * This object should have the same structure as [TextAnnotation]{@link google.cloud.vision.v1.TextAnnotation} * * @property {Object} safeSearchAnnotation * If present, safe-search annotation has completed successfully. * - * This object should have the same structure as [SafeSearchAnnotation]{@link SafeSearchAnnotation} + * This object should have the same structure as [SafeSearchAnnotation]{@link google.cloud.vision.v1.SafeSearchAnnotation} * * @property {Object} imagePropertiesAnnotation * If present, image properties were extracted successfully. * - * This object should have the same structure as [ImageProperties]{@link ImageProperties} + * This object should have the same structure as [ImageProperties]{@link google.cloud.vision.v1.ImageProperties} * * @property {Object} cropHintsAnnotation * If present, crop hints have completed successfully. * - * This object should have the same structure as [CropHintsAnnotation]{@link CropHintsAnnotation} + * This object should have the same structure as [CropHintsAnnotation]{@link google.cloud.vision.v1.CropHintsAnnotation} * * @property {Object} webDetection * If present, web detection has completed successfully. * - * This object should have the same structure as [WebDetection]{@link WebDetection} + * This object should have the same structure as [WebDetection]{@link google.cloud.vision.v1.WebDetection} * * @property {Object} error * If set, represents the error message for the operation. * Note that filled-in image annotations are guaranteed to be * correct, even when `error` is set. 
* - * This object should have the same structure as [google.rpc.Status]{@link external:"google.rpc.Status"} + * This object should have the same structure as [Status]{@link google.rpc.Status} * - * @class + * @typedef AnnotateImageResponse + * @memberof google.cloud.vision.v1 * @see [google.cloud.vision.v1.AnnotateImageResponse definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/image_annotator.proto} */ var AnnotateImageResponse = { @@ -842,9 +859,10 @@ var AnnotateImageResponse = { * @property {Object[]} requests * Individual image annotation requests for this batch. * - * This object should have the same structure as [AnnotateImageRequest]{@link AnnotateImageRequest} + * This object should have the same structure as [AnnotateImageRequest]{@link google.cloud.vision.v1.AnnotateImageRequest} * - * @class + * @typedef BatchAnnotateImagesRequest + * @memberof google.cloud.vision.v1 * @see [google.cloud.vision.v1.BatchAnnotateImagesRequest definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/image_annotator.proto} */ var BatchAnnotateImagesRequest = { @@ -857,9 +875,10 @@ var BatchAnnotateImagesRequest = { * @property {Object[]} responses * Individual responses to image annotation requests within the batch. * - * This object should have the same structure as [AnnotateImageResponse]{@link AnnotateImageResponse} + * This object should have the same structure as [AnnotateImageResponse]{@link google.cloud.vision.v1.AnnotateImageResponse} * - * @class + * @typedef BatchAnnotateImagesResponse + * @memberof google.cloud.vision.v1 * @see [google.cloud.vision.v1.BatchAnnotateImagesResponse definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/image_annotator.proto} */ var BatchAnnotateImagesResponse = { @@ -871,6 +890,7 @@ var BatchAnnotateImagesResponse = { * highly stable results across model upgrades. * * @enum {number} + * @memberof google.cloud.vision.v1 */ var Likelihood = { diff --git a/packages/google-cloud-vision/src/v1/doc/doc_text_annotation.js b/packages/google-cloud-vision/src/v1/doc/google/cloud/vision/v1/doc_text_annotation.js similarity index 83% rename from packages/google-cloud-vision/src/v1/doc/doc_text_annotation.js rename to packages/google-cloud-vision/src/v1/doc/google/cloud/vision/v1/doc_text_annotation.js index 96e5ccba01a..51f65b6f952 100644 --- a/packages/google-cloud-vision/src/v1/doc/doc_text_annotation.js +++ b/packages/google-cloud-vision/src/v1/doc/google/cloud/vision/v1/doc_text_annotation.js @@ -1,23 +1,19 @@ -/* - * Copyright 2017, Google Inc. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. -/* - * Note: this file is purely for documentation. Any contents are not expected - * to be loaded as the JS file. - */ +// Note: this file is purely for documentation. Any contents are not expected +// to be loaded as the JS file. /** * TextAnnotation contains a structured representation of OCR extracted text. @@ -25,18 +21,19 @@ * TextAnnotation -> Page -> Block -> Paragraph -> Word -> Symbol * Each structural component, starting from Page, may further have their own * properties. Properties describe detected languages, breaks etc.. Please - * refer to the {@link google.cloud.vision.v1.TextAnnotation.TextProperty} message + * refer to the google.cloud.vision.v1.TextAnnotation.TextProperty message * definition below for more detail. * * @property {Object[]} pages * List of pages detected by OCR. * - * This object should have the same structure as [Page]{@link Page} + * This object should have the same structure as [Page]{@link google.cloud.vision.v1.Page} * * @property {string} text * UTF-8 text detected on the pages. * - * @class + * @typedef TextAnnotation + * @memberof google.cloud.vision.v1 * @see [google.cloud.vision.v1.TextAnnotation definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/text_annotation.proto} */ var TextAnnotation = { @@ -53,7 +50,8 @@ var TextAnnotation = { * @property {number} confidence * Confidence of detected language. Range [0, 1]. * - * @class + * @typedef DetectedLanguage + * @memberof google.cloud.vision.v1 * @see [google.cloud.vision.v1.TextAnnotation.DetectedLanguage definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/text_annotation.proto} */ DetectedLanguage: { @@ -64,12 +62,13 @@ var TextAnnotation = { * Detected start or end of a structural component. * * @property {number} type - * The number should be among the values of [BreakType]{@link BreakType} + * The number should be among the values of [BreakType]{@link google.cloud.vision.v1.BreakType} * * @property {boolean} isPrefix * True if break prepends the element. * - * @class + * @typedef DetectedBreak + * @memberof google.cloud.vision.v1 * @see [google.cloud.vision.v1.TextAnnotation.DetectedBreak definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/text_annotation.proto} */ DetectedBreak: { @@ -79,6 +78,7 @@ var TextAnnotation = { * Enum to denote the type of break found. New line, space etc. * * @enum {number} + * @memberof google.cloud.vision.v1 */ BreakType: { @@ -122,14 +122,15 @@ var TextAnnotation = { * @property {Object[]} detectedLanguages * A list of detected languages together with confidence. * - * This object should have the same structure as [DetectedLanguage]{@link DetectedLanguage} + * This object should have the same structure as [DetectedLanguage]{@link google.cloud.vision.v1.DetectedLanguage} * * @property {Object} detectedBreak * Detected start or end of a text segment. 
* - * This object should have the same structure as [DetectedBreak]{@link DetectedBreak} + * This object should have the same structure as [DetectedBreak]{@link google.cloud.vision.v1.DetectedBreak} * - * @class + * @typedef TextProperty + * @memberof google.cloud.vision.v1 * @see [google.cloud.vision.v1.TextAnnotation.TextProperty definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/text_annotation.proto} */ TextProperty: { @@ -143,7 +144,7 @@ var TextAnnotation = { * @property {Object} property * Additional information detected on the page. * - * This object should have the same structure as [TextProperty]{@link TextProperty} + * This object should have the same structure as [TextProperty]{@link google.cloud.vision.v1.TextProperty} * * @property {number} width * Page width in pixels. @@ -154,9 +155,10 @@ var TextAnnotation = { * @property {Object[]} blocks * List of blocks of text, images etc on this page. * - * This object should have the same structure as [Block]{@link Block} + * This object should have the same structure as [Block]{@link google.cloud.vision.v1.Block} * - * @class + * @typedef Page + * @memberof google.cloud.vision.v1 * @see [google.cloud.vision.v1.Page definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/text_annotation.proto} */ var Page = { @@ -169,7 +171,7 @@ var Page = { * @property {Object} property * Additional information detected for the block. * - * This object should have the same structure as [TextProperty]{@link TextProperty} + * This object should have the same structure as [TextProperty]{@link google.cloud.vision.v1.TextProperty} * * @property {Object} boundingBox * The bounding box for the block. @@ -188,19 +190,20 @@ var Page = { * 1----0 * and the vertice order will still be (0, 1, 2, 3). * - * This object should have the same structure as [BoundingPoly]{@link BoundingPoly} + * This object should have the same structure as [BoundingPoly]{@link google.cloud.vision.v1.BoundingPoly} * * @property {Object[]} paragraphs * List of paragraphs in this block (if this blocks is of type text). * - * This object should have the same structure as [Paragraph]{@link Paragraph} + * This object should have the same structure as [Paragraph]{@link google.cloud.vision.v1.Paragraph} * * @property {number} blockType * Detected block type (text, image etc) for this block. * - * The number should be among the values of [BlockType]{@link BlockType} + * The number should be among the values of [BlockType]{@link google.cloud.vision.v1.BlockType} * - * @class + * @typedef Block + * @memberof google.cloud.vision.v1 * @see [google.cloud.vision.v1.Block definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/text_annotation.proto} */ var Block = { @@ -210,6 +213,7 @@ var Block = { * Type of a block (text, image etc) as identified by OCR. * * @enum {number} + * @memberof google.cloud.vision.v1 */ BlockType: { @@ -251,7 +255,7 @@ var Block = { * @property {Object} property * Additional information detected for the paragraph. * - * This object should have the same structure as [TextProperty]{@link TextProperty} + * This object should have the same structure as [TextProperty]{@link google.cloud.vision.v1.TextProperty} * * @property {Object} boundingBox * The bounding box for the paragraph. @@ -270,14 +274,15 @@ var Block = { * 1----0 * and the vertice order will still be (0, 1, 2, 3). 
* - * This object should have the same structure as [BoundingPoly]{@link BoundingPoly} + * This object should have the same structure as [BoundingPoly]{@link google.cloud.vision.v1.BoundingPoly} * * @property {Object[]} words * List of words in this paragraph. * - * This object should have the same structure as [Word]{@link Word} + * This object should have the same structure as [Word]{@link google.cloud.vision.v1.Word} * - * @class + * @typedef Paragraph + * @memberof google.cloud.vision.v1 * @see [google.cloud.vision.v1.Paragraph definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/text_annotation.proto} */ var Paragraph = { @@ -290,7 +295,7 @@ var Paragraph = { * @property {Object} property * Additional information detected for the word. * - * This object should have the same structure as [TextProperty]{@link TextProperty} + * This object should have the same structure as [TextProperty]{@link google.cloud.vision.v1.TextProperty} * * @property {Object} boundingBox * The bounding box for the word. @@ -309,15 +314,16 @@ var Paragraph = { * 1----0 * and the vertice order will still be (0, 1, 2, 3). * - * This object should have the same structure as [BoundingPoly]{@link BoundingPoly} + * This object should have the same structure as [BoundingPoly]{@link google.cloud.vision.v1.BoundingPoly} * * @property {Object[]} symbols * List of symbols in the word. * The order of the symbols follows the natural reading order. * - * This object should have the same structure as [Symbol]{@link Symbol} + * This object should have the same structure as [Symbol]{@link google.cloud.vision.v1.Symbol} * - * @class + * @typedef Word + * @memberof google.cloud.vision.v1 * @see [google.cloud.vision.v1.Word definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/text_annotation.proto} */ var Word = { @@ -330,7 +336,7 @@ var Word = { * @property {Object} property * Additional information detected for the symbol. * - * This object should have the same structure as [TextProperty]{@link TextProperty} + * This object should have the same structure as [TextProperty]{@link google.cloud.vision.v1.TextProperty} * * @property {Object} boundingBox * The bounding box for the symbol. @@ -349,12 +355,13 @@ var Word = { * 1----0 * and the vertice order will still be (0, 1, 2, 3). * - * This object should have the same structure as [BoundingPoly]{@link BoundingPoly} + * This object should have the same structure as [BoundingPoly]{@link google.cloud.vision.v1.BoundingPoly} * * @property {string} text * The actual UTF-8 representation of the symbol. * - * @class + * @typedef Symbol + * @memberof google.cloud.vision.v1 * @see [google.cloud.vision.v1.Symbol definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/text_annotation.proto} */ var Symbol = { diff --git a/packages/google-cloud-vision/src/v1/doc/doc_web_detection.js b/packages/google-cloud-vision/src/v1/doc/google/cloud/vision/v1/doc_web_detection.js similarity index 74% rename from packages/google-cloud-vision/src/v1/doc/doc_web_detection.js rename to packages/google-cloud-vision/src/v1/doc/google/cloud/vision/v1/doc_web_detection.js index 12e2da864f6..1872b45d158 100644 --- a/packages/google-cloud-vision/src/v1/doc/doc_web_detection.js +++ b/packages/google-cloud-vision/src/v1/doc/google/cloud/vision/v1/doc_web_detection.js @@ -1,23 +1,19 @@ -/* - * Copyright 2017, Google Inc. All rights reserved. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. -/* - * Note: this file is purely for documentation. Any contents are not expected - * to be loaded as the JS file. - */ +// Note: this file is purely for documentation. Any contents are not expected +// to be loaded as the JS file. /** * Relevant information for the image from the Internet. @@ -25,28 +21,29 @@ * @property {Object[]} webEntities * Deduced entities from similar images on the Internet. * - * This object should have the same structure as [WebEntity]{@link WebEntity} + * This object should have the same structure as [WebEntity]{@link google.cloud.vision.v1.WebEntity} * * @property {Object[]} fullMatchingImages * Fully matching images from the Internet. * They're definite neardups and most often a copy of the query image with * merely a size change. * - * This object should have the same structure as [WebImage]{@link WebImage} + * This object should have the same structure as [WebImage]{@link google.cloud.vision.v1.WebImage} * * @property {Object[]} partialMatchingImages * Partial matching images from the Internet. * Those images are similar enough to share some key-point features. For * example an original image will likely have partial matching for its crops. * - * This object should have the same structure as [WebImage]{@link WebImage} + * This object should have the same structure as [WebImage]{@link google.cloud.vision.v1.WebImage} * * @property {Object[]} pagesWithMatchingImages * Web pages containing the matching images from the Internet. * - * This object should have the same structure as [WebPage]{@link WebPage} + * This object should have the same structure as [WebPage]{@link google.cloud.vision.v1.WebPage} * - * @class + * @typedef WebDetection + * @memberof google.cloud.vision.v1 * @see [google.cloud.vision.v1.WebDetection definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/web_detection.proto} */ var WebDetection = { @@ -65,7 +62,8 @@ var WebDetection = { * @property {string} description * Canonical description of the entity, in English. 
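
A hypothetical summary helper for a WebDetection-shaped object; it touches only the fields documented in this file (webEntities with entityId, description and score, plus the three lists of matching images and pages):

// Summarize a WebDetection-shaped object: list deduced entities and count
// the matching images and pages.
function summarizeWebDetection(webDetection) {
  for (const entity of webDetection.webEntities || []) {
    console.log(
      `entity ${entity.entityId}: ${entity.description} (score=${entity.score})`
    );
  }
  console.log(`fully matching images: ${(webDetection.fullMatchingImages || []).length}`);
  console.log(`partially matching images: ${(webDetection.partialMatchingImages || []).length}`);
  console.log(`pages with matching images: ${(webDetection.pagesWithMatchingImages || []).length}`);
}
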
* - * @class + * @typedef WebEntity + * @memberof google.cloud.vision.v1 * @see [google.cloud.vision.v1.WebDetection.WebEntity definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/web_detection.proto} */ WebEntity: { @@ -82,7 +80,8 @@ var WebDetection = { * Overall relevancy score for the image. * Not normalized and not comparable across different image queries. * - * @class + * @typedef WebImage + * @memberof google.cloud.vision.v1 * @see [google.cloud.vision.v1.WebDetection.WebImage definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/web_detection.proto} */ WebImage: { @@ -99,7 +98,8 @@ var WebDetection = { * Overall relevancy score for the web page. * Not normalized and not comparable across different image queries. * - * @class + * @typedef WebPage + * @memberof google.cloud.vision.v1 * @see [google.cloud.vision.v1.WebDetection.WebPage definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/web_detection.proto} */ WebPage: { diff --git a/packages/google-cloud-vision/src/v1/doc/doc_google_protobuf_any.js b/packages/google-cloud-vision/src/v1/doc/google/protobuf/doc_any.js similarity index 74% rename from packages/google-cloud-vision/src/v1/doc/doc_google_protobuf_any.js rename to packages/google-cloud-vision/src/v1/doc/google/protobuf/doc_any.js index 0697ec15814..21feb744243 100644 --- a/packages/google-cloud-vision/src/v1/doc/doc_google_protobuf_any.js +++ b/packages/google-cloud-vision/src/v1/doc/google/protobuf/doc_any.js @@ -1,23 +1,19 @@ -/* - * Copyright 2017, Google Inc. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. -/* - * Note: this file is purely for documentation. Any contents are not expected - * to be loaded as the JS file. - */ +// Note: this file is purely for documentation. Any contents are not expected +// to be loaded as the JS file. /** * `Any` contains an arbitrary serialized protocol buffer message along with a @@ -55,6 +51,16 @@ * any.Unpack(foo) * ... * + * Example 4: Pack and unpack a message in Go + * + * foo := &pb.Foo{...} + * any, err := ptypes.MarshalAny(foo) + * ... + * foo := &pb.Foo{} + * if err := ptypes.UnmarshalAny(any, foo); err != nil { + * ... 
+ * } + * * The pack methods provided by protobuf library will by default use * 'type.googleapis.com/full.type.name' as the type URL and the unpack * methods only use the fully qualified type name after the last '/' @@ -83,14 +89,13 @@ * If the embedded message type is well-known and has a custom JSON * representation, that representation will be embedded adding a field * `value` which holds the custom JSON in addition to the `@type` - * field. Example (for message {@link google.protobuf.Duration}): + * field. Example (for message google.protobuf.Duration): * * { * "@type": "type.googleapis.com/google.protobuf.Duration", * "value": "1.212s" * } * - * @external "google.protobuf.Any" * @property {string} typeUrl * A URL/resource name whose content describes the type of the * serialized protocol buffer message. @@ -103,7 +108,7 @@ * qualified name of the type (as in `path/google.protobuf.Duration`). * The name should be in a canonical form (e.g., leading "." is * not accepted). - * * An HTTP GET on the URL must yield a {@link google.protobuf.Type} + * * An HTTP GET on the URL must yield a google.protobuf.Type * value in binary format, or produce an error. * * Applications are allowed to cache lookup results based on the * URL, or have them precompiled into a binary to avoid any @@ -117,5 +122,10 @@ * @property {string} value * Must be a valid serialized protocol buffer of the above specified type. * + * @typedef Any + * @memberof google.protobuf * @see [google.protobuf.Any definition in proto format]{@link https://github.com/google/protobuf/blob/master/src/google/protobuf/any.proto} - */ \ No newline at end of file + */ +var Any = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; \ No newline at end of file diff --git a/packages/google-cloud-vision/src/v1/doc/doc_google_protobuf_wrappers.js b/packages/google-cloud-vision/src/v1/doc/google/protobuf/doc_wrappers.js similarity index 59% rename from packages/google-cloud-vision/src/v1/doc/doc_google_protobuf_wrappers.js rename to packages/google-cloud-vision/src/v1/doc/google/protobuf/doc_wrappers.js index 46a5e3e2213..0ccf17f236b 100644 --- a/packages/google-cloud-vision/src/v1/doc/doc_google_protobuf_wrappers.js +++ b/packages/google-cloud-vision/src/v1/doc/google/protobuf/doc_wrappers.js @@ -1,128 +1,160 @@ -/* - * Copyright 2017, Google Inc. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. -/* - * Note: this file is purely for documentation. Any contents are not expected - * to be loaded as the JS file. - */ +// Note: this file is purely for documentation. Any contents are not expected +// to be loaded as the JS file. /** * Wrapper message for `double`. * * The JSON representation for `DoubleValue` is JSON number. * - * @external "google.protobuf.DoubleValue" * @property {number} value * The double value. * + * @typedef DoubleValue + * @memberof google.protobuf * @see [google.protobuf.DoubleValue definition in proto format]{@link https://github.com/google/protobuf/blob/master/src/google/protobuf/wrappers.proto} */ +var DoubleValue = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; /** * Wrapper message for `float`. * * The JSON representation for `FloatValue` is JSON number. * - * @external "google.protobuf.FloatValue" * @property {number} value * The float value. * + * @typedef FloatValue + * @memberof google.protobuf * @see [google.protobuf.FloatValue definition in proto format]{@link https://github.com/google/protobuf/blob/master/src/google/protobuf/wrappers.proto} */ +var FloatValue = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; /** * Wrapper message for `int64`. * * The JSON representation for `Int64Value` is JSON string. * - * @external "google.protobuf.Int64Value" * @property {number} value * The int64 value. * + * @typedef Int64Value + * @memberof google.protobuf * @see [google.protobuf.Int64Value definition in proto format]{@link https://github.com/google/protobuf/blob/master/src/google/protobuf/wrappers.proto} */ +var Int64Value = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; /** * Wrapper message for `uint64`. * * The JSON representation for `UInt64Value` is JSON string. * - * @external "google.protobuf.UInt64Value" * @property {number} value * The uint64 value. * + * @typedef UInt64Value + * @memberof google.protobuf * @see [google.protobuf.UInt64Value definition in proto format]{@link https://github.com/google/protobuf/blob/master/src/google/protobuf/wrappers.proto} */ +var UInt64Value = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; /** * Wrapper message for `int32`. * * The JSON representation for `Int32Value` is JSON number. * - * @external "google.protobuf.Int32Value" * @property {number} value * The int32 value. * + * @typedef Int32Value + * @memberof google.protobuf * @see [google.protobuf.Int32Value definition in proto format]{@link https://github.com/google/protobuf/blob/master/src/google/protobuf/wrappers.proto} */ +var Int32Value = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; /** * Wrapper message for `uint32`. * * The JSON representation for `UInt32Value` is JSON number. * - * @external "google.protobuf.UInt32Value" * @property {number} value * The uint32 value. * + * @typedef UInt32Value + * @memberof google.protobuf * @see [google.protobuf.UInt32Value definition in proto format]{@link https://github.com/google/protobuf/blob/master/src/google/protobuf/wrappers.proto} */ +var UInt32Value = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; /** * Wrapper message for `bool`. * * The JSON representation for `BoolValue` is JSON `true` and `false`. * - * @external "google.protobuf.BoolValue" * @property {boolean} value * The bool value. 
* + * @typedef BoolValue + * @memberof google.protobuf * @see [google.protobuf.BoolValue definition in proto format]{@link https://github.com/google/protobuf/blob/master/src/google/protobuf/wrappers.proto} */ +var BoolValue = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; /** * Wrapper message for `string`. * * The JSON representation for `StringValue` is JSON string. * - * @external "google.protobuf.StringValue" * @property {string} value * The string value. * + * @typedef StringValue + * @memberof google.protobuf * @see [google.protobuf.StringValue definition in proto format]{@link https://github.com/google/protobuf/blob/master/src/google/protobuf/wrappers.proto} */ +var StringValue = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; /** * Wrapper message for `bytes`. * * The JSON representation for `BytesValue` is JSON string. * - * @external "google.protobuf.BytesValue" * @property {string} value * The bytes value. * + * @typedef BytesValue + * @memberof google.protobuf * @see [google.protobuf.BytesValue definition in proto format]{@link https://github.com/google/protobuf/blob/master/src/google/protobuf/wrappers.proto} - */ \ No newline at end of file + */ +var BytesValue = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; \ No newline at end of file diff --git a/packages/google-cloud-vision/src/v1/doc/doc_google_rpc_status.js b/packages/google-cloud-vision/src/v1/doc/google/rpc/doc_status.js similarity index 67% rename from packages/google-cloud-vision/src/v1/doc/doc_google_rpc_status.js rename to packages/google-cloud-vision/src/v1/doc/google/rpc/doc_status.js index c85f1befe90..be5e96ce26d 100644 --- a/packages/google-cloud-vision/src/v1/doc/doc_google_rpc_status.js +++ b/packages/google-cloud-vision/src/v1/doc/google/rpc/doc_status.js @@ -1,23 +1,19 @@ -/* - * Copyright 2017, Google Inc. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. -/* - * Note: this file is purely for documentation. Any contents are not expected - * to be loaded as the JS file. - */ +// Note: this file is purely for documentation. Any contents are not expected +// to be loaded as the JS file. 
/** * The `Status` type defines a logical error model that is suitable for different @@ -31,13 +27,13 @@ * * The `Status` message contains three pieces of data: error code, error message, * and error details. The error code should be an enum value of - * {@link google.rpc.Code}, but it may accept additional error codes if needed. The + * google.rpc.Code, but it may accept additional error codes if needed. The * error message should be a developer-facing English message that helps * developers *understand* and *resolve* the error. If a localized user-facing * error message is needed, put the localized message in the error details or * localize it in the client. The optional error details may contain arbitrary * information about the error. There is a predefined set of error detail types - * in the package `google.rpc` which can be used for common error conditions. + * in the package `google.rpc` that can be used for common error conditions. * * # Language mapping * @@ -60,7 +56,7 @@ * errors. * * - Workflow errors. A typical workflow has multiple steps. Each step may - * have a `Status` message for error reporting purpose. + * have a `Status` message for error reporting. * * - Batch operations. If a client uses batch request and batch response, the * `Status` message should be used directly inside batch response, one for @@ -73,20 +69,24 @@ * - Logging. If some API errors are stored in logs, the message `Status` could * be used directly after any stripping needed for security/privacy reasons. * - * @external "google.rpc.Status" * @property {number} code - * The status code, which should be an enum value of {@link google.rpc.Code}. + * The status code, which should be an enum value of google.rpc.Code. * * @property {string} message * A developer-facing error message, which should be in English. Any * user-facing error message should be localized and sent in the - * {@link google.rpc.Status.details} field, or localized by the client. + * google.rpc.Status.details field, or localized by the client. * * @property {Object[]} details - * A list of messages that carry the error details. There will be a - * common set of message types for APIs to use. + * A list of messages that carry the error details. There is a common set of + * message types for APIs to use. * - * This object should have the same structure as [google.protobuf.Any]{@link external:"google.protobuf.Any"} + * This object should have the same structure as [Any]{@link google.protobuf.Any} * + * @typedef Status + * @memberof google.rpc * @see [google.rpc.Status definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto} - */ \ No newline at end of file + */ +var Status = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; \ No newline at end of file diff --git a/packages/google-cloud-vision/src/v1/doc/doc_google_type_color.js b/packages/google-cloud-vision/src/v1/doc/google/type/doc_color.js similarity index 85% rename from packages/google-cloud-vision/src/v1/doc/doc_google_type_color.js rename to packages/google-cloud-vision/src/v1/doc/google/type/doc_color.js index 679c7f72339..a2ea753d8a1 100644 --- a/packages/google-cloud-vision/src/v1/doc/doc_google_type_color.js +++ b/packages/google-cloud-vision/src/v1/doc/google/type/doc_color.js @@ -1,23 +1,19 @@ -/* - * Copyright 2017, Google Inc. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
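// Hedged sketch: the Status documentation above says batch responses should
// carry one Status per failed sub-response. For Vision's batchAnnotateImages,
// each element of response.responses may carry such a Status in its `error`
// field (code / message / details); a simple check could look like this:
'use strict';

function logPerImageErrors(batchResponse) {
  batchResponse.responses.forEach((res, i) => {
    if (res.error) {
      // Per the doc above, `code` should be an enum value of google.rpc.Code
      // and `message` is a developer-facing English string.
      console.error(
        `request #${i} failed: [${res.error.code}] ${res.error.message}`
      );
    }
  });
}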
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. -/* - * Note: this file is purely for documentation. Any contents are not expected - * to be loaded as the JS file. - */ +// Note: this file is purely for documentation. Any contents are not expected +// to be loaded as the JS file. /** * Represents a color in the RGBA color space. This representation is designed @@ -135,7 +131,6 @@ * * // ... * - * @external "google.type.Color" * @property {number} red * The amount of red in the color as a value in the interval [0, 1]. * @@ -158,7 +153,12 @@ * If omitted, this color object is to be rendered as a solid color * (as if the alpha value had been explicitly given with a value of 1.0). * - * This object should have the same structure as [google.protobuf.FloatValue]{@link external:"google.protobuf.FloatValue"} + * This object should have the same structure as [FloatValue]{@link google.protobuf.FloatValue} * + * @typedef Color + * @memberof google.type * @see [google.type.Color definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/type/color.proto} - */ \ No newline at end of file + */ +var Color = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; \ No newline at end of file diff --git a/packages/google-cloud-vision/src/v1/doc/doc_google_type_latlng.js b/packages/google-cloud-vision/src/v1/doc/google/type/doc_latlng.js similarity index 73% rename from packages/google-cloud-vision/src/v1/doc/doc_google_type_latlng.js rename to packages/google-cloud-vision/src/v1/doc/google/type/doc_latlng.js index 82dd2e824f9..e54b84d63c7 100644 --- a/packages/google-cloud-vision/src/v1/doc/doc_google_type_latlng.js +++ b/packages/google-cloud-vision/src/v1/doc/google/type/doc_latlng.js @@ -1,23 +1,19 @@ -/* - * Copyright 2017, Google Inc. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright 2017, Google LLC All rights reserved. 
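// Illustrative sketch: per the Color documentation above, red/green/blue are
// fractions in the interval [0, 1] and `alpha` is an optional FloatValue
// wrapper that defaults to fully opaque (1.0) when omitted.
'use strict';

function colorToCss(color) {
  const to255 = v => Math.round((v || 0) * 255);
  // `alpha` is a wrapper message, so the number lives on `alpha.value`.
  const alpha = color.alpha == null ? 1.0 : color.alpha.value;
  return `rgba(${to255(color.red)}, ${to255(color.green)}, ${to255(
    color.blue
  )}, ${alpha})`;
}

console.log(colorToCss({red: 0.5, green: 0.25, blue: 0})); // rgba(128, 64, 0, 1)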
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. -/* - * Note: this file is purely for documentation. Any contents are not expected - * to be loaded as the JS file. - */ +// Note: this file is purely for documentation. Any contents are not expected +// to be loaded as the JS file. /** * An object representing a latitude/longitude pair. This is expressed as a pair @@ -60,12 +56,16 @@ * assert (-90.0, 10.0) == NormalizeLatLng(270.0, 10.0) * assert (90.0, 10.0) == NormalizeLatLng(-270.0, 10.0) * - * @external "google.type.LatLng" * @property {number} latitude * The latitude in degrees. It must be in the range [-90.0, +90.0]. * * @property {number} longitude * The longitude in degrees. It must be in the range [-180.0, +180.0]. * + * @typedef LatLng + * @memberof google.type * @see [google.type.LatLng definition in proto format]{@link https://github.com/googleapis/googleapis/blob/master/google/type/latlng.proto} - */ \ No newline at end of file + */ +var LatLng = { + // This is for documentation. Actual contents will be loaded by gRPC. +}; \ No newline at end of file diff --git a/packages/google-cloud-vision/src/v1/image_annotator_client.js b/packages/google-cloud-vision/src/v1/image_annotator_client.js index bfbad78422d..2dfaee881a2 100644 --- a/packages/google-cloud-vision/src/v1/image_annotator_client.js +++ b/packages/google-cloud-vision/src/v1/image_annotator_client.js @@ -1,197 +1,226 @@ -/* - * Copyright 2017, Google Inc. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * EDITING INSTRUCTIONS - * This file was generated from the file - * https://github.com/googleapis/googleapis/blob/master/google/cloud/vision/v1/image_annotator.proto, - * and updates to that file get reflected here through a refresh process. - * For the short term, the refresh process will only be runnable by Google - * engineers. - * - * The only allowed edits are to method and file documentation. A 3-way - * merge preserves those additions if the generated source changes. - */ -/* TODO: introduce line-wrapping so that it never exceeds the limit. */ -/* jscs: disable maximumLineLength */ -'use strict'; +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
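// Illustrative sketch: the LatLng documentation above requires latitude in
// [-90.0, +90.0] and longitude in [-180.0, +180.0] (WGS84). The normalization
// routine referenced there is not reproduced here; this is only a range check.
'use strict';

function isValidLatLng(latLng) {
  return (
    typeof latLng.latitude === 'number' &&
    typeof latLng.longitude === 'number' &&
    latLng.latitude >= -90 &&
    latLng.latitude <= 90 &&
    latLng.longitude >= -180 &&
    latLng.longitude <= 180
  );
}

console.log(isValidLatLng({latitude: 37.42, longitude: -122.08})); // true
console.log(isValidLatLng({latitude: 270.0, longitude: 10.0})); // false (needs normalizing first)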
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. -var configData = require('./image_annotator_client_config'); -var extend = require('extend'); -var gax = require('google-gax'); - -var SERVICE_ADDRESS = 'vision.googleapis.com'; +'use strict'; -var DEFAULT_SERVICE_PORT = 443; +const gapicConfig = require('./image_annotator_client_config'); +const gax = require('google-gax'); +const merge = require('lodash.merge'); +const path = require('path'); -var CODE_GEN_NAME_VERSION = 'gapic/0.7.1'; - -/** - * The scopes needed to make gRPC calls to all of the methods defined in - * this service. - */ -var ALL_SCOPES = [ - 'https://www.googleapis.com/auth/cloud-platform' -]; +const VERSION = require('../../package.json').version; /** * Service that performs Google Cloud Vision API detection tasks over client * images, such as face, landmark, logo, label, and text detection. The * ImageAnnotator service returns detected entities from the images. * - * * @class + * @memberof v1 */ -function ImageAnnotatorClient(gaxGrpc, grpcClients, opts) { - opts = extend({ - servicePath: SERVICE_ADDRESS, - port: DEFAULT_SERVICE_PORT, - clientConfig: {} - }, opts); - - var googleApiClient = [ - 'gl-node/' + process.versions.node - ]; - if (opts.libName && opts.libVersion) { - googleApiClient.push(opts.libName + '/' + opts.libVersion); - } - googleApiClient.push( - CODE_GEN_NAME_VERSION, - 'gax/' + gax.version, - 'grpc/' + gaxGrpc.grpcVersion - ); - - var defaults = gaxGrpc.constructSettings( +class ImageAnnotatorClient { + /** + * Construct an instance of ImageAnnotatorClient. + * + * @param {object} [options] - The configuration object. See the subsequent + * parameters for more details. + * @param {object} [options.credentials] - Credentials object. + * @param {string} [options.credentials.client_email] + * @param {string} [options.credentials.private_key] + * @param {string} [options.email] - Account email address. Required when + * usaing a .pem or .p12 keyFilename. + * @param {string} [options.keyFilename] - Full path to the a .json, .pem, or + * .p12 key downloaded from the Google Developers Console. If you provide + * a path to a JSON file, the projectId option above is not necessary. + * NOTE: .pem and .p12 require you to specify options.email as well. + * @param {number} [options.port] - The port on which to connect to + * the remote host. + * @param {string} [options.projectId] - The project ID from the Google + * Developer's Console, e.g. 'grape-spaceship-123'. We will also check + * the environment variable GCLOUD_PROJECT for your project ID. If your + * app is running in an environment which supports + * {@link https://developers.google.com/identity/protocols/application-default-credentials Application Default Credentials}, + * your project ID will be detected automatically. + * @param {function} [options.promise] - Custom promise module to use instead + * of native Promises. + * @param {string} [options.servicePath] - The domain name of the + * API remote host. + */ + constructor(opts) { + this._descriptors = {}; + + // Ensure that options include the service address and port. 
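// Usage sketch for the constructor options documented above. The key file
// path and project ID are placeholders, not values from this repository.
'use strict';

const vision = require('@google-cloud/vision');

// Rely on Application Default Credentials and GCLOUD_PROJECT ...
const defaultClient = new vision.v1.ImageAnnotatorClient();
console.log(defaultClient instanceof vision.v1.ImageAnnotatorClient); // true

// ... or pass explicit options such as keyFilename and projectId.
const explicitClient = new vision.v1.ImageAnnotatorClient({
  keyFilename: '/path/to/service-account.json', // placeholder path
  projectId: 'my-project-id', // placeholder project ID
});

// The static getters defined later in this class are reachable from an instance.
console.log(vision.v1.ImageAnnotatorClient.servicePath); // 'vision.googleapis.com'
console.log(explicitClient.constructor.port); // 443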
+ opts = Object.assign( + { + clientConfig: {}, + port: this.constructor.port, + servicePath: this.constructor.servicePath, + }, + opts + ); + + // Create a `gaxGrpc` object, with any grpc-specific options + // sent to the client. + opts.scopes = this.constructor.scopes; + var gaxGrpc = gax.grpc(opts); + + // Save the auth object to the client, for use by other methods. + this.auth = gaxGrpc.auth; + + // Determine the client header string. + var clientHeader = [ + `gl-node/${process.version.node}`, + `grpc/${gaxGrpc.grpcVersion}`, + `gax/${gax.version}`, + `gapic/${VERSION}`, + ]; + if (opts.libName && opts.libVersion) { + clientHeader.push(`${opts.libName}/${opts.libVersion}`); + } + + // Load the applicable protos. + var protos = merge( + {}, + gaxGrpc.loadProto( + path.join(__dirname, '..', '..', 'protos'), + 'google/cloud/vision/v1/image_annotator.proto' + ) + ); + + // Put together the default options sent with requests. + var defaults = gaxGrpc.constructSettings( 'google.cloud.vision.v1.ImageAnnotator', - configData, + gapicConfig, opts.clientConfig, - {'x-goog-api-client': googleApiClient.join(' ')}); - - var self = this; - - this.auth = gaxGrpc.auth; - var imageAnnotatorStub = gaxGrpc.createStub( - grpcClients.google.cloud.vision.v1.ImageAnnotator, - opts); - var imageAnnotatorStubMethods = [ - 'batchAnnotateImages' - ]; - imageAnnotatorStubMethods.forEach(function(methodName) { - self['_' + methodName] = gax.createApiCall( - imageAnnotatorStub.then(function(imageAnnotatorStub) { - return function() { - var args = Array.prototype.slice.call(arguments, 0); - return imageAnnotatorStub[methodName].apply(imageAnnotatorStub, args); - }; - }), - defaults[methodName], - null); - }); -} - - -/** - * Get the project ID used by this class. - * @param {function(Error, string)} callback - the callback to be called with - * the current project Id. - */ -ImageAnnotatorClient.prototype.getProjectId = function(callback) { - return this.auth.getProjectId(callback); -}; - -// Service calls - -/** - * Run image detection and annotation for a batch of images. - * - * @param {Object} request - * The request object that will be sent. - * @param {Object[]} request.requests - * Individual image annotation requests for this batch. - * - * This object should have the same structure as [AnnotateImageRequest]{@link AnnotateImageRequest} - * @param {Object=} options - * Optional parameters. You can override the default settings for this call, e.g, timeout, - * retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the details. - * @param {function(?Error, ?Object)=} callback - * The function which will be called with the result of the API call. - * - * The second parameter to the callback is an object representing [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. - * @return {Promise} - The promise which resolves to an array. - * The first element of the array is an object representing [BatchAnnotateImagesResponse]{@link BatchAnnotateImagesResponse}. - * The promise has a method named "cancel" which cancels the ongoing API call. - * - * @example - * - * var vision = require('@google-cloud/vision'); - * - * var client = vision.v1({ - * // optional auth parameters. 
- * }); - * - * var requests = []; - * client.batchAnnotateImages({requests: requests}).then(function(responses) { - * var response = responses[0]; - * // doThingsWith(response) - * }) - * .catch(function(err) { - * console.error(err); - * }); - */ -ImageAnnotatorClient.prototype.batchAnnotateImages = function(request, options, callback) { - if (options instanceof Function && callback === undefined) { - callback = options; - options = {}; + {'x-goog-api-client': clientHeader.join(' ')} + ); + + // Set up a dictionary of "inner API calls"; the core implementation + // of calling the API is handled in `google-gax`, with this code + // merely providing the destination and request information. + this._innerApiCalls = {}; + + // Put together the "service stub" for + // google.cloud.vision.v1.ImageAnnotator. + var imageAnnotatorStub = gaxGrpc.createStub( + protos.google.cloud.vision.v1.ImageAnnotator, + opts + ); + + // Iterate over each of the methods that the service provides + // and create an API call method for each. + var imageAnnotatorStubMethods = ['batchAnnotateImages']; + for (let methodName of imageAnnotatorStubMethods) { + this._innerApiCalls[methodName] = gax.createApiCall( + imageAnnotatorStub.then( + stub => + function() { + var args = Array.prototype.slice.call(arguments, 0); + return stub[methodName].apply(stub, args); + } + ), + defaults[methodName], + null + ); + } } - if (options === undefined) { - options = {}; + + /** + * The DNS address for this API service. + */ + static get servicePath() { + return 'vision.googleapis.com'; } - return this._batchAnnotateImages(request, options, callback); -}; + /** + * The port for this API service. + */ + static get port() { + return 443; + } -function ImageAnnotatorClientBuilder(gaxGrpc) { - if (!(this instanceof ImageAnnotatorClientBuilder)) { - return new ImageAnnotatorClientBuilder(gaxGrpc); + /** + * The scopes needed to make gRPC calls for every method defined + * in this service. + */ + static get scopes() { + return [ + 'https://www.googleapis.com/auth/cloud-platform', + 'https://www.googleapis.com/auth/cloud-vision', + ]; } - var imageAnnotatorClient = gaxGrpc.load([{ - root: require('google-proto-files')('..'), - file: 'google/cloud/vision/v1/image_annotator.proto' - }]); - extend(this, imageAnnotatorClient.google.cloud.vision.v1); + /** + * Return the project ID used by this class. + * @param {function(Error, string)} callback - the callback to + * be called with the current project Id. + */ + getProjectId(callback) { + return this.auth.getProjectId(callback); + } + // ------------------- + // -- Service calls -- + // ------------------- /** - * Build a new instance of {@link ImageAnnotatorClient}. + * Run image detection and annotation for a batch of images. + * + * @param {Object} request + * The request object that will be sent. + * @param {Object[]} request.requests + * Individual image annotation requests for this batch. + * + * This object should have the same structure as [AnnotateImageRequest]{@link google.cloud.vision.v1.AnnotateImageRequest} + * @param {Object} [options] + * Optional parameters. You can override the default settings for this call, e.g, timeout, + * retries, paginations, etc. See [gax.CallOptions]{@link https://googleapis.github.io/gax-nodejs/global.html#CallOptions} for the details. + * @param {function(?Error, ?Object)} [callback] + * The function which will be called with the result of the API call. * - * @param {Object=} opts - The optional parameters. 
- * @param {String=} opts.servicePath - * The domain name of the API remote host. - * @param {number=} opts.port - * The port on which to connect to the remote host. - * @param {grpc.ClientCredentials=} opts.sslCreds - * A ClientCredentials for use with an SSL-enabled channel. - * @param {Object=} opts.clientConfig - * The customized config to build the call settings. See - * {@link gax.constructSettings} for the format. + * The second parameter to the callback is an object representing [BatchAnnotateImagesResponse]{@link google.cloud.vision.v1.BatchAnnotateImagesResponse}. + * @returns {Promise} - The promise which resolves to an array. + * The first element of the array is an object representing [BatchAnnotateImagesResponse]{@link google.cloud.vision.v1.BatchAnnotateImagesResponse}. + * The promise has a method named "cancel" which cancels the ongoing API call. + * + * @example + * + * const vision = require('@google-cloud/vision'); + * + * var client = new vision.v1.ImageAnnotatorClient({ + * // optional auth parameters. + * }); + * + * var requests = []; + * client.batchAnnotateImages({requests: requests}) + * .then(responses => { + * var response = responses[0]; + * // doThingsWith(response) + * }) + * .catch(err => { + * console.error(err); + * }); */ - this.imageAnnotatorClient = function(opts) { - return new ImageAnnotatorClient(gaxGrpc, imageAnnotatorClient, opts); - }; - extend(this.imageAnnotatorClient, ImageAnnotatorClient); + batchAnnotateImages(request, options, callback) { + if (options instanceof Function && callback === undefined) { + callback = options; + options = {}; + } + options = options || {}; + + return this._innerApiCalls.batchAnnotateImages(request, options, callback); + } } -module.exports = ImageAnnotatorClientBuilder; -module.exports.SERVICE_ADDRESS = SERVICE_ADDRESS; -module.exports.ALL_SCOPES = ALL_SCOPES; + +module.exports = ImageAnnotatorClient; diff --git a/packages/google-cloud-vision/src/v1/index.js b/packages/google-cloud-vision/src/v1/index.js index df112c350a4..a41489d4fd3 100644 --- a/packages/google-cloud-vision/src/v1/index.js +++ b/packages/google-cloud-vision/src/v1/index.js @@ -1,34 +1,19 @@ -/* - * Copyright 2017, Google Inc. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -'use strict'; - -var imageAnnotatorClient = require('./image_annotator_client'); -var gax = require('google-gax'); -var extend = require('extend'); +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. -function v1(options) { - options = extend({ - scopes: v1.ALL_SCOPES - }, options); - var gaxGrpc = gax.grpc(options); - return imageAnnotatorClient(gaxGrpc); -} +'use strict'; -v1.GAPIC_VERSION = '0.7.1'; -v1.SERVICE_ADDRESS = imageAnnotatorClient.SERVICE_ADDRESS; -v1.ALL_SCOPES = imageAnnotatorClient.ALL_SCOPES; +const ImageAnnotatorClient = require('./image_annotator_client'); -module.exports = v1; \ No newline at end of file +module.exports.ImageAnnotatorClient = ImageAnnotatorClient; diff --git a/packages/google-cloud-vision/system-test/.eslintrc.yml b/packages/google-cloud-vision/system-test/.eslintrc.yml new file mode 100644 index 00000000000..2e6882e46d2 --- /dev/null +++ b/packages/google-cloud-vision/system-test/.eslintrc.yml @@ -0,0 +1,6 @@ +--- +env: + mocha: true +rules: + node/no-unpublished-require: off + no-console: off diff --git a/packages/google-cloud-vision/system-test/vision.js b/packages/google-cloud-vision/system-test/vision.js index bccdf698932..ab12d91c40b 100644 --- a/packages/google-cloud-vision/system-test/vision.js +++ b/packages/google-cloud-vision/system-test/vision.js @@ -16,31 +16,30 @@ 'use strict'; -var assert = require('assert'); -var async = require('async'); -var fs = require('fs'); -var path = require('path'); -var Storage = require('@google-cloud/storage'); -var uuid = require('node-uuid'); +const assert = require('assert'); +const async = require('async'); +const fs = require('fs'); +const path = require('path'); +const Storage = require('@google-cloud/storage'); +const uuid = require('node-uuid'); -var env = require('../../../system-test/env.js'); -var Vision = require('../'); +const vision = require('../'); describe('Vision', function() { - var IMAGES = { + const IMAGES = Object.freeze({ document: path.join(__dirname, 'data/document.jpg'), logo: path.join(__dirname, 'data/logo.jpg'), rushmore: path.join(__dirname, 'data/rushmore.jpg'), text: path.join(__dirname, 'data/text.png'), - malformed: __filename - }; + malformed: __filename, + }); - var TESTS_PREFIX = 'gcloud-vision-test'; + const TESTS_PREFIX = 'gcloud-vision-test'; - var storage = new Storage(env); - var vision = new Vision(env); + let storage = new Storage(); + let client = new vision.v1.ImageAnnotatorClient(); - var bucket = storage.bucket(generateName()); + let bucket = storage.bucket(generateName()); before(function(done) { bucket.create(function(err) { @@ -54,62 +53,71 @@ describe('Vision', function() { }); after(function(done) { - storage.getBuckets({ - prefix: TESTS_PREFIX - }, function(err, buckets) { - if (err) { - done(err); - return; - } - - function deleteBucket(bucket, callback) { - bucket.deleteFiles(function(err) { - if (err) { - callback(err); - return; - } - - bucket.delete(callback); - }); + storage.getBuckets( + { + prefix: TESTS_PREFIX, + }, + function(err, buckets) { + if (err) { + done(err); + return; + } + + function deleteBucket(bucket, callback) { + bucket.deleteFiles(function(err) { + if (err) { + callback(err); + return; + } + + bucket.delete(callback); + }); + } + + async.each(buckets, deleteBucket, done); } - - async.each(buckets, deleteBucket, done); - }); + ); }); it('should detect from a URL', () => { var url = 'https://upload.wikimedia.org/wikipedia/commons/5/51/Google.png'; - return vision.logoDetection({ - image: { - source: {imageUri: url} - } - }).then(responses => { - var response = responses[0]; - 
assert.deepEqual(response.logoAnnotations[0].description, 'Google'); - }); + return client + .logoDetection({ + image: { + source: {imageUri: url}, + }, + }) + .then(responses => { + var response = responses[0]; + assert.deepEqual(response.logoAnnotations[0].description, 'Google'); + }); }); it('should detect from a filename', () => { - return vision.logoDetection({ - image: { - source: {filename: IMAGES.logo} - }, - }).then(responses => { - var response = responses[0]; - assert.deepEqual(response.logoAnnotations[0].description, 'Google'); - }); + return client + .logoDetection({ + image: { + source: {filename: IMAGES.logo}, + }, + }) + .then(responses => { + var response = responses[0]; + assert.deepEqual(response.logoAnnotations[0].description, 'Google'); + }); }); it('should detect from a Buffer', () => { var buffer = fs.readFileSync(IMAGES.logo); - return vision.logoDetection({ - image: { - content: buffer - } - }).then(responses => { - var response = responses[0]; - assert.deepEqual(response.logoAnnotations[0].description, 'Google'); - }); + return client + .logoDetection({ + image: { + content: buffer, + }, + }) + .then(responses => { + var response = responses[0]; + assert.deepEqual(response.logoAnnotations[0].description, 'Google'); + }); }); describe('single image', () => { @@ -119,15 +127,17 @@ describe('Vision', function() { {type: 'SAFE_SEARCH_DETECTION'}, ]; it('should perform multiple detections', () => { - return vision.annotateImage({ - features: TYPES, - image: {source: {filename: IMAGES.rushmore}}, - }).then(responses => { - var response = responses[0]; - assert(response.faceAnnotations.length >= 1); - assert(response.labelAnnotations.length >= 1); - assert(response.safeSearchAnnotation !== null); - }); + return client + .annotateImage({ + features: TYPES, + image: {source: {filename: IMAGES.rushmore}}, + }) + .then(responses => { + var response = responses[0]; + assert(response.faceAnnotations.length >= 1); + assert(response.labelAnnotations.length >= 1); + assert(response.safeSearchAnnotation !== null); + }); }); }); diff --git a/packages/google-cloud-vision/test/.eslintrc.yml b/packages/google-cloud-vision/test/.eslintrc.yml new file mode 100644 index 00000000000..73f7bbc946f --- /dev/null +++ b/packages/google-cloud-vision/test/.eslintrc.yml @@ -0,0 +1,5 @@ +--- +env: + mocha: true +rules: + node/no-unpublished-require: off diff --git a/packages/google-cloud-vision/test/gapic-v1.js b/packages/google-cloud-vision/test/gapic-v1.js index 1408a392924..6fd4b62478c 100644 --- a/packages/google-cloud-vision/test/gapic-v1.js +++ b/packages/google-cloud-vision/test/gapic-v1.js @@ -1,71 +1,84 @@ -/* - * Copyright 2017, Google Inc. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ +// Copyright 2017, Google LLC All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + 'use strict'; -var assert = require('assert'); -var vision = require('../src'); +const assert = require('assert'); + +const visionModule = require('../src'); var FAKE_STATUS_CODE = 1; var error = new Error(); error.code = FAKE_STATUS_CODE; -describe('ImageAnnotatorClient', function() { - describe('batchAnnotateImages', function() { - it('invokes batchAnnotateImages without error', function(done) { - var client = vision.v1(); +describe('ImageAnnotatorClient', () => { + describe('batchAnnotateImages', () => { + it('invokes batchAnnotateImages without error', done => { + var client = new visionModule.v1.ImageAnnotatorClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); // Mock request var requests = []; var request = { - requests : requests + requests: requests, }; // Mock response var expectedResponse = {}; // Mock Grpc layer - client._batchAnnotateImages = mockSimpleGrpcMethod(request, expectedResponse); + client._innerApiCalls.batchAnnotateImages = mockSimpleGrpcMethod( + request, + expectedResponse + ); - client.batchAnnotateImages(request, function(err, response) { + client.batchAnnotateImages(request, (err, response) => { assert.ifError(err); assert.deepStrictEqual(response, expectedResponse); done(); }); }); - it('invokes batchAnnotateImages with error', function(done) { - var client = vision.v1(); + it('invokes batchAnnotateImages with error', done => { + var client = new visionModule.v1.ImageAnnotatorClient({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); // Mock request var requests = []; var request = { - requests : requests + requests: requests, }; // Mock Grpc layer - client._batchAnnotateImages = mockSimpleGrpcMethod(request, null, error); + client._innerApiCalls.batchAnnotateImages = mockSimpleGrpcMethod( + request, + null, + error + ); - client.batchAnnotateImages(request, function(err, response) { + client.batchAnnotateImages(request, (err, response) => { assert(err instanceof Error); assert.equal(err.code, FAKE_STATUS_CODE); + assert(typeof response === 'undefined'); done(); }); }); }); - }); function mockSimpleGrpcMethod(expectedRequest, response, error) { diff --git a/packages/google-cloud-vision/test/helpers.test.js b/packages/google-cloud-vision/test/helpers.test.js index ca6bf48f813..26c13bc4d4c 100644 --- a/packages/google-cloud-vision/test/helpers.test.js +++ b/packages/google-cloud-vision/test/helpers.test.js @@ -16,16 +16,21 @@ 'use strict'; -var assert = require('assert'); -var fs = require('fs'); -var is = require('is'); -var sinon = require('sinon'); - -var Vision = require('../'); +const assert = require('assert'); +const Buffer = require('safe-buffer').Buffer; +const fs = require('fs'); +const is = require('is'); +const sinon = require('sinon'); +const vision = require('../'); describe('Vision helper methods', () => { - var sandbox = sinon.sandbox.create(); + const CREDENTIALS = Object.freeze({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + + let sandbox = sinon.sandbox.create(); afterEach(() => { 
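// The body of mockSimpleGrpcMethod above is unchanged context and therefore
// not shown in this diff. For readers following along, an illustrative
// stand-in (assumed shape, not the actual helper) could look like this:
'use strict';

const assert = require('assert');

function illustrativeMockUnaryCall(expectedRequest, response, error) {
  // Returns a function compatible with the _innerApiCalls slot stubbed above:
  // it checks the request, then invokes the callback with either the canned
  // error or the canned response.
  return function(actualRequest, options, callback) {
    assert.deepStrictEqual(actualRequest, expectedRequest);
    if (error) {
      callback(error);
    } else {
      callback(null, response);
    }
  };
}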
sandbox.restore(); @@ -33,20 +38,24 @@ describe('Vision helper methods', () => { describe('annotateImage', () => { it('calls batchAnnotateImages correctly', () => { - var vision = Vision.v1(); - var batchAnnotate = sandbox.stub(vision, 'batchAnnotateImages'); - batchAnnotate.callsArgWith(2, undefined, {responses: [{ - logoAnnotations: [{description: 'Google'}], - }]}); + let client = new vision.v1.ImageAnnotatorClient(CREDENTIALS); + let batchAnnotate = sandbox.stub(client, 'batchAnnotateImages'); + batchAnnotate.callsArgWith(2, undefined, { + responses: [ + { + logoAnnotations: [{description: 'Google'}], + }, + ], + }); // Ensure that the annotateImage method arrifies the request and // passes it through to the batch annotation method. - var request = { - image: {content: new Buffer('bogus==')}, + let request = { + image: {content: Buffer.from('bogus==')}, features: {type: ['LOGO_DETECTION']}, }; - return vision.annotateImage(request).then(r => { - var response = r[0]; + return client.annotateImage(request).then(r => { + let response = r[0]; // Ensure that we got the slice of the response that we expected. assert.deepEqual(response, { @@ -56,27 +65,31 @@ describe('Vision helper methods', () => { // Inspect the calls to batchAnnotateImages and ensure they matched // the expected signature. assert(batchAnnotate.callCount === 1); - assert(batchAnnotate.calledWith([request])); + assert(batchAnnotate.calledWith({requests: [request]})); }); }); it('understands buffers', () => { - var vision = Vision.v1(); + let client = new vision.v1.ImageAnnotatorClient(CREDENTIALS); // Stub out the batch annotation method. - var batchAnnotate = sandbox.stub(vision, 'batchAnnotateImages'); - batchAnnotate.callsArgWith(2, undefined, {responses: [{ - logoAnnotations: [{description: 'Google'}], - }]}); + let batchAnnotate = sandbox.stub(client, 'batchAnnotateImages'); + batchAnnotate.callsArgWith(2, undefined, { + responses: [ + { + logoAnnotations: [{description: 'Google'}], + }, + ], + }); // Ensure that the annotateImage method arrifies the request and // passes it through to the batch annotation method. - var request = { - image: new Buffer('fakeImage'), + let request = { + image: Buffer.from('fakeImage'), features: {type: ['LOGO_DETECTION']}, }; - return vision.annotateImage(request).then(r => { - var response = r[0]; + return client.annotateImage(request).then(r => { + let response = r[0]; // Ensure that we got the slice of the response that we expected. assert.deepEqual(response, { @@ -90,35 +103,39 @@ describe('Vision helper methods', () => { image: {content: 'ZmFrZUltYWdl'}, features: {type: ['LOGO_DETECTION']}, }); - assert(batchAnnotate.calledWith([request])); + assert(batchAnnotate.calledWith({requests: [request]})); }); }); it('understands filenames', () => { - var vision = Vision.v1(); + let client = new vision.v1.ImageAnnotatorClient(CREDENTIALS); // Stub out `fs.readFile` and return a bogus image object. // This allows us to test filename detection. - var readFile = sandbox.stub(fs, 'readFile'); - readFile.withArgs('image.jpg').callsArgWith(2, null, - new Buffer('fakeImage') - ); + let readFile = sandbox.stub(fs, 'readFile'); + readFile + .withArgs('image.jpg') + .callsArgWith(2, null, Buffer.from('fakeImage')); readFile.callThrough(); // Stub out the batch annotation method as before. 
- var batchAnnotate = sandbox.stub(vision, 'batchAnnotateImages'); - batchAnnotate.callsArgWith(2, undefined, {responses: [{ - logoAnnotations: [{description: 'Google'}], - }]}); + let batchAnnotate = sandbox.stub(client, 'batchAnnotateImages'); + batchAnnotate.callsArgWith(2, undefined, { + responses: [ + { + logoAnnotations: [{description: 'Google'}], + }, + ], + }); // Ensure that the annotateImage method arrifies the request and // passes it through to the batch annotation method. - var request = { + let request = { image: {source: {filename: 'image.jpg'}}, features: {type: ['LOGO_DETECTION']}, }; - return vision.annotateImage(request).then(r => { - var response = r[0]; + return client.annotateImage(request).then(r => { + let response = r[0]; // Ensure that we got the slice of the response that we expected. assert.deepEqual(response, { @@ -137,45 +154,52 @@ describe('Vision helper methods', () => { image: {content: 'ZmFrZUltYWdl'}, features: {type: ['LOGO_DETECTION']}, }); - assert(batchAnnotate.calledWith([request])); + assert(batchAnnotate.calledWith({requests: [request]})); }); }); it('propagates the error if a file is not found', () => { - var vision = Vision.v1(); + let client = new vision.v1.ImageAnnotatorClient(CREDENTIALS); // Stub out `fs.readFile` and return a bogus image object. // This allows us to test filename detection. - var readFile = sandbox.stub(fs, 'readFile'); + let readFile = sandbox.stub(fs, 'readFile'); readFile.withArgs('image.jpg').callsArgWith(2, {error: 404}); readFile.callThrough(); // Ensure that the annotateImage method arrifies the request and // passes it through to the batch annotation method. - var request = { + let request = { image: {source: {filename: 'image.jpg'}}, features: {type: ['LOGO_DETECTION']}, }; - return vision.annotateImage(request).then(assert.fail).catch(err => { - assert.deepEqual(err, {error: 404}); - }); + return client + .annotateImage(request) + .then(assert.fail) + .catch(err => { + assert.deepEqual(err, {error: 404}); + }); }); it('retains call options sent', () => { - var vision = Vision.v1(); - var batchAnnotate = sandbox.stub(vision, 'batchAnnotateImages'); - batchAnnotate.callsArgWith(2, undefined, {responses: [{ - logoAnnotations: [{description: 'Google'}], - }]}); + let client = new vision.v1.ImageAnnotatorClient(CREDENTIALS); + let batchAnnotate = sandbox.stub(client, 'batchAnnotateImages'); + batchAnnotate.callsArgWith(2, undefined, { + responses: [ + { + logoAnnotations: [{description: 'Google'}], + }, + ], + }); // Ensure that the annotateImage method arrifies the request and // passes it through to the batch annotation method. var request = { - image: {content: new Buffer('bogus==')}, + image: {content: Buffer.from('bogus==')}, features: {type: ['LOGO_DETECTION']}, }; - return vision.annotateImage(request, {foo: 'bar'}).then(r => { - var response = r[0]; + return client.annotateImage(request, {foo: 'bar'}).then(r => { + let response = r[0]; // Ensure that we got the slice of the response that we expected. assert.deepEqual(response, { @@ -185,24 +209,28 @@ describe('Vision helper methods', () => { // Inspect the calls to batchAnnotateImages and ensure they matched // the expected signature. 
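// The assertions above expect image bytes (from a Buffer or a local file) to
// reach batchAnnotateImages as base64 `content`. The coercion these helpers
// exercise boils down to the following one-liner:
'use strict';

const Buffer = require('safe-buffer').Buffer;

const bytes = Buffer.from('fakeImage');
console.log(bytes.toString('base64')); // 'ZmFrZUltYWdl', as asserted above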
assert(batchAnnotate.callCount === 1); - assert(batchAnnotate.calledWith([request], {foo: 'bar'})); + assert(batchAnnotate.calledWith({requests: [request]}, {foo: 'bar'})); }); }); it('fires a callback if provided', done => { - var vision = Vision.v1(); - var batchAnnotate = sandbox.stub(vision, 'batchAnnotateImages'); - batchAnnotate.callsArgWith(2, undefined, {responses: [{ - logoAnnotations: [{description: 'Google'}], - }]}); + let client = new vision.v1.ImageAnnotatorClient(CREDENTIALS); + let batchAnnotate = sandbox.stub(client, 'batchAnnotateImages'); + batchAnnotate.callsArgWith(2, undefined, { + responses: [ + { + logoAnnotations: [{description: 'Google'}], + }, + ], + }); // Ensure that the annotateImage method does *not* pass the callback // on to batchAnnotateImages, but rather handles it itself. - var request = { - image: {content: new Buffer('bogus==')}, + let request = { + image: {content: Buffer.from('bogus==')}, features: {type: ['LOGO_DETECTION']}, }; - vision.annotateImage(request, function(err, response) { + client.annotateImage(request, function(err, response) { // Establish that we got the expected response. assert(is.undefined(err)); assert.deepEqual(response, { @@ -212,57 +240,64 @@ describe('Vision helper methods', () => { // Inspect the calls to batchAnnotate and ensure that they match // what we expected. assert(batchAnnotate.callCount === 1); - assert(batchAnnotate.calledWith([request], undefined)); + assert(batchAnnotate.calledWith({requests: [request]}, undefined)); done(); }); }); it('fires the callback on error', () => { - var vision = Vision.v1(); - var batchAnnotate = sandbox.stub(vision, 'batchAnnotateImages'); + let client = new vision.v1.ImageAnnotatorClient(CREDENTIALS); + let batchAnnotate = sandbox.stub(client, 'batchAnnotateImages'); batchAnnotate.callsArgWith(2, {message: 'Bad things!'}); // Ensure that the annotateImage method does *not* pass the callback // on to batchAnnotateImages, but rather handles it itself. - var request = { - image: {content: new Buffer('bogus==')}, + let request = { + image: {content: Buffer.from('bogus==')}, features: {type: ['LOGO_DETECTION']}, }; - return vision.annotateImage(request).catch(err => { + return client.annotateImage(request).catch(err => { // Establish that we got the expected response. assert.deepEqual(err, {message: 'Bad things!'}); // Inspect the calls to batchAnnotate and ensure that they match // what we expected. 
assert(batchAnnotate.callCount === 1); - assert(batchAnnotate.calledWith([request], undefined)); + assert(batchAnnotate.calledWith({requests: [request]}, undefined)); }); }); it('requires an image and throws without one', () => { - var vision = Vision.v1(); - var request = {}; - return vision.annotateImage(request).then(assert.fail).catch(err => { - var expected = 'Attempted to call `annotateImage` with no image.'; - assert(err.message === expected); - }); + let client = new vision.v1.ImageAnnotatorClient(CREDENTIALS); + let request = {}; + return client + .annotateImage(request) + .then(assert.fail) + .catch(err => { + let expected = 'Attempted to call `annotateImage` with no image.'; + assert(err.message === expected); + }); }); }); describe('single-feature methods', () => { it('calls annotateImage with the correct feature', () => { - var vision = Vision.v1(); - var annotate = sandbox.spy(vision, 'annotateImage'); - var batchAnnotate = sandbox.stub(vision, 'batchAnnotateImages'); - batchAnnotate.callsArgWith(2, undefined, {responses: [{ - logoAnnotations: [{description: 'Google'}], - }]}); + let client = new vision.v1.ImageAnnotatorClient(CREDENTIALS); + let annotate = sandbox.spy(client, 'annotateImage'); + let batchAnnotate = sandbox.stub(client, 'batchAnnotateImages'); + batchAnnotate.callsArgWith(2, undefined, { + responses: [ + { + logoAnnotations: [{description: 'Google'}], + }, + ], + }); // Ensure that the annotateImage method does *not* pass the callback // on to batchAnnotateImages, but rather handles it itself. - var imageRequest = {image: {content: new Buffer('bogus==')}}; - return vision.logoDetection(Object.assign({}, imageRequest)).then(r => { - var response = r[0]; + let imageRequest = {image: {content: Buffer.from('bogus==')}}; + return client.logoDetection(Object.assign({}, imageRequest)).then(r => { + let response = r[0]; // Ensure that we got the slice of the response that we expected. assert.deepEqual(response, { @@ -272,26 +307,33 @@ describe('Vision helper methods', () => { // Inspect the calls to annotateImage and batchAnnotateImages and // ensure they matched the expected signature. 
assert(annotate.callCount === 1); - assert(annotate.calledWith({ - features: [{type: 3}], - image: imageRequest.image, - })); + assert( + annotate.calledWith({ + features: [{type: 3}], + image: imageRequest.image, + }) + ); assert(batchAnnotate.callCount === 1); - assert(batchAnnotate.calledWith( - [{image: imageRequest.image, features: [{type: 3}]}] - )); + assert( + batchAnnotate.calledWith({ + requests: [{image: imageRequest.image, features: [{type: 3}]}], + }) + ); }); }); it('throws an exception if conflicting features are given', () => { - var vision = Vision.v1(); - var imageRequest = { - image: {content: new Buffer('bogus==')}, + let client = new vision.v1.ImageAnnotatorClient(CREDENTIALS); + let imageRequest = { + image: {content: Buffer.from('bogus==')}, features: [{type: 0}], }; - vision.logoDetection(imageRequest).then(assert.fail).catch(ex => { - assert(ex.message.indexOf('Setting explicit') > -1); - }); + client + .logoDetection(imageRequest) + .then(assert.fail) + .catch(ex => { + assert(ex.message.indexOf('Setting explicit') > -1); + }); }); }); }); diff --git a/packages/google-cloud-vision/test/index.test.js b/packages/google-cloud-vision/test/index.test.js index 197c058fe95..bb7db6819a3 100644 --- a/packages/google-cloud-vision/test/index.test.js +++ b/packages/google-cloud-vision/test/index.test.js @@ -16,35 +16,39 @@ 'use strict'; -var assert = require('assert'); - -var Vision = require('../'); +const assert = require('assert'); +const vision = require('../'); describe('Vision', () => { + const CREDENTIALS = Object.freeze({ + credentials: {client_email: 'bogus', private_key: 'bogus'}, + projectId: 'bogus', + }); + describe('v1', () => { it('returns a v1 GAPIC augmented with helpers', () => { - var vision = Vision.v1(); + let client = new vision.v1.ImageAnnotatorClient(CREDENTIALS); // Assert that the GAPIC v1 methods are present on the object. - assert(vision.batchAnnotateImages instanceof Function); + assert(client.batchAnnotateImages instanceof Function); // Assert that the manual single-image helper method is present // on the object. - assert(vision.annotateImage instanceof Function); + assert(client.annotateImage instanceof Function); // Assert that some of the expected single-feature helper methods // are present on the object. - assert(vision.faceDetection instanceof Function); - assert(vision.landmarkDetection instanceof Function); - assert(vision.logoDetection instanceof Function); - assert(vision.labelDetection instanceof Function); - assert(vision.textDetection instanceof Function); - assert(vision.documentTextDetection instanceof Function); - assert(vision.safeSearchDetection instanceof Function); - assert(vision.imageProperties instanceof Function); - assert(vision.cropHints instanceof Function); - assert(vision.webDetection instanceof Function); + assert(client.faceDetection instanceof Function); + assert(client.landmarkDetection instanceof Function); + assert(client.logoDetection instanceof Function); + assert(client.labelDetection instanceof Function); + assert(client.textDetection instanceof Function); + assert(client.documentTextDetection instanceof Function); + assert(client.safeSearchDetection instanceof Function); + assert(client.imageProperties instanceof Function); + assert(client.cropHints instanceof Function); + assert(client.webDetection instanceof Function); }); }); });