From ad44aac8929ead8407562f4fe89377e36b4fe0e5 Mon Sep 17 00:00:00 2001 From: Ammar Dodin <8563179+ammardodin@users.noreply.github.com> Date: Fri, 9 Feb 2018 14:20:05 -0500 Subject: [PATCH 01/45] Generate Text to Speech (#332) * :new: generated text-to-speech with latest swagger * :green_heart: added missing tests and updated method names in tests * Changes as per review Mike's comments * Remove unnecessary overview * :unamused: Rename Bluemix to IBM Cloud * change Bluemix to IBM Cloud * fix capitalization of IBM CLoud * Update README.md * :memo: Update to use Watson APIs * Adding integration tests and removing REST description * enable dot_env to get env variables * Revert "enable dot_env to get env variables" This reverts commit f7ac60a596d89a12360489c9e2627a4dd3748dce. * Encrypt travis env file * Adding VCAP_SERVICES * Ignoring destructive tests * Remove the VCAP_SERVICES keyword * Regenerate encrypted env file for travis * Remove the skip test for integration test * Add in init file for integration tests * Update tests as per new env file --- .env.enc | Bin 3280 -> 2592 bytes .travis.yml | 3 +- README.md | 21 +- examples/README.md | 11 +- .../README.md | 8 +- examples/conversation_v1.py | 18 +- examples/dialog_v1.py | 49 - examples/natural_language_classifier_v1.py | 23 +- examples/retrieve_and_rank_v1.py | 79 -- examples/text_to_speech_v1.py | 13 +- examples/tradeoff_analytics_v1.py | 16 - test/__init__.py | 4 + test/integration/__init__.py | 17 + .../test_integration_speech_to_text_v1.py | 0 .../test_integration_text_to_speech_v1.py | 60 + .../test_integration_visual_recognition.py | 0 test/test_integration_text_to_speech_v1.py | 27 - test/test_text_to_speech_v1.py | 263 ++-- watson_developer_cloud/text_to_speech_v1.py | 1181 +++++++++++++++-- watson_developer_cloud/utils.py | 2 +- 20 files changed, 1402 insertions(+), 393 deletions(-) delete mode 100644 examples/dialog_v1.py delete mode 100644 examples/retrieve_and_rank_v1.py delete mode 100755 
examples/tradeoff_analytics_v1.py create mode 100644 test/integration/__init__.py rename test/{ => integration}/test_integration_speech_to_text_v1.py (100%) create mode 100644 test/integration/test_integration_text_to_speech_v1.py rename test/{ => integration}/test_integration_visual_recognition.py (100%) delete mode 100644 test/test_integration_text_to_speech_v1.py diff --git a/.env.enc b/.env.enc index 2540a4f175708f8f5ba35f70beb68243bddb491e..5903c88749cdd1ae8aa30d7021ffb66353a8cdfe 100644 GIT binary patch literal 2592 zcmV+*3g7i)Hd2VlS4F!~1T4eq37^*ZzGm7<9-GEY^z_E7*qQ zjg({kfYOzakHX0^Dm2B#v!0|21Ej*$(4;R^Upbo_Fs4dMHOy(f>WjO+BBM*}f${OQ zD|A7Qw_|goue)DHZeaIsX7RQB4Wjsdh?O+z?FT|m|3c%0&_Bs`#~V}_&zyBEgKyEe z3t97EWrBx=X1d>3FZO%BNHWiQwh?e_A+Csl^=$Uduvm}E&V7E-S+s5fRVp9XTRRcY$1MCPly9{YzX6*Z5fvR zXIz@2tJ5N)>6Ao=DcDZSooaNxYLG5I9*9K1S86XtnFI_%O>aGmca&w&-&11vrtCMR zQ;8#1Peo>XG8gl?uI8(90{#TN7KJK`YjUqHN+|DfBbVSD3sBsTwVbWA8U^?2yZ9>B zD?~@e%Z@1Z$|zv7mu-A(a(q>fF1%l5pOZR-)9Ev@lcoo)@}86SN}7i+3L1_^JWYi! zVDIUPI0C{#27-V_UUC?(#_Z*hp=W9K1DFCnJh9JcG!;7^P!E&q{b?%M%moc*_tv0( zL=0JhFAIzWnTkNV2ssRUV=2>ENlL@!XsYPwPT=k01s%HrThhpJcEDn>2* zZ_k|pA(PmCmjMIk68lftag&6^*k2OdVi;b2GcL9&A-AvFScS%N{F1F+FC~r3k6)Y` zdwV842T4#&)!QB*3d5T;#wP60!b`w5`sLP2OSy`Wt9Jj% z>sh{VzK?zlM+1jk8}og&yZNB(b=PcN6_Ys^EMn4$=fKHyh`48hyavN1zj(3+GSlvf z`EAGnWVxRv&_Z&N86;1brL$Z9UhYKX4G|{^sWYH=)5*~&dpv&jO9GilKT(oxc$Pi( z(?)RPQQ^ub;*tJP^t_oT<8YKUt9cGpeA++r(;VQl_-~r+sbkU^(UAJYx&0+0fix+y z&^3g68dZ9Z&{!ljbf-m1pS)kMq2p~kz|Nha(0HMkzF;Pt#rRvxqO9_5<+E<2L0`-fQeI2nkBllsulk z5kCVpE%r~q;D-KP522kkjA;7CZ-~(#$MR5c?crXS zAFj3&S~OoZ8DvUQC+eK5K(|g=xrw)s^y)LdIyHsnd#souCiEg)`JgLj)9HheS#-n# z@cw*VF77*e;sh;9SkL|l_MrHzgUA9}8Y0rnrbO|1Vs_l{lrBGW;TM&N9oc8o3vD{2 zE3IQk?fM?e)vbDY7@;7&4`zjcLlj)zF$ZD@CGz=dYTE-%xG>k}EYI(O8rJUNm?MUp zeA0L@-3V_!WH-l**=lbG6~~A>?ybY#j@N88q2if`{>u&Vi8d!S3WaFX600%|H7-Nq zf;-~8OBbEzHZMgk?2dq%HH+{^_ye@&W&l{^G(^>v46%)|Augv0r4?Pi*d?l=cYsS+ zqUT0j_0ahsESf5KpEdIl>**qF6;6^+aU+puu*)N~|7VWqaC|!HR4;IhH2oJ*B{;no 
zwb+bw5b^(F>90t*?-h2W>#La;WV8`5D_?vf+Ldjvi9c@tA%nN4*7+-=uePZ7D_9jG zmL%qWS&EbX+Wh~&TSfZ`y;Xrta=mtD87sd}>}<&EPO$;ccYM_CU+h`2U6^!yk(92i z?6PvTcm-kn-tVqgCJkYL&sNtK2$Q*P zNvxF;zW()AjH`hko|PifcNi?sUME4zpVQBw4yo4vCiPyT73m*G8v9r zHtIwXRX47ZfFx1gXyqu72$PAp7AQGAtuda9dn_8V0muDVbTFG%WTgrzz>uAnQ}W*G zZm5y2?uc_O$Szs@Noh~>u2C&GrK+NKziG8ys8|(PR?^-suZi>z9AyLz!+IhOK zpL~sm`pg;3*u0=d(snv5nJ5FUYtcN<+zph>-AO!dE9$b#4W-nLy&=vvm|7gDYMXYhw1fG06+2kFTgD(O~eDrs&<7@hUzMoD##X+Xkhv+)?nhUj^@9G}=G33p&hG2;fgBYXe^arPMrWUXFICgGqF2WU$i zTxUJh_%W3~XT&s~vh(07zTW+(>izRE65P|cu1Orr?PU@C>}Q1qJUUg9A1+1iq;I?q Cv=dkW literal 3280 zcmV;>3@`IMaSf13ZMGYZLK$EO!O&f`8P9_)ly!s?^B~Z@H3ln1)`v*hwPT4I8Akds zvbQJ8# z^WNOoK5-i3VZ>uaJw7;`4D)l2Icq&g^#dQ8B^pApY0T#>8YwYK5(9VGHEtn@vfkW@ zbgPJm<1b_9j-B=@L?KqzxPSn9>K*NnB-Rg>f_zonormJ0_3E*1TBqzdm)=4xvQzWG zN0r)D=|)};oO@(!f(xUmiZ?5=U`2hj>#5f|7hC&12aV=A+g#pHmh|}52F&V|8L__t zAEgE>8t65N2W=mgk@CmFR_L=P0lA<^BM@u|Sq7o{e7iBwnfvJRo*+S#(v_Bw1@+t( zw6d75cyx8RuqLRLQg$LScMk0$xr9h84#=K(ODl_HgEHMvIhNY{!va=muZLs_8B~e0 zUHrH+MM*r%lAM)`-0-s3gY~!bopUT0>(TztWUZO8@-arM8c>hBJe{ zTt&H=DAVT^nkxmjJF8HF#74UEgzkl&brzcX8`Bxm{@iymNm{bfT--x}A>qCwm=6Yr z@yx)wRh!F{&I;n8N)xza5#X*T81O`vNQgZ%R?~u)n0Sj`H)C*_;|MG_%X-}ZfjL~@ zoMEa9m@F|BdZgy5ztl%Dt*_{ZWwA}v8;4ipElzvy6L79AKYYO7LaQfnB}hexoqv!9 z4JsPKax+V(+A4u$uWU^U9N2VMa!rOzgS zw!H2mEX(|Y=M8;F5iw0oGiM3EeFLK}#VINW^~=XpJSYI)5%`&Ec`D7GGDNv{4?$t^ z@bpe&gD&my|2fsF`g_bri3j4l56BaIq~ebi*wh(A=;G!&&bylevFVK{V!%br$J*UE zMzP0OD)Na)S+otZrgv1{&$Xb{F8vWORvIYkI>bB9G9rPPcV>sQVfmS~+_@_gvzL)D zj@l1@@8+GYc^iIzE88%1tx=R9-0HYRq(^WGP3ATt$!uMEYc@{QDYAcDQ-r zec(W*o!>?oP57eCg8P~oJP>nmSJ{q}@mL^2d|(?(iXLAx`4z=A} z9iVAJ-!myBRBjpzm3Hrdn0Xvqceb72TxOqY6n@@toG`zs=JD!XIQ%> z?9n$xs3oFm)8?<%lFr%(*7ZT}?FYYS)^EcLL~nlM^V!3^0Zz>5`rYjcY2KloO!V6w zeOirVx!EQy%ZR5Y!}W6nX|w@Vp66Z?aMe#Q-+jGDic?0xCnveXyC!&;B^f@REfgVsoN^2#HgL8}}*BTVEi@z%Y zEWE|gZftboNz4v!15b|4uiRK2MWHaZ#fnC(yma(8#(Gna25zr{ho~=76@jtZNMKQV zJW-yzXX}}fetJ@@#*7I{7%V7oz%Aa$*6YWqWKYzvk^lZ}xhIq=EGQZH7w 
z@^c#l?(6GUCn^Yh{eqx=8rHbdYsqB!tba$vh6ipuxmB`=fIO9F2uyRzijv%c@?5B^ zWI7B8(iz1?5r9;{)EZeYgAU9FZLz`6!DFX5Z8+R)&h86phG|rzCll{M7|%3u^*hV7 zNqxaT!{W{`ljGq*f%DJG$&BdJhl02N1ngL#=HL`++_Rq^rEQm7plbT}GYEiz=iJcW zz&2EyzE!qmlngv`-$=LHfOY&&{wOgRRJEr}f(lrb=qGKBy6;-k+=&bozj}E6Mox?S z`r%Bf>!OnoapNZ0C6%O5S>Ec|))Cp1=+d?ihN*%fwl%f4y4c^2!qAv>5e!7zWDn5@ zIc~KY7P7_>m+kR7Ndw?2q^Kz}=*R}O=lGx1?-Ar@8-TU(=SaEsnMi1h>T|{IAeP`82MXacMvvGMr55+w} zDtqXNroPR(YF^B>TjwgmhJ>PH*IeA8uu`cqvz!tewK3vINao^>jeX)9liPnKO8p{m z(RuWoz}Mxl4gTh6(~uU`eE~+UDuhloKmI z0|d${fLit4ct6yze-JsNOcOJOaQbPI03IpcriL%%{^@3yXHBfnKXlsjY$+cw54V$^ zjs}sJ|JZSPmEStUc(|?ivqxM@Cc7nMNN~&uP}ln63S&s853lkPn3eak!lk)An0ssW z?bn~J9_Fg;+)JUa01!l!ow5_(+2VzfK#~QBiR;SX#+ZUIaME!T3F#0-r6C>;I};&c z!s{{*MVS1VJ2SX5aEVRm;T)Cxc6(qkkQ=L3|HA(!gwJKV9}cpIjd~sn@S=Q{`ZswJ zVC)k1^NY`jm7gHuD%;{OGL;7GOZXf~x-{`!QnpqBHhTrZVX}pPTRipKuh3$NsG>Us zP-HfdM`y-er{#;~6|PFw2rL()|BhP@_&c`0vy5Xwk8c6Wm>fkvWwn%T&Q3WO%J6s6 zUcI|}ZRo0m@l7NuKB1qP5Q%0qy^6>%atM+Rym5BLh(T4_Z;08oW&0 zomtD?)Lqw6>oC^z+dQH%lDFWG+*z8qcoH=pjxk~c-arVFmqCYRwDEYFdYvKm0>jW@ zS;hhT?9t_avREf<7)kckAeR{p91nzZ{L@a#7cB5t3gBY7+1wp(5O(YI zu69n#2DQfbhT&`o`%Weyf0v984AmoYj8A!~dJD8Y2cYGAc}hE$t*N9pVsLvzYmG}n z$As1+N36~>r#kGM3KMEAzQNxmOp%dEG#0>w2Tu+vrF_!})L3tx!rBw*YNkdOPS_mB zxD~vfh_CallgfHIo+NK+@taq@2krUxHw`0yLkm}=A#i8L!UiRJ!Y`~d0E}iRr}cG0 zSH|y!4|Qs}3ObaA{b99l3|n=0?=If7IEXi>>h+mqUtK*xrmqiH`E7~baJR)L8o`fe zS&@N;8pGgpYBU!kG7mf=tDun8@?tXH%J%mC`hJ5w)w@%@eu+&o{HTa68d70s-EXfK zg>&l?n!Usrb5qT%?V!4ohY(>OGoIV6YWa@+&cPGNED}}@%|W{VaG5w3^Er>F)!@ib zVw_XU1Dsc8G)p7uR8?=Ire*vW|9FEdKll9G!wLy&tY`Iz<8}UPgZul#TxyK7Y&!Wg z6bMTGBC$5-bLtbhnD2z)C)(^%5T$)O~Q5|JdK*qShJqLtjT>NXEyyNzYejIF9_o%M4IsTSN(>JrlgiI?9n2g zbLG!J&v0}|&Bz73Yn^nsar`orE9sw5=aYWZTTBiqwn~bc3>@)O`~MKkT3#{0-gy_^#`l^w0r-qbCsef_BCc&Fu>b#!DqdkzG?buEZz(zWDT` O%S`Ax4Nt@$$^Qr=UTuW{ diff --git a/.travis.yml b/.travis.yml index 91744d29b..cbc1b4be4 100644 --- a/.travis.yml +++ b/.travis.yml @@ -7,7 +7,8 @@ python: - '3.6' cache: pip before_install: -- '[ "${TRAVIS_PULL_REQUEST}" = "false" ] 
&& openssl aes-256-cbc -K $encrypted_6e98b3e8e789_key -iv $encrypted_6e98b3e8e789_iv -in .env.enc -out .env -d || true' +- '[ "${TRAVIS_PULL_REQUEST}" = "false" ] && openssl aes-256-cbc -K $encrypted_cebf25e6c525_key + -iv $encrypted_cebf25e6c525_iv -in .env.enc -out .env -d || true' install: pip install tox-travis script: tox before_deploy: diff --git a/README.md b/README.md index 736bc57b1..f73ca49ec 100755 --- a/README.md +++ b/README.md @@ -1,11 +1,11 @@ # Watson Developer Cloud Python SDK -[![Build Status](https://travis-ci.org/watson-developer-cloud/python-sdk.svg)](https://travis-ci.org/watson-developer-cloud/python-sdk) +[![Build Status](https://travis-ci.org/watson-developer-cloud/python-sdk.svg?branch=master)](https://travis-ci.org/watson-developer-cloud/python-sdk) [![Slack](https://wdc-slack-inviter.mybluemix.net/badge.svg)](https://wdc-slack-inviter.mybluemix.net) [![codecov.io](https://codecov.io/github/watson-developer-cloud/python-sdk/coverage.svg?branch=master)](https://codecov.io/github/watson-developer-cloud/python-sdk?branch=master) [![Latest Stable Version](https://img.shields.io/pypi/v/watson-developer-cloud.svg)](https://pypi.python.org/pypi/watson-developer-cloud) -Python client library to quickly get started with the various [Watson Developer Cloud][wdc] services. +Python client library to quickly get started with the various [Watson APIs][wdc] services. ## Installation @@ -37,24 +37,24 @@ The [examples][examples] folder has basic and advanced examples. Service credentials are required to access the APIs. -If you run your app in Bluemix, you don't need to specify the username and password. In that case, the SDK uses the `VCAP_SERVICES` environment variable to load the credentials. +If you run your app in IBM Cloud, you don't need to specify the username and password. In that case, the SDK uses the `VCAP_SERVICES` environment variable to load the credentials. 
-To run locally or outside of Bluemix you need the `username` and `password` credentials for each service. (Service credentials are different from your Bluemix account email and password.) +To run locally or outside of IBM Cloud you need the `username` and `password` credentials for each service. (Service credentials are different from your IBM Cloud account email and password.) To create an instance of the service: -1. Log in to [Bluemix][bluemix]. +1. Log in to [IBM Cloud][ibm_cloud]. 1. Create an instance of the service: - 1. In the Bluemix **Catalog**, select the Watson service you want to use. For example, select the Conversation service. + 1. In the IBM Cloud **Catalog**, select the Watson service you want to use. For example, select the Conversation service. 1. Type a unique name for the service instance in the **Service name** field. For example, type `my-service-name`. Leave the default values for the other options. 1. Click **Create**. To get your service credentials: -Copy your credentials from the **Service details** page. To find the the Service details page for an existing service, navigate to your Bluemix dashboard and click the service name. +Copy your credentials from the **Service details** page. To find the the Service details page for an existing service, navigate to your IBM Cloud dashboard and click the service name. 1. On the **Service Details** page, click **Service Credentials**, and then **View credentials**. -1. Copy `username` and `password`. +1. Copy `username`, `password`, and `url`. ## Python Version @@ -82,9 +82,6 @@ response = conversation.message(workspace_id=workspace_id, input={ 'text': 'What\'s the weather like?'}) print(json.dumps(response, indent=2)) ``` -## Known Issues - -See [issues](https://github.com/watson-developer-cloud/python-sdk/issues). ## Dependencies @@ -104,7 +101,7 @@ See [CONTRIBUTING.md][CONTRIBUTING]. This library is licensed under the [Apache 2.0 license][license]. 
[wdc]: http://www.ibm.com/watson/developercloud/ -[bluemix]: https://console.bluemix.net +[ibm_cloud]: https://console.bluemix.net [responses]: https://github.com/getsentry/responses [requests]: http://docs.python-requests.org/en/latest/ [examples]: https://github.com/watson-developer-cloud/python-sdk/tree/master/examples diff --git a/examples/README.md b/examples/README.md index 9b8a7056d..fcc43978b 100644 --- a/examples/README.md +++ b/examples/README.md @@ -1,12 +1,11 @@ ## Examples -To run the examples, you will need the `username` and `password` credentials. To get your service credentials, follow these steps: - 1. Log in to Bluemix at https://bluemix.net. +To run the examples, you will need a `username`, `password`, and `url`. To get your service credentials, follow these steps: + 1. Log in to IBM Cloud at https://console.bluemix.net. 1. Create an instance of the service: - 1. In the Bluemix **Catalog**, select the Natural Language Classifier service. - 1. Under **Add Service**, type a unique name for the service instance in the Service name field. For example, type `my-service-name`. Leave the default values for the other options. - 1. Click **Use**. + 1. In the IBM Cloud **Catalog**, select the Natural Language Classifier service. + 1. Click **Create**. 1. Copy your credentials: 1. On the left side of the page, click **Service Credentials** to view your service credentials. - 1. Copy `username` and `password` from these service credentials. + 1. Copy `username`, `password`, and `url` from these service credentials. 
diff --git a/examples/conversation_tone_analyzer_integration/README.md b/examples/conversation_tone_analyzer_integration/README.md index ccff19fe3..4aa14bd66 100644 --- a/examples/conversation_tone_analyzer_integration/README.md +++ b/examples/conversation_tone_analyzer_integration/README.md @@ -9,8 +9,8 @@ This example provides sample code for integrating [Tone Analyzer][tone_analyzer] Requirements to run the sample code - * [Tone Analyzer Service credentials][bluemix_tone_analyzer_service] - * [Conversation Service credentials][bluemix_conversation_service] + * [Tone Analyzer Service credentials][ibm_cloud_tone_analyzer_service] + * [Conversation Service credentials][ibm_cloud_conversation_service] * [Conversation Workspace ID][conversation_simple_workspace] Credentials & the Workspace ID can be set in environment properties, a .env file, or directly in the code. @@ -24,8 +24,8 @@ Command to run the sample code [conversation]: https://www.ibm.com/watson/developercloud/conversation.html [tone_analyzer]: http://www.ibm.com/watson/developercloud/tone-analyzer.html -[bluemix_conversation_service]: https://console.ng.bluemix.net/catalog/services/conversation/ -[bluemix_tone_analyzer_service]: https://console.ng.bluemix.net/catalog/services/tone-analyzer/ +[ibm_cloud_conversation_service]: https://console.ng.bluemix.net/catalog/services/conversation/ +[ibm_cloud_tone_analyzer_service]: https://console.ng.bluemix.net/catalog/services/tone-analyzer/ [conversation_simple_workspace]: https://github.com/watson-developer-cloud/conversation-simple#workspace [tone_conversation_integration_example]: https://github.com/watson-developer-cloud/python-sdk/tree/master/examples/tone_conversation_integration.v1.py [tone_conversation_integration_example_tone_detection]: https://github.com/watson-developer-cloud/python-sdk/tree/master/examples/conversation_addons/tone_detection.py diff --git a/examples/conversation_v1.py b/examples/conversation_v1.py index 2f48acddd..1ac66200a 100644 
--- a/examples/conversation_v1.py +++ b/examples/conversation_v1.py @@ -3,24 +3,11 @@ import os from watson_developer_cloud import ConversationV1 -######################### -# message -######################### - conversation = ConversationV1( username='YOUR SERVICE USERNAME', password='YOUR SERVICE PASSWORD', version='2017-04-21') -# replace with your own workspace_id -workspace_id = '506e4a2e-3d5d-4dca-b374-38edbb4139ab' -if os.getenv("conversation_workspace_id") is not None: - workspace_id = os.getenv("conversation_workspace_id") - -response = conversation.message(workspace_id=workspace_id, input={ - 'text': 'What\'s the weather like?'}) -print(json.dumps(response, indent=2)) - # When you send multiple requests for the same conversation, include the # context object from the previous response. # response = conversation.message(workspace_id=workspace_id, message_input={ @@ -43,6 +30,11 @@ response = conversation.get_workspace(workspace_id=workspace_id, export=True) print(json.dumps(response, indent=2)) +# message +response = conversation.message(workspace_id=workspace_id, input={ + 'text': 'What\'s the weather like?'}) +print(json.dumps(response, indent=2)) + response = conversation.list_workspaces() print(json.dumps(response, indent=2)) diff --git a/examples/dialog_v1.py b/examples/dialog_v1.py deleted file mode 100644 index ea774b3ea..000000000 --- a/examples/dialog_v1.py +++ /dev/null @@ -1,49 +0,0 @@ -# coding=utf-8 -from __future__ import print_function -import json -from watson_developer_cloud import DialogV1 - -dialog = DialogV1( - username='YOUR SERVICE USERNAME', - password='YOUR SERVICE PASSWORD') - -print(json.dumps(dialog.get_dialogs(), indent=2)) - -# print(json.dumps(dialog.get_dialog('6250d170-41d6-468a-a697-5675578c8012'), -# indent=2)) - -# CREATE A DIALOG -# with open(join(dirname(__file__), '../resources/dialog.xml') as dialog_file: -# print(json.dumps(dialog.create_dialog( -# dialog_file=dialog_file, name='pizza_test_9'), indent=2)) - -# 
dialog_id = '98734721-8952-4a1c-bb72-ef9957d4be93' - -# with open(join(dirname(__file__), '../resources/dialog.xml') as dialog_file: -# print(json.dumps(dialog.update_dialog(dialog_file=dialog_file, -# dialog_id=dialog_id), indent=2)) - -# print(json.dumps(dialog.get_content(dialog_id), indent=2)) -# -# initial_response = dialog.conversation(dialog_id) -# -# print(json.dumps(initial_response, indent=2)) -# -# print(json.dumps(dialog.conversation(dialog_id=dialog_id, -# dialog_input='What type of toppings do -# you have?', -# conversation_id=initial_response[ -# 'conversation_id'], -# client_id=initial_response['client_id']), indent=2)) - -# print(json.dumps(dialog.delete_dialog( -# dialog_id='63b0489c-cd97-45ef-8800-4e7c310eeb19'), indent=2)) - -# print(json.dumps(dialog.update_profile( -# dialog_id='6250d170-41d6-468a-a697-5675578c8012', client_id=123, -# name_values=[{'name': 'test', 'value': 'v1'}]), -# indent=2)) -# -# print(json.dumps(dialog.get_profile( -# dialog_id='6250d170-41d6-468a-a697-5675578c8012', client_id=123), -# indent=2)) diff --git a/examples/natural_language_classifier_v1.py b/examples/natural_language_classifier_v1.py index 95954d550..783ead19e 100644 --- a/examples/natural_language_classifier_v1.py +++ b/examples/natural_language_classifier_v1.py @@ -4,10 +4,6 @@ # from os.path import join, dirname from watson_developer_cloud import NaturalLanguageClassifierV1 -# replace with your own classifier_id -classifier_id = 'e552ebx250-nlc-13834' -if os.getenv("natural_language_classifier_classifier_id") is not None: - classifier_id = os.getenv("natural_language_classifier_classifier_id") natural_language_classifier = NaturalLanguageClassifierV1( username='YOUR SERVICE USERNAME', @@ -17,13 +13,14 @@ print(json.dumps(classifiers, indent=2)) # create a classifier -# with open('../resources/weather_data_train.csv', 'rb') as training_data: -# metadata = json.dumps({'name': 'my-classifier', 'language': 'en'}) -# classifier = 
natural_language_classifier.create_classifier( -# metadata=metadata, -# training_data=training_data -# ) -# print(json.dumps(classifier, indent=2)) +with open(os.path.join(os.path.dirname(__file__), '../resources/weather_data_train.csv'), 'rb') as training_data: + metadata = json.dumps({'name': 'my-classifier', 'language': 'en'}) + classifier = natural_language_classifier.create_classifier( + metadata=metadata, + training_data=training_data + ) + classifier_id = classifier['classifier_id'] + print(json.dumps(classifier, indent=2)) status = natural_language_classifier.get_classifier(classifier_id) print(json.dumps(status, indent=2)) @@ -34,8 +31,8 @@ 'tomorrow?') print(json.dumps(classes, indent=2)) -# delete = natural_language_classifier.delete_classifier('2374f9x68-nlc-2697') -# print(json.dumps(delete, indent=2)) +delete = natural_language_classifier.delete_classifier(classifier_id) +print(json.dumps(delete, indent=2)) # example of raising a ValueError # print(json.dumps( diff --git a/examples/retrieve_and_rank_v1.py b/examples/retrieve_and_rank_v1.py deleted file mode 100644 index 6863d63a0..000000000 --- a/examples/retrieve_and_rank_v1.py +++ /dev/null @@ -1,79 +0,0 @@ -from __future__ import print_function -import json -from watson_developer_cloud import RetrieveAndRankV1 -import os - - -retrieve_and_rank = RetrieveAndRankV1( - username='YOUR SERVICE USERNAME', - password='YOUR SERVICE PASSWORD') - -# Solr clusters - -solr_clusters = retrieve_and_rank.list_solr_clusters() -print(json.dumps(solr_clusters, indent=2)) - -# created_cluster = retrieve_and_rank.create_solr_cluster(cluster_name='Test -# Cluster', cluster_size='1') -# print(json.dumps(created_cluster, indent=2)) - -# Replace with your own solr_cluster_id -solr_cluster_id = 'sc1264f746_d0f7_4840_90be_07164e6ed04b' -if os.getenv("retrieve_and_rank_solr_cluster_id") is not None: - solr_cluster_id = os.getenv("retrieve_and_rank_solr_cluster_id") - -status = retrieve_and_rank.get_solr_cluster_status( - 
solr_cluster_id=solr_cluster_id) -print(json.dumps(status, indent=2)) - -# Solr cluster config -# with open('../resources/solr_config.zip', 'rb') as config: -# config_status = retrieve_and_rank.create_config(solr_cluster_id, -# 'test-config', config) -# print(json.dumps(config_status, indent=2)) - -# deleted_response = retrieve_and_rank.delete_config(solr_cluster_id, -# 'test-config') -# print(json.dumps(deleted_response, indent=2)) - -configs = retrieve_and_rank.list_configs(solr_cluster_id=solr_cluster_id) -print(json.dumps(configs, indent=2)) - -# collection = retrieve_and_rank.create_collection(solr_cluster_id, -# 'test-collection', 'test-config') -# print(json.dumps(collection, indent=2)) - -if not configs['solr_configs']: - collections = retrieve_and_rank.list_collections( - solr_cluster_id=solr_cluster_id) - print(json.dumps(collections, indent=2)) - - pysolr_client = retrieve_and_rank.get_pysolr_client(solr_cluster_id, - collections[ - 'collections'][0]) - # Can also refer to config by name - - results = pysolr_client.search('bananas') - print('{0} documents found'.format(len(results.docs))) - -# Rankers - -# rankers = retrieve_and_rank.list_rankers() -# print(json.dumps(rankers, indent=2)) - -# create a ranker -# with open('../resources/ranker_training_data.csv', 'rb') as training_data: -# print(json.dumps(retrieve_and_rank.create_ranker( -# training_data=training_data, name='Ranker Test'), indent=2)) - -# replace YOUR RANKER ID -# status = retrieve_and_rank.get_ranker_status('42AF7Ex10-rank-47') -# print(json.dumps(status, indent=2)) - -# delete_results = retrieve_and_rank.delete_ranker('YOUR RANKER ID') -# print(json.dumps(delete_results)) - -# replace '42AF7Ex10-rank-47' with your ranker_id -# with open('../resources/ranker_answer_data.csv', 'rb') as answer_data: -# ranker_results = retrieve_and_rank.rank('42AF7Ex10-rank-47', answer_data) -# print(json.dumps(ranker_results, indent=2)) diff --git a/examples/text_to_speech_v1.py 
b/examples/text_to_speech_v1.py index b20382caf..34f0fa1c9 100644 --- a/examples/text_to_speech_v1.py +++ b/examples/text_to_speech_v1.py @@ -6,22 +6,19 @@ text_to_speech = TextToSpeechV1( username='YOUR SERVICE USERNAME', - password='YOUR SERVICE PASSWORD', - x_watson_learning_opt_out=True) # Optional flag + password='YOUR SERVICE PASSWORD') -print(json.dumps(text_to_speech.voices(), indent=2)) +print(json.dumps(text_to_speech.list_voices(), indent=2)) with open(join(dirname(__file__), '../resources/output.wav'), 'wb') as audio_file: audio_file.write( text_to_speech.synthesize('Hello world!', accept='audio/wav', - voice="en-US_AllisonVoice")) + voice="en-US_AllisonVoice").content) -print( - json.dumps(text_to_speech.pronunciation( - 'Watson', pronunciation_format='spr'), indent=2)) +print(json.dumps(text_to_speech.get_pronunciation('Watson', format='spr'), indent=2)) -print(json.dumps(text_to_speech.customizations(), indent=2)) +print(json.dumps(text_to_speech.list_voice_models(), indent=2)) # print(json.dumps(text_to_speech.create_customization('test-customization'), # indent=2)) diff --git a/examples/tradeoff_analytics_v1.py b/examples/tradeoff_analytics_v1.py deleted file mode 100755 index 7e92fd4bd..000000000 --- a/examples/tradeoff_analytics_v1.py +++ /dev/null @@ -1,16 +0,0 @@ -from __future__ import print_function -import json -import os -from watson_developer_cloud import TradeoffAnalyticsV1 - -tradeoff_analytics = TradeoffAnalyticsV1( - username='YOUR SERVICE USERNAME', - password='YOUR SERVICE PASSWORD') - -with open(os.path.join(os.path.dirname(__file__), - '../resources/problem.json')) as problem_json: - dilemma = tradeoff_analytics.dilemmas(json.load(problem_json), - generate_visualization=True, - find_preferable_options=True) - -print(json.dumps(dilemma, indent=2)) diff --git a/test/__init__.py b/test/__init__.py index 6dbd8572e..b8c9cd96b 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -11,3 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY 
KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from dotenv import load_dotenv, find_dotenv + +# load the .env file containing your environment variables for the required +load_dotenv(find_dotenv()) \ No newline at end of file diff --git a/test/integration/__init__.py b/test/integration/__init__.py new file mode 100644 index 000000000..b8c9cd96b --- /dev/null +++ b/test/integration/__init__.py @@ -0,0 +1,17 @@ +# Copyright 2015 IBM All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from dotenv import load_dotenv, find_dotenv + +# load the .env file containing your environment variables for the required +load_dotenv(find_dotenv()) \ No newline at end of file diff --git a/test/test_integration_speech_to_text_v1.py b/test/integration/test_integration_speech_to_text_v1.py similarity index 100% rename from test/test_integration_speech_to_text_v1.py rename to test/integration/test_integration_speech_to_text_v1.py diff --git a/test/integration/test_integration_text_to_speech_v1.py b/test/integration/test_integration_text_to_speech_v1.py new file mode 100644 index 000000000..aa474eb80 --- /dev/null +++ b/test/integration/test_integration_text_to_speech_v1.py @@ -0,0 +1,60 @@ +import pytest +import unittest +import watson_developer_cloud +import os + +class TestIntegrationTextToSpeechV1(unittest.TestCase): + def setUp(self): + self.text_to_speech = watson_developer_cloud.TextToSpeechV1() + self.original_customizations = self.text_to_speech.list_voice_models() + self.created_customization = self.text_to_speech.create_voice_model( + name="test_integration_customization", + description="customization for tests") + + def tearDown(self): + custid = self.created_customization['customization_id'] + self.text_to_speech.delete_voice_model(customization_id=custid) + + def test_voices(self): + output = self.text_to_speech.list_voices() + assert output['voices'] is not None + voice = self.text_to_speech.get_voice(output['voices'][0]['name']) + assert voice is not None + + def test_speak(self): + output = self.text_to_speech.synthesize( + text="my voice is my passport", + accept='audio/wav', + voice='en-US_AllisonVoice') + output.content is not None + + def test_pronunciation(self): + output = self.text_to_speech.get_pronunciation('hello') + output['pronunciation'] is not None + + def test_customizations(self): + old_length = len(self.original_customizations['customizations']) + new_length = len( + self.text_to_speech.list_voice_models()['customizations']) + 
assert new_length - old_length == 1 + + def test_custom_words(self): + customization_id = self.created_customization['customization_id'] + words = self.text_to_speech.list_words(customization_id)['words'] + assert len(words) == 0 + self.text_to_speech.add_word( + customization_id, + word="ACLs", + translation="ackles") + + words = [ + { + "word": "MACLs", + "translation": "mackles" + } + ] + + self.text_to_speech.add_words(customization_id, words) + self.text_to_speech.delete_word(customization_id, 'ACLs') + word = self.text_to_speech.get_word(customization_id, 'MACLs') + assert word['translation'] == 'mackles' diff --git a/test/test_integration_visual_recognition.py b/test/integration/test_integration_visual_recognition.py similarity index 100% rename from test/test_integration_visual_recognition.py rename to test/integration/test_integration_visual_recognition.py diff --git a/test/test_integration_text_to_speech_v1.py b/test/test_integration_text_to_speech_v1.py deleted file mode 100644 index ea0d88b22..000000000 --- a/test/test_integration_text_to_speech_v1.py +++ /dev/null @@ -1,27 +0,0 @@ -import pytest -import unittest -import watson_developer_cloud -import os - -@pytest.mark.skip("These are destructive, so run them manually") -class TestIntegrationTextToSpeechV1(unittest.TestCase): - - def setUp(self): - self.text_to_speech = watson_developer_cloud.TextToSpeechV1(username=os.getenv('TEXT_TO_SPEECH_USERNAME'), - password=os.getenv('TEXT_TO_SPEECH_PASSWORD')) - self.original_customizations = self.text_to_speech.customizations() - self.created_customization = self.text_to_speech.create_customization(name="test_integration_customization", - description="customization for tests") - - def tearDown(self): - custid = self.created_customization['customization_id'] - self.text_to_speech.delete_customization(customization_id=custid) - - def test_customizations(self): - old_length = len(self.original_customizations['customizations']) - new_length = 
len(self.text_to_speech.customizations()['customizations']) - assert new_length - old_length == 1 - - def test_speak(self): - output = self.text_to_speech.synthesize(text="my voice is my passport") - assert not output diff --git a/test/test_text_to_speech_v1.py b/test/test_text_to_speech_v1.py index 0bafceae7..bb18e4e46 100644 --- a/test/test_text_to_speech_v1.py +++ b/test/test_text_to_speech_v1.py @@ -1,126 +1,229 @@ # coding=utf-8 import responses import watson_developer_cloud +import json @responses.activate def test_success(): voices_url = 'https://stream.watsonplatform.net/text-to-speech/api/v1/voices' - voices_response = '{"voices": [{"url": "https://stream.watsonplatform.net/text-to-speech/api/v1/voices/' \ - 'VoiceEnUsLisa", "gender": "female", "name": "VoiceEnUsLisa", "language": "en-US"}, {"url": ' \ - '"https://stream.watsonplatform.net/text-to-speech/api/v1/voices/VoiceEsEsEnrique", ' \ - '"gender": "male", "name": "VoiceEsEsEnrique", "language": "es-ES"}, {"url": ' \ - '"https://stream.watsonplatform.net/text-to-speech/api/v1/voices/VoiceEnUsMichael", ' \ - '"gender": "male", "name": "VoiceEnUsMichael", "language": "en-US"}, {"url": ' \ - '"https://stream.watsonplatform.net/text-to-speech/api/v1/voices/VoiceEnUsAllison", ' \ - '"gender": "female", "name": "VoiceEnUsAllison", "language": "en-US"}]}' - - responses.add(responses.GET, voices_url, body=voices_response, - status=200, content_type='application/json') + voices_response = { + "voices": [{ + "url": + "https://stream.watsonplatform.net/text-to-speech/api/v1/voices/VoiceEnUsLisa", + "gender": + "female", + "name": + "VoiceEnUsLisa", + "language": + "en-US" + }, { + "url": + "https://stream.watsonplatform.net/text-to-speech/api/v1/voices/VoiceEsEsEnrique", + "gender": + "male", + "name": + "VoiceEsEsEnrique", + "language": + "es-ES" + }, { + "url": + "https://stream.watsonplatform.net/text-to-speech/api/v1/voices/VoiceEnUsMichael", + "gender": + "male", + "name": + "VoiceEnUsMichael", + 
"language": + "en-US" + }, { + "url": + "https://stream.watsonplatform.net/text-to-speech/api/v1/voices/VoiceEnUsAllison", + "gender": + "female", + "name": + "VoiceEnUsAllison", + "language": + "en-US" + }] + } + voice_url = 'https://stream.watsonplatform.net/text-to-speech/api/v1/voices/en-us_AllisonVoice' + voice_response = { + "url": + "https://stream.watsonplatform.net/text-to-speech/api/v1/voices/en-US_AllisonVoice", + "name": + "en-US_AllisonVoice", + "language": + "en-US", + "customizable": + True, + "gender": + "female", + "description": + "Allison: American English female voice.", + "supported_features": { + "custom_pronunciation": True, + "voice_transformation": True + } + } + synthesize_url = 'https://stream.watsonplatform.net/text-to-speech/api/v1/synthesize' + synthesize_response_body = '' + + responses.add( + responses.GET, + voices_url, + body=json.dumps(voices_response), + status=200, + content_type='application/json') + responses.add( + responses.GET, + voice_url, + body=json.dumps(voice_response), + status=200, + content_type='application/json') + responses.add( + responses.POST, + synthesize_url, + body=synthesize_response_body, + status=200, + content_type='application/json', + match_querystring=True) text_to_speech = watson_developer_cloud.TextToSpeechV1( username="username", password="password") - text_to_speech.voices() + text_to_speech.list_voices() assert responses.calls[0].request.url == voices_url - assert responses.calls[0].response.text == voices_response - - synthesize_text = 'hello' - synthesize_url = 'https://stream.watsonplatform.net/text-to-speech/api/v1/synthesize' - synthesize_response_body = '' + assert responses.calls[0].response.text == json.dumps(voices_response) - responses.add(responses.POST, synthesize_url, - body=synthesize_response_body, status=200, - content_type='application/json', match_querystring=True) - text_to_speech.synthesize(synthesize_text) + text_to_speech.get_voice('en-us_AllisonVoice') + assert 
responses.calls[1].request.url == voice_url + assert responses.calls[1].response.text == json.dumps(voice_response) - assert responses.calls[1].request.url == synthesize_url - assert responses.calls[1].response.text == synthesize_response_body + text_to_speech.synthesize('hello') + assert responses.calls[2].request.url == synthesize_url + assert responses.calls[2].response.text == synthesize_response_body - assert len(responses.calls) == 2 + assert len(responses.calls) == 3 @responses.activate -def test_pronounciation(): +def test_get_pronunciation(): - responses.add(responses.GET, 'https://stream.watsonplatform.net/text-to-speech/api/v1/pronunciation', - body='{"pronunciation": "pronunciation info" }', - status=200, content_type='application_json') + responses.add( + responses.GET, + 'https://stream.watsonplatform.net/text-to-speech/api/v1/pronunciation', + body='{"pronunciation": "pronunciation info" }', + status=200, + content_type='application_json') text_to_speech = watson_developer_cloud.TextToSpeechV1( username="username", password="password") - text_to_speech.pronunciation(text="this is some text") - text_to_speech.pronunciation(text="yo", voice="VoiceEnUsLisa") - text_to_speech.pronunciation( - text="yo", voice="VoiceEnUsLisa", pronunciation_format='ipa') + + text_to_speech.get_pronunciation(text="this is some text") + text_to_speech.get_pronunciation(text="yo", voice="VoiceEnUsLisa") + text_to_speech.get_pronunciation( + text="yo", voice="VoiceEnUsLisa", format='ipa') assert len(responses.calls) == 3 @responses.activate -def test_customizations(): - responses.add(responses.GET, 'https://stream.watsonplatform.net/text-to-speech/api/v1/customizations', - body='{"customizations": "yep" }', - status=200, content_type='application_json') - responses.add(responses.POST, 'https://stream.watsonplatform.net/text-to-speech/api/v1/customizations', - body='{"customizations": "yep" }', - status=200, content_type='application_json') - responses.add(responses.GET, 
'https://stream.watsonplatform.net/text-to-speech/api/v1/customizations/custid', - body='{"customization": "yep, just one" }', - status=200, content_type='application_json') - responses.add(responses.POST, 'https://stream.watsonplatform.net/text-to-speech/api/v1/customizations/custid', - body='{"customizations": "yep" }', - status=200, content_type='application_json') - responses.add(responses.DELETE, 'https://stream.watsonplatform.net/text-to-speech/api/v1/customizations/custid', - body='{"customizations": "yep" }', - status=200, content_type='application_json') +def test_custom_voice_models(): + responses.add( + responses.GET, + 'https://stream.watsonplatform.net/text-to-speech/api/v1/customizations', + body='{"customizations": "yep" }', + status=200, + content_type='application_json') + responses.add( + responses.POST, + 'https://stream.watsonplatform.net/text-to-speech/api/v1/customizations', + body='{"customizations": "yep" }', + status=200, + content_type='application_json') + responses.add( + responses.GET, + 'https://stream.watsonplatform.net/text-to-speech/api/v1/customizations/custid', + body='{"customization": "yep, just one" }', + status=200, + content_type='application_json') + responses.add( + responses.POST, + 'https://stream.watsonplatform.net/text-to-speech/api/v1/customizations/custid', + body='{"customizations": "yep" }', + status=200, + content_type='application_json') + responses.add( + responses.DELETE, + 'https://stream.watsonplatform.net/text-to-speech/api/v1/customizations/custid', + body='{"customizations": "yep" }', + status=200, + content_type='application_json') text_to_speech = watson_developer_cloud.TextToSpeechV1( username="username", password="password") - text_to_speech.customizations() - text_to_speech.customizations(language="en-US") + text_to_speech.list_voice_models() + text_to_speech.list_voice_models(language="en-US") assert len(responses.calls) == 2 - text_to_speech.create_customization(name="name", 
description="description") - text_to_speech.get_customization(customization_id='custid') - text_to_speech.update_customization( + text_to_speech.create_voice_model(name="name", description="description") + text_to_speech.get_voice_model(customization_id='custid') + text_to_speech.update_voice_model( customization_id="custid", name="name", description="description") - text_to_speech.delete_customization(customization_id="custid") + text_to_speech.delete_voice_model(customization_id="custid") assert len(responses.calls) == 6 @responses.activate -def test_customization_words(): +def test_custom_words(): base_url = 'https://stream.watsonplatform.net/text-to-speech/api/v1/customizations' - responses.add(responses.GET, "{0}/{1}/words".format(base_url, "custid"), - body='{"customizations": "yep" }', - status=200, content_type='application_json') - responses.add(responses.POST, "{0}/{1}/words".format(base_url, "custid"), - body='{"customizations": "yep" }', - status=200, content_type='application_json') - responses.add(responses.GET, "{0}/{1}/words/{2}".format(base_url, "custid", "word"), - body='{"customization": "yep, just one" }', - status=200, content_type='application_json') - responses.add(responses.POST, "{0}/{1}/words/{2}".format(base_url, "custid", "word"), - body='{"customizations": "yep" }', - status=200, content_type='application_json') - responses.add(responses.PUT, "{0}/{1}/words/{2}".format(base_url, "custid", "word"), - body='{"customizations": "yep" }', - status=200, content_type='application_json') - responses.add(responses.DELETE, "{0}/{1}/words/{2}".format(base_url, "custid", "word"), - body='{"customizations": "yep" }', - status=200, content_type='application_json') + responses.add( + responses.GET, + "{0}/{1}/words".format(base_url, "custid"), + body='{"customizations": "yep" }', + status=200, + content_type='application_json') + responses.add( + responses.POST, + "{0}/{1}/words".format(base_url, "custid"), + body='{"customizations": "yep" }', + 
status=200, + content_type='application_json') + responses.add( + responses.GET, + "{0}/{1}/words/{2}".format(base_url, "custid", "word"), + body='{"customization": "yep, just one" }', + status=200, + content_type='application_json') + responses.add( + responses.POST, + "{0}/{1}/words/{2}".format(base_url, "custid", "word"), + body='{"customizations": "yep" }', + status=200, + content_type='application_json') + responses.add( + responses.PUT, + "{0}/{1}/words/{2}".format(base_url, "custid", "word"), + body='{"customizations": "yep" }', + status=200, + content_type='application_json') + responses.add( + responses.DELETE, + "{0}/{1}/words/{2}".format(base_url, "custid", "word"), + body='{"customizations": "yep" }', + status=200, + content_type='application_json') text_to_speech = watson_developer_cloud.TextToSpeechV1( username="username", password="password") - text_to_speech.get_customization_words(customization_id="custid") - text_to_speech.add_customization_words( + + text_to_speech.list_words(customization_id="custid") + text_to_speech.add_words( customization_id="custid", words=["one", "two", "three"]) - text_to_speech.get_customization_word( - customization_id="custid", word="word") - text_to_speech.set_customization_word( + text_to_speech.get_word(customization_id="custid", word="word") + text_to_speech.add_word( customization_id='custid', word="word", translation="I'm translated") - text_to_speech.delete_customization_word( - customization_id="custid", word="word") + text_to_speech.delete_word(customization_id="custid", word="word") + assert len(responses.calls) == 5 diff --git a/watson_developer_cloud/text_to_speech_v1.py b/watson_developer_cloud/text_to_speech_v1.py index 8cbc2ad1b..258b08e05 100644 --- a/watson_developer_cloud/text_to_speech_v1.py +++ b/watson_developer_cloud/text_to_speech_v1.py @@ -1,4 +1,6 @@ -# Copyright 2016 IBM All Rights Reserved. +# coding: utf-8 + +# Copyright 2018 IBM All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,123 +14,1134 @@ # See the License for the specific language governing permissions and # limitations under the License. """ -The v1 Text to Speech service -(https://www.ibm.com/watson/developercloud/text-to-speech.html) +For more information about the service and its various interfaces, see [About Text to +Speech](https://console.bluemix.net/docs/services/text-to-speech/index.html). """ +from __future__ import absolute_import + +import json from .watson_service import WatsonService +from .utils import deprecated + +############################################################################## +# Service +############################################################################## class TextToSpeechV1(WatsonService): - """Client for the Text to Speech service""" - default_url = "https://stream.watsonplatform.net/text-to-speech/api" + """The Text to Speech V1 service.""" + + default_url = 'https://stream.watsonplatform.net/text-to-speech/api' - def __init__(self, url=default_url, **kwargs): + def __init__(self, url=default_url, username=None, password=None): """ - Construct an instance. Fetches service parameters from VCAP_SERVICES - runtime variable for Bluemix, or it defaults to local URLs. + Construct a new client for the Text to Speech service. + + :param str url: The base url to use when contacting the service (e.g. + "https://gateway.watsonplatform.net/text-to-speech/api"). + The base url may differ between Bluemix regions. + + :param str username: The username used to authenticate with the service. + Username and password credentials are only required to run your + application locally or outside of Bluemix. When running on + Bluemix, the credentials will be automatically loaded from the + `VCAP_SERVICES` environment variable. + + :param str password: The password used to authenticate with the service. 
+ Username and password credentials are only required to run your + application locally or outside of Bluemix. When running on + Bluemix, the credentials will be automatically loaded from the + `VCAP_SERVICES` environment variable. + """ - WatsonService.__init__(self, 'text_to_speech', url, **kwargs) - def synthesize(self, text, voice=None, accept=None, customization_id=None): + WatsonService.__init__( + self, + vcap_services_name='text_to_speech', + url=url, + username=username, + password=password, + use_vcap_services=True) + + ######################### + # voices + ######################### + + def get_voice(self, voice, customization_id=None): """ - Returns the get HTTP response by doing a POST to /synthesize with - text, voice, accept + Retrieves a specific voice available for speech synthesis. + + :param str voice: The voice for which information is to be returned. Retrieve available voices with the `GET /v1/voices` method. + :param str customization_id: The GUID of a custom voice model for which information is to be returned. You must make the request with service credentials created for the instance of the service that owns the custom model. Omit the parameter to see information about the specified voice with no customization. + :return: A `dict` containing the `Voice` response. + :rtype: dict """ - params = {'voice': voice, 'accept': accept, - 'customization_id': customization_id} - data = {'text': text} + if voice is None: + raise ValueError('voice must be provided') + params = {'customization_id': customization_id} + url = '/v1/voices/{0}'.format(*self._encode_path_vars(voice)) response = self.request( - method='POST', url='/v1/synthesize', stream=True, params=params, - json=data) - return response.content + method='GET', url=url, params=params, accept_json=True) + return response - def voices(self): + def list_voices(self): """ - Returns the list of available voices to use with synthesize + Retrieves all voices available for speech synthesis. 
+ + :return: A `dict` containing the `Voices` response. + :rtype: dict """ - return self.request(method='GET', url='/v1/voices', accept_json=True) + url = '/v1/voices' + response = self.request(method='GET', url=url, accept_json=True) + return response - def pronunciation(self, text, voice=None, pronunciation_format='ipa'): + @deprecated('Use list_voices() instead') + def voices(self): + return self.list_voices() + + ######################### + # synthesize + ######################### + + def synthesize(self, + text, + accept=None, + voice=None, + customization_id=None): + """ + Streaming speech synthesis of the text in the body parameter. Synthesizes text to spoken audio, returning the synthesized audio stream as an array of bytes. + + :param str text: The text to synthesize. + :param str accept: The requested audio format (MIME type) of the audio. You can use this header or the `accept` query parameter to specify the audio format. (For the `audio/l16` format, you can optionally specify `endianness=big-endian` or `endianness=little-endian`; the default is little endian.). + :param str voice: The voice to use for synthesis. Retrieve available voices with the `GET /v1/voices` method. + :param str customization_id: The GUID of a custom voice model to use for the synthesis. If a custom voice model is specified, it is guaranteed to work only if it matches the language of the indicated voice. You must make the request with service credentials created for the instance of the service that owns the custom model. Omit the parameter to use the specified voice with no customization. + :return: A `Response ` object representing the response. 
+ :rtype: requests.models.Response + """ + if text is None: + raise ValueError('text must be provided') + headers = {'Accept': accept} params = { - 'text': text, 'voice': voice, - 'format': pronunciation_format + 'customization_id': customization_id } - return self.request(method='GET', url='/v1/pronunciation', - params=params, accept_json=True) + data = {'text': text} + url = '/v1/synthesize' + response = self.request( + method='POST', + url=url, + headers=headers, + params=params, + json=data, + accept_json=False) - def customizations(self, language=None): + return response + + ######################### + # pronunciation + ######################### + + def get_pronunciation(self, + text, + voice=None, + format=None, + customization_id=None): + """ + Gets the pronunciation for a word. + + Returns the phonetic pronunciation for the word specified by the `text` parameter. + You can request the pronunciation for a specific format. You can also request the + pronunciation for a specific voice to see the default translation for the language + of that voice or for a specific custom voice model to see the translation for that + voice model. **Note:** This method is currently a beta release. + + :param str text: The word for which the pronunciation is requested. + :param str voice: A voice that specifies the language in which the pronunciation is to be returned. All voices for the same language (for example, `en-US`) return the same translation. Retrieve available voices with the `GET /v1/voices` method. + :param str format: The phoneme format in which to return the pronunciation. Omit the parameter to obtain the pronunciation in the default format. + :param str customization_id: The GUID of a custom voice model for which the pronunciation is to be returned. The language of a specified custom model must match the language of the specified voice. 
If the word is not defined in the specified custom model, the service returns the default translation for the custom model's language. You must make the request with service credentials created for the instance of the service that owns the custom model. Omit the parameter to see the translation for the specified voice with no customization. + :return: A `dict` containing the `Pronunciation` response. + :rtype: dict + """ + if text is None: + raise ValueError('text must be provided') params = { - 'language': language + 'text': text, + 'voice': voice, + 'format': format, + 'customization_id': customization_id } - return self.request(method='GET', url='/v1/customizations', - params=params, accept_json=True) + url = '/v1/pronunciation' + response = self.request( + method='GET', url=url, params=params, accept_json=True) + return response - def get_customization(self, customization_id): - customization_id = self.unpack_id(customization_id, 'customization_id') - return self.request(method='GET', url='/v1/customizations/{0}'.format( - customization_id), accept_json=True) + @deprecated('Use get_pronunciation() instead') + def pronunciation(self, text, voice=None, pronunciation_format='ipa'): + return self.get_pronunciation(text, voice, pronunciation_format) + + ######################### + # customVoiceModels + ######################### + + def create_voice_model(self, name, language=None, description=None): + """ + Creates a new custom voice model. + + Creates a new empty custom voice model. The model is owned by the instance of the + service whose credentials are used to create it. **Note:** This method is + currently a beta release. + + :param str name: The name of the new custom voice model. + :param str language: The language of the new custom voice model. Omit the parameter to use the the default language, `en-US`. + :param str description: A description of the new custom voice model. Specifying a description is recommended. 
+ :return: A `dict` containing the `VoiceModel` response. + :rtype: dict + """ + if name is None: + raise ValueError('name must be provided') + data = {'name': name, 'language': language, 'description': description} + url = '/v1/customizations' + response = self.request( + method='POST', url=url, json=data, accept_json=True) + return response + @deprecated('Use create_voice_model() instead.') def create_customization(self, name, language=None, description=None): - body = { - 'name': name, - 'language': language, - 'description': description - } - return self.request(method='POST', url='/v1/customizations', json=body, - accept_json=True) + return self.create_voice_model(name, language, description) + + def delete_voice_model(self, customization_id): + """ + Deletes a custom voice model. + + Deletes the custom voice model with the specified `customization_id`. You must use + credentials for the instance of the service that owns a model to delete it. + **Note:** This method is currently a beta release. + + :param str customization_id: The GUID of the custom voice model that is to be deleted. You must make the request with service credentials created for the instance of the service that owns the custom model. + :rtype: None + """ + if customization_id is None: + raise ValueError('customization_id must be provided') + url = '/v1/customizations/{0}'.format( + *self._encode_path_vars(customization_id)) + self.request(method='DELETE', url=url, accept_json=True) + return None + + @deprecated('Use delete_voice_model() instead.') + def delete_customization(self, customization_id): + return self.delete_voice_model(customization_id) + + def get_voice_model(self, customization_id): + """ + Queries the contents of a custom voice model. + + Lists all information about the custom voice model with the specified + `customization_id`. 
In addition to metadata such as the name and description of + the voice model, the output includes the words in the model and their translations + as defined in the model. **Note:** This method is currently a beta release. + + :param str customization_id: The GUID of the custom voice model that is to be queried. You must make the request with service credentials created for the instance of the service that owns the custom model. + :return: A `dict` containing the `VoiceModel` response. + :rtype: dict + """ + if customization_id is None: + raise ValueError('customization_id must be provided') + url = '/v1/customizations/{0}'.format( + *self._encode_path_vars(customization_id)) + response = self.request(method='GET', url=url, accept_json=True) + return response + + @deprecated('Use get_voice_model instead.') + def get_customization(self, customization_id): + return self.get_voice_model(customization_id) + + def list_voice_models(self, language=None): + """ + Lists all available custom voice models for a language or for all languages. + **Note:** This method is currently a beta release. + + :param str language: The language for which custom voice models that are owned by the requesting service credentials are to be returned. Omit the parameter to see all custom voice models that are owned by the requester. + :return: A `dict` containing the `VoiceModels` response. + :rtype: dict + """ + params = {'language': language} + url = '/v1/customizations' + response = self.request( + method='GET', url=url, params=params, accept_json=True) + return response + + @deprecated('Use list_voice_models() instead.') + def customizations(self, language=None): + return self.list_voice_models(language) + def update_voice_model(self, + customization_id, + name=None, + description=None, + words=None): + """ + Updates information and words for a custom voice model. + + Updates information for the custom voice model with the specified + `customization_id`. 
You can update the metadata such as the name and description + of the voice model. You can also update the words in the model and their + translations. Adding a new translation for a word that already exists in a custom + model overwrites the word's existing translation. A custom model can contain no + more than 20,000 entries. You must use credentials for the instance of the service + that owns a model to update it. **Note:** This method is currently a beta + release. + + :param str customization_id: The GUID of the custom voice model that is to be updated. You must make the request with service credentials created for the instance of the service that owns the custom model. + :param str name: A new name for the custom voice model. + :param str description: A new description for the custom voice model. + :param list[CustomWord] words: An array of words and their translations that are to be added or updated for the custom voice model. Pass an empty array to make no additions or updates. + :rtype: None + """ + if customization_id is None: + raise ValueError('customization_id must be provided') + if words is not None: + words = [self._convert_model(x) for x in words] + data = {'name': name, 'description': description, 'words': words} + url = '/v1/customizations/{0}'.format( + *self._encode_path_vars(customization_id)) + self.request(method='POST', url=url, json=data, accept_json=True) + return None + + @deprecated('Use update_voice_model() instead') def update_customization(self, customization_id, name=None, description=None, words=None): - body = { - 'name': name, - 'description': description, - 'words': words - } - return self.request(method='POST', url='/v1/customizations/{0}'.format( - customization_id), json=body) + return self.update_voice_model(customization_id, name, description, words) - def delete_customization(self, customization_id): - customization_id = self.unpack_id(customization_id, 'customization_id') - return self.request(method='DELETE', - 
url='/v1/customizations/{0}'.format( - customization_id)) + ######################### + # customWords + ######################### - def get_customization_words(self, customization_id): - customization_id = self.unpack_id(customization_id, 'customization_id') - return self.request(method='GET', - url='/v1/customizations/{0}/words'.format( - customization_id), accept_json=True) + def add_word(self, customization_id, word, translation, + part_of_speech=None): + """ + Adds a word to a custom voice model. - def add_customization_words(self, customization_id, words): - customization_id = self.unpack_id(customization_id, 'customization_id') - body = { - 'words': words - } - return self.request(method='POST', - url='/v1/customizations/{0}/words'.format( - customization_id), json=body) + Adds a single word and its translation to the custom voice model with the + specified `customization_id`. Adding a new translation for a word that already + exists in a custom model overwrites the word's existing translation. A custom + model can contain no more than 20,000 entries. You must use credentials for the + instance of the service that owns a model to add a word to it. **Note:** This + method is currently a beta release. - def get_customization_word(self, customization_id, word): - customization_id = self.unpack_id(customization_id, 'customization_id') - return self.request(method='GET', - url='/v1/customizations/{0}/words/{1}'.format( - customization_id, word), - accept_json=True) + :param str customization_id: The GUID of the custom voice model that is to be updated. You must make the request with service credentials created for the instance of the service that owns the custom model. + :param str word: The word that is to be added or updated for the custom voice model. + :param str translation: The phonetic or sounds-like translation for the word. 
A phonetic translation is based on the SSML format for representing the phonetic string of a word either as an IPA translation or as an IBM SPR translation. A sounds-like is one or more words that, when combined, sound like the word. + :param str part_of_speech: **Japanese only.** The part of speech for the word. The service uses the value to produce the correct intonation for the word. You can create only a single entry, with or without a single part of speech, for any word; you cannot create multiple entries with different parts of speech for the same word. For more information, see [Working with Japanese entries](https://console.bluemix.net/docs/services/text-to-speech/custom-rules.html#jaNotes). + :rtype: None + """ + if customization_id is None: + raise ValueError('customization_id must be provided') + if word is None: + raise ValueError('word must be provided') + if translation is None: + raise ValueError('translation must be provided') + data = {'translation': translation, 'part_of_speech': part_of_speech} + url = '/v1/customizations/{0}/words/{1}'.format(*self._encode_path_vars( + customization_id, word)) + self.request(method='PUT', url=url, json=data, accept_json=True) + return None + @deprecated('Use add_word() instead.') def set_customization_word(self, customization_id, word, translation): - customization_id = self.unpack_id(customization_id, 'customization_id') - body = { - 'translation': translation - } - return self.request(method='PUT', - url='/v1/customizations/{0}/words/{1}'.format( - customization_id, word), - json=body) + return self.add_word(customization_id, word, translation) + + def add_words(self, customization_id, words): + """ + Adds one or more words to a custom voice model. + + Adds one or more words and their translations to the custom voice model with the + specified `customization_id`. Adding a new translation for a word that already + exists in a custom model overwrites the word's existing translation. 
A custom + model can contain no more than 20,000 entries. You must use credentials for the + instance of the service that owns a model to add words to it. **Note:** This + method is currently a beta release. + + :param str customization_id: The GUID of the custom voice model that is to be updated. You must make the request with service credentials created for the instance of the service that owns the custom model. + :param list[CustomWord] words: An array of `CustomWord` objects that provides information about the words and their translations that are to be added or updated for the custom voice model. + :rtype: None + """ + if customization_id is None: + raise ValueError('customization_id must be provided') + if words is None: + raise ValueError('words must be provided') + words = [self._convert_model(x) for x in words] + data = {'words': words} + url = '/v1/customizations/{0}/words'.format( + *self._encode_path_vars(customization_id)) + self.request(method='POST', url=url, json=data, accept_json=True) + return None + + @deprecated('Use add_words() instead.') + def add_customization_words(self, customization_id, words): + return self.add_words(customization_id, words) + def delete_word(self, customization_id, word): + """ + Deletes a word from a custom voice model. + + Deletes a single word from the custom voice model with the specified + `customization_id`. You must use credentials for the instance of the service that + owns a model to delete it. **Note:** This method is currently a beta release. + + :param str customization_id: The GUID of the custom voice model from which to delete a word. You must make the request with service credentials created for the instance of the service that owns the custom model. + :param str word: The word that is to be deleted from the custom voice model. 
+ :rtype: None + """ + if customization_id is None: + raise ValueError('customization_id must be provided') + if word is None: + raise ValueError('word must be provided') + url = '/v1/customizations/{0}/words/{1}'.format(*self._encode_path_vars( + customization_id, word)) + self.request(method='DELETE', url=url, accept_json=True) + return None + + @deprecated('Use delete_word() instead.') def delete_customization_word(self, customization_id, word): - customization_id = self.unpack_id(customization_id, 'customization_id') - return self.request(method='DELETE', - url='/v1/customizations/{0}/words/{1}'.format( - customization_id, word)) + return self.delete_word(customization_id, word) + + def get_word(self, customization_id, word): + """ + Queries details about a word in a custom voice model. + + Returns the translation for a single word from the custom model with the specified + `customization_id`. The output shows the translation as it is defined in the + model. You must use credentials for the instance of the service that owns a model + to query information about its words. **Note:** This method is currently a beta + release. + + :param str customization_id: The GUID of the custom voice model from which to query a word. You must make the request with service credentials created for the instance of the service that owns the custom model. + :param str word: The word that is to be queried from the custom voice model. + :return: A `dict` containing the `Translation` response. 
+ :rtype: dict + """ + if customization_id is None: + raise ValueError('customization_id must be provided') + if word is None: + raise ValueError('word must be provided') + url = '/v1/customizations/{0}/words/{1}'.format(*self._encode_path_vars( + customization_id, word)) + response = self.request(method='GET', url=url, accept_json=True) + return response + + @deprecated('Use get_word() instead.') + def get_customization_word(self, customization_id, word): + return self.get_word(customization_id, word) + + def list_words(self, customization_id): + """ + Queries details about the words in a custom voice model. + + Lists all of the words and their translations for the custom voice model with the + specified `customization_id`. The output shows the translations as they are + defined in the model. You must use credentials for the instance of the service + that owns a model to query information about its words. **Note:** This method is + currently a beta release. + + :param str customization_id: The GUID of the custom voice model that is to be queried. You must make the request with service credentials created for the instance of the service that owns the custom model. + :return: A `dict` containing the `Words` response. + :rtype: dict + """ + if customization_id is None: + raise ValueError('customization_id must be provided') + url = '/v1/customizations/{0}/words'.format( + *self._encode_path_vars(customization_id)) + response = self.request(method='GET', url=url, accept_json=True) + return response + + @deprecated('Use list_words() instead.') + def get_customization_words(self, customization_id): + return self.list_words(customization_id) + + +############################################################################## +# Models +############################################################################## + + +class CustomWord(object): + """ + CustomWord. + + :attr str word: A word that is to be added or updated for the custom voice model. 
+ :attr str translation: The phonetic or sounds-like translation for the word. A phonetic translation is based on the SSML format for representing the phonetic string of a word either as an IPA or IBM SPR translation. A sounds-like translation consists of one or more words that, when combined, sound like the word. + :attr str part_of_speech: (optional) **Japanese only.** The part of speech for the word. The service uses the value to produce the correct intonation for the word. You can create only a single entry, with or without a single part of speech, for any word; you cannot create multiple entries with different parts of speech for the same word. For more information, see [Working with Japanese entries](https://console.bluemix.net/docs/services/text-to-speech/custom-rules.html#jaNotes). + """ + + def __init__(self, word, translation, part_of_speech=None): + """ + Initialize a CustomWord object. + + :param str word: A word that is to be added or updated for the custom voice model. + :param str translation: The phonetic or sounds-like translation for the word. A phonetic translation is based on the SSML format for representing the phonetic string of a word either as an IPA or IBM SPR translation. A sounds-like translation consists of one or more words that, when combined, sound like the word. + :param str part_of_speech: (optional) **Japanese only.** The part of speech for the word. The service uses the value to produce the correct intonation for the word. You can create only a single entry, with or without a single part of speech, for any word; you cannot create multiple entries with different parts of speech for the same word. For more information, see [Working with Japanese entries](https://console.bluemix.net/docs/services/text-to-speech/custom-rules.html#jaNotes). 
+ """ + self.word = word + self.translation = translation + self.part_of_speech = part_of_speech + + @classmethod + def _from_dict(cls, _dict): + """Initialize a CustomWord object from a json dictionary.""" + args = {} + if 'word' in _dict: + args['word'] = _dict['word'] + else: + raise ValueError( + 'Required property \'word\' not present in CustomWord JSON') + if 'translation' in _dict: + args['translation'] = _dict['translation'] + else: + raise ValueError( + 'Required property \'translation\' not present in CustomWord JSON' + ) + if 'part_of_speech' in _dict: + args['part_of_speech'] = _dict['part_of_speech'] + return cls(**args) + + def _to_dict(self): + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'word') and self.word is not None: + _dict['word'] = self.word + if hasattr(self, 'translation') and self.translation is not None: + _dict['translation'] = self.translation + if hasattr(self, 'part_of_speech') and self.part_of_speech is not None: + _dict['part_of_speech'] = self.part_of_speech + return _dict + + def __str__(self): + """Return a `str` version of this CustomWord object.""" + return json.dumps(self._to_dict(), indent=2) + + def __eq__(self, other): + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other): + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class Pronunciation(object): + """ + Pronunciation. + + :attr str pronunciation: The pronunciation of the requested text in the specified voice and format. + """ + + def __init__(self, pronunciation): + """ + Initialize a Pronunciation object. + + :param str pronunciation: The pronunciation of the requested text in the specified voice and format. 
+ """ + self.pronunciation = pronunciation + + @classmethod + def _from_dict(cls, _dict): + """Initialize a Pronunciation object from a json dictionary.""" + args = {} + if 'pronunciation' in _dict: + args['pronunciation'] = _dict['pronunciation'] + else: + raise ValueError( + 'Required property \'pronunciation\' not present in Pronunciation JSON' + ) + return cls(**args) + + def _to_dict(self): + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'pronunciation') and self.pronunciation is not None: + _dict['pronunciation'] = self.pronunciation + return _dict + + def __str__(self): + """Return a `str` version of this Pronunciation object.""" + return json.dumps(self._to_dict(), indent=2) + + def __eq__(self, other): + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other): + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class SupportedFeatures(object): + """ + SupportedFeatures. + + :attr bool custom_pronunciation: If `true`, the voice can be customized; if `false`, the voice cannot be customized. (Same as `customizable`.). + :attr bool voice_transformation: If `true`, the voice can be transformed by using the SSML <voice-transformation> element; if `false`, the voice cannot be transformed. + """ + + def __init__(self, custom_pronunciation, voice_transformation): + """ + Initialize a SupportedFeatures object. + + :param bool custom_pronunciation: If `true`, the voice can be customized; if `false`, the voice cannot be customized. (Same as `customizable`.). + :param bool voice_transformation: If `true`, the voice can be transformed by using the SSML <voice-transformation> element; if `false`, the voice cannot be transformed. 
+ """ + self.custom_pronunciation = custom_pronunciation + self.voice_transformation = voice_transformation + + @classmethod + def _from_dict(cls, _dict): + """Initialize a SupportedFeatures object from a json dictionary.""" + args = {} + if 'custom_pronunciation' in _dict: + args['custom_pronunciation'] = _dict['custom_pronunciation'] + else: + raise ValueError( + 'Required property \'custom_pronunciation\' not present in SupportedFeatures JSON' + ) + if 'voice_transformation' in _dict: + args['voice_transformation'] = _dict['voice_transformation'] + else: + raise ValueError( + 'Required property \'voice_transformation\' not present in SupportedFeatures JSON' + ) + return cls(**args) + + def _to_dict(self): + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'custom_pronunciation' + ) and self.custom_pronunciation is not None: + _dict['custom_pronunciation'] = self.custom_pronunciation + if hasattr(self, 'voice_transformation' + ) and self.voice_transformation is not None: + _dict['voice_transformation'] = self.voice_transformation + return _dict + + def __str__(self): + """Return a `str` version of this SupportedFeatures object.""" + return json.dumps(self._to_dict(), indent=2) + + def __eq__(self, other): + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other): + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class Translation(object): + """ + Translation. + + :attr str translation: The phonetic or sounds-like translation for the word. A phonetic translation is based on the SSML format for representing the phonetic string of a word either as an IPA translation or as an IBM SPR translation. A sounds-like is one or more words that, when combined, sound like the word. 
+ :attr str part_of_speech: (optional) **Japanese only.** The part of speech for the word. The service uses the value to produce the correct intonation for the word. You can create only a single entry, with or without a single part of speech, for any word; you cannot create multiple entries with different parts of speech for the same word. For more information, see [Working with Japanese entries](https://console.bluemix.net/docs/services/text-to-speech/custom-rules.html#jaNotes). + """ + + def __init__(self, translation, part_of_speech=None): + """ + Initialize a Translation object. + + :param str translation: The phonetic or sounds-like translation for the word. A phonetic translation is based on the SSML format for representing the phonetic string of a word either as an IPA translation or as an IBM SPR translation. A sounds-like is one or more words that, when combined, sound like the word. + :param str part_of_speech: (optional) **Japanese only.** The part of speech for the word. The service uses the value to produce the correct intonation for the word. You can create only a single entry, with or without a single part of speech, for any word; you cannot create multiple entries with different parts of speech for the same word. For more information, see [Working with Japanese entries](https://console.bluemix.net/docs/services/text-to-speech/custom-rules.html#jaNotes). 
+ """ + self.translation = translation + self.part_of_speech = part_of_speech + + @classmethod + def _from_dict(cls, _dict): + """Initialize a Translation object from a json dictionary.""" + args = {} + if 'translation' in _dict: + args['translation'] = _dict['translation'] + else: + raise ValueError( + 'Required property \'translation\' not present in Translation JSON' + ) + if 'part_of_speech' in _dict: + args['part_of_speech'] = _dict['part_of_speech'] + return cls(**args) + + def _to_dict(self): + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'translation') and self.translation is not None: + _dict['translation'] = self.translation + if hasattr(self, 'part_of_speech') and self.part_of_speech is not None: + _dict['part_of_speech'] = self.part_of_speech + return _dict + + def __str__(self): + """Return a `str` version of this Translation object.""" + return json.dumps(self._to_dict(), indent=2) + + def __eq__(self, other): + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other): + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class Voice(object): + """ + Voice. + + :attr str url: The URI of the voice. + :attr str gender: The gender of the voice: `male` or `female`. + :attr str name: The name of the voice. Use this as the voice identifier in all requests. + :attr str language: The language and region of the voice (for example, `en-US`). + :attr str description: A textual description of the voice. + :attr bool customizable: If `true`, the voice can be customized; if `false`, the voice cannot be customized. (Same as `custom_pronunciation`; maintained for backward compatibility.). + :attr SupportedFeatures supported_features: Describes the additional service features supported with the voice. 
+ :attr VoiceModel customization: (optional) Returns information about a specified custom voice model. **Note:** This field is returned only when you list information about a specific voice and specify the GUID of a custom voice model that is based on that voice. + """ + + def __init__(self, + url, + gender, + name, + language, + description, + customizable, + supported_features, + customization=None): + """ + Initialize a Voice object. + + :param str url: The URI of the voice. + :param str gender: The gender of the voice: `male` or `female`. + :param str name: The name of the voice. Use this as the voice identifier in all requests. + :param str language: The language and region of the voice (for example, `en-US`). + :param str description: A textual description of the voice. + :param bool customizable: If `true`, the voice can be customized; if `false`, the voice cannot be customized. (Same as `custom_pronunciation`; maintained for backward compatibility.). + :param SupportedFeatures supported_features: Describes the additional service features supported with the voice. + :param VoiceModel customization: (optional) Returns information about a specified custom voice model. **Note:** This field is returned only when you list information about a specific voice and specify the GUID of a custom voice model that is based on that voice. 
+ """ + self.url = url + self.gender = gender + self.name = name + self.language = language + self.description = description + self.customizable = customizable + self.supported_features = supported_features + self.customization = customization + + @classmethod + def _from_dict(cls, _dict): + """Initialize a Voice object from a json dictionary.""" + args = {} + if 'url' in _dict: + args['url'] = _dict['url'] + else: + raise ValueError( + 'Required property \'url\' not present in Voice JSON') + if 'gender' in _dict: + args['gender'] = _dict['gender'] + else: + raise ValueError( + 'Required property \'gender\' not present in Voice JSON') + if 'name' in _dict: + args['name'] = _dict['name'] + else: + raise ValueError( + 'Required property \'name\' not present in Voice JSON') + if 'language' in _dict: + args['language'] = _dict['language'] + else: + raise ValueError( + 'Required property \'language\' not present in Voice JSON') + if 'description' in _dict: + args['description'] = _dict['description'] + else: + raise ValueError( + 'Required property \'description\' not present in Voice JSON') + if 'customizable' in _dict: + args['customizable'] = _dict['customizable'] + else: + raise ValueError( + 'Required property \'customizable\' not present in Voice JSON') + if 'supported_features' in _dict: + args['supported_features'] = SupportedFeatures._from_dict( + _dict['supported_features']) + else: + raise ValueError( + 'Required property \'supported_features\' not present in Voice JSON' + ) + if 'customization' in _dict: + args['customization'] = VoiceModel._from_dict( + _dict['customization']) + return cls(**args) + + def _to_dict(self): + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'url') and self.url is not None: + _dict['url'] = self.url + if hasattr(self, 'gender') and self.gender is not None: + _dict['gender'] = self.gender + if hasattr(self, 'name') and self.name is not None: + _dict['name'] = self.name + if hasattr(self, 
'language') and self.language is not None: + _dict['language'] = self.language + if hasattr(self, 'description') and self.description is not None: + _dict['description'] = self.description + if hasattr(self, 'customizable') and self.customizable is not None: + _dict['customizable'] = self.customizable + if hasattr( + self, + 'supported_features') and self.supported_features is not None: + _dict['supported_features'] = self.supported_features._to_dict() + if hasattr(self, 'customization') and self.customization is not None: + _dict['customization'] = self.customization._to_dict() + return _dict + + def __str__(self): + """Return a `str` version of this Voice object.""" + return json.dumps(self._to_dict(), indent=2) + + def __eq__(self, other): + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other): + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class VoiceModel(object): + """ + VoiceModel. + + :attr str customization_id: The customization ID (GUID) of the custom voice model. **Note:** When you create a new custom voice model, the service returns only the GUID of the new custom model; it does not return the other fields of this object. + :attr str name: (optional) The name of the custom voice model. + :attr str language: (optional) The language identifier of the custom voice model (for example, `en-US`). + :attr str owner: (optional) The GUID of the service credentials for the instance of the service that owns the custom voice model. + :attr str created: (optional) The date and time in Coordinated Universal Time (UTC) at which the custom voice model was created. The value is provided in full ISO 8601 format (`YYYY-MM-DDThh:mm:ss.sTZD`). 
+ :attr str last_modified: (optional) The date and time in Coordinated Universal Time (UTC) at which the custom voice model was last modified. Equals `created` when a new voice model is first added but has yet to be updated. The value is provided in full ISO 8601 format (`YYYY-MM-DDThh:mm:ss.sTZD`). + :attr str description: (optional) The description of the custom voice model. + :attr list[Word] words: (optional) An array of words and their translations from the custom voice model. The words are listed in alphabetical order, with uppercase letters listed before lowercase letters. The array is empty if the custom model contains no words. **Note:** This field is returned only when you list information about a specific custom voice model. + """ + + def __init__(self, + customization_id, + name=None, + language=None, + owner=None, + created=None, + last_modified=None, + description=None, + words=None): + """ + Initialize a VoiceModel object. + + :param str customization_id: The customization ID (GUID) of the custom voice model. **Note:** When you create a new custom voice model, the service returns only the GUID of the new custom model; it does not return the other fields of this object. + :param str name: (optional) The name of the custom voice model. + :param str language: (optional) The language identifier of the custom voice model (for example, `en-US`). + :param str owner: (optional) The GUID of the service credentials for the instance of the service that owns the custom voice model. + :param str created: (optional) The date and time in Coordinated Universal Time (UTC) at which the custom voice model was created. The value is provided in full ISO 8601 format (`YYYY-MM-DDThh:mm:ss.sTZD`). + :param str last_modified: (optional) The date and time in Coordinated Universal Time (UTC) at which the custom voice model was last modified. Equals `created` when a new voice model is first added but has yet to be updated. 
The value is provided in full ISO 8601 format (`YYYY-MM-DDThh:mm:ss.sTZD`). + :param str description: (optional) The description of the custom voice model. + :param list[Word] words: (optional) An array of words and their translations from the custom voice model. The words are listed in alphabetical order, with uppercase letters listed before lowercase letters. The array is empty if the custom model contains no words. **Note:** This field is returned only when you list information about a specific custom voice model. + """ + self.customization_id = customization_id + self.name = name + self.language = language + self.owner = owner + self.created = created + self.last_modified = last_modified + self.description = description + self.words = words + + @classmethod + def _from_dict(cls, _dict): + """Initialize a VoiceModel object from a json dictionary.""" + args = {} + if 'customization_id' in _dict: + args['customization_id'] = _dict['customization_id'] + else: + raise ValueError( + 'Required property \'customization_id\' not present in VoiceModel JSON' + ) + if 'name' in _dict: + args['name'] = _dict['name'] + if 'language' in _dict: + args['language'] = _dict['language'] + if 'owner' in _dict: + args['owner'] = _dict['owner'] + if 'created' in _dict: + args['created'] = _dict['created'] + if 'last_modified' in _dict: + args['last_modified'] = _dict['last_modified'] + if 'description' in _dict: + args['description'] = _dict['description'] + if 'words' in _dict: + args['words'] = [Word._from_dict(x) for x in _dict['words']] + return cls(**args) + + def _to_dict(self): + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, + 'customization_id') and self.customization_id is not None: + _dict['customization_id'] = self.customization_id + if hasattr(self, 'name') and self.name is not None: + _dict['name'] = self.name + if hasattr(self, 'language') and self.language is not None: + _dict['language'] = self.language + if hasattr(self, 
'owner') and self.owner is not None: + _dict['owner'] = self.owner + if hasattr(self, 'created') and self.created is not None: + _dict['created'] = self.created + if hasattr(self, 'last_modified') and self.last_modified is not None: + _dict['last_modified'] = self.last_modified + if hasattr(self, 'description') and self.description is not None: + _dict['description'] = self.description + if hasattr(self, 'words') and self.words is not None: + _dict['words'] = [x._to_dict() for x in self.words] + return _dict + + def __str__(self): + """Return a `str` version of this VoiceModel object.""" + return json.dumps(self._to_dict(), indent=2) + + def __eq__(self, other): + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other): + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class VoiceModels(object): + """ + VoiceModels. + + :attr list[VoiceModel] customizations: An array of `VoiceModel` objects that provides information about each available custom voice model. The array is empty if the requesting service credentials own no custom voice models (if no language is specified) or own no custom voice models for the specified language. + """ + + def __init__(self, customizations): + """ + Initialize a VoiceModels object. + + :param list[VoiceModel] customizations: An array of `VoiceModel` objects that provides information about each available custom voice model. The array is empty if the requesting service credentials own no custom voice models (if no language is specified) or own no custom voice models for the specified language. 
+ """ + self.customizations = customizations + + @classmethod + def _from_dict(cls, _dict): + """Initialize a VoiceModels object from a json dictionary.""" + args = {} + if 'customizations' in _dict: + args['customizations'] = [ + VoiceModel._from_dict(x) for x in _dict['customizations'] + ] + else: + raise ValueError( + 'Required property \'customizations\' not present in VoiceModels JSON' + ) + return cls(**args) + + def _to_dict(self): + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'customizations') and self.customizations is not None: + _dict['customizations'] = [ + x._to_dict() for x in self.customizations + ] + return _dict + + def __str__(self): + """Return a `str` version of this VoiceModels object.""" + return json.dumps(self._to_dict(), indent=2) + + def __eq__(self, other): + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other): + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class Voices(object): + """ + Voices. + + :attr list[Voice] voices: A list of available voices. + """ + + def __init__(self, voices): + """ + Initialize a Voices object. + + :param list[Voice] voices: A list of available voices. 
+ """ + self.voices = voices + + @classmethod + def _from_dict(cls, _dict): + """Initialize a Voices object from a json dictionary.""" + args = {} + if 'voices' in _dict: + args['voices'] = [Voice._from_dict(x) for x in _dict['voices']] + else: + raise ValueError( + 'Required property \'voices\' not present in Voices JSON') + return cls(**args) + + def _to_dict(self): + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'voices') and self.voices is not None: + _dict['voices'] = [x._to_dict() for x in self.voices] + return _dict + + def __str__(self): + """Return a `str` version of this Voices object.""" + return json.dumps(self._to_dict(), indent=2) + + def __eq__(self, other): + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other): + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class Word(object): + """ + Word. + + :attr str word: A word from the custom voice model. + :attr str translation: The phonetic or sounds-like translation for the word. A phonetic translation is based on the SSML format for representing the phonetic string of a word either as an IPA or IBM SPR translation. A sounds-like translation consists of one or more words that, when combined, sound like the word. + :attr str part_of_speech: (optional) **Japanese only.** The part of speech for the word. The service uses the value to produce the correct intonation for the word. You can create only a single entry, with or without a single part of speech, for any word; you cannot create multiple entries with different parts of speech for the same word. For more information, see [Working with Japanese entries](https://console.bluemix.net/docs/services/text-to-speech/custom-rules.html#jaNotes). 
+ """ + + def __init__(self, word, translation, part_of_speech=None): + """ + Initialize a Word object. + + :param str word: A word from the custom voice model. + :param str translation: The phonetic or sounds-like translation for the word. A phonetic translation is based on the SSML format for representing the phonetic string of a word either as an IPA or IBM SPR translation. A sounds-like translation consists of one or more words that, when combined, sound like the word. + :param str part_of_speech: (optional) **Japanese only.** The part of speech for the word. The service uses the value to produce the correct intonation for the word. You can create only a single entry, with or without a single part of speech, for any word; you cannot create multiple entries with different parts of speech for the same word. For more information, see [Working with Japanese entries](https://console.bluemix.net/docs/services/text-to-speech/custom-rules.html#jaNotes). + """ + self.word = word + self.translation = translation + self.part_of_speech = part_of_speech + + @classmethod + def _from_dict(cls, _dict): + """Initialize a Word object from a json dictionary.""" + args = {} + if 'word' in _dict: + args['word'] = _dict['word'] + else: + raise ValueError( + 'Required property \'word\' not present in Word JSON') + if 'translation' in _dict: + args['translation'] = _dict['translation'] + else: + raise ValueError( + 'Required property \'translation\' not present in Word JSON') + if 'part_of_speech' in _dict: + args['part_of_speech'] = _dict['part_of_speech'] + return cls(**args) + + def _to_dict(self): + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'word') and self.word is not None: + _dict['word'] = self.word + if hasattr(self, 'translation') and self.translation is not None: + _dict['translation'] = self.translation + if hasattr(self, 'part_of_speech') and self.part_of_speech is not None: + _dict['part_of_speech'] = self.part_of_speech + 
return _dict + + def __str__(self): + """Return a `str` version of this Word object.""" + return json.dumps(self._to_dict(), indent=2) + + def __eq__(self, other): + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other): + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class Words(object): + """ + Words. + + :attr list[Word] words: An array of words and their translations from the custom voice model. The words are listed in alphabetical order, with uppercase letters listed before lowercase letters. The array is empty if the custom model contains no words. + """ + + def __init__(self, words): + """ + Initialize a Words object. + + :param list[Word] words: An array of words and their translations from the custom voice model. The words are listed in alphabetical order, with uppercase letters listed before lowercase letters. The array is empty if the custom model contains no words. 
+ """ + self.words = words + + @classmethod + def _from_dict(cls, _dict): + """Initialize a Words object from a json dictionary.""" + args = {} + if 'words' in _dict: + args['words'] = [Word._from_dict(x) for x in _dict['words']] + else: + raise ValueError( + 'Required property \'words\' not present in Words JSON') + return cls(**args) + + def _to_dict(self): + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'words') and self.words is not None: + _dict['words'] = [x._to_dict() for x in self.words] + return _dict + + def __str__(self): + """Return a `str` version of this Words object.""" + return json.dumps(self._to_dict(), indent=2) + + def __eq__(self, other): + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other): + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other diff --git a/watson_developer_cloud/utils.py b/watson_developer_cloud/utils.py index f7abf8d51..92ac98bf5 100644 --- a/watson_developer_cloud/utils.py +++ b/watson_developer_cloud/utils.py @@ -3,10 +3,10 @@ def deprecated(message): def deprecated_decorator(func): def deprecated_func(*args, **kwargs): + warnings.simplefilter('always', DeprecationWarning) warnings.warn("{} is a deprecated function. 
{}".format(func.__name__, message), category=DeprecationWarning, stacklevel=2) - warnings.simplefilter('default', DeprecationWarning) return func(*args, **kwargs) return deprecated_func return deprecated_decorator \ No newline at end of file From 85e8231cffdda5108aaeac3ee2ddc47173d9aac1 Mon Sep 17 00:00:00 2001 From: Ammar Dodin <8563179+ammardodin@users.noreply.github.com> Date: Tue, 13 Feb 2018 17:01:47 -0500 Subject: [PATCH 02/45] Generate Speech to Text (#335) * :new: generated speech-to-text from latest swagger * :green_heart: add tests for acoustic customizations and update old tests * Regenerate speech to text * Manual tweaks for add_word * Regenerate to handle single content type * Remove content-type from tests --- examples/speech_to_text_v1.py | 18 +- .../test_integration_speech_to_text_v1.py | 65 +- test/test_speech_to_text_v1.py | 494 ++- watson_developer_cloud/speech_to_text_v1.py | 3241 ++++++++++++++++- 4 files changed, 3512 insertions(+), 306 deletions(-) diff --git a/examples/speech_to_text_v1.py b/examples/speech_to_text_v1.py index afd9bb046..b89e99d51 100644 --- a/examples/speech_to_text_v1.py +++ b/examples/speech_to_text_v1.py @@ -5,17 +5,19 @@ speech_to_text = SpeechToTextV1( username='YOUR SERVICE USERNAME', - password='YOUR SERVICE PASSWORD', - x_watson_learning_opt_out=False -) + password='YOUR SERVICE PASSWORD') -print(json.dumps(speech_to_text.models(), indent=2)) +print(json.dumps(speech_to_text.list_models(), indent=2)) print(json.dumps(speech_to_text.get_model('en-US_BroadbandModel'), indent=2)) with open(join(dirname(__file__), '../resources/speech.wav'), 'rb') as audio_file: - print(json.dumps(speech_to_text.recognize( - audio_file, content_type='audio/wav', timestamps=True, - word_confidence=True), - indent=2)) + print( + json.dumps( + speech_to_text.recognize( + audio=audio_file, + content_type='audio/wav', + timestamps=True, + word_confidence=True), + indent=2)) diff --git a/test/integration/test_integration_speech_to_text_v1.py 
b/test/integration/test_integration_speech_to_text_v1.py index de32ee84a..cc00bf085 100644 --- a/test/integration/test_integration_speech_to_text_v1.py +++ b/test/integration/test_integration_speech_to_text_v1.py @@ -2,18 +2,67 @@ import pytest import os import watson_developer_cloud +from watson_developer_cloud.speech_to_text_v1 import SpeechRecognitionResults + -@pytest.mark.skip("These are destructive, so run them manually") class TestSpeechToTextV1(TestCase): def setUp(self): - self.speech_to_text = watson_developer_cloud.SpeechToTextV1(username=os.getenv('SPEECH_TO_TEXT_USERNAME'), - password=os.getenv('SPEECH_TO_TEXT_PASSWORD')) - self.custom_models = self.speech_to_text.list_custom_models() - self.create_custom_model = self.speech_to_text.create_custom_model(name="integration_test_model") + self.speech_to_text = watson_developer_cloud.SpeechToTextV1( + username=os.getenv('SPEECH_TO_TEXT_USERNAME'), + password=os.getenv('SPEECH_TO_TEXT_PASSWORD')) + self.custom_models = self.speech_to_text.list_language_models() + self.create_custom_model = self.speech_to_text.create_language_model( + name="integration_test_model", + base_model_name="en-US_BroadbandModel") + self.customization_id = self.create_custom_model['customization_id'] def tearDown(self): - self.speech_to_text.delete_custom_model(modelid=self.create_custom_model['customization_id']) + self.speech_to_text.delete_language_model( + customization_id=self.create_custom_model['customization_id']) + + def test_models(self): + output = self.speech_to_text.list_models() + assert output is not None + model = self.speech_to_text.get_model('ko-KR_BroadbandModel') + assert model is not None def test_create_custom_model(self): - current_custom_models = self.speech_to_text.list_custom_models() - assert len(current_custom_models['customizations']) - len(self.custom_models['customizations']) == 1 + current_custom_models = self.speech_to_text.list_language_models() + assert len(current_custom_models['customizations']) - 
len( + self.custom_models['customizations']) >= 1 + + def test_recognize(self): + with open( + os.path.join( + os.path.dirname(__file__), '../../resources/speech.wav'), + 'rb') as audio_file: + output = self.speech_to_text.recognize( + audio=audio_file, content_type='audio/l16; rate=44100') + assert output['results'][0]['alternatives'][0][ + 'transcript'] == 'thunderstorms could produce large hail isolated tornadoes and heavy rain ' + + def test_recognitions(self): + output = self.speech_to_text.check_jobs() + assert output is not None + + def test_custom_corpora(self): + output = self.speech_to_text.list_corpora(self.customization_id) + assert len(output['corpora']) == 0 + + def test_acoustic_model(self): + list_models = self.speech_to_text.list_acoustic_models() + assert list_models is not None + + create_acoustic_model = self.speech_to_text.create_acoustic_model( + name="integration_test_model_python", + base_model_name="en-US_BroadbandModel") + assert create_acoustic_model is not None + + get_acoustic_model = self.speech_to_text.get_acoustic_model( + create_acoustic_model['customization_id']) + assert get_acoustic_model is not None + + self.speech_to_text.reset_acoustic_model( + get_acoustic_model['customization_id']) + + self.speech_to_text.delete_acoustic_model(get_acoustic_model['customization_id']) diff --git a/test/test_speech_to_text_v1.py b/test/test_speech_to_text_v1.py index 4a5aeed74..3912e40d3 100755 --- a/test/test_speech_to_text_v1.py +++ b/test/test_speech_to_text_v1.py @@ -4,6 +4,7 @@ import pytest import responses import watson_developer_cloud +from watson_developer_cloud.speech_to_text_v1 import CustomWord @responses.activate @@ -13,13 +14,16 @@ def test_success(): 'WatsonModel", "rate": 16000, "name": "WatsonModel", "language": "en-US", "description": ' \ '"Watson model \'v7w_134k.3\' for Attila 2-5 reco engine."}]}' - responses.add(responses.GET, models_url, - body=models_response, status=200, - content_type='application/json') + 
responses.add( + responses.GET, + models_url, + body=models_response, + status=200, + content_type='application/json') speech_to_text = watson_developer_cloud.SpeechToTextV1( username="username", password="password") - speech_to_text.models() + speech_to_text.list_models() assert responses.calls[0].request.url == models_url assert responses.calls[0].response.text == models_response @@ -28,13 +32,18 @@ def test_success(): recognize_response = '{"results":[{"alternatives":[{"transcript":"thunderstorms could produce large hail ' \ 'isolated tornadoes and heavy rain "}],"final":true}],"result_index":0}' - responses.add(responses.POST, recognize_url, - body=recognize_response, status=200, - content_type='application/json') + responses.add( + responses.POST, + recognize_url, + body=recognize_response, + status=200, + content_type='application/json') - with open(os.path.join(os.path.dirname(__file__), '../resources/speech.wav'), 'rb') as audio_file: + with open( + os.path.join(os.path.dirname(__file__), '../resources/speech.wav'), + 'rb') as audio_file: speech_to_text.recognize( - audio_file, content_type='audio/l16; rate=44100') + audio=audio_file, content_type='audio/l16; rate=44100') request_url = responses.calls[1].request.url assert request_url == recognize_url @@ -46,9 +55,12 @@ def test_success(): @responses.activate def test_get_model(): model_url = 'https://stream.watsonplatform.net/speech-to-text/api/v1/models/modelid' - responses.add(responses.GET, model_url, - body='{"bogus_response": "yep"}', status=200, - content_type='application/json') + responses.add( + responses.GET, + model_url, + body='{"bogus_response": "yep"}', + status=200, + content_type='application/json') speech_to_text = watson_developer_cloud.SpeechToTextV1( username="username", password="password") speech_to_text.get_model(model_id='modelid') @@ -62,51 +74,226 @@ def _decode_body(body): return body +@responses.activate +def test_recognitions(): + url = 
'https://stream.watsonplatform.net/speech-to-text/api/v1/recognitions' + get_response = '{"recognitions": [{"created": "2018-02-01T17:43:15.432Z","id": "6193190c-0777-11e8-9b4b-43ad845196dd","updated": "2018-02-01T17:43:17.998Z","status": "failed"}]}' + responses.add( + responses.GET, + url, + body=get_response, + status=200, + content_type='application/json') + + responses.add( + responses.POST, + url, + body='{"status": "waiting"}', + status=200, + content_type='application/json') + + responses.add( + responses.DELETE, + "{0}/jobid".format(url), + body='{"description": "deleted successfully"}', + status=200, + content_type='application/json') + + responses.add( + responses.GET, + "{0}/jobid".format(url), + body='{"status": "waiting"}', + status=200, + content_type='application/json') + + speech_to_text = watson_developer_cloud.SpeechToTextV1( + username="username", password="password") + + speech_to_text.check_jobs() + assert responses.calls[0].response.json()['recognitions'][0][ + 'id'] == '6193190c-0777-11e8-9b4b-43ad845196dd' + + speech_to_text.check_job('jobid') + assert responses.calls[1].response.json() == {'status': 'waiting'} + + with open( + os.path.join(os.path.dirname(__file__), '../resources/speech.wav'), + 'rb') as audio_file: + speech_to_text.create_job(audio=audio_file) + assert responses.calls[2].response.json() == {'status': 'waiting'} + + speech_to_text.delete_job('jobid') + assert responses.calls[3].response.json() == { + "description": "deleted successfully" + } + + assert len(responses.calls) == 4 + + +@responses.activate +def test_callbacks(): + base_url = 'https://stream.watsonplatform.net/speech-to-text/api/v1' + responses.add( + responses.POST, + "{0}/register_callback".format(base_url), + body='{"status": "created", "url": "monitorcalls.com"}', + status=200, + content_type='application/json') + + responses.add( + responses.POST, + "{0}/unregister_callback".format(base_url), + body='{"response": "The callback URL was successfully 
unregistered"}', + status=200, + content_type='application/json') + + speech_to_text = watson_developer_cloud.SpeechToTextV1( + username="username", password="password") + speech_to_text.register_callback("monitorcalls.com") + assert responses.calls[0].response.json() == { + "status": "created", + "url": "monitorcalls.com" + } + + speech_to_text.unregister_callback("monitorcalls.com") + assert responses.calls[1].response.json() == { + "response": "The callback URL was successfully unregistered" + } + + assert len(responses.calls) == 2 + + @responses.activate def test_custom_model(): customization_url = 'https://stream.watsonplatform.net/speech-to-text/api/v1/customizations' - train_url = "{0}/{1}/train".format(customization_url,'customid') + train_url = "{0}/{1}/train".format(customization_url, 'customid') + + responses.add( + responses.GET, + customization_url, + body='{"get response": "yep"}', + status=200, + content_type='application/json') + + responses.add( + responses.POST, + customization_url, + body='{"bogus_response": "yep"}', + status=200, + content_type='application/json') + + responses.add( + responses.GET, + "{0}/modelid".format(customization_url), + body='{"bogus_response": "yep"}', + status=200, + content_type='application/json') + + responses.add( + responses.DELETE, + "{0}/modelid".format(customization_url), + body='{"bogus_response": "yep"}', + status=200, + content_type='application/json') + + responses.add( + responses.POST, + train_url, + body='{"bogus_response": "yep"}', + status=200, + content_type='application/json') + + speech_to_text = watson_developer_cloud.SpeechToTextV1( + username="username", password="password") + + speech_to_text.list_language_models() + + speech_to_text.create_language_model( + name="Example model", + base_model_name="en-US_BroadbandModel") - responses.add(responses.GET, customization_url, - body='{"get response": "yep"}', status=200, - content_type='application/json') + parsed_body = 
json.loads(_decode_body(responses.calls[1].request.body)) + assert parsed_body['name'] == 'Example model' - responses.add(responses.POST, customization_url, - body='{"bogus_response": "yep"}', status=200, - content_type='application/json') + speech_to_text.create_language_model( + name="Example model Two", + base_model_name="en-US_BroadbandModel") - responses.add(responses.GET, "{0}/modelid".format(customization_url), - body='{"bogus_response": "yep"}', status=200, - content_type='application/json') + parsed_body = json.loads(_decode_body(responses.calls[2].request.body)) + assert parsed_body['name'] == 'Example model Two' + assert parsed_body['base_model_name'] == 'en-US_BroadbandModel' + + speech_to_text.train_language_model('customid') + speech_to_text.get_language_model(customization_id='modelid') + speech_to_text.delete_language_model(customization_id='modelid') - responses.add(responses.DELETE, "{0}/modelid".format(customization_url), - body='{"bogus_response": "yep"}', status=200, - content_type='application/json') + assert len(responses.calls) == 6 - responses.add(responses.POST, train_url, body='{"bogus_response": "yep"}', - status=200, content_type='application/json') + +@responses.activate +def test_acoustic_model(): + acoustic_customization_url = 'https://stream.watsonplatform.net/speech-to-text/api/v1/acoustic_customizations' + train_url = "{0}/{1}/train".format(acoustic_customization_url, 'customid') + + responses.add( + responses.GET, + acoustic_customization_url, + body='{"get response": "yep"}', + status=200, + content_type='application/json') + + responses.add( + responses.POST, + acoustic_customization_url, + body='{"bogus_response": "yep"}', + status=200, + content_type='application/json') + + responses.add( + responses.GET, + "{0}/modelid".format(acoustic_customization_url), + body='{"bogus_response": "yep"}', + status=200, + content_type='application/json') + + responses.add( + responses.DELETE, + 
"{0}/modelid".format(acoustic_customization_url), + body='{"bogus_response": "yep"}', + status=200, + content_type='application/json') + + responses.add( + responses.POST, + train_url, + body='{"bogus_response": "yep"}', + status=200, + content_type='application/json') speech_to_text = watson_developer_cloud.SpeechToTextV1( username="username", password="password") - speech_to_text.list_custom_models() + speech_to_text.list_acoustic_models() - speech_to_text.create_custom_model(name="Example model", base_model="en-US_BroadbandModel", - description="Example custom language model") + speech_to_text.create_acoustic_model( + name="Example model", + base_model_name="en-US_BroadbandModel", + description="Example custom language model") parsed_body = json.loads(_decode_body(responses.calls[1].request.body)) assert parsed_body['name'] == 'Example model' - speech_to_text.create_custom_model(name="Example model Two") + speech_to_text.create_acoustic_model( + name="Example model Two", + base_model_name="en-US_BroadbandModel") parsed_body = json.loads(_decode_body(responses.calls[2].request.body)) assert parsed_body['name'] == 'Example model Two' - assert parsed_body['description'] == '' assert parsed_body['base_model_name'] == 'en-US_BroadbandModel' - speech_to_text.train_custom_model('customid') - speech_to_text.get_custom_model(modelid='modelid') - speech_to_text.delete_custom_model(modelid='modelid') + speech_to_text.train_acoustic_model('customid') + speech_to_text.get_acoustic_model(customization_id='modelid') + speech_to_text.delete_acoustic_model(customization_id='modelid') assert len(responses.calls) == 6 @@ -118,24 +305,33 @@ def test_custom_corpora(): corpora_url.format('customid'), 'corpus') with responses.RequestsMock(assert_all_requests_are_fired=True) as rsps: - rsps.add(responses.GET, corpora_url.format('customid'), - body='{"get response": "yep"}', status=200, - content_type='application/json') - - rsps.add(responses.POST, get_corpora_url, - body='{"get 
response": "yep"}', - status=200, - content_type='application/json') - - rsps.add(responses.GET, get_corpora_url, - body='{"get response": "yep"}', - status=200, - content_type='application/json') - - rsps.add(responses.DELETE, get_corpora_url, - body='{"get response": "yep"}', - status=200, - content_type='application/json') + rsps.add( + responses.GET, + corpora_url.format('customid'), + body='{"get response": "yep"}', + status=200, + content_type='application/json') + + rsps.add( + responses.POST, + get_corpora_url, + body='{"get response": "yep"}', + status=200, + content_type='application/json') + + rsps.add( + responses.GET, + get_corpora_url, + body='{"get response": "yep"}', + status=200, + content_type='application/json') + + rsps.add( + responses.DELETE, + get_corpora_url, + body='{"get response": "yep"}', + status=200, + content_type='application/json') speech_to_text = watson_developer_cloud.SpeechToTextV1( username="username", password="password") @@ -145,14 +341,16 @@ def test_custom_corpora(): file_path = '../resources/speech_to_text/corpus-short-1.txt' full_path = os.path.join(os.path.dirname(__file__), file_path) with open(full_path) as corpus_file: - speech_to_text.add_corpus(customization_id='customid', - corpus_name="corpus", file_data=corpus_file) + speech_to_text.add_corpus( + customization_id='customid', + corpus_name="corpus", + corpus_file=corpus_file) - speech_to_text.get_corpus(customization_id='customid', - corpus_name='corpus') + speech_to_text.get_corpus( + customization_id='customid', corpus_name='corpus') - speech_to_text.delete_corpus(customization_id='customid', - corpus_name='corpus') + speech_to_text.delete_corpus( + customization_id='customid', corpus_name='corpus') @responses.activate @@ -160,71 +358,145 @@ def test_custom_words(): words_url = 'https://stream.watsonplatform.net/speech-to-text/api/v1/customizations/{0}/words' word_url = 'https://stream.watsonplatform.net/speech-to-text/api/v1/customizations/{0}/words/{1}' - 
responses.add(responses.PUT, word_url.format('custid', 'IEEE'), - body='{"get response": "yep"}', - status=200, - content_type='application/json') + responses.add( + responses.PUT, + word_url.format('custid', 'IEEE'), + body='{"get response": "yep"}', + status=200, + content_type='application/json') + + responses.add( + responses.PUT, + word_url.format('custid', 'wordname'), + body='{"get response": "yep"}', + status=200, + content_type='application/json') + + responses.add( + responses.DELETE, + word_url.format('custid', 'IEEE'), + body='{"get response": "yep"}', + status=200, + content_type='application/json') + + responses.add( + responses.DELETE, + word_url.format('custid', 'wordname'), + body='{"get response": "yep"}', + status=200, + content_type='application/json') + + responses.add( + responses.GET, + word_url.format('custid', 'IEEE'), + body='{"get response": "yep"}', + status=200, + content_type='application/json') + + responses.add( + responses.GET, + word_url.format('custid', 'wordname'), + body='{"get response": "yep"}', + status=200, + content_type='application/json') + + responses.add( + responses.POST, + words_url.format('custid'), + body='{"get response": "yep"}', + status=200, + content_type='application/json') + + responses.add( + responses.GET, + words_url.format('custid'), + body='{"get response": "yep"}', + status=200, + content_type='application/json') - responses.add(responses.DELETE, word_url.format('custid', 'IEEE'), - body='{"get response": "yep"}', - status=200, - content_type='application/json') + speech_to_text = watson_developer_cloud.SpeechToTextV1( + username="username", password="password") - responses.add(responses.GET, word_url.format('custid', 'IEEE'), - body='{"get response": "yep"}', - status=200, - content_type='application/json') + custom_word = CustomWord( + word="IEEE", sounds_like=["i triple e"], display_as="IEEE") - responses.add(responses.POST, words_url.format('custid'), - body='{"get response": "yep"}', - status=200, 
- content_type='application/json') + speech_to_text.add_word( + customization_id='custid', + word_name="IEEE", + sounds_like=["i triple e"], + display_as="IEEE") - responses.add(responses.GET, words_url.format('custid'), - body='{"get response": "yep"}', - status=200, - content_type='application/json') + speech_to_text.delete_word(customization_id='custid', word_name="wordname") - speech_to_text = watson_developer_cloud.SpeechToTextV1( - username="username", password="password") + speech_to_text.delete_word(customization_id='custid', word_name='IEEE') - custom_word = speech_to_text.CustomWord(word="IEEE", - sounds_like=["i triple e"], - display_as="IEEE") + custom_words = [custom_word, custom_word, custom_word] + speech_to_text.add_words( + customization_id='custid', + words=custom_words) - speech_to_text.add_custom_word(customization_id='custid', - custom_word=custom_word) + speech_to_text.get_word(customization_id='custid', word_name="IEEE") - speech_to_text.delete_custom_word(customization_id='custid', - custom_word=custom_word) + speech_to_text.get_word(customization_id='custid', word_name='wordname') - speech_to_text.delete_custom_word(customization_id='custid', - custom_word='IEEE') + speech_to_text.list_words(customization_id='custid') + speech_to_text.list_words(customization_id='custid', sort='alphabetical') + + speech_to_text.list_words(customization_id='custid', word_type='all') + + assert len(responses.calls) == 9 - custom_words = [custom_word, custom_word, custom_word] - speech_to_text.add_custom_words(customization_id='custid', - custom_words=custom_words) - speech_to_text.get_custom_word(customization_id='custid', - custom_word="IEEE") +@responses.activate +def test_custom_audio_resources(): + url = 'https://stream.watsonplatform.net/speech-to-text/api/v1/acoustic_customizations/{0}/audio/{1}' + + responses.add( + responses.POST, + url.format('custid', 'hiee'), + body='{"post response": "done"}', + status=200, + content_type='application/json') + + 
responses.add( + responses.DELETE, + url.format('custid', 'hiee'), + body='{"delete response": "done"}', + status=200, + content_type='application/json') + + responses.add( + responses.GET, + url.format('custid', 'hiee'), + body='{"get response": "done"}', + status=200, + content_type='application/json') + + responses.add( + responses.GET, + 'https://stream.watsonplatform.net/speech-to-text/api/v1/acoustic_customizations/custid/audio', + body='{"get response all": "done"}', + status=200, + content_type='application/json') - speech_to_text.get_custom_word(customization_id='custid', - custom_word=custom_word) + speech_to_text = watson_developer_cloud.SpeechToTextV1( + username="username", password="password") - speech_to_text.list_custom_words(customization_id='custid') - speech_to_text.list_custom_words( - customization_id='custid', sort='alphabetical') - with pytest.raises(KeyError) as keyerror: - speech_to_text.list_custom_words( - customization_id='custid', sort='badsort') - assert 'sort must be alphabetical or count' in str(keyerror.value) + with open( + os.path.join(os.path.dirname(__file__), '../resources/speech.wav'), + 'rb') as audio_file: + speech_to_text.add_audio( + customization_id='custid', + audio_name="hiee", + audio_resource=audio_file, + content_type="application/json") + assert responses.calls[0].response.json() == {"post response": "done"} - speech_to_text.list_custom_words( - customization_id='custid', word_type='all') + speech_to_text.delete_audio('custid', 'hiee') + assert responses.calls[1].response.json() == {"delete response": "done"} - with pytest.raises(KeyError) as keyerror: - speech_to_text.list_custom_words( - customization_id='custid', word_type='badwordtype') - assert 'word type must be all, user, or corpora' in str(keyerror.value) + speech_to_text.get_audio('custid', 'hiee') + assert responses.calls[2].response.json() == {"get response": "done"} - assert len(responses.calls) == 9 + speech_to_text.list_audio('custid') + assert 
responses.calls[3].response.json() == {"get response all": "done"} diff --git a/watson_developer_cloud/speech_to_text_v1.py b/watson_developer_cloud/speech_to_text_v1.py index 214592aa0..69a2d76c1 100644 --- a/watson_developer_cloud/speech_to_text_v1.py +++ b/watson_developer_cloud/speech_to_text_v1.py @@ -1,4 +1,6 @@ -# Copyright 2016 IBM All Rights Reserved. +# coding: utf-8 + +# Copyright 2018 IBM All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,234 +14,3115 @@ # See the License for the specific language governing permissions and # limitations under the License. """ -The v1 Speech to Text service -(https://www.ibm.com/watson/developercloud/speech-to-text.html) +### Service Overview + The service transcribes speech from various languages and audio formats to text with low +latency. The service supports transcription of the following languages: Brazilian +Portuguese, French, Japanese, Mandarin Chinese, Modern Standard Arabic, Spanish, UK +English, and US English. For most languages, the service supports two sampling rates, +broadband and narrowband. """ -from .watson_service import WatsonService +from __future__ import absolute_import + import json +from .watson_service import WatsonService +from .utils import deprecated + +############################################################################## +# Service +############################################################################## class SpeechToTextV1(WatsonService): - default_url = "https://stream.watsonplatform.net/speech-to-text/api" + """The Speech to Text V1 service.""" + + default_url = 'https://stream.watsonplatform.net/speech-to-text/api' + + def __init__(self, url=default_url, username=None, password=None): + """ + Construct a new client for the Speech to Text service. + + :param str url: The base url to use when contacting the service (e.g. 
+ "https://gateway.watsonplatform.net/speech-to-text/api"). + The base url may differ between Bluemix regions. + + :param str username: The username used to authenticate with the service. + Username and password credentials are only required to run your + application locally or outside of Bluemix. When running on + Bluemix, the credentials will be automatically loaded from the + `VCAP_SERVICES` environment variable. + + :param str password: The password used to authenticate with the service. + Username and password credentials are only required to run your + application locally or outside of Bluemix. When running on + Bluemix, the credentials will be automatically loaded from the + `VCAP_SERVICES` environment variable. + + """ + + WatsonService.__init__( + self, + vcap_services_name='speech_to_text', + url=url, + username=username, + password=password, + use_vcap_services=True) + + ######################### + # models + ######################### + + def get_model(self, model_id): + """ + Retrieves information about the model. + + Returns information about a single specified language model that is available for + use with the service. The information includes the name of the model and its + minimum sampling rate in Hertz, among other things. + + :param str model_id: The identifier of the desired model in the form of its `name` from the output of `GET /v1/models`. + :return: A `dict` containing the `SpeechModel` response. + :rtype: dict + """ + if model_id is None: + raise ValueError('model_id must be provided') + url = '/v1/models/{0}'.format(*self._encode_path_vars(model_id)) + response = self.request(method='GET', url=url, accept_json=True) + return response + + def list_models(self): + """ + Retrieves the models available for the service. + + Returns a list of all language models that are available for use with the service. + The information includes the name of the model and its minimum sampling rate in + Hertz, among other things. 
- def __init__(self, url=default_url, **kwargs): - WatsonService.__init__(self, 'speech_to_text', url, **kwargs) + :return: A `dict` containing the `SpeechModels` response. + :rtype: dict + """ + url = '/v1/models' + response = self.request(method='GET', url=url, accept_json=True) + return response + + @deprecated('Use list_models instead.') + def models(self): + return self.list_models + + ######################### + # recognize + ######################### - def recognize(self, audio, content_type, continuous=None, model=None, + def recognize(self, + model=None, customization_id=None, + acoustic_customization_id=None, + customization_weight=None, + version=None, + audio=None, + content_type='audio/basic', inactivity_timeout=None, - keywords=None, keywords_threshold=None, + keywords=None, + keywords_threshold=None, max_alternatives=None, word_alternatives_threshold=None, - word_confidence=None, timestamps=None, interim_results=None, + word_confidence=None, + timestamps=None, profanity_filter=None, smart_formatting=None, - speaker_labels=None, - customization_weight=None): - """ - Returns the recognized text from the audio input - """ - headers = {'content-type': content_type} - params = {'continuous': continuous, - 'inactivity_timeout': inactivity_timeout, - 'keywords': keywords, - 'keywords_threshold': keywords_threshold, - 'max_alternatives': max_alternatives, - 'model': model, - 'customization_id': customization_id, - 'word_alternatives_threshold': word_alternatives_threshold, - 'word_confidence': word_confidence, - 'timestamps': timestamps, - 'interim_results': interim_results, - 'profanity_filter': profanity_filter, - 'smart_formatting': smart_formatting, - 'speaker_labels': speaker_labels, - 'customization_weight': customization_weight} - - return self.request(method='POST', url='/v1/recognize', - headers=headers, - data=audio, params=params, - stream=True, accept_json=True) + speaker_labels=None): + """ + Sends audio for speech recognition in sessionless 
mode. - def models(self): + :param str model: The identifier of the model to be used for the recognition request. (Use `GET /v1/models` for a list of available models.). + :param str customization_id: The GUID of a custom language model that is to be used with the request. The base model of the specified custom language model must match the model specified with the `model` parameter. You must make the request with service credentials created for the instance of the service that owns the custom model. By default, no custom language model is used. + :param str acoustic_customization_id: The GUID of a custom acoustic model that is to be used with the request. The base model of the specified custom acoustic model must match the model specified with the `model` parameter. You must make the request with service credentials created for the instance of the service that owns the custom model. By default, no custom acoustic model is used. + :param float customization_weight: If you specify a `customization_id` with the request, you can use the `customization_weight` parameter to tell the service how much weight to give to words from the custom language model compared to those from the base model for speech recognition. Specify a value between 0.0 and 1.0. Unless a different customization weight was specified for the custom model when it was trained, the default value is 0.3. A customization weight that you specify overrides a weight that was specified when the custom model was trained. The default value yields the best performance in general. Assign a higher value if your audio makes frequent use of OOV words from the custom model. Use caution when setting the weight: a higher value can improve the accuracy of phrases from the custom model's domain, but it can negatively affect performance on non-domain phrases. + :param str version: The version of the specified base `model` that is to be used for speech recognition. 
Multiple versions of a base model can exist when a model is updated for internal improvements. The parameter is intended primarily for use with custom models that have been upgraded for a new base model. The default value depends on whether the parameter is used with or without a custom model. For more information, see [Base model version](https://console.bluemix.net/docs/services/speech-to-text/input.html#version). + :param str audio: NON-MULTIPART ONLY: Audio to transcribe in the format specified by the `Content-Type` header. **Required for a non-multipart request.**. + :param str content_type: The type of the input: audio/basic, audio/flac, audio/l16, audio/mp3, audio/mpeg, audio/mulaw, audio/ogg, audio/ogg;codecs=opus, audio/ogg;codecs=vorbis, audio/wav, audio/webm, audio/webm;codecs=opus, audio/webm;codecs=vorbis, or multipart/form-data. + :param int inactivity_timeout: NON-MULTIPART ONLY: The time in seconds after which, if only silence (no speech) is detected in submitted audio, the connection is closed with a 400 error. Useful for stopping audio submission from a live microphone when a user simply walks away. Use `-1` for infinity. + :param list[str] keywords: NON-MULTIPART ONLY: Array of keyword strings to spot in the audio. Each keyword string can include one or more tokens. Keywords are spotted only in the final hypothesis, not in interim results. If you specify any keywords, you must also specify a keywords threshold. Omit the parameter or specify an empty array if you do not need to spot keywords. + :param float keywords_threshold: NON-MULTIPART ONLY: Confidence value that is the lower bound for spotting a keyword. A word is considered to match a keyword if its confidence is greater than or equal to the threshold. Specify a probability between 0 and 1 inclusive. No keyword spotting is performed if you omit the parameter. If you specify a threshold, you must also specify one or more keywords. 
+ :param int max_alternatives: NON-MULTIPART ONLY: Maximum number of alternative transcripts to be returned. By default, a single transcription is returned. + :param float word_alternatives_threshold: NON-MULTIPART ONLY: Confidence value that is the lower bound for identifying a hypothesis as a possible word alternative (also known as \"Confusion Networks\"). An alternative word is considered if its confidence is greater than or equal to the threshold. Specify a probability between 0 and 1 inclusive. No alternative words are computed if you omit the parameter. + :param bool word_confidence: NON-MULTIPART ONLY: If `true`, confidence measure per word is returned. + :param bool timestamps: NON-MULTIPART ONLY: If `true`, time alignment for each word is returned. + :param bool profanity_filter: NON-MULTIPART ONLY: If `true` (the default), filters profanity from all output except for keyword results by replacing inappropriate words with a series of asterisks. Set the parameter to `false` to return results with no censoring. Applies to US English transcription only. + :param bool smart_formatting: NON-MULTIPART ONLY: If `true`, converts dates, times, series of digits and numbers, phone numbers, currency values, and Internet addresses into more readable, conventional representations in the final transcript of a recognition request. If `false` (the default), no formatting is performed. Applies to US English transcription only. + :param bool speaker_labels: NON-MULTIPART ONLY: Indicates whether labels that identify which words were spoken by which participants in a multi-person exchange are to be included in the response. The default is `false`; no speaker labels are returned. Setting `speaker_labels` to `true` forces the `timestamps` parameter to be `true`, regardless of whether you specify `false` for the parameter. To determine whether a language model supports speaker labels, use the `GET /v1/models` method and check that the attribute `speaker_labels` is set to `true`. 
You can also refer to [Speaker labels](https://console.bluemix.net/docs/services/speech-to-text/output.html#speaker_labels). + :return: A `dict` containing the `SpeechRecognitionResults` response. + :rtype: dict + """ + headers = {'Content-Type': content_type} + params = { + 'model': model, + 'customization_id': customization_id, + 'acoustic_customization_id': acoustic_customization_id, + 'customization_weight': customization_weight, + 'version': version, + 'inactivity_timeout': inactivity_timeout, + 'keywords': self._convert_list(keywords), + 'keywords_threshold': keywords_threshold, + 'max_alternatives': max_alternatives, + 'word_alternatives_threshold': word_alternatives_threshold, + 'word_confidence': word_confidence, + 'timestamps': timestamps, + 'profanity_filter': profanity_filter, + 'smart_formatting': smart_formatting, + 'speaker_labels': speaker_labels + } + data = audio + url = '/v1/recognize' + response = self.request( + method='POST', + url=url, + headers=headers, + params=params, + data=data, + accept_json=True) + return response + + ######################### + # asynchronous + ######################### + + def check_job(self, id): """ - Returns the list of available models to use with recognize + Checks the status of the specified asynchronous job. + + :param str id: The ID of the job whose status is to be checked. + :return: A `dict` containing the `RecognitionJob` response. + :rtype: dict """ - return self.request(method='GET', url='/v1/models', accept_json=True) + if id is None: + raise ValueError('id must be provided') + url = '/v1/recognitions/{0}'.format(*self._encode_path_vars(id)) + response = self.request(method='GET', url=url, accept_json=True) + return response - def get_model(self, model_id): + def check_jobs(self): + """ + Checks the status of all asynchronous jobs. + + :return: A `dict` containing the `RecognitionJobs` response. 
+ :rtype: dict + """ + url = '/v1/recognitions' + response = self.request(method='GET', url=url, accept_json=True) + return response + + def create_job(self, + audio, + content_type='audio/basic', + callback_url=None, + events=None, + user_token=None, + results_ttl=None, + model=None, + customization_id=None, + acoustic_customization_id=None, + customization_weight=None, + version=None, + inactivity_timeout=None, + keywords=None, + keywords_threshold=None, + max_alternatives=None, + word_alternatives_threshold=None, + word_confidence=None, + timestamps=None, + profanity_filter=None, + smart_formatting=None, + speaker_labels=None): + """ + Creates a job for an asynchronous recognition request. + + :param str audio: Audio to transcribe in the format specified by the `Content-Type` header. + :param str content_type: The type of the input: audio/basic, audio/flac, audio/l16, audio/mp3, audio/mpeg, audio/mulaw, audio/ogg, audio/ogg;codecs=opus, audio/ogg;codecs=vorbis, audio/wav, audio/webm, audio/webm;codecs=opus, or audio/webm;codecs=vorbis. + :param str callback_url: A URL to which callback notifications are to be sent. The URL must already be successfully white-listed by using the `POST /v1/register_callback` method. Omit the parameter to poll the service for job completion and results. You can include the same callback URL with any number of job creation requests. Use the `user_token` query parameter to specify a unique user-specified string with each job to differentiate the callback notifications for the jobs. + :param str events: If the job includes a callback URL, a comma-separated list of notification events to which to subscribe. Valid events are: `recognitions.started` generates a callback notification when the service begins to process the job. `recognitions.completed` generates a callback notification when the job is complete; you must use the `GET /v1/recognitions/{id}` method to retrieve the results before they time out or are deleted. 
`recognitions.completed_with_results` generates a callback notification when the job is complete; the notification includes the results of the request. `recognitions.failed` generates a callback notification if the service experiences an error while processing the job. Omit the parameter to subscribe to the default events: `recognitions.started`, `recognitions.completed`, and `recognitions.failed`. The `recognitions.completed` and `recognitions.completed_with_results` events are incompatible; you can specify only one of the two events. If the job does not include a callback URL, omit the parameter. + :param str user_token: If the job includes a callback URL, a user-specified string that the service is to include with each callback notification for the job; the token allows the user to maintain an internal mapping between jobs and notification events. If the job does not include a callback URL, omit the parameter. + :param int results_ttl: The number of minutes for which the results are to be available after the job has finished. If not delivered via a callback, the results must be retrieved within this time. Omit the parameter to use a time to live of one week. The parameter is valid with or without a callback URL. + :param str model: The identifier of the model to be used for the recognition request. (Use `GET /v1/models` for a list of available models.) + :param str customization_id: The GUID of a custom language model that is to be used with the request. The base model of the specified custom language model must match the model specified with the `model` parameter. You must make the request with service credentials created for the instance of the service that owns the custom model. By default, no custom language model is used. + :param str acoustic_customization_id: The GUID of a custom acoustic model that is to be used with the request. The base model of the specified custom acoustic model must match the model specified with the `model` parameter.
You must make the request with service credentials created for the instance of the service that owns the custom model. By default, no custom acoustic model is used. + :param float customization_weight: If you specify a `customization_id` with the request, you can use the `customization_weight` parameter to tell the service how much weight to give to words from the custom language model compared to those from the base model for speech recognition. Specify a value between 0.0 and 1.0. Unless a different customization weight was specified for the custom model when it was trained, the default value is 0.3. A customization weight that you specify overrides a weight that was specified when the custom model was trained. The default value yields the best performance in general. Assign a higher value if your audio makes frequent use of OOV words from the custom model. Use caution when setting the weight: a higher value can improve the accuracy of phrases from the custom model's domain, but it can negatively affect performance on non-domain phrases. + :param str version: The version of the specified base `model` that is to be used with the request. Multiple versions of a base model can exist when a model is updated for internal improvements. The parameter is intended primarily for use with custom models that have been upgraded for a new base model. The default value depends on whether the parameter is used with or without a custom model. For more information, see [Base model version](https://console.bluemix.net/docs/services/speech-to-text/input.html#version). + :param int inactivity_timeout: The time in seconds after which, if only silence (no speech) is detected in submitted audio, the connection is closed with a 400 error. Useful for stopping audio submission from a live microphone when a user simply walks away. Use `-1` for infinity. + :param list[str] keywords: Array of keyword strings to spot in the audio. Each keyword string can include one or more tokens. 
Keywords are spotted only in the final hypothesis, not in interim results. If you specify any keywords, you must also specify a keywords threshold. Omit the parameter or specify an empty array if you do not need to spot keywords. + :param float keywords_threshold: Confidence value that is the lower bound for spotting a keyword. A word is considered to match a keyword if its confidence is greater than or equal to the threshold. Specify a probability between 0 and 1 inclusive. No keyword spotting is performed if you omit the parameter. If you specify a threshold, you must also specify one or more keywords. + :param int max_alternatives: Maximum number of alternative transcripts to be returned. By default, a single transcription is returned. + :param float word_alternatives_threshold: Confidence value that is the lower bound for identifying a hypothesis as a possible word alternative (also known as \"Confusion Networks\"). An alternative word is considered if its confidence is greater than or equal to the threshold. Specify a probability between 0 and 1 inclusive. No alternative words are computed if you omit the parameter. + :param bool word_confidence: If `true`, confidence measure per word is returned. + :param bool timestamps: If `true`, time alignment for each word is returned. + :param bool profanity_filter: If `true` (the default), filters profanity from all output except for keyword results by replacing inappropriate words with a series of asterisks. Set the parameter to `false` to return results with no censoring. Applies to US English transcription only. + :param bool smart_formatting: If `true`, converts dates, times, series of digits and numbers, phone numbers, currency values, and Internet addresses into more readable, conventional representations in the final transcript of a recognition request. If `false` (the default), no formatting is performed. Applies to US English transcription only. 
+ :param bool speaker_labels: Indicates whether labels that identify which words were spoken by which participants in a multi-person exchange are to be included in the response. The default is `false`; no speaker labels are returned. Setting `speaker_labels` to `true` forces the `timestamps` parameter to be `true`, regardless of whether you specify `false` for the parameter. To determine whether a language model supports speaker labels, use the `GET /v1/models` method and check that the attribute `speaker_labels` is set to `true`. You can also refer to [Speaker labels](https://console.bluemix.net/docs/services/speech-to-text/output.html#speaker_labels). + :return: A `dict` containing the `RecognitionJob` response. + :rtype: dict + """ + if audio is None: + raise ValueError('audio must be provided') + if content_type is None: + raise ValueError('content_type must be provided') + headers = {'Content-Type': content_type} + params = { + 'callback_url': callback_url, + 'events': events, + 'user_token': user_token, + 'results_ttl': results_ttl, + 'model': model, + 'customization_id': customization_id, + 'acoustic_customization_id': acoustic_customization_id, + 'customization_weight': customization_weight, + 'version': version, + 'inactivity_timeout': inactivity_timeout, + 'keywords': self._convert_list(keywords), + 'keywords_threshold': keywords_threshold, + 'max_alternatives': max_alternatives, + 'word_alternatives_threshold': word_alternatives_threshold, + 'word_confidence': word_confidence, + 'timestamps': timestamps, + 'profanity_filter': profanity_filter, + 'smart_formatting': smart_formatting, + 'speaker_labels': speaker_labels + } + data = audio + url = '/v1/recognitions' + response = self.request( + method='POST', + url=url, + headers=headers, + params=params, + data=data, + accept_json=True) + return response + + def delete_job(self, id): + """ + Deletes the specified asynchronous job. + + Deletes the specified job. 
You cannot delete a job that the service is actively + processing. Once you delete a job, its results are no longer available. The + service automatically deletes a job and its results when the time to live for the + results expires. You must submit the request with the service credentials of the + user who created the job. + + :param str id: The ID of the job that is to be deleted. + :rtype: None + """ + if id is None: + raise ValueError('id must be provided') + url = '/v1/recognitions/{0}'.format(*self._encode_path_vars(id)) + self.request(method='DELETE', url=url, accept_json=True) + return None + + def register_callback(self, callback_url, user_secret=None): """ - :param model_id: The identifier of the desired model - :return: A single instance of a Model object with results for the - specified model. + Registers a callback URL for use with the asynchronous interface. + + :param str callback_url: An HTTP or HTTPS URL to which callback notifications are to be sent. To be white-listed, the URL must successfully echo the challenge string during URL verification. During verification, the client can also check the signature that the service sends in the `X-Callback-Signature` header to verify the origin of the request. + :param str user_secret: A user-specified string that the service uses to generate the HMAC-SHA1 signature that it sends via the `X-Callback-Signature` header. The service includes the header during URL verification and with every notification sent to the callback URL. It calculates the signature over the payload of the notification. If you omit the parameter, the service does not send the header. + :return: A `dict` containing the `RegisterStatus` response. 
+ :rtype: dict + """ + if callback_url is None: + raise ValueError('callback_url must be provided') + params = {'callback_url': callback_url, 'user_secret': user_secret} + url = '/v1/register_callback' + response = self.request( + method='POST', url=url, params=params, accept_json=True) + return response + + def unregister_callback(self, callback_url): + """ + Removes the registration for an asynchronous callback URL. + + Unregisters a callback URL that was previously white-listed with a `POST + register_callback` request for use with the asynchronous interface. Once + unregistered, the URL can no longer be used with asynchronous recognition + requests. + + :param str callback_url: The callback URL that is to be unregistered. + :rtype: None + """ + if callback_url is None: + raise ValueError('callback_url must be provided') + params = {'callback_url': callback_url} + url = '/v1/unregister_callback' + self.request(method='POST', url=url, params=params, accept_json=True) + return None + + ######################### + # customLanguageModels + ######################### + + def create_language_model(self, + name, + base_model_name, + dialect=None, + description=None): + """ + Creates a custom language model. + + Creates a new custom language model for a specified base model. The custom + language model can be used only with the base model for which it is created. The + model is owned by the instance of the service whose credentials are used to create + it. + + :param str name: A user-defined name for the new custom language model. Use a name that is unique among all custom language models that you own. Use a localized name that matches the language of the custom model. Use a name that describes the domain of the custom model, such as `Medical custom model` or `Legal custom model`. + :param str base_model_name: The name of the base language model that is to be customized by the new custom language model. 
The new custom model can be used only with the base model that it customizes. To determine whether a base model supports language model customization, request information about the base model and check that the attribute `custom_language_model` is set to `true`, or refer to [Language support for customization](https://console.bluemix.net/docs/services/speech-to-text/custom.html#languageSupport). + :param str dialect: The dialect of the specified language that is to be used with the custom language model. The parameter is meaningful only for Spanish models, for which the service creates a custom language model that is suited for speech in one of the following dialects: * `es-ES` for Castilian Spanish (the default) * `es-LA` for Latin American Spanish * `es-US` for North American (Mexican) Spanish A specified dialect must be valid for the base model. By default, the dialect matches the language of the base model; for example, `en-US` for either of the US English language models. + :param str description: A description of the new custom language model. Use a localized description that matches the language of the custom model. + :return: A `dict` containing the `LanguageModel` response. 
+ :rtype: dict """ - return self.request(method='GET', - url='/v1/models/{0}'.format(model_id), - accept_json=True) + if name is None: + raise ValueError('name must be provided') + if base_model_name is None: + raise ValueError('base_model_name must be provided') + data = { + 'name': name, + 'base_model_name': base_model_name, + 'dialect': dialect, + 'description': description + } + url = '/v1/customizations' + response = self.request( + method='POST', + url=url, + json=data, + accept_json=True) + return response - def create_custom_model(self, name, description="", + @deprecated('Use create_language_model() instead.') + def create_custom_model(self, + name, + description="", base_model="en-US_BroadbandModel"): - json_body = json.dumps({'name': name, 'description': description, - 'base_model_name': base_model}) - return self.request(method='POST', url='/v1/customizations', - headers={'content-type': 'application/json'}, - data=json_body, accept_json=True) + return self.create_language_model(name, base_model, description=description) - def train_custom_model(self, customization_id, - customization_weight=None, - word_type=None): + def delete_language_model(self, customization_id): + """ + Deletes a custom language model. + + Deletes an existing custom language model. The custom model cannot be deleted if + another request, such as adding a corpus to the model, is currently being + processed. You must use credentials for the instance of the service that owns a + model to delete it. + + :param str customization_id: The GUID of the custom language model that is to be deleted. You must make the request with service credentials created for the instance of the service that owns the custom model. 
+ :rtype: None """ - Trains a custom language model + if customization_id is None: + raise ValueError('customization_id must be provided') + url = '/v1/customizations/{0}'.format( + *self._encode_path_vars(customization_id)) + self.request(method='DELETE', url=url, accept_json=True) + return None + + @deprecated('Use delete_language_model() instead.') + def delete_custom_model(self, modelid): + return self.delete_language_model(modelid) + + def get_language_model(self, customization_id): """ - params = {'customization_weight': customization_weight, - 'word_type': word_type} + Lists information about a custom language model. - return self.request(method='POST', - url=('/v1/customizations/{0}/train' - .format(customization_id)), params=params, - accept_json=True) + Lists information about a specified custom language model. You must use + credentials for the instance of the service that owns a model to list information + about it. - def list_custom_models(self): - return self.request(method='GET', url='/v1/customizations', - accept_json=True) + :param str customization_id: The GUID of the custom language model for which information is to be returned. You must make the request with service credentials created for the instance of the service that owns the custom model. + :return: A `dict` containing the `LanguageModel` response. 
+ :rtype: dict + """ + if customization_id is None: + raise ValueError('customization_id must be provided') + url = '/v1/customizations/{0}'.format( + *self._encode_path_vars(customization_id)) + response = self.request(method='GET', url=url, accept_json=True) + return response + @deprecated('Use get_language_model() instead.') def get_custom_model(self, modelid): - return self.request(method='GET', - url='/v1/customizations/{0}'.format(modelid), - accept_json=True) + return self.get_language_model(modelid) - def delete_custom_model(self, modelid): - return self.request(method='DELETE', - url='/v1/customizations/{0}'.format(modelid), - accept_json=True) + def list_language_models(self, language=None): + """ + Lists information about all custom language models. - def list_corpora(self, customization_id): - url = '/v1/customizations/{0}/corpora' - return self.request(method='GET', - url=url.format(customization_id), - accept_json=True) + Lists information about all custom language models that are owned by an instance + of the service. Use the `language` parameter to see all custom language models for + the specified language; omit the parameter to see all custom language models for + all languages. You must use credentials for the instance of the service that owns + a model to list information about it. + + :param str language: The identifier of the language for which custom language models are to be returned (for example, `en-US`). Omit the parameter to see all custom language models owned by the requesting service credentials. + :return: A `dict` containing the `LanguageModels` response. 
+ :rtype: dict + """ + params = {'language': language} + url = '/v1/customizations' + response = self.request( + method='GET', url=url, params=params, accept_json=True) + return response + + @deprecated('Use list_language_models() instead.') + def list_custom_models(self): + return self.list_language_models() + + def reset_language_model(self, customization_id): + """ + Resets a custom language model. + + Resets a custom language model by removing all corpora and words from the model. + Resetting a custom language model initializes the model to its state when it was + first created. Metadata such as the name and language of the model are preserved, + but the model's words resource is removed and must be re-created. You must use + credentials for the instance of the service that owns a model to reset it. + + :param str customization_id: The GUID of the custom language model that is to be reset. You must make the request with service credentials created for the instance of the service that owns the custom model. + :rtype: None + """ + if customization_id is None: + raise ValueError('customization_id must be provided') + url = '/v1/customizations/{0}/reset'.format( + *self._encode_path_vars(customization_id)) + self.request(method='POST', url=url, accept_json=True) + return None + + def train_language_model(self, + customization_id, + word_type_to_add=None, + customization_weight=None): + """ + Trains a custom language model. + + :param str customization_id: The GUID of the custom language model that is to be trained. You must make the request with service credentials created for the instance of the service that owns the custom model. + :param str word_type_to_add: The type of words from the custom language model's words resource on which to train the model: * `all` (the default) trains the model on all new words, regardless of whether they were extracted from corpora or were added or modified by the user. 
* `user` trains the model only on new words that were added or modified by the user; the model is not trained on new words extracted from corpora. + :param float customization_weight: Specifies a customization weight for the custom language model. The customization weight tells the service how much weight to give to words from the custom language model compared to those from the base model for speech recognition. Specify a value between 0.0 and 1.0. The default value is 0.3. The default value yields the best performance in general. Assign a higher value if your audio makes frequent use of OOV words from the custom model. Use caution when setting the weight: a higher value can improve the accuracy of phrases from the custom model's domain, but it can negatively affect performance on non-domain phrases. The value that you assign is used for all recognition requests that use the model. You can override it for any recognition request by specifying a customization weight for that request. + :rtype: None + """ + if customization_id is None: + raise ValueError('customization_id must be provided') + params = { + 'word_type_to_add': word_type_to_add, + 'customization_weight': customization_weight + } + url = '/v1/customizations/{0}/train'.format( + *self._encode_path_vars(customization_id)) + self.request(method='POST', url=url, params=params, accept_json=True) + return None + + @deprecated('Use train_language_model() instead.') + def train_custom_model(self, + customization_id, + customization_weight=None, + word_type=None): + self.train_language_model(customization_id, word_type, + customization_weight) + + def upgrade_language_model(self, customization_id): + """ + Upgrades a custom language model. + + :param str customization_id: The GUID of the custom language model that is to be upgraded. You must make the request with service credentials created for the instance of the service that owns the custom model. 
+ :rtype: None + """ + if customization_id is None: + raise ValueError('customization_id must be provided') + url = '/v1/customizations/{0}/upgrade_model'.format( + *self._encode_path_vars(customization_id)) + self.request(method='POST', url=url, accept_json=True) + return None + + ######################### + # customCorpora + ######################### def add_corpus(self, customization_id, corpus_name, - file_data, - allow_overwrite=None): - - url = '/v1/customizations/{0}/corpora/{1}' + corpus_file, + allow_overwrite=None, + corpus_file_content_type=None, + corpus_filename=None): + """ + Adds a corpus text file to a custom language model. - if allow_overwrite is None: - allow_overwrite = False + :param str customization_id: The GUID of the custom language model to which a corpus is to be added. You must make the request with service credentials created for the instance of the service that owns the custom model. + :param str corpus_name: The name of the corpus that is to be added to the custom language model. The name cannot contain spaces and cannot be the string `user`, which is reserved by the service to denote custom words added or modified by the user. Use a localized name that matches the language of the custom model. + :param file corpus_file: A plain text file that contains the training data for the corpus. Encode the file in UTF-8 if it contains non-ASCII characters; the service assumes UTF-8 encoding if it encounters non-ASCII characters. With cURL, use the `--data-binary` option to upload the file for the request. + :param bool allow_overwrite: Indicates whether the specified corpus is to overwrite an existing corpus with the same name. If a corpus with the same name already exists, the request fails unless `allow_overwrite` is set to `true`; by default, the parameter is `false`. The parameter has no effect if a corpus with the same name does not already exist. + :param str corpus_file_content_type: The content type of corpus_file. 
+ :param str corpus_filename: The filename for corpus_file. + :rtype: None + """ + if customization_id is None: + raise ValueError('customization_id must be provided') + if corpus_name is None: + raise ValueError('corpus_name must be provided') + if corpus_file is None: + raise ValueError('corpus_file must be provided') + params = {'allow_overwrite': allow_overwrite} + if not corpus_filename and hasattr(corpus_file, 'name'): + corpus_filename = corpus_file.name + mime_type = corpus_file_content_type or 'application/octet-stream' + corpus_file_tuple = (corpus_filename, corpus_file, mime_type) + url = '/v1/customizations/{0}/corpora/{1}'.format( + *self._encode_path_vars(customization_id, corpus_name)) + self.request( + method='POST', + url=url, + params=params, + files={'corpus_file': corpus_file_tuple}, + accept_json=True) + return None - headers = {'Content-Type': 'application/octet-stream'} + def delete_corpus(self, customization_id, corpus_name): + """ + Deletes a corpus from a custom language model. - return self.request(method='POST', - url=url.format(customization_id, - corpus_name), - headers=headers, - data=file_data, - params={'allow_overwrite': allow_overwrite}, - accept_json=True) + :param str customization_id: The GUID of the custom language model from which a corpus is to be deleted. You must make the request with service credentials created for the instance of the service that owns the custom model. + :param str corpus_name: The name of the corpus that is to be deleted from the custom language model. 
+ :rtype: None + """ + if customization_id is None: + raise ValueError('customization_id must be provided') + if corpus_name is None: + raise ValueError('corpus_name must be provided') + url = '/v1/customizations/{0}/corpora/{1}'.format( + *self._encode_path_vars(customization_id, corpus_name)) + self.request(method='DELETE', url=url, accept_json=True) + return None def get_corpus(self, customization_id, corpus_name): - url = '/v1/customizations/{0}/corpora/{1}' - return self.request(method='GET', - url=url.format(customization_id, - corpus_name), - accept_json=True) + """ + Lists information about a corpus for a custom language model. - def delete_corpus(self, customization_id, corpus_name): - url = '/v1/customizations/{0}/corpora/{1}' - return self.request(method='DELETE', - url=url.format(customization_id, - corpus_name), - accept_json=True) - - class CustomWord(object): - def __init__(self, word=None, sounds_like=None, display_as=None): - self._word = word - self._sounds_like = sounds_like - self._display_as = display_as - - @property - def word(self): - return self._word - - @property - def sounds_like(self): - return self._sounds_like - - @property - def display_as(self): - return self._display_as - - def __dict__(self): - return {'word': self.word, - 'sounds_like': self.sounds_like, - 'display_as': self.display_as} + Lists information about a corpus from a custom language model. The information + includes the total number of words and out-of-vocabulary (OOV) words, name, and + status of the corpus. You must use credentials for the instance of the service + that owns a model to list its corpora. 
- def add_custom_words(self, customization_id, custom_words): - url = '/v1/customizations/{0}/words' - payload = {'words': [x.__dict__() for x in custom_words]} - return self.request(method='POST', - url=url.format(customization_id), - data=json.dumps(payload), - headers={'content-type': 'application/json'}, - accept_json=True) + :param str customization_id: The GUID of the custom language model for which a corpus is to be listed. You must make the request with service credentials created for the instance of the service that owns the custom model. + :param str corpus_name: The name of the corpus about which information is to be listed. + :return: A `dict` containing the `Corpus` response. + :rtype: dict + """ + if customization_id is None: + raise ValueError('customization_id must be provided') + if corpus_name is None: + raise ValueError('corpus_name must be provided') + url = '/v1/customizations/{0}/corpora/{1}'.format( + *self._encode_path_vars(customization_id, corpus_name)) + response = self.request(method='GET', url=url, accept_json=True) + return response + + def list_corpora(self, customization_id): + """ + Lists information about all corpora for a custom language model. + + Lists information about all corpora from a custom language model. The information + includes the total number of words and out-of-vocabulary (OOV) words, name, and + status of each corpus. You must use credentials for the instance of the service + that owns a model to list its corpora. + + :param str customization_id: The GUID of the custom language model for which corpora are to be listed. You must make the request with service credentials created for the instance of the service that owns the custom model. + :return: A `dict` containing the `Corpora` response. 
+ :rtype: dict + """ + if customization_id is None: + raise ValueError('customization_id must be provided') + url = '/v1/customizations/{0}/corpora'.format( + *self._encode_path_vars(customization_id)) + response = self.request(method='GET', url=url, accept_json=True) + return response + + ######################### + # customWords + ######################### + + def add_word(self, + customization_id, + word_name, + sounds_like=None, + display_as=None): + """ + Adds a custom word to a custom language model. + + :param str customization_id: The GUID of the custom language model to which a word is to be added. You must make the request with service credentials created for the instance of the service that owns the custom model. + :param str word_name: The custom word that is to be added to or updated in the custom model. Do not include spaces in the word. Use a - (dash) or _ (underscore) to connect the tokens of compound words. + :param list[str] sounds_like: An array of sounds-like pronunciations for the custom word. Specify how words that are difficult to pronounce, foreign words, acronyms, and so on can be pronounced by users. For a word that is not in the service's base vocabulary, omit the parameter to have the service automatically generate a sounds-like pronunciation for the word. For a word that is in the service's base vocabulary, use the parameter to specify additional pronunciations for the word. You cannot override the default pronunciation of a word; pronunciations you add augment the pronunciation from the base vocabulary. A word can have at most five sounds-like pronunciations, and a pronunciation can include at most 40 characters not including spaces. + :param str display_as: An alternative spelling for the custom word when it appears in a transcript. Use the parameter when you want the word to have a spelling that is different from its usual representation or from its spelling in corpora training data. 
+ :rtype: None + """ + if customization_id is None: + raise ValueError('customization_id must be provided') + if word_name is None: + raise ValueError('word_name must be provided') + data = { + 'word': word_name, + 'sounds_like': sounds_like, + 'display_as': display_as + } + url = '/v1/customizations/{0}/words/{1}'.format( + *self._encode_path_vars(customization_id, word_name)) + self.request( + method='PUT', + url=url, + json=data, + accept_json=True) + return None + @deprecated('Use add_word instead.') def add_custom_word(self, customization_id, custom_word): - url = '/v1/customizations/{0}/words/{1}' + return self.add_word(customization_id, custom_word) + + def add_words(self, customization_id, words): + """ + Adds one or more custom words to a custom language model. - custom_word_fragment = {'sounds_like': custom_word.sounds_like, - 'display_as': custom_word.display_as} - return self.request(method='PUT', - url=url.format(customization_id, - custom_word.word), - data=json.dumps(custom_word_fragment), - headers={'content-type': 'application/json'}, - accept_json=True) + :param str customization_id: The GUID of the custom language model to which words are to be added. You must make the request with service credentials created for the instance of the service that owns the custom model. + :param list[CustomWord] words: An array of objects that provides information about each custom word that is to be added to or updated in the custom language model. 
+ :rtype: None + """ + if customization_id is None: + raise ValueError('customization_id must be provided') + if words is None: + raise ValueError('words must be provided') + words = [self._convert_model(x) for x in words] + data = {'words': words} + url = '/v1/customizations/{0}/words'.format( + *self._encode_path_vars(customization_id)) + self.request( + method='POST', + url=url, + json=data, + accept_json=True) + return None - def list_custom_words(self, customization_id, word_type=None, sort=None): - url = '/v1/customizations/{0}/words' - qs = {} - - if word_type: - if word_type in ['all', 'user', 'corpora']: - qs['word_type'] = word_type - else: - raise KeyError('word type must be all, user, or corpora') - - if sort: - if sort in ['alphabetical', 'count']: - qs['sort'] = sort - else: - raise KeyError('sort must be alphabetical or count') - - return self.request(method='GET', - url=url.format(customization_id), - params=qs, - accept_json=True) + @deprecated('Use add_words() instead.') + def add_custom_words(self, customization_id, custom_words): + return self.add_words(customization_id, custom_words) + + def delete_word(self, customization_id, word_name): + """ + Deletes a custom word from a custom language model. + + :param str customization_id: The GUID of the custom language model from which a word is to be deleted. You must make the request with service credentials created for the instance of the service that owns the custom model. + :param str word_name: The custom word that is to be deleted from the custom language model. 
+ :rtype: None + """ + if customization_id is None: + raise ValueError('customization_id must be provided') + if word_name is None: + raise ValueError('word_name must be provided') + url = '/v1/customizations/{0}/words/{1}'.format( + *self._encode_path_vars(customization_id, word_name)) + self.request(method='DELETE', url=url, accept_json=True) + return None + + @deprecated('Use delete_word() instead.') + def delete_custom_word(self, customization_id, custom_word): + return self.delete_word(customization_id, custom_word) + + def get_word(self, customization_id, word_name): + """ + Lists a custom word from a custom language model. + Lists information about a custom word from a custom language model. You must use + credentials for the instance of the service that owns a model to query information + about its words. + + :param str customization_id: The GUID of the custom language model from which a word is to be queried. You must make the request with service credentials created for the instance of the service that owns the custom model. + :param str word_name: The custom word that is to be queried from the custom language model. + :return: A `dict` containing the `Word` response. + :rtype: dict + """ + if customization_id is None: + raise ValueError('customization_id must be provided') + if word_name is None: + raise ValueError('word_name must be provided') + url = '/v1/customizations/{0}/words/{1}'.format( + *self._encode_path_vars(customization_id, word_name)) + response = self.request(method='GET', url=url, accept_json=True) + return response + + @deprecated('Use get_word() instead.') def get_custom_word(self, customization_id, custom_word): - url = '/v1/customizations/{0}/words/{1}' - word = None - if isinstance(custom_word, str): - word = custom_word + return self.get_word(customization_id, custom_word) + + def list_words(self, customization_id, word_type=None, sort=None): + """ + Lists all custom words from a custom language model. 
+ + Lists information about custom words from a custom language model. You can list + all words from the custom model's words resource, only custom words that were + added or modified by the user, or only out-of-vocabulary (OOV) words that were + extracted from corpora. You can also indicate the order in which the service is to + return words; by default, words are listed in ascending alphabetical order. You + must use credentials for the instance of the service that owns a model to query + information about its words. + + :param str customization_id: The GUID of the custom language model from which words are to be queried. You must make the request with service credentials created for the instance of the service that owns the custom model. + :param str word_type: The type of words to be listed from the custom language model's words resource: * `all` (the default) shows all words. * `user` shows only custom words that were added or modified by the user. * `corpora` shows only OOV that were extracted from corpora. + :param str sort: Indicates the order in which the words are to be listed, `alphabetical` or by `count`. You can prepend an optional `+` or `-` to an argument to indicate whether the results are to be sorted in ascending or descending order. By default, words are sorted in ascending alphabetical order. For alphabetical ordering, the lexicographical precedence is numeric values, uppercase letters, and lowercase letters. For count ordering, values with the same count are ordered alphabetically. With cURL, URL encode the `+` symbol as `%2B`. + :return: A `dict` containing the `Words` response. 
+ :rtype: dict + """ + if customization_id is None: + raise ValueError('customization_id must be provided') + params = {'word_type': word_type, 'sort': sort} + url = '/v1/customizations/{0}/words'.format( + *self._encode_path_vars(customization_id)) + response = self.request( + method='GET', url=url, params=params, accept_json=True) + return response + + def list_custom_words(self, customization_id, word_type=None, sort=None): + return self.list_words(customization_id, word_type, sort) + + ######################### + # customAcousticModels + ######################### + + def create_acoustic_model(self, + name, + base_model_name, + description=None): + """ + Creates a custom acoustic model. + + Creates a new custom acoustic model for a specified base model. The custom + acoustic model can be used only with the base model for which it is created. The + model is owned by the instance of the service whose credentials are used to create + it. + + :param str name: A user-defined name for the new custom acoustic model. Use a name that is unique among all custom acoustic models that you own. Use a localized name that matches the language of the custom model. Use a name that describes the acoustic environment of the custom model, such as `Mobile custom model` or `Noisy car custom model`. + :param str base_model_name: The name of the base language model that is to be customized by the new custom acoustic model. The new custom model can be used only with the base model that it customizes. To determine whether a base model supports acoustic model customization, refer to [Language support for customization](https://console.bluemix.net/docs/services/speech-to-text/custom.html#languageSupport). + :param str description: A description of the new custom acoustic model. Use a localized description that matches the language of the custom model. + :return: A `dict` containing the `AcousticModel` response. 
+ :rtype: dict + """ + if name is None: + raise ValueError('name must be provided') + if base_model_name is None: + raise ValueError('base_model_name must be provided') + data = { + 'name': name, + 'base_model_name': base_model_name, + 'description': description + } + url = '/v1/acoustic_customizations' + response = self.request( + method='POST', + url=url, + json=data, + accept_json=True) + return response + + def delete_acoustic_model(self, customization_id): + """ + Deletes a custom acoustic model. + + Deletes an existing custom acoustic model. The custom model cannot be deleted if + another request, such as adding an audio resource to the model, is currently being + processed. You must use credentials for the instance of the service that owns a + model to delete it. + + :param str customization_id: The GUID of the custom acoustic model that is to be deleted. You must make the request with service credentials created for the instance of the service that owns the custom model. + :rtype: None + """ + if customization_id is None: + raise ValueError('customization_id must be provided') + url = '/v1/acoustic_customizations/{0}'.format( + *self._encode_path_vars(customization_id)) + self.request(method='DELETE', url=url, accept_json=True) + return None + + def get_acoustic_model(self, customization_id): + """ + Lists information about a custom acoustic model. + + Lists information about a specified custom acoustic model. You must use + credentials for the instance of the service that owns a model to list information + about it. + + :param str customization_id: The GUID of the custom acoustic model for which information is to be returned. You must make the request with service credentials created for the instance of the service that owns the custom model. + :return: A `dict` containing the `AcousticModel` response. 
+ :rtype: dict + """ + if customization_id is None: + raise ValueError('customization_id must be provided') + url = '/v1/acoustic_customizations/{0}'.format( + *self._encode_path_vars(customization_id)) + response = self.request(method='GET', url=url, accept_json=True) + return response + + def list_acoustic_models(self, language=None): + """ + Lists information about all custom acoustic models. + + Lists information about all custom acoustic models that are owned by an instance + of the service. Use the `language` parameter to see all custom acoustic models for + the specified language; omit the parameter to see all custom acoustic models for + all languages. You must use credentials for the instance of the service that owns + a model to list information about it. + + :param str language: The identifier of the language for which custom acoustic models are to be returned (for example, `en-US`). Omit the parameter to see all custom acoustic models owned by the requesting service credentials. + :return: A `dict` containing the `AcousticModels` response. + :rtype: dict + """ + params = {'language': language} + url = '/v1/acoustic_customizations' + response = self.request( + method='GET', url=url, params=params, accept_json=True) + return response + + def reset_acoustic_model(self, customization_id): + """ + Resets a custom acoustic model. + + Resets a custom acoustic model by removing all audio resources from the model. + Resetting a custom acoustic model initializes the model to its state when it was + first created. Metadata such as the name and language of the model are preserved, + but the model's audio resources are removed and must be re-created. You must use + credentials for the instance of the service that owns a model to reset it. + + :param str customization_id: The GUID of the custom acoustic model that is to be reset. You must make the request with service credentials created for the instance of the service that owns the custom model. 
+ :rtype: None + """ + if customization_id is None: + raise ValueError('customization_id must be provided') + url = '/v1/acoustic_customizations/{0}/reset'.format( + *self._encode_path_vars(customization_id)) + self.request(method='POST', url=url, accept_json=True) + return None + + def train_acoustic_model(self, + customization_id, + custom_language_model_id=None): + """ + Trains a custom acoustic model. + + :param str customization_id: The GUID of the custom acoustic model that is to be trained. You must make the request with service credentials created for the instance of the service that owns the custom model. + :param str custom_language_model_id: The GUID of a custom language model that is to be used during training of the custom acoustic model. Specify a custom language model that has been trained with verbatim transcriptions of the audio resources or that contains words that are relevant to the contents of the audio resources. + :rtype: None + """ + if customization_id is None: + raise ValueError('customization_id must be provided') + params = {'custom_language_model_id': custom_language_model_id} + url = '/v1/acoustic_customizations/{0}/train'.format( + *self._encode_path_vars(customization_id)) + self.request(method='POST', url=url, params=params, accept_json=True) + return None + + def upgrade_acoustic_model(self, + customization_id, + custom_language_model_id=None): + """ + Upgrades a custom acoustic model. + + :param str customization_id: The GUID of the custom acoustic model that is to be upgraded. You must make the request with service credentials created for the instance of the service that owns the custom model. + :param str custom_language_model_id: If the custom acoustic model was trained with a custom language model, the GUID of that custom language model. The custom language model must be upgraded before the custom acoustic model can be upgraded. 
+ :rtype: None + """ + if customization_id is None: + raise ValueError('customization_id must be provided') + params = {'custom_language_model_id': custom_language_model_id} + url = '/v1/acoustic_customizations/{0}/upgrade_model'.format( + *self._encode_path_vars(customization_id)) + self.request(method='POST', url=url, params=params, accept_json=True) + return None + + ######################### + # customAudioResources + ######################### + + def add_audio(self, + customization_id, + audio_name, + audio_resource, + content_type='application/zip', + contained_content_type=None, + allow_overwrite=None): + """ + Adds an audio resource to a custom acoustic model. + + :param str customization_id: The GUID of the custom acoustic model to which an audio resource is to be added. You must make the request with service credentials created for the instance of the service that owns the custom model. + :param str audio_name: The name of the audio resource that is to be added to the custom acoustic model. The name cannot contain spaces. Use a localized name that matches the language of the custom model. + :param list[str] audio_resource: The audio resource that is to be added to the custom acoustic model, an individual audio file or an archive file. + :param str content_type: The type of the input: application/zip, application/gzip, audio/basic, audio/flac, audio/l16, audio/mp3, audio/mpeg, audio/mulaw, audio/ogg, audio/ogg;codecs=opus, audio/ogg;codecs=vorbis, audio/wav, audio/webm, audio/webm;codecs=opus, or audio/webm;codecs=vorbis. + :param str contained_content_type: For an archive-type resource that contains audio files whose format is not `audio/wav`, specifies the format of the audio files. The header accepts all of the audio formats supported for use with speech recognition and with the `Content-Type` header, including the `rate`, `channels`, and `endianness` parameters that are used with some formats. 
For a complete list of supported audio formats, see [Audio formats](/docs/services/speech-to-text/input.html#formats). + :param bool allow_overwrite: Indicates whether the specified audio resource is to overwrite an existing resource with the same name. If a resource with the same name already exists, the request fails unless `allow_overwrite` is set to `true`; by default, the parameter is `false`. The parameter has no effect if a resource with the same name does not already exist. + :rtype: None + """ + if customization_id is None: + raise ValueError('customization_id must be provided') + if audio_name is None: + raise ValueError('audio_name must be provided') + if audio_resource is None: + raise ValueError('audio_resource must be provided') + if content_type is None: + raise ValueError('content_type must be provided') + headers = { + 'Content-Type': content_type, + 'Contained-Content-Type': contained_content_type + } + params = {'allow_overwrite': allow_overwrite} + data = audio_resource + url = '/v1/acoustic_customizations/{0}/audio/{1}'.format( + *self._encode_path_vars(customization_id, audio_name)) + self.request( + method='POST', + url=url, + headers=headers, + params=params, + data=data, + accept_json=True) + return None + + def delete_audio(self, customization_id, audio_name): + """ + Deletes an audio resource from a custom acoustic model. + + :param str customization_id: The GUID of the custom acoustic model from which an audio resource is to be deleted. You must make the request with service credentials created for the instance of the service that owns the custom model. + :param str audio_name: The name of the audio resource that is to be deleted from the custom acoustic model. 
+ :rtype: None + """ + if customization_id is None: + raise ValueError('customization_id must be provided') + if audio_name is None: + raise ValueError('audio_name must be provided') + url = '/v1/acoustic_customizations/{0}/audio/{1}'.format( + *self._encode_path_vars(customization_id, audio_name)) + self.request(method='DELETE', url=url, accept_json=True) + return None + + def get_audio(self, customization_id, audio_name): + """ + Lists information about an audio resource for a custom acoustic model. + + :param str customization_id: The GUID of the custom acoustic model for which an audio resource is to be listed. You must make the request with service credentials created for the instance of the service that owns the custom model. + :param str audio_name: The name of the audio resource about which information is to be listed. + :return: A `dict` containing the `AudioListing` response. + :rtype: dict + """ + if customization_id is None: + raise ValueError('customization_id must be provided') + if audio_name is None: + raise ValueError('audio_name must be provided') + url = '/v1/acoustic_customizations/{0}/audio/{1}'.format( + *self._encode_path_vars(customization_id, audio_name)) + response = self.request(method='GET', url=url, accept_json=True) + return response + + def list_audio(self, customization_id): + """ + Lists information about all audio resources for a custom acoustic model. + + Lists information about all audio resources from a custom acoustic model. The + information includes the name of the resource and information about its audio + data, such as its duration. It also includes the status of the audio resource, + which is important for checking the service's analysis of the resource in response + to a request to add it to the custom acoustic model. You must use credentials for + the instance of the service that owns a model to list its audio resources. 
+ + :param str customization_id: The GUID of the custom acoustic model for which audio resources are to be listed. You must make the request with service credentials created for the instance of the service that owns the custom model. + :return: A `dict` containing the `AudioResources` response. + :rtype: dict + """ + if customization_id is None: + raise ValueError('customization_id must be provided') + url = '/v1/acoustic_customizations/{0}/audio'.format( + *self._encode_path_vars(customization_id)) + response = self.request(method='GET', url=url, accept_json=True) + return response + + +############################################################################## +# Models +############################################################################## + + +class AcousticModel(object): + """ + AcousticModel. + + :attr str customization_id: The customization ID (GUID) of the custom acoustic model. **Note:** When you create a new custom acoustic model, the service returns only the GUID of the new model; it does not return the other fields of this object. + :attr str created: (optional) The date and time in Coordinated Universal Time (UTC) at which the custom acoustic model was created. The value is provided in full ISO 8601 format (`YYYY-MM-DDThh:mm:ss.sTZD`). + :attr str language: (optional) The language identifier of the custom acoustic model (for example, `en-US`). + :attr list[str] versions: (optional) A list of the available versions of the custom acoustic model. Each element of the array indicates a version of the base model with which the custom model can be used. Multiple versions exist only if the custom model has been upgraded; otherwise, only a single version is shown. + :attr str owner: (optional) The GUID of the service credentials for the instance of the service that owns the custom acoustic model. + :attr str name: (optional) The name of the custom acoustic model. + :attr str description: (optional) The description of the custom acoustic model. 
+ :attr str base_model_name: (optional) The name of the language model for which the custom acoustic model was created. + :attr str status: (optional) The current status of the custom acoustic model: * `pending` indicates that the model was created but is waiting either for training data to be added or for the service to finish analyzing added data. * `ready` indicates that the model contains data and is ready to be trained. * `training` indicates that the model is currently being trained. * `available` indicates that the model is trained and ready to use. * `upgrading` indicates that the model is currently being upgraded. * `failed` indicates that training of the model failed. + :attr int progress: (optional) A percentage that indicates the progress of the custom acoustic model's current training. A value of `100` means that the model is fully trained. **Note:** The `progress` field does not currently reflect the progress of the training; the field changes from `0` to `100` when training is complete. + :attr str warnings: (optional) If the request included unknown query parameters, the following message: `Unexpected query parameter(s) ['parameters'] detected`, where `parameters` is a list that includes a quoted string for each unknown parameter. + """ + + def __init__(self, + customization_id, + created=None, + language=None, + versions=None, + owner=None, + name=None, + description=None, + base_model_name=None, + status=None, + progress=None, + warnings=None): + """ + Initialize a AcousticModel object. + + :param str customization_id: The customization ID (GUID) of the custom acoustic model. **Note:** When you create a new custom acoustic model, the service returns only the GUID of the new model; it does not return the other fields of this object. + :param str created: (optional) The date and time in Coordinated Universal Time (UTC) at which the custom acoustic model was created. The value is provided in full ISO 8601 format (`YYYY-MM-DDThh:mm:ss.sTZD`). 
+ :param str language: (optional) The language identifier of the custom acoustic model (for example, `en-US`). + :param list[str] versions: (optional) A list of the available versions of the custom acoustic model. Each element of the array indicates a version of the base model with which the custom model can be used. Multiple versions exist only if the custom model has been upgraded; otherwise, only a single version is shown. + :param str owner: (optional) The GUID of the service credentials for the instance of the service that owns the custom acoustic model. + :param str name: (optional) The name of the custom acoustic model. + :param str description: (optional) The description of the custom acoustic model. + :param str base_model_name: (optional) The name of the language model for which the custom acoustic model was created. + :param str status: (optional) The current status of the custom acoustic model: * `pending` indicates that the model was created but is waiting either for training data to be added or for the service to finish analyzing added data. * `ready` indicates that the model contains data and is ready to be trained. * `training` indicates that the model is currently being trained. * `available` indicates that the model is trained and ready to use. * `upgrading` indicates that the model is currently being upgraded. * `failed` indicates that training of the model failed. + :param int progress: (optional) A percentage that indicates the progress of the custom acoustic model's current training. A value of `100` means that the model is fully trained. **Note:** The `progress` field does not currently reflect the progress of the training; the field changes from `0` to `100` when training is complete. + :param str warnings: (optional) If the request included unknown query parameters, the following message: `Unexpected query parameter(s) ['parameters'] detected`, where `parameters` is a list that includes a quoted string for each unknown parameter. 
+ """ + self.customization_id = customization_id + self.created = created + self.language = language + self.versions = versions + self.owner = owner + self.name = name + self.description = description + self.base_model_name = base_model_name + self.status = status + self.progress = progress + self.warnings = warnings + + @classmethod + def _from_dict(cls, _dict): + """Initialize a AcousticModel object from a json dictionary.""" + args = {} + if 'customization_id' in _dict: + args['customization_id'] = _dict['customization_id'] else: - word = custom_word.word + raise ValueError( + 'Required property \'customization_id\' not present in AcousticModel JSON' + ) + if 'created' in _dict: + args['created'] = _dict['created'] + if 'language' in _dict: + args['language'] = _dict['language'] + if 'versions' in _dict: + args['versions'] = _dict['versions'] + if 'owner' in _dict: + args['owner'] = _dict['owner'] + if 'name' in _dict: + args['name'] = _dict['name'] + if 'description' in _dict: + args['description'] = _dict['description'] + if 'base_model_name' in _dict: + args['base_model_name'] = _dict['base_model_name'] + if 'status' in _dict: + args['status'] = _dict['status'] + if 'progress' in _dict: + args['progress'] = _dict['progress'] + if 'warnings' in _dict: + args['warnings'] = _dict['warnings'] + return cls(**args) - return self.request(method='GET', - url=url.format(customization_id, word), - accept_json=True) + def _to_dict(self): + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, + 'customization_id') and self.customization_id is not None: + _dict['customization_id'] = self.customization_id + if hasattr(self, 'created') and self.created is not None: + _dict['created'] = self.created + if hasattr(self, 'language') and self.language is not None: + _dict['language'] = self.language + if hasattr(self, 'versions') and self.versions is not None: + _dict['versions'] = self.versions + if hasattr(self, 'owner') and self.owner is not 
None: + _dict['owner'] = self.owner + if hasattr(self, 'name') and self.name is not None: + _dict['name'] = self.name + if hasattr(self, 'description') and self.description is not None: + _dict['description'] = self.description + if hasattr(self, + 'base_model_name') and self.base_model_name is not None: + _dict['base_model_name'] = self.base_model_name + if hasattr(self, 'status') and self.status is not None: + _dict['status'] = self.status + if hasattr(self, 'progress') and self.progress is not None: + _dict['progress'] = self.progress + if hasattr(self, 'warnings') and self.warnings is not None: + _dict['warnings'] = self.warnings + return _dict - def delete_custom_word(self, customization_id, custom_word): - url = '/v1/customizations/{0}/words/{1}' - word = None - if isinstance(custom_word, str): - word = custom_word + def __str__(self): + """Return a `str` version of this AcousticModel object.""" + return json.dumps(self._to_dict(), indent=2) + + def __eq__(self, other): + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other): + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class AcousticModels(object): + """ + AcousticModels. + + :attr list[AcousticModel] customizations: An array of objects that provides information about each available custom acoustic model. The array is empty if the requesting service credentials own no custom acoustic models (if no language is specified) or own no custom acoustic models for the specified language. + """ + + def __init__(self, customizations): + """ + Initialize a AcousticModels object. + + :param list[AcousticModel] customizations: An array of objects that provides information about each available custom acoustic model. 
The array is empty if the requesting service credentials own no custom acoustic models (if no language is specified) or own no custom acoustic models for the specified language. + """ + self.customizations = customizations + + @classmethod + def _from_dict(cls, _dict): + """Initialize a AcousticModels object from a json dictionary.""" + args = {} + if 'customizations' in _dict: + args['customizations'] = [ + AcousticModel._from_dict(x) for x in _dict['customizations'] + ] + else: + raise ValueError( + 'Required property \'customizations\' not present in AcousticModels JSON' + ) + return cls(**args) + + def _to_dict(self): + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'customizations') and self.customizations is not None: + _dict['customizations'] = [ + x._to_dict() for x in self.customizations + ] + return _dict + + def __str__(self): + """Return a `str` version of this AcousticModels object.""" + return json.dumps(self._to_dict(), indent=2) + + def __eq__(self, other): + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other): + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class AudioDetails(object): + """ + AudioDetails. + + :attr str type: The type of the audio resource: * `audio` for an individual audio file * `archive` for an archive (**.zip** or **.tar.gz**) file that contains audio files. + :attr str codec: (optional) **For an audio-type resource,** the codec in which the audio is encoded. Omitted for an archive-type resource. + :attr int frequency: (optional) **For an audio-type resource,** the sampling rate of the audio in Hertz (samples per second). Omitted for an archive-type resource. 
+ :attr str compression: (optional) **For an archive-type resource,** the format of the compressed archive: * `zip` for a **.zip** file * `gzip` for a **.tar.gz** file Omitted for an audio-type resource. + """ + + def __init__(self, type, codec=None, frequency=None, compression=None): + """ + Initialize a AudioDetails object. + + :param str type: The type of the audio resource: * `audio` for an individual audio file * `archive` for an archive (**.zip** or **.tar.gz**) file that contains audio files. + :param str codec: (optional) **For an audio-type resource,** the codec in which the audio is encoded. Omitted for an archive-type resource. + :param int frequency: (optional) **For an audio-type resource,** the sampling rate of the audio in Hertz (samples per second). Omitted for an archive-type resource. + :param str compression: (optional) **For an archive-type resource,** the format of the compressed archive: * `zip` for a **.zip** file * `gzip` for a **.tar.gz** file Omitted for an audio-type resource. 
+ """ + self.type = type + self.codec = codec + self.frequency = frequency + self.compression = compression + + @classmethod + def _from_dict(cls, _dict): + """Initialize a AudioDetails object from a json dictionary.""" + args = {} + if 'type' in _dict: + args['type'] = _dict['type'] else: - word = custom_word.word + raise ValueError( + 'Required property \'type\' not present in AudioDetails JSON') + if 'codec' in _dict: + args['codec'] = _dict['codec'] + if 'frequency' in _dict: + args['frequency'] = _dict['frequency'] + if 'compression' in _dict: + args['compression'] = _dict['compression'] + return cls(**args) + + def _to_dict(self): + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'type') and self.type is not None: + _dict['type'] = self.type + if hasattr(self, 'codec') and self.codec is not None: + _dict['codec'] = self.codec + if hasattr(self, 'frequency') and self.frequency is not None: + _dict['frequency'] = self.frequency + if hasattr(self, 'compression') and self.compression is not None: + _dict['compression'] = self.compression + return _dict + + def __str__(self): + """Return a `str` version of this AudioDetails object.""" + return json.dumps(self._to_dict(), indent=2) + + def __eq__(self, other): + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other): + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class AudioListing(object): + """ + AudioListing. + + :attr float duration: (optional) **For an audio-type resource,** the total seconds of audio in the resource. Omitted for an archive-type resource. + :attr str name: (optional) **For an audio-type resource,** the name of the resource. Omitted for an archive-type resource. 
+ :attr AudioDetails details: (optional) **For an audio-type resource,** an `AudioDetails` object that provides detailed information about the resource. The object is empty until the service finishes processing the audio. Omitted for an archive-type resource. + :attr str status: (optional) **For an audio-type resource,** the status of the resource: * `ok` indicates that the service has successfully analyzed the audio data. The data can be used to train the custom model. * `being_processed` indicates that the service is still analyzing the audio data. The service cannot accept requests to add new audio resources or to train the custom model until its analysis is complete. * `invalid` indicates that the audio data is not valid for training the custom model (possibly because it has the wrong format or sampling rate, or because it is corrupted). Omitted for an archive-type resource. + :attr AudioResource container: (optional) **For an archive-type resource,** an object of type `AudioResource` that provides information about the resource. Omitted for an audio-type resource. + :attr list[AudioResource] audio: (optional) **For an archive-type resource,** an array of `AudioResource` objects that provides information about the audio-type resources that are contained in the resource. Omitted for an audio-type resource. + """ + + def __init__(self, + duration=None, + name=None, + details=None, + status=None, + container=None, + audio=None): + """ + Initialize a AudioListing object. + + :param float duration: (optional) **For an audio-type resource,** the total seconds of audio in the resource. Omitted for an archive-type resource. + :param str name: (optional) **For an audio-type resource,** the name of the resource. Omitted for an archive-type resource. + :param AudioDetails details: (optional) **For an audio-type resource,** an `AudioDetails` object that provides detailed information about the resource. The object is empty until the service finishes processing the audio. 
Omitted for an archive-type resource. + :param str status: (optional) **For an audio-type resource,** the status of the resource: * `ok` indicates that the service has successfully analyzed the audio data. The data can be used to train the custom model. * `being_processed` indicates that the service is still analyzing the audio data. The service cannot accept requests to add new audio resources or to train the custom model until its analysis is complete. * `invalid` indicates that the audio data is not valid for training the custom model (possibly because it has the wrong format or sampling rate, or because it is corrupted). Omitted for an archive-type resource. + :param AudioResource container: (optional) **For an archive-type resource,** an object of type `AudioResource` that provides information about the resource. Omitted for an audio-type resource. + :param list[AudioResource] audio: (optional) **For an archive-type resource,** an array of `AudioResource` objects that provides information about the audio-type resources that are contained in the resource. Omitted for an audio-type resource. 
+ """ + self.duration = duration + self.name = name + self.details = details + self.status = status + self.container = container + self.audio = audio + + @classmethod + def _from_dict(cls, _dict): + """Initialize a AudioListing object from a json dictionary.""" + args = {} + if 'duration' in _dict: + args['duration'] = _dict['duration'] + if 'name' in _dict: + args['name'] = _dict['name'] + if 'details' in _dict: + args['details'] = AudioDetails._from_dict(_dict['details']) + if 'status' in _dict: + args['status'] = _dict['status'] + if 'container' in _dict: + args['container'] = AudioResource._from_dict(_dict['container']) + if 'audio' in _dict: + args['audio'] = [ + AudioResource._from_dict(x) for x in _dict['audio'] + ] + return cls(**args) + + def _to_dict(self): + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'duration') and self.duration is not None: + _dict['duration'] = self.duration + if hasattr(self, 'name') and self.name is not None: + _dict['name'] = self.name + if hasattr(self, 'details') and self.details is not None: + _dict['details'] = self.details._to_dict() + if hasattr(self, 'status') and self.status is not None: + _dict['status'] = self.status + if hasattr(self, 'container') and self.container is not None: + _dict['container'] = self.container._to_dict() + if hasattr(self, 'audio') and self.audio is not None: + _dict['audio'] = [x._to_dict() for x in self.audio] + return _dict + + def __str__(self): + """Return a `str` version of this AudioListing object.""" + return json.dumps(self._to_dict(), indent=2) + + def __eq__(self, other): + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other): + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class AudioResource(object): + """ + AudioResource. 
+ + :attr float duration: The total seconds of audio in the audio resource. + :attr str name: The name of the audio resource. + :attr AudioDetails details: An `AudioDetails` object that provides detailed information about the audio resource. The object is empty until the service finishes processing the audio. + :attr str status: The status of the audio resource: * `ok` indicates that the service has successfully analyzed the audio data. The data can be used to train the custom model. * `being_processed` indicates that the service is still analyzing the audio data. The service cannot accept requests to add new audio resources or to train the custom model until its analysis is complete. * `invalid` indicates that the audio data is not valid for training the custom model (possibly because it has the wrong format or sampling rate, or because it is corrupted). For an archive file, the entire archive is invalid if any of its audio files are invalid. + """ + + def __init__(self, duration, name, details, status): + """ + Initialize a AudioResource object. + + :param float duration: The total seconds of audio in the audio resource. + :param str name: The name of the audio resource. + :param AudioDetails details: An `AudioDetails` object that provides detailed information about the audio resource. The object is empty until the service finishes processing the audio. + :param str status: The status of the audio resource: * `ok` indicates that the service has successfully analyzed the audio data. The data can be used to train the custom model. * `being_processed` indicates that the service is still analyzing the audio data. The service cannot accept requests to add new audio resources or to train the custom model until its analysis is complete. * `invalid` indicates that the audio data is not valid for training the custom model (possibly because it has the wrong format or sampling rate, or because it is corrupted). 
For an archive file, the entire archive is invalid if any of its audio files are invalid. + """ + self.duration = duration + self.name = name + self.details = details + self.status = status + + @classmethod + def _from_dict(cls, _dict): + """Initialize a AudioResource object from a json dictionary.""" + args = {} + if 'duration' in _dict: + args['duration'] = _dict['duration'] + else: + raise ValueError( + 'Required property \'duration\' not present in AudioResource JSON' + ) + if 'name' in _dict: + args['name'] = _dict['name'] + else: + raise ValueError( + 'Required property \'name\' not present in AudioResource JSON') + if 'details' in _dict: + args['details'] = AudioDetails._from_dict(_dict['details']) + else: + raise ValueError( + 'Required property \'details\' not present in AudioResource JSON' + ) + if 'status' in _dict: + args['status'] = _dict['status'] + else: + raise ValueError( + 'Required property \'status\' not present in AudioResource JSON' + ) + return cls(**args) + + def _to_dict(self): + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'duration') and self.duration is not None: + _dict['duration'] = self.duration + if hasattr(self, 'name') and self.name is not None: + _dict['name'] = self.name + if hasattr(self, 'details') and self.details is not None: + _dict['details'] = self.details._to_dict() + if hasattr(self, 'status') and self.status is not None: + _dict['status'] = self.status + return _dict + + def __str__(self): + """Return a `str` version of this AudioResource object.""" + return json.dumps(self._to_dict(), indent=2) + + def __eq__(self, other): + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other): + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class AudioResources(object): + """ + AudioResources. 
+ + :attr float total_minutes_of_audio: The total minutes of accumulated audio summed over all of the valid audio resources for the custom acoustic model. You can use this value to determine whether the custom model has too little or too much audio to begin training. + :attr list[AudioResource] audio: An array of `AudioResource` objects that provides information about the audio resources of the custom acoustic model. The array is empty if the custom model has no audio resources. + """ + + def __init__(self, total_minutes_of_audio, audio): + """ + Initialize a AudioResources object. + + :param float total_minutes_of_audio: The total minutes of accumulated audio summed over all of the valid audio resources for the custom acoustic model. You can use this value to determine whether the custom model has too little or too much audio to begin training. + :param list[AudioResource] audio: An array of `AudioResource` objects that provides information about the audio resources of the custom acoustic model. The array is empty if the custom model has no audio resources. 
+ """ + self.total_minutes_of_audio = total_minutes_of_audio + self.audio = audio + + @classmethod + def _from_dict(cls, _dict): + """Initialize a AudioResources object from a json dictionary.""" + args = {} + if 'total_minutes_of_audio' in _dict: + args['total_minutes_of_audio'] = _dict['total_minutes_of_audio'] + else: + raise ValueError( + 'Required property \'total_minutes_of_audio\' not present in AudioResources JSON' + ) + if 'audio' in _dict: + args['audio'] = [ + AudioResource._from_dict(x) for x in _dict['audio'] + ] + else: + raise ValueError( + 'Required property \'audio\' not present in AudioResources JSON' + ) + return cls(**args) + + def _to_dict(self): + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'total_minutes_of_audio' + ) and self.total_minutes_of_audio is not None: + _dict['total_minutes_of_audio'] = self.total_minutes_of_audio + if hasattr(self, 'audio') and self.audio is not None: + _dict['audio'] = [x._to_dict() for x in self.audio] + return _dict + + def __str__(self): + """Return a `str` version of this AudioResources object.""" + return json.dumps(self._to_dict(), indent=2) + + def __eq__(self, other): + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other): + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class Corpora(object): + """ + Corpora. + + :attr list[Corpus] corpora: Information about corpora of the custom model. The array is empty if the custom model has no corpora. + """ + + def __init__(self, corpora): + """ + Initialize a Corpora object. + + :param list[Corpus] corpora: Information about corpora of the custom model. The array is empty if the custom model has no corpora. 
+ """ + self.corpora = corpora + + @classmethod + def _from_dict(cls, _dict): + """Initialize a Corpora object from a json dictionary.""" + args = {} + if 'corpora' in _dict: + args['corpora'] = [Corpus._from_dict(x) for x in _dict['corpora']] + else: + raise ValueError( + 'Required property \'corpora\' not present in Corpora JSON') + return cls(**args) + + def _to_dict(self): + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'corpora') and self.corpora is not None: + _dict['corpora'] = [x._to_dict() for x in self.corpora] + return _dict + + def __str__(self): + """Return a `str` version of this Corpora object.""" + return json.dumps(self._to_dict(), indent=2) + + def __eq__(self, other): + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other): + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class Corpus(object): + """ + Corpus. + + :attr str name: The name of the corpus. + :attr int total_words: The total number of words in the corpus. The value is `0` while the corpus is being processed. + :attr int out_of_vocabulary_words: The number of OOV words in the corpus. The value is `0` while the corpus is being processed. + :attr str status: The status of the corpus: * `analyzed` indicates that the service has successfully analyzed the corpus; the custom model can be trained with data from the corpus. * `being_processed` indicates that the service is still analyzing the corpus; the service cannot accept requests to add new corpora or words, or to train the custom model. * `undetermined` indicates that the service encountered an error while processing the corpus. + :attr str error: (optional) If the status of the corpus is `undetermined`, the following message: `Analysis of corpus 'name' failed. 
Please try adding the corpus again by setting the 'allow_overwrite' flag to 'true'`. + """ + + def __init__(self, + name, + total_words, + out_of_vocabulary_words, + status, + error=None): + """ + Initialize a Corpus object. + + :param str name: The name of the corpus. + :param int total_words: The total number of words in the corpus. The value is `0` while the corpus is being processed. + :param int out_of_vocabulary_words: The number of OOV words in the corpus. The value is `0` while the corpus is being processed. + :param str status: The status of the corpus: * `analyzed` indicates that the service has successfully analyzed the corpus; the custom model can be trained with data from the corpus. * `being_processed` indicates that the service is still analyzing the corpus; the service cannot accept requests to add new corpora or words, or to train the custom model. * `undetermined` indicates that the service encountered an error while processing the corpus. + :param str error: (optional) If the status of the corpus is `undetermined`, the following message: `Analysis of corpus 'name' failed. Please try adding the corpus again by setting the 'allow_overwrite' flag to 'true'`. 
+ """ + self.name = name + self.total_words = total_words + self.out_of_vocabulary_words = out_of_vocabulary_words + self.status = status + self.error = error + + @classmethod + def _from_dict(cls, _dict): + """Initialize a Corpus object from a json dictionary.""" + args = {} + if 'name' in _dict: + args['name'] = _dict['name'] + else: + raise ValueError( + 'Required property \'name\' not present in Corpus JSON') + if 'total_words' in _dict: + args['total_words'] = _dict['total_words'] + else: + raise ValueError( + 'Required property \'total_words\' not present in Corpus JSON') + if 'out_of_vocabulary_words' in _dict: + args['out_of_vocabulary_words'] = _dict['out_of_vocabulary_words'] + else: + raise ValueError( + 'Required property \'out_of_vocabulary_words\' not present in Corpus JSON' + ) + if 'status' in _dict: + args['status'] = _dict['status'] + else: + raise ValueError( + 'Required property \'status\' not present in Corpus JSON') + if 'error' in _dict: + args['error'] = _dict['error'] + return cls(**args) + + def _to_dict(self): + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'name') and self.name is not None: + _dict['name'] = self.name + if hasattr(self, 'total_words') and self.total_words is not None: + _dict['total_words'] = self.total_words + if hasattr(self, 'out_of_vocabulary_words' + ) and self.out_of_vocabulary_words is not None: + _dict['out_of_vocabulary_words'] = self.out_of_vocabulary_words + if hasattr(self, 'status') and self.status is not None: + _dict['status'] = self.status + if hasattr(self, 'error') and self.error is not None: + _dict['error'] = self.error + return _dict + + def __str__(self): + """Return a `str` version of this Corpus object.""" + return json.dumps(self._to_dict(), indent=2) + + def __eq__(self, other): + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def 
__ne__(self, other): + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class CustomWord(object): + """ + CustomWord. + + :attr str word: (optional) **When specifying an array of one or more words,** you must specify the custom word that is to be added to or updated in the custom model. Do not include spaces in the word. Use a - (dash) or _ (underscore) to connect the tokens of compound words. **When adding or updating a single word directly,** omit this field. + :attr list[str] sounds_like: (optional) An array of sounds-like pronunciations for the custom word. Specify how words that are difficult to pronounce, foreign words, acronyms, and so on can be pronounced by users. For a word that is not in the service's base vocabulary, omit the parameter to have the service automatically generate a sounds-like pronunciation for the word. For a word that is in the service's base vocabulary, use the parameter to specify additional pronunciations for the word. You cannot override the default pronunciation of a word; pronunciations you add augment the pronunciation from the base vocabulary. A word can have at most five sounds-like pronunciations, and a pronunciation can include at most 40 characters not including spaces. + :attr str display_as: (optional) An alternative spelling for the custom word when it appears in a transcript. Use the parameter when you want the word to have a spelling that is different from its usual representation or from its spelling in corpora training data. + """ + + def __init__(self, word=None, sounds_like=None, display_as=None): + """ + Initialize a CustomWord object. + + :param str word: (optional) **When specifying an array of one or more words,** you must specify the custom word that is to be added to or updated in the custom model. Do not include spaces in the word. Use a - (dash) or _ (underscore) to connect the tokens of compound words. 
**When adding or updating a single word directly,** omit this field. + :param list[str] sounds_like: (optional) An array of sounds-like pronunciations for the custom word. Specify how words that are difficult to pronounce, foreign words, acronyms, and so on can be pronounced by users. For a word that is not in the service's base vocabulary, omit the parameter to have the service automatically generate a sounds-like pronunciation for the word. For a word that is in the service's base vocabulary, use the parameter to specify additional pronunciations for the word. You cannot override the default pronunciation of a word; pronunciations you add augment the pronunciation from the base vocabulary. A word can have at most five sounds-like pronunciations, and a pronunciation can include at most 40 characters not including spaces. + :param str display_as: (optional) An alternative spelling for the custom word when it appears in a transcript. Use the parameter when you want the word to have a spelling that is different from its usual representation or from its spelling in corpora training data. 
+ """ + self.word = word + self.sounds_like = sounds_like + self.display_as = display_as + + @classmethod + def _from_dict(cls, _dict): + """Initialize a CustomWord object from a json dictionary.""" + args = {} + if 'word' in _dict: + args['word'] = _dict['word'] + if 'sounds_like' in _dict: + args['sounds_like'] = _dict['sounds_like'] + if 'display_as' in _dict: + args['display_as'] = _dict['display_as'] + return cls(**args) + + def _to_dict(self): + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'word') and self.word is not None: + _dict['word'] = self.word + if hasattr(self, 'sounds_like') and self.sounds_like is not None: + _dict['sounds_like'] = self.sounds_like + if hasattr(self, 'display_as') and self.display_as is not None: + _dict['display_as'] = self.display_as + return _dict + + def __str__(self): + """Return a `str` version of this CustomWord object.""" + return json.dumps(self._to_dict(), indent=2) + + def __eq__(self, other): + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other): + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class KeywordResult(object): + """ + KeywordResult. + + :attr str normalized_text: A specified keyword normalized to the spoken phrase that matched in the audio input. + :attr float start_time: The start time in seconds of the keyword match. + :attr float end_time: The end time in seconds of the keyword match. + :attr float confidence: A confidence score for the keyword match in the range of 0 to 1. + """ + + def __init__(self, normalized_text, start_time, end_time, confidence): + """ + Initialize a KeywordResult object. + + :param str normalized_text: A specified keyword normalized to the spoken phrase that matched in the audio input. 
+ :param float start_time: The start time in seconds of the keyword match. + :param float end_time: The end time in seconds of the keyword match. + :param float confidence: A confidence score for the keyword match in the range of 0 to 1. + """ + self.normalized_text = normalized_text + self.start_time = start_time + self.end_time = end_time + self.confidence = confidence + + @classmethod + def _from_dict(cls, _dict): + """Initialize a KeywordResult object from a json dictionary.""" + args = {} + if 'normalized_text' in _dict: + args['normalized_text'] = _dict['normalized_text'] + else: + raise ValueError( + 'Required property \'normalized_text\' not present in KeywordResult JSON' + ) + if 'start_time' in _dict: + args['start_time'] = _dict['start_time'] + else: + raise ValueError( + 'Required property \'start_time\' not present in KeywordResult JSON' + ) + if 'end_time' in _dict: + args['end_time'] = _dict['end_time'] + else: + raise ValueError( + 'Required property \'end_time\' not present in KeywordResult JSON' + ) + if 'confidence' in _dict: + args['confidence'] = _dict['confidence'] + else: + raise ValueError( + 'Required property \'confidence\' not present in KeywordResult JSON' + ) + return cls(**args) + + def _to_dict(self): + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, + 'normalized_text') and self.normalized_text is not None: + _dict['normalized_text'] = self.normalized_text + if hasattr(self, 'start_time') and self.start_time is not None: + _dict['start_time'] = self.start_time + if hasattr(self, 'end_time') and self.end_time is not None: + _dict['end_time'] = self.end_time + if hasattr(self, 'confidence') and self.confidence is not None: + _dict['confidence'] = self.confidence + return _dict + + def __str__(self): + """Return a `str` version of this KeywordResult object.""" + return json.dumps(self._to_dict(), indent=2) + + def __eq__(self, other): + """Return `true` when self and other are equal, false 
otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other): + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class LanguageModel(object): + """ + LanguageModel. + + :attr str customization_id: The customization ID (GUID) of the custom language model. **Note:** When you create a new custom language model, the service returns only the GUID of the new model; it does not return the other fields of this object. + :attr str created: (optional) The date and time in Coordinated Universal Time (UTC) at which the custom language model was created. The value is provided in full ISO 8601 format (`YYYY-MM-DDThh:mm:ss.sTZD`). + :attr str language: (optional) The language identifier of the custom language model (for example, `en-US`). + :attr str dialect: (optional) The dialect of the language for the custom language model. By default, the dialect matches the language of the base model; for example, `en-US` for either of the US English language models. For Spanish models, the field indicates the dialect for which the model was created: * `es-ES` for Castilian Spanish (the default) * `es-LA` for Latin American Spanish * `es-US` for North American (Mexican) Spanish. + :attr list[str] versions: (optional) A list of the available versions of the custom language model. Each element of the array indicates a version of the base model with which the custom model can be used. Multiple versions exist only if the custom model has been upgraded; otherwise, only a single version is shown. + :attr str owner: (optional) The GUID of the service credentials for the instance of the service that owns the custom language model. + :attr str name: (optional) The name of the custom language model. + :attr str description: (optional) The description of the custom language model. 
+ :attr str base_model_name: (optional) The name of the language model for which the custom language model was created. + :attr str status: (optional) The current status of the custom language model: * `pending` indicates that the model was created but is waiting either for training data to be added or for the service to finish analyzing added data. * `ready` indicates that the model contains data and is ready to be trained. * `training` indicates that the model is currently being trained. * `available` indicates that the model is trained and ready to use. * `upgrading` indicates that the model is currently being upgraded. * `failed` indicates that training of the model failed. + :attr int progress: (optional) A percentage that indicates the progress of the custom language model's current training. A value of `100` means that the model is fully trained. **Note:** The `progress` field does not currently reflect the progress of the training; the field changes from `0` to `100` when training is complete. + :attr str warnings: (optional) If the request included unknown query parameters, the following message: `Unexpected query parameter(s) ['parameters'] detected`, where `parameters` is a list that includes a quoted string for each unknown parameter. + """ + + def __init__(self, + customization_id, + created=None, + language=None, + dialect=None, + versions=None, + owner=None, + name=None, + description=None, + base_model_name=None, + status=None, + progress=None, + warnings=None): + """ + Initialize a LanguageModel object. + + :param str customization_id: The customization ID (GUID) of the custom language model. **Note:** When you create a new custom language model, the service returns only the GUID of the new model; it does not return the other fields of this object. + :param str created: (optional) The date and time in Coordinated Universal Time (UTC) at which the custom language model was created. 
The value is provided in full ISO 8601 format (`YYYY-MM-DDThh:mm:ss.sTZD`). + :param str language: (optional) The language identifier of the custom language model (for example, `en-US`). + :param str dialect: (optional) The dialect of the language for the custom language model. By default, the dialect matches the language of the base model; for example, `en-US` for either of the US English language models. For Spanish models, the field indicates the dialect for which the model was created: * `es-ES` for Castilian Spanish (the default) * `es-LA` for Latin American Spanish * `es-US` for North American (Mexican) Spanish. + :param list[str] versions: (optional) A list of the available versions of the custom language model. Each element of the array indicates a version of the base model with which the custom model can be used. Multiple versions exist only if the custom model has been upgraded; otherwise, only a single version is shown. + :param str owner: (optional) The GUID of the service credentials for the instance of the service that owns the custom language model. + :param str name: (optional) The name of the custom language model. + :param str description: (optional) The description of the custom language model. + :param str base_model_name: (optional) The name of the language model for which the custom language model was created. + :param str status: (optional) The current status of the custom language model: * `pending` indicates that the model was created but is waiting either for training data to be added or for the service to finish analyzing added data. * `ready` indicates that the model contains data and is ready to be trained. * `training` indicates that the model is currently being trained. * `available` indicates that the model is trained and ready to use. * `upgrading` indicates that the model is currently being upgraded. * `failed` indicates that training of the model failed. 
+ :param int progress: (optional) A percentage that indicates the progress of the custom language model's current training. A value of `100` means that the model is fully trained. **Note:** The `progress` field does not currently reflect the progress of the training; the field changes from `0` to `100` when training is complete. + :param str warnings: (optional) If the request included unknown query parameters, the following message: `Unexpected query parameter(s) ['parameters'] detected`, where `parameters` is a list that includes a quoted string for each unknown parameter. + """ + self.customization_id = customization_id + self.created = created + self.language = language + self.dialect = dialect + self.versions = versions + self.owner = owner + self.name = name + self.description = description + self.base_model_name = base_model_name + self.status = status + self.progress = progress + self.warnings = warnings + + @classmethod + def _from_dict(cls, _dict): + """Initialize a LanguageModel object from a json dictionary.""" + args = {} + if 'customization_id' in _dict: + args['customization_id'] = _dict['customization_id'] + else: + raise ValueError( + 'Required property \'customization_id\' not present in LanguageModel JSON' + ) + if 'created' in _dict: + args['created'] = _dict['created'] + if 'language' in _dict: + args['language'] = _dict['language'] + if 'dialect' in _dict: + args['dialect'] = _dict['dialect'] + if 'versions' in _dict: + args['versions'] = _dict['versions'] + if 'owner' in _dict: + args['owner'] = _dict['owner'] + if 'name' in _dict: + args['name'] = _dict['name'] + if 'description' in _dict: + args['description'] = _dict['description'] + if 'base_model_name' in _dict: + args['base_model_name'] = _dict['base_model_name'] + if 'status' in _dict: + args['status'] = _dict['status'] + if 'progress' in _dict: + args['progress'] = _dict['progress'] + if 'warnings' in _dict: + args['warnings'] = _dict['warnings'] + return cls(**args) + + def 
_to_dict(self): + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, + 'customization_id') and self.customization_id is not None: + _dict['customization_id'] = self.customization_id + if hasattr(self, 'created') and self.created is not None: + _dict['created'] = self.created + if hasattr(self, 'language') and self.language is not None: + _dict['language'] = self.language + if hasattr(self, 'dialect') and self.dialect is not None: + _dict['dialect'] = self.dialect + if hasattr(self, 'versions') and self.versions is not None: + _dict['versions'] = self.versions + if hasattr(self, 'owner') and self.owner is not None: + _dict['owner'] = self.owner + if hasattr(self, 'name') and self.name is not None: + _dict['name'] = self.name + if hasattr(self, 'description') and self.description is not None: + _dict['description'] = self.description + if hasattr(self, + 'base_model_name') and self.base_model_name is not None: + _dict['base_model_name'] = self.base_model_name + if hasattr(self, 'status') and self.status is not None: + _dict['status'] = self.status + if hasattr(self, 'progress') and self.progress is not None: + _dict['progress'] = self.progress + if hasattr(self, 'warnings') and self.warnings is not None: + _dict['warnings'] = self.warnings + return _dict + + def __str__(self): + """Return a `str` version of this LanguageModel object.""" + return json.dumps(self._to_dict(), indent=2) + + def __eq__(self, other): + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other): + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class LanguageModels(object): + """ + LanguageModels. + + :attr list[LanguageModel] customizations: An array of objects that provides information about each available custom language model. 
The array is empty if the requesting service credentials own no custom language models (if no language is specified) or own no custom language models for the specified language. + """ + + def __init__(self, customizations): + """ + Initialize a LanguageModels object. + + :param list[LanguageModel] customizations: An array of objects that provides information about each available custom language model. The array is empty if the requesting service credentials own no custom language models (if no language is specified) or own no custom language models for the specified language. + """ + self.customizations = customizations + + @classmethod + def _from_dict(cls, _dict): + """Initialize a LanguageModels object from a json dictionary.""" + args = {} + if 'customizations' in _dict: + args['customizations'] = [ + LanguageModel._from_dict(x) for x in _dict['customizations'] + ] + else: + raise ValueError( + 'Required property \'customizations\' not present in LanguageModels JSON' + ) + return cls(**args) + + def _to_dict(self): + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'customizations') and self.customizations is not None: + _dict['customizations'] = [ + x._to_dict() for x in self.customizations + ] + return _dict + + def __str__(self): + """Return a `str` version of this LanguageModels object.""" + return json.dumps(self._to_dict(), indent=2) + + def __eq__(self, other): + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other): + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class RecognitionJob(object): + """ + RecognitionJob. + + :attr str id: The ID of the job. + :attr str status: The current status of the job: * `waiting`: The service is preparing the job for processing. 
The service returns this status when the job is initially created or when it is waiting for capacity to process the job. The job remains in this state until the service has the capacity to begin processing it. * `processing`: The service is actively processing the job. * `completed`: The service has finished processing the job. If the job specified a callback URL and the event `recognitions.completed_with_results`, the service sent the results with the callback notification; otherwise, you must retrieve the results by checking the individual job. * `failed`: The job failed. + :attr str created: The date and time in Coordinated Universal Time (UTC) at which the job was created. The value is provided in full ISO 8601 format (`YYYY-MM-DDThh:mm:ss.sTZD`). + :attr str updated: (optional) The date and time in Coordinated Universal Time (UTC) at which the job was last updated by the service. The value is provided in full ISO 8601 format (`YYYY-MM-DDThh:mm:ss.sTZD`). **Note:** This field is returned only when you list information about a specific or all existing jobs. + :attr str url: (optional) The URL to use to request information about the job with the `GET /v1/recognitions/{id}` method. **Note:** This field is returned only when you create a new job. + :attr str user_token: (optional) The user token associated with a job that was created with a callback URL and a user token. **Note:** This field can be returned only when you list information about all existing jobs. + :attr list[SpeechRecognitionResults] results: (optional) If the status is `completed`, the results of the recognition request as an array that includes a single instance of a `SpeechRecognitionResults` object. **Note:** This field can be returned only when you list information about a specific existing job. + :attr list[str] warnings: (optional) An array of warning messages about invalid query parameters included with the request. 
Each warning includes a descriptive message and a list of invalid argument strings, for example, `"unexpected query parameter 'user_token', query parameter 'callback_url' was not specified"`. The request succeeds despite the warnings. **Note:** This field can be returned only when you create a new job. + """ + + def __init__(self, + id, + status, + created, + updated=None, + url=None, + user_token=None, + results=None, + warnings=None): + """ + Initialize a RecognitionJob object. + + :param str id: The ID of the job. + :param str status: The current status of the job: * `waiting`: The service is preparing the job for processing. The service returns this status when the job is initially created or when it is waiting for capacity to process the job. The job remains in this state until the service has the capacity to begin processing it. * `processing`: The service is actively processing the job. * `completed`: The service has finished processing the job. If the job specified a callback URL and the event `recognitions.completed_with_results`, the service sent the results with the callback notification; otherwise, you must retrieve the results by checking the individual job. * `failed`: The job failed. + :param str created: The date and time in Coordinated Universal Time (UTC) at which the job was created. The value is provided in full ISO 8601 format (`YYYY-MM-DDThh:mm:ss.sTZD`). + :param str updated: (optional) The date and time in Coordinated Universal Time (UTC) at which the job was last updated by the service. The value is provided in full ISO 8601 format (`YYYY-MM-DDThh:mm:ss.sTZD`). **Note:** This field is returned only when you list information about a specific or all existing jobs. + :param str url: (optional) The URL to use to request information about the job with the `GET /v1/recognitions/{id}` method. **Note:** This field is returned only when you create a new job. 
+ :param str user_token: (optional) The user token associated with a job that was created with a callback URL and a user token. **Note:** This field can be returned only when you list information about all existing jobs. + :param list[SpeechRecognitionResults] results: (optional) If the status is `completed`, the results of the recognition request as an array that includes a single instance of a `SpeechRecognitionResults` object. **Note:** This field can be returned only when you list information about a specific existing job. + :param list[str] warnings: (optional) An array of warning messages about invalid query parameters included with the request. Each warning includes a descriptive message and a list of invalid argument strings, for example, `"unexpected query parameter 'user_token', query parameter 'callback_url' was not specified"`. The request succeeds despite the warnings. **Note:** This field can be returned only when you create a new job. + """ + self.id = id + self.status = status + self.created = created + self.updated = updated + self.url = url + self.user_token = user_token + self.results = results + self.warnings = warnings + + @classmethod + def _from_dict(cls, _dict): + """Initialize a RecognitionJob object from a json dictionary.""" + args = {} + if 'id' in _dict: + args['id'] = _dict['id'] + else: + raise ValueError( + 'Required property \'id\' not present in RecognitionJob JSON') + if 'status' in _dict: + args['status'] = _dict['status'] + else: + raise ValueError( + 'Required property \'status\' not present in RecognitionJob JSON' + ) + if 'created' in _dict: + args['created'] = _dict['created'] + else: + raise ValueError( + 'Required property \'created\' not present in RecognitionJob JSON' + ) + if 'updated' in _dict: + args['updated'] = _dict['updated'] + if 'url' in _dict: + args['url'] = _dict['url'] + if 'user_token' in _dict: + args['user_token'] = _dict['user_token'] + if 'results' in _dict: + args['results'] = [ + 
SpeechRecognitionResults._from_dict(x) + for x in _dict['results'] + ] + if 'warnings' in _dict: + args['warnings'] = _dict['warnings'] + return cls(**args) + + def _to_dict(self): + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'id') and self.id is not None: + _dict['id'] = self.id + if hasattr(self, 'status') and self.status is not None: + _dict['status'] = self.status + if hasattr(self, 'created') and self.created is not None: + _dict['created'] = self.created + if hasattr(self, 'updated') and self.updated is not None: + _dict['updated'] = self.updated + if hasattr(self, 'url') and self.url is not None: + _dict['url'] = self.url + if hasattr(self, 'user_token') and self.user_token is not None: + _dict['user_token'] = self.user_token + if hasattr(self, 'results') and self.results is not None: + _dict['results'] = [x._to_dict() for x in self.results] + if hasattr(self, 'warnings') and self.warnings is not None: + _dict['warnings'] = self.warnings + return _dict + + def __str__(self): + """Return a `str` version of this RecognitionJob object.""" + return json.dumps(self._to_dict(), indent=2) + + def __eq__(self, other): + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other): + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class RecognitionJobs(object): + """ + RecognitionJobs. + + :attr list[RecognitionJob] recognitions: An array of objects that provides the status for each of the user's current jobs. The array is empty if the user has no current jobs. + """ + + def __init__(self, recognitions): + """ + Initialize a RecognitionJobs object. + + :param list[RecognitionJob] recognitions: An array of objects that provides the status for each of the user's current jobs. The array is empty if the user has no current jobs. 
+ """ + self.recognitions = recognitions + + @classmethod + def _from_dict(cls, _dict): + """Initialize a RecognitionJobs object from a json dictionary.""" + args = {} + if 'recognitions' in _dict: + args['recognitions'] = [ + RecognitionJob._from_dict(x) for x in _dict['recognitions'] + ] + else: + raise ValueError( + 'Required property \'recognitions\' not present in RecognitionJobs JSON' + ) + return cls(**args) + + def _to_dict(self): + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'recognitions') and self.recognitions is not None: + _dict['recognitions'] = [x._to_dict() for x in self.recognitions] + return _dict + + def __str__(self): + """Return a `str` version of this RecognitionJobs object.""" + return json.dumps(self._to_dict(), indent=2) + + def __eq__(self, other): + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other): + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class RegisterStatus(object): + """ + RegisterStatus. + + :attr str status: The current status of the job: * `created` if the callback URL was successfully white-listed as a result of the call. * `already created` if the URL was already white-listed. + :attr str url: The callback URL that is successfully registered. + """ + + def __init__(self, status, url): + """ + Initialize a RegisterStatus object. + + :param str status: The current status of the job: * `created` if the callback URL was successfully white-listed as a result of the call. * `already created` if the URL was already white-listed. + :param str url: The callback URL that is successfully registered. 
+ """ + self.status = status + self.url = url + + @classmethod + def _from_dict(cls, _dict): + """Initialize a RegisterStatus object from a json dictionary.""" + args = {} + if 'status' in _dict: + args['status'] = _dict['status'] + else: + raise ValueError( + 'Required property \'status\' not present in RegisterStatus JSON' + ) + if 'url' in _dict: + args['url'] = _dict['url'] + else: + raise ValueError( + 'Required property \'url\' not present in RegisterStatus JSON') + return cls(**args) + + def _to_dict(self): + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'status') and self.status is not None: + _dict['status'] = self.status + if hasattr(self, 'url') and self.url is not None: + _dict['url'] = self.url + return _dict + + def __str__(self): + """Return a `str` version of this RegisterStatus object.""" + return json.dumps(self._to_dict(), indent=2) + + def __eq__(self, other): + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other): + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class SpeakerLabelsResult(object): + """ + SpeakerLabelsResult. + + :attr float _from: The start time of a word from the transcript. The value matches the start time of a word from the `timestamps` array. + :attr float to: The end time of a word from the transcript. The value matches the end time of a word from the `timestamps` array. + :attr int speaker: The numeric identifier that the service assigns to a speaker from the audio. Speaker IDs begin at `0` initially but can evolve and change across interim results (if supported by the method) and between interim and final results as the service processes the audio. They are not guaranteed to be sequential, contiguous, or ordered. 
+ :attr float confidence: A score that indicates the service's confidence in its identification of the speaker in the range of 0 to 1. + :attr bool final_results: An indication of whether the service might further change word and speaker-label results. A value of `true` means that the service guarantees not to send any further updates for the current or any preceding results; `false` means that the service might send further updates to the results. + """ + + def __init__(self, _from, to, speaker, confidence, final_results): + """ + Initialize a SpeakerLabelsResult object. + + :param float _from: The start time of a word from the transcript. The value matches the start time of a word from the `timestamps` array. + :param float to: The end time of a word from the transcript. The value matches the end time of a word from the `timestamps` array. + :param int speaker: The numeric identifier that the service assigns to a speaker from the audio. Speaker IDs begin at `0` initially but can evolve and change across interim results (if supported by the method) and between interim and final results as the service processes the audio. They are not guaranteed to be sequential, contiguous, or ordered. + :param float confidence: A score that indicates the service's confidence in its identification of the speaker in the range of 0 to 1. + :param bool final_results: An indication of whether the service might further change word and speaker-label results. A value of `true` means that the service guarantees not to send any further updates for the current or any preceding results; `false` means that the service might send further updates to the results. 
+ """ + self._from = _from + self.to = to + self.speaker = speaker + self.confidence = confidence + self.final_results = final_results + + @classmethod + def _from_dict(cls, _dict): + """Initialize a SpeakerLabelsResult object from a json dictionary.""" + args = {} + if 'from' in _dict: + args['_from'] = _dict['from'] + else: + raise ValueError( + 'Required property \'from\' not present in SpeakerLabelsResult JSON' + ) + if 'to' in _dict: + args['to'] = _dict['to'] + else: + raise ValueError( + 'Required property \'to\' not present in SpeakerLabelsResult JSON' + ) + if 'speaker' in _dict: + args['speaker'] = _dict['speaker'] + else: + raise ValueError( + 'Required property \'speaker\' not present in SpeakerLabelsResult JSON' + ) + if 'confidence' in _dict: + args['confidence'] = _dict['confidence'] + else: + raise ValueError( + 'Required property \'confidence\' not present in SpeakerLabelsResult JSON' + ) + if 'final' in _dict: + args['final_results'] = _dict['final'] + else: + raise ValueError( + 'Required property \'final\' not present in SpeakerLabelsResult JSON' + ) + return cls(**args) + + def _to_dict(self): + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, '_from') and self._from is not None: + _dict['from'] = self._from + if hasattr(self, 'to') and self.to is not None: + _dict['to'] = self.to + if hasattr(self, 'speaker') and self.speaker is not None: + _dict['speaker'] = self.speaker + if hasattr(self, 'confidence') and self.confidence is not None: + _dict['confidence'] = self.confidence + if hasattr(self, 'final_results') and self.final_results is not None: + _dict['final'] = self.final_results + return _dict + + def __str__(self): + """Return a `str` version of this SpeakerLabelsResult object.""" + return json.dumps(self._to_dict(), indent=2) + + def __eq__(self, other): + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return 
self.__dict__ == other.__dict__ + + def __ne__(self, other): + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class SpeechModel(object): + """ + SpeechModel. + + :attr str name: The name of the model for use as an identifier in calls to the service (for example, `en-US_BroadbandModel`). + :attr str language: The language identifier for the model (for example, `en-US`). + :attr int rate: The sampling rate (minimum acceptable rate for audio) used by the model in Hertz. + :attr str url: The URI for the model. + :attr SupportedFeatures supported_features: Describes the additional service features supported with the model. + :attr str description: Brief description of the model. + :attr str sessions: (optional) The URI for the model for use with the `POST /v1/sessions` method. (Returned only for requests for a single model with the `GET /v1/models/{model_id}` method.). + """ + + def __init__(self, + name, + language, + rate, + url, + supported_features, + description, + sessions=None): + """ + Initialize a SpeechModel object. + + :param str name: The name of the model for use as an identifier in calls to the service (for example, `en-US_BroadbandModel`). + :param str language: The language identifier for the model (for example, `en-US`). + :param int rate: The sampling rate (minimum acceptable rate for audio) used by the model in Hertz. + :param str url: The URI for the model. + :param SupportedFeatures supported_features: Describes the additional service features supported with the model. + :param str description: Brief description of the model. + :param str sessions: (optional) The URI for the model for use with the `POST /v1/sessions` method. (Returned only for requests for a single model with the `GET /v1/models/{model_id}` method.). 
+ """ + self.name = name + self.language = language + self.rate = rate + self.url = url + self.supported_features = supported_features + self.description = description + self.sessions = sessions + + @classmethod + def _from_dict(cls, _dict): + """Initialize a SpeechModel object from a json dictionary.""" + args = {} + if 'name' in _dict: + args['name'] = _dict['name'] + else: + raise ValueError( + 'Required property \'name\' not present in SpeechModel JSON') + if 'language' in _dict: + args['language'] = _dict['language'] + else: + raise ValueError( + 'Required property \'language\' not present in SpeechModel JSON' + ) + if 'rate' in _dict: + args['rate'] = _dict['rate'] + else: + raise ValueError( + 'Required property \'rate\' not present in SpeechModel JSON') + if 'url' in _dict: + args['url'] = _dict['url'] + else: + raise ValueError( + 'Required property \'url\' not present in SpeechModel JSON') + if 'supported_features' in _dict: + args['supported_features'] = SupportedFeatures._from_dict( + _dict['supported_features']) + else: + raise ValueError( + 'Required property \'supported_features\' not present in SpeechModel JSON' + ) + if 'description' in _dict: + args['description'] = _dict['description'] + else: + raise ValueError( + 'Required property \'description\' not present in SpeechModel JSON' + ) + if 'sessions' in _dict: + args['sessions'] = _dict['sessions'] + return cls(**args) + + def _to_dict(self): + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'name') and self.name is not None: + _dict['name'] = self.name + if hasattr(self, 'language') and self.language is not None: + _dict['language'] = self.language + if hasattr(self, 'rate') and self.rate is not None: + _dict['rate'] = self.rate + if hasattr(self, 'url') and self.url is not None: + _dict['url'] = self.url + if hasattr( + self, + 'supported_features') and self.supported_features is not None: + _dict['supported_features'] = 
self.supported_features._to_dict() + if hasattr(self, 'description') and self.description is not None: + _dict['description'] = self.description + if hasattr(self, 'sessions') and self.sessions is not None: + _dict['sessions'] = self.sessions + return _dict + + def __str__(self): + """Return a `str` version of this SpeechModel object.""" + return json.dumps(self._to_dict(), indent=2) + + def __eq__(self, other): + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other): + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class SpeechModels(object): + """ + SpeechModels. + + :attr list[SpeechModel] models: Information about each available model. + """ + + def __init__(self, models): + """ + Initialize a SpeechModels object. + + :param list[SpeechModel] models: Information about each available model. 
+ """ + self.models = models + + @classmethod + def _from_dict(cls, _dict): + """Initialize a SpeechModels object from a json dictionary.""" + args = {} + if 'models' in _dict: + args['models'] = [ + SpeechModel._from_dict(x) for x in _dict['models'] + ] + else: + raise ValueError( + 'Required property \'models\' not present in SpeechModels JSON' + ) + return cls(**args) + + def _to_dict(self): + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'models') and self.models is not None: + _dict['models'] = [x._to_dict() for x in self.models] + return _dict + + def __str__(self): + """Return a `str` version of this SpeechModels object.""" + return json.dumps(self._to_dict(), indent=2) + + def __eq__(self, other): + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other): + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class SpeechRecognitionAlternative(object): + """ + SpeechRecognitionAlternative. + + :attr str transcript: A transcription of the audio. + :attr float confidence: (optional) A score that indicates the service's confidence in the transcript in the range of 0 to 1. Available only for the best alternative and only in results marked as final. + :attr list[str] timestamps: (optional) Time alignments for each word from the transcript as a list of lists. Each inner list consists of three elements: the word followed by its start and end time in seconds. Example: `[["hello",0.0,1.2],["world",1.2,2.5]]`. Available only for the best alternative. + :attr list[str] word_confidence: (optional) A confidence score for each word of the transcript as a list of lists. Each inner list consists of two elements: the word and its confidence score in the range of 0 to 1. Example: `[["hello",0.95],["world",0.866]]`. 
Available only for the best alternative and only in results marked as final. + """ + + def __init__(self, + transcript, + confidence=None, + timestamps=None, + word_confidence=None): + """ + Initialize a SpeechRecognitionAlternative object. + + :param str transcript: A transcription of the audio. + :param float confidence: (optional) A score that indicates the service's confidence in the transcript in the range of 0 to 1. Available only for the best alternative and only in results marked as final. + :param list[str] timestamps: (optional) Time alignments for each word from the transcript as a list of lists. Each inner list consists of three elements: the word followed by its start and end time in seconds. Example: `[["hello",0.0,1.2],["world",1.2,2.5]]`. Available only for the best alternative. + :param list[str] word_confidence: (optional) A confidence score for each word of the transcript as a list of lists. Each inner list consists of two elements: the word and its confidence score in the range of 0 to 1. Example: `[["hello",0.95],["world",0.866]]`. Available only for the best alternative and only in results marked as final. 
+ """ + self.transcript = transcript + self.confidence = confidence + self.timestamps = timestamps + self.word_confidence = word_confidence + + @classmethod + def _from_dict(cls, _dict): + """Initialize a SpeechRecognitionAlternative object from a json dictionary.""" + args = {} + if 'transcript' in _dict: + args['transcript'] = _dict['transcript'] + else: + raise ValueError( + 'Required property \'transcript\' not present in SpeechRecognitionAlternative JSON' + ) + if 'confidence' in _dict: + args['confidence'] = _dict['confidence'] + if 'timestamps' in _dict: + args['timestamps'] = _dict['timestamps'] + if 'word_confidence' in _dict: + args['word_confidence'] = _dict['word_confidence'] + return cls(**args) + + def _to_dict(self): + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'transcript') and self.transcript is not None: + _dict['transcript'] = self.transcript + if hasattr(self, 'confidence') and self.confidence is not None: + _dict['confidence'] = self.confidence + if hasattr(self, 'timestamps') and self.timestamps is not None: + _dict['timestamps'] = self.timestamps + if hasattr(self, + 'word_confidence') and self.word_confidence is not None: + _dict['word_confidence'] = self.word_confidence + return _dict + + def __str__(self): + """Return a `str` version of this SpeechRecognitionAlternative object.""" + return json.dumps(self._to_dict(), indent=2) + + def __eq__(self, other): + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other): + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class SpeechRecognitionResult(object): + """ + SpeechRecognitionResult. + + :attr bool final_results: An indication of whether the transcription results are final. 
If `true`, the results for this utterance are not updated further; no additional results are sent for a `result_index` once its results are indicated as final. + :attr list[SpeechRecognitionAlternative] alternatives: An array of alternative transcripts. The `alternatives` array can include additional requested output such as word confidence or timestamps. + :attr dict keywords_result: (optional) A dictionary (or associative array) whose keys are the strings specified for `keywords` if both that parameter and `keywords_threshold` are specified. A keyword for which no matches are found is omitted from the array. The array is omitted if no keywords are found. + :attr list[WordAlternativeResults] word_alternatives: (optional) An array of alternative hypotheses found for words of the input audio if a `word_alternatives_threshold` is specified. + """ + + def __init__(self, + final_results, + alternatives, + keywords_result=None, + word_alternatives=None): + """ + Initialize a SpeechRecognitionResult object. + + :param bool final_results: An indication of whether the transcription results are final. If `true`, the results for this utterance are not updated further; no additional results are sent for a `result_index` once its results are indicated as final. + :param list[SpeechRecognitionAlternative] alternatives: An array of alternative transcripts. The `alternatives` array can include additional requested output such as word confidence or timestamps. + :param dict keywords_result: (optional) A dictionary (or associative array) whose keys are the strings specified for `keywords` if both that parameter and `keywords_threshold` are specified. A keyword for which no matches are found is omitted from the array. The array is omitted if no keywords are found. + :param list[WordAlternativeResults] word_alternatives: (optional) An array of alternative hypotheses found for words of the input audio if a `word_alternatives_threshold` is specified. 
+ """ + self.final_results = final_results + self.alternatives = alternatives + self.keywords_result = keywords_result + self.word_alternatives = word_alternatives + + @classmethod + def _from_dict(cls, _dict): + """Initialize a SpeechRecognitionResult object from a json dictionary.""" + args = {} + if 'final' in _dict: + args['final_results'] = _dict['final'] + else: + raise ValueError( + 'Required property \'final\' not present in SpeechRecognitionResult JSON' + ) + if 'alternatives' in _dict: + args['alternatives'] = [ + SpeechRecognitionAlternative._from_dict(x) + for x in _dict['alternatives'] + ] + else: + raise ValueError( + 'Required property \'alternatives\' not present in SpeechRecognitionResult JSON' + ) + if 'keywords_result' in _dict: + args['keywords_result'] = _dict['keywords_result'] + if 'word_alternatives' in _dict: + args['word_alternatives'] = [ + WordAlternativeResults._from_dict(x) + for x in _dict['word_alternatives'] + ] + return cls(**args) + + def _to_dict(self): + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'final_results') and self.final_results is not None: + _dict['final'] = self.final_results + if hasattr(self, 'alternatives') and self.alternatives is not None: + _dict['alternatives'] = [x._to_dict() for x in self.alternatives] + if hasattr(self, + 'keywords_result') and self.keywords_result is not None: + _dict['keywords_result'] = self.keywords_result + if hasattr(self, + 'word_alternatives') and self.word_alternatives is not None: + _dict['word_alternatives'] = [ + x._to_dict() for x in self.word_alternatives + ] + return _dict + + def __str__(self): + """Return a `str` version of this SpeechRecognitionResult object.""" + return json.dumps(self._to_dict(), indent=2) + + def __eq__(self, other): + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, 
other): + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class SpeechRecognitionResults(object): + """ + SpeechRecognitionResults. + + :attr list[SpeechRecognitionResult] results: (optional) An array that can include interim and final results (interim results are returned only if supported by the method). Final results are guaranteed not to change; interim results might be replaced by further interim results and final results. The service periodically sends updates to the results list; the `result_index` is set to the lowest index in the array that has changed; it is incremented for new results. + :attr int result_index: (optional) An index that indicates a change point in the `results` array. The service increments the index only for additional results that it sends for new audio for the same request. + :attr list[SpeakerLabelsResult] speaker_labels: (optional) An array that identifies which words were spoken by which speakers in a multi-person exchange. Returned in the response only if `speaker_labels` is `true`. When interim results are also requested for methods that support them, it is possible for a `SpeechRecognitionResults` object to include only the `speaker_labels` field. + :attr list[str] warnings: (optional) An array of warning messages associated with the request: * Warnings for invalid query parameters or JSON fields can include a descriptive message and a list of invalid argument strings, for example, `"Unknown arguments:"` or `"Unknown url query arguments:"` followed by a list of the form `"invalid_arg_1, invalid_arg_2."` * The following warning is returned if the request passes a custom model that is based on an older version of a base model for which an updated version is available: `"Using previous version of base model, because your custom model has been built with it. Please note that this version will be supported only for a limited time. 
Consider updating your custom model to the new base model. If you do not do that you will be automatically switched to base model when you used the non-updated custom model."` In both cases, the request succeeds despite the warnings. + """ + + def __init__(self, + results=None, + result_index=None, + speaker_labels=None, + warnings=None): + """ + Initialize a SpeechRecognitionResults object. + + :param list[SpeechRecognitionResult] results: (optional) An array that can include interim and final results (interim results are returned only if supported by the method). Final results are guaranteed not to change; interim results might be replaced by further interim results and final results. The service periodically sends updates to the results list; the `result_index` is set to the lowest index in the array that has changed; it is incremented for new results. + :param int result_index: (optional) An index that indicates a change point in the `results` array. The service increments the index only for additional results that it sends for new audio for the same request. + :param list[SpeakerLabelsResult] speaker_labels: (optional) An array that identifies which words were spoken by which speakers in a multi-person exchange. Returned in the response only if `speaker_labels` is `true`. When interim results are also requested for methods that support them, it is possible for a `SpeechRecognitionResults` object to include only the `speaker_labels` field. 
+ :param list[str] warnings: (optional) An array of warning messages associated with the request: * Warnings for invalid query parameters or JSON fields can include a descriptive message and a list of invalid argument strings, for example, `"Unknown arguments:"` or `"Unknown url query arguments:"` followed by a list of the form `"invalid_arg_1, invalid_arg_2."` * The following warning is returned if the request passes a custom model that is based on an older version of a base model for which an updated version is available: `"Using previous version of base model, because your custom model has been built with it. Please note that this version will be supported only for a limited time. Consider updating your custom model to the new base model. If you do not do that you will be automatically switched to base model when you used the non-updated custom model."` In both cases, the request succeeds despite the warnings. + """ + self.results = results + self.result_index = result_index + self.speaker_labels = speaker_labels + self.warnings = warnings + + @classmethod + def _from_dict(cls, _dict): + """Initialize a SpeechRecognitionResults object from a json dictionary.""" + args = {} + if 'results' in _dict: + args['results'] = [ + SpeechRecognitionResult._from_dict(x) for x in _dict['results'] + ] + if 'result_index' in _dict: + args['result_index'] = _dict['result_index'] + if 'speaker_labels' in _dict: + args['speaker_labels'] = [ + SpeakerLabelsResult._from_dict(x) + for x in _dict['speaker_labels'] + ] + if 'warnings' in _dict: + args['warnings'] = _dict['warnings'] + return cls(**args) + + def _to_dict(self): + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'results') and self.results is not None: + _dict['results'] = [x._to_dict() for x in self.results] + if hasattr(self, 'result_index') and self.result_index is not None: + _dict['result_index'] = self.result_index + if hasattr(self, 'speaker_labels') and self.speaker_labels 
is not None: + _dict['speaker_labels'] = [ + x._to_dict() for x in self.speaker_labels + ] + if hasattr(self, 'warnings') and self.warnings is not None: + _dict['warnings'] = self.warnings + return _dict + + def __str__(self): + """Return a `str` version of this SpeechRecognitionResults object.""" + return json.dumps(self._to_dict(), indent=2) + + def __eq__(self, other): + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other): + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class SupportedFeatures(object): + """ + SupportedFeatures. + + :attr bool custom_language_model: Indicates whether the customization interface can be used to create a custom language model based on the language model. + :attr bool speaker_labels: Indicates whether the `speaker_labels` parameter can be used with the language model. + """ + + def __init__(self, custom_language_model, speaker_labels): + """ + Initialize a SupportedFeatures object. + + :param bool custom_language_model: Indicates whether the customization interface can be used to create a custom language model based on the language model. + :param bool speaker_labels: Indicates whether the `speaker_labels` parameter can be used with the language model. 
+ """ + self.custom_language_model = custom_language_model + self.speaker_labels = speaker_labels + + @classmethod + def _from_dict(cls, _dict): + """Initialize a SupportedFeatures object from a json dictionary.""" + args = {} + if 'custom_language_model' in _dict: + args['custom_language_model'] = _dict['custom_language_model'] + else: + raise ValueError( + 'Required property \'custom_language_model\' not present in SupportedFeatures JSON' + ) + if 'speaker_labels' in _dict: + args['speaker_labels'] = _dict['speaker_labels'] + else: + raise ValueError( + 'Required property \'speaker_labels\' not present in SupportedFeatures JSON' + ) + return cls(**args) + + def _to_dict(self): + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'custom_language_model' + ) and self.custom_language_model is not None: + _dict['custom_language_model'] = self.custom_language_model + if hasattr(self, 'speaker_labels') and self.speaker_labels is not None: + _dict['speaker_labels'] = self.speaker_labels + return _dict + + def __str__(self): + """Return a `str` version of this SupportedFeatures object.""" + return json.dumps(self._to_dict(), indent=2) + + def __eq__(self, other): + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other): + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class Word(object): + """ + Word. + + :attr str word: A word from the custom model's words resource. The spelling of the word is used to train the model. + :attr list[str] sounds_like: An array of pronunciations for the word. The array can include the sounds-like pronunciation automatically generated by the service if none is provided for the word; the service adds this pronunciation when it finishes processing the word. 
+ :attr str display_as: The spelling of the word that the service uses to display the word in a transcript. The field contains an empty string if no display-as value is provided for the word, in which case the word is displayed as it is spelled. + :attr int count: A sum of the number of times the word is found across all corpora. For example, if the word occurs five times in one corpus and seven times in another, its count is `12`. If you add a custom word to a model before it is added by any corpora, the count begins at `1`; if the word is added from a corpus first and later modified, the count reflects only the number of times it is found in corpora. + :attr list[str] source: An array of sources that describes how the word was added to the custom model's words resource. For OOV words added from a corpus, includes the name of the corpus; if the word was added by multiple corpora, the names of all corpora are listed. If the word was modified or added by the user directly, the field includes the string `user`. + :attr list[WordError] error: (optional) If the service discovered one or more problems that you need to correct for the word's definition, an array that describes each of the errors. + """ + + def __init__(self, + word, + sounds_like, + display_as, + count, + source, + error=None): + """ + Initialize a Word object. + + :param str word: A word from the custom model's words resource. The spelling of the word is used to train the model. + :param list[str] sounds_like: An array of pronunciations for the word. The array can include the sounds-like pronunciation automatically generated by the service if none is provided for the word; the service adds this pronunciation when it finishes processing the word. + :param str display_as: The spelling of the word that the service uses to display the word in a transcript. The field contains an empty string if no display-as value is provided for the word, in which case the word is displayed as it is spelled. 
+ :param int count: A sum of the number of times the word is found across all corpora. For example, if the word occurs five times in one corpus and seven times in another, its count is `12`. If you add a custom word to a model before it is added by any corpora, the count begins at `1`; if the word is added from a corpus first and later modified, the count reflects only the number of times it is found in corpora. + :param list[str] source: An array of sources that describes how the word was added to the custom model's words resource. For OOV words added from a corpus, includes the name of the corpus; if the word was added by multiple corpora, the names of all corpora are listed. If the word was modified or added by the user directly, the field includes the string `user`. + :param list[WordError] error: (optional) If the service discovered one or more problems that you need to correct for the word's definition, an array that describes each of the errors. + """ + self.word = word + self.sounds_like = sounds_like + self.display_as = display_as + self.count = count + self.source = source + self.error = error + + @classmethod + def _from_dict(cls, _dict): + """Initialize a Word object from a json dictionary.""" + args = {} + if 'word' in _dict: + args['word'] = _dict['word'] + else: + raise ValueError( + 'Required property \'word\' not present in Word JSON') + if 'sounds_like' in _dict: + args['sounds_like'] = _dict['sounds_like'] + else: + raise ValueError( + 'Required property \'sounds_like\' not present in Word JSON') + if 'display_as' in _dict: + args['display_as'] = _dict['display_as'] + else: + raise ValueError( + 'Required property \'display_as\' not present in Word JSON') + if 'count' in _dict: + args['count'] = _dict['count'] + else: + raise ValueError( + 'Required property \'count\' not present in Word JSON') + if 'source' in _dict: + args['source'] = _dict['source'] + else: + raise ValueError( + 'Required property \'source\' not present in Word JSON') + if 
'error' in _dict: + args['error'] = [WordError._from_dict(x) for x in _dict['error']] + return cls(**args) + + def _to_dict(self): + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'word') and self.word is not None: + _dict['word'] = self.word + if hasattr(self, 'sounds_like') and self.sounds_like is not None: + _dict['sounds_like'] = self.sounds_like + if hasattr(self, 'display_as') and self.display_as is not None: + _dict['display_as'] = self.display_as + if hasattr(self, 'count') and self.count is not None: + _dict['count'] = self.count + if hasattr(self, 'source') and self.source is not None: + _dict['source'] = self.source + if hasattr(self, 'error') and self.error is not None: + _dict['error'] = [x._to_dict() for x in self.error] + return _dict + + def __str__(self): + """Return a `str` version of this Word object.""" + return json.dumps(self._to_dict(), indent=2) + + def __eq__(self, other): + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other): + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class WordAlternativeResult(object): + """ + WordAlternativeResult. + + :attr float confidence: A confidence score for the word alternative hypothesis in the range of 0 to 1. + :attr str word: An alternative hypothesis for a word from the input audio. + """ + + def __init__(self, confidence, word): + """ + Initialize a WordAlternativeResult object. + + :param float confidence: A confidence score for the word alternative hypothesis in the range of 0 to 1. + :param str word: An alternative hypothesis for a word from the input audio. 
+ """ + self.confidence = confidence + self.word = word + + @classmethod + def _from_dict(cls, _dict): + """Initialize a WordAlternativeResult object from a json dictionary.""" + args = {} + if 'confidence' in _dict: + args['confidence'] = _dict['confidence'] + else: + raise ValueError( + 'Required property \'confidence\' not present in WordAlternativeResult JSON' + ) + if 'word' in _dict: + args['word'] = _dict['word'] + else: + raise ValueError( + 'Required property \'word\' not present in WordAlternativeResult JSON' + ) + return cls(**args) + + def _to_dict(self): + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'confidence') and self.confidence is not None: + _dict['confidence'] = self.confidence + if hasattr(self, 'word') and self.word is not None: + _dict['word'] = self.word + return _dict + + def __str__(self): + """Return a `str` version of this WordAlternativeResult object.""" + return json.dumps(self._to_dict(), indent=2) + + def __eq__(self, other): + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other): + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class WordAlternativeResults(object): + """ + WordAlternativeResults. + + :attr float start_time: The start time in seconds of the word from the input audio that corresponds to the word alternatives. + :attr float end_time: The end time in seconds of the word from the input audio that corresponds to the word alternatives. + :attr list[WordAlternativeResult] alternatives: An array of alternative hypotheses for a word from the input audio. + """ + + def __init__(self, start_time, end_time, alternatives): + """ + Initialize a WordAlternativeResults object. 
+ + :param float start_time: The start time in seconds of the word from the input audio that corresponds to the word alternatives. + :param float end_time: The end time in seconds of the word from the input audio that corresponds to the word alternatives. + :param list[WordAlternativeResult] alternatives: An array of alternative hypotheses for a word from the input audio. + """ + self.start_time = start_time + self.end_time = end_time + self.alternatives = alternatives + + @classmethod + def _from_dict(cls, _dict): + """Initialize a WordAlternativeResults object from a json dictionary.""" + args = {} + if 'start_time' in _dict: + args['start_time'] = _dict['start_time'] + else: + raise ValueError( + 'Required property \'start_time\' not present in WordAlternativeResults JSON' + ) + if 'end_time' in _dict: + args['end_time'] = _dict['end_time'] + else: + raise ValueError( + 'Required property \'end_time\' not present in WordAlternativeResults JSON' + ) + if 'alternatives' in _dict: + args['alternatives'] = [ + WordAlternativeResult._from_dict(x) + for x in _dict['alternatives'] + ] + else: + raise ValueError( + 'Required property \'alternatives\' not present in WordAlternativeResults JSON' + ) + return cls(**args) + + def _to_dict(self): + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'start_time') and self.start_time is not None: + _dict['start_time'] = self.start_time + if hasattr(self, 'end_time') and self.end_time is not None: + _dict['end_time'] = self.end_time + if hasattr(self, 'alternatives') and self.alternatives is not None: + _dict['alternatives'] = [x._to_dict() for x in self.alternatives] + return _dict + + def __str__(self): + """Return a `str` version of this WordAlternativeResults object.""" + return json.dumps(self._to_dict(), indent=2) + + def __eq__(self, other): + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return 
self.__dict__ == other.__dict__ + + def __ne__(self, other): + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class WordError(object): + """ + WordError. + + :attr str element: A key-value pair that describes an error associated with the definition of a word in the words resource. Each pair has the format `"element": "message"`, where `element` is the aspect of the definition that caused the problem and `message` describes the problem. The following example describes a problem with one of the word's sounds-like definitions: `"sounds_like_string": "Numbers are not allowed in sounds-like. You can try for example 'suggested_string'."` You must correct the error before you can train the model. + """ + + def __init__(self, element): + """ + Initialize a WordError object. + + :param str element: A key-value pair that describes an error associated with the definition of a word in the words resource. Each pair has the format `"element": "message"`, where `element` is the aspect of the definition that caused the problem and `message` describes the problem. The following example describes a problem with one of the word's sounds-like definitions: `"sounds_like_string": "Numbers are not allowed in sounds-like. You can try for example 'suggested_string'."` You must correct the error before you can train the model. 
+ """ + self.element = element + + @classmethod + def _from_dict(cls, _dict): + """Initialize a WordError object from a json dictionary.""" + args = {} + if 'element' in _dict: + args['element'] = _dict['element'] + else: + raise ValueError( + 'Required property \'element\' not present in WordError JSON') + return cls(**args) + + def _to_dict(self): + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'element') and self.element is not None: + _dict['element'] = self.element + return _dict + + def __str__(self): + """Return a `str` version of this WordError object.""" + return json.dumps(self._to_dict(), indent=2) + + def __eq__(self, other): + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other): + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class Words(object): + """ + Words. + + :attr list[Word] words: Information about each word in the custom model's words resource. The array is empty if the custom model has no words. + """ + + def __init__(self, words): + """ + Initialize a Words object. + + :param list[Word] words: Information about each word in the custom model's words resource. The array is empty if the custom model has no words. 
+ """ + self.words = words + + @classmethod + def _from_dict(cls, _dict): + """Initialize a Words object from a json dictionary.""" + args = {} + if 'words' in _dict: + args['words'] = [Word._from_dict(x) for x in _dict['words']] + else: + raise ValueError( + 'Required property \'words\' not present in Words JSON') + return cls(**args) + + def _to_dict(self): + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'words') and self.words is not None: + _dict['words'] = [x._to_dict() for x in self.words] + return _dict + + def __str__(self): + """Return a `str` version of this Words object.""" + return json.dumps(self._to_dict(), indent=2) + + def __eq__(self, other): + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ - return self.request(method='DELETE', - url=url.format(customization_id, word), - accept_json=True) + def __ne__(self, other): + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other From 0f9c798e00e05443bfae699cfbcb8e4e4f7d014c Mon Sep 17 00:00:00 2001 From: Erika Dsouza Date: Tue, 13 Feb 2018 19:29:25 -0500 Subject: [PATCH 03/45] Add global transaction id and transaction id (#356) --- .../test_integration_speech_to_text_v1.py | 4 ++++ watson_developer_cloud/watson_service.py | 21 +++++++++++++++---- 2 files changed, 21 insertions(+), 4 deletions(-) diff --git a/test/integration/test_integration_speech_to_text_v1.py b/test/integration/test_integration_speech_to_text_v1.py index cc00bf085..651e7eadb 100644 --- a/test/integration/test_integration_speech_to_text_v1.py +++ b/test/integration/test_integration_speech_to_text_v1.py @@ -25,6 +25,10 @@ def test_models(self): assert output is not None model = self.speech_to_text.get_model('ko-KR_BroadbandModel') assert model is not None + try: + self.speech_to_text.get_model('bogus') + except Exception as e: + 
assert 'X-global-transaction-id:' in str(e) def test_create_custom_model(self): current_custom_models = self.speech_to_text.list_language_models() diff --git a/watson_developer_cloud/watson_service.py b/watson_developer_cloud/watson_service.py index c55546942..8f1e8b254 100755 --- a/watson_developer_cloud/watson_service.py +++ b/watson_developer_cloud/watson_service.py @@ -59,16 +59,29 @@ class WatsonApiException(WatsonException): :param int code: The HTTP status code returned. :param str message: A message describing the error. :param dict info: A dictionary of additional information about the error. + :param response httpResponse: response """ - def __init__(self, code, message, info=None): + def __init__(self, code, message, info=None, httpResponse=None): # Call the base class constructor with the parameters it needs super(WatsonApiException, self).__init__(message) self.message = message self.code = code self.info = info + self.httpResponse = httpResponse + self.transactionId = '' + self.globalTransactionId = '' + if httpResponse is not None: + self.transactionId = httpResponse.headers.get('X-DP-Watson-Tran-ID') + self.globalTransactionId = httpResponse.headers.get('X-Global-Transaction-ID') + def __str__(self): - return 'Error: ' + self.message + ', Code: ' + str(self.code) + msg = 'Error: ' + self.message + ', Code: ' + str(self.code) + if (self.transactionId): + msg += ' , X-dp-watson-tran-id: ' + str(self.transactionId) + if (self.globalTransactionId): + msg += ' , X-global-transaction-id: ' + str(self.globalTransactionId) + return msg class WatsonInvalidArgument(WatsonException): @@ -412,7 +425,7 @@ def request(self, method, url, accept_json=False, headers=None, error_message = response_json['statusInfo'] if error_message == 'invalid-api-key': status_code = 401 - raise WatsonApiException(status_code, error_message) + raise WatsonApiException(status_code, error_message, httpResponse=response) return response_json return response else: @@ -423,4 +436,4 @@ 
def request(self, method, url, accept_json=False, headers=None, error_message = self._get_error_message(response) error_info = self._get_error_info(response) raise WatsonApiException(response.status_code, error_message, - error_info) + info=error_info, httpResponse=response) From f8b776858bfb08eb568c743f048774c4c52f610b Mon Sep 17 00:00:00 2001 From: Erika Dsouza Date: Wed, 14 Feb 2018 11:14:58 -0500 Subject: [PATCH 04/45] Reorganize unit tests and add test-header --- examples/authorization_v1.py | 2 ++ .../tone_conversation_integration.v1.py | 2 ++ examples/conversation_v1.py | 1 + examples/discovery_v1.py | 1 + examples/document_conversion_v1.py | 1 + examples/language_translator_v2.py | 1 + examples/natural_language_classifier_v1.py | 1 + examples/natural_language_understanding_v1.py | 1 + examples/personality_insights_v2.py | 1 + examples/personality_insights_v3.py | 1 + examples/speech_to_text_v1.py | 1 + examples/text_to_speech_v1.py | 1 + examples/tone_analyzer_v3.py | 1 + examples/visual_recognition_v3.py | 1 + test/{ => integration}/test_examples.py | 0 .../test_integration_speech_to_text_v1.py | 1 + .../test_integration_text_to_speech_v1.py | 1 + .../test_integration_visual_recognition.py | 1 + test/{ => unit}/__init__.py | 0 test/{ => unit}/test_alchemy_language_v1.py | 0 test/{ => unit}/test_conversation_v1.py | 0 test/{ => unit}/test_discovery_v1.py | 0 test/{ => unit}/test_document_conversion_v1.py | 4 ++-- test/{ => unit}/test_language_translator_v2.py | 0 .../test_natural_language_classifier_v1.py | 2 +- .../test_natural_language_understanding.py | 0 test/{ => unit}/test_personality_insights_v2.py | 2 +- test/{ => unit}/test_personality_insights_v3.py | 16 ++++++++-------- test/{ => unit}/test_retrieve_and_rank_v1.py | 6 +++--- test/{ => unit}/test_speech_to_text_v1.py | 8 ++++---- test/{ => unit}/test_text_to_speech_v1.py | 0 test/{ => unit}/test_tone_analyzer_v3.py | 14 +++++++------- test/{ => unit}/test_tradeoff_analytics_v1.py | 16 
++++++++-------- test/{ => unit}/test_visual_recognition_v3.py | 8 ++++---- test/{ => unit}/test_watson_service.py | 0 watson_developer_cloud/watson_service.py | 8 ++++---- 36 files changed, 61 insertions(+), 42 deletions(-) rename test/{ => integration}/test_examples.py (100%) rename test/{ => unit}/__init__.py (100%) rename test/{ => unit}/test_alchemy_language_v1.py (100%) rename test/{ => unit}/test_conversation_v1.py (100%) rename test/{ => unit}/test_discovery_v1.py (100%) rename test/{ => unit}/test_document_conversion_v1.py (90%) rename test/{ => unit}/test_language_translator_v2.py (100%) rename test/{ => unit}/test_natural_language_classifier_v1.py (97%) rename test/{ => unit}/test_natural_language_understanding.py (100%) rename test/{ => unit}/test_personality_insights_v2.py (91%) rename test/{ => unit}/test_personality_insights_v3.py (81%) rename test/{ => unit}/test_retrieve_and_rank_v1.py (96%) rename test/{ => unit}/test_speech_to_text_v1.py (98%) rename test/{ => unit}/test_text_to_speech_v1.py (100%) rename test/{ => unit}/test_tone_analyzer_v3.py (87%) rename test/{ => unit}/test_tradeoff_analytics_v1.py (77%) rename test/{ => unit}/test_visual_recognition_v3.py (95%) rename test/{ => unit}/test_watson_service.py (100%) diff --git a/examples/authorization_v1.py b/examples/authorization_v1.py index 1700cc654..6122a7794 100644 --- a/examples/authorization_v1.py +++ b/examples/authorization_v1.py @@ -5,6 +5,8 @@ authorization = AuthorizationV1( username='YOUR SERVICE USERNAME', password='YOUR SERVICE PASSWORD') +authorization.set_default_headers({'X-Watson-Learning-Opt-Out': '1', 'X-Watson-Test': '1'}) + print(json.dumps(authorization.get_token(url=SpeechToTextV1.default_url), indent=2)) diff --git a/examples/conversation_tone_analyzer_integration/tone_conversation_integration.v1.py b/examples/conversation_tone_analyzer_integration/tone_conversation_integration.v1.py index 60724f721..d84826385 100644 --- 
a/examples/conversation_tone_analyzer_integration/tone_conversation_integration.v1.py +++ b/examples/conversation_tone_analyzer_integration/tone_conversation_integration.v1.py @@ -18,12 +18,14 @@ username=os.environ.get('CONVERSATION_USERNAME') or 'YOUR SERVICE NAME', password=os.environ.get('CONVERSATION_PASSWORD') or 'YOUR PASSWORD', version='2016-09-20') +conversation.set_default_headers({'X-Watson-Learning-Opt-Out': '1', 'X-Watson-Test': '1'}) # replace with your own tone analyzer credentials tone_analyzer = ToneAnalyzerV3( username=os.environ.get('TONE_ANALYZER_USERNAME') or 'YOUR SERVICE NAME', password=os.environ.get('TONE_ANALYZER_PASSWORD') or 'YOUR SERVICE NAME', version='2016-02-11') +tone_analyzer.set_default_headers({'X-Watson-Learning-Opt-Out': '1', 'X-Watson-Test': '1'}) # replace with your own workspace_id workspace_id = os.environ.get('WORKSPACE_ID') or 'YOUR WORKSPACE ID' diff --git a/examples/conversation_v1.py b/examples/conversation_v1.py index 1ac66200a..9d86922b9 100644 --- a/examples/conversation_v1.py +++ b/examples/conversation_v1.py @@ -7,6 +7,7 @@ username='YOUR SERVICE USERNAME', password='YOUR SERVICE PASSWORD', version='2017-04-21') +conversation.set_default_headers({'X-Watson-Learning-Opt-Out': '1', 'X-Watson-Test': '1'}) # When you send multiple requests for the same conversation, include the # context object from the previous response. 
diff --git a/examples/discovery_v1.py b/examples/discovery_v1.py index a791eac43..edef8aa11 100644 --- a/examples/discovery_v1.py +++ b/examples/discovery_v1.py @@ -7,6 +7,7 @@ version='2017-10-16', username='YOUR SERVICE USERNAME', password='YOUR SERVICE PASSWORD') +discovery.set_default_headers({'X-Watson-Learning-Opt-Out': '1', 'X-Watson-Test': '1'}) environments = discovery.list_environments() print(json.dumps(environments, indent=2)) diff --git a/examples/document_conversion_v1.py b/examples/document_conversion_v1.py index a4e1d7fb9..bdcbdd572 100644 --- a/examples/document_conversion_v1.py +++ b/examples/document_conversion_v1.py @@ -8,6 +8,7 @@ username='YOUR SERVICE USERNAME', password='YOUR SERVICE PASSWORD', version='2016-02-09') +document_conversion.set_default_headers({'X-Watson-Learning-Opt-Out': '1', 'X-Watson-Test': '1'}) # Example of retrieving html or plain text with open(join(dirname(__file__), '../resources/example.html'), diff --git a/examples/language_translator_v2.py b/examples/language_translator_v2.py index c733207d7..01d0bf2a4 100644 --- a/examples/language_translator_v2.py +++ b/examples/language_translator_v2.py @@ -6,6 +6,7 @@ language_translator = LanguageTranslatorV2( username='YOUR SERVICE USERNAME', password='YOUR SERVICE PASSWORD') +language_translator.set_default_headers({'X-Watson-Learning-Opt-Out': '1', 'X-Watson-Test': '1'}) # create new custom model # with open('../resources/language_translator_model.tmx', 'rb') as \ diff --git a/examples/natural_language_classifier_v1.py b/examples/natural_language_classifier_v1.py index 783ead19e..e5783f404 100644 --- a/examples/natural_language_classifier_v1.py +++ b/examples/natural_language_classifier_v1.py @@ -8,6 +8,7 @@ natural_language_classifier = NaturalLanguageClassifierV1( username='YOUR SERVICE USERNAME', password='YOUR SERVICE PASSWORD') +natural_language_classifier.set_default_headers({'X-Watson-Learning-Opt-Out': '1', 'X-Watson-Test': '1'}) classifiers = 
natural_language_classifier.list_classifiers() print(json.dumps(classifiers, indent=2)) diff --git a/examples/natural_language_understanding_v1.py b/examples/natural_language_understanding_v1.py index 29b84f4e5..c4e33a968 100644 --- a/examples/natural_language_understanding_v1.py +++ b/examples/natural_language_understanding_v1.py @@ -8,6 +8,7 @@ version='2017-02-27', username='YOUR SERVICE USERNAME', password='YOUR SERVICE PASSWORD') +natural_language_understanding.set_default_headers({'X-Watson-Learning-Opt-Out': '1', 'X-Watson-Test': '1'}) response = natural_language_understanding.analyze( text='Bruce Banner is the Hulk and Bruce Wayne is BATMAN! ' diff --git a/examples/personality_insights_v2.py b/examples/personality_insights_v2.py index e97a9cc51..d31450d13 100755 --- a/examples/personality_insights_v2.py +++ b/examples/personality_insights_v2.py @@ -6,6 +6,7 @@ personality_insights = PersonalityInsightsV2( username='YOUR SERVICE USERNAME', password='YOUR SERVICE PASSWORD') +personality_insights.set_default_headers({'X-Watson-Learning-Opt-Out': '1', 'X-Watson-Test': '1'}) with open(join(dirname(__file__), '../resources/personality.txt')) as \ personality_text: diff --git a/examples/personality_insights_v3.py b/examples/personality_insights_v3.py index 02c806a72..f0a56f5a7 100755 --- a/examples/personality_insights_v3.py +++ b/examples/personality_insights_v3.py @@ -11,6 +11,7 @@ version='2016-10-20', username='YOUR SERVICE USERNAME', password='YOUR SERVICE PASSWORD') +personality_insights.set_default_headers({'X-Watson-Learning-Opt-Out': '1', 'X-Watson-Test': '1'}) with open(join(dirname(__file__), '../resources/personality-v3.json')) as \ profile_json: diff --git a/examples/speech_to_text_v1.py b/examples/speech_to_text_v1.py index b89e99d51..7938a564d 100644 --- a/examples/speech_to_text_v1.py +++ b/examples/speech_to_text_v1.py @@ -6,6 +6,7 @@ speech_to_text = SpeechToTextV1( username='YOUR SERVICE USERNAME', password='YOUR SERVICE PASSWORD') 
+speech_to_text.set_default_headers({'X-Watson-Learning-Opt-Out': '1', 'X-Watson-Test': '1'}) print(json.dumps(speech_to_text.list_models(), indent=2)) diff --git a/examples/text_to_speech_v1.py b/examples/text_to_speech_v1.py index 34f0fa1c9..3da1291f8 100644 --- a/examples/text_to_speech_v1.py +++ b/examples/text_to_speech_v1.py @@ -7,6 +7,7 @@ text_to_speech = TextToSpeechV1( username='YOUR SERVICE USERNAME', password='YOUR SERVICE PASSWORD') +text_to_speech.set_default_headers({'X-Watson-Learning-Opt-Out': '1', 'X-Watson-Test': '1'}) print(json.dumps(text_to_speech.list_voices(), indent=2)) diff --git a/examples/tone_analyzer_v3.py b/examples/tone_analyzer_v3.py index 21dc45ff2..3a02ee028 100755 --- a/examples/tone_analyzer_v3.py +++ b/examples/tone_analyzer_v3.py @@ -7,6 +7,7 @@ username='YOUR SERVICE USERNAME', password='YOUR SERVICE PASSWORD', version='2017-09-26') +tone_analyzer.set_default_headers({'X-Watson-Learning-Opt-Out': '1', 'X-Watson-Test': '1'}) print("\ntone_chat() example 1:\n") utterances = [{'text': 'I am very happy.', 'user': 'glenn'}, diff --git a/examples/visual_recognition_v3.py b/examples/visual_recognition_v3.py index 9b6432965..68168ce82 100644 --- a/examples/visual_recognition_v3.py +++ b/examples/visual_recognition_v3.py @@ -7,6 +7,7 @@ '/ginni_bio_780x981_v4_03162016.jpg' visual_recognition = VisualRecognitionV3('2016-05-20', api_key='YOUR API KEY') +visual_recognition.set_default_headers({'X-Watson-Learning-Opt-Out': '1', 'X-Watson-Test': '1'}) # with open(join(dirname(__file__), '../resources/cars.zip'), 'rb') as cars, \ # open(join(dirname(__file__), '../resources/trucks.zip'), 'rb') as diff --git a/test/test_examples.py b/test/integration/test_examples.py similarity index 100% rename from test/test_examples.py rename to test/integration/test_examples.py diff --git a/test/integration/test_integration_speech_to_text_v1.py b/test/integration/test_integration_speech_to_text_v1.py index 651e7eadb..e33281011 100644 --- 
a/test/integration/test_integration_speech_to_text_v1.py +++ b/test/integration/test_integration_speech_to_text_v1.py @@ -10,6 +10,7 @@ def setUp(self): self.speech_to_text = watson_developer_cloud.SpeechToTextV1( username=os.getenv('SPEECH_TO_TEXT_USERNAME'), password=os.getenv('SPEECH_TO_TEXT_PASSWORD')) + self.speech_to_text.set_default_headers({'X-Watson-Learning-Opt-Out': '1', 'X-Watson-Test': '1'}) self.custom_models = self.speech_to_text.list_language_models() self.create_custom_model = self.speech_to_text.create_language_model( name="integration_test_model", diff --git a/test/integration/test_integration_text_to_speech_v1.py b/test/integration/test_integration_text_to_speech_v1.py index aa474eb80..5d84efe4b 100644 --- a/test/integration/test_integration_text_to_speech_v1.py +++ b/test/integration/test_integration_text_to_speech_v1.py @@ -6,6 +6,7 @@ class TestIntegrationTextToSpeechV1(unittest.TestCase): def setUp(self): self.text_to_speech = watson_developer_cloud.TextToSpeechV1() + self.text_to_speech.set_default_headers({'X-Watson-Learning-Opt-Out': '1', 'X-Watson-Test': '1'}) self.original_customizations = self.text_to_speech.list_voice_models() self.created_customization = self.text_to_speech.create_voice_model( name="test_integration_customization", diff --git a/test/integration/test_integration_visual_recognition.py b/test/integration/test_integration_visual_recognition.py index 26c78ec53..c4322ff23 100644 --- a/test/integration/test_integration_visual_recognition.py +++ b/test/integration/test_integration_visual_recognition.py @@ -11,6 +11,7 @@ class IntegrationTestVisualRecognitionV3(TestCase): def setUp(self): self.visual_recognition = watson_developer_cloud.VisualRecognitionV3('2016-05-20', api_key=os.environ.get( 'VISUAL_RECOGNITION_API_KEY')) + self.visual_recognition.set_default_headers({'X-Watson-Learning-Opt-Out': '1', 'X-Watson-Test': '1'}) self.collections = self.visual_recognition.list_collections() collection_json = 
self.visual_recognition.create_collection(name="test_integration_collection") diff --git a/test/__init__.py b/test/unit/__init__.py similarity index 100% rename from test/__init__.py rename to test/unit/__init__.py diff --git a/test/test_alchemy_language_v1.py b/test/unit/test_alchemy_language_v1.py similarity index 100% rename from test/test_alchemy_language_v1.py rename to test/unit/test_alchemy_language_v1.py diff --git a/test/test_conversation_v1.py b/test/unit/test_conversation_v1.py similarity index 100% rename from test/test_conversation_v1.py rename to test/unit/test_conversation_v1.py diff --git a/test/test_discovery_v1.py b/test/unit/test_discovery_v1.py similarity index 100% rename from test/test_discovery_v1.py rename to test/unit/test_discovery_v1.py diff --git a/test/test_document_conversion_v1.py b/test/unit/test_document_conversion_v1.py similarity index 90% rename from test/test_document_conversion_v1.py rename to test/unit/test_document_conversion_v1.py index 288a66bef..999de7cf9 100644 --- a/test/test_document_conversion_v1.py +++ b/test/unit/test_document_conversion_v1.py @@ -17,7 +17,7 @@ def test_success(): body=convert_response, status=200, content_type='application/json') - with open(os.path.join(os.path.dirname(__file__), '../resources/simple.html'), 'r') as document: + with open(os.path.join(os.path.dirname(__file__), '../../resources/simple.html'), 'r') as document: convertConfig = {'conversion_target': watson_developer_cloud.DocumentConversionV1.NORMALIZED_HTML} document_conversion.convert_document(document=document, config=convertConfig, media_type='text/html') @@ -31,7 +31,7 @@ def test_success(): body=index_response, status=200, content_type='application/json') - with open(os.path.join(os.path.dirname(__file__), '../resources/example.html'), 'r') as document: + with open(os.path.join(os.path.dirname(__file__), '../../resources/example.html'), 'r') as document: indexConfig = { 'retrieve_and_rank': { 'dry_run':'false', diff --git 
a/test/test_language_translator_v2.py b/test/unit/test_language_translator_v2.py similarity index 100% rename from test/test_language_translator_v2.py rename to test/unit/test_language_translator_v2.py diff --git a/test/test_natural_language_classifier_v1.py b/test/unit/test_natural_language_classifier_v1.py similarity index 97% rename from test/test_natural_language_classifier_v1.py rename to test/unit/test_natural_language_classifier_v1.py index efdaa0aac..cf5236bec 100644 --- a/test/test_natural_language_classifier_v1.py +++ b/test/unit/test_natural_language_classifier_v1.py @@ -61,7 +61,7 @@ def test_success(): responses.add(responses.POST, create_url, body=create_response, status=200, content_type='application/json') - with open(os.path.join(os.path.dirname(__file__), '../resources/weather_data_train.csv'), 'rb') as training_data: + with open(os.path.join(os.path.dirname(__file__), '../../resources/weather_data_train.csv'), 'rb') as training_data: natural_language_classifier.create_classifier( training_data=training_data, metadata='{"language": "en"}') diff --git a/test/test_natural_language_understanding.py b/test/unit/test_natural_language_understanding.py similarity index 100% rename from test/test_natural_language_understanding.py rename to test/unit/test_natural_language_understanding.py diff --git a/test/test_personality_insights_v2.py b/test/unit/test_personality_insights_v2.py similarity index 91% rename from test/test_personality_insights_v2.py rename to test/unit/test_personality_insights_v2.py index 96c50ef12..2095f217b 100644 --- a/test/test_personality_insights_v2.py +++ b/test/unit/test_personality_insights_v2.py @@ -16,7 +16,7 @@ def test_success(): body=profile_response, status=200, content_type='application/json') - with open(os.path.join(os.path.dirname(__file__), '../resources/personality.txt')) as personality_text: + with open(os.path.join(os.path.dirname(__file__), '../../resources/personality.txt')) as personality_text: 
personality_insights = watson_developer_cloud.PersonalityInsightsV2( username="username", password="password") personality_insights.profile(personality_text) diff --git a/test/test_personality_insights_v3.py b/test/unit/test_personality_insights_v3.py similarity index 81% rename from test/test_personality_insights_v3.py rename to test/unit/test_personality_insights_v3.py index 523a8a6e5..00120b32a 100755 --- a/test/test_personality_insights_v3.py +++ b/test/unit/test_personality_insights_v3.py @@ -12,14 +12,14 @@ def test_plain_to_json(): personality_insights = watson_developer_cloud.PersonalityInsightsV3( '2016-10-20', username="username", password="password") - with open(os.path.join(os.path.dirname(__file__), '../resources/personality-v3-expect1.txt')) as expect_file: + with open(os.path.join(os.path.dirname(__file__), '../../resources/personality-v3-expect1.txt')) as expect_file: profile_response = expect_file.read() responses.add(responses.POST, profile_url, body=profile_response, status=200, content_type='application/json') - with open(os.path.join(os.path.dirname(__file__), '../resources/personality-v3.txt')) as personality_text: + with open(os.path.join(os.path.dirname(__file__), '../../resources/personality-v3.txt')) as personality_text: response = personality_insights.profile( personality_text, content_type='text/plain;charset=utf-8') @@ -35,14 +35,14 @@ def test_json_to_json(): personality_insights = watson_developer_cloud.PersonalityInsightsV3( '2016-10-20', username="username", password="password") - with open(os.path.join(os.path.dirname(__file__), '../resources/personality-v3-expect2.txt')) as expect_file: + with open(os.path.join(os.path.dirname(__file__), '../../resources/personality-v3-expect2.txt')) as expect_file: profile_response = expect_file.read() responses.add(responses.POST, profile_url, body=profile_response, status=200, content_type='application/json') - with open(os.path.join(os.path.dirname(__file__), 
'../resources/personality-v3.json')) as personality_text: + with open(os.path.join(os.path.dirname(__file__), '../../resources/personality-v3.json')) as personality_text: response = personality_insights.profile( personality_text, content_type='application/json', raw_scores=True, consumption_preferences=True) @@ -61,14 +61,14 @@ def test_json_to_csv(): personality_insights = watson_developer_cloud.PersonalityInsightsV3( '2016-10-20', username="username", password="password") - with open(os.path.join(os.path.dirname(__file__), '../resources/personality-v3-expect3.txt')) as expect_file: + with open(os.path.join(os.path.dirname(__file__), '../../resources/personality-v3-expect3.txt')) as expect_file: profile_response = expect_file.read() responses.add(responses.POST, profile_url, body=profile_response, status=200, content_type='text/csv') - with open(os.path.join(os.path.dirname(__file__), '../resources/personality-v3.json')) as personality_text: + with open(os.path.join(os.path.dirname(__file__), '../../resources/personality-v3.json')) as personality_text: personality_insights.profile( personality_text, content_type='application/json', accept='text/csv', csv_headers=True, @@ -88,7 +88,7 @@ def test_plain_to_json_es(): personality_insights = watson_developer_cloud.PersonalityInsightsV3( '2016-10-20', username="username", password="password") - with codecs.open(os.path.join(os.path.dirname(__file__), '../resources/personality-v3-expect4.txt'), \ + with codecs.open(os.path.join(os.path.dirname(__file__), '../../resources/personality-v3-expect4.txt'), \ encoding='utf-8') as expect_file: profile_response = expect_file.read() @@ -96,7 +96,7 @@ def test_plain_to_json_es(): body=profile_response, status=200, content_type='application/json') - with open(os.path.join(os.path.dirname(__file__), '../resources/personality-v3-es.txt')) as personality_text: + with open(os.path.join(os.path.dirname(__file__), '../../resources/personality-v3-es.txt')) as personality_text: response = 
personality_insights.profile( personality_text, content_type='text/plain;charset=utf-8', content_language='es', accept_language='es') diff --git a/test/test_retrieve_and_rank_v1.py b/test/unit/test_retrieve_and_rank_v1.py similarity index 96% rename from test/test_retrieve_and_rank_v1.py rename to test/unit/test_retrieve_and_rank_v1.py index ec25264d8..7dffd60d5 100644 --- a/test/test_retrieve_and_rank_v1.py +++ b/test/unit/test_retrieve_and_rank_v1.py @@ -65,7 +65,7 @@ def test_rank(): ranker_answer = None - with open(os.path.join(os.path.dirname(__file__), '../resources/ranker_answer_data.csv'), 'rb') as answer_data: + with open(os.path.join(os.path.dirname(__file__), '../../resources/ranker_answer_data.csv'), 'rb') as answer_data: ranker_answer = retrieve_and_rank.rank('3b140ax14-rank-10383', answer_data=answer_data, top_answers=3) assert ranker_answer is not None @@ -84,7 +84,7 @@ def test_create_ranker(): content_type='application/json') ranker = None - with open(os.path.join(os.path.dirname(__file__), '../resources/ranker_training_data.csv'), 'rb') as training_data: + with open(os.path.join(os.path.dirname(__file__), '../../resources/ranker_training_data.csv'), 'rb') as training_data: ranker = retrieve_and_rank.create_ranker(training_data=training_data, name='pythonRank') assert ranker is not None @@ -192,7 +192,7 @@ def test_create_config(): match_querystring=True, content_type='application/json') - with open(os.path.join(os.path.dirname(__file__), '../resources/solr_config.zip'), 'rb') as config_data: + with open(os.path.join(os.path.dirname(__file__), '../../resources/solr_config.zip'), 'rb') as config_data: config = retrieve_and_rank.create_config(CLUSTER_ID, 'exampleconfig', config=config_data) assert config is not None diff --git a/test/test_speech_to_text_v1.py b/test/unit/test_speech_to_text_v1.py similarity index 98% rename from test/test_speech_to_text_v1.py rename to test/unit/test_speech_to_text_v1.py index 3912e40d3..7165d9aab 100755 --- 
a/test/test_speech_to_text_v1.py +++ b/test/unit/test_speech_to_text_v1.py @@ -40,7 +40,7 @@ def test_success(): content_type='application/json') with open( - os.path.join(os.path.dirname(__file__), '../resources/speech.wav'), + os.path.join(os.path.dirname(__file__), '../../resources/speech.wav'), 'rb') as audio_file: speech_to_text.recognize( audio=audio_file, content_type='audio/l16; rate=44100') @@ -117,7 +117,7 @@ def test_recognitions(): assert responses.calls[1].response.json() == {'status': 'waiting'} with open( - os.path.join(os.path.dirname(__file__), '../resources/speech.wav'), + os.path.join(os.path.dirname(__file__), '../../resources/speech.wav'), 'rb') as audio_file: speech_to_text.create_job(audio=audio_file) assert responses.calls[2].response.json() == {'status': 'waiting'} @@ -338,7 +338,7 @@ def test_custom_corpora(): speech_to_text.list_corpora(customization_id='customid') - file_path = '../resources/speech_to_text/corpus-short-1.txt' + file_path = '../../resources/speech_to_text/corpus-short-1.txt' full_path = os.path.join(os.path.dirname(__file__), file_path) with open(full_path) as corpus_file: speech_to_text.add_corpus( @@ -483,7 +483,7 @@ def test_custom_audio_resources(): username="username", password="password") with open( - os.path.join(os.path.dirname(__file__), '../resources/speech.wav'), + os.path.join(os.path.dirname(__file__), '../../resources/speech.wav'), 'rb') as audio_file: speech_to_text.add_audio( customization_id='custid', diff --git a/test/test_text_to_speech_v1.py b/test/unit/test_text_to_speech_v1.py similarity index 100% rename from test/test_text_to_speech_v1.py rename to test/unit/test_text_to_speech_v1.py diff --git a/test/test_tone_analyzer_v3.py b/test/unit/test_tone_analyzer_v3.py similarity index 87% rename from test/test_tone_analyzer_v3.py rename to test/unit/test_tone_analyzer_v3.py index 8bfa54f2b..456009715 100755 --- a/test/test_tone_analyzer_v3.py +++ b/test/unit/test_tone_analyzer_v3.py @@ -12,14 +12,14 @@ 
def test_tone(): tone_url = 'https://gateway.watsonplatform.net/tone-analyzer/api/v3/tone' tone_args = '?version=2016-05-19' tone_response = None - with open(os.path.join(os.path.dirname(__file__), '../resources/tone-v3-expect1.json')) as response_json: + with open(os.path.join(os.path.dirname(__file__), '../../resources/tone-v3-expect1.json')) as response_json: tone_response = response_json.read() responses.add(responses.POST, tone_url, body=tone_response, status=200, content_type='application/json') - with open(os.path.join(os.path.dirname(__file__), '../resources/personality.txt')) as tone_text: + with open(os.path.join(os.path.dirname(__file__), '../../resources/personality.txt')) as tone_text: tone_analyzer = watson_developer_cloud.ToneAnalyzerV3("2016-05-19", username="username", password="password") tone_analyzer.tone(tone_text.read()) @@ -36,14 +36,14 @@ def test_tone_with_args(): tone_url = 'https://gateway.watsonplatform.net/tone-analyzer/api/v3/tone' tone_args = {'version': '2016-05-19', 'sentences': 'false'} tone_response = None - with open(os.path.join(os.path.dirname(__file__), '../resources/tone-v3-expect1.json')) as response_json: + with open(os.path.join(os.path.dirname(__file__), '../../resources/tone-v3-expect1.json')) as response_json: tone_response = response_json.read() responses.add(responses.POST, tone_url, body=tone_response, status=200, content_type='application/json') - with open(os.path.join(os.path.dirname(__file__), '../resources/personality.txt')) as tone_text: + with open(os.path.join(os.path.dirname(__file__), '../../resources/personality.txt')) as tone_text: tone_analyzer = watson_developer_cloud.ToneAnalyzerV3("2016-05-19", username="username", password="password") tone_analyzer.tone(tone_text.read(), sentences=False) @@ -64,14 +64,14 @@ def test_tone_with_positional_args(): tone_url = 'https://gateway.watsonplatform.net/tone-analyzer/api/v3/tone' tone_args = {'version': '2016-05-19', 'sentences': 'false'} tone_response = None - 
with open(os.path.join(os.path.dirname(__file__), '../resources/tone-v3-expect1.json')) as response_json: + with open(os.path.join(os.path.dirname(__file__), '../../resources/tone-v3-expect1.json')) as response_json: tone_response = response_json.read() responses.add(responses.POST, tone_url, body=tone_response, status=200, content_type='application/json') - with open(os.path.join(os.path.dirname(__file__), '../resources/personality.txt')) as tone_text: + with open(os.path.join(os.path.dirname(__file__), '../../resources/personality.txt')) as tone_text: tone_analyzer = watson_developer_cloud.ToneAnalyzerV3("2016-05-19", username="username", password="password") tone_analyzer.tone(tone_text.read(), 'application/json', False) @@ -92,7 +92,7 @@ def test_tone_chat(): tone_url = 'https://gateway.watsonplatform.net/tone-analyzer/api/v3/tone_chat' tone_args = '?version=2016-05-19' tone_response = None - with open(os.path.join(os.path.dirname(__file__), '../resources/tone-v3-expect2.json')) as response_json: + with open(os.path.join(os.path.dirname(__file__), '../../resources/tone-v3-expect2.json')) as response_json: tone_response = response_json.read() responses.add(responses.POST, tone_url, diff --git a/test/test_tradeoff_analytics_v1.py b/test/unit/test_tradeoff_analytics_v1.py similarity index 77% rename from test/test_tradeoff_analytics_v1.py rename to test/unit/test_tradeoff_analytics_v1.py index b499a372e..3d56c2112 100755 --- a/test/test_tradeoff_analytics_v1.py +++ b/test/unit/test_tradeoff_analytics_v1.py @@ -9,7 +9,7 @@ @responses.activate def test_visualization_no_preferable_options(): - with open(os.path.join(os.path.dirname(__file__), '../resources/tradeoff-expect1.txt')) as expect_file: + with open(os.path.join(os.path.dirname(__file__), '../../resources/tradeoff-expect1.txt')) as expect_file: dilemmas_response = expect_file.read() responses.add(responses.POST, dilemmas_url, @@ -19,7 +19,7 @@ def test_visualization_no_preferable_options(): tradeoff_analytics 
= watson_developer_cloud.TradeoffAnalyticsV1( username="username", password="password") - with open(os.path.join(os.path.dirname(__file__), '../resources/problem.json')) as data_file: + with open(os.path.join(os.path.dirname(__file__), '../../resources/problem.json')) as data_file: tradeoff_analytics.dilemmas(json.load(data_file)) assert 'generate_visualization=true' in responses.calls[0].request.url @@ -29,7 +29,7 @@ def test_visualization_no_preferable_options(): @responses.activate def test_no_visualization_no_preferable_options(): - with open(os.path.join(os.path.dirname(__file__), '../resources/tradeoff-expect2.txt')) as expect_file: + with open(os.path.join(os.path.dirname(__file__), '../../resources/tradeoff-expect2.txt')) as expect_file: dilemmas_response = expect_file.read() responses.add(responses.POST, dilemmas_url, @@ -39,7 +39,7 @@ def test_no_visualization_no_preferable_options(): tradeoff_analytics = watson_developer_cloud.TradeoffAnalyticsV1( username="username", password="password") - with open(os.path.join(os.path.dirname(__file__), '../resources/problem.json')) as data_file: + with open(os.path.join(os.path.dirname(__file__), '../../resources/problem.json')) as data_file: tradeoff_analytics.dilemmas(json.load(data_file), generate_visualization=False) assert 'generate_visualization=false' in responses.calls[0].request.url @@ -48,7 +48,7 @@ def test_no_visualization_no_preferable_options(): @responses.activate def test_no_visualization_preferable_options(): - with open(os.path.join(os.path.dirname(__file__), '../resources/tradeoff-expect3.txt')) as expect_file: + with open(os.path.join(os.path.dirname(__file__), '../../resources/tradeoff-expect3.txt')) as expect_file: dilemmas_response = expect_file.read() responses.add(responses.POST, dilemmas_url, @@ -58,7 +58,7 @@ def test_no_visualization_preferable_options(): tradeoff_analytics = watson_developer_cloud.TradeoffAnalyticsV1( username="username", password="password") - with 
open(os.path.join(os.path.dirname(__file__), '../resources/problem.json')) as data_file: + with open(os.path.join(os.path.dirname(__file__), '../../resources/problem.json')) as data_file: tradeoff_analytics.dilemmas( json.load(data_file), generate_visualization=False, @@ -71,7 +71,7 @@ def test_no_visualization_preferable_options(): @responses.activate def test_visualization_preferable_options(): - with open(os.path.join(os.path.dirname(__file__), '../resources/tradeoff-expect4.txt')) as expect_file: + with open(os.path.join(os.path.dirname(__file__), '../../resources/tradeoff-expect4.txt')) as expect_file: dilemmas_response = expect_file.read() responses.add(responses.POST, dilemmas_url, @@ -80,7 +80,7 @@ def test_visualization_preferable_options(): tradeoff_analytics = watson_developer_cloud.TradeoffAnalyticsV1( username="username", password="password") - with open(os.path.join(os.path.dirname(__file__), '../resources/problem.json')) as data_file: + with open(os.path.join(os.path.dirname(__file__), '../../resources/problem.json')) as data_file: tradeoff_analytics.dilemmas( json.load(data_file), find_preferable_options=True) diff --git a/test/test_visual_recognition_v3.py b/test/unit/test_visual_recognition_v3.py similarity index 95% rename from test/test_visual_recognition_v3.py rename to test/unit/test_visual_recognition_v3.py index 58d26eec3..11e964f7e 100644 --- a/test/test_visual_recognition_v3.py +++ b/test/unit/test_visual_recognition_v3.py @@ -96,8 +96,8 @@ def test_create_classifier(self): status=200, content_type='application/json') - with open(os.path.join(os.path.dirname(__file__), '../resources/cars.zip'), 'rb') as cars, \ - open(os.path.join(os.path.dirname(__file__), '../resources/trucks.zip'), 'rb') as trucks: + with open(os.path.join(os.path.dirname(__file__), '../../resources/cars.zip'), 'rb') as cars, \ + open(os.path.join(os.path.dirname(__file__), '../../resources/trucks.zip'), 'rb') as trucks: vr_service.create_classifier('Cars vs Trucks', 
cars_positive_examples=cars, negative_examples=trucks) assert len(responses.calls) == 1 @@ -172,7 +172,7 @@ def test_classify(self): vr_service.classify(parameters=json.dumps({'url': 'http://google.com', 'classifier_ids': ['one', 'two', 'three']})) vr_service.classify(parameters=json.dumps({'url': 'http://google.com', 'owners': ['me', 'IBM']})) - with open(os.path.join(os.path.dirname(__file__), '../resources/test.jpg'), 'rb') as image_file: + with open(os.path.join(os.path.dirname(__file__), '../../resources/test.jpg'), 'rb') as image_file: vr_service.classify(images_file=image_file) assert len(responses.calls) == 4 @@ -229,6 +229,6 @@ def test_detect_faces(self): content_type='application/json') vr_service.detect_faces(parameters='{"url": "http://google.com"}') - with open(os.path.join(os.path.dirname(__file__), '../resources/test.jpg'), 'rb') as image_file: + with open(os.path.join(os.path.dirname(__file__), '../../resources/test.jpg'), 'rb') as image_file: vr_service.detect_faces(images_file=image_file) assert len(responses.calls) == 2 diff --git a/test/test_watson_service.py b/test/unit/test_watson_service.py similarity index 100% rename from test/test_watson_service.py rename to test/unit/test_watson_service.py diff --git a/watson_developer_cloud/watson_service.py b/watson_developer_cloud/watson_service.py index 8f1e8b254..7592856bc 100755 --- a/watson_developer_cloud/watson_service.py +++ b/watson_developer_cloud/watson_service.py @@ -68,8 +68,8 @@ def __init__(self, code, message, info=None, httpResponse=None): self.code = code self.info = info self.httpResponse = httpResponse - self.transactionId = '' - self.globalTransactionId = '' + self.transactionId = None + self.globalTransactionId = None if httpResponse is not None: self.transactionId = httpResponse.headers.get('X-DP-Watson-Tran-ID') self.globalTransactionId = httpResponse.headers.get('X-Global-Transaction-ID') @@ -77,9 +77,9 @@ def __init__(self, code, message, info=None, httpResponse=None): def 
__str__(self): msg = 'Error: ' + self.message + ', Code: ' + str(self.code) - if (self.transactionId): + if self.transactionId is not None: msg += ' , X-dp-watson-tran-id: ' + str(self.transactionId) - if (self.globalTransactionId): + if self.globalTransactionId is not None: msg += ' , X-global-transaction-id: ' + str(self.globalTransactionId) return msg From aa32016a9a2379067094a75df03263c089778df8 Mon Sep 17 00:00:00 2001 From: Erika Dsouza Date: Wed, 14 Feb 2018 11:33:35 -0500 Subject: [PATCH 05/45] Remove test header from examples folder --- examples/authorization_v1.py | 1 - .../tone_conversation_integration.v1.py | 2 -- examples/conversation_v1.py | 1 - examples/discovery_v1.py | 1 - examples/document_conversion_v1.py | 1 - examples/language_translator_v2.py | 1 - examples/natural_language_classifier_v1.py | 1 - examples/natural_language_understanding_v1.py | 1 - examples/personality_insights_v2.py | 1 - examples/personality_insights_v3.py | 1 - examples/speech_to_text_v1.py | 1 - examples/text_to_speech_v1.py | 1 - examples/tone_analyzer_v3.py | 1 - examples/visual_recognition_v3.py | 1 - 14 files changed, 15 deletions(-) diff --git a/examples/authorization_v1.py b/examples/authorization_v1.py index 6122a7794..2378cb111 100644 --- a/examples/authorization_v1.py +++ b/examples/authorization_v1.py @@ -5,7 +5,6 @@ authorization = AuthorizationV1( username='YOUR SERVICE USERNAME', password='YOUR SERVICE PASSWORD') -authorization.set_default_headers({'X-Watson-Learning-Opt-Out': '1', 'X-Watson-Test': '1'}) print(json.dumps(authorization.get_token(url=SpeechToTextV1.default_url), diff --git a/examples/conversation_tone_analyzer_integration/tone_conversation_integration.v1.py b/examples/conversation_tone_analyzer_integration/tone_conversation_integration.v1.py index d84826385..60724f721 100644 --- a/examples/conversation_tone_analyzer_integration/tone_conversation_integration.v1.py +++ 
b/examples/conversation_tone_analyzer_integration/tone_conversation_integration.v1.py @@ -18,14 +18,12 @@ username=os.environ.get('CONVERSATION_USERNAME') or 'YOUR SERVICE NAME', password=os.environ.get('CONVERSATION_PASSWORD') or 'YOUR PASSWORD', version='2016-09-20') -conversation.set_default_headers({'X-Watson-Learning-Opt-Out': '1', 'X-Watson-Test': '1'}) # replace with your own tone analyzer credentials tone_analyzer = ToneAnalyzerV3( username=os.environ.get('TONE_ANALYZER_USERNAME') or 'YOUR SERVICE NAME', password=os.environ.get('TONE_ANALYZER_PASSWORD') or 'YOUR SERVICE NAME', version='2016-02-11') -tone_analyzer.set_default_headers({'X-Watson-Learning-Opt-Out': '1', 'X-Watson-Test': '1'}) # replace with your own workspace_id workspace_id = os.environ.get('WORKSPACE_ID') or 'YOUR WORKSPACE ID' diff --git a/examples/conversation_v1.py b/examples/conversation_v1.py index 9d86922b9..1ac66200a 100644 --- a/examples/conversation_v1.py +++ b/examples/conversation_v1.py @@ -7,7 +7,6 @@ username='YOUR SERVICE USERNAME', password='YOUR SERVICE PASSWORD', version='2017-04-21') -conversation.set_default_headers({'X-Watson-Learning-Opt-Out': '1', 'X-Watson-Test': '1'}) # When you send multiple requests for the same conversation, include the # context object from the previous response. 
diff --git a/examples/discovery_v1.py b/examples/discovery_v1.py index edef8aa11..a791eac43 100644 --- a/examples/discovery_v1.py +++ b/examples/discovery_v1.py @@ -7,7 +7,6 @@ version='2017-10-16', username='YOUR SERVICE USERNAME', password='YOUR SERVICE PASSWORD') -discovery.set_default_headers({'X-Watson-Learning-Opt-Out': '1', 'X-Watson-Test': '1'}) environments = discovery.list_environments() print(json.dumps(environments, indent=2)) diff --git a/examples/document_conversion_v1.py b/examples/document_conversion_v1.py index bdcbdd572..a4e1d7fb9 100644 --- a/examples/document_conversion_v1.py +++ b/examples/document_conversion_v1.py @@ -8,7 +8,6 @@ username='YOUR SERVICE USERNAME', password='YOUR SERVICE PASSWORD', version='2016-02-09') -document_conversion.set_default_headers({'X-Watson-Learning-Opt-Out': '1', 'X-Watson-Test': '1'}) # Example of retrieving html or plain text with open(join(dirname(__file__), '../resources/example.html'), diff --git a/examples/language_translator_v2.py b/examples/language_translator_v2.py index 01d0bf2a4..c733207d7 100644 --- a/examples/language_translator_v2.py +++ b/examples/language_translator_v2.py @@ -6,7 +6,6 @@ language_translator = LanguageTranslatorV2( username='YOUR SERVICE USERNAME', password='YOUR SERVICE PASSWORD') -language_translator.set_default_headers({'X-Watson-Learning-Opt-Out': '1', 'X-Watson-Test': '1'}) # create new custom model # with open('../resources/language_translator_model.tmx', 'rb') as \ diff --git a/examples/natural_language_classifier_v1.py b/examples/natural_language_classifier_v1.py index e5783f404..783ead19e 100644 --- a/examples/natural_language_classifier_v1.py +++ b/examples/natural_language_classifier_v1.py @@ -8,7 +8,6 @@ natural_language_classifier = NaturalLanguageClassifierV1( username='YOUR SERVICE USERNAME', password='YOUR SERVICE PASSWORD') -natural_language_classifier.set_default_headers({'X-Watson-Learning-Opt-Out': '1', 'X-Watson-Test': '1'}) classifiers = 
natural_language_classifier.list_classifiers() print(json.dumps(classifiers, indent=2)) diff --git a/examples/natural_language_understanding_v1.py b/examples/natural_language_understanding_v1.py index c4e33a968..29b84f4e5 100644 --- a/examples/natural_language_understanding_v1.py +++ b/examples/natural_language_understanding_v1.py @@ -8,7 +8,6 @@ version='2017-02-27', username='YOUR SERVICE USERNAME', password='YOUR SERVICE PASSWORD') -natural_language_understanding.set_default_headers({'X-Watson-Learning-Opt-Out': '1', 'X-Watson-Test': '1'}) response = natural_language_understanding.analyze( text='Bruce Banner is the Hulk and Bruce Wayne is BATMAN! ' diff --git a/examples/personality_insights_v2.py b/examples/personality_insights_v2.py index d31450d13..e97a9cc51 100755 --- a/examples/personality_insights_v2.py +++ b/examples/personality_insights_v2.py @@ -6,7 +6,6 @@ personality_insights = PersonalityInsightsV2( username='YOUR SERVICE USERNAME', password='YOUR SERVICE PASSWORD') -personality_insights.set_default_headers({'X-Watson-Learning-Opt-Out': '1', 'X-Watson-Test': '1'}) with open(join(dirname(__file__), '../resources/personality.txt')) as \ personality_text: diff --git a/examples/personality_insights_v3.py b/examples/personality_insights_v3.py index f0a56f5a7..02c806a72 100755 --- a/examples/personality_insights_v3.py +++ b/examples/personality_insights_v3.py @@ -11,7 +11,6 @@ version='2016-10-20', username='YOUR SERVICE USERNAME', password='YOUR SERVICE PASSWORD') -personality_insights.set_default_headers({'X-Watson-Learning-Opt-Out': '1', 'X-Watson-Test': '1'}) with open(join(dirname(__file__), '../resources/personality-v3.json')) as \ profile_json: diff --git a/examples/speech_to_text_v1.py b/examples/speech_to_text_v1.py index 7938a564d..b89e99d51 100644 --- a/examples/speech_to_text_v1.py +++ b/examples/speech_to_text_v1.py @@ -6,7 +6,6 @@ speech_to_text = SpeechToTextV1( username='YOUR SERVICE USERNAME', password='YOUR SERVICE PASSWORD') 
-speech_to_text.set_default_headers({'X-Watson-Learning-Opt-Out': '1', 'X-Watson-Test': '1'}) print(json.dumps(speech_to_text.list_models(), indent=2)) diff --git a/examples/text_to_speech_v1.py b/examples/text_to_speech_v1.py index 3da1291f8..34f0fa1c9 100644 --- a/examples/text_to_speech_v1.py +++ b/examples/text_to_speech_v1.py @@ -7,7 +7,6 @@ text_to_speech = TextToSpeechV1( username='YOUR SERVICE USERNAME', password='YOUR SERVICE PASSWORD') -text_to_speech.set_default_headers({'X-Watson-Learning-Opt-Out': '1', 'X-Watson-Test': '1'}) print(json.dumps(text_to_speech.list_voices(), indent=2)) diff --git a/examples/tone_analyzer_v3.py b/examples/tone_analyzer_v3.py index 3a02ee028..21dc45ff2 100755 --- a/examples/tone_analyzer_v3.py +++ b/examples/tone_analyzer_v3.py @@ -7,7 +7,6 @@ username='YOUR SERVICE USERNAME', password='YOUR SERVICE PASSWORD', version='2017-09-26') -tone_analyzer.set_default_headers({'X-Watson-Learning-Opt-Out': '1', 'X-Watson-Test': '1'}) print("\ntone_chat() example 1:\n") utterances = [{'text': 'I am very happy.', 'user': 'glenn'}, diff --git a/examples/visual_recognition_v3.py b/examples/visual_recognition_v3.py index 68168ce82..9b6432965 100644 --- a/examples/visual_recognition_v3.py +++ b/examples/visual_recognition_v3.py @@ -7,7 +7,6 @@ '/ginni_bio_780x981_v4_03162016.jpg' visual_recognition = VisualRecognitionV3('2016-05-20', api_key='YOUR API KEY') -visual_recognition.set_default_headers({'X-Watson-Learning-Opt-Out': '1', 'X-Watson-Test': '1'}) # with open(join(dirname(__file__), '../resources/cars.zip'), 'rb') as cars, \ # open(join(dirname(__file__), '../resources/trucks.zip'), 'rb') as From 5fc6615841c98b248fb254c924e0205c0369f1b9 Mon Sep 17 00:00:00 2001 From: Erika Dsouza Date: Wed, 14 Feb 2018 13:52:50 -0500 Subject: [PATCH 06/45] remove deprecated services (#358) * Remove deprecated services * Remove unused dependencies --- README.md | 3 - examples/alchemy_data_news_v1.py | 21 - examples/alchemy_language_v1.py | 48 -- 
examples/document_conversion_v1.py | 130 ----- requirements.txt | 3 - setup.py | 10 +- test/integration/test_examples.py | 3 +- test/unit/test_alchemy_language_v1.py | 85 --- test/unit/test_document_conversion_v1.py | 48 -- test/unit/test_retrieve_and_rank_v1.py | 279 ---------- test/unit/test_tradeoff_analytics_v1.py | 91 ---- test_env/bin/codecov | 11 + test_env/bin/coverage | 11 + test_env/bin/coverage-2.7 | 11 + test_env/bin/coverage2 | 11 + test_env/bin/dotenv | 11 + test_env/bin/easy_install | 11 + test_env/bin/easy_install-2.7 | 11 + test_env/bin/epylint | 11 + test_env/bin/isort | 11 + test_env/bin/pbr | 11 + test_env/bin/pip | 11 + test_env/bin/pip2 | 11 + test_env/bin/pip2.7 | 11 + test_env/bin/py.test | 11 + test_env/bin/pybabel | 11 + test_env/bin/pygmentize | 11 + test_env/bin/pylint | 11 + test_env/bin/pyreverse | 11 + test_env/bin/pytest | 11 + test_env/bin/python | Bin 0 -> 51712 bytes test_env/bin/python-config | 78 +++ test_env/bin/python2 | 1 + test_env/bin/python2.7 | 1 + test_env/bin/rst2html.py | 23 + test_env/bin/rst2html4.py | 26 + test_env/bin/rst2html5.py | 35 ++ test_env/bin/rst2latex.py | 26 + test_env/bin/rst2man.py | 26 + test_env/bin/rst2odt.py | 30 ++ test_env/bin/rst2odt_prepstyles.py | 67 +++ test_env/bin/rst2pseudoxml.py | 23 + test_env/bin/rst2s5.py | 24 + test_env/bin/rst2xetex.py | 27 + test_env/bin/rst2xml.py | 23 + test_env/bin/rstpep2html.py | 25 + test_env/bin/sphinx-apidoc | 11 + test_env/bin/sphinx-autogen | 11 + test_env/bin/sphinx-build | 11 + test_env/bin/sphinx-quickstart | 11 + test_env/bin/symilar | 11 + test_env/bin/tox | 11 + test_env/bin/tox-quickstart | 11 + test_env/bin/virtualenv | 11 + test_env/bin/wheel | 11 + test_env/include/python2.7 | 1 + test_env/pip-selfcheck.json | 1 + utilities/retrieve_and_rank_v1_rank.py | 499 ------------------ watson_developer_cloud/__init__.py | 6 - .../alchemy_data_news_v1.py | 125 ----- watson_developer_cloud/alchemy_language_v1.py | 322 ----------- 
watson_developer_cloud/alchemy_vision_v1.py | 54 -- .../document_conversion_v1.py | 72 --- .../retrieve_and_rank_v1.py | 137 ----- .../tradeoff_analytics_v1.py | 48 -- watson_developer_cloud/watson_service.py | 59 --- 66 files changed, 751 insertions(+), 2037 deletions(-) delete mode 100644 examples/alchemy_data_news_v1.py delete mode 100644 examples/alchemy_language_v1.py delete mode 100644 examples/document_conversion_v1.py delete mode 100644 test/unit/test_alchemy_language_v1.py delete mode 100644 test/unit/test_document_conversion_v1.py delete mode 100644 test/unit/test_retrieve_and_rank_v1.py delete mode 100755 test/unit/test_tradeoff_analytics_v1.py create mode 100755 test_env/bin/codecov create mode 100755 test_env/bin/coverage create mode 100755 test_env/bin/coverage-2.7 create mode 100755 test_env/bin/coverage2 create mode 100755 test_env/bin/dotenv create mode 100755 test_env/bin/easy_install create mode 100755 test_env/bin/easy_install-2.7 create mode 100755 test_env/bin/epylint create mode 100755 test_env/bin/isort create mode 100755 test_env/bin/pbr create mode 100755 test_env/bin/pip create mode 100755 test_env/bin/pip2 create mode 100755 test_env/bin/pip2.7 create mode 100755 test_env/bin/py.test create mode 100755 test_env/bin/pybabel create mode 100755 test_env/bin/pygmentize create mode 100755 test_env/bin/pylint create mode 100755 test_env/bin/pyreverse create mode 100755 test_env/bin/pytest create mode 100755 test_env/bin/python create mode 100755 test_env/bin/python-config create mode 120000 test_env/bin/python2 create mode 120000 test_env/bin/python2.7 create mode 100755 test_env/bin/rst2html.py create mode 100755 test_env/bin/rst2html4.py create mode 100755 test_env/bin/rst2html5.py create mode 100755 test_env/bin/rst2latex.py create mode 100755 test_env/bin/rst2man.py create mode 100755 test_env/bin/rst2odt.py create mode 100755 test_env/bin/rst2odt_prepstyles.py create mode 100755 test_env/bin/rst2pseudoxml.py create mode 100755 
test_env/bin/rst2s5.py create mode 100755 test_env/bin/rst2xetex.py create mode 100755 test_env/bin/rst2xml.py create mode 100755 test_env/bin/rstpep2html.py create mode 100755 test_env/bin/sphinx-apidoc create mode 100755 test_env/bin/sphinx-autogen create mode 100755 test_env/bin/sphinx-build create mode 100755 test_env/bin/sphinx-quickstart create mode 100755 test_env/bin/symilar create mode 100755 test_env/bin/tox create mode 100755 test_env/bin/tox-quickstart create mode 100755 test_env/bin/virtualenv create mode 100755 test_env/bin/wheel create mode 120000 test_env/include/python2.7 create mode 100644 test_env/pip-selfcheck.json delete mode 100644 utilities/retrieve_and_rank_v1_rank.py delete mode 100644 watson_developer_cloud/alchemy_data_news_v1.py delete mode 100644 watson_developer_cloud/alchemy_language_v1.py delete mode 100644 watson_developer_cloud/alchemy_vision_v1.py delete mode 100644 watson_developer_cloud/document_conversion_v1.py delete mode 100644 watson_developer_cloud/retrieve_and_rank_v1.py delete mode 100755 watson_developer_cloud/tradeoff_analytics_v1.py diff --git a/README.md b/README.md index f73ca49ec..5a9504e2c 100755 --- a/README.md +++ b/README.md @@ -86,9 +86,6 @@ print(json.dumps(response, indent=2)) ## Dependencies * [requests] -* `pysolr` >=3.3, <4.0 -* `argparse` >=1.3.0 -* `pyOpenSSL` >=16.2.0 * `python_dateutil` >= 2.5.3 * [responses] for testing diff --git a/examples/alchemy_data_news_v1.py b/examples/alchemy_data_news_v1.py deleted file mode 100644 index f31ba1f22..000000000 --- a/examples/alchemy_data_news_v1.py +++ /dev/null @@ -1,21 +0,0 @@ -from __future__ import print_function -import json -from watson_developer_cloud import AlchemyDataNewsV1 - -alchemy_data_news = AlchemyDataNewsV1(api_key='YOUR API KEY') - -results = alchemy_data_news.get_news_documents(start='now-7d', end='now', - time_slice='12h') -print(json.dumps(results, indent=2)) - -results = alchemy_data_news.get_news_documents( - start='1453334400', - 
end='1454022000', - return_fields=['enriched.url.title', - 'enriched.url.url', - 'enriched.url.author', - 'enriched.url.publicationDate'], - query_fields={ - 'q.enriched.url.enrichedTitle.entities.entity': - '|text=IBM,type=company|'}) -print(json.dumps(results, indent=2)) diff --git a/examples/alchemy_language_v1.py b/examples/alchemy_language_v1.py deleted file mode 100644 index e41d49ae4..000000000 --- a/examples/alchemy_language_v1.py +++ /dev/null @@ -1,48 +0,0 @@ -import json -from watson_developer_cloud import AlchemyLanguageV1 - -alchemy_language = AlchemyLanguageV1(api_key='YOUR API KEY') - -url = 'https://developer.ibm.com/watson/blog/2015/11/03/price-reduction-for' \ - '-watson-personality-insights/' - -print(json.dumps( - alchemy_language.targeted_sentiment(text='I love cats! Dogs are smelly.', - targets=['cats', 'dogs'], - language='english'), indent=2)) -# print(json.dumps(alchemy_language.targeted_emotion(text='I love apples. I -# hate bananas', -# targets=['apples', -# 'bananas'], language='english'), indent=2)) - -# print(json.dumps(alchemy_language.author(url=url), indent=2)) -# print(json.dumps(alchemy_language.concepts(max_items=2, url=url), indent=2)) -# print(json.dumps(alchemy_language.dates(url=url, anchor_date='2016-03-22 -# 00:00:00'), indent=2)) -# print(json.dumps(alchemy_language.emotion(url=url), indent=2)) -# print(json.dumps(alchemy_language.entities(url=url), indent=2)) -# print(json.dumps(alchemy_language.keywords(max_items=5, url=url), indent=2)) -# print(json.dumps(alchemy_language.category(url=url), indent=2)) -# print(json.dumps(alchemy_language.typed_relations(url=url), indent=2)) -# print(json.dumps(alchemy_language.relations(url=url), indent=2)) -# print(json.dumps(alchemy_language.language(url=url), indent=2)) -# print(json.dumps(alchemy_language.text(url=url), indent=2)) -# print(json.dumps(alchemy_language.raw_text(url=url), indent=2)) -# print(json.dumps(alchemy_language.title(url=url), indent=2)) -# 
print(json.dumps(alchemy_language.feeds(url=url), indent=2)) -# print(json.dumps(alchemy_language.microformats( -# url='http://microformats.org/wiki/hcard-examples'), indent=2)) -# print(json.dumps(alchemy_language.publication_date(url=url), indent=2)) -# print(json.dumps(alchemy_language.taxonomy(url=url), indent=2)) -combined_operations = ['page-image', 'entity', 'keyword', 'title', 'author', - 'taxonomy', 'concept', 'doc-emotion'] -print( - json.dumps(alchemy_language.combined(url=url, extract=combined_operations), - indent=2)) - -# Get sentiment and emotion information results for detected entities/keywords: -# print(json.dumps(alchemy_language.entities(url=url, sentiment=True, -# emotion=True), indent=2)) -# print(json.dumps(alchemy_language.keywords(max_items=5, url=url, -# sentiment=True, emotion=True), -# indent=2)) diff --git a/examples/document_conversion_v1.py b/examples/document_conversion_v1.py deleted file mode 100644 index a4e1d7fb9..000000000 --- a/examples/document_conversion_v1.py +++ /dev/null @@ -1,130 +0,0 @@ -# coding=utf-8 -import json -from os.path import join, dirname -from io import open -from watson_developer_cloud import DocumentConversionV1 - -document_conversion = DocumentConversionV1( - username='YOUR SERVICE USERNAME', - password='YOUR SERVICE PASSWORD', - version='2016-02-09') - -# Example of retrieving html or plain text -with open(join(dirname(__file__), '../resources/example.html'), - encoding='utf8') as document: - config = {'conversion_target': DocumentConversionV1.NORMALIZED_HTML} - print(document_conversion.convert_document( - document=document, config=config, media_type='text/html').content) - -# Example with JSON -with open(join(dirname(__file__), '../resources/example.html'), - encoding='utf8') as document: - config['conversion_target'] = DocumentConversionV1.ANSWER_UNITS - print(json.dumps( - document_conversion.convert_document(document=document, config=config), - indent=2)) - -# Examples of index_document API -print( - 
"########## Example of a dry run of index_document with only a document " - "##########") -with open(join(dirname(__file__), '../resources/example.html'), - encoding='utf8') as document: - config = { - 'retrieve_and_rank': { - 'dry_run': 'true' - } - } - print(json.dumps( - document_conversion.index_document(config=config, document=document), - indent=2)) - -print( - "########## Example of a dry run of index_document with only metadata " - "##########") -config = { - 'retrieve_and_rank': { - 'dry_run': 'true' - } -} -metadata = { - 'metadata': [ - {'name': 'id', 'value': '12345'} - ] -} -print( - json.dumps( - document_conversion.index_document(config=config, metadata=metadata), - indent=2)) - -print( - "########## Example of a dry run of index_document with document and " - "metadata " - "##########") -with open(join(dirname(__file__), '../resources/example.html'), - encoding='utf8') as document: - config = { - 'retrieve_and_rank': { - 'dry_run': 'true' - } - } - metadata = { - 'metadata': [ - {'name': 'id', 'value': '12345'} - ] - } - print(json.dumps( - document_conversion.index_document(config=config, document=document, - metadata=metadata), indent=2)) - -print( - "########## Example of a dry run of index_document with document, " - "metadata, " - "and additional config for conversion" - "##########") -with open(join(dirname(__file__), '../resources/example.html'), - encoding='utf8') as document: - config = { - 'convert_document': { - 'normalized_html': { - 'exclude_content': {"xpaths": ["//body/div"]} - } - }, - 'retrieve_and_rank': { - 'dry_run': 'true' - } - } - metadata = { - 'metadata': [ - {'name': 'id', 'value': '12345'} - ] - } - print(json.dumps( - document_conversion.index_document(config=config, document=document, - metadata=metadata), indent=2)) - -# print("########## Example of index_document with document, metadata (A -# service instance id, SOLR cluster id, and " -# "a SOLR collection name must be provided from the Retrieve and Rank -# service in 
order to index) ##########") -# with open(join(dirname(__file__), '../resources/example.html'), 'r') as -# document: -# config = { -# 'retrieve_and_rank': { -# 'dry_run': 'false', -# 'service_instance_id': 'YOUR RETRIEVE AND RANK SERVICE INSTANCE -# ID', -# 'cluster_id': 'YOUR RETRIEVE AND RANK SERVICE SOLR CLUSTER ID', -# 'search_collection': 'YOUR RETRIEVE AND RANK SERVICE SOLR -# SEARCH COLLECTION NAME' -# } -# } -# metadata = { -# 'metadata': [ -# {'name': 'id', 'value': '12345'} -# ] -# } -# print(json.dumps(document_conversion.index_document(config=config, -# document=document, -# metadata=metadata), -# indent=2)) diff --git a/requirements.txt b/requirements.txt index 604befabf..bc1b2947d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,2 @@ requests>=2.0,<3.0 -pysolr>=3.3,<4.0 -argparse>=1.3.0 -pyOpenSSL>=16.2.0 python_dateutil>=2.5.3 diff --git a/setup.py b/setup.py index 7d25ff8d1..7a4e5b883 100644 --- a/setup.py +++ b/setup.py @@ -64,7 +64,7 @@ def run_tests(self): version=__version__, description='Client library to use the IBM Watson Services', license='Apache 2.0', - install_requires=['requests>=2.0, <3.0', 'pysolr>= 3.3, <4.0', 'pyOpenSSL>=16.2.0', 'python_dateutil>=2.5.3'], + install_requires=['requests>=2.0, <3.0', 'python_dateutil>=2.5.3'], tests_require=['responses', 'pytest', 'python_dotenv', 'pytest-rerunfailures', 'tox'], cmdclass={'test': PyTest}, author='Jeffrey Stylos', @@ -73,12 +73,12 @@ def run_tests(self): url='https://github.com/watson-developer-cloud/python-sdk', packages=['watson_developer_cloud'], include_package_data=True, - keywords='alchemy datanews, language, vision, question and answer' + - ' tone_analyzer, natural language classifier, retrieve and rank,' + - ' tradeoff analytics, text to speech, language translation, ' + + keywords='language, vision, question and answer' + + ' tone_analyzer, natural language classifier,' + + ' text to speech, language translation, ' + 'language identification, concept expansion, 
machine translation, ' + 'personality insights, message resonance, watson developer cloud, ' + - ' wdc, watson, ibm, dialog, user modeling, alchemyapi, alchemy, ' + + ' wdc, watson, ibm, dialog, user modeling,' + 'tone analyzer, speech to text, visual recognition', classifiers=[ 'Programming Language :: Python', diff --git a/test/integration/test_examples.py b/test/integration/test_examples.py index a52c131da..8c50fe682 100644 --- a/test/integration/test_examples.py +++ b/test/integration/test_examples.py @@ -10,8 +10,7 @@ from glob import glob # tests to exclude -excludes = ['authorization_v1.py', 'alchemy_data_news_v1.py', - 'alchemy_language_v1.py', 'discovery_v1.ipynb', '__init__.py'] +excludes = ['authorization_v1.py', 'discovery_v1.ipynb', '__init__.py'] # examples path. /examples examples_path = join(dirname(__file__), '../', 'examples', '*.py') diff --git a/test/unit/test_alchemy_language_v1.py b/test/unit/test_alchemy_language_v1.py deleted file mode 100644 index 69bece59b..000000000 --- a/test/unit/test_alchemy_language_v1.py +++ /dev/null @@ -1,85 +0,0 @@ -from unittest import TestCase -import watson_developer_cloud -import responses -import pytest - - -class TestAlchemyLanguageV1(TestCase): - - def test_api_key(self): - default_url = 'https://gateway-a.watsonplatform.net/calls' - inited = watson_developer_cloud.AlchemyLanguageV1(url=default_url, api_key='boguskey', - x_watson_learning_opt_out=True) - assert inited.api_key == 'boguskey' - assert inited.url == default_url - inited.set_url(url="http://google.com") - assert inited.url == "http://google.com" - - # with pytest.raises(watson_developer_cloud.WatsonException): - # watson_developer_cloud.AlchemyLanguageV1() - - # with pytest.raises(watson_developer_cloud.WatsonException): - # watson_developer_cloud.AlchemyLanguageV1(api_key='YOUR API KEY') - - def test_unpack_id(self): - - testdict = {'one': 10} - assert watson_developer_cloud.AlchemyLanguageV1.unpack_id(testdict, 'one') == 10 - assert 
watson_developer_cloud.AlchemyLanguageV1.unpack_id(testdict, 'two') == testdict - - @responses.activate - def test_author(self): - url = 'https://gateway-a.watsonplatform.net' - default_url = 'https://gateway-a.watsonplatform.net/calls' - responses.add(responses.POST, '{0}/html/HTMLGetAuthor'.format(url), - body='{"bogus": "response"}', status=200, - content_type='application/json') - responses.add(responses.POST, '{0}/url/URLGetAuthor'.format(url), - body='{"bogus": "response"}', status=200, - content_type='application/json') - responses.add(responses.POST, '{0}/html/HTMLGetAuthor'.format(default_url), - body='{"bogus": "response"}', status=200, - content_type='application/json') - responses.add(responses.POST, '{0}/url/URLGetAuthor'.format(default_url), - body='{"bogus": "response"}', status=200, - content_type='application/json') - alang = watson_developer_cloud.AlchemyLanguageV1(url=url, api_key='boguskey', x_watson_learning_opt_out=True) - alang.author(html="I'm html") - alang.author(url="http://google.com") - with pytest.raises(watson_developer_cloud.WatsonInvalidArgument): - alang.author() - - alang = watson_developer_cloud.AlchemyLanguageV1(url=default_url, api_key='boguskey', - x_watson_learning_opt_out=True) - alang.author(html="I'm html") - alang.author(url="http://google.com") - assert len(responses.calls) == 4 - - @responses.activate - def test_auth_exception(self): - default_url = 'https://gateway-a.watsonplatform.net/calls' - responses.add(responses.POST, '{0}/url/URLGetAuthor'.format(default_url), - body='{"bogus": "response"}', status=401, - content_type='application/json') - - alang = watson_developer_cloud.AlchemyLanguageV1(url=default_url, api_key='boguskey', - x_watson_learning_opt_out=True) - with pytest.raises(watson_developer_cloud.WatsonException): - alang.author(url="http://google.com") - assert len(responses.calls) == 1 - - @responses.activate - def test_authors(self): - default_url = 'https://gateway-a.watsonplatform.net/calls' - 
responses.add(responses.POST, '{0}/url/URLGetAuthors'.format(default_url), - body='{"bogus": "response"}', status=200, - content_type='application/json') - responses.add(responses.POST, '{0}/html/HTMLGetAuthors'.format(default_url), - body='{"bogus": "response"}', status=200, - content_type='application/json') - - alang = watson_developer_cloud.AlchemyLanguageV1(url=default_url, api_key='boguskey', - x_watson_learning_opt_out=True) - alang.authors(url="http://google.com") - alang.authors(html="

Author

") - assert len(responses.calls) == 2 diff --git a/test/unit/test_document_conversion_v1.py b/test/unit/test_document_conversion_v1.py deleted file mode 100644 index 999de7cf9..000000000 --- a/test/unit/test_document_conversion_v1.py +++ /dev/null @@ -1,48 +0,0 @@ -# coding=utf-8 -import os -import responses -import watson_developer_cloud - - -@responses.activate -def test_success(): - convert_url = 'https://gateway.watsonplatform.net/document-conversion/api/v1/convert_document' - convert_response = '' \ - 'Simple HTML Page' \ - '

Chapter 1

The content of the first chapter.

' - document_conversion = watson_developer_cloud.DocumentConversionV1( - username="username", password="password", version='2015-12-15') - - responses.add(responses.POST, convert_url, - body=convert_response, status=200, - content_type='application/json') - - with open(os.path.join(os.path.dirname(__file__), '../../resources/simple.html'), 'r') as document: - convertConfig = {'conversion_target': watson_developer_cloud.DocumentConversionV1.NORMALIZED_HTML} - document_conversion.convert_document(document=document, config=convertConfig, media_type='text/html') - - assert responses.calls[0].request.url.startswith(convert_url) - assert responses.calls[0].response.text == convert_response - - index_url = 'https://gateway.watsonplatform.net/document-conversion/api/v1/index_document' - index_response = '{"status": "success"}' - - responses.add(responses.POST, index_url, - body=index_response, status=200, - content_type='application/json') - - with open(os.path.join(os.path.dirname(__file__), '../../resources/example.html'), 'r') as document: - indexConfig = { - 'retrieve_and_rank': { - 'dry_run':'false', - 'service_instance_id':'serviceInstanceId', - 'cluster_id':'clusterId', - 'search_collection':'searchCollectionName' - } - } - document_conversion.index_document(config=indexConfig, document=document) - - assert responses.calls[1].request.url.startswith(index_url) - assert responses.calls[1].response.text == index_response - - assert len(responses.calls) == 2 diff --git a/test/unit/test_retrieve_and_rank_v1.py b/test/unit/test_retrieve_and_rank_v1.py deleted file mode 100644 index 7dffd60d5..000000000 --- a/test/unit/test_retrieve_and_rank_v1.py +++ /dev/null @@ -1,279 +0,0 @@ -import os -import responses -import watson_developer_cloud - -CLUSTER_ID = 'sc0747112c_f978_4e1f_b97e_0e3a8101ac5b' -URL_CLUSTERS = 'https://gateway.watsonplatform.net/retrieve-and-rank/api/v1/solr_clusters' -URL_CLUSTER = URL_CLUSTERS + '/' + CLUSTER_ID - -RANKER_ID = '3b140ax14-rank-10383' 
-URL_RANKERS = 'https://gateway.watsonplatform.net/retrieve-and-rank/api/v1/rankers' -URL_RANKER = URL_RANKERS + '/' + RANKER_ID - -retrieve_and_rank = watson_developer_cloud.RetrieveAndRankV1(username="username", - password="password") - -@responses.activate -def test_list_rankers(): - - listrank_response = '{"rankers":[{"ranker_id":"3b140ax14-rank-10383","name":"pythonRank"}]}' - - responses.add(responses.GET, URL_RANKERS, - match_querystring=True, - body=listrank_response, status=200, - content_type='application/json') - - ranker_list = retrieve_and_rank.list_rankers() - - assert ranker_list is not None - assert len(ranker_list['rankers']) == 1 - assert responses.calls[0].request.url == URL_RANKERS - assert responses.calls[0].response.text == listrank_response - - -@responses.activate -def test_ranker_status(): - - statusrank_response = '{"ranker_id":"3b140ax14-rank-10383","name":"pythonRank","status":"Available"}' - - responses.add(responses.GET, URL_RANKER, - match_querystring=True, - body=statusrank_response, status=200, - content_type='application/json') - - ranker_status = retrieve_and_rank.get_ranker_status(RANKER_ID) - - assert ranker_status is not None - assert ranker_status['status'] is not None - assert responses.calls[0].request.url == URL_RANKER - assert responses.calls[0].response.text == statusrank_response - - -@responses.activate -def test_rank(): - - rank_url = URL_RANKER + '/rank' - rank_response = '{"ranker_id":"3b140ax14-rank-10383",' \ - '"top_answer":"30965a00-5415-4ef5-8e4a-bb21a7aeab44", "answers":[' \ - '{"answer_id":"30965a00-5415-4ef5-8e4a-bb21a7aeab44","score":180.0,"confidence":0.2636349925008873},' \ - '{"answer_id":"30965a00-5415-4ef5-8e4a-bb21a7aeab44","score":178.0,"confidence":0.25972667610243827}]}' - - responses.add(responses.POST, rank_url, - match_querystring=True, - body=rank_response, status=200, - content_type='application/json') - - ranker_answer = None - - with open(os.path.join(os.path.dirname(__file__), 
'../../resources/ranker_answer_data.csv'), 'rb') as answer_data: - ranker_answer = retrieve_and_rank.rank('3b140ax14-rank-10383', answer_data=answer_data, top_answers=3) - - assert ranker_answer is not None - assert responses.calls[0].request.url == rank_url - assert responses.calls[0].response.text == rank_response - - -@responses.activate -def test_create_ranker(): - - createrank_response = '{"ranker_id":"3b140ax14-rank-10383","name":"pythonRank","status":"Training"}' - - responses.add(responses.POST, URL_RANKERS, - match_querystring=True, - body=createrank_response, status=200, - content_type='application/json') - - ranker = None - with open(os.path.join(os.path.dirname(__file__), '../../resources/ranker_training_data.csv'), 'rb') as training_data: - ranker = retrieve_and_rank.create_ranker(training_data=training_data, name='pythonRank') - - assert ranker is not None - assert ranker['ranker_id'] == RANKER_ID - assert ranker['name'] == 'pythonRank' - assert responses.calls[0].request.url == URL_RANKERS - assert responses.calls[0].response.text == createrank_response - - -@responses.activate -def test_delete_ranker(): - removerank_response = '{}' - - responses.add(responses.DELETE, URL_RANKER, - match_querystring=True, - body=removerank_response, status=200, - content_type='application/json') - - retrieve_and_rank.delete_ranker(RANKER_ID) - - assert responses.calls[0].request.url == URL_RANKER - assert responses.calls[0].response.text == removerank_response - -@responses.activate -def test_list_cluster(): - listcluster_response = '{"clusters":[{"solr_cluster_id":"sc0747112c_f978_4e1f_b97e_0e3a8101ac5b","cluster_name":"","cluster_size":"","solr_cluster_status":"READY"}]}' - - responses.add(responses.GET, URL_CLUSTERS, - match_querystring=True, - body=listcluster_response, status=200, - content_type='application/json') - - clusters = retrieve_and_rank.list_solr_clusters() - - assert clusters is not None - assert responses.calls[0].request.url == URL_CLUSTERS - 
assert responses.calls[0].response.text == listcluster_response - -@responses.activate -def test_create_cluster(): - createcluster_response = '{"solr_cluster_id":"sc0747112c_f978_4e1f_b97e_0e3a8101ac5b","cluster_name":"","cluster_size":"","solr_cluster_status":"NOT_AVAILABLE"}' - - responses.add(responses.POST, URL_CLUSTERS, - match_querystring=True, - body=createcluster_response, status=200, - content_type='application/json') - - retrieve_and_rank.create_solr_cluster(cluster_name='pythonCluster', cluster_size=None) - - assert responses.calls[0].request.url == URL_CLUSTERS - assert responses.calls[0].response.text == createcluster_response - -@responses.activate -def test_delete_cluster(): - removecluster_response = '{"message":"WRRCSR023: Successfully deleted Solr cluster [sc0747112c_f978_4e1f_b97e_0e3a8101ac5b].","statusCode":200}' - - responses.add(responses.DELETE, URL_CLUSTER, - body=removecluster_response, status=200, - match_querystring=True, - content_type='application/json') - - retrieve_and_rank.delete_solr_cluster(CLUSTER_ID) - - assert responses.calls[0].request.url == URL_CLUSTER - assert responses.calls[0].response.text == removecluster_response - -@responses.activate -def test_cluster_status(): - statuscluster_response = '{"solr_cluster_id":"sc0747112c_f978_4e1f_b97e_0e3a8101ac5b","cluster_name":"","cluster_size":"","solr_cluster_status":"READY"}' - - responses.add(responses.GET, URL_CLUSTER, - body=statuscluster_response, status=200, - match_querystring=True, - content_type='application/json') - - cluster_status = retrieve_and_rank.get_solr_cluster_status(CLUSTER_ID) - - assert cluster_status is not None - assert responses.calls[0].request.url == URL_CLUSTER - assert responses.calls[0].response.text == statuscluster_response - -@responses.activate -def test_list_config(): - listconfigs_url = URL_CLUSTER + '/config' - listconfigs_response = '{"solr_configs":[]}' - - responses.add(responses.GET, listconfigs_url, - body=listconfigs_response, 
status=200, - match_querystring=True, - content_type='application/json') - - config_list = retrieve_and_rank.list_configs(CLUSTER_ID) - - assert config_list is not None - assert responses.calls[0].request.url == listconfigs_url - assert responses.calls[0].response.text == listconfigs_response - -@responses.activate -def test_create_config(): - createconfig_url = URL_CLUSTER + '/config/exampleconfig' - createconfig_response = '{"message":"WRRCSR026: Successfully uploaded named config [example-config] for Solr cluster [sc0747112c_f978_4e1f_b97e_0e3a8101ac5b].","statusCode":200}' - - responses.add(responses.POST, createconfig_url, - body=createconfig_response, status=200, - match_querystring=True, - content_type='application/json') - - with open(os.path.join(os.path.dirname(__file__), '../../resources/solr_config.zip'), 'rb') as config_data: - config = retrieve_and_rank.create_config(CLUSTER_ID, 'exampleconfig', config=config_data) - - assert config is not None - assert responses.calls[00].request.url == createconfig_url - assert responses.calls[00].response.text == createconfig_response - -@responses.activate -def test_delete_config(): - removeconfig_url = URL_CLUSTER + '/config/exampleconfig' - removeconfig_response = '{"message":"WRRCSR025: Successfully deleted named config [example-config] for Solr cluster [sc0747112c_f978_4e1f_b97e_0e3a8101ac5b]].","statusCode":200}' - - responses.add(responses.DELETE, removeconfig_url, - body=removeconfig_response, status=200, - match_querystring=True, - content_type='application/json') - - response = retrieve_and_rank.delete_config(CLUSTER_ID, 'exampleconfig') - - assert response is not None - assert responses.calls[0].request.url == removeconfig_url - assert responses.calls[0].response.text == removeconfig_response - -@responses.activate -def test_get_config(): - getconfig_url = URL_CLUSTER + '/config/exampleconfig' - getconfig_response = '{}' - - responses.add(responses.GET, getconfig_url, - match_querystring=True, - 
body=getconfig_response, status=200) - - retrieve_and_rank.get_config(CLUSTER_ID, 'exampleconfig') - - assert responses.calls[0].request.url == getconfig_url - assert responses.calls[0].response.text == getconfig_response - -@responses.activate -def test_list_collections(): - listcollection_url = URL_CLUSTER + '/solr/admin/collections?action=LIST&wt=json' - - listcollection_response = '{"responseHeader":{"status":0,"QTime":0},"collections":["examplecollection"]}' - - responses.add(responses.GET, listcollection_url, - match_querystring=True, - body=listcollection_response, status=200, - content_type='application/json') - - retrieve_and_rank.list_collections(CLUSTER_ID) - - assert responses.calls[0].response.text == listcollection_response - -@responses.activate -def test_create_collection(): - createcollection_url = URL_CLUSTER + '/solr/admin/collections?action=CREATE&wt=json&collection.configName=exampleconfig&name=examplecollection' - createcollection_response = '{}' - - responses.add(responses.POST, createcollection_url, - match_querystring=True, - body=createcollection_response, status=200, - content_type='application/json') - - collection = retrieve_and_rank.create_collection(CLUSTER_ID, 'examplecollection', 'exampleconfig') - - assert collection is not None - assert responses.calls[0].response.text == createcollection_response - -@responses.activate -def test_delete_collection(): - deletecollection_url = URL_CLUSTER + '/solr/admin/collections?action=DELETE&wt=json&name=examplecollection' - deletecollection_response = '{}' - - responses.add(responses.POST, deletecollection_url, - body=deletecollection_response, status=200, - match_querystring=True, - content_type='application/json') - - retrieve_and_rank.delete_collection(CLUSTER_ID, 'examplecollection', None) - - assert responses.calls[0].response.text == deletecollection_response - -@responses.activate -def test_get_solr_client(): - solr_client = retrieve_and_rank.get_pysolr_client(CLUSTER_ID, 
'examplecollection') - assert solr_client is not None diff --git a/test/unit/test_tradeoff_analytics_v1.py b/test/unit/test_tradeoff_analytics_v1.py deleted file mode 100755 index 3d56c2112..000000000 --- a/test/unit/test_tradeoff_analytics_v1.py +++ /dev/null @@ -1,91 +0,0 @@ -# coding=utf-8 -import json -import os -import responses -import watson_developer_cloud - -dilemmas_url = 'https://gateway.watsonplatform.net/tradeoff-analytics/api/v1/dilemmas' - -@responses.activate -def test_visualization_no_preferable_options(): - - with open(os.path.join(os.path.dirname(__file__), '../../resources/tradeoff-expect1.txt')) as expect_file: - dilemmas_response = expect_file.read() - - responses.add(responses.POST, dilemmas_url, - body=dilemmas_response, status=200, - content_type='application/json') - - tradeoff_analytics = watson_developer_cloud.TradeoffAnalyticsV1( - username="username", password="password") - - with open(os.path.join(os.path.dirname(__file__), '../../resources/problem.json')) as data_file: - tradeoff_analytics.dilemmas(json.load(data_file)) - - assert 'generate_visualization=true' in responses.calls[0].request.url - assert responses.calls[0].response.text == dilemmas_response - assert len(responses.calls) == 1 - -@responses.activate -def test_no_visualization_no_preferable_options(): - - with open(os.path.join(os.path.dirname(__file__), '../../resources/tradeoff-expect2.txt')) as expect_file: - dilemmas_response = expect_file.read() - - responses.add(responses.POST, dilemmas_url, - body=dilemmas_response, status=200, - content_type='application/json') - - tradeoff_analytics = watson_developer_cloud.TradeoffAnalyticsV1( - username="username", password="password") - - with open(os.path.join(os.path.dirname(__file__), '../../resources/problem.json')) as data_file: - tradeoff_analytics.dilemmas(json.load(data_file), generate_visualization=False) - - assert 'generate_visualization=false' in responses.calls[0].request.url - assert 
responses.calls[0].response.text == dilemmas_response - assert len(responses.calls) == 1 - -@responses.activate -def test_no_visualization_preferable_options(): - with open(os.path.join(os.path.dirname(__file__), '../../resources/tradeoff-expect3.txt')) as expect_file: - dilemmas_response = expect_file.read() - - responses.add(responses.POST, dilemmas_url, - body=dilemmas_response, status=200, - content_type='application/json') - - tradeoff_analytics = watson_developer_cloud.TradeoffAnalyticsV1( - username="username", password="password") - - with open(os.path.join(os.path.dirname(__file__), '../../resources/problem.json')) as data_file: - tradeoff_analytics.dilemmas( - json.load(data_file), - generate_visualization=False, - find_preferable_options=True) - - assert 'find_preferable_options=true' in responses.calls[0].request.url - assert responses.calls[0].response.text == dilemmas_response - assert len(responses.calls) == 1 - - -@responses.activate -def test_visualization_preferable_options(): - with open(os.path.join(os.path.dirname(__file__), '../../resources/tradeoff-expect4.txt')) as expect_file: - dilemmas_response = expect_file.read() - - responses.add(responses.POST, dilemmas_url, - body=dilemmas_response, status=200, - content_type='application/json') - tradeoff_analytics = watson_developer_cloud.TradeoffAnalyticsV1( - username="username", password="password") - - with open(os.path.join(os.path.dirname(__file__), '../../resources/problem.json')) as data_file: - tradeoff_analytics.dilemmas( - json.load(data_file), - find_preferable_options=True) - - assert 'generate_visualization=true' in responses.calls[0].request.url - assert 'find_preferable_options=true' in responses.calls[0].request.url - assert responses.calls[0].response.text == dilemmas_response - assert len(responses.calls) == 1 diff --git a/test_env/bin/codecov b/test_env/bin/codecov new file mode 100755 index 000000000..b64c2fc17 --- /dev/null +++ b/test_env/bin/codecov @@ -0,0 +1,11 @@ 
+#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python + +# -*- coding: utf-8 -*- +import re +import sys + +from codecov import main + +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/test_env/bin/coverage b/test_env/bin/coverage new file mode 100755 index 000000000..fcc21fa49 --- /dev/null +++ b/test_env/bin/coverage @@ -0,0 +1,11 @@ +#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python + +# -*- coding: utf-8 -*- +import re +import sys + +from coverage import main + +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/test_env/bin/coverage-2.7 b/test_env/bin/coverage-2.7 new file mode 100755 index 000000000..fcc21fa49 --- /dev/null +++ b/test_env/bin/coverage-2.7 @@ -0,0 +1,11 @@ +#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python + +# -*- coding: utf-8 -*- +import re +import sys + +from coverage import main + +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/test_env/bin/coverage2 b/test_env/bin/coverage2 new file mode 100755 index 000000000..fcc21fa49 --- /dev/null +++ b/test_env/bin/coverage2 @@ -0,0 +1,11 @@ +#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python + +# -*- coding: utf-8 -*- +import re +import sys + +from coverage import main + +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/test_env/bin/dotenv b/test_env/bin/dotenv new file mode 100755 index 000000000..118baf235 --- /dev/null +++ b/test_env/bin/dotenv @@ -0,0 +1,11 @@ +#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python + +# -*- coding: utf-8 -*- +import re +import sys + +from dotenv import cli + +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) + 
sys.exit(cli.cli()) diff --git a/test_env/bin/easy_install b/test_env/bin/easy_install new file mode 100755 index 000000000..e834ef311 --- /dev/null +++ b/test_env/bin/easy_install @@ -0,0 +1,11 @@ +#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python + +# -*- coding: utf-8 -*- +import re +import sys + +from setuptools.command.easy_install import main + +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/test_env/bin/easy_install-2.7 b/test_env/bin/easy_install-2.7 new file mode 100755 index 000000000..e834ef311 --- /dev/null +++ b/test_env/bin/easy_install-2.7 @@ -0,0 +1,11 @@ +#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python + +# -*- coding: utf-8 -*- +import re +import sys + +from setuptools.command.easy_install import main + +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/test_env/bin/epylint b/test_env/bin/epylint new file mode 100755 index 000000000..169410962 --- /dev/null +++ b/test_env/bin/epylint @@ -0,0 +1,11 @@ +#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python + +# -*- coding: utf-8 -*- +import re +import sys + +from pylint import run_epylint + +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) + sys.exit(run_epylint()) diff --git a/test_env/bin/isort b/test_env/bin/isort new file mode 100755 index 000000000..5a4b1e7f8 --- /dev/null +++ b/test_env/bin/isort @@ -0,0 +1,11 @@ +#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python + +# -*- coding: utf-8 -*- +import re +import sys + +from isort.main import main + +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/test_env/bin/pbr b/test_env/bin/pbr new file mode 100755 index 000000000..17451cb19 --- /dev/null +++ b/test_env/bin/pbr @@ -0,0 +1,11 @@ 
+#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python + +# -*- coding: utf-8 -*- +import re +import sys + +from pbr.cmd.main import main + +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/test_env/bin/pip b/test_env/bin/pip new file mode 100755 index 000000000..ca137e6d7 --- /dev/null +++ b/test_env/bin/pip @@ -0,0 +1,11 @@ +#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python + +# -*- coding: utf-8 -*- +import re +import sys + +from pip import main + +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/test_env/bin/pip2 b/test_env/bin/pip2 new file mode 100755 index 000000000..ca137e6d7 --- /dev/null +++ b/test_env/bin/pip2 @@ -0,0 +1,11 @@ +#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python + +# -*- coding: utf-8 -*- +import re +import sys + +from pip import main + +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/test_env/bin/pip2.7 b/test_env/bin/pip2.7 new file mode 100755 index 000000000..ca137e6d7 --- /dev/null +++ b/test_env/bin/pip2.7 @@ -0,0 +1,11 @@ +#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python + +# -*- coding: utf-8 -*- +import re +import sys + +from pip import main + +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/test_env/bin/py.test b/test_env/bin/py.test new file mode 100755 index 000000000..b2fbdb63f --- /dev/null +++ b/test_env/bin/py.test @@ -0,0 +1,11 @@ +#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python + +# -*- coding: utf-8 -*- +import re +import sys + +from pytest import main + +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/test_env/bin/pybabel 
b/test_env/bin/pybabel new file mode 100755 index 000000000..323f32e1f --- /dev/null +++ b/test_env/bin/pybabel @@ -0,0 +1,11 @@ +#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python + +# -*- coding: utf-8 -*- +import re +import sys + +from babel.messages.frontend import main + +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/test_env/bin/pygmentize b/test_env/bin/pygmentize new file mode 100755 index 000000000..59cfc88e9 --- /dev/null +++ b/test_env/bin/pygmentize @@ -0,0 +1,11 @@ +#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python + +# -*- coding: utf-8 -*- +import re +import sys + +from pygments.cmdline import main + +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/test_env/bin/pylint b/test_env/bin/pylint new file mode 100755 index 000000000..076476a9d --- /dev/null +++ b/test_env/bin/pylint @@ -0,0 +1,11 @@ +#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python + +# -*- coding: utf-8 -*- +import re +import sys + +from pylint import run_pylint + +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) + sys.exit(run_pylint()) diff --git a/test_env/bin/pyreverse b/test_env/bin/pyreverse new file mode 100755 index 000000000..5d6dc2e31 --- /dev/null +++ b/test_env/bin/pyreverse @@ -0,0 +1,11 @@ +#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python + +# -*- coding: utf-8 -*- +import re +import sys + +from pylint import run_pyreverse + +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) + sys.exit(run_pyreverse()) diff --git a/test_env/bin/pytest b/test_env/bin/pytest new file mode 100755 index 000000000..b2fbdb63f --- /dev/null +++ b/test_env/bin/pytest @@ -0,0 +1,11 @@ +#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python + +# -*- coding: utf-8 -*- 
+import re +import sys + +from pytest import main + +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/test_env/bin/python b/test_env/bin/python new file mode 100755 index 0000000000000000000000000000000000000000..a18406382eba6fce8954e92dbc10f6d9806965d5 GIT binary patch literal 51712 zcmeHw2V7H0*Y^#Aq98>Pv7k}Gf=Ut)#kCOx2?`pC0v3oNM2LhWn*8ySA|QvUbgT&b>DfvRU2l`JV6D-}Ak9ew;aH+PQQ8b7#1BW^(4_ zr``Jrg0N!|M2&2Ms74TO5F9K7&l#u&5X7l54v36H&Nv1E1_1^E1_1^E1_1^E1_1^E z1_1^E1_1^E27&(~1kS#D^a(!lS@4mcZ3p$P1H`UQ5P1Zfyo(0l3xLF8AwME~aCl5V zGP;Tfa|d5^sOKtV6pIIh#}BBQoh;9;8b@EAoPcnEBqVfVu}(HhN6A#Us=gq1xEV;e z2seZd!&RI;k!=|ki?zCpByB~Mb^RRcQTj&FIm}^m0Y_RNKt?)BpEy;fP|GwBW34Zn z)_0Z8k72~U!u_d!QmsxSSEkSzto6lvQuP~HlgfZ$OIJlVB#6Zs%8_zqvRJMht|G%# z@!Qmt(uWg3=rC-pgG!-%Y}ZL-yN2-x@GVl z`gzg%meTq#Y^e|Lw?3VQOe{{8=p>a9*7sY=rt0TKgO6b|R+XhHC2R+(pGHOY?Gqjr zY3^fOx*dR%j^;u?Dv0%BQ6lL#hjD<+0v=-H2t3##Jp{tTpukcdH7khK^Fj(B2&}_^ z4v-(xoj?`>p+o&Y)R!elL9Djd~zc5Res!|f9?>}54NtcaOY0@~cGL2TQQffH? 
z{+&2<9@HCbq1P_pGBhiz@}A(nL|Iy$EZslUAMQ_{1ew8Mxl|x?eTa~77!qd_94a#> zN}i;VXtFrH%vaZPda5+CUaAZwJZMl0TlL~wP>M2qSNyafNK=vf;ct0zUxmFk4M@s%9ZMCl(J z3(T5isHw2q4XDi?5Qzsh1=1VQgWAH2jq^_ABoPpqw5o;zb{~6f}l;Au{R_LjvWLAP{;dFf$4CWDE&KQ&ngK~`c-&g zxD8Be1!>Xx4-cFxu#ME;3@h9>6Yi8h!Nj5)V4SaYJJHQ&Yzzvpx$@FJnM^#B(FlMD z^oQZ{_3$nY^;#j*-!k67MliJRNd@uph2&#`wKzbg`I2dddhzBg_l&Din<1z-r7DtE zRZPY5sn}3Db~MEDOagtGaRjErC1c4L<3M1M`lC=6FUT6De+?Q(!8fo`00|JJq4)|* z8G@QqT7#*qMjtA+JzbJ&HG%%R(aBu+9w=Ug>fS>8QQGH25Mt+H?Cpx!X;kcVj6Dsp zJZOYh#(h+}aUF21^e*Iu4&$&I&~>_mpUbEt$T@-?YY2ZRKY^df7bhGx;ukR4`C>(p z`d9k*&>vQl`h&&++YOZ?jMGt-q1zfN{y;_iXexd)L>Rk#Fq!b#dNqWfm{1N0gvqxS zT-)U9O!_XQ??(D{NZ*6>JxLz{HTgCq{l=u9;0*KhkeU`u?QP z1wYE*odY%Ei%h;>124uqB+Bo>^;$HWRgKg(Qm|EmLOLN{$U<3n5)*tDZ7I zR-?+4C~Oq$C6USo$kT0M-V1Jn9ja2L*(eqv(MVEcp%SI78kMmTQ!fC$ZdfXEm7OL zMl>`(+>p&AxfZ7b+3-N>G%8y<`}X47xSzgIXIuLEW~3*<45N(_!oR2ii5NDYF4a~k z#M7oajSQM#q>Uo|Gvv}Vd{qAWL5q>4!*YY|dWs~&B^tR6y&}2oXdPTmZW9b@_+Se) zvf(m~OewXQX#gx8g(u5xjRoXQL`$Tpa;2>YBwngYwiQ26Cs)XIP$do@I%=j|2fb0H zl-nLtEHcYALX}3xrebXMZHv4a$#NAtIx;MhEt2Un)HdL%%p9u7km;~}*}dd2IEIgs zrb?74pRYMS56}VA)oeaIpjw?oX$zprYxGnpR2os1Qu?>54^O*RrTBae!sID(okRgW z12n^Mrn5O@Rn{O(u1)(}7mv_Ll0J(UWKgGm;c75!0E%)Yn=ggO%2o%dtXZ@?MMM7U z`kPpaq#C(e$Hs@!=D(pz&GZ8v3CngG_@{+4`hWhHSebo*OauMS=I~ybDOM$y%KBx> zG*!VguO;+^?f_4=RF#gGf;J43OH!07E%d?|nHrv7SeEn`NVK6cnG*h;C}fdJXgRnj zF+?LvhW~c5WsT7L={xIh#zHF(_O_M9DH`3IfJbP*2dKG+|NdOBWYs23X|& z)Aa)gSmgO!hCm`VJVq4RuWxvaSU6~apkH4x9r5P`VN7^0$QBqN4uy3*rHW8f3rz4j z2JcryV<2>v96m_MwQhVtM5+mI&gf8v?>1OJvV-+e%tf%ifrw_X<^O!V1FEy>ilICv z&ZqfGns21}PMRO0`6-%Trui>4|DEP9Y5s}kj`X^99hx_$c}tpeY5o<>!)V@@=H^P9 z*S{0#cp1%AG#^d#Ni?5E^Y3VWmgbLXUJDi{@xEKpJe1~xX`VszX*AzR^V2l{o#xeH zp$_#lqj@))52m@6<}+x%f#yHZyo}~=Xzl@vtEjIn%|mIPK=bi5pGEWUXnu_5w`l&6 z=AI2HeO#K0Xs)99B$_Xw`EHuuqWL?TH-Lp#tQU{wi8P-~^BpvQK=VfM&|>~bny1rz z5zUX%{65W{V8IddccOU;&8N}4l;(G74p|w;AiyBNAiyBNAiyBNAiyBNAiyBNAiyAC z0|EEuUc)^2!$RwXh;i$>M4Bd+rlyHu%0-h*^w+o*hu|h#a+ACmR&Zb z#e~MoJwz2%1Z(K~9nB)M`1I 
za})$&VMHoc!Y*FSo6R!EKn`-#JVZ{ijKpogn6nroH8P1pp^{>FE{3%*nW@ag;8HTE zljwXR`}PY9@7Mt&*HV!hg^WaR6CG4a)76lSjwKeSr;4dv$1?a`fdreNJ~@$NqdEVt zqe_~8B!9}#3O>5Kg0|3b3Hj3t*a|?nvT)}w^*s*L68K$?pXQ|V1>Z94PlYS$*~uy# zVHF-^6;@b<$5@3YTZLy@g%??c*I0!&S%tS-g%4PTPg;eqT7~afg`Zf3Ut5K7<_-QT z@TqMT_OuG)93Hj-HU>5j?#;%Ze88G9xz`@HT@bjb1a^B6xNwXug+G0%15_8t0|=(x z2$<#~_upgtfeXR?|Nm~?sMyw13*!W=u!zGo8WsG&!#rrfxI+yWH~?7=6jcT!a)_uB zLk&@6RONVK4$>n<s#;CAP7y?U)6HWZEzmD*bB9Hs*z_ir;Zpf9e;f?UP?71UR+ferxx7-`2C$stev+dE29X_0k_5X6No* zJ6W{*QP~g8+;&V|n4Fp1YSU%>GVrlTrc%wEoZ?YXU#Ir zPQQ7k)kV*Hy*LF3LH{AbfRH099uNnNL%cW!p|ntu6$ z{MWTvHSCsVbF=MOES@X3cJ-RW@&zneHG4aj17Vp7N-fQGapUINyKr;uUM;O=$Fj3y zHNz6L9+m@?Na8thYgVr|(7t+IyMZEJU9LL@YSpbpE~T&|l~R9RORf(_I@R^2B74eU z(PcQjJA%#6fS;XcV9k-|$@Rc|)#}!!^2Lx#dVFq^`n7pH9yg!^uTx;>4!ptjYX@|+ z2y(~Uu7k6!Iyir!4uM=ASqEo7x~{Nd$`--;3@qZX|DxjDTvl^STmZ>xxhz)#;%nLE zvRK6OweDY=T;4jRz45I*Hzy%&(7wTq^jq)WeSD_#+!jHd1xuQJ+v?imh3u!v!H#Lp zYtG&jZuhQ!WXQ9Gh6@JB^0l0#yTSWdZpoVTvGNGXLN|w&w;$vkYUguv#GxLOKAhUn zCTdw^mizpS%)3o?p1V+UTa;+j`&(X_%ZgU63phD#&GvV5_H|x=x{=U*(zhcXJs5tH z-Dg9W_D|N|zkFg+d-n(c~O;_~M0we}}9_T#-bD&O^(wDr`AkjM24 zy!y}Hd-hKI?+%p@>YLD57TBuV`jizjdgWZCUXn7h^G@oBkZ_bGzuK3meSaQC0R zdgu6>IXUN=w73~OPb~SAXIG6yuvX-3=H_hVx&XZ1t}K?x-X0#Qd~RI~``SBmYgKn_ zTOFRM8r7<5zmA9=@0iH#h#8vNbGYrfZI`xL+A7~SRi{(KxHWPl zxg!9U1z~BABf<4Qsf43}C(mE1QCPMrJnhh`EJCgQ?o~{^5j*O6m96tTzbJD-lP&Vu z#||C-we7hlH4@Zsu6G?Qh`7;Nk$qBImegp+S0`5tZgRWex^M3p3!Z4*W@r;`zg#q~ z%r)oLx_JlVl?z|$iT!8$nd~mQeE8$VOHIRo%_m#Al&!ruuV}=*dG94B`aQ3|*(>EK zxAbVO$H&8RR_gaW%iIy%xS18ho5vFrRxprS;C==4V}B zvZ_nd{z+Y$KN}|++tBIo)99O#Vb3Q`I`h?#U5m=M>=+WW=IEt(y=KSNQ(M=1c?;+C z@V-7`WAK47S-kzyU9a=<>X)ZaKJuH_OwJWm%&K{te-2wvLp^;`%aJ!=MA*a4t+s+5 z5$vom`cI7r+>YE10YL%04&1EM5)4qxi@cc!hY*>u{CCK1914v!ME zCboajad}+Sz=wqm^kr}RDE_GNZF8=j(X0C#Va8!=0omWN_4*AKmQ{azBT%*r;$j_f>p!p-g<N3sZ_5x3ORUxS<_erQY6y#htRF=ULZ9-CnefY~Aqqr!ytHT&^WH zzWigilCQpd>FPpEciP;{(_eR~Ywa6Jp5LzcdU}Lt;KA=X{H<@^-&UW#w<0QhT+^sD z;cUMCh%U*=Z34d_a5D7f_=kh-1E!>IE%GjtX-z4Kvb~X4GnWK<$d-+qd7?el>2q15 
zVHnJ`>h{$?*TZdg;4k*@To(T4@n+lSvK--x_CN&vuS%Niz~bgS=lYpP7(13{6YT%m zzZ?L#IfpAU+i`QYH_X`@BU8vFuqB5Lo1b*4axGh0@o!AVPRe3Slv!-GL<4(gBwB3- zEUmL8u+u^f8&R;#a3~`~QnGQ7fq#DR+efQ!z*Ys=6@WV7cb76H?9#L9n&j{`wBx*}_M>(@nzG>L zi;)YSXI>b|?RT(g-h{kH^SVWJdcJN!$kcW}Z7!~Jb^lB4g2xwjD>!dOBZefGN&`o}{;Ed1*az2Lo4H)yt!>b+On9K02)1G|Batwc)K6w@d!7 zdhwwrf7HRn=d0fvU+-uu&g~t?9}I|@JDJlWR(7}C?^^$DE#BGx{QAMtTbJfP zPjq}HE_k{*LXoj~^OW;-Z;w9sFo~~QJ)y(1w}W-boqG&Yo^4k;Y(kr^_sjZ;YrmM$ zS{0j9(t2AHuHE1IIyH#vj6)Ln9%IKPs=TGxe2vLv zH8zi7bwRx=kW-a0>?1d*Vpw8%wz2sRQ#mZiB<5FZ>*37#zjHVXLUfwi`*YiI{g$>~ z+A`mVMgU&b=*?aD7KghWaZ43$?82A0DzBV1vbmn9#uwi%t=~GSPaA1+bx(Uc__(S1 ztwRo9=>!kEExT#l+NZu05Wo^?uU7@6*YVn^I@W z`ZvGzF{CJHTWUzm9^a{_-klj+C-!RTxq%KPy*HOgKFk_BEm3p6^)~Oe>DT+DEOGd7 z@BX0PJrkRbnb>Lnr`?m=20F|XPT2Wr{*7vT3`x%$q>r3&JL7D}LA*rA^aNI8nJ8vP z=BS1Bo!pwt+}p7@wojP<+-s|O%bqqKK6BmxZQYXo!Fzp{FXeU_Ei2eiy0_lTMDDRg zEBx$l9XT5@F@4ELQ~2Sf4K{S$^Ig%*x4VuV|GK05BO`0~whTe;N4 z!^qYwZPe%P{^SqAwdb39@_D(e9q?Fgh0jNBPT@a3Oxjf$z%4&zE!qyg$Kb7iXgcPy z;?~(N&&$?``eeN;c)G&#-K>DEy<2M16rTRSxWzA;pE>+Stur;eM;~V`{Eb)ibfwR! 
zK8dpf?mj*Fv-k8<%N9F+G7YJF|9Tgvi;8WVMmTT$w&c^?Zw>Wptg#nAxfWgYO`)LS zYu>KOk#9V%*7830cGb9|5#PP_;BH=Rydx7#a5+2p=dL;XE~_StefV(tm=i^VUq9^D zz)KuvYkY3Tg_Q`KdwR8tixX)O8D`aPovSUULjLOslPPa zleEC#`;?owF&!sX7{?&MAiyBNAiyBNAiyBNAiyBNAiyBNAiyBNAiyBNAiyBNAiyBN zAiyBNAiyB---5sy%KksOMH06|;(p=Qu*n$g|HFvTYg^dNhdsoU5qUK0PwK2{-`>NQAVxvF2W^J{ zMAA7r5ZNf;w$Lxx@Sg%Yeu8{cKnG%0;bytPvJZiDF!S~=*}@lo#uh%9M9MBdzyu?l9Za`nMgTK?gC7&d{~c3$a-kp4p9h}yBnLP(i#gbnuD_n-#XiI3qMtdLY=m^21{B&|0DgBru6 z6QtaMl+Ti~4=I=8r3~#*&PEIH`n=X4^KKl=>k1ro;$`zfARsiApe`eayrx@Tsp~{N znQotop9qG$O_(Xx_z}LVCmeAjP5g^1s#CagFI+0mR9J!+)A>($=p1MKoysbh@X%fu zM=LPipd!K8fBVAj6OP*FnKCZyB;cY#r~%f_Q1TQ|h&3is84LGeRUfniC8?N=PBxVw zRz_(>ZK!;%6Mv*C9V;@GARasMCWYXPAK;@8z|cQ{m^?4qUQkdW< zf(C42kr+)TlFv{P4`l;lhAQ4b(_%BDia0VC`4koPCv#ytVXg}nxrURu$mgyok<8VY z&b7gu%Q2RBLu8y|4m!sg$5+TA-VNhODukT@E}H^M;F-XiiZ!O1GuoRn98D#7Ke0wp zMWmr%3*G@%gg?!2$~l5VLZWyGoC2VPbQmb&V#yIA z5pJ7jiZJ9CgDnKZRJ89OC+u&WV^(Ny$S)^TPSPpgm{Xh$`Hvt@Xw1iF>xf&T*nAcM z&PN|`8aL@6Jol_RFqz#l!%few0HRclxE25Z4d(!4$+VRR0l@oJ75o1XB*XBTLavYF3nEfISffUVGJL7QdL9ct zOtJKz-CS7LUJkSYXgg3T&`F@5fXaZ3K+l0b09A+eYd4^VKt4eJK*2zvKv6)kK)A*| z94H-VBv39;G0;4q5}+$UuYl^bf-}8=dIKc^eFHQPXgkm)pyxm?tzjJwC>$sWXbjK- zplv{>f$jm}LS18^K%m}0GN1yWML;D$XMyemIrtGo6QE$AAwVO6ih_`wkL=lKq{blK*xa|0Wpq2fI)ykfI)yk zfI)ykfI)ykfI)ykfI)yk;Gagoy}9$S*gDN-`(@X)Q?t}|{h>l&w%*oR0FJiK0(c2) zJfy?30Gz9y1>i<1syGY4gVJkh;v33%SzXP17C>VJYJEY1Sx3c)I80fd+%%of3+e3(U?`6K{5 zoxtKCfG7x3=K%N&>>C*$z~u(wmGR7iDkj4rZU7l)J`MnnF)*7CFNTKs51$24)NGw2 zKA&8eE;6450DB3ja{%0^eb|shPA0*W5DKL1PsunHMRc-|BP?V~d;bcHcuPC~lP%(B zTF92R{nuE;Z?cdr?fF~U@;_;j=c(N^4uk;K1gZt(1mp}<8wgg{89V(7=|5nnzp@Wj?S0tO@EDGY9{0Bl_Wzsg^@sek z_WGwK9=?9O@cx@EE8WiBeBQTva&}*rc2oO4u)DL`$7kNj2KT4TzTC6?)bhbo=f51* z((Cd|`48g-&L90wcUaZ!WL(`|-|rHvTG{`2q0C)7D(vf8Rr%`!i#&ACKT>?DaGD`Yr6@ z+B9_gMQh%Sy?(}C|G)VZkuMpxX6*IjAsvjpe#TxuW3T^TI0J~W*U#ANXYBP?J${6- z*U#AN$Gv@(XT1HN*y~>@Y25VN$^D{-rhN0;?}Tvo!;y*`MegxEKeTo~_U89F-i5!e zO&|RzFRR>l_U&0Ues0yd)00a<&r10$X{zw_FQ>;|Tkjv&_j$pRR;4FmJNXY=_-s;5ulGK>vAP8#mjp}4 
zcZhNS;g3u0Qzg00=giPY7wnebO#9wRRr0;4^@&qm0!sHz-RSL6YhA>)j=lS4%#j!7 zOi8~Gn0{_d;<|fV4}G(= (3, 2): + valid_opts.insert(-1, 'extension-suffix') + valid_opts.append('abiflags') +if sys.version_info >= (3, 3): + valid_opts.append('configdir') + + +def exit_with_usage(code=1): + sys.stderr.write("Usage: {0} [{1}]\n".format( + sys.argv[0], '|'.join('--'+opt for opt in valid_opts))) + sys.exit(code) + +try: + opts, args = getopt.getopt(sys.argv[1:], '', valid_opts) +except getopt.error: + exit_with_usage() + +if not opts: + exit_with_usage() + +pyver = sysconfig.get_config_var('VERSION') +getvar = sysconfig.get_config_var + +opt_flags = [flag for (flag, val) in opts] + +if '--help' in opt_flags: + exit_with_usage(code=0) + +for opt in opt_flags: + if opt == '--prefix': + print(sysconfig.get_config_var('prefix')) + + elif opt == '--exec-prefix': + print(sysconfig.get_config_var('exec_prefix')) + + elif opt in ('--includes', '--cflags'): + flags = ['-I' + sysconfig.get_path('include'), + '-I' + sysconfig.get_path('platinclude')] + if opt == '--cflags': + flags.extend(getvar('CFLAGS').split()) + print(' '.join(flags)) + + elif opt in ('--libs', '--ldflags'): + abiflags = getattr(sys, 'abiflags', '') + libs = ['-lpython' + pyver + abiflags] + libs += getvar('LIBS').split() + libs += getvar('SYSLIBS').split() + # add the prefix/lib/pythonX.Y/config dir, but only if there is no + # shared library in prefix/lib/. 
+ if opt == '--ldflags': + if not getvar('Py_ENABLE_SHARED'): + libs.insert(0, '-L' + getvar('LIBPL')) + if not getvar('PYTHONFRAMEWORK'): + libs.extend(getvar('LINKFORSHARED').split()) + print(' '.join(libs)) + + elif opt == '--extension-suffix': + ext_suffix = sysconfig.get_config_var('EXT_SUFFIX') + if ext_suffix is None: + ext_suffix = sysconfig.get_config_var('SO') + print(ext_suffix) + + elif opt == '--abiflags': + if not getattr(sys, 'abiflags', None): + exit_with_usage() + print(sys.abiflags) + + elif opt == '--configdir': + print(sysconfig.get_config_var('LIBPL')) diff --git a/test_env/bin/python2 b/test_env/bin/python2 new file mode 120000 index 000000000..d8654aa0e --- /dev/null +++ b/test_env/bin/python2 @@ -0,0 +1 @@ +python \ No newline at end of file diff --git a/test_env/bin/python2.7 b/test_env/bin/python2.7 new file mode 120000 index 000000000..d8654aa0e --- /dev/null +++ b/test_env/bin/python2.7 @@ -0,0 +1 @@ +python \ No newline at end of file diff --git a/test_env/bin/rst2html.py b/test_env/bin/rst2html.py new file mode 100755 index 000000000..61577783b --- /dev/null +++ b/test_env/bin/rst2html.py @@ -0,0 +1,23 @@ +#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python + +# $Id: rst2html.py 4564 2006-05-21 20:44:42Z wiemann $ +# Author: David Goodger +# Copyright: This module has been placed in the public domain. + +""" +A minimal front end to the Docutils Publisher, producing HTML. +""" + +try: + import locale + locale.setlocale(locale.LC_ALL, '') +except: + pass + +from docutils.core import publish_cmdline, default_description + + +description = ('Generates (X)HTML documents from standalone reStructuredText ' + 'sources. 
' + default_description) + +publish_cmdline(writer_name='html', description=description) diff --git a/test_env/bin/rst2html4.py b/test_env/bin/rst2html4.py new file mode 100755 index 000000000..a1dcbdf0a --- /dev/null +++ b/test_env/bin/rst2html4.py @@ -0,0 +1,26 @@ +#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python + +# $Id: rst2html4.py 7994 2016-12-10 17:41:45Z milde $ +# Author: David Goodger +# Copyright: This module has been placed in the public domain. + +""" +A minimal front end to the Docutils Publisher, producing (X)HTML. + +The output conforms to XHTML 1.0 transitional +and almost to HTML 4.01 transitional (except for closing empty tags). +""" + +try: + import locale + locale.setlocale(locale.LC_ALL, '') +except: + pass + +from docutils.core import publish_cmdline, default_description + + +description = ('Generates (X)HTML documents from standalone reStructuredText ' + 'sources. ' + default_description) + +publish_cmdline(writer_name='html4', description=description) diff --git a/test_env/bin/rst2html5.py b/test_env/bin/rst2html5.py new file mode 100755 index 000000000..40bad1c6f --- /dev/null +++ b/test_env/bin/rst2html5.py @@ -0,0 +1,35 @@ +#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python +# -*- coding: utf8 -*- +# :Copyright: © 2015 Günter Milde. +# :License: Released under the terms of the `2-Clause BSD license`_, in short: +# +# Copying and distribution of this file, with or without modification, +# are permitted in any medium without royalty provided the copyright +# notice and this notice are preserved. +# This file is offered as-is, without any warranty. +# +# .. _2-Clause BSD license: http://www.spdx.org/licenses/BSD-2-Clause +# +# Revision: $Revision: 7847 $ +# Date: $Date: 2015-03-17 18:30:47 +0100 (Di, 17 Mär 2015) $ + +""" +A minimal front end to the Docutils Publisher, producing HTML 5 documents. + +The output also conforms to XHTML 1.0 transitional +(except for the doctype declaration). 
+""" + +try: + import locale # module missing in Jython + locale.setlocale(locale.LC_ALL, '') +except locale.Error: + pass + +from docutils.core import publish_cmdline, default_description + +description = (u'Generates HTML 5 documents from standalone ' + u'reStructuredText sources ' + + default_description) + +publish_cmdline(writer_name='html5', description=description) diff --git a/test_env/bin/rst2latex.py b/test_env/bin/rst2latex.py new file mode 100755 index 000000000..6c1c5bec2 --- /dev/null +++ b/test_env/bin/rst2latex.py @@ -0,0 +1,26 @@ +#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python + +# $Id: rst2latex.py 5905 2009-04-16 12:04:49Z milde $ +# Author: David Goodger +# Copyright: This module has been placed in the public domain. + +""" +A minimal front end to the Docutils Publisher, producing LaTeX. +""" + +try: + import locale + locale.setlocale(locale.LC_ALL, '') +except: + pass + +from docutils.core import publish_cmdline + +description = ('Generates LaTeX documents from standalone reStructuredText ' + 'sources. ' + 'Reads from (default is stdin) and writes to ' + ' (default is stdout). See ' + ' for ' + 'the full reference.') + +publish_cmdline(writer_name='latex', description=description) diff --git a/test_env/bin/rst2man.py b/test_env/bin/rst2man.py new file mode 100755 index 000000000..5db6ead2c --- /dev/null +++ b/test_env/bin/rst2man.py @@ -0,0 +1,26 @@ +#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python + +# Author: +# Contact: grubert@users.sf.net +# Copyright: This module has been placed in the public domain. + +""" +man.py +====== + +This module provides a simple command line interface that uses the +man page writer to output from ReStructuredText source. +""" + +import locale +try: + locale.setlocale(locale.LC_ALL, '') +except: + pass + +from docutils.core import publish_cmdline, default_description +from docutils.writers import manpage + +description = ("Generates plain unix manual documents. 
" + default_description) + +publish_cmdline(writer=manpage.Writer(), description=description) diff --git a/test_env/bin/rst2odt.py b/test_env/bin/rst2odt.py new file mode 100755 index 000000000..5644184f5 --- /dev/null +++ b/test_env/bin/rst2odt.py @@ -0,0 +1,30 @@ +#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python + +# $Id: rst2odt.py 5839 2009-01-07 19:09:28Z dkuhlman $ +# Author: Dave Kuhlman +# Copyright: This module has been placed in the public domain. + +""" +A front end to the Docutils Publisher, producing OpenOffice documents. +""" + +import sys +try: + import locale + locale.setlocale(locale.LC_ALL, '') +except: + pass + +from docutils.core import publish_cmdline_to_binary, default_description +from docutils.writers.odf_odt import Writer, Reader + + +description = ('Generates OpenDocument/OpenOffice/ODF documents from ' + 'standalone reStructuredText sources. ' + default_description) + + +writer = Writer() +reader = Reader() +output = publish_cmdline_to_binary(reader=reader, writer=writer, + description=description) + diff --git a/test_env/bin/rst2odt_prepstyles.py b/test_env/bin/rst2odt_prepstyles.py new file mode 100755 index 000000000..57dcb5667 --- /dev/null +++ b/test_env/bin/rst2odt_prepstyles.py @@ -0,0 +1,67 @@ +#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python + +# $Id: rst2odt_prepstyles.py 5839 2009-01-07 19:09:28Z dkuhlman $ +# Author: Dave Kuhlman +# Copyright: This module has been placed in the public domain. + +""" +Fix a word-processor-generated styles.odt for odtwriter use: Drop page size +specifications from styles.xml in STYLE_FILE.odt. 
+""" + +# +# Author: Michael Schutte + +from lxml import etree +import sys +import zipfile +from tempfile import mkstemp +import shutil +import os + +NAMESPACES = { + "style": "urn:oasis:names:tc:opendocument:xmlns:style:1.0", + "fo": "urn:oasis:names:tc:opendocument:xmlns:xsl-fo-compatible:1.0" +} + +def prepstyle(filename): + + zin = zipfile.ZipFile(filename) + styles = zin.read("styles.xml") + + root = etree.fromstring(styles) + for el in root.xpath("//style:page-layout-properties", + namespaces=NAMESPACES): + for attr in el.attrib: + if attr.startswith("{%s}" % NAMESPACES["fo"]): + del el.attrib[attr] + + tempname = mkstemp() + zout = zipfile.ZipFile(os.fdopen(tempname[0], "w"), "w", + zipfile.ZIP_DEFLATED) + + for item in zin.infolist(): + if item.filename == "styles.xml": + zout.writestr(item, etree.tostring(root)) + else: + zout.writestr(item, zin.read(item.filename)) + + zout.close() + zin.close() + shutil.move(tempname[1], filename) + + +def main(): + args = sys.argv[1:] + if len(args) != 1: + print >> sys.stderr, __doc__ + print >> sys.stderr, "Usage: %s STYLE_FILE.odt\n" % sys.argv[0] + sys.exit(1) + filename = args[0] + prepstyle(filename) + +if __name__ == '__main__': + main() + + +# vim:tw=78:sw=4:sts=4:et: diff --git a/test_env/bin/rst2pseudoxml.py b/test_env/bin/rst2pseudoxml.py new file mode 100755 index 000000000..8d4711b24 --- /dev/null +++ b/test_env/bin/rst2pseudoxml.py @@ -0,0 +1,23 @@ +#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python + +# $Id: rst2pseudoxml.py 4564 2006-05-21 20:44:42Z wiemann $ +# Author: David Goodger +# Copyright: This module has been placed in the public domain. + +""" +A minimal front end to the Docutils Publisher, producing pseudo-XML. +""" + +try: + import locale + locale.setlocale(locale.LC_ALL, '') +except: + pass + +from docutils.core import publish_cmdline, default_description + + +description = ('Generates pseudo-XML from standalone reStructuredText ' + 'sources (for testing purposes). 
' + default_description) + +publish_cmdline(description=description) diff --git a/test_env/bin/rst2s5.py b/test_env/bin/rst2s5.py new file mode 100755 index 000000000..b97a6643b --- /dev/null +++ b/test_env/bin/rst2s5.py @@ -0,0 +1,24 @@ +#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python + +# $Id: rst2s5.py 4564 2006-05-21 20:44:42Z wiemann $ +# Author: Chris Liechti +# Copyright: This module has been placed in the public domain. + +""" +A minimal front end to the Docutils Publisher, producing HTML slides using +the S5 template system. +""" + +try: + import locale + locale.setlocale(locale.LC_ALL, '') +except: + pass + +from docutils.core import publish_cmdline, default_description + + +description = ('Generates S5 (X)HTML slideshow documents from standalone ' + 'reStructuredText sources. ' + default_description) + +publish_cmdline(writer_name='s5', description=description) diff --git a/test_env/bin/rst2xetex.py b/test_env/bin/rst2xetex.py new file mode 100755 index 000000000..869a93034 --- /dev/null +++ b/test_env/bin/rst2xetex.py @@ -0,0 +1,27 @@ +#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python + +# $Id: rst2xetex.py 7847 2015-03-17 17:30:47Z milde $ +# Author: Guenter Milde +# Copyright: This module has been placed in the public domain. + +""" +A minimal front end to the Docutils Publisher, producing Lua/XeLaTeX code. +""" + +try: + import locale + locale.setlocale(locale.LC_ALL, '') +except: + pass + +from docutils.core import publish_cmdline + +description = ('Generates LaTeX documents from standalone reStructuredText ' + 'sources for compilation with the Unicode-aware TeX variants ' + 'XeLaTeX or LuaLaTeX. ' + 'Reads from (default is stdin) and writes to ' + ' (default is stdout). 
See ' + ' for ' + 'the full reference.') + +publish_cmdline(writer_name='xetex', description=description) diff --git a/test_env/bin/rst2xml.py b/test_env/bin/rst2xml.py new file mode 100755 index 000000000..8d6c45db3 --- /dev/null +++ b/test_env/bin/rst2xml.py @@ -0,0 +1,23 @@ +#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python + +# $Id: rst2xml.py 4564 2006-05-21 20:44:42Z wiemann $ +# Author: David Goodger +# Copyright: This module has been placed in the public domain. + +""" +A minimal front end to the Docutils Publisher, producing Docutils XML. +""" + +try: + import locale + locale.setlocale(locale.LC_ALL, '') +except: + pass + +from docutils.core import publish_cmdline, default_description + + +description = ('Generates Docutils-native XML from standalone ' + 'reStructuredText sources. ' + default_description) + +publish_cmdline(writer_name='xml', description=description) diff --git a/test_env/bin/rstpep2html.py b/test_env/bin/rstpep2html.py new file mode 100755 index 000000000..753d78325 --- /dev/null +++ b/test_env/bin/rstpep2html.py @@ -0,0 +1,25 @@ +#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python + +# $Id: rstpep2html.py 4564 2006-05-21 20:44:42Z wiemann $ +# Author: David Goodger +# Copyright: This module has been placed in the public domain. + +""" +A minimal front end to the Docutils Publisher, producing HTML from PEP +(Python Enhancement Proposal) documents. +""" + +try: + import locale + locale.setlocale(locale.LC_ALL, '') +except: + pass + +from docutils.core import publish_cmdline, default_description + + +description = ('Generates (X)HTML from reStructuredText-format PEP files. 
' + + default_description) + +publish_cmdline(reader_name='pep', writer_name='pep_html', + description=description) diff --git a/test_env/bin/sphinx-apidoc b/test_env/bin/sphinx-apidoc new file mode 100755 index 000000000..afd35fa1d --- /dev/null +++ b/test_env/bin/sphinx-apidoc @@ -0,0 +1,11 @@ +#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python + +# -*- coding: utf-8 -*- +import re +import sys + +from sphinx.ext.apidoc import main + +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/test_env/bin/sphinx-autogen b/test_env/bin/sphinx-autogen new file mode 100755 index 000000000..c49b281f1 --- /dev/null +++ b/test_env/bin/sphinx-autogen @@ -0,0 +1,11 @@ +#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python + +# -*- coding: utf-8 -*- +import re +import sys + +from sphinx.ext.autosummary.generate import main + +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/test_env/bin/sphinx-build b/test_env/bin/sphinx-build new file mode 100755 index 000000000..59d142cb9 --- /dev/null +++ b/test_env/bin/sphinx-build @@ -0,0 +1,11 @@ +#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python + +# -*- coding: utf-8 -*- +import re +import sys + +from sphinx.cmd.build import main + +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/test_env/bin/sphinx-quickstart b/test_env/bin/sphinx-quickstart new file mode 100755 index 000000000..476d96286 --- /dev/null +++ b/test_env/bin/sphinx-quickstart @@ -0,0 +1,11 @@ +#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python + +# -*- coding: utf-8 -*- +import re +import sys + +from sphinx.cmd.quickstart import main + +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git 
a/test_env/bin/symilar b/test_env/bin/symilar new file mode 100755 index 000000000..a49fa4708 --- /dev/null +++ b/test_env/bin/symilar @@ -0,0 +1,11 @@ +#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python + +# -*- coding: utf-8 -*- +import re +import sys + +from pylint import run_symilar + +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) + sys.exit(run_symilar()) diff --git a/test_env/bin/tox b/test_env/bin/tox new file mode 100755 index 000000000..91cad0cd3 --- /dev/null +++ b/test_env/bin/tox @@ -0,0 +1,11 @@ +#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python + +# -*- coding: utf-8 -*- +import re +import sys + +from tox import cmdline + +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) + sys.exit(cmdline()) diff --git a/test_env/bin/tox-quickstart b/test_env/bin/tox-quickstart new file mode 100755 index 000000000..ff9d96b14 --- /dev/null +++ b/test_env/bin/tox-quickstart @@ -0,0 +1,11 @@ +#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python + +# -*- coding: utf-8 -*- +import re +import sys + +from tox._quickstart import main + +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/test_env/bin/virtualenv b/test_env/bin/virtualenv new file mode 100755 index 000000000..f279ccd05 --- /dev/null +++ b/test_env/bin/virtualenv @@ -0,0 +1,11 @@ +#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python + +# -*- coding: utf-8 -*- +import re +import sys + +from virtualenv import main + +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/test_env/bin/wheel b/test_env/bin/wheel new file mode 100755 index 000000000..3e42eeba9 --- /dev/null +++ b/test_env/bin/wheel @@ -0,0 +1,11 @@ +#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python + +# -*- coding: utf-8 -*- 
+import re +import sys + +from wheel.tool import main + +if __name__ == '__main__': + sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) + sys.exit(main()) diff --git a/test_env/include/python2.7 b/test_env/include/python2.7 new file mode 120000 index 000000000..3fe034fcc --- /dev/null +++ b/test_env/include/python2.7 @@ -0,0 +1 @@ +/System/Library/Frameworks/Python.framework/Versions/2.7/include/python2.7 \ No newline at end of file diff --git a/test_env/pip-selfcheck.json b/test_env/pip-selfcheck.json new file mode 100644 index 000000000..5e657e400 --- /dev/null +++ b/test_env/pip-selfcheck.json @@ -0,0 +1 @@ +{"last_check":"2018-02-14T18:10:50Z","pypi_version":"9.0.1"} \ No newline at end of file diff --git a/utilities/retrieve_and_rank_v1_rank.py b/utilities/retrieve_and_rank_v1_rank.py deleted file mode 100644 index 8c8b1adf6..000000000 --- a/utilities/retrieve_and_rank_v1_rank.py +++ /dev/null @@ -1,499 +0,0 @@ -""" -This example code implements a command line runnable script that can be used to run learning-to-rank experiments - using the /rank API methods available as part of the Retrieve and Rank service. See usage details by running - python rank_with_evaluation_v1.py --help -""" -import argparse -import csv -import json -import logging -import math -import sys -import tempfile -from collections import defaultdict -from time import sleep - -from watson_developer_cloud import RetrieveAndRankV1, WatsonException - -BLUEMIX_CONNECTION = None -_MAX_RUNTIME_ATTEMPTS = 1 -_QID_COL_INDEX = 0 -_LABEL_COL_INDEX = -1 -_ANS_ID_COL_INDEX = 0 -_TOP_K_FOR_METRICS = 5 - - -def train_ranker(train_file, is_enabled_make_space=False): - """ - Method submits POST request to create a new ranker using the input training file. Then polls the service - waiting for training to complete. Raises exception if ranker training fails. 
- - :param file train_file: filepath to the training file in csv format (qid,feature1,feature2...,label) - :param bool is_enabled_make_space: boolean which decides if pre-existing rankers can be deleted to make space - space for a new ranker. - :return: ranker id that can be used to access to the ranker in bluemix - :rtype: str - """ - LOGGER.info("Submitting request to create a new ranker trained with file %s" % train_file) - - try: - response = BLUEMIX_CONNECTION.create_ranker(training_data=train_file, name='RANKER-EXPERIMENT') - LOGGER.info("Training request submitted successfully for ranker id:<<%s>>" % response['ranker_id']) - if LOGGER.isEnabledFor(logging.DEBUG): - print(json.dumps(response, indent=2)) - return response['ranker_id'] - - except WatsonException as ex: - LOGGER.error("Training failed with response: %s" % ex.message) - - # Check if quota is full & make space if deletion is enabled - if "This user or service instance has the maximum number of rankers" in ex.message: - if is_enabled_make_space: - LOGGER.warn("Quota is full. Deleting all previous rankers to make space.") - _delete_existing_rankers() - train_file.seek(0) # rewind the file handler so that we can make another request - return train_ranker(train_file, False) - else: - LOGGER.error("Quota is full. Use the '-r' parameter to make space by deleting all previous rankers.") - raise ex - else: - raise ex - - -def _delete_existing_rankers(): - """ - Helper method deletes pre-existing rankers under this bluemix url for this user. Expects at least one pre-existing - ranker to be found. 
- """ - previously_created_rankers = BLUEMIX_CONNECTION.list_rankers()['rankers'] - - LOGGER.debug("Found %d previously created rankers" % len(previously_created_rankers)) - for ranker in previously_created_rankers: - response = BLUEMIX_CONNECTION.delete_ranker(ranker['ranker_id']) - if LOGGER.isEnabledFor(logging.DEBUG): - print(json.dumps(response, indent=2)) - - LOGGER.info("Deleted %d rankers successfully" % len(previously_created_rankers)) - - -def wait_for_training_to_complete(ranker_id): - """ - Polls (every 30s) the service to check when the ranker's status is no longer "TRAINING". - Raises exception if the final state is not "AVAILABLE", otherwise returns cleanly. - - :param str ranker_id: the ranker id whose status needs to be checked. - """ - LOGGER.info("Checking/Waiting for training to complete for ranker %s" % ranker_id) - - response = BLUEMIX_CONNECTION.get_ranker_status(ranker_id=ranker_id) - - while response['status'].upper() == "TRAINING": - seconds_between_polling_requests = 30 - LOGGER.debug( - "Ranker still in status: %s. Will continue polling every %d secs." 
% ( - response['status'], seconds_between_polling_requests)) - sleep(seconds_between_polling_requests) - response = BLUEMIX_CONNECTION.get_ranker_status(ranker_id=ranker_id) - if LOGGER.isEnabledFor(logging.DEBUG): - print(json.dumps(response, indent=2)) - - LOGGER.info("Finished waiting for ranker <<%s>> to train: %s" % (ranker_id, response['status'])) - - if response['status'].upper() != "AVAILABLE": - raise RuntimeError("Unusable ranker, training failed with description: %s" % response['status_description']) - - -def initialize_logger(log_level): - """ - Initializes and returns a Logger that prints timestamps and is set to log at the input log level - :param log_level: level to log messages - :return: initialized logger - :rtype: logging.logger - """ - logger = logging.getLogger(__name__) - logger.setLevel(log_level) - - ch = logging.StreamHandler(sys.stdout) - ch.setLevel(log_level) - formatter = logging.Formatter('%(asctime)s %(levelname)s %(name)s - %(message)s') - ch.setFormatter(formatter) - logger.addHandler(ch) - - return logger - - -def test_ranker(ranker_id, test_file, prediction_outfile): - """ - Generates runtime requests using the data from the input test file, submits them to the ranker associated with - the input ranker id and writes returned predictions to the specified output path. The predictions are in the - same sequence as the feature vectors in the test file. 
However, since RaaS only returns top 10 ranked documents - the remaining document scores are defaulted to -1 (with confidence 0) - - :param str ranker_id: id for the associated ranker in bluemix - :param file test_file: a csv containing data to use for the requests (question_id, feature_1, feature_2,...,label) - :param file prediction_outfile: valid path for the prediction file to be created (over writes existing) - """ - LOGGER.info("Sending runtime requests from <<%s>> to ranker id: <<%s>> (predictions will be written to: <<%s>>)" % ( - test_file, ranker_id, prediction_outfile)) - - reader = csv.reader(test_file, delimiter=',') - feature_names = reader.next() - feature_names.pop(_QID_COL_INDEX) - feature_names.pop(_LABEL_COL_INDEX) - - prev_qid = None - stats = defaultdict(float) - - curr_answer_set = [] - - for row in reader: - if row: - curr_qid = row[_QID_COL_INDEX] - if not prev_qid: - LOGGER.debug("Starting to read answers for the first qid <<%s>>" % curr_qid) - prev_qid = curr_qid - curr_answer_set.append(row) - - elif prev_qid != curr_qid: - LOGGER.debug("We're done reading answers for qid <<%s>>. 
Generate predictions and prepare to read " - "answers for qid <<%s>>" % (prev_qid, curr_qid)) - - _update_request_stats(curr_answer_set, stats) - ranked_candidate_answers = _call_runtime(ranker_id, prev_qid, curr_answer_set, feature_names) - _update_response_stats(ranked_candidate_answers, stats) - _write_to_prediction_file(ranked_candidate_answers, prediction_outfile) - - prev_qid = curr_qid - curr_answer_set = [row] - else: - # Keep collecting answers for the current qid - curr_answer_set.append(row) - - # deal with the last answer set - if curr_answer_set: - LOGGER.debug("Generating predictions for the final qid <<%s>>" % prev_qid) - - _update_request_stats(curr_answer_set, stats) - ranked_candidate_answers = _call_runtime(ranker_id, prev_qid, curr_answer_set, feature_names) - _update_response_stats(ranked_candidate_answers, stats) - _write_to_prediction_file(ranked_candidate_answers, prediction_outfile) - else: - raise ValueError("No test instances found in the file") - - LOGGER.info("Completed getting runtime predictions for %d questions" % stats['num_questions']) - # average out the stats across all queries where appropriate - stats['top-1-accuracy'] = stats['num_top_answers_correct'] / float(stats['num_questions']) - stats['ndcg@%d' % _TOP_K_FOR_METRICS] /= float(stats['num_questions']) - stats['recall@%d' % _TOP_K_FOR_METRICS] = float(stats['num_correct_in_top_%d' % _TOP_K_FOR_METRICS]) / stats[ - 'num_possible_correct_answers_in_top_%d' % _TOP_K_FOR_METRICS] - stats['precision@%d' % _TOP_K_FOR_METRICS] /= float(stats['num_questions']) - - return stats - - -def _write_to_prediction_file(ranked_candidate_answers, outfile): - """ - Write predictions to the outfile in the desired format and in the natural ordering imposed by the answer id. - - HACK: Assumes that the answer ids were generated in a fashion that maintains the input ordering - from the test file -- making it easier to run performance calculations using computeAccuracy.pl. 
- - :param list(CandidateAnswer) ranked_candidate_answers: list of candidate answers with the answer id, rank score - and confidence score fields populated. - :param file outfile: open file for writing - """ - for answer in sorted(ranked_candidate_answers, key=lambda ca: ca.answer_id): - outfile.write("%.4f %.4f\n" % (answer.rank_score, answer.confidence_score)) - - -def _update_request_stats(candidate_answers, stats): - stats['num_questions'] += 1 - stats['num_answers_sent_for_ranking'] += len(candidate_answers) - - -def _compute_precision(rank_ordering, k): - """ - Helper function to calculate precision at k (i.e. whether or not at least one answer is correct in top k) - :param list rank_ordering:sequence of ground truth in the rank order returned by the system - :param int k: top k at which to truncate precision calculation - :return: 1 if at least one answer was correct in the top k, else 0 - :rtype: numeric - """ - for r in rank_ordering[0:k]: - if _is_correct(r): - return 1 - return 0 - - -def _compute_ndcg(rank_order, ideal_order, k): - """ - This is a function to get ndcg - :param list rank_order: sequence of ground truth in the rank order returned by the system - :param list ideal_order: sequence of ground truth in the ideal order - :param int k: top k at which to evaluate ndcg - :return: ndcg for this ordering of ground truth - :rtype: numeric - """ - ideal_ndcg = _compute_dcg(ideal_order, k) - if ideal_ndcg == 0: - ndcg = 0.0 - else: - ndcg = _compute_dcg(rank_order, k) / float(ideal_ndcg) - return ndcg - - -def _compute_dcg(s, k): - """ - A function to compute dcg - :param s: sequence of ground truth in the rank order to use for calculating dcg - :param k: top k at which to evaluate ndcg - :return: dcg for this ordering of ground truth - :rtype: numeric - """ - dcg = 0.0 - for i in range(min(k, len(s))): - dcg += (math.pow(2, s[i]) - 1) / math.log(i + 2, 2) - return dcg - - -def _update_response_stats(ranked_candidate_answers, stats): - # Get the ground 
truth ordering from the candidate answer ranks - ground_truth_ordering_by_rank = list() - for answer in ranked_candidate_answers: - ground_truth_ordering_by_rank.append(answer.ground_truth) - if answer.rank_score >= 0: - # HACK: since the service only returns top k answers, we set the score for non-top-k-answers to -1 - stats['num_answers_returned'] += 1 - - # Collect stats for precision - stats['num_top_answers_correct'] += _compute_precision(ground_truth_ordering_by_rank, k=1) - stats['precision@%d' % _TOP_K_FOR_METRICS] += _compute_precision(ground_truth_ordering_by_rank, k=_TOP_K_FOR_METRICS) - - # Collect stat for ndcg@k - ideal_ground_truth_ordering = sorted(ground_truth_ordering_by_rank, reverse=True) - stats['ndcg@%d' % _TOP_K_FOR_METRICS] += _compute_ndcg(ground_truth_ordering_by_rank, ideal_ground_truth_ordering, - _TOP_K_FOR_METRICS) - - # Collect stats for recall@k - for i in range(min(_TOP_K_FOR_METRICS, len(ground_truth_ordering_by_rank))): - if _is_correct(ground_truth_ordering_by_rank[i]): - stats['num_correct_in_top_%d' % _TOP_K_FOR_METRICS] += 1 - if _is_correct(ideal_ground_truth_ordering[i]): - stats['num_possible_correct_answers_in_top_%d' % _TOP_K_FOR_METRICS] += 1 - - -def _is_correct(ground_truth_label): - """ - Returns true if label is > 0, false otherwise - :param int ground_truth_label: label - :return: true or false - :rtype: bool - """ - if ground_truth_label > 0: - return True - return False - - -def _call_runtime(ranker_id, qid, candidate_answers, feature_names): - """ - Helper method for a single runtime request to the specified ranker id for the candidate answers and question id - provided as input. - :param str ranker_id: id associated with the ranker to submit the runtime requests to. 
- :param str qid: question id associated with the candidate answers - :param list(CandidateAnswer) candidate_answers: list of feature vectors representing the candidate answers - :param list(str) feature_names: feature header row - :return: list of candidate answers some of them have no rank score (these weren't returned by the service) - :rtype: list(CandidateAnswer) - """ - mock_answer_id = 0 - gt_lookup = dict() - for row in candidate_answers: - # remove the qid and ground truth column and add in an answer id field - label = row.pop(_LABEL_COL_INDEX) - if qid != row.pop(_QID_COL_INDEX): - raise ValueError("Unexpected qid encountered while processing answer set for <<%s>>" % qid) - mock_answer_id += 1 - row.insert(_ANS_ID_COL_INDEX, mock_answer_id) - # Keep track of the ground truths since the answers will be re-ordered/pruned by the service - gt_lookup[mock_answer_id] = int(label) - - answer_file_headers = list(feature_names) - answer_file_headers.insert(_ANS_ID_COL_INDEX, 'answer_id') - - with tempfile.NamedTemporaryFile() as file_to_send_with_request: - writer = csv.writer(file_to_send_with_request) - writer.writerow(answer_file_headers) - - for candidate in candidate_answers: - writer.writerow(candidate) - - file_to_send_with_request.flush() - file_to_send_with_request.seek(0) - - num_attempts_for_this_qid = 0 - response = None - while True: - try: - response = BLUEMIX_CONNECTION.rank(ranker_id=ranker_id, answer_data=file_to_send_with_request) - break - except WatsonException as ex: - num_attempts_for_this_qid += 1 - if num_attempts_for_this_qid < _MAX_RUNTIME_ATTEMPTS: - LOGGER.warn("Attempt #%d for qid: %s failed. Retrying." 
% (num_attempts_for_this_qid, qid)) - else: - LOGGER.error("Runtime request for qid <<%s>> failed %d time(s) with reason: %s" % - (qid, (num_attempts_for_this_qid + 1), ex.message)) - raise ex - - if LOGGER.isEnabledFor(logging.DEBUG): - print(json.dumps(response, indent=2)) - - LOGGER.debug("Runtime request processed <<%d>> candidates for qid: <<%s>>" % ( - len(candidate_answers), qid)) - - return _parse_candidate_answer_list(qid, gt_lookup, response) - - -def _parse_candidate_answer_list(qid, gt_lookup, response_contents): - candidate_answers = list() - - # first collect the answers which were returned by the service - answers_which_were_returned = list() - for ranked_answer in response_contents['answers']: - aid = int(ranked_answer['answer_id']) - candidate_answers.append( - CandidateAnswer(qid=qid, answer_id=aid, ground_truth=gt_lookup[aid], rank_score=ranked_answer['score'], - confidence_score=ranked_answer['confidence'])) - answers_which_were_returned.append(aid) - - # now deal with the answers which weren't returned - for aid, gt in gt_lookup.iteritems(): - if aid not in answers_which_were_returned: - candidate_answers.append( - CandidateAnswer(qid=qid, answer_id=aid, ground_truth=gt, rank_score=-1, confidence_score=0)) - - return candidate_answers - - -class CandidateAnswer: - """ - Class defines a data structure to hold question id, ground truth, rank score, and confidence score for an answer. - Implements a natural ordering based on answer id - """ - - def __init__(self, qid, answer_id, ground_truth, rank_score=None, confidence_score=None): - self.qid = qid - self.answer_id = answer_id - self.ground_truth = int(ground_truth) - self.rank_score = float(rank_score) - self.confidence_score = float(confidence_score) - - -def validate_mandatory_args(args): - """ - Checks command line argument sufficiency and raises exception if not met. 
- :param args: arguments parsed by the argparse library - """ - if args.ranker_id is None and args.train_file is None: - raise ValueError("Either a ranker id should be provided or a training file from which a new ranker will" - " be generated. Found both: <<%s>> and <<%s>>" % (args.ranker_id, args.train_file)) - if args.validation_file is not None and args.outFile is None: - raise ValueError("Expected valid output location for validation set predictions, but found: <<%s>>" % - args.outFile) - - -def initialize_ranker_connection(credentials_file): - """ - Initializes and returns a RetrieveAndRankV1 object to use for the experiment based on the credentials - in the credentials file - :param File credentials_file: - :return: RetrieveAndRankV1 object initialized with the provided credentials - :rtype: RetrieveAndRankV1 - """ - bluemix_url, user, password = parse_credentials(credentials_file) - return RetrieveAndRankV1(url=bluemix_url, username=user, password=password) - - -def parse_credentials(config_file): - """ - Helper returns BLUEMIX_URL, USER, and PASSWORD from the json snippet in the input file - :param file config_file: file containing the credentials snippet provided by Bluemix UI. 
Contents - should look something like this: - { - "credentials": { - "url": "https://gateway-s.watsonplatform.net/search/api", - "username": "bb6d90e0-76bc-4454e-b123-32435c724fe6", - "password": "0sAwXk1Vgrse" - } - } - :return: credentials parsed from the file: BLUEMIX_URL, USER, PASSWORD - :rtype: tuple(str, str, str) - """ - try: - creds = json.load(config_file)['credentials'] - return creds['url'], creds['username'], creds['password'] - except ValueError as ex: - raise ValueError("Unable to parse creds file <<%s>> as a dictionary of key value pairs: %s" % ( - config_file, ex.message)) - - -def main(args): - """ - Kicks off training and/or validation set performance evaluation based on the args - :param args: arguments parsed by the argparse library - """ - - if args.ranker_id: - ranker_id = args.ranker_id - wait_for_training_to_complete(ranker_id) - else: - ranker_id = train_ranker(train_file=args.train_file, is_enabled_make_space=args.is_enabled_delete_rankers) - wait_for_training_to_complete(ranker_id) - - if args.validation_file: - stats = test_ranker(ranker_id, test_file=args.validation_file, prediction_outfile=args.outFile) - - json.dump(stats, args.accuracy_outfile, sort_keys=True, indent=True) - - -if __name__ == '__main__': - """ - parses the command line args to setup constants and then calls main runner - """ - # Get cmd line args - parser = argparse.ArgumentParser(description='Facilitates learning-to-rank experiments using the /rank API', - formatter_class=argparse.ArgumentDefaultsHelpFormatter) - parser.add_argument('-t', '--train-file', dest='train_file', type=argparse.FileType('rb'), - help="Training feature file for ranker in csv format: " - "qid,feature_1,feature_2,...,feature_n,ground_truth") - parser.add_argument('-v', '--validation-file', dest='validation_file', type=argparse.FileType('rb'), - help="Validation feature file for testing the Ranker (same format as training file)") - parser.add_argument('-o', '--output-path', dest='outFile', 
type=argparse.FileType('wb'), - help="Output path for file with rank predictions for validation set") - parser.add_argument('-a', '--accuracy-output', dest='accuracy_outfile', type=argparse.FileType('wb'), - default=sys.stdout, help="output path for accuracy related stats from validation run") - parser.add_argument('-d', '--debug', help="Print lots of debugging statements", action="store_const", - dest="loglevel", const=logging.DEBUG, default=logging.INFO) - parser.add_argument('-i', '--ranker-id', dest='ranker_id', - help="If no training file is specified, this field is required in order to know which" - " ranker to use for generating predictions for the validation set.") - parser.add_argument('-r', '--delete-rankers-if-quota-reached', - help="If this flag is enabled, previously created rankers will be deleted if there is " - "insufficient space in the user's current quota of rankers to train", - action="store_const", dest="is_enabled_delete_rankers", const=True, default=False) - parser.add_argument('-c', '--credentials-file', dest='bluemix_credentials_file', required=True, - type=argparse.FileType('rb'), - help="path to file containing bluemix credentials (can be retrieved from the bluemix console. 
" - "Contents will look something like this:" - "{\"credentials\": {" - "\"url\": \"https://gateway.watsonplatform.net/retrieve-and-rank/api\"," - "\"username\": \"bb666666-7777-44444-bbbb-333333333333\"," - "\"password\": \"0sAwXk1Vgrse\"}}") - - args = parser.parse_args() - validate_mandatory_args(args) - - LOGGER = initialize_logger(args.loglevel) - BLUEMIX_CONNECTION = initialize_ranker_connection(args.bluemix_credentials_file) - - main(args) \ No newline at end of file diff --git a/watson_developer_cloud/__init__.py b/watson_developer_cloud/__init__.py index b25873e51..bea16fd40 100755 --- a/watson_developer_cloud/__init__.py +++ b/watson_developer_cloud/__init__.py @@ -16,12 +16,8 @@ from .watson_service import WatsonException from .watson_service import WatsonApiException from .watson_service import WatsonInvalidArgument -from .alchemy_data_news_v1 import AlchemyDataNewsV1 -from .alchemy_language_v1 import AlchemyLanguageV1 -from .alchemy_vision_v1 import AlchemyVisionV1 from .authorization_v1 import AuthorizationV1 from .conversation_v1 import ConversationV1 -from .document_conversion_v1 import DocumentConversionV1 from .dialog_v1 import DialogV1 from .language_translation_v2 import LanguageTranslationV2 from .language_translator_v2 import LanguageTranslatorV2 @@ -29,11 +25,9 @@ from .natural_language_understanding_v1 import NaturalLanguageUnderstandingV1 from .personality_insights_v2 import PersonalityInsightsV2 from .personality_insights_v3 import PersonalityInsightsV3 -from .retrieve_and_rank_v1 import RetrieveAndRankV1 from .speech_to_text_v1 import SpeechToTextV1 from .text_to_speech_v1 import TextToSpeechV1 from .tone_analyzer_v3 import ToneAnalyzerV3 -from .tradeoff_analytics_v1 import TradeoffAnalyticsV1 from .visual_recognition_v3 import VisualRecognitionV3 from .discovery_v1 import DiscoveryV1 from .version import __version__ diff --git a/watson_developer_cloud/alchemy_data_news_v1.py b/watson_developer_cloud/alchemy_data_news_v1.py deleted file 
mode 100644 index 2ee58e978..000000000 --- a/watson_developer_cloud/alchemy_data_news_v1.py +++ /dev/null @@ -1,125 +0,0 @@ -# Copyright 2016 IBM All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -The AlchemyData News service -(https://www.ibm.com/watson/developercloud/alchemy-data-news.html) -""" - -from .watson_service import WatsonService - - -class AlchemyDataNewsV1(WatsonService): - default_url = 'https://gateway-a.watsonplatform.net/calls' - - def __init__(self, url=default_url, **kwargs): - WatsonService.__init__(self, 'alchemy_api', url, **kwargs) - - def get_news_documents(self, start, end, max_results=10, query_fields=None, - return_fields=None, time_slice=None, - next_page=None, dedup=None, dedup_threshold=None, - rank=None): - """ - :param start: The time (in UTC seconds) of the beginning date and time - of the query. Valid values are UTC times and relative times: - now (current time), now-{time value}, s (seconds), m (minutes), - h (hours), d (days), M (months), and y (years) - - :param end: The time (in UTC seconds) of the end date and time of the - query. Valid values are UTC times and relative times: - now (current time), now-{time value}, s (seconds), m (minutes), - h (hours), d (days), M (months), and y (years) - - :param max_results: The maximum number of results that are returned - from your query. 
If None, all matching results are returned - - :param query_fields: There are nearly 400 variations of entity, - taxonomy, sentiment analysis, concepts, and keywords. The full list - of parameters is available in the Developer Cloud API documentation. - Common fields include q.enriched.url.enrichedTitle.relations.relation, - q.enriched.url.enrichedTitle.entities.entity, - q.enriched.url.enrichedTitle.taxonomy.taxonomy, - q.enriched.url.enrichedTitle.docSentiment.type, - q.enriched.url.concepts.concept.text, - q.enriched.url.enrichedTitle.keywords.keyword.text - - :param return fields: A comma-separated list of document fields to - return for each matching document. Any available document fields can - be retrieved. To return multiple fields, use a comma separated list. - Common fields to return are enriched.url.url (URL), enriched.url.title - (title), enriched.url.text(full article text), and enriched.url.author - (author name). If you do not specify fields to be returned or a - timeSlice, the AlchemyData News API only returns the total number of - matching results within the start and end date range - - :param time_slice: The interval to divide the returned data. The - default is that the query engine returns the total count over the time - duration specified with start and end. If you specify a value, it - returns a time series representing the count (max 1000) in each slice - of time: now (current time), s (seconds), m (minutes), h (hours), - d (days), M (months), and y (years) - - :param next_page: If a query is too broad or spans a long time period, - the number of results can be very large and more results may be - available than those which were returned. If there are more matching - results available, a next parameter is returned in the response. 
To - get the next page of results, execute the query again and append the - next parameter to your query - - :param dedup: Many news articles are published by a single source, - such as Associated Press, and then syndicated widely across the web. - dedup removes duplicate results based on a comparison of their cleaned - titles: False (Default) turns off dudup, True turns on dedup - - :param dedup_threshold: Defines how strictly the algorithm defines a - duplicate. Valid values are between 0 and 1. The default value is 0.4. - A value of 0.0 allows only titles that exactly match those of other - articles to be tagged as duplicate. 0.4 allows articles that are very - similar but not necessarily identical to be tagged as duplicates. A - value of 1.0 allows articles to be aggressively labeled as duplicates, - sometimes even when the titles are very dissimilar - - :param rank: The News API monitors and ranks 60,000 top-level domains, - each with a varying range of page views. rank allows you to specify to - only return articles from well-known, high-traffic publishers. If the - rank parameter is not specified, articles of all ranks are returned: - high, medium, low, or unknown - - :return: result elements depend on the parameters that you passed to - the query. If return fields are requested, the result element contains - a docs element that contains the matching documents, a next element - that contains an identifier for the next matching result in the - AlchemyData News data set, and a status element that provides status - information about retrieving the requested number of results. If no - return fields are requested in your query, the result element contains - a count of matching news items and the status of querying the - AlchemyData News data set. 
- """ - - if isinstance(return_fields, list): - return_fields = ','.join(return_fields) - params = {'start': start, - 'end': end, - 'maxResults': max_results, - 'return': return_fields, - 'timeSlice': time_slice, - 'next': next_page, - 'dedup': dedup, - 'dedupThreshold': dedup_threshold, - 'rank': rank} - if isinstance(query_fields, dict): - for key in query_fields: - params[key if key.startswith('q.') else 'q.' + key] = \ - query_fields[key] - return self._alchemy_html_request(method_url='/data/GetNews', - method='GET', params=params) diff --git a/watson_developer_cloud/alchemy_language_v1.py b/watson_developer_cloud/alchemy_language_v1.py deleted file mode 100644 index 15468d6a8..000000000 --- a/watson_developer_cloud/alchemy_language_v1.py +++ /dev/null @@ -1,322 +0,0 @@ -# Copyright 2016 IBM All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-""" -The AlchemyAPI Language service -(https://www.ibm.com/watson/developercloud/alchemy-language.html) -""" - -from .watson_service import WatsonService - - -class AlchemyLanguageV1(WatsonService): - default_url = 'https://gateway-a.watsonplatform.net/calls' - - def __init__(self, url=default_url, **kwargs): - WatsonService.__init__(self, 'alchemy_api', url, **kwargs) - - def author(self, html=None, url=None, language=None): - params = {'language': language} - return self._alchemy_html_request('GetAuthor', html=html, url=url, - params=params) - - def authors(self, html=None, url=None, language=None): - params = {'language': language} - return self._alchemy_html_request('GetAuthors', html=html, url=url, - params=params) - - def keywords(self, html=None, text=None, url=None, - strict_extract_mode=False, sentiment=False, emotion=False, - show_source_text=False, max_items=None, language=None, - max_keywords=50): - """ - :param html: HTML input - :param text: Text input - :param url: URL input - :param max_items: The number of results to return (default 50) - :param max_keywords: deprecated, use max_items instead - :return: A JSON object with extracted keywords from the source document - """ - if not max_items: - max_items = max_keywords - params = { - 'keywordExtractMode': - 'strict' if strict_extract_mode else 'normal', - 'sentiment': sentiment, - 'emotion': emotion, - 'showSourceText': show_source_text, - 'maxRetrieve': max_items, - 'language': language} - return self._alchemy_html_request('GetRankedKeywords', html=html, - text=text, url=url, params=params) - - def concepts(self, html=None, text=None, url=None, max_items=8, - linked_data=True, show_source_text=False, - language=None, knowledge_graph=False): - params = {'maxRetrieve': max_items, - 'linkedData': linked_data, - 'showSourceText': show_source_text, - 'language': language, - 'knowledgeGreaph': knowledge_graph} - return self._alchemy_html_request('GetRankedConcepts', html=html, - text=text, url=url, 
params=params) - - def dates(self, html=None, text=None, url=None, anchor_date=None, - show_source_text=False, language=None): - params = {'anchorDate': anchor_date, - 'showSourceText': show_source_text, - 'language': language} - return self._alchemy_html_request('ExtractDates', html=html, text=text, - url=url, params=params) - - def entities(self, html=None, text=None, url=None, disambiguate=True, - linked_data=True, coreference=True, - quotations=False, sentiment=False, emotion=False, - show_source_text=False, max_items=50, language=None, - model=None): - params = {'disambiguate': disambiguate, - 'linkedData': linked_data, - 'coreference': coreference, - 'quotations': quotations, - 'sentiment': sentiment, - 'emotion': emotion, - 'showSourceText': show_source_text, - 'maxRetrieve': max_items, - 'language': language, - 'model': model} - return self._alchemy_html_request('GetRankedNamedEntities', html=html, - text=text, url=url, params=params) - - def emotion(self, html=None, text=None, url=None, show_source_text=False, - source_text_type=None, - constraint_query=None, xpath_query=None, language=None): - params = {'showSourceText': show_source_text, - 'sourceText': source_text_type, - 'cquery': constraint_query, - 'xpath': xpath_query, - 'language': language} - return self._alchemy_html_request('GetEmotion', html=html, text=text, - url=url, params=params) - - def targeted_emotion(self, targets, html=None, text=None, url=None, - language=None, constraint_query=None, - xpath_query=None, show_source_text=False, - source_text_type=None): - if isinstance(targets, list): - targets = '|'.join(targets) - - params = {'targets': targets, - 'language': language, - 'cquery': constraint_query, - 'xpath': xpath_query, - 'showSourceText': show_source_text, - 'sourceText': source_text_type} - return self._alchemy_html_request('GetTargetedEmotion', html=html, - text=text, url=url, params=params) - - def typed_relations(self, html=None, text=None, url=None, model=None, - 
show_source_text=False): - params = {'model': model, - 'showSourceText': show_source_text} - return self._alchemy_html_request('GetTypedRelations', html=html, - text=text, url=url, params=params) - - def relations(self, html=None, text=None, url=None, sentiment=False, - keywords=False, entities=False, - require_entities=False, sentiment_excludes_entities=True, - disambiguate=True, linked_data=True, - coreference=True, show_source_text=False, max_items=50, - language=None): - params = {'sentiment': sentiment, - 'keywords': keywords, - 'entities': entities, - 'requireEntities': require_entities, - 'sentimentExcludesEntities': sentiment_excludes_entities, - 'disambiguate': disambiguate, - 'linkedData': linked_data, - 'coreference': coreference, - 'showSourceText': show_source_text, - 'maxRetrieve': max_items, - 'language': language} - return self._alchemy_html_request('GetRelations', html=html, text=text, - url=url, params=params) - - def language(self, html=None, text=None, url=None): - return self._alchemy_html_request('GetLanguage', html=html, text=text, - url=url) - - def text(self, html=None, url=None, use_metadata=True, - extract_links=False): - params = {'useMetadata': use_metadata, - 'extractLinks': extract_links} - return self._alchemy_html_request('GetText', html=html, url=url, - params=params) - - def raw_text(self, html=None, url=None): - return self._alchemy_html_request('GetRawText', html=html, url=url) - - def category(self, html=None, text=None, url=None, show_source_text=False, - language=None): - params = {'showSourceText': show_source_text, 'language': language} - return self._alchemy_html_request('GetCategory', html=html, text=text, - url=url, params=params) - - def title(self, html=None, url=None, use_metadata=True, language=None): - params = {'useMetadata': use_metadata, 'language': language} - return self._alchemy_html_request('GetTitle', html=html, url=url, - params=params) - - def feeds(self, html=None, url=None): - return 
self._alchemy_html_request('GetFeedLinks', html=html, url=url) - - def microformats(self, html=None, url=None): - return self._alchemy_html_request('GetMicroformatData', html=html, - url=url) - - def publication_date(self, html=None, url=None): - return self._alchemy_html_request('GetPubDate', html=html, url=url) - - def taxonomy(self, html=None, text=None, url=None, show_source_text=False, - source_text_type=None, - constraint_query=None, xpath_query=None, base_url=None, - language=None): - """ - source_text_type -> - where to obtain the text that will be processed by this API call. - AlchemyAPI supports multiple modes of text extraction: - web page cleaning (removes ads, navigation links, etc.), - raw text extraction - (processes all web page text, including ads / nav links), - visual constraint queries, and XPath queries. - Possible values: - cleaned_or_raw : cleaning enabled, fallback to raw when - cleaning produces no text (default) - cleaned : operate on 'cleaned' web page text (web - page cleaning enabled) - raw : operate on raw web page text (web page - cleaning disabled) - cquery : operate on the results of a visual - constraints query - Note: The 'constraint_query' argument must - also be set to a valid visual constraints - query. - xpath : operate on the results of an XPath query - Note: The 'xpath' http argument must also - be set to a valid XPath query. - constraint_query -> - a visual constraints query to apply to the web page. - xpath -> - an XPath query to apply to the web page. 
- base_url -> - rel-tag output base http url (must be uri-argument encoded) - """ - params = {'showSourceText': show_source_text, - 'sourceText': source_text_type, - 'cquery': constraint_query, - 'xpath': xpath_query, - 'baseUrl': base_url, - 'language': language} - return self._alchemy_html_request('GetRankedTaxonomy', html=html, - text=text, url=url, params=params) - - # Some of these options don't appear in the API documentation but are - # supported by the previous AlchemyAPI SDK - def combined(self, html=None, text=None, url=None, extract=None, - disambiguate=True, linked_data=True, - coreference=True, quotations=False, sentiment=False, - show_source_text=False, max_items=50, - base_url=None, language=None): - """ - Combined call for page-image, entity, keyword, title, author, - taxonomy, concept, doc-emotion. - INPUT: - extract -> - List or comma separated string - Possible values: page-image, entity, keyword, title, author, - taxonomy, concept - default : entity, keyword, taxonomy, concept - disambiguate -> - disambiguate detected entities - Possible values: - True : enabled (default) - False : disabled - linked_data -> - include Linked Data content links with disambiguated entities - Possible values : - True : enabled (default) - False : disabled - coreference -> - resolve he/she/etc coreferences into detected entities - Possible values: - True : enabled (default) - False : disabled - quotations -> - enable quotations extraction - Possible values: - True : enabled - False : disabled (default) - sentiment -> - enable entity-level sentiment analysis - Possible values: - True : enabled - False : disabled (default) - show_source_text -> - include the original 'source text' the entities were extracted - from within the API response - Possible values: - True : enabled - False : disabled (default) - max_items -> - maximum number of named entities to extract - default : 50 - base_url -> - rel-tag output base http url - OUTPUT: - The response, already converted from 
JSON to a Python object. - """ - if isinstance(extract, list): - extract = ','.join(extract) - - params = {'extract': extract, - 'disambiguate': disambiguate, - 'linkedData': linked_data, - 'coreference': coreference, - 'quotations': quotations, - 'sentiment': sentiment, - 'showSourceText': show_source_text, - 'maxRetrieve': max_items, - 'baseUrl': base_url, - 'language': language} - return self._alchemy_html_request('GetCombinedData', html=html, - text=text, url=url, params=params) - - def sentiment(self, html=None, text=None, url=None, language=None): - params = {'language': language} - return self._alchemy_html_request('GetTextSentiment', html=html, - text=text, url=url, params=params) - - def targeted_sentiment(self, targets, html=None, text=None, url=None, - language=None, constraint_query=None, - xpath_query=None, show_source_text=False, - source_text_type=None): - if isinstance(targets, list): - targets = '|'.join(targets) - - params = {'targets': targets, - 'language': language, - 'cquery': constraint_query, - 'xpath': xpath_query, - 'showSourceText': show_source_text, - 'sourceText': source_text_type} - return self._alchemy_html_request('GetTargetedSentiment', html=html, - text=text, url=url, params=params) diff --git a/watson_developer_cloud/alchemy_vision_v1.py b/watson_developer_cloud/alchemy_vision_v1.py deleted file mode 100644 index 75411af65..000000000 --- a/watson_developer_cloud/alchemy_vision_v1.py +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright 2016 IBM All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -""" -The AlchemyAPI Vision service -(https://www.ibm.com/watson/developercloud/visual-recognition.html) -""" -from __future__ import print_function -from .watson_service import WatsonService - - -class AlchemyVisionV1(WatsonService): - """AlchemyVision was deprecated, migrate your application to use - VisualRecognition.""" - default_url = 'https://gateway-a.watsonplatform.net/calls' - - def __init__(self, url=default_url, **kwargs): - WatsonService.__init__(self, 'alchemy_api', url, **kwargs) - print( - 'WARNING: The AlchemyVision service was deprecated, ' - 'use VisualRecognitionV3 instead') - - def get_image_keywords(self, image_file=None, image_url=None, - knowledge_graph=False, force_show_all=False): - method_name = 'GetRankedImageKeywords' - params = {'knowledgeGraph': knowledge_graph, - 'forceShowAll': force_show_all} - return self._alchemy_image_request(method_name, image_file, image_url, - params) - - def recognize_faces(self, image_file=None, image_url=None, - knowledge_graph=False): - method_name = 'GetRankedImageFaceTags' - params = {'knowledgeGraph': knowledge_graph} - return self._alchemy_image_request(method_name, image_file, image_url, - params) - - def get_image_scene_text(self, image_file=None, image_url=None): - method_name = 'GetRankedImageSceneText' - return self._alchemy_image_request(method_name, image_file, image_url) - - def get_image_links(self, url=None, html=None): - method_name = 'GetImage' - return self._alchemy_html_request(method_name, url=url, html=html) diff --git a/watson_developer_cloud/document_conversion_v1.py b/watson_developer_cloud/document_conversion_v1.py deleted file mode 100644 index bbf1714e6..000000000 --- a/watson_developer_cloud/document_conversion_v1.py +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright 2016 IBM All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -The v1 Document Conversion service -(https://www.ibm.com/watson/developercloud/document-conversion.html) -""" -from .utils import deprecated -from .watson_service import WatsonService -import os -import json -DEPRECATION_MESSAGE = "Since Document Conversion Service was retired in October 2017, we have continued to improve document conversion capabilities within Watson Discovery. If you are a Document Conversion user, get started with Discovery today. 
Refer to the migration guide: https://console.bluemix.net/docs/services/discovery/migrate-dcs-rr.html" - -class DocumentConversionV1(WatsonService): - DEFAULT_URL = 'https://gateway.watsonplatform.net/document-conversion/api' - ANSWER_UNITS = 'answer_units' - NORMALIZED_HTML = 'normalized_html' - NORMALIZED_TEXT = 'normalized_text' - latest_version = '2016-02-10' - - @deprecated(DEPRECATION_MESSAGE) - def __init__(self, version, url=DEFAULT_URL, **kwargs): - WatsonService.__init__(self, 'document_conversion', url, **kwargs) - self.version = version - - @deprecated(DEPRECATION_MESSAGE) - def convert_document(self, document, config, media_type=None): - params = {'version': self.version} - filename = os.path.basename(document.name) - file_tuple = (filename, document, media_type)\ - if media_type else (filename, document) - files = [('file', file_tuple), - ('config', - ('config.json', json.dumps(config), 'application/json'))] - accept_json = config['conversion_target'] == DocumentConversionV1.\ - ANSWER_UNITS - return self.request(method='POST', url='/v1/convert_document', - files=files, params=params, - accept_json=accept_json) - @deprecated(DEPRECATION_MESSAGE) - def index_document(self, config, document=None, metadata=None, - media_type=None): - if document is None and metadata is None: - raise AssertionError( - 'Missing required parameters: document or metadata. 
At least ' - 'one of those is' - 'required.') - params = {'version': self.version} - files = [('config', ('config.json', json.dumps(config), - 'application/json'))] - if document is not None: - filename = os.path.basename(document.name) - file_tuple = (filename, document, media_type)\ - if media_type else (filename, document) - files.append(('file', file_tuple)) - if metadata is not None: - files.append(('metadata', - ('metadata.json', json.dumps(metadata), - 'application/json'))) - return self.request(method='POST', url='/v1/index_document', - files=files, params=params, accept_json=True) diff --git a/watson_developer_cloud/retrieve_and_rank_v1.py b/watson_developer_cloud/retrieve_and_rank_v1.py deleted file mode 100644 index 4fc219e1d..000000000 --- a/watson_developer_cloud/retrieve_and_rank_v1.py +++ /dev/null @@ -1,137 +0,0 @@ -# Copyright 2016 IBM All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -""" -The v1 Retrieve and Rank service -(https://www.ibm.com/watson/developercloud/retrieve-rank.html) -""" - -import json -import pysolr -from watson_developer_cloud.watson_service import WatsonService - - -class RetrieveAndRankV1(WatsonService): - default_url = 'https://gateway.watsonplatform.net/retrieve-and-rank/api' - - def __init__(self, url=default_url, **kwargs): - WatsonService.__init__(self, 'retrieve_and_rank', url, **kwargs) - - def list_solr_clusters(self): - return self.request(method='GET', url='/v1/solr_clusters', - accept_json=True) - - def create_solr_cluster(self, cluster_name=None, cluster_size=None): - if cluster_size: - cluster_size = str(cluster_size) - params = {'cluster_name': cluster_name, 'cluster_size': cluster_size} - return self.request(method='POST', url='/v1/solr_clusters', - accept_json=True, json=params) - - def delete_solr_cluster(self, solr_cluster_id): - return self.request(method='DELETE', - url='/v1/solr_clusters/{0}'.format( - solr_cluster_id), - accept_json=True) - - def get_solr_cluster_status(self, solr_cluster_id): - return self.request(method='GET', - url='/v1/solr_clusters/{0}'.format( - solr_cluster_id), - accept_json=True) - - def list_configs(self, solr_cluster_id): - return self.request(method='GET', - url='/v1/solr_clusters/{0}/config'.format( - solr_cluster_id), accept_json=True) - - # Need to test - def create_config(self, solr_cluster_id, config_name, config): - return self.request(method='POST', - url='/v1/solr_clusters/{0}/config/{1}'.format( - solr_cluster_id, config_name), - files={'body': config}, - headers={'content-type': 'application/zip'}, - accept_json=True) - - def delete_config(self, solr_cluster_id, config_name): - return self.request(method='DELETE', - url='/v1/solr_clusters/{0}/config/{1}'.format( - solr_cluster_id, config_name), - accept_json=True) - - def get_config(self, solr_cluster_id, config_name): - return self.request(method='GET', - url='/v1/solr_clusters/{0}/config/{1}'.format( - 
solr_cluster_id, config_name)) - - def list_collections(self, solr_cluster_id): - params = {'action': 'LIST', 'wt': 'json'} - return self.request(method='GET', - url='/v1/solr_clusters/{0}/solr/admin/collections' - .format(solr_cluster_id), - params=params, accept_json=True) - - def create_collection(self, solr_cluster_id, collection_name, config_name): - params = {'collection.configName': config_name, - 'name': collection_name, - 'action': 'CREATE', 'wt': 'json'} - return self.request(method='POST', - url='/v1/solr_clusters/{0}/solr/admin/collections' - .format(solr_cluster_id), - params=params, accept_json=True) - - def delete_collection(self, solr_cluster_id, collection_name, - config_name=None): - params = {'name': collection_name, 'action': 'DELETE', 'wt': 'json'} - return self.request(method='POST', - url='/v1/solr_clusters/{0}/solr/admin/collections' - .format(solr_cluster_id), - params=params, accept_json=True) - - def get_pysolr_client(self, solr_cluster_id, collection_name): - base_url = self.url.replace('https://', - 'https://' + self.username + ':' + - self.password + '@') - url = base_url + '/v1/solr_clusters/{0}/solr/{1}'.format( - solr_cluster_id, collection_name) - return pysolr.Solr(url) - - def create_ranker(self, training_data, name=None): - data = None - if name: - data = {'training_metadata': json.dumps({'name': name})} - return self.request(method='POST', url='/v1/rankers', accept_json=True, - files=[('training_data', training_data)], - data=data) - - def list_rankers(self): - return self.request(method='GET', url='/v1/rankers', accept_json=True) - - def get_ranker_status(self, ranker_id): - return self.request(method='GET', - url='/v1/rankers/{0}'.format(ranker_id), - accept_json=True) - - def rank(self, ranker_id, answer_data, top_answers=10): - data = {'answers': + top_answers} - return self.request(method='POST', - url='/v1/rankers/{0}/rank'.format(ranker_id), - files=[('answer_data', answer_data)], data=data, - accept_json=True) - - def 
delete_ranker(self, ranker_id): - return self.request(method='DELETE', - url='/v1/rankers/{0}'.format(ranker_id), - accept_json=True) diff --git a/watson_developer_cloud/tradeoff_analytics_v1.py b/watson_developer_cloud/tradeoff_analytics_v1.py deleted file mode 100755 index 3e3a2dc55..000000000 --- a/watson_developer_cloud/tradeoff_analytics_v1.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright 2016 IBM All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -The v1 Tradeoff Analytics service -(https://www.ibm.com/watson/developercloud/tradeoff-analytics.html) -""" - -from .watson_service import WatsonService - - -class TradeoffAnalyticsV1(WatsonService): - """Wrapper for the Tradeoff Analytics service""" - default_url = 'https://gateway.watsonplatform.net/tradeoff-analytics/api' - - def __init__(self, url=default_url, **kwargs): - WatsonService.__init__(self, 'tradeoff_analytics', url, **kwargs) - - def dilemmas(self, params, - generate_visualization=True, - find_preferable_options=False): - """ - :param params: The JSON problem (subject, columns, and options) - :param generate_visualization: If True, returns the map visualization - used by the Tradeoff Analytics widget - :param find_preferable_options: If True, returns a refined subset of - best candidate options that will most likely satisfy the greatest number - of users - :return: A dilemma that contains the problem and its resolution - """ - - parameters = { - 'generate_visualization': 
generate_visualization, - 'find_preferable_options': find_preferable_options - } - - return self.request(method='POST', url='/v1/dilemmas', json=params, - params=parameters, accept_json=True) diff --git a/watson_developer_cloud/watson_service.py b/watson_developer_cloud/watson_service.py index 7592856bc..a9cabf82d 100755 --- a/watson_developer_cloud/watson_service.py +++ b/watson_developer_cloud/watson_service.py @@ -305,65 +305,6 @@ def _get_error_info(response): return error_info if any(error_info) else None - def _alchemy_html_request(self, method_name=None, url=None, html=None, - text=None, params=None, method='POST', - method_url=None): - if params is None: - params = {} - params['outputMode'] = 'json' - headers = {'content-type': 'application/x-www-form-urlencoded'} - params = _convert_boolean_values(params) - url_encoded_params = {} - - if method.upper() == 'POST': - url_encoded_params = params - params = {} - - url_encoded_params['html'] = html - url_encoded_params['text'] = text - - if method_url is None: - if url: - params['url'] = url - method_url = '/url/URL' + method_name - elif html: - method_url = '/html/HTML' + method_name - elif text: - method_url = '/text/Text' + method_name - else: - raise WatsonInvalidArgument( - 'url, html or text must be specified') - - return self.request(method=method, url=method_url, params=params, - data=url_encoded_params, headers=headers, - accept_json=True) - - def _alchemy_image_request(self, method_name, image_file=None, - image_url=None, params=None): - if params is None: - params = {} - params['outputMode'] = 'json' - params = _convert_boolean_values(params) - headers = {} - image_contents = None - - if image_file: - params['imagePostMode'] = 'raw' - image_contents = image_file.read() - # headers['content-length'] = sys.getsizeof(image_contents) - url = '/image/Image' + method_name - elif image_url: - params['imagePostMode'] = 'not-raw' - params['url'] = image_url - url = '/url/URL' + method_name - else: - raise 
WatsonInvalidArgument( - 'image_file or image_url must be specified') - - return self.request(method='POST', url=url, params=params, - data=image_contents, headers=headers, - accept_json=True) - def request(self, method, url, accept_json=False, headers=None, params=None, json=None, data=None, files=None, **kwargs): full_url = self.url + url From a819f382b4ac5299bc817a390aeb9659594d7d31 Mon Sep 17 00:00:00 2001 From: Erika Dsouza Date: Wed, 14 Feb 2018 14:07:34 -0500 Subject: [PATCH 07/45] changing the check (#360) --- test/integration/test_integration_text_to_speech_v1.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/test_integration_text_to_speech_v1.py b/test/integration/test_integration_text_to_speech_v1.py index 5d84efe4b..209eab9ec 100644 --- a/test/integration/test_integration_text_to_speech_v1.py +++ b/test/integration/test_integration_text_to_speech_v1.py @@ -37,7 +37,7 @@ def test_customizations(self): old_length = len(self.original_customizations['customizations']) new_length = len( self.text_to_speech.list_voice_models()['customizations']) - assert new_length - old_length == 1 + assert new_length - old_length >= 1 def test_custom_words(self): customization_id = self.created_customization['customization_id'] From 2993c16544b397536714e3a32d58617331f05062 Mon Sep 17 00:00:00 2001 From: Erika Dsouza Date: Wed, 14 Feb 2018 16:45:19 -0500 Subject: [PATCH 08/45] Regenerate conversation and add more tests --- test/unit/test_conversation_v1.py | 48 +++++++++ watson_developer_cloud/conversation_v1.py | 97 +++++++++++++++-- watson_developer_cloud/dialog_v1.py | 126 ---------------------- 3 files changed, 137 insertions(+), 134 deletions(-) delete mode 100644 watson_developer_cloud/dialog_v1.py diff --git a/test/unit/test_conversation_v1.py b/test/unit/test_conversation_v1.py index 5f6ffb8db..9a851ac80 100644 --- a/test/unit/test_conversation_v1.py +++ b/test/unit/test_conversation_v1.py @@ -1446,3 +1446,51 @@ def 
test_update_workspace(): assert workspace == response # Verify that response can be converted to a Workspace Workspace._from_dict(workspace) + +@responses.activate +def test_dialog_nodes(): + url = 'https://gateway.watsonplatform.net/conversation/api/v1/workspaces/id/dialog_nodes' + responses.add( + responses.GET, + url, + body='{ "application/json": { "dialog_node": "location-atm" }}', + status=200, + content_type='application/json') + + responses.add( + responses.POST, + "{0}?version=2017-05-26".format(url), + body='{ "application/json": { "dialog_node": "location-done" }}', + status=200, + content_type='application/json') + + responses.add( + responses.DELETE, + "{0}/location-done?version=2017-05-26".format(url), + body='{"description": "deleted successfully"}', + status=200, + content_type='application/json') + + responses.add( + responses.GET, + "{0}/location-done?version=2017-05-26".format(url), + body='{ "application/json": { "dialog_node": "location-atm" }}', + status=200, + content_type='application/json') + + conversation = watson_developer_cloud.ConversationV1('2017-05-26', + username="username", password="password") + + conversation.create_dialog_node('id', 'location-done') + assert responses.calls[0].response.json()['application/json']['dialog_node'] == 'location-done' + + conversation.delete_dialog_node('id', 'location-done') + assert responses.calls[1].response.json() == {"description": "deleted successfully"} + + conversation.get_dialog_node('id', 'location-done') + assert responses.calls[2].response.json() == { "application/json": { "dialog_node": "location-atm" }} + + conversation.list_dialog_nodes('id') + assert responses.calls[3].response.json() == { "application/json": { "dialog_node": "location-atm" }} + + assert len(responses.calls) == 4 \ No newline at end of file diff --git a/watson_developer_cloud/conversation_v1.py b/watson_developer_cloud/conversation_v1.py index f84c75d0c..f9c9de6a4 100644 --- a/watson_developer_cloud/conversation_v1.py 
+++ b/watson_developer_cloud/conversation_v1.py @@ -1,6 +1,6 @@ # coding: utf-8 -# Copyright 2017 IBM All Rights Reserved. +# Copyright 2018 IBM All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -213,7 +213,8 @@ def update_workspace(self, dialog_nodes=None, counterexamples=None, metadata=None, - learning_opt_out=None): + learning_opt_out=None, + append=None): """ Update workspace. @@ -230,6 +231,7 @@ def update_workspace(self, :param list[CreateCounterexample] counterexamples: An array of objects defining input examples that have been marked as irrelevant input. :param object metadata: Any metadata related to the workspace. :param bool learning_opt_out: Whether training data from the workspace can be used by IBM for general service improvements. `true` indicates that workspace training data is not to be used. + :param bool append: Specifies that the elements included in the request body are to be appended to the existing data in the workspace. The default value is `false`. :return: A `dict` containing the `Workspace` response. :rtype: dict """ @@ -243,7 +245,7 @@ def update_workspace(self, dialog_nodes = [self._convert_model(x) for x in dialog_nodes] if counterexamples is not None: counterexamples = [self._convert_model(x) for x in counterexamples] - params = {'version': self.version} + params = {'version': self.version, 'append': append} data = { 'name': name, 'description': description, @@ -264,6 +266,7 @@ def update_workspace(self, # message ######################### + def message(self, workspace_id, input=None, @@ -271,7 +274,8 @@ def message(self, context=None, entities=None, intents=None, - output=None): + output=None, + nodes_visited_details=None): """ Get a response to a user's input. 
@@ -282,6 +286,7 @@ def message(self, :param list[RuntimeEntity] entities: Include the entities from the previous response when they do not need to change and to prevent Watson from trying to identify them. :param list[RuntimeIntent] intents: An array of name-confidence pairs for the user input. Include the intents from the previous response when they do not need to change and to prevent Watson from trying to identify them. :param OutputData output: System output. Include the output from the request when you have several requests within the same Dialog turn to pass back in the intermediate information. + :param bool nodes_visited_details: Whether to include additional diagnostic information about the dialog nodes that were visited during processing of the message. :return: A `dict` containing the `MessageResponse` response. :rtype: dict """ @@ -297,7 +302,10 @@ def message(self, intents = [self._convert_model(x) for x in intents] if output is not None: output = self._convert_model(output) - params = {'version': self.version} + params = { + 'version': self.version, + 'nodes_visited_details': nodes_visited_details + } data = { 'input': input, 'alternate_intents': alternate_intents, @@ -1310,7 +1318,7 @@ def update_dialog_node(self, :param object new_metadata: The metadata for the dialog node. :param DialogNodeNextStep new_next_step: The next step to execute following this dialog node. :param str new_title: The alias used to identify the dialog node. - :param str new_type: How the node is processed. + :param str new_type: How the dialog node is processed. :param str new_event_name: How an `event_handler` node is processed. :param str new_variable: The location in the dialog context where output is stored. :param list[DialogNodeAction] new_actions: The actions for the dialog node. @@ -2726,6 +2734,58 @@ def __ne__(self, other): return not self == other +class DialogNodeVisitedDetails(object): + """ + DialogNodeVisitedDetails. 
+ + :attr str dialog_node: (optional) A dialog node that was triggered during processing of the input message. + :attr str title: (optional) The title of the dialog node. + """ + + def __init__(self, dialog_node=None, title=None): + """ + Initialize a DialogNodeVisitedDetails object. + + :param str dialog_node: (optional) A dialog node that was triggered during processing of the input message. + :param str title: (optional) The title of the dialog node. + """ + self.dialog_node = dialog_node + self.title = title + + @classmethod + def _from_dict(cls, _dict): + """Initialize a DialogNodeVisitedDetails object from a json dictionary.""" + args = {} + if 'dialog_node' in _dict: + args['dialog_node'] = _dict['dialog_node'] + if 'title' in _dict: + args['title'] = _dict['title'] + return cls(**args) + + def _to_dict(self): + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'dialog_node') and self.dialog_node is not None: + _dict['dialog_node'] = self.dialog_node + if hasattr(self, 'title') and self.title is not None: + _dict['title'] = self.title + return _dict + + def __str__(self): + """Return a `str` version of this DialogNodeVisitedDetails object.""" + return json.dumps(self._to_dict(), indent=2) + + def __eq__(self, other): + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other): + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class Entity(object): """ Entity. @@ -3980,20 +4040,28 @@ class OutputData(object): :attr list[LogMessage] log_messages: Up to 50 messages logged with the request. :attr list[str] text: An array of responses to the user. :attr list[str] nodes_visited: (optional) An array of the nodes that were triggered to create the response. 
+ :attr list[DialogNodeVisitedDetails] nodes_visited_details: (optional) An array of objects containing detailed diagnostic information about the nodes that were triggered during processing of the input message. """ - def __init__(self, log_messages, text, nodes_visited=None, **kwargs): + def __init__(self, + log_messages, + text, + nodes_visited=None, + nodes_visited_details=None, + **kwargs): """ Initialize a OutputData object. :param list[LogMessage] log_messages: Up to 50 messages logged with the request. :param list[str] text: An array of responses to the user. :param list[str] nodes_visited: (optional) An array of the nodes that were triggered to create the response. + :param list[DialogNodeVisitedDetails] nodes_visited_details: (optional) An array of objects containing detailed diagnostic information about the nodes that were triggered during processing of the input message. :param **kwargs: (optional) Any additional properties. """ self.log_messages = log_messages self.text = text self.nodes_visited = nodes_visited + self.nodes_visited_details = nodes_visited_details for _key, _value in kwargs.items(): setattr(self, _key, _value) @@ -4020,6 +4088,12 @@ def _from_dict(cls, _dict): if 'nodes_visited' in _dict: args['nodes_visited'] = _dict['nodes_visited'] del xtra['nodes_visited'] + if 'nodes_visited_details' in _dict: + args['nodes_visited_details'] = [ + DialogNodeVisitedDetails._from_dict(x) + for x in _dict['nodes_visited_details'] + ] + del xtra['nodes_visited_details'] args.update(xtra) return cls(**args) @@ -4032,6 +4106,11 @@ def _to_dict(self): _dict['text'] = self.text if hasattr(self, 'nodes_visited') and self.nodes_visited is not None: _dict['nodes_visited'] = self.nodes_visited + if hasattr(self, 'nodes_visited_details' + ) and self.nodes_visited_details is not None: + _dict['nodes_visited_details'] = [ + x._to_dict() for x in self.nodes_visited_details + ] if hasattr(self, '_additionalProperties'): for _key in self._additionalProperties: _value 
= getattr(self, _key, None) @@ -4040,7 +4119,9 @@ def _to_dict(self): return _dict def __setattr__(self, name, value): - properties = {'log_messages', 'text', 'nodes_visited'} + properties = { + 'log_messages', 'text', 'nodes_visited', 'nodes_visited_details' + } if not hasattr(self, '_additionalProperties'): super(OutputData, self).__setattr__('_additionalProperties', set()) if name not in properties: diff --git a/watson_developer_cloud/dialog_v1.py b/watson_developer_cloud/dialog_v1.py deleted file mode 100644 index 0cc613fb1..000000000 --- a/watson_developer_cloud/dialog_v1.py +++ /dev/null @@ -1,126 +0,0 @@ -# Copyright 2016 IBM All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -The v1 Dialog service -(https://www.ibm.com/watson/developercloud/dialog.html) -""" -from __future__ import print_function -from .watson_service import WatsonService - - -class DialogV1(WatsonService): - default_url = 'https://gateway.watsonplatform.net/dialog/api' - dialog_json_format = 'application/wds+json' - dialog_xml_format = 'application/wds+xml' - dialog_binary_format = 'application/octet-stream' - - def __init__(self, url=default_url, **kwargs): - WatsonService.__init__(self, 'dialog', url, **kwargs) - print( - 'WARNING: The Dialog service was deprecated. Existing instances ' - 'of the service stopped functioning on August 9, 2017. ' - 'Dialog was replaced by the Conversation service. 
See ' - 'https://console.bluemix.net/docs/services/conversation' - '/index.html#about') - - def get_dialogs(self): - return self.request(method='GET', url='/v1/dialogs', accept_json=True) - - def get_dialog(self, dialog_id, accept='application/wds+json'): - accept_json = accept == self.dialog_json_format - headers = {'accept': accept} - return self.request(method='GET', - url='/v1/dialogs/{0}'.format(dialog_id), - headers=headers, - accept_json=accept_json) - - def create_dialog(self, dialog_file, name): - return self.request(method='POST', url='/v1/dialogs', - files={'file': dialog_file}, accept_json=True, - data={'name': name}) - - def update_dialog(self, dialog_id, dialog_file): - dialog_id = self.unpack_id(dialog_id, 'dialog_id') - return self.request(method='PUT', - url='/v1/dialogs/{0}'.format(dialog_id), - files={'file': dialog_file}, - accept_json=True) - - def get_content(self, dialog_id): - dialog_id = self.unpack_id(dialog_id, 'dialog_id') - return self.request(method='GET', - url='/v1/dialogs/{0}/content'.format(dialog_id), - accept_json=True) - - def update_content(self, dialog_id, content): - dialog_id = self.unpack_id(dialog_id, 'dialog_id') - return self.request(method='PUT', - url='/v1/dialogs/{0}/content'.format(dialog_id), - json=content, - accept_json=True) - - def conversation(self, dialog_id, dialog_input=None, client_id=None, - conversation_id=None): - dialog_id = self.unpack_id(dialog_id, 'dialog_id') - data = {'input': dialog_input, 'client_id': client_id, - 'conversation_id': conversation_id} - return self.request(method='POST', - url='/v1/dialogs/{0}/conversation'.format( - dialog_id), data=data, - accept_json=True) - - @staticmethod - def _format_date(date): - if date: - return date.strftime('%Y-%m-%d %H:%M:%S') - - def get_conversation(self, dialog_id, date_from, date_to): - dialog_id = self.unpack_id(dialog_id, 'dialog_id') - params = {'date_from': self._format_date( - date_from), 'date_to': self._format_date(date_to)} - return 
self.request(method='GET', - url='/v1/dialogs/{0}/conversation'.format( - dialog_id), params=params, - accept_json=True) - - def get_profile(self, dialog_id, client_id, name=None): - dialog_id = self.unpack_id(dialog_id, 'dialog_id') - client_id = self.unpack_id(client_id, 'client_id') - params = {'client_id': client_id, 'name': name} - return self.request(method='GET', - url='/v1/dialogs/{0}/profile'.format(dialog_id), - params=params, - accept_json=True) - - def update_profile(self, dialog_id, name_values, client_id=None): - dialog_id = self.unpack_id(dialog_id, 'dialog_id') - client_id = self.unpack_id(client_id, 'client_id') - if isinstance(name_values, dict): - name_values = list({'name': item[0], 'value': item[1]} for item in - name_values.items()) - params = { - 'client_id': client_id, - 'name_values': name_values - } - return self.request(method='PUT', - url='/v1/dialogs/{0}/profile'.format(dialog_id), - json=params, - accept_json=True) - - def delete_dialog(self, dialog_id): - dialog_id = self.unpack_id(dialog_id, 'dialog_id') - return self.request(method='DELETE', - url='/v1/dialogs/{0}'.format(dialog_id), - accept_json=True) From 20fbed6219b18fefaf5a1eeb128b2ef24f28c6fa Mon Sep 17 00:00:00 2001 From: Erika Dsouza Date: Thu, 15 Feb 2018 10:13:05 -0500 Subject: [PATCH 09/45] Remove dialog import --- watson_developer_cloud/__init__.py | 1 - 1 file changed, 1 deletion(-) diff --git a/watson_developer_cloud/__init__.py b/watson_developer_cloud/__init__.py index bea16fd40..96c1f3bdd 100755 --- a/watson_developer_cloud/__init__.py +++ b/watson_developer_cloud/__init__.py @@ -18,7 +18,6 @@ from .watson_service import WatsonInvalidArgument from .authorization_v1 import AuthorizationV1 from .conversation_v1 import ConversationV1 -from .dialog_v1 import DialogV1 from .language_translation_v2 import LanguageTranslationV2 from .language_translator_v2 import LanguageTranslatorV2 from .natural_language_classifier_v1 import NaturalLanguageClassifierV1 From 
059cba5f1efed1b09b4bd03d7bc83cb2e8435380 Mon Sep 17 00:00:00 2001 From: Erika Dsouza Date: Thu, 15 Feb 2018 11:12:57 -0500 Subject: [PATCH 10/45] Regenerate discovery --- test/unit/test_discovery_v1.py | 36 + watson_developer_cloud/discovery_v1.py | 1182 +++++++++++++++++++----- 2 files changed, 986 insertions(+), 232 deletions(-) diff --git a/test/unit/test_discovery_v1.py b/test/unit/test_discovery_v1.py index ea53a7d13..c28240647 100644 --- a/test/unit/test_discovery_v1.py +++ b/test/unit/test_discovery_v1.py @@ -763,3 +763,39 @@ def test_update_training_example(): assert response == mock_response # Verify that response can be converted to a TrainingExample TrainingExample._from_dict(response) + +@responses.activate +def test_expansions(): + url = 'https://gateway.watsonplatform.net/discovery/api/v1/environments/envid/collections/colid/expansions' + responses.add( + responses.GET, + url, + body='{"expansions": "results"}', + status=200, + content_type='application_json') + responses.add( + responses.DELETE, + url, + body='{"description": "success" }', + status=200, + content_type='application_json') + responses.add( + responses.POST, + url, + body='{"expansions": "success" }', + status=200, + content_type='application_json') + + discovery = watson_developer_cloud.DiscoveryV1('2017-11-07', + username="username", password="password") + + discovery.list_expansions('envid', 'colid') + assert responses.calls[0].response.json() == {"expansions": "results"} + + discovery.create_expansions('envid', 'colid', { "expansions": [{"input_terms": "dumb"}] }) + assert responses.calls[1].response.json() == {"expansions": "success" } + + discovery.delete_expansions('envid', 'colid') + assert responses.calls[2].response.json() == {"description": "success" } + + assert len(responses.calls) == 3 \ No newline at end of file diff --git a/watson_developer_cloud/discovery_v1.py b/watson_developer_cloud/discovery_v1.py index f34d30ed8..3c77a7018 100644 --- 
a/watson_developer_cloud/discovery_v1.py +++ b/watson_developer_cloud/discovery_v1.py @@ -1,6 +1,6 @@ # coding: utf-8 -# Copyright 2017 IBM All Rights Reserved. +# Copyright 2018 IBM All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -492,6 +492,35 @@ def create_collection(self, method='POST', url=url, params=params, json=data, accept_json=True) return response + def create_expansions(self, environment_id, collection_id, expansions): + """ + Set the expansion list. + + Create or replace the Expansion list for this collection. The maximum number of + expanded terms per collection is `500`. The current expansion list is replaced + with the uploaded content. + + :param str environment_id: The ID of the environment. + :param str collection_id: The ID of the collection. + :param list[Expansion] expansions: An array of query expansion definitions. Each object in the `expansions` array represents a term or set of terms that will be expanded into other terms. Each expansion object can be configured so that all terms are expanded to all other terms in the object - bi-directional, or a set list of terms can be expanded into a second list of terms - uni-directional. To create a bi-directional expansion specify an `expanded_terms` array. When found in a query, all items in the `expanded_terms` array are then expanded to the other items in the same array. To create a uni-directional expansion, specify both an array of `input_terms` and an array of `expanded_terms`. When items in the `input_terms` array are present in a query, they are expanded using the items listed in the `expanded_terms` array. + :return: A `dict` containing the `Expansions` response. 
+ :rtype: dict + """ + if environment_id is None: + raise ValueError('environment_id must be provided') + if collection_id is None: + raise ValueError('collection_id must be provided') + if expansions is None: + raise ValueError('expansions must be provided') + expansions = [self._convert_model(x) for x in expansions] + params = {'version': self.version} + data = {'expansions': expansions} + url = '/v1/environments/{0}/collections/{1}/expansions'.format( + *self._encode_path_vars(environment_id, collection_id)) + response = self.request( + method='POST', url=url, params=params, json=data, accept_json=True) + return response + def delete_collection(self, environment_id, collection_id): """ Delete a collection. @@ -512,6 +541,27 @@ def delete_collection(self, environment_id, collection_id): method='DELETE', url=url, params=params, accept_json=True) return response + def delete_expansions(self, environment_id, collection_id): + """ + Delete the expansions list. + + Remove the expansion information for this collection. The expansion list must be + deleted to disable query expansion for a collection. + + :param str environment_id: The ID of the environment. + :param str collection_id: The ID of the collection. + :rtype: None + """ + if environment_id is None: + raise ValueError('environment_id must be provided') + if collection_id is None: + raise ValueError('collection_id must be provided') + params = {'version': self.version} + url = '/v1/environments/{0}/collections/{1}/expansions'.format( + *self._encode_path_vars(environment_id, collection_id)) + self.request(method='DELETE', url=url, params=params, accept_json=True) + return None + def get_collection(self, environment_id, collection_id): """ Get collection details. @@ -574,6 +624,29 @@ def list_collections(self, environment_id, name=None): method='GET', url=url, params=params, accept_json=True) return response + def list_expansions(self, environment_id, collection_id): + """ + List current expansions. 
+ + Returns the current expansion list for the specified collection. If an expansion + list is not specified, an object with empty expansion arrays is returned. + + :param str environment_id: The ID of the environment. + :param str collection_id: The ID of the collection. + :return: A `dict` containing the `Expansions` response. + :rtype: dict + """ + if environment_id is None: + raise ValueError('environment_id must be provided') + if collection_id is None: + raise ValueError('collection_id must be provided') + params = {'version': self.version} + url = '/v1/environments/{0}/collections/{1}/expansions'.format( + *self._encode_path_vars(environment_id, collection_id)) + response = self.request( + method='GET', url=url, params=params, accept_json=True) + return response + def update_collection(self, environment_id, collection_id, @@ -1496,7 +1569,7 @@ class Collection(object): :attr datetime updated: (optional) The timestamp of when the collection was last updated in the format yyyy-MM-dd'T'HH:mm:ss.SSS'Z'. :attr str status: (optional) The status of the collection. :attr str configuration_id: (optional) The unique identifier of the collection's configuration. - :attr str language: (optional) The language of the documents stored in the collection. Permitted values include `en_us` (U.S. English), `de` (German), and `es` (Spanish). + :attr str language: (optional) The language of the documents stored in the collection. Permitted values include `en` (English), `de` (German), and `es` (Spanish). :attr DocumentCounts document_counts: (optional) The object providing information about the documents in the collection. Present only when retrieving details of a collection. :attr CollectionDiskUsage disk_usage: (optional) The object providing information about the disk usage of the collection. Present only when retrieving details of a collection. :attr TrainingStatus training_status: (optional) Provides information about the status of relevance training for collection. 
@@ -1524,7 +1597,7 @@ def __init__(self, :param datetime updated: (optional) The timestamp of when the collection was last updated in the format yyyy-MM-dd'T'HH:mm:ss.SSS'Z'. :param str status: (optional) The status of the collection. :param str configuration_id: (optional) The unique identifier of the collection's configuration. - :param str language: (optional) The language of the documents stored in the collection. Permitted values include `en_us` (U.S. English), `de` (German), and `es` (Spanish). + :param str language: (optional) The language of the documents stored in the collection. Permitted values include `en` (English), `de` (German), and `es` (Spanish). :param DocumentCounts document_counts: (optional) The object providing information about the documents in the collection. Present only when retrieving details of a collection. :param CollectionDiskUsage disk_usage: (optional) The object providing information about the disk usage of the collection. Present only when retrieving details of a collection. :param TrainingStatus training_status: (optional) Provides information about the status of relevance training for collection. @@ -2556,7 +2629,7 @@ class Enrichment(object): :attr str destination_field: Field where enrichments will be stored. This field must already exist or be at most 1 level deeper than an existing field. For example, if `text` is a top-level field with no sub-fields, `text.foo` is a valid destination but `text.foo.bar` is not. :attr str source_field: Field to be enriched. :attr bool overwrite: (optional) Indicates that the enrichments will overwrite the destination_field field if it already exists. - :attr str enrichment_name: Name of the enrichment service to call. Currently the only valid value is `natural_language_understanding`. Previous API versions also supported `alchemy_language`. + :attr str enrichment_name: Name of the enrichment service to call. Current options are `natural_language_understanding` and `elements`. 
When using `natural_language_understanding`, the `options` object must contain Natural Language Understanding Options. When using `elements` the `options` object must contain Element Classification options. Additionally, when using the `elements` enrichment the configuration specified and files ingested must meet all the criteria specified in [the documentation](https://console.bluemix.net/docs/services/discovery/element-classification.html) Previous API versions also supported `alchemy_language`. :attr bool ignore_downstream_errors: (optional) If true, then most errors generated during the enrichment process will be treated as warnings and will not cause the document to fail processing. :attr EnrichmentOptions options: (optional) A list of options specific to the enrichment. """ @@ -2574,7 +2647,7 @@ def __init__(self, :param str destination_field: Field where enrichments will be stored. This field must already exist or be at most 1 level deeper than an existing field. For example, if `text` is a top-level field with no sub-fields, `text.foo` is a valid destination but `text.foo.bar` is not. :param str source_field: Field to be enriched. - :param str enrichment_name: Name of the enrichment service to call. Currently the only valid value is `natural_language_understanding`. Previous API versions also supported `alchemy_language`. + :param str enrichment_name: Name of the enrichment service to call. Current options are `natural_language_understanding` and `elements`. When using `natural_language_understanding`, the `options` object must contain Natural Language Understanding Options. When using `elements` the `options` object must contain Element Classification options. Additionally, when using the `elements` enrichment the configuration specified and files ingested must meet all the criteria specified in [the documentation](https://console.bluemix.net/docs/services/discovery/element-classification.html) Previous API versions also supported `alchemy_language`.
:param str description: (optional) Describes what the enrichment step does. :param bool overwrite: (optional) Indicates that the enrichments will overwrite the destination_field field if it already exists. :param bool ignore_downstream_errors: (optional) If true, then most errors generated during the enrichment process will be treated as warnings and will not cause the document to fail processing. @@ -2659,25 +2732,40 @@ def __ne__(self, other): class EnrichmentOptions(object): """ - EnrichmentOptions. + Options which are specific to a particular enrichment. + :attr NluEnrichmentFeatures features: (optional) An object representing the enrichment features that will be applied to the specified field. + :attr str model: (optional) *For use with `elements` enrichments only.* The element extraction model to use. Models available are: `contract`. """ - def __init__(self): + def __init__(self, features=None, model=None): """ Initialize a EnrichmentOptions object. + :param NluEnrichmentFeatures features: (optional) An object representing the enrichment features that will be applied to the specified field. + :param str model: (optional) *For use with `elements` enrichments only.* The element extraction model to use. Models available are: `contract`. 
""" + self.features = features + self.model = model @classmethod def _from_dict(cls, _dict): """Initialize a EnrichmentOptions object from a json dictionary.""" args = {} + if 'features' in _dict: + args['features'] = NluEnrichmentFeatures._from_dict( + _dict['features']) + if 'model' in _dict: + args['model'] = _dict['model'] return cls(**args) def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} + if hasattr(self, 'features') and self.features is not None: + _dict['features'] = self.features._to_dict() + if hasattr(self, 'model') and self.model is not None: + _dict['model'] = self.model return _dict def __str__(self): @@ -2859,6 +2947,115 @@ def __ne__(self, other): return not self == other +class Expansion(object): + """ + An expansion definition. Each object respresents one set of expandable strings. For + example, you could have expansions for the word `hot` in one object, and expansions + for the word `cold` in another. + + :attr list[str] input_terms: (optional) A list of terms that will be expanded for this expansion. If specified, only the items in this list are expanded. + :attr list[str] expanded_terms: A list of terms that this expansion will be expanded to. If specified without `input_terms`, it also functions as the input term list. + """ + + def __init__(self, expanded_terms, input_terms=None): + """ + Initialize a Expansion object. + + :param list[str] expanded_terms: A list of terms that this expansion will be expanded to. If specified without `input_terms`, it also functions as the input term list. + :param list[str] input_terms: (optional) A list of terms that will be expanded for this expansion. If specified, only the items in this list are expanded. 
+ """ + self.input_terms = input_terms + self.expanded_terms = expanded_terms + + @classmethod + def _from_dict(cls, _dict): + """Initialize a Expansion object from a json dictionary.""" + args = {} + if 'input_terms' in _dict: + args['input_terms'] = _dict['input_terms'] + if 'expanded_terms' in _dict: + args['expanded_terms'] = _dict['expanded_terms'] + else: + raise ValueError( + 'Required property \'expanded_terms\' not present in Expansion JSON' + ) + return cls(**args) + + def _to_dict(self): + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'input_terms') and self.input_terms is not None: + _dict['input_terms'] = self.input_terms + if hasattr(self, 'expanded_terms') and self.expanded_terms is not None: + _dict['expanded_terms'] = self.expanded_terms + return _dict + + def __str__(self): + """Return a `str` version of this Expansion object.""" + return json.dumps(self._to_dict(), indent=2) + + def __eq__(self, other): + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other): + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class Expansions(object): + """ + The query expansion definitions for the specified collection. + + :attr list[Expansion] expansions: An array of query expansion definitions. Each object in the `expansions` array represents a term or set of terms that will be expanded into other terms. Each expansion object can be configured so that all terms are expanded to all other terms in the object - bi-directional, or a set list of terms can be expanded into a second list of terms - uni-directional. To create a bi-directional expansion specify an `expanded_terms` array. When found in a query, all items in the `expanded_terms` array are then expanded to the other items in the same array. 
To create a uni-directional expansion, specify both an array of `input_terms` and an array of `expanded_terms`. When items in the `input_terms` array are present in a query, they are expanded using the items listed in the `expanded_terms` array. + """ + + def __init__(self, expansions): + """ + Initialize a Expansions object. + + :param list[Expansion] expansions: An array of query expansion definitions. Each object in the `expansions` array represents a term or set of terms that will be expanded into other terms. Each expansion object can be configured so that all terms are expanded to all other terms in the object - bi-directional, or a set list of terms can be expanded into a second list of terms - uni-directional. To create a bi-directional expansion specify an `expanded_terms` array. When found in a query, all items in the `expanded_terms` array are then expanded to the other items in the same array. To create a uni-directional expansion, specify both an array of `input_terms` and an array of `expanded_terms`. When items in the `input_terms` array are present in a query, they are expanded using the items listed in the `expanded_terms` array. 
+ """ + self.expansions = expansions + + @classmethod + def _from_dict(cls, _dict): + """Initialize a Expansions object from a json dictionary.""" + args = {} + if 'expansions' in _dict: + args['expansions'] = [ + Expansion._from_dict(x) for x in _dict['expansions'] + ] + else: + raise ValueError( + 'Required property \'expansions\' not present in Expansions JSON' + ) + return cls(**args) + + def _to_dict(self): + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'expansions') and self.expansions is not None: + _dict['expansions'] = [x._to_dict() for x in self.expansions] + return _dict + + def __str__(self): + """Return a `str` version of this Expansions object.""" + return json.dumps(self._to_dict(), indent=2) + + def __eq__(self, other): + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other): + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class Field(object): """ Field. @@ -3437,56 +3634,51 @@ def __ne__(self, other): return not self == other -class NormalizationOperation(object): +class NluEnrichmentCategories(object): """ - NormalizationOperation. + An object that indicates the Categories enrichment will be applied to the specified + field. - :attr str operation: (optional) Identifies what type of operation to perform. **copy** - Copies the value of the `source_field` to the `destination_field` field. If the `destination_field` already exists, then the value of the `source_field` overwrites the original value of the `destination_field`. **move** - Renames (moves) the `source_field` to the `destination_field`. If the `destination_field` already exists, then the value of the `source_field` overwrites the original value of the `destination_field`. 
Rename is identical to copy, except that the `source_field` is removed after the value has been copied to the `destination_field` (it is the same as a _copy_ followed by a _remove_). **merge** - Merges the value of the `source_field` with the value of the `destination_field`. The `destination_field` is converted into an array if it is not already an array, and the value of the `source_field` is appended to the array. This operation removes the `source_field` after the merge. If the `source_field` does not exist in the current document, then the `destination_field` is still converted into an array (if it is not an array already). This is ensures the type for `destination_field` is consistent across all documents. **remove** - Deletes the `source_field` field. The `destination_field` is ignored for this operation. **remove_nulls** - Removes all nested null (blank) leif values from the JSON tree. `source_field` and `destination_field` are ignored by this operation because _remove_nulls_ operates on the entire JSON tree. Typically, `remove_nulls` is invoked as the last normalization operation (if it is inoked at all, it can be time-expensive). - :attr str source_field: (optional) The source field for the operation. - :attr str destination_field: (optional) The destination field for the operation. """ - def __init__(self, - operation=None, - source_field=None, - destination_field=None): + def __init__(self, **kwargs): """ - Initialize a NormalizationOperation object. + Initialize a NluEnrichmentCategories object. - :param str operation: (optional) Identifies what type of operation to perform. **copy** - Copies the value of the `source_field` to the `destination_field` field. If the `destination_field` already exists, then the value of the `source_field` overwrites the original value of the `destination_field`. **move** - Renames (moves) the `source_field` to the `destination_field`. 
If the `destination_field` already exists, then the value of the `source_field` overwrites the original value of the `destination_field`. Rename is identical to copy, except that the `source_field` is removed after the value has been copied to the `destination_field` (it is the same as a _copy_ followed by a _remove_). **merge** - Merges the value of the `source_field` with the value of the `destination_field`. The `destination_field` is converted into an array if it is not already an array, and the value of the `source_field` is appended to the array. This operation removes the `source_field` after the merge. If the `source_field` does not exist in the current document, then the `destination_field` is still converted into an array (if it is not an array already). This is ensures the type for `destination_field` is consistent across all documents. **remove** - Deletes the `source_field` field. The `destination_field` is ignored for this operation. **remove_nulls** - Removes all nested null (blank) leif values from the JSON tree. `source_field` and `destination_field` are ignored by this operation because _remove_nulls_ operates on the entire JSON tree. Typically, `remove_nulls` is invoked as the last normalization operation (if it is inoked at all, it can be time-expensive). - :param str source_field: (optional) The source field for the operation. - :param str destination_field: (optional) The destination field for the operation. + :param **kwargs: (optional) Any additional properties. 
""" - self.operation = operation - self.source_field = source_field - self.destination_field = destination_field + for _key, _value in kwargs.items(): + setattr(self, _key, _value) @classmethod def _from_dict(cls, _dict): - """Initialize a NormalizationOperation object from a json dictionary.""" + """Initialize a NluEnrichmentCategories object from a json dictionary.""" args = {} - if 'operation' in _dict: - args['operation'] = _dict['operation'] - if 'source_field' in _dict: - args['source_field'] = _dict['source_field'] - if 'destination_field' in _dict: - args['destination_field'] = _dict['destination_field'] + xtra = _dict.copy() + args.update(xtra) return cls(**args) def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'operation') and self.operation is not None: - _dict['operation'] = self.operation - if hasattr(self, 'source_field') and self.source_field is not None: - _dict['source_field'] = self.source_field - if hasattr(self, - 'destination_field') and self.destination_field is not None: - _dict['destination_field'] = self.destination_field + if hasattr(self, '_additionalProperties'): + for _key in self._additionalProperties: + _value = getattr(self, _key, None) + if _value is not None: + _dict[_key] = _value return _dict + def __setattr__(self, name, value): + properties = {} + if not hasattr(self, '_additionalProperties'): + super(NluEnrichmentCategories, self).__setattr__( + '_additionalProperties', set()) + if name not in properties: + self._additionalProperties.add(name) + super(NluEnrichmentCategories, self).__setattr__(name, value) + def __str__(self): - """Return a `str` version of this NormalizationOperation object.""" + """Return a `str` version of this NluEnrichmentCategories object.""" return json.dumps(self._to_dict(), indent=2) def __eq__(self, other): @@ -3500,87 +3692,45 @@ def __ne__(self, other): return not self == other -class Notice(object): +class NluEnrichmentEmotion(object): """ - 
A notice produced for the collection. + An object specifying the emotion detection enrichment and related parameters. - :attr str notice_id: (optional) Identifies the notice. Many notices might have the same ID. This field exists so that user applications can programmatically identify a notice and take automatic corrective action. - :attr datetime created: (optional) The creation date of the collection in the format yyyy-MM-dd'T'HH:mm:ss.SSS'Z'. - :attr str document_id: (optional) Unique identifier of the document. - :attr str query_id: (optional) Unique identifier of the query used for relevance training. - :attr str severity: (optional) Severity level of the notice. - :attr str step: (optional) Ingestion or training step in which the notice occurred. - :attr str description: (optional) The description of the notice. + :attr bool document: (optional) When `true`, emotion detection is performed on the entire field. + :attr list[str] targets: (optional) A comma-separated list of target strings that will have any associated emotions detected. """ - def __init__(self, - notice_id=None, - created=None, - document_id=None, - query_id=None, - severity=None, - step=None, - description=None): + def __init__(self, document=None, targets=None): """ - Initialize a Notice object. + Initialize a NluEnrichmentEmotion object. - :param str notice_id: (optional) Identifies the notice. Many notices might have the same ID. This field exists so that user applications can programmatically identify a notice and take automatic corrective action. - :param datetime created: (optional) The creation date of the collection in the format yyyy-MM-dd'T'HH:mm:ss.SSS'Z'. - :param str document_id: (optional) Unique identifier of the document. - :param str query_id: (optional) Unique identifier of the query used for relevance training. - :param str severity: (optional) Severity level of the notice. - :param str step: (optional) Ingestion or training step in which the notice occurred. 
- :param str description: (optional) The description of the notice. + :param bool document: (optional) When `true`, emotion detection is performed on the entire field. + :param list[str] targets: (optional) A comma-separated list of target strings that will have any associated emotions detected. """ - self.notice_id = notice_id - self.created = created - self.document_id = document_id - self.query_id = query_id - self.severity = severity - self.step = step - self.description = description + self.document = document + self.targets = targets @classmethod def _from_dict(cls, _dict): - """Initialize a Notice object from a json dictionary.""" + """Initialize a NluEnrichmentEmotion object from a json dictionary.""" args = {} - if 'notice_id' in _dict: - args['notice_id'] = _dict['notice_id'] - if 'created' in _dict: - args['created'] = string_to_datetime(_dict['created']) - if 'document_id' in _dict: - args['document_id'] = _dict['document_id'] - if 'query_id' in _dict: - args['query_id'] = _dict['query_id'] - if 'severity' in _dict: - args['severity'] = _dict['severity'] - if 'step' in _dict: - args['step'] = _dict['step'] - if 'description' in _dict: - args['description'] = _dict['description'] + if 'document' in _dict: + args['document'] = _dict['document'] + if 'targets' in _dict: + args['targets'] = _dict['targets'] return cls(**args) def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'notice_id') and self.notice_id is not None: - _dict['notice_id'] = self.notice_id - if hasattr(self, 'created') and self.created is not None: - _dict['created'] = datetime_to_string(self.created) - if hasattr(self, 'document_id') and self.document_id is not None: - _dict['document_id'] = self.document_id - if hasattr(self, 'query_id') and self.query_id is not None: - _dict['query_id'] = self.query_id - if hasattr(self, 'severity') and self.severity is not None: - _dict['severity'] = self.severity - if hasattr(self, 'step') and 
self.step is not None: - _dict['step'] = self.step - if hasattr(self, 'description') and self.description is not None: - _dict['description'] = self.description + if hasattr(self, 'document') and self.document is not None: + _dict['document'] = self.document + if hasattr(self, 'targets') and self.targets is not None: + _dict['targets'] = self.targets return _dict def __str__(self): - """Return a `str` version of this Notice object.""" + """Return a `str` version of this NluEnrichmentEmotion object.""" return json.dumps(self._to_dict(), indent=2) def __eq__(self, other): @@ -3594,38 +3744,88 @@ def __ne__(self, other): return not self == other -class PdfHeadingDetection(object): +class NluEnrichmentEntities(object): """ - PdfHeadingDetection. - - :attr list[FontSetting] fonts: (optional) + An object speficying the Entities enrichment and related parameters. + + :attr bool sentiment: (optional) When `true`, sentiment analysis of entities will be performed on the specified field. + :attr bool emotion: (optional) When `true`, emotion detection of entities will be performed on the specified field. + :attr int limit: (optional) The maximum number of entities to extract for each instance of the specified field. + :attr bool mentions: (optional) When `true`, the number of mentions of each identified entity is recorded. The default is `false`. + :attr bool mention_types: (optional) When `true`, the types of mentions for each idetifieid entity is recorded. The default is `false`. + :attr bool sentence_location: (optional) When `true`, a list of sentence locations for each instance of each identified entity is recorded. The default is `false`. + :attr str model: (optional) The enrichement model to use with entity extraction. May be a custom model provided by Watson Knowledge Studio, the public model for use with Knowledge Graph `en-news`, or the default public model `alchemy`. """ - def __init__(self, fonts=None): - """ - Initialize a PdfHeadingDetection object. 
- - :param list[FontSetting] fonts: (optional) - """ - self.fonts = fonts + def __init__(self, + sentiment=None, + emotion=None, + limit=None, + mentions=None, + mention_types=None, + sentence_location=None, + model=None): + """ + Initialize a NluEnrichmentEntities object. + + :param bool sentiment: (optional) When `true`, sentiment analysis of entities will be performed on the specified field. + :param bool emotion: (optional) When `true`, emotion detection of entities will be performed on the specified field. + :param int limit: (optional) The maximum number of entities to extract for each instance of the specified field. + :param bool mentions: (optional) When `true`, the number of mentions of each identified entity is recorded. The default is `false`. + :param bool mention_types: (optional) When `true`, the types of mentions for each idetifieid entity is recorded. The default is `false`. + :param bool sentence_location: (optional) When `true`, a list of sentence locations for each instance of each identified entity is recorded. The default is `false`. + :param str model: (optional) The enrichement model to use with entity extraction. May be a custom model provided by Watson Knowledge Studio, the public model for use with Knowledge Graph `en-news`, or the default public model `alchemy`. 
+ """ + self.sentiment = sentiment + self.emotion = emotion + self.limit = limit + self.mentions = mentions + self.mention_types = mention_types + self.sentence_location = sentence_location + self.model = model @classmethod def _from_dict(cls, _dict): - """Initialize a PdfHeadingDetection object from a json dictionary.""" + """Initialize a NluEnrichmentEntities object from a json dictionary.""" args = {} - if 'fonts' in _dict: - args['fonts'] = [FontSetting._from_dict(x) for x in _dict['fonts']] + if 'sentiment' in _dict: + args['sentiment'] = _dict['sentiment'] + if 'emotion' in _dict: + args['emotion'] = _dict['emotion'] + if 'limit' in _dict: + args['limit'] = _dict['limit'] + if 'mentions' in _dict: + args['mentions'] = _dict['mentions'] + if 'mention_types' in _dict: + args['mention_types'] = _dict['mention_types'] + if 'sentence_location' in _dict: + args['sentence_location'] = _dict['sentence_location'] + if 'model' in _dict: + args['model'] = _dict['model'] return cls(**args) def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'fonts') and self.fonts is not None: - _dict['fonts'] = [x._to_dict() for x in self.fonts] + if hasattr(self, 'sentiment') and self.sentiment is not None: + _dict['sentiment'] = self.sentiment + if hasattr(self, 'emotion') and self.emotion is not None: + _dict['emotion'] = self.emotion + if hasattr(self, 'limit') and self.limit is not None: + _dict['limit'] = self.limit + if hasattr(self, 'mentions') and self.mentions is not None: + _dict['mentions'] = self.mentions + if hasattr(self, 'mention_types') and self.mention_types is not None: + _dict['mention_types'] = self.mention_types + if hasattr(self, + 'sentence_location') and self.sentence_location is not None: + _dict['sentence_location'] = self.sentence_location + if hasattr(self, 'model') and self.model is not None: + _dict['model'] = self.model return _dict def __str__(self): - """Return a `str` version of this 
PdfHeadingDetection object.""" + """Return a `str` version of this NluEnrichmentEntities object.""" return json.dumps(self._to_dict(), indent=2) def __eq__(self, other): @@ -3639,17 +3839,534 @@ def __ne__(self, other): return not self == other -class PdfSettings(object): +class NluEnrichmentFeatures(object): """ - A list of PDF conversion settings. - - :attr PdfHeadingDetection heading: (optional) + NluEnrichmentFeatures. + + :attr NluEnrichmentKeywords keywords: (optional) An object specifying the Keyword enrichment and related parameters. + :attr NluEnrichmentEntities entities: (optional) An object speficying the Entities enrichment and related parameters. + :attr NluEnrichmentSentiment sentiment: (optional) An object specifying the sentiment extraction enrichment and related parameters. + :attr NluEnrichmentEmotion emotion: (optional) An object specifying the emotion detection enrichment and related parameters. + :attr NluEnrichmentCategories categories: (optional) An object specifying the categories enrichment and related parameters. + :attr NluEnrichmentSemanticRoles semantic_roles: (optional) An object specifiying the semantic roles enrichment and related parameters. + :attr NluEnrichmentRelations relations: (optional) An object specifying the relations enrichment and related parameters. """ - def __init__(self, heading=None): - """ - Initialize a PdfSettings object. - + def __init__(self, + keywords=None, + entities=None, + sentiment=None, + emotion=None, + categories=None, + semantic_roles=None, + relations=None): + """ + Initialize a NluEnrichmentFeatures object. + + :param NluEnrichmentKeywords keywords: (optional) An object specifying the Keyword enrichment and related parameters. + :param NluEnrichmentEntities entities: (optional) An object speficying the Entities enrichment and related parameters. + :param NluEnrichmentSentiment sentiment: (optional) An object specifying the sentiment extraction enrichment and related parameters. 
+ :param NluEnrichmentEmotion emotion: (optional) An object specifying the emotion detection enrichment and related parameters. + :param NluEnrichmentCategories categories: (optional) An object specifying the categories enrichment and related parameters. + :param NluEnrichmentSemanticRoles semantic_roles: (optional) An object specifiying the semantic roles enrichment and related parameters. + :param NluEnrichmentRelations relations: (optional) An object specifying the relations enrichment and related parameters. + """ + self.keywords = keywords + self.entities = entities + self.sentiment = sentiment + self.emotion = emotion + self.categories = categories + self.semantic_roles = semantic_roles + self.relations = relations + + @classmethod + def _from_dict(cls, _dict): + """Initialize a NluEnrichmentFeatures object from a json dictionary.""" + args = {} + if 'keywords' in _dict: + args['keywords'] = NluEnrichmentKeywords._from_dict( + _dict['keywords']) + if 'entities' in _dict: + args['entities'] = NluEnrichmentEntities._from_dict( + _dict['entities']) + if 'sentiment' in _dict: + args['sentiment'] = NluEnrichmentSentiment._from_dict( + _dict['sentiment']) + if 'emotion' in _dict: + args['emotion'] = NluEnrichmentEmotion._from_dict(_dict['emotion']) + if 'categories' in _dict: + args['categories'] = NluEnrichmentCategories._from_dict( + _dict['categories']) + if 'semantic_roles' in _dict: + args['semantic_roles'] = NluEnrichmentSemanticRoles._from_dict( + _dict['semantic_roles']) + if 'relations' in _dict: + args['relations'] = NluEnrichmentRelations._from_dict( + _dict['relations']) + return cls(**args) + + def _to_dict(self): + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'keywords') and self.keywords is not None: + _dict['keywords'] = self.keywords._to_dict() + if hasattr(self, 'entities') and self.entities is not None: + _dict['entities'] = self.entities._to_dict() + if hasattr(self, 'sentiment') and self.sentiment is 
not None: + _dict['sentiment'] = self.sentiment._to_dict() + if hasattr(self, 'emotion') and self.emotion is not None: + _dict['emotion'] = self.emotion._to_dict() + if hasattr(self, 'categories') and self.categories is not None: + _dict['categories'] = self.categories._to_dict() + if hasattr(self, 'semantic_roles') and self.semantic_roles is not None: + _dict['semantic_roles'] = self.semantic_roles._to_dict() + if hasattr(self, 'relations') and self.relations is not None: + _dict['relations'] = self.relations._to_dict() + return _dict + + def __str__(self): + """Return a `str` version of this NluEnrichmentFeatures object.""" + return json.dumps(self._to_dict(), indent=2) + + def __eq__(self, other): + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other): + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class NluEnrichmentKeywords(object): + """ + An object specifying the Keyword enrichment and related parameters. + + :attr bool sentiment: (optional) When `true`, sentiment analysis of keywords will be performed on the specified field. + :attr bool emotion: (optional) When `true`, emotion detection of keywords will be performed on the specified field. + :attr int limit: (optional) The maximum number of keywords to extract for each instance of the specified field. + """ + + def __init__(self, sentiment=None, emotion=None, limit=None): + """ + Initialize a NluEnrichmentKeywords object. + + :param bool sentiment: (optional) When `true`, sentiment analysis of keywords will be performed on the specified field. + :param bool emotion: (optional) When `true`, emotion detection of keywords will be performed on the specified field. + :param int limit: (optional) The maximum number of keywords to extract for each instance of the specified field. 
+ """ + self.sentiment = sentiment + self.emotion = emotion + self.limit = limit + + @classmethod + def _from_dict(cls, _dict): + """Initialize a NluEnrichmentKeywords object from a json dictionary.""" + args = {} + if 'sentiment' in _dict: + args['sentiment'] = _dict['sentiment'] + if 'emotion' in _dict: + args['emotion'] = _dict['emotion'] + if 'limit' in _dict: + args['limit'] = _dict['limit'] + return cls(**args) + + def _to_dict(self): + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'sentiment') and self.sentiment is not None: + _dict['sentiment'] = self.sentiment + if hasattr(self, 'emotion') and self.emotion is not None: + _dict['emotion'] = self.emotion + if hasattr(self, 'limit') and self.limit is not None: + _dict['limit'] = self.limit + return _dict + + def __str__(self): + """Return a `str` version of this NluEnrichmentKeywords object.""" + return json.dumps(self._to_dict(), indent=2) + + def __eq__(self, other): + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other): + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class NluEnrichmentRelations(object): + """ + An object specifying the relations enrichment and related parameters. + + :attr str model: (optional) *For use with `natural_language_understanding` enrichments only.* The enrichement model to use with relationship extraction. May be a custom model provided by Watson Knowledge Studio, the public model for use with Knowledge Graph `en-news`, the default is`en-news`. + """ + + def __init__(self, model=None): + """ + Initialize a NluEnrichmentRelations object. + + :param str model: (optional) *For use with `natural_language_understanding` enrichments only.* The enrichement model to use with relationship extraction. 
May be a custom model provided by Watson Knowledge Studio, the public model for use with Knowledge Graph `en-news`, the default is`en-news`. + """ + self.model = model + + @classmethod + def _from_dict(cls, _dict): + """Initialize a NluEnrichmentRelations object from a json dictionary.""" + args = {} + if 'model' in _dict: + args['model'] = _dict['model'] + return cls(**args) + + def _to_dict(self): + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'model') and self.model is not None: + _dict['model'] = self.model + return _dict + + def __str__(self): + """Return a `str` version of this NluEnrichmentRelations object.""" + return json.dumps(self._to_dict(), indent=2) + + def __eq__(self, other): + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other): + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class NluEnrichmentSemanticRoles(object): + """ + An object specifiying the semantic roles enrichment and related parameters. + + :attr bool entities: (optional) When `true` entities are extracted from the identified sentence parts. + :attr bool keywords: (optional) When `true`, keywords are extracted from the identified sentence parts. + :attr int limit: (optional) The maximum number of semantic roles enrichments to extact from each instance of the specified field. + """ + + def __init__(self, entities=None, keywords=None, limit=None): + """ + Initialize a NluEnrichmentSemanticRoles object. + + :param bool entities: (optional) When `true` entities are extracted from the identified sentence parts. + :param bool keywords: (optional) When `true`, keywords are extracted from the identified sentence parts. + :param int limit: (optional) The maximum number of semantic roles enrichments to extact from each instance of the specified field. 
+ """ + self.entities = entities + self.keywords = keywords + self.limit = limit + + @classmethod + def _from_dict(cls, _dict): + """Initialize a NluEnrichmentSemanticRoles object from a json dictionary.""" + args = {} + if 'entities' in _dict: + args['entities'] = _dict['entities'] + if 'keywords' in _dict: + args['keywords'] = _dict['keywords'] + if 'limit' in _dict: + args['limit'] = _dict['limit'] + return cls(**args) + + def _to_dict(self): + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'entities') and self.entities is not None: + _dict['entities'] = self.entities + if hasattr(self, 'keywords') and self.keywords is not None: + _dict['keywords'] = self.keywords + if hasattr(self, 'limit') and self.limit is not None: + _dict['limit'] = self.limit + return _dict + + def __str__(self): + """Return a `str` version of this NluEnrichmentSemanticRoles object.""" + return json.dumps(self._to_dict(), indent=2) + + def __eq__(self, other): + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other): + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class NluEnrichmentSentiment(object): + """ + An object specifying the sentiment extraction enrichment and related parameters. + + :attr bool document: (optional) When `true`, sentiment analysis is performed on the entire field. + :attr list[str] targets: (optional) A comma-separated list of target strings that will have any associated sentiment analyzed. + """ + + def __init__(self, document=None, targets=None): + """ + Initialize a NluEnrichmentSentiment object. + + :param bool document: (optional) When `true`, sentiment analysis is performed on the entire field. + :param list[str] targets: (optional) A comma-separated list of target strings that will have any associated sentiment analyzed. 
+ """ + self.document = document + self.targets = targets + + @classmethod + def _from_dict(cls, _dict): + """Initialize a NluEnrichmentSentiment object from a json dictionary.""" + args = {} + if 'document' in _dict: + args['document'] = _dict['document'] + if 'targets' in _dict: + args['targets'] = _dict['targets'] + return cls(**args) + + def _to_dict(self): + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'document') and self.document is not None: + _dict['document'] = self.document + if hasattr(self, 'targets') and self.targets is not None: + _dict['targets'] = self.targets + return _dict + + def __str__(self): + """Return a `str` version of this NluEnrichmentSentiment object.""" + return json.dumps(self._to_dict(), indent=2) + + def __eq__(self, other): + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other): + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class NormalizationOperation(object): + """ + NormalizationOperation. + + :attr str operation: (optional) Identifies what type of operation to perform. **copy** - Copies the value of the `source_field` to the `destination_field` field. If the `destination_field` already exists, then the value of the `source_field` overwrites the original value of the `destination_field`. **move** - Renames (moves) the `source_field` to the `destination_field`. If the `destination_field` already exists, then the value of the `source_field` overwrites the original value of the `destination_field`. Rename is identical to copy, except that the `source_field` is removed after the value has been copied to the `destination_field` (it is the same as a _copy_ followed by a _remove_). **merge** - Merges the value of the `source_field` with the value of the `destination_field`. 
The `destination_field` is converted into an array if it is not already an array, and the value of the `source_field` is appended to the array. This operation removes the `source_field` after the merge. If the `source_field` does not exist in the current document, then the `destination_field` is still converted into an array (if it is not an array already). This is ensures the type for `destination_field` is consistent across all documents. **remove** - Deletes the `source_field` field. The `destination_field` is ignored for this operation. **remove_nulls** - Removes all nested null (blank) leif values from the JSON tree. `source_field` and `destination_field` are ignored by this operation because _remove_nulls_ operates on the entire JSON tree. Typically, `remove_nulls` is invoked as the last normalization operation (if it is inoked at all, it can be time-expensive). + :attr str source_field: (optional) The source field for the operation. + :attr str destination_field: (optional) The destination field for the operation. + """ + + def __init__(self, + operation=None, + source_field=None, + destination_field=None): + """ + Initialize a NormalizationOperation object. + + :param str operation: (optional) Identifies what type of operation to perform. **copy** - Copies the value of the `source_field` to the `destination_field` field. If the `destination_field` already exists, then the value of the `source_field` overwrites the original value of the `destination_field`. **move** - Renames (moves) the `source_field` to the `destination_field`. If the `destination_field` already exists, then the value of the `source_field` overwrites the original value of the `destination_field`. Rename is identical to copy, except that the `source_field` is removed after the value has been copied to the `destination_field` (it is the same as a _copy_ followed by a _remove_). **merge** - Merges the value of the `source_field` with the value of the `destination_field`. 
The `destination_field` is converted into an array if it is not already an array, and the value of the `source_field` is appended to the array. This operation removes the `source_field` after the merge. If the `source_field` does not exist in the current document, then the `destination_field` is still converted into an array (if it is not an array already). This is ensures the type for `destination_field` is consistent across all documents. **remove** - Deletes the `source_field` field. The `destination_field` is ignored for this operation. **remove_nulls** - Removes all nested null (blank) leif values from the JSON tree. `source_field` and `destination_field` are ignored by this operation because _remove_nulls_ operates on the entire JSON tree. Typically, `remove_nulls` is invoked as the last normalization operation (if it is inoked at all, it can be time-expensive). + :param str source_field: (optional) The source field for the operation. + :param str destination_field: (optional) The destination field for the operation. 
+ """ + self.operation = operation + self.source_field = source_field + self.destination_field = destination_field + + @classmethod + def _from_dict(cls, _dict): + """Initialize a NormalizationOperation object from a json dictionary.""" + args = {} + if 'operation' in _dict: + args['operation'] = _dict['operation'] + if 'source_field' in _dict: + args['source_field'] = _dict['source_field'] + if 'destination_field' in _dict: + args['destination_field'] = _dict['destination_field'] + return cls(**args) + + def _to_dict(self): + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'operation') and self.operation is not None: + _dict['operation'] = self.operation + if hasattr(self, 'source_field') and self.source_field is not None: + _dict['source_field'] = self.source_field + if hasattr(self, + 'destination_field') and self.destination_field is not None: + _dict['destination_field'] = self.destination_field + return _dict + + def __str__(self): + """Return a `str` version of this NormalizationOperation object.""" + return json.dumps(self._to_dict(), indent=2) + + def __eq__(self, other): + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other): + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class Notice(object): + """ + A notice produced for the collection. + + :attr str notice_id: (optional) Identifies the notice. Many notices might have the same ID. This field exists so that user applications can programmatically identify a notice and take automatic corrective action. + :attr datetime created: (optional) The creation date of the collection in the format yyyy-MM-dd'T'HH:mm:ss.SSS'Z'. + :attr str document_id: (optional) Unique identifier of the document. 
+ :attr str query_id: (optional) Unique identifier of the query used for relevance training. + :attr str severity: (optional) Severity level of the notice. + :attr str step: (optional) Ingestion or training step in which the notice occurred. + :attr str description: (optional) The description of the notice. + """ + + def __init__(self, + notice_id=None, + created=None, + document_id=None, + query_id=None, + severity=None, + step=None, + description=None): + """ + Initialize a Notice object. + + :param str notice_id: (optional) Identifies the notice. Many notices might have the same ID. This field exists so that user applications can programmatically identify a notice and take automatic corrective action. + :param datetime created: (optional) The creation date of the collection in the format yyyy-MM-dd'T'HH:mm:ss.SSS'Z'. + :param str document_id: (optional) Unique identifier of the document. + :param str query_id: (optional) Unique identifier of the query used for relevance training. + :param str severity: (optional) Severity level of the notice. + :param str step: (optional) Ingestion or training step in which the notice occurred. + :param str description: (optional) The description of the notice. 
+ """ + self.notice_id = notice_id + self.created = created + self.document_id = document_id + self.query_id = query_id + self.severity = severity + self.step = step + self.description = description + + @classmethod + def _from_dict(cls, _dict): + """Initialize a Notice object from a json dictionary.""" + args = {} + if 'notice_id' in _dict: + args['notice_id'] = _dict['notice_id'] + if 'created' in _dict: + args['created'] = string_to_datetime(_dict['created']) + if 'document_id' in _dict: + args['document_id'] = _dict['document_id'] + if 'query_id' in _dict: + args['query_id'] = _dict['query_id'] + if 'severity' in _dict: + args['severity'] = _dict['severity'] + if 'step' in _dict: + args['step'] = _dict['step'] + if 'description' in _dict: + args['description'] = _dict['description'] + return cls(**args) + + def _to_dict(self): + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'notice_id') and self.notice_id is not None: + _dict['notice_id'] = self.notice_id + if hasattr(self, 'created') and self.created is not None: + _dict['created'] = datetime_to_string(self.created) + if hasattr(self, 'document_id') and self.document_id is not None: + _dict['document_id'] = self.document_id + if hasattr(self, 'query_id') and self.query_id is not None: + _dict['query_id'] = self.query_id + if hasattr(self, 'severity') and self.severity is not None: + _dict['severity'] = self.severity + if hasattr(self, 'step') and self.step is not None: + _dict['step'] = self.step + if hasattr(self, 'description') and self.description is not None: + _dict['description'] = self.description + return _dict + + def __str__(self): + """Return a `str` version of this Notice object.""" + return json.dumps(self._to_dict(), indent=2) + + def __eq__(self, other): + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other): + 
"""Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class PdfHeadingDetection(object): + """ + PdfHeadingDetection. + + :attr list[FontSetting] fonts: (optional) + """ + + def __init__(self, fonts=None): + """ + Initialize a PdfHeadingDetection object. + + :param list[FontSetting] fonts: (optional) + """ + self.fonts = fonts + + @classmethod + def _from_dict(cls, _dict): + """Initialize a PdfHeadingDetection object from a json dictionary.""" + args = {} + if 'fonts' in _dict: + args['fonts'] = [FontSetting._from_dict(x) for x in _dict['fonts']] + return cls(**args) + + def _to_dict(self): + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'fonts') and self.fonts is not None: + _dict['fonts'] = [x._to_dict() for x in self.fonts] + return _dict + + def __str__(self): + """Return a `str` version of this PdfHeadingDetection object.""" + return json.dumps(self._to_dict(), indent=2) + + def __eq__(self, other): + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other): + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + +class PdfSettings(object): + """ + A list of PDF conversion settings. + + :attr PdfHeadingDetection heading: (optional) + """ + + def __init__(self, heading=None): + """ + Initialize a PdfSettings object. + :param PdfHeadingDetection heading: (optional) """ self.heading = heading @@ -4060,6 +4777,114 @@ def __ne__(self, other): return not self == other + +class QueryNoticesResult(object): + """ + QueryNoticesResult. + + :attr str id: (optional) The unique identifier of the document. + :attr float score: (optional) *Deprecated* This field is now part of the `result_metadata` object. + :attr object metadata: (optional) Metadata of the document. 
+ :attr str collection_id: (optional) The collection ID of the collection containing the document for this result. + :attr QueryResultResultMetadata result_metadata: (optional) + """ + + def __init__(self, + id=None, + score=None, + metadata=None, + collection_id=None, + result_metadata=None, + **kwargs): + """ + Initialize a QueryNoticesResult object. + + :param str id: (optional) The unique identifier of the document. + :param float score: (optional) *Deprecated* This field is now part of the `result_metadata` object. + :param object metadata: (optional) Metadata of the document. + :param str collection_id: (optional) The collection ID of the collection containing the document for this result. + :param QueryResultResultMetadata result_metadata: (optional) + :param **kwargs: (optional) Any additional properties. + """ + self.id = id + self.score = score + self.metadata = metadata + self.collection_id = collection_id + self.result_metadata = result_metadata + for _key, _value in kwargs.items(): + setattr(self, _key, _value) + + @classmethod + def _from_dict(cls, _dict): + """Initialize a QueryNoticesResult object from a json dictionary.""" + args = {} + xtra = _dict.copy() + if 'id' in _dict: + args['id'] = _dict['id'] + del xtra['id'] + if 'score' in _dict: + args['score'] = _dict['score'] + del xtra['score'] + if 'metadata' in _dict: + args['metadata'] = _dict['metadata'] + del xtra['metadata'] + if 'collection_id' in _dict: + args['collection_id'] = _dict['collection_id'] + del xtra['collection_id'] + if 'result_metadata' in _dict: + args['result_metadata'] = QueryResultResultMetadata._from_dict( + _dict['result_metadata']) + del xtra['result_metadata'] + args.update(xtra) + return cls(**args) + + def _to_dict(self): + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'id') and self.id is not None: + _dict['id'] = self.id + if hasattr(self, 'score') and self.score is not None: + _dict['score'] = self.score + if hasattr(self, 
'metadata') and self.metadata is not None: + _dict['metadata'] = self.metadata + if hasattr(self, 'collection_id') and self.collection_id is not None: + _dict['collection_id'] = self.collection_id + if hasattr(self, + 'result_metadata') and self.result_metadata is not None: + _dict['result_metadata'] = self.result_metadata._to_dict() + if hasattr(self, '_additionalProperties'): + for _key in self._additionalProperties: + _value = getattr(self, _key, None) + if _value is not None: + _dict[_key] = _value + return _dict + + def __setattr__(self, name, value): + properties = { + 'id', 'score', 'metadata', 'collection_id', 'result_metadata' + } + if not hasattr(self, '_additionalProperties'): + super(QueryNoticesResult, self).__setattr__('_additionalProperties', + set()) + if name not in properties: + self._additionalProperties.add(name) + super(QueryNoticesResult, self).__setattr__(name, value) + + def __str__(self): + """Return a `str` version of this QueryNoticesResult object.""" + return json.dumps(self._to_dict(), indent=2) + + def __eq__(self, other): + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other): + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class QueryPassages(object): """ QueryPassages. @@ -5306,110 +6131,3 @@ def __eq__(self, other): def __ne__(self, other): """Return `true` when self and other are not equal, false otherwise.""" return not self == other - - -class QueryNoticesResult(object): - """ - QueryNoticesResult. - - :attr str id: (optional) The unique identifier of the document. - :attr float score: (optional) *Deprecated* This field is now part of the `result_metadata` object. - :attr object metadata: (optional) Metadata of the document. 
- :attr str collection_id: (optional) The collection ID of the collection containing the document for this result. - :attr QueryResultResultMetadata result_metadata: (optional) - """ - - def __init__(self, - id=None, - score=None, - metadata=None, - collection_id=None, - result_metadata=None, - **kwargs): - """ - Initialize a QueryNoticesResult object. - - :param str id: (optional) The unique identifier of the document. - :param float score: (optional) *Deprecated* This field is now part of the `result_metadata` object. - :param object metadata: (optional) Metadata of the document. - :param str collection_id: (optional) The collection ID of the collection containing the document for this result. - :param QueryResultResultMetadata result_metadata: (optional) - :param **kwargs: (optional) Any additional properties. - """ - self.id = id - self.score = score - self.metadata = metadata - self.collection_id = collection_id - self.result_metadata = result_metadata - for _key, _value in kwargs.items(): - setattr(self, _key, _value) - - @classmethod - def _from_dict(cls, _dict): - """Initialize a QueryNoticesResult object from a json dictionary.""" - args = {} - xtra = _dict.copy() - if 'id' in _dict: - args['id'] = _dict['id'] - del xtra['id'] - if 'score' in _dict: - args['score'] = _dict['score'] - del xtra['score'] - if 'metadata' in _dict: - args['metadata'] = _dict['metadata'] - del xtra['metadata'] - if 'collection_id' in _dict: - args['collection_id'] = _dict['collection_id'] - del xtra['collection_id'] - if 'result_metadata' in _dict: - args['result_metadata'] = QueryResultResultMetadata._from_dict( - _dict['result_metadata']) - del xtra['result_metadata'] - args.update(xtra) - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'id') and self.id is not None: - _dict['id'] = self.id - if hasattr(self, 'score') and self.score is not None: - _dict['score'] = self.score - if hasattr(self, 
'metadata') and self.metadata is not None: - _dict['metadata'] = self.metadata - if hasattr(self, 'collection_id') and self.collection_id is not None: - _dict['collection_id'] = self.collection_id - if hasattr(self, - 'result_metadata') and self.result_metadata is not None: - _dict['result_metadata'] = self.result_metadata._to_dict() - if hasattr(self, '_additionalProperties'): - for _key in self._additionalProperties: - _value = getattr(self, _key, None) - if _value is not None: - _dict[_key] = _value - return _dict - - def __setattr__(self, name, value): - properties = { - 'id', 'score', 'metadata', 'collection_id', 'result_metadata' - } - if not hasattr(self, '_additionalProperties'): - super(QueryNoticesResult, self).__setattr__('_additionalProperties', - set()) - if name not in properties: - self._additionalProperties.add(name) - super(QueryNoticesResult, self).__setattr__(name, value) - - def __str__(self): - """Return a `str` version of this QueryNoticesResult object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other From 848c93286eacad2fce3d2a1059f5f7e7ecf63e0d Mon Sep 17 00:00:00 2001 From: Erika Dsouza Date: Thu, 15 Feb 2018 11:36:10 -0500 Subject: [PATCH 11/45] Regenerate nbc --- .../natural_language_classifier_v1.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/watson_developer_cloud/natural_language_classifier_v1.py b/watson_developer_cloud/natural_language_classifier_v1.py index ada747d39..9a3bc0051 100644 --- a/watson_developer_cloud/natural_language_classifier_v1.py +++ b/watson_developer_cloud/natural_language_classifier_v1.py @@ -1,6 +1,6 @@ # coding: utf-8 -# Copyright 2017 IBM All Rights 
Reserved. +# Copyright 2018 IBM All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -72,10 +72,9 @@ def __init__(self, url=default_url, username=None, password=None): def classify(self, classifier_id, text): """ - Returns label information for the input. - - The status must be `Available` before you can use the classifier to classify text. - Use `Get information about a classifier` to retrieve the status. + Returns label information for the input. The status must be `Available` before you + can use the classifier to classify text. Use `Get information about a classifier` + to retrieve the status. :param str classifier_id: Classifier ID to use. :param str text: The submitted phrase. @@ -104,8 +103,8 @@ def create_classifier(self, Sends data to create and train a classifier and returns information about the new classifier. - :param file metadata: Metadata in JSON format. The metadata identifies the language of the data, and an optional name to identify the classifier. For details, see the [API reference](https://www.ibm.com/watson/developercloud/natural-language-classifier/api/v1/python.html?python#create-classifier). - :param file training_data: Training data in CSV format. Each text value must have at least one class. The data can include up to 15,000 records. For details, see [Using your own data](https://console.bluemix.net/docs/services/natural-language-classifier/using-your-data.html). + :param file metadata: Metadata in JSON format. The metadata identifies the language of the data, and an optional name to identify the classifier. For details, see the [API reference](https://www.ibm.com/watson/developercloud/natural-language-classifier/api/v1/#create_classifier). + :param file training_data: Training data in CSV format. Each text value must have at least one class. The data can include up to 15,000 records. 
For details, see [Using your own data](https://www.ibm.com/watson/developercloud/doc/natural-language-classifier/using-your-data.html). :param str metadata_filename: The filename for training_metadata. :param str training_data_filename: The filename for training_data. :return: A `dict` containing the `Classifier` response. From 662afd296aefbe7f3ad2585ed2cf69a09c0d0f81 Mon Sep 17 00:00:00 2001 From: Erika Dsouza Date: Thu, 15 Feb 2018 11:47:19 -0500 Subject: [PATCH 12/45] Regenerate nlu --- .../natural_language_understanding_v1.py | 316 +++++++++--------- 1 file changed, 158 insertions(+), 158 deletions(-) diff --git a/watson_developer_cloud/natural_language_understanding_v1.py b/watson_developer_cloud/natural_language_understanding_v1.py index 95f6dfac7..ff87cc011 100644 --- a/watson_developer_cloud/natural_language_understanding_v1.py +++ b/watson_developer_cloud/natural_language_understanding_v1.py @@ -1,6 +1,6 @@ # coding: utf-8 -# Copyright 2017 IBM All Rights Reserved. +# Copyright 2018 IBM All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -219,6 +219,163 @@ def list_models(self): ############################################################################## +class AnalysisResults(object): + """ + Results of the analysis, organized by feature. + + :attr str language: (optional) Language used to analyze the text. + :attr str analyzed_text: (optional) Text that was used in the analysis. + :attr str retrieved_url: (optional) URL that was used to retrieve HTML content. + :attr Usage usage: (optional) API usage information for the request. + :attr list[ConceptsResult] concepts: (optional) The general concepts referenced or alluded to in the specified content. + :attr list[EntitiesResult] entities: (optional) The important entities in the specified content. 
+ :attr list[KeywordsResult] keywords: (optional) The important keywords in content organized by relevance. + :attr list[CategoriesResult] categories: (optional) The hierarchical 5-level taxonomy the content is categorized into. + :attr EmotionResult emotion: (optional) The anger, disgust, fear, joy, or sadness conveyed by the content. + :attr MetadataResult metadata: (optional) The metadata holds author information, publication date and the title of the text/HTML content. + :attr list[RelationsResult] relations: (optional) The relationships between entities in the content. + :attr list[SemanticRolesResult] semantic_roles: (optional) The subjects of actions and the objects the actions act upon. + :attr SentimentResult sentiment: (optional) The sentiment of the content. + """ + + def __init__(self, + language=None, + analyzed_text=None, + retrieved_url=None, + usage=None, + concepts=None, + entities=None, + keywords=None, + categories=None, + emotion=None, + metadata=None, + relations=None, + semantic_roles=None, + sentiment=None): + """ + Initialize a AnalysisResults object. + + :param str language: (optional) Language used to analyze the text. + :param str analyzed_text: (optional) Text that was used in the analysis. + :param str retrieved_url: (optional) URL that was used to retrieve HTML content. + :param Usage usage: (optional) API usage information for the request. + :param list[ConceptsResult] concepts: (optional) The general concepts referenced or alluded to in the specified content. + :param list[EntitiesResult] entities: (optional) The important entities in the specified content. + :param list[KeywordsResult] keywords: (optional) The important keywords in content organized by relevance. + :param list[CategoriesResult] categories: (optional) The hierarchical 5-level taxonomy the content is categorized into. + :param EmotionResult emotion: (optional) The anger, disgust, fear, joy, or sadness conveyed by the content. 
+ :param MetadataResult metadata: (optional) The metadata holds author information, publication date and the title of the text/HTML content. + :param list[RelationsResult] relations: (optional) The relationships between entities in the content. + :param list[SemanticRolesResult] semantic_roles: (optional) The subjects of actions and the objects the actions act upon. + :param SentimentResult sentiment: (optional) The sentiment of the content. + """ + self.language = language + self.analyzed_text = analyzed_text + self.retrieved_url = retrieved_url + self.usage = usage + self.concepts = concepts + self.entities = entities + self.keywords = keywords + self.categories = categories + self.emotion = emotion + self.metadata = metadata + self.relations = relations + self.semantic_roles = semantic_roles + self.sentiment = sentiment + + @classmethod + def _from_dict(cls, _dict): + """Initialize a AnalysisResults object from a json dictionary.""" + args = {} + if 'language' in _dict: + args['language'] = _dict['language'] + if 'analyzed_text' in _dict: + args['analyzed_text'] = _dict['analyzed_text'] + if 'retrieved_url' in _dict: + args['retrieved_url'] = _dict['retrieved_url'] + if 'usage' in _dict: + args['usage'] = Usage._from_dict(_dict['usage']) + if 'concepts' in _dict: + args['concepts'] = [ + ConceptsResult._from_dict(x) for x in _dict['concepts'] + ] + if 'entities' in _dict: + args['entities'] = [ + EntitiesResult._from_dict(x) for x in _dict['entities'] + ] + if 'keywords' in _dict: + args['keywords'] = [ + KeywordsResult._from_dict(x) for x in _dict['keywords'] + ] + if 'categories' in _dict: + args['categories'] = [ + CategoriesResult._from_dict(x) for x in _dict['categories'] + ] + if 'emotion' in _dict: + args['emotion'] = EmotionResult._from_dict(_dict['emotion']) + if 'metadata' in _dict: + args['metadata'] = MetadataResult._from_dict(_dict['metadata']) + if 'relations' in _dict: + args['relations'] = [ + RelationsResult._from_dict(x) for x in 
_dict['relations'] + ] + if 'semantic_roles' in _dict: + args['semantic_roles'] = [ + SemanticRolesResult._from_dict(x) + for x in _dict['semantic_roles'] + ] + if 'sentiment' in _dict: + args['sentiment'] = SentimentResult._from_dict(_dict['sentiment']) + return cls(**args) + + def _to_dict(self): + """Return a json dictionary representing this model.""" + _dict = {} + if hasattr(self, 'language') and self.language is not None: + _dict['language'] = self.language + if hasattr(self, 'analyzed_text') and self.analyzed_text is not None: + _dict['analyzed_text'] = self.analyzed_text + if hasattr(self, 'retrieved_url') and self.retrieved_url is not None: + _dict['retrieved_url'] = self.retrieved_url + if hasattr(self, 'usage') and self.usage is not None: + _dict['usage'] = self.usage._to_dict() + if hasattr(self, 'concepts') and self.concepts is not None: + _dict['concepts'] = [x._to_dict() for x in self.concepts] + if hasattr(self, 'entities') and self.entities is not None: + _dict['entities'] = [x._to_dict() for x in self.entities] + if hasattr(self, 'keywords') and self.keywords is not None: + _dict['keywords'] = [x._to_dict() for x in self.keywords] + if hasattr(self, 'categories') and self.categories is not None: + _dict['categories'] = [x._to_dict() for x in self.categories] + if hasattr(self, 'emotion') and self.emotion is not None: + _dict['emotion'] = self.emotion._to_dict() + if hasattr(self, 'metadata') and self.metadata is not None: + _dict['metadata'] = self.metadata._to_dict() + if hasattr(self, 'relations') and self.relations is not None: + _dict['relations'] = [x._to_dict() for x in self.relations] + if hasattr(self, 'semantic_roles') and self.semantic_roles is not None: + _dict['semantic_roles'] = [ + x._to_dict() for x in self.semantic_roles + ] + if hasattr(self, 'sentiment') and self.sentiment is not None: + _dict['sentiment'] = self.sentiment._to_dict() + return _dict + + def __str__(self): + """Return a `str` version of this AnalysisResults 
object.""" + return json.dumps(self._to_dict(), indent=2) + + def __eq__(self, other): + """Return `true` when self and other are equal, false otherwise.""" + if not isinstance(other, self.__class__): + return False + return self.__dict__ == other.__dict__ + + def __ne__(self, other): + """Return `true` when self and other are not equal, false otherwise.""" + return not self == other + + class Author(object): """ The author of the analyzed content. @@ -2636,160 +2793,3 @@ def __eq__(self, other): def __ne__(self, other): """Return `true` when self and other are not equal, false otherwise.""" return not self == other - - -class AnalysisResults(object): - """ - Results of the analysis, organized by feature. - - :attr str language: (optional) Language used to analyze the text. - :attr str analyzed_text: (optional) Text that was used in the analysis. - :attr str retrieved_url: (optional) URL that was used to retrieve HTML content. - :attr Usage usage: (optional) API usage information for the request. - :attr list[ConceptsResult] concepts: (optional) The general concepts referenced or alluded to in the specified content. - :attr list[EntitiesResult] entities: (optional) The important entities in the specified content. - :attr list[KeywordsResult] keywords: (optional) The important keywords in content organized by relevance. - :attr list[CategoriesResult] categories: (optional) The hierarchical 5-level taxonomy the content is categorized into. - :attr EmotionResult emotion: (optional) The anger, disgust, fear, joy, or sadness conveyed by the content. - :attr MetadataResult metadata: (optional) The metadata holds author information, publication date and the title of the text/HTML content. - :attr list[RelationsResult] relations: (optional) The relationships between entities in the content. - :attr list[SemanticRolesResult] semantic_roles: (optional) The subjects of actions and the objects the actions act upon. 
- :attr SentimentResult sentiment: (optional) The sentiment of the content. - """ - - def __init__(self, - concepts=None, - entities=None, - keywords=None, - categories=None, - emotion=None, - metadata=None, - relations=None, - semantic_roles=None, - sentiment=None, - language=None, - analyzed_text=None, - retrieved_url=None, - usage=None): - """ - Initialize a AnalysisResults object. - - :param list[ConceptsResult] concepts: (optional) The general concepts referenced or alluded to in the specified content. - :param list[EntitiesResult] entities: (optional) The important entities in the specified content. - :param list[KeywordsResult] keywords: (optional) The important keywords in content organized by relevance. - :param list[CategoriesResult] categories: (optional) The hierarchical 5-level taxonomy the content is categorized into. - :param EmotionResult emotion: (optional) The anger, disgust, fear, joy, or sadness conveyed by the content. - :param MetadataResult metadata: (optional) The metadata holds author information, publication date and the title of the text/HTML content. - :param list[RelationsResult] relations: (optional) The relationships between entities in the content. - :param list[SemanticRolesResult] semantic_roles: (optional) The subjects of actions and the objects the actions act upon. - :param SentimentResult sentiment: (optional) The sentiment of the content. - :param str language: (optional) Language used to analyze the text. - :param str analyzed_text: (optional) Text that was used in the analysis. - :param str retrieved_url: (optional) URL that was used to retrieve HTML content. - :param Usage usage: (optional) API usage information for the request. 
- """ - self.language = language - self.analyzed_text = analyzed_text - self.retrieved_url = retrieved_url - self.usage = usage - self.concepts = concepts - self.entities = entities - self.keywords = keywords - self.categories = categories - self.emotion = emotion - self.metadata = metadata - self.relations = relations - self.semantic_roles = semantic_roles - self.sentiment = sentiment - - @classmethod - def _from_dict(cls, _dict): - """Initialize a AnalysisResults object from a json dictionary.""" - args = {} - if 'language' in _dict: - args['language'] = _dict['language'] - if 'analyzed_text' in _dict: - args['analyzed_text'] = _dict['analyzed_text'] - if 'retrieved_url' in _dict: - args['retrieved_url'] = _dict['retrieved_url'] - if 'usage' in _dict: - args['usage'] = Usage._from_dict(_dict['usage']) - if 'concepts' in _dict: - args['concepts'] = [ - ConceptsResult._from_dict(x) for x in _dict['concepts'] - ] - if 'entities' in _dict: - args['entities'] = [ - EntitiesResult._from_dict(x) for x in _dict['entities'] - ] - if 'keywords' in _dict: - args['keywords'] = [ - KeywordsResult._from_dict(x) for x in _dict['keywords'] - ] - if 'categories' in _dict: - args['categories'] = [ - CategoriesResult._from_dict(x) for x in _dict['categories'] - ] - if 'emotion' in _dict: - args['emotion'] = EmotionResult._from_dict(_dict['emotion']) - if 'metadata' in _dict: - args['metadata'] = MetadataResult._from_dict(_dict['metadata']) - if 'relations' in _dict: - args['relations'] = [ - RelationsResult._from_dict(x) for x in _dict['relations'] - ] - if 'semantic_roles' in _dict: - args['semantic_roles'] = [ - SemanticRolesResult._from_dict(x) - for x in _dict['semantic_roles'] - ] - if 'sentiment' in _dict: - args['sentiment'] = SentimentResult._from_dict(_dict['sentiment']) - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'language') and self.language is not None: - _dict['language'] = 
self.language - if hasattr(self, 'analyzed_text') and self.analyzed_text is not None: - _dict['analyzed_text'] = self.analyzed_text - if hasattr(self, 'retrieved_url') and self.retrieved_url is not None: - _dict['retrieved_url'] = self.retrieved_url - if hasattr(self, 'usage') and self.usage is not None: - _dict['usage'] = self.usage._to_dict() - if hasattr(self, 'concepts') and self.concepts is not None: - _dict['concepts'] = [x._to_dict() for x in self.concepts] - if hasattr(self, 'entities') and self.entities is not None: - _dict['entities'] = [x._to_dict() for x in self.entities] - if hasattr(self, 'keywords') and self.keywords is not None: - _dict['keywords'] = [x._to_dict() for x in self.keywords] - if hasattr(self, 'categories') and self.categories is not None: - _dict['categories'] = [x._to_dict() for x in self.categories] - if hasattr(self, 'emotion') and self.emotion is not None: - _dict['emotion'] = self.emotion._to_dict() - if hasattr(self, 'metadata') and self.metadata is not None: - _dict['metadata'] = self.metadata._to_dict() - if hasattr(self, 'relations') and self.relations is not None: - _dict['relations'] = [x._to_dict() for x in self.relations] - if hasattr(self, 'semantic_roles') and self.semantic_roles is not None: - _dict['semantic_roles'] = [ - x._to_dict() for x in self.semantic_roles - ] - if hasattr(self, 'sentiment') and self.sentiment is not None: - _dict['sentiment'] = self.sentiment._to_dict() - return _dict - - def __str__(self): - """Return a `str` version of this AnalysisResults object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self == other From 4caa81a48e181b4566e34fb0939de88f53bb48a9 Mon Sep 17 00:00:00 2001 
From: Erika Dsouza Date: Thu, 15 Feb 2018 15:31:40 -0500 Subject: [PATCH 13/45] Regenerate visual recognition --- .../test_integration_visual_recognition.py | 61 ++--- test/unit/test_visual_recognition_v3.py | 2 +- .../visual_recognition_v3.py | 251 +++++++++++++----- 3 files changed, 203 insertions(+), 111 deletions(-) diff --git a/test/integration/test_integration_visual_recognition.py b/test/integration/test_integration_visual_recognition.py index c4322ff23..b35af246b 100644 --- a/test/integration/test_integration_visual_recognition.py +++ b/test/integration/test_integration_visual_recognition.py @@ -3,54 +3,41 @@ import os from os.path import join, dirname from unittest import TestCase +import json pytestmark = pytest.mark.skip('Run These Manually, they are destructive') - class IntegrationTestVisualRecognitionV3(TestCase): def setUp(self): self.visual_recognition = watson_developer_cloud.VisualRecognitionV3('2016-05-20', api_key=os.environ.get( - 'VISUAL_RECOGNITION_API_KEY')) + 'YOUR API KEY')) self.visual_recognition.set_default_headers({'X-Watson-Learning-Opt-Out': '1', 'X-Watson-Test': '1'}) - self.collections = self.visual_recognition.list_collections() - - collection_json = self.visual_recognition.create_collection(name="test_integration_collection") - self.collection_id = collection_json['collection_id'] - - def tearDown(self): - results = self.visual_recognition.delete_collection(collection_id=self.collection_id) - assert not results - - def test_list_collections(self): - results = self.visual_recognition.list_collections() - assert len(results['collections']) - len(self.collections['collections']) == 1 - - def test_add_image_check_metadata_delete_image(self): - with open(join(dirname(__file__), '../resources/face.jpg'), 'rb') as image_file: - self.visual_recognition.add_image(collection_id=self.collection_id, image_file=image_file, metadata={'name': 'face'}) - images = self.visual_recognition.list_images(self.collection_id) - assert 
len(images['images']) == 1 + def test_classify(self): + car_path = join(dirname(__file__), '../../resources/cars.zip') + with open(car_path, 'rb') as images_file: + parameters = json.dumps({'threshold': 0.1, 'classifier_ids': ['CarsvsTrucks_1479118188', 'default']}) + car_results = self.visual_recognition.classify(images_file=images_file, + parameters=parameters) + assert car_results is not None - image_id = images['images'][0]['image_id'] - meta = self.visual_recognition.get_image_metadata(collection_id=self.collection_id, image_id=image_id) - assert not meta + def test_detect_faces(self): + output = self.visual_recognition.detect_faces(parameters=json.dumps({'url': 'https://www.ibm.com/ibm/ginni/images/ginni_bio_780x981_v4_03162016.jpg'})) + assert output is not None - assert meta['name'] == 'face' - assert 'neverland' not in meta + def test_custom_classifier(self): + with open(os.path.join(os.path.dirname(__file__), '../../resources/cars.zip'), 'rb') as cars, \ + open(os.path.join(os.path.dirname(__file__), '../../resources/trucks.zip'), 'rb') as trucks: + classifier = self.visual_recognition.create_classifier('Cars vs Trucks', classname_positive_examples=cars, negative_examples=trucks) - self.visual_recognition.set_image_metadata(collection_id=self.collection_id, image_id=image_id, metadata={'location': 'neverland'}) - meta = self.visual_recognition.get_image_metadata(collection_id=self.collection_id, image_id=image_id) - assert not meta - assert 'name' not in meta - assert meta['location'] == 'neverland' + assert classifier is not None - self.visual_recognition.delete_image(collection_id=self.collection_id, image_id=image_id) + classifier_id = classifier['classifier_id'] + output = self.visual_recognition.get_classifier(classifier_id) + assert output is not None - images = self.visual_recognition.list_images(self.collection_id) - assert images['images'] + classifiers = self.visual_recognition.list_classifiers() + assert classifiers is not None - def 
test_find_similar(self): - with open(join(dirname(__file__), '../resources/face.jpg'), 'rb') as image_file: - results = self.visual_recognition.find_similar(collection_id=self.collection_id, image_file=image_file) - assert results['images_processed'] == 1 + output = self.visual_recognition.delete_classifier(classifier_id) + assert output is not None diff --git a/test/unit/test_visual_recognition_v3.py b/test/unit/test_visual_recognition_v3.py index 11e964f7e..320c7833e 100644 --- a/test/unit/test_visual_recognition_v3.py +++ b/test/unit/test_visual_recognition_v3.py @@ -98,7 +98,7 @@ def test_create_classifier(self): with open(os.path.join(os.path.dirname(__file__), '../../resources/cars.zip'), 'rb') as cars, \ open(os.path.join(os.path.dirname(__file__), '../../resources/trucks.zip'), 'rb') as trucks: - vr_service.create_classifier('Cars vs Trucks', cars_positive_examples=cars, negative_examples=trucks) + vr_service.create_classifier('Cars vs Trucks', classname_positive_examples=cars, negative_examples=trucks) assert len(responses.calls) == 1 diff --git a/watson_developer_cloud/visual_recognition_v3.py b/watson_developer_cloud/visual_recognition_v3.py index e521e804b..12c33b0c1 100644 --- a/watson_developer_cloud/visual_recognition_v3.py +++ b/watson_developer_cloud/visual_recognition_v3.py @@ -1,6 +1,6 @@ # coding: utf-8 -# Copyright 2017 IBM All Rights Reserved. +# Copyright 2018 IBM All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. """ -**Important**: As of September 8, 2017, the beta period for Similarity Search is closed. +**Important:** As of September 8, 2017, the beta period for Similarity Search is closed. 
For more information, see [Visual Recognition API – Similarity Search Update](https://www.ibm.com/blogs/bluemix/2017/08/visual-recognition-api-similarity-search-update). @@ -22,8 +22,8 @@ scenes, objects, and faces in images you upload to the service. You can create and train a custom classifier to identify subjects that suit your needs. -**Tip**: To test calls to the **Custom classifiers** methods with the API explorer, -provide your `api_key` from your Bluemix service instance. +**Tip:** To test calls to the **Custom classifiers** methods with the API explorer, +provide your `api_key` from your IBM® Cloud service instance. """ from __future__ import absolute_import @@ -75,7 +75,7 @@ def __init__(self, version, url=default_url, api_key=None): self.version = version ######################### - # classify + # general ######################### def classify(self, @@ -87,8 +87,10 @@ def classify(self, """ Classify images. - :param file images_file: An image file (.jpg, .png) or .zip file with images. Include no more than 20 images and limit the .zip file to 5 MB. You can also include images with the `url` property in the **parameters** object. - :param str parameters: Specifies input parameters. The parameter can include these inputs in a JSON object: - url: A string with the image URL to analyze. You can also include images in the **images_file** parameter. - classifier_ids: An array of classifier IDs to classify the images against. - owners: An array with the values IBM, me, or both to specify which classifiers to run. - threshold: A floating point value that specifies the minimum score a class must have to be displayed in the response. For example: {\"url\": \"...\", \"classifier_ids\": [\"...\",\"...\"], \"owners\": [\"IBM\", \"me\"], \"threshold\": 0.4}. + Classify images with built-in or custom classifiers. + + :param file images_file: An image file (.jpg, .png) or .zip file with images. Maximum image size is 10 MB. 
Include no more than 20 images and limit the .zip file to 100 MB. Encode the image and .zip file names in UTF-8 if they contain non-ASCII characters. The service assumes UTF-8 encoding if it encounters non-ASCII characters. You can also include images with the `url` property in the **parameters** object. + :param str parameters: A JSON object that specifies additional request options. The parameter can be sent as a string or a file, and can include these inputs: - **url**: A string with the image URL to analyze. Must be in .jpg, or .png format. The minimum recommended pixel density is 32X32 pixels per inch, and the maximum image size is 10 MB. You can also include images in the **images_file** parameter. - **threshold**: A floating point value that specifies the minimum score a class must have to be displayed in the response. The default threshold for returning scores from a classifier is `0.5`. Set the threshold to `0.0` to ignore the classification score and return all values. - **owners**: An array of the categories of classifiers to apply. Use `IBM` to classify against the `default` general classifier, and use `me` to classify against your custom classifiers. To analyze the image against both classifier categories, set the value to both `IBM` and `me`. The built-in `default` classifier is used if both **classifier_ids** and **owners** parameters are empty. The **classifier_ids** parameter overrides **owners**, so make sure that **classifier_ids** is empty. - **classifier_ids**: Specifies which classifiers to apply and overrides the **owners** parameter. You can specify both custom and built-in classifiers. The built-in `default` classifier is used if both **classifier_ids** and **owners** parameters are empty. The following built-in classifier IDs require no training: - `default`: Returns classes from thousands of general tags. - `food`: (Beta) Enhances specificity and accuracy for images of food items. 
- `explicit`: (Beta) Evaluates whether the image might be pornographic. Example: `{\"classifier_ids\":[\"CarsvsTrucks_1479118188\",\"explicit\"],\"threshold\":0.6}`. :param str accept_language: Specifies the language of the output class names. Can be `en` (English), `ar` (Arabic), `de` (German), `es` (Spanish), `it` (Italian), `ja` (Japanese), or `ko` (Korean). Classes for which no translation is available are omitted. The response might not be in the specified language under these conditions: - English is returned when the requested language is not supported. - Classes are not returned when there is no translation for them. - Custom classifiers returned with this method return tags in the language of the custom classifier. :param str images_file_content_type: The content type of images_file. :param str images_filename: The filename for images_file. @@ -119,16 +121,25 @@ def classify(self, accept_json=True) return response + ######################### + # face + ######################### + def detect_faces(self, images_file=None, parameters=None, images_file_content_type=None, images_filename=None): """ - Detect faces in an image. + Detect faces in images. + + Analyze and get data about faces in images. Responses can include estimated age + and gender, and the service can identify celebrities. This feature uses a built-in + classifier, so you do not train it on custom classifiers. The Detect faces method + does not support general biometric facial recognition. :param file images_file: An image file (.jpg, .png) or .zip file with images. Include no more than 15 images. You can also include images with the `url` property in the **parameters** object. All faces are detected, but if there are more than 10 faces in an image, age and gender confidence scores might return scores of 0. - :param str parameters: A JSON string containing the image URL to analyze. For example: {\"url\": \"...\"}. 
+ :param str parameters: A JSON object that specifies a single image (.jpg, .png) to analyze by URL. The parameter can be sent as a string or a file. Example: `{\"url\":\"http://www.example.com/images/myimage.jpg\"}`. :param str images_file_content_type: The content type of images_file. :param str images_filename: The filename for images_file. :return: A `dict` containing the `DetectedFaces` response. @@ -157,38 +168,73 @@ def detect_faces(self, return response ######################### - # customClassifiers + # custom ######################### def create_classifier(self, - name, - **kwargs): - """ - Create a classifier. - - :param str name: The name of the new classifier. Cannot contain special characters. - :param file _positive_examples: A compressed (.zip) file of images that depict the visual subject for a class within the new classifier. Must contain a minimum of 10 images. The swagger limits you to training only one class. To train more classes, use the API functionality. - :param file negative_examples: A compressed (.zip) file of images that do not depict the visual subject of any of the classes of the new classifier. Must contain a minimum of 10 images. - :return: A `dict` containing the `Classifier` response. - :rtype: dict - """ - if name is None: - raise ValueError('name must be provided') - params = {'version': self.version} - data = {'name': name} - url = '/v3/classifiers' - response = self.request( - method='POST', - url=url, - params=params, - data=data, - files=kwargs, - accept_json=True) - return response + name, + classname_positive_examples, + negative_examples=None, + classname_positive_examples_filename=None, + negative_examples_filename=None): + """ + Create a classifier. + + Train a new multi-faceted classifier on the uploaded image data. Create your + custom classifier with positive or negative examples. Include at least two sets of + examples, either two positive example files or one positive and one negative file. 
+ You can upload a maximum of 256 MB per call. Encode all names in UTF-8 if they + contain non-ASCII characters (.zip and image file names, and classifier and class + names). The service assumes UTF-8 encoding if it encounters non-ASCII characters. + + :param str name: The name of the new classifier. Encode special characters in UTF-8. + :param file classname_positive_examples: A .zip file of images that depict the visual subject of a class in the new classifier. You can include more than one positive example file in a call. Append `_positive_examples` to the form name. The prefix is used as the class name. For example, `goldenretriever_positive_examples` creates the class **goldenretriever**. Include at least 10 images in .jpg or .png format. The minimum recommended image resolution is 32X32 pixels. The maximum number of images is 10,000 images or 100 MB per .zip file. Encode special characters in the file name in UTF-8. The API explorer limits you to training only one class. To train more classes, use the API functionality. + :param file negative_examples: A compressed (.zip) file of images that do not depict the visual subject of any of the classes of the new classifier. Must contain a minimum of 10 images. Encode special characters in the file name in UTF-8. + :param str classname_positive_examples_filename: The filename for classname_positive_examples. + :param str negative_examples_filename: The filename for negative_examples. + :return: A `dict` containing the `Classifier` response. 
+ :rtype: dict + """ + if name is None: + raise ValueError('name must be provided') + if classname_positive_examples is None: + raise ValueError('classname_positive_examples must be provided') + params = {'version': self.version} + name_tuple = (None, name, 'text/plain') + if not classname_positive_examples_filename and hasattr( + classname_positive_examples, 'name'): + classname_positive_examples_filename = classname_positive_examples.name + mime_type = 'application/octet-stream' + classname_positive_examples_tuple = ( + classname_positive_examples_filename, classname_positive_examples, + mime_type) + negative_examples_tuple = None + if negative_examples: + if not negative_examples_filename and hasattr( + negative_examples, 'name'): + negative_examples_filename = negative_examples.name + if not negative_examples_filename: + raise ValueError('negative_examples_filename must be provided') + mime_type = 'application/octet-stream' + negative_examples_tuple = (negative_examples_filename, + negative_examples, mime_type) + url = '/v3/classifiers' + response = self.request( + method='POST', + url=url, + params=params, + files={ + 'name': name_tuple, + 'classname_positive_examples': + classname_positive_examples_tuple, + 'negative_examples': negative_examples_tuple + }, + accept_json=True) + return response def delete_classifier(self, classifier_id): """ - Delete a custom classifier. + Delete a classifier. :param str classifier_id: The ID of the classifier. :rtype: None @@ -222,7 +268,7 @@ def list_classifiers(self, verbose=None): """ Retrieve a list of custom classifiers. - :param bool verbose: Specify true to return classifier details. Omit this parameter to return a brief list of classifiers. + :param bool verbose: Specify `true` to return details about the classifiers. Omit this parameter to return a brief list of classifiers. :return: A `dict` containing the `Classifiers` response. 
:rtype: dict """ @@ -234,30 +280,71 @@ def list_classifiers(self, verbose=None): def update_classifier(self, classifier_id, - **kwargs): + classname_positive_examples=None, + negative_examples=None, + classname_positive_examples_filename=None, + negative_examples_filename=None): """ Update a classifier. + Update a custom classifier by adding new positive or negative classes (examples) + or by adding new images to existing classes. You must supply at least one set of + positive or negative examples. For details, see [Updating custom + classifiers](https://console.bluemix.net/docs/services/visual-recognition/customizing.html#updating-custom-classifiers). + Encode all names in UTF-8 if they contain non-ASCII characters (.zip and image + file names, and classifier and class names). The service assumes UTF-8 encoding if + it encounters non-ASCII characters. **Important:** You can't update a custom + classifier with an API key for a Lite plan. To update a custom classifer on a Lite + plan, create another service instance on a Standard plan and re-create your custom + classifier. **Tip:** Don't make retraining calls on a classifier until the status + is ready. When you submit retraining requests in parallel, the last request + overwrites the previous requests. The retrained property shows the last time the + classifier retraining finished. + :param str classifier_id: The ID of the classifier. - :param file _positive_examples: A compressed (.zip) file of images that depict the visual subject for a class within the classifier. Must contain a minimum of 10 images. - :param file negative_examples: A compressed (.zip) file of images that do not depict the visual subject of any of the classes of the new classifier. Must contain a minimum of 10 images. + :param file classname_positive_examples: A .zip file of images that depict the visual subject of a class in the classifier. The positive examples create or update classes in the classifier. 
You can include more than one positive example file in a call. Append `_positive_examples` to the form name. The prefix is used to name the class. For example, `goldenretriever_positive_examples` creates the class `goldenretriever`. Include at least 10 images in .jpg or .png format. The minimum recommended image resolution is 32X32 pixels. The maximum number of images is 10,000 images or 100 MB per .zip file. Encode special characters in the file name in UTF-8. + :param file negative_examples: A compressed (.zip) file of images that do not depict the visual subject of any of the classes of the new classifier. Must contain a minimum of 10 images. Encode special characters in the file name in UTF-8. + :param str classname_positive_examples_filename: The filename for classname_positive_examples. + :param str negative_examples_filename: The filename for negative_examples. :return: A `dict` containing the `Classifier` response. :rtype: dict """ if classifier_id is None: raise ValueError('classifier_id must be provided') params = {'version': self.version} + classname_positive_examples_tuple = None + if classname_positive_examples: + if not classname_positive_examples_filename and hasattr( + classname_positive_examples, 'name'): + classname_positive_examples_filename = classname_positive_examples.name + mime_type = 'application/octet-stream' + classname_positive_examples_tuple = ( + classname_positive_examples_filename, + classname_positive_examples, mime_type) + negative_examples_tuple = None + if negative_examples: + if not negative_examples_filename and hasattr( + negative_examples, 'name'): + negative_examples_filename = negative_examples.name + if not negative_examples_filename: + raise ValueError('negative_examples_filename must be provided') + mime_type = 'application/octet-stream' + negative_examples_tuple = (negative_examples_filename, + negative_examples, mime_type) url = '/v3/classifiers/{0}'.format( *self._encode_path_vars(classifier_id)) response = 
self.request( method='POST', url=url, params=params, - files=kwargs, + files={ + 'classname_positive_examples': + classname_positive_examples_tuple, + 'negative_examples': negative_examples_tuple + }, accept_json=True) return response - ############################################################################## # Models ############################################################################## @@ -316,7 +403,7 @@ class ClassResult(object): Result of a class within a classifier. :attr str class_name: The name of the class. - :attr float score: (optional) Confidence score for the property. Scores range from 0-1, with a higher score indicating greater correlation. + :attr float score: (optional) Confidence score for the property in the range of 0 to 1. A higher score indicates greater likelihood that the class is depicted in the image. The default threshold for returning scores from a classifier is 0.5. :attr str type_hierarchy: (optional) Knowledge graph of the property. For example, `People/Leaders/Presidents/USA/Barack Obama`. Included only if identified. """ @@ -325,7 +412,7 @@ def __init__(self, class_name, score=None, type_hierarchy=None): Initialize a ClassResult object. :param str class_name: The name of the class. - :param float score: (optional) Confidence score for the property. Scores range from 0-1, with a higher score indicating greater correlation. + :param float score: (optional) Confidence score for the property in the range of 0 to 1. A higher score indicates greater likelihood that the class is depicted in the image. The default threshold for returning scores from a classifier is 0.5. :param str type_hierarchy: (optional) Knowledge graph of the property. For example, `People/Leaders/Presidents/USA/Barack Obama`. Included only if identified. """ self.class_name = class_name @@ -540,13 +627,14 @@ class Classifier(object): """ Information about a classifier. - :attr str classifier_id: The ID of the classifier. 
- :attr str name: The name of the classifier. - :attr str owner: (optional) Unique ID of the account who owns the classifier. + :attr str classifier_id: ID of a classifier identified in the image. + :attr str name: Name of the classifier. + :attr str owner: (optional) Unique ID of the account who owns the classifier. Returned when verbose=`true`. Might not be returned by some requests. :attr str status: (optional) The training status of classifier. :attr str explanation: (optional) If classifier training has failed, this field may explain why. - :attr datetime created: (optional) The time and date when classifier was created. - :attr list[Class] classes: (optional) An array of classes that define a classifier. + :attr datetime created: (optional) Date and time in Coordinated Universal Time that the classifier was created. + :attr list[Class] classes: (optional) Array of classes that define a classifier. + :attr datetime retrained: (optional) Date and time in Coordinated Universal Time that the classifier was updated. Returned when verbose=`true`. Might not be returned by some requests. """ def __init__(self, @@ -556,17 +644,19 @@ def __init__(self, status=None, explanation=None, created=None, - classes=None): + classes=None, + retrained=None): """ Initialize a Classifier object. - :param str classifier_id: The ID of the classifier. - :param str name: The name of the classifier. - :param str owner: (optional) Unique ID of the account who owns the classifier. + :param str classifier_id: ID of a classifier identified in the image. + :param str name: Name of the classifier. + :param str owner: (optional) Unique ID of the account who owns the classifier. Returned when verbose=`true`. Might not be returned by some requests. :param str status: (optional) The training status of classifier. :param str explanation: (optional) If classifier training has failed, this field may explain why. - :param datetime created: (optional) The time and date when classifier was created. 
- :param list[Class] classes: (optional) An array of classes that define a classifier. + :param datetime created: (optional) Date and time in Coordinated Universal Time that the classifier was created. + :param list[Class] classes: (optional) Array of classes that define a classifier. + :param datetime retrained: (optional) Date and time in Coordinated Universal Time that the classifier was updated. Returned when verbose=`true`. Might not be returned by some requests. """ self.classifier_id = classifier_id self.name = name @@ -575,6 +665,7 @@ def __init__(self, self.explanation = explanation self.created = created self.classes = classes + self.retrained = retrained @classmethod def _from_dict(cls, _dict): @@ -601,6 +692,8 @@ def _from_dict(cls, _dict): args['created'] = string_to_datetime(_dict['created']) if 'classes' in _dict: args['classes'] = [Class._from_dict(x) for x in _dict['classes']] + if 'retrained' in _dict: + args['retrained'] = string_to_datetime(_dict['retrained']) return cls(**args) def _to_dict(self): @@ -620,6 +713,8 @@ def _to_dict(self): _dict['created'] = datetime_to_string(self.created) if hasattr(self, 'classes') and self.classes is not None: _dict['classes'] = [x._to_dict() for x in self.classes] + if hasattr(self, 'retrained') and self.retrained is not None: + _dict['retrained'] = datetime_to_string(self.retrained) return _dict def __str__(self): @@ -642,8 +737,8 @@ class ClassifierResult(object): Classifier and score combination. :attr str name: Name of the classifier. - :attr str classifier_id: Classifier ID. Only returned if custom classifier. - :attr list[ClassResult] classes: An array of classes within a classifier. + :attr str classifier_id: The ID of a classifier identified in the image. + :attr list[ClassResult] classes: An array of classes within the classifier. """ def __init__(self, name, classifier_id, classes): @@ -651,8 +746,8 @@ def __init__(self, name, classifier_id, classes): Initialize a ClassifierResult object. 
:param str name: Name of the classifier. - :param str classifier_id: Classifier ID. Only returned if custom classifier. - :param list[ClassResult] classes: An array of classes within a classifier. + :param str classifier_id: The ID of a classifier identified in the image. + :param list[ClassResult] classes: An array of classes within the classifier. """ self.name = name self.classifier_id = classifier_id @@ -834,44 +929,54 @@ class ErrorInfo(object): Information about what might have caused a failure, such as an image that is too large. Not returned when there is no error. - :attr str error_id: Codified error string. For example, `limit_exceeded`. + :attr int code: HTTP status code. :attr str description: Human-readable error description. For example, `File size limit exceeded`. + :attr str error_id: Codified error string. For example, `limit_exceeded`. """ - def __init__(self, error_id, description): + def __init__(self, code, description, error_id): """ Initialize a ErrorInfo object. - :param str error_id: Codified error string. For example, `limit_exceeded`. + :param int code: HTTP status code. :param str description: Human-readable error description. For example, `File size limit exceeded`. + :param str error_id: Codified error string. For example, `limit_exceeded`. 
""" - self.error_id = error_id + self.code = code self.description = description + self.error_id = error_id @classmethod def _from_dict(cls, _dict): """Initialize a ErrorInfo object from a json dictionary.""" args = {} - if 'error_id' in _dict: - args['error_id'] = _dict['error_id'] + if 'code' in _dict: + args['code'] = _dict['code'] else: raise ValueError( - 'Required property \'error_id\' not present in ErrorInfo JSON') + 'Required property \'code\' not present in ErrorInfo JSON') if 'description' in _dict: args['description'] = _dict['description'] else: raise ValueError( 'Required property \'description\' not present in ErrorInfo JSON' ) + if 'error_id' in _dict: + args['error_id'] = _dict['error_id'] + else: + raise ValueError( + 'Required property \'error_id\' not present in ErrorInfo JSON') return cls(**args) def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} - if hasattr(self, 'error_id') and self.error_id is not None: - _dict['error_id'] = self.error_id + if hasattr(self, 'code') and self.code is not None: + _dict['code'] = self.code if hasattr(self, 'description') and self.description is not None: _dict['description'] = self.description + if hasattr(self, 'error_id') and self.error_id is not None: + _dict['error_id'] = self.error_id return _dict def __str__(self): @@ -964,7 +1069,7 @@ class FaceAge(object): :attr int min: (optional) Estimated minimum age. :attr int max: (optional) Estimated maximum age. - :attr float score: (optional) Confidence score for the property. Scores range from 0-1, with a higher score indicating greater correlation. + :attr float score: (optional) Confidence score for the property in the range of 0 to 1. A higher score indicates greater likelihood that the class is depicted in the image. The default threshold for returning scores from a classifier is 0.5. 
""" def __init__(self, min=None, max=None, score=None): @@ -973,7 +1078,7 @@ def __init__(self, min=None, max=None, score=None): :param int min: (optional) Estimated minimum age. :param int max: (optional) Estimated maximum age. - :param float score: (optional) Confidence score for the property. Scores range from 0-1, with a higher score indicating greater correlation. + :param float score: (optional) Confidence score for the property in the range of 0 to 1. A higher score indicates greater likelihood that the class is depicted in the image. The default threshold for returning scores from a classifier is 0.5. """ self.min = min self.max = max @@ -1023,7 +1128,7 @@ class FaceGender(object): an image, the response might return the confidence score 0. :attr str gender: Gender identified by the face. For example, `MALE` or `FEMALE`. - :attr float score: (optional) Confidence score for the property. Scores range from 0-1, with a higher score indicating greater correlation. + :attr float score: (optional) Confidence score for the property in the range of 0 to 1. A higher score indicates greater likelihood that the class is depicted in the image. The default threshold for returning scores from a classifier is 0.5. """ def __init__(self, gender, score=None): @@ -1031,7 +1136,7 @@ def __init__(self, gender, score=None): Initialize a FaceGender object. :param str gender: Gender identified by the face. For example, `MALE` or `FEMALE`. - :param float score: (optional) Confidence score for the property. Scores range from 0-1, with a higher score indicating greater correlation. + :param float score: (optional) Confidence score for the property in the range of 0 to 1. A higher score indicates greater likelihood that the class is depicted in the image. The default threshold for returning scores from a classifier is 0.5. """ self.gender = gender self.score = score @@ -1079,7 +1184,7 @@ class FaceIdentity(object): a celebrity is not detected. :attr str name: Name of the person. 
- :attr float score: (optional) Confidence score for the property. Scores range from 0-1, with a higher score indicating greater correlation. + :attr float score: (optional) Confidence score for the property in the range of 0 to 1. A higher score indicates greater likelihood that the class is depicted in the image. The default threshold for returning scores from a classifier is 0.5. :attr str type_hierarchy: (optional) Knowledge graph of the property. For example, `People/Leaders/Presidents/USA/Barack Obama`. Included only if identified. """ @@ -1088,7 +1193,7 @@ def __init__(self, name, score=None, type_hierarchy=None): Initialize a FaceIdentity object. :param str name: Name of the person. - :param float score: (optional) Confidence score for the property. Scores range from 0-1, with a higher score indicating greater correlation. + :param float score: (optional) Confidence score for the property in the range of 0 to 1. A higher score indicates greater likelihood that the class is depicted in the image. The default threshold for returning scores from a classifier is 0.5. :param str type_hierarchy: (optional) Knowledge graph of the property. For example, `People/Leaders/Presidents/USA/Barack Obama`. Included only if identified. """ self.name = name From 82b3920205fe9f4ecd1c59bb4d27583c4ede6808 Mon Sep 17 00:00:00 2001 From: Erika Dsouza Date: Thu, 15 Feb 2018 16:21:49 -0500 Subject: [PATCH 14/45] Regenerate language translator --- .../language_translator_v2.py | 96 ++++++++++--------- 1 file changed, 50 insertions(+), 46 deletions(-) diff --git a/watson_developer_cloud/language_translator_v2.py b/watson_developer_cloud/language_translator_v2.py index b98bda0ce..678f55a55 100644 --- a/watson_developer_cloud/language_translator_v2.py +++ b/watson_developer_cloud/language_translator_v2.py @@ -1,6 +1,6 @@ # coding: utf-8 -# Copyright 2017 IBM All Rights Reserved. +# Copyright 2018 IBM All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -73,10 +73,10 @@ def translate(self, text, model_id=None, source=None, target=None): """ Translates the input text from the source language to the target language. - :param list[str] text: Input text in UTF-8 encoding. It is a list so that multiple paragraphs can be submitted. Also accept a single string, instead of an array, as valid input. - :param str model_id: The unique model_id of the translation model being used to translate text. The model_id inherently specifies source language, target language, and domain. If the model_id is specified, there is no need for the source and target parameters and the values are ignored. - :param str source: Used in combination with target as an alternative way to select the model for translation. When target and source are set, and model_id is not set, the system chooses a default model with the right language pair to translate (usually the model based on the news domain). - :param str target: Used in combination with source as an alternative way to select the model for translation. When target and source are set, and model_id is not set, the system chooses a default model with the right language pair to translate (usually the model based on the news domain). + :param list[str] text: Input text in UTF-8 encoding. Multiple entries will result in multiple translations in the response. + :param str model_id: Model ID of the translation model to use. If this is specified, the `source` and `target` parameters will be ignored. The method requires either a model ID or both the `source` and `target` parameters. + :param str source: Language code of the source text language. Use with `target` as an alternative way to select a translation model. 
When `source` and `target` are set, and a model ID is not set, the system chooses a default model for the language pair (usually the model based on the news domain). + :param str target: Language code of the translation target language. Use with source as an alternative way to select a translation model. :return: A `dict` containing the `TranslationResult` response. :rtype: dict """ @@ -120,10 +120,10 @@ def identify(self, text): def list_identifiable_languages(self): """ - Lists all languages that can be identified by the API. + List identifiable languages. - Lists all languages that the service can identify. Returns the two-letter code - (for example, `en` for English or `es` for Spanish) and name of each language. + Lists the languages that the service can identify. Returns the language code (for + example, `en` for English or `es` for Spanish) and name of each language. :return: A `dict` containing the `IdentifiableLanguages` response. :rtype: dict @@ -147,10 +147,14 @@ def create_model(self, monolingual_corpus_filename=None): """ Uploads a TMX glossary file on top of a domain to customize a translation model. - - :param str base_model_id: Specifies the domain model that is used as the base for the training. To see current supported domain models, use the GET /v2/models parameter. - :param str name: The model name. Valid characters are letters, numbers, -, and _. No spaces. - :param file forced_glossary: A TMX file with your customizations. The customizations in the file completely overwrite the domain data translation, including high frequency or high confidence phrase translations. You can upload only one glossary with a file size less than 10 MB per call. + Depending on the size of the file, training can range from minutes for a glossary + to several hours for a large parallel corpus. Glossary files must be less than 10 + MB. The cumulative file size of all uploaded glossary and corpus files is limited + to 250 MB. 
+ + :param str base_model_id: The model ID of the model to use as the base for customization. To see available models, use the `List models` method. + :param str name: An optional model name that you can use to identify the model. Valid characters are letters, numbers, dashes, underscores, spaces and apostrophes. The maximum length is 32 characters. + :param file forced_glossary: A TMX file with your customizations. The customizations in the file completely overwrite the domain translation data, including high frequency or high confidence phrase translations. You can upload only one glossary with a file size less than 10 MB per call. + :param file parallel_corpus: A TMX file that contains entries that are treated as a parallel corpus instead of a glossary. + :param file monolingual_corpus: A UTF-8 encoded plain text file that is used to customize the target language model. + :param str forced_glossary_filename: The filename for forced_glossary. @@ -164,16 +168,16 @@ def create_model(self, params = {'base_model_id': base_model_id, 'name': name} forced_glossary_tuple = None if forced_glossary: - if not forced_glossary_filename and hasattr(forced_glossary, - 'name'): + if not forced_glossary_filename and hasattr( + forced_glossary, 'name'): forced_glossary_filename = forced_glossary.name mime_type = 'application/octet-stream' forced_glossary_tuple = (forced_glossary_filename, forced_glossary, mime_type) parallel_corpus_tuple = None if parallel_corpus: - if not parallel_corpus_filename and hasattr(parallel_corpus, - 'name'): + if not parallel_corpus_filename and hasattr( + parallel_corpus, 'name'): parallel_corpus_filename = parallel_corpus.name mime_type = 'application/octet-stream' parallel_corpus_tuple = (parallel_corpus_filename, parallel_corpus, @@ -203,7 +207,7 @@ def delete_model(self, model_id): """ Deletes a custom translation model. - :param str model_id: The model identifier. 
:return: A `dict` containing the `DeleteModelResult` response. :rtype: dict """ @@ -231,9 +235,9 @@ def list_models(self, source=None, target=None, default_models=None): """ Lists available standard and custom models by source or target language. - :param str source: Filter models by source language. - :param str target: Filter models by target language. - :param bool default_models: Valid values are leaving it unset, `true`, and `false`. When `true`, it filters models to return the default_models model or models. When `false`, it returns the non-default_models model or models. If not set, it returns all models, default_models and non-default_models. + :param str source: Specify a language code to filter results by source language. + :param str target: Specify a language code to filter results by target language. + :param bool default_models: If the default_models parameter isn't specified, the service will return all models (default_models and non-default_models) for each language pair. To return only default_models models, set this to `true`. To return only non-default_models models, set this to `false`. :return: A `dict` containing the `TranslationModels` response. :rtype: dict """ @@ -302,7 +306,7 @@ class IdentifiableLanguage(object): """ IdentifiableLanguage. - :attr str language: The code for an identifiable language. + :attr str language: The language code for an identifiable language. :attr str name: The name of the identifiable language. """ @@ -310,7 +314,7 @@ def __init__(self, language, name): """ Initialize a IdentifiableLanguage object. - :param str language: The code for an identifiable language. + :param str language: The language code for an identifiable language. :param str name: The name of the identifiable language. """ self.language = language @@ -413,7 +417,7 @@ class IdentifiedLanguage(object): """ IdentifiedLanguage. - :attr str language: The code for an identified language. 
+ :attr str language: The language code for an identified language. :attr float confidence: The confidence score for the identified language. """ @@ -421,7 +425,7 @@ def __init__(self, language, confidence): """ Initialize a IdentifiedLanguage object. - :param str language: The code for an identified language. + :param str language: The language code for an identified language. :param float confidence: The confidence score for the identified language. """ self.language = language @@ -575,15 +579,15 @@ class TranslationModel(object): """ Response payload for models. - :attr str model_id: A globally unique string that identifies the underlying model that is used for translation. This string contains all the information about source language, target language, domain, and various other related configurations. - :attr str name: (optional) If a model is trained by a user, there might be an optional “name” parameter attached during training to help the user identify the model. - :attr str source: (optional) Source language in two letter language code. Use the five letter code when clarifying between multiple supported languages. When model_id is used directly, it will override the source-target language combination. Also, when a two letter language code is used, but no suitable default is found, it returns an error. - :attr str target: (optional) Target language in two letter language code. - :attr str base_model_id: (optional) If this model is a custom model, this returns the base model that it is trained on. For a base model, this response value is empty. + :attr str model_id: A globally unique string that identifies the underlying model that is used for translation. + :attr str name: (optional) Optional name that can be specified when the model is created. + :attr str source: (optional) Translation source language code. + :attr str target: (optional) Translation target language code. 
+ :attr str base_model_id: (optional) Model ID of the base model that was used to customize the model. If the model is not a custom model, this will be an empty string. :attr str domain: (optional) The domain of the translation model. - :attr bool customizable: (optional) Whether this model can be used as a base for customization. Customized models are not further customizable, and we don't allow the customization of certain base models. - :attr bool default_model: (optional) Whether this model is considered a default model and is used when the source and target languages are specified without the model_id. - :attr str owner: (optional) Returns the Bluemix ID of the instance that created the model, or an empty string if it is a model that is trained by IBM. + :attr bool customizable: (optional) Whether this model can be used as a base for customization. Customized models are not further customizable, and some base models are not customizable. + :attr bool default_model: (optional) Whether or not the model is a default model. A default model is the model for a given language pair that will be used when that language pair is specified in the source and target parameters. + :attr str owner: (optional) Either an empty string, indicating the model is not a custom model, or the ID of the service instance that created the model. :attr str status: (optional) Availability of a model. """ @@ -601,15 +605,15 @@ def __init__(self, """ Initialize a TranslationModel object. - :param str model_id: A globally unique string that identifies the underlying model that is used for translation. This string contains all the information about source language, target language, domain, and various other related configurations. - :param str name: (optional) If a model is trained by a user, there might be an optional “name” parameter attached during training to help the user identify the model. - :param str source: (optional) Source language in two letter language code. 
Use the five letter code when clarifying between multiple supported languages. When model_id is used directly, it will override the source-target language combination. Also, when a two letter language code is used, but no suitable default is found, it returns an error. - :param str target: (optional) Target language in two letter language code. - :param str base_model_id: (optional) If this model is a custom model, this returns the base model that it is trained on. For a base model, this response value is empty. + :param str model_id: A globally unique string that identifies the underlying model that is used for translation. + :param str name: (optional) Optional name that can be specified when the model is created. + :param str source: (optional) Translation source language code. + :param str target: (optional) Translation target language code. + :param str base_model_id: (optional) Model ID of the base model that was used to customize the model. If the model is not a custom model, this will be an empty string. :param str domain: (optional) The domain of the translation model. - :param bool customizable: (optional) Whether this model can be used as a base for customization. Customized models are not further customizable, and we don't allow the customization of certain base models. - :param bool default_model: (optional) Whether this model is considered a default model and is used when the source and target languages are specified without the model_id. - :param str owner: (optional) Returns the Bluemix ID of the instance that created the model, or an empty string if it is a model that is trained by IBM. + :param bool customizable: (optional) Whether this model can be used as a base for customization. Customized models are not further customizable, and some base models are not customizable. + :param bool default_model: (optional) Whether or not the model is a default model. 
A default model is the model for a given language pair that will be used when that language pair is specified in the source and target parameters. + :param str owner: (optional) Either an empty string, indicating the model is not a custom model, or the ID of the service instance that created the model. :param str status: (optional) Availability of a model. """ self.model_id = model_id @@ -748,18 +752,18 @@ class TranslationResult(object): """ TranslationResult. - :attr int word_count: Number of words of the complete input text. - :attr int character_count: Number of characters of the complete input text. - :attr list[Translation] translations: List of translation output in UTF-8, corresponding to the list of input text. + :attr int word_count: Number of words in the input text. + :attr int character_count: Number of characters in the input text. + :attr list[Translation] translations: List of translation output in UTF-8, corresponding to the input text entries. """ def __init__(self, word_count, character_count, translations): """ Initialize a TranslationResult object. - :param int word_count: Number of words of the complete input text. - :param int character_count: Number of characters of the complete input text. - :param list[Translation] translations: List of translation output in UTF-8, corresponding to the list of input text. + :param int word_count: Number of words in the input text. + :param int character_count: Number of characters in the input text. + :param list[Translation] translations: List of translation output in UTF-8, corresponding to the input text entries. 
""" self.word_count = word_count self.character_count = character_count From 8e1db1e78c5c065866591b54630a36abd0b61e8e Mon Sep 17 00:00:00 2001 From: Erika Dsouza Date: Thu, 15 Feb 2018 16:31:05 -0500 Subject: [PATCH 15/45] Regenerate tone analyzer --- watson_developer_cloud/tone_analyzer_v3.py | 55 ++-------------------- 1 file changed, 5 insertions(+), 50 deletions(-) diff --git a/watson_developer_cloud/tone_analyzer_v3.py b/watson_developer_cloud/tone_analyzer_v3.py index ee5382894..f688fdae1 100755 --- a/watson_developer_cloud/tone_analyzer_v3.py +++ b/watson_developer_cloud/tone_analyzer_v3.py @@ -1,6 +1,6 @@ # coding: utf-8 -# Copyright 2017 IBM All Rights Reserved. +# Copyright 2018 IBM All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -21,34 +21,6 @@ perceived and then to improve the tone of your communications. Businesses can use the service to learn the tone of their customers' communications and to respond to each customer appropriately, or to understand and improve their customer conversations. -### API Usage -The following information provides details about using the service to analyze tone: -* **The tone method:** The service offers `GET` and `POST /v3/tone` methods that use the -general purpose endpoint to analyze the tone of input content. The methods accept content -in JSON, plain text, or HTML format. -* **The tone_chat method:** The service offers a `POST /v3/tone_chat` method that uses the -customer engagement endpoint to analyze the tone of customer service and customer support -conversations. The method accepts content in JSON format. -* **Authentication:** You authenticate to the service by using your service credentials. -You can use your credentials to authenticate via a proxy server that resides in Bluemix, -or you can use your credentials to obtain a token and contact the service directly. 
See -[Service credentials for Watson -services](https://console.bluemix.net/docs/services/watson/getting-started-credentials.html) -and [Tokens for -authentication](https://console.bluemix.net/docs/services/watson/getting-started-tokens.html). -* **Request Logging:** By default, all Watson services log requests and their results. -Data is collected only to improve the Watson services. If you do not want to share your -data, set the header parameter `X-Watson-Learning-Opt-Out` to `true` for each request. -Data is collected for any request that omits this header. See [Controlling request logging -for Watson -services](https://console.bluemix.net/docs/services/watson/getting-started-logging.html). - -For more information about the service, see [About Tone -Analyzer](https://console.bluemix.net/docs/services/tone-analyzer/index.html). - -**Note:** Method descriptions apply to the latest version of the interface, `2017-09-21`. -Where necessary, parameters and models describe differences between versions `2017-09-21` -and `2016-05-19`. """ from __future__ import absolute_import @@ -122,23 +94,6 @@ def tone(self, """ Analyze general purpose tone. - Uses the general purpose endpoint to analyze the tone of your input content. The - service analyzes the content for emotional and language tones. The method always - analyzes the tone of the full document; by default, it also analyzes the tone of - each individual sentence of the content. You can submit no more than 128 KB of - total input content and no more than 1000 individual sentences in JSON, plain - text, or HTML format. The service analyzes the first 1000 sentences for - document-level analysis and only the first 100 sentences for sentence-level - analysis. Use the `POST` request method to analyze larger amounts of content in - any of the available formats. Use the `GET` request method to analyze smaller - quantities of plain text content. 
Per the JSON specification, the default - character encoding for JSON content is effectively always UTF-8; per the HTTP - specification, the default encoding for plain text and HTML is ISO-8859-1 - (effectively, the ASCII character set). When specifying a content type of plain - text or HTML, include the `charset` parameter to indicate the character encoding - of the input text; for example: `Content-Type: text/plain;charset=utf-8`. For - `text/html`, the service removes HTML tags and analyzes only the textual content. - :param ToneInput tone_input: JSON, plain text, or HTML input that contains the content to be analyzed. For JSON input, provide an object of type `ToneInput`. :param str content_type: The type of the input: application/json, text/plain, or text/html. A character encoding can be specified by including a `charset` parameter. For example, 'text/plain;charset=utf-8'. :param bool sentences: Indicates whether the service is to return an analysis of each individual sentence in addition to its analysis of the full document. If `true` (the default), the service returns results for each sentence. @@ -153,14 +108,14 @@ def tone(self, if content_type is None: raise ValueError('content_type must be provided') headers = { - 'content-type': content_type, + 'Content-Type': content_type, 'Content-Language': content_language, 'Accept-Language': accept_language } params = { 'version': self.version, 'sentences': sentences, - 'tones': ",".join(tones) if isinstance(tones, list) else tones + 'tones': self._convert_list(tones) } if content_type == 'application/json' and isinstance(tone_input, dict): data = json.dumps(tone_input) @@ -819,7 +774,7 @@ class UtteranceAnalysis(object): """ UtteranceAnalysis. - :attr str utterance_id: The unique identifier of the utterance. The first utterance has ID 0, and the ID of each subsequent utterance is incremented by one. + :attr int utterance_id: The unique identifier of the utterance. 
The first utterance has ID 0, and the ID of each subsequent utterance is incremented by one. :attr str utterance_text: The text of the utterance. :attr list[ToneChatScore] tones: An array of `ToneChatScore` objects that provides results for the most prevalent tones of the utterance. The array includes results for any tone whose score is at least 0.5. The array is empty if no tone has a score that meets this threshold. :attr str error: (optional) **`2017-09-21`:** An error message if the utterance contains more than 500 characters. The service does not analyze the utterance. **`2016-05-19`:** Not returned. @@ -829,7 +784,7 @@ def __init__(self, utterance_id, utterance_text, tones, error=None): """ Initialize a UtteranceAnalysis object. - :param str utterance_id: The unique identifier of the utterance. The first utterance has ID 0, and the ID of each subsequent utterance is incremented by one. + :param int utterance_id: The unique identifier of the utterance. The first utterance has ID 0, and the ID of each subsequent utterance is incremented by one. :param str utterance_text: The text of the utterance. :param list[ToneChatScore] tones: An array of `ToneChatScore` objects that provides results for the most prevalent tones of the utterance. The array includes results for any tone whose score is at least 0.5. The array is empty if no tone has a score that meets this threshold. :param str error: (optional) **`2017-09-21`:** An error message if the utterance contains more than 500 characters. The service does not analyze the utterance. **`2016-05-19`:** Not returned. 
From 663bff4c88f8ce628bd9f62e5dd6b681cdb89d8e Mon Sep 17 00:00:00 2001 From: Erika Dsouza Date: Thu, 15 Feb 2018 16:49:36 -0500 Subject: [PATCH 16/45] Revert back for create_classifier and update_classifier --- .../test_integration_visual_recognition.py | 2 +- test/unit/test_visual_recognition_v3.py | 2 +- .../visual_recognition_v3.py | 133 ++++-------------- 3 files changed, 29 insertions(+), 108 deletions(-) diff --git a/test/integration/test_integration_visual_recognition.py b/test/integration/test_integration_visual_recognition.py index b35af246b..80a25da0a 100644 --- a/test/integration/test_integration_visual_recognition.py +++ b/test/integration/test_integration_visual_recognition.py @@ -28,7 +28,7 @@ def test_detect_faces(self): def test_custom_classifier(self): with open(os.path.join(os.path.dirname(__file__), '../../resources/cars.zip'), 'rb') as cars, \ open(os.path.join(os.path.dirname(__file__), '../../resources/trucks.zip'), 'rb') as trucks: - classifier = self.visual_recognition.create_classifier('Cars vs Trucks', classname_positive_examples=cars, negative_examples=trucks) + classifier = self.visual_recognition.create_classifier('Cars vs Trucks', cars_positive_examples=cars, negative_examples=trucks) assert classifier is not None diff --git a/test/unit/test_visual_recognition_v3.py b/test/unit/test_visual_recognition_v3.py index 320c7833e..11e964f7e 100644 --- a/test/unit/test_visual_recognition_v3.py +++ b/test/unit/test_visual_recognition_v3.py @@ -98,7 +98,7 @@ def test_create_classifier(self): with open(os.path.join(os.path.dirname(__file__), '../../resources/cars.zip'), 'rb') as cars, \ open(os.path.join(os.path.dirname(__file__), '../../resources/trucks.zip'), 'rb') as trucks: - vr_service.create_classifier('Cars vs Trucks', classname_positive_examples=cars, negative_examples=trucks) + vr_service.create_classifier('Cars vs Trucks', cars_positive_examples=cars, negative_examples=trucks) assert len(responses.calls) == 1 diff --git 
a/watson_developer_cloud/visual_recognition_v3.py b/watson_developer_cloud/visual_recognition_v3.py index 12c33b0c1..9cb698f4a 100644 --- a/watson_developer_cloud/visual_recognition_v3.py +++ b/watson_developer_cloud/visual_recognition_v3.py @@ -172,65 +172,29 @@ def detect_faces(self, ######################### def create_classifier(self, - name, - classname_positive_examples, - negative_examples=None, - classname_positive_examples_filename=None, - negative_examples_filename=None): - """ - Create a classifier. - - Train a new multi-faceted classifier on the uploaded image data. Create your - custom classifier with positive or negative examples. Include at least two sets of - examples, either two positive example files or one positive and one negative file. - You can upload a maximum of 256 MB per call. Encode all names in UTF-8 if they - contain non-ASCII characters (.zip and image file names, and classifier and class - names). The service assumes UTF-8 encoding if it encounters non-ASCII characters. - - :param str name: The name of the new classifier. Encode special characters in UTF-8. - :param file classname_positive_examples: A .zip file of images that depict the visual subject of a class in the new classifier. You can include more than one positive example file in a call. Append `_positive_examples` to the form name. The prefix is used as the class name. For example, `goldenretriever_positive_examples` creates the class **goldenretriever**. Include at least 10 images in .jpg or .png format. The minimum recommended image resolution is 32X32 pixels. The maximum number of images is 10,000 images or 100 MB per .zip file. Encode special characters in the file name in UTF-8. The API explorer limits you to training only one class. To train more classes, use the API functionality. - :param file negative_examples: A compressed (.zip) file of images that do not depict the visual subject of any of the classes of the new classifier. Must contain a minimum of 10 images. 
Encode special characters in the file name in UTF-8. - :param str classname_positive_examples_filename: The filename for classname_positive_examples. - :param str negative_examples_filename: The filename for negative_examples. - :return: A `dict` containing the `Classifier` response. - :rtype: dict - """ - if name is None: - raise ValueError('name must be provided') - if classname_positive_examples is None: - raise ValueError('classname_positive_examples must be provided') - params = {'version': self.version} - name_tuple = (None, name, 'text/plain') - if not classname_positive_examples_filename and hasattr( - classname_positive_examples, 'name'): - classname_positive_examples_filename = classname_positive_examples.name - mime_type = 'application/octet-stream' - classname_positive_examples_tuple = ( - classname_positive_examples_filename, classname_positive_examples, - mime_type) - negative_examples_tuple = None - if negative_examples: - if not negative_examples_filename and hasattr( - negative_examples, 'name'): - negative_examples_filename = negative_examples.name - if not negative_examples_filename: - raise ValueError('negative_examples_filename must be provided') - mime_type = 'application/octet-stream' - negative_examples_tuple = (negative_examples_filename, - negative_examples, mime_type) - url = '/v3/classifiers' - response = self.request( - method='POST', - url=url, - params=params, - files={ - 'name': name_tuple, - 'classname_positive_examples': - classname_positive_examples_tuple, - 'negative_examples': negative_examples_tuple - }, - accept_json=True) - return response + name, + **kwargs): + """ + Create a classifier. + :param str name: The name of the new classifier. Cannot contain special characters. + :param file _positive_examples: A compressed (.zip) file of images that depict the visual subject for a class within the new classifier. Must contain a minimum of 10 images. The swagger limits you to training only one class. 
To train more classes, use the API functionality. + :param file negative_examples: A compressed (.zip) file of images that do not depict the visual subject of any of the classes of the new classifier. Must contain a minimum of 10 images. + :return: A `dict` containing the `Classifier` response. + :rtype: dict + """ + if name is None: + raise ValueError('name must be provided') + params = {'version': self.version} + data = {'name': name} + url = '/v3/classifiers' + response = self.request( + method='POST', + url=url, + params=params, + data=data, + files=kwargs, + accept_json=True) + return response def delete_classifier(self, classifier_id): """ @@ -280,68 +244,25 @@ def list_classifiers(self, verbose=None): def update_classifier(self, classifier_id, - classname_positive_examples=None, - negative_examples=None, - classname_positive_examples_filename=None, - negative_examples_filename=None): + **kwargs): """ Update a classifier. - - Update a custom classifier by adding new positive or negative classes (examples) - or by adding new images to existing classes. You must supply at least one set of - positive or negative examples. For details, see [Updating custom - classifiers](https://console.bluemix.net/docs/services/visual-recognition/customizing.html#updating-custom-classifiers). - Encode all names in UTF-8 if they contain non-ASCII characters (.zip and image - file names, and classifier and class names). The service assumes UTF-8 encoding if - it encounters non-ASCII characters. **Important:** You can't update a custom - classifier with an API key for a Lite plan. To update a custom classifer on a Lite - plan, create another service instance on a Standard plan and re-create your custom - classifier. **Tip:** Don't make retraining calls on a classifier until the status - is ready. When you submit retraining requests in parallel, the last request - overwrites the previous requests. The retrained property shows the last time the - classifier retraining finished. 
- :param str classifier_id: The ID of the classifier. - :param file classname_positive_examples: A .zip file of images that depict the visual subject of a class in the classifier. The positive examples create or update classes in the classifier. You can include more than one positive example file in a call. Append `_positive_examples` to the form name. The prefix is used to name the class. For example, `goldenretriever_positive_examples` creates the class `goldenretriever`. Include at least 10 images in .jpg or .png format. The minimum recommended image resolution is 32X32 pixels. The maximum number of images is 10,000 images or 100 MB per .zip file. Encode special characters in the file name in UTF-8. - :param file negative_examples: A compressed (.zip) file of images that do not depict the visual subject of any of the classes of the new classifier. Must contain a minimum of 10 images. Encode special characters in the file name in UTF-8. - :param str classname_positive_examples_filename: The filename for classname_positive_examples. - :param str negative_examples_filename: The filename for negative_examples. + :param file _positive_examples: A compressed (.zip) file of images that depict the visual subject for a class within the classifier. Must contain a minimum of 10 images. + :param file negative_examples: A compressed (.zip) file of images that do not depict the visual subject of any of the classes of the new classifier. Must contain a minimum of 10 images. :return: A `dict` containing the `Classifier` response. 
:rtype: dict """ if classifier_id is None: raise ValueError('classifier_id must be provided') params = {'version': self.version} - classname_positive_examples_tuple = None - if classname_positive_examples: - if not classname_positive_examples_filename and hasattr( - classname_positive_examples, 'name'): - classname_positive_examples_filename = classname_positive_examples.name - mime_type = 'application/octet-stream' - classname_positive_examples_tuple = ( - classname_positive_examples_filename, - classname_positive_examples, mime_type) - negative_examples_tuple = None - if negative_examples: - if not negative_examples_filename and hasattr( - negative_examples, 'name'): - negative_examples_filename = negative_examples.name - if not negative_examples_filename: - raise ValueError('negative_examples_filename must be provided') - mime_type = 'application/octet-stream' - negative_examples_tuple = (negative_examples_filename, - negative_examples, mime_type) url = '/v3/classifiers/{0}'.format( *self._encode_path_vars(classifier_id)) response = self.request( method='POST', url=url, params=params, - files={ - 'classname_positive_examples': - classname_positive_examples_tuple, - 'negative_examples': negative_examples_tuple - }, + files=kwargs, accept_json=True) return response From 8dfc0f9492a23f3c69698e16fe4df18daa93a20d Mon Sep 17 00:00:00 2001 From: Erika Dsouza Date: Fri, 16 Feb 2018 13:43:48 -0500 Subject: [PATCH 17/45] Remove virtualenv (#370) --- test_env/bin/codecov | 11 ---- test_env/bin/coverage | 11 ---- test_env/bin/coverage-2.7 | 11 ---- test_env/bin/coverage2 | 11 ---- test_env/bin/dotenv | 11 ---- test_env/bin/easy_install | 11 ---- test_env/bin/easy_install-2.7 | 11 ---- test_env/bin/epylint | 11 ---- test_env/bin/isort | 11 ---- test_env/bin/pbr | 11 ---- test_env/bin/pip | 11 ---- test_env/bin/pip2 | 11 ---- test_env/bin/pip2.7 | 11 ---- test_env/bin/py.test | 11 ---- test_env/bin/pybabel | 11 ---- test_env/bin/pygmentize | 11 ---- test_env/bin/pylint | 11 
---- test_env/bin/pyreverse | 11 ---- test_env/bin/pytest | 11 ---- test_env/bin/python | Bin 51712 -> 0 bytes test_env/bin/python-config | 78 ----------------------------- test_env/bin/python2 | 1 - test_env/bin/python2.7 | 1 - test_env/bin/rst2html.py | 23 --------- test_env/bin/rst2html4.py | 26 ---------- test_env/bin/rst2html5.py | 35 ------------- test_env/bin/rst2latex.py | 26 ---------- test_env/bin/rst2man.py | 26 ---------- test_env/bin/rst2odt.py | 30 ----------- test_env/bin/rst2odt_prepstyles.py | 67 ------------------------- test_env/bin/rst2pseudoxml.py | 23 --------- test_env/bin/rst2s5.py | 24 --------- test_env/bin/rst2xetex.py | 27 ---------- test_env/bin/rst2xml.py | 23 --------- test_env/bin/rstpep2html.py | 25 --------- test_env/bin/sphinx-apidoc | 11 ---- test_env/bin/sphinx-autogen | 11 ---- test_env/bin/sphinx-build | 11 ---- test_env/bin/sphinx-quickstart | 11 ---- test_env/bin/symilar | 11 ---- test_env/bin/tox | 11 ---- test_env/bin/tox-quickstart | 11 ---- test_env/bin/virtualenv | 11 ---- test_env/bin/wheel | 11 ---- test_env/include/python2.7 | 1 - test_env/pip-selfcheck.json | 1 - 46 files changed, 745 deletions(-) delete mode 100755 test_env/bin/codecov delete mode 100755 test_env/bin/coverage delete mode 100755 test_env/bin/coverage-2.7 delete mode 100755 test_env/bin/coverage2 delete mode 100755 test_env/bin/dotenv delete mode 100755 test_env/bin/easy_install delete mode 100755 test_env/bin/easy_install-2.7 delete mode 100755 test_env/bin/epylint delete mode 100755 test_env/bin/isort delete mode 100755 test_env/bin/pbr delete mode 100755 test_env/bin/pip delete mode 100755 test_env/bin/pip2 delete mode 100755 test_env/bin/pip2.7 delete mode 100755 test_env/bin/py.test delete mode 100755 test_env/bin/pybabel delete mode 100755 test_env/bin/pygmentize delete mode 100755 test_env/bin/pylint delete mode 100755 test_env/bin/pyreverse delete mode 100755 test_env/bin/pytest delete mode 100755 test_env/bin/python delete mode 100755 
test_env/bin/python-config delete mode 120000 test_env/bin/python2 delete mode 120000 test_env/bin/python2.7 delete mode 100755 test_env/bin/rst2html.py delete mode 100755 test_env/bin/rst2html4.py delete mode 100755 test_env/bin/rst2html5.py delete mode 100755 test_env/bin/rst2latex.py delete mode 100755 test_env/bin/rst2man.py delete mode 100755 test_env/bin/rst2odt.py delete mode 100755 test_env/bin/rst2odt_prepstyles.py delete mode 100755 test_env/bin/rst2pseudoxml.py delete mode 100755 test_env/bin/rst2s5.py delete mode 100755 test_env/bin/rst2xetex.py delete mode 100755 test_env/bin/rst2xml.py delete mode 100755 test_env/bin/rstpep2html.py delete mode 100755 test_env/bin/sphinx-apidoc delete mode 100755 test_env/bin/sphinx-autogen delete mode 100755 test_env/bin/sphinx-build delete mode 100755 test_env/bin/sphinx-quickstart delete mode 100755 test_env/bin/symilar delete mode 100755 test_env/bin/tox delete mode 100755 test_env/bin/tox-quickstart delete mode 100755 test_env/bin/virtualenv delete mode 100755 test_env/bin/wheel delete mode 120000 test_env/include/python2.7 delete mode 100644 test_env/pip-selfcheck.json diff --git a/test_env/bin/codecov b/test_env/bin/codecov deleted file mode 100755 index b64c2fc17..000000000 --- a/test_env/bin/codecov +++ /dev/null @@ -1,11 +0,0 @@ -#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python - -# -*- coding: utf-8 -*- -import re -import sys - -from codecov import main - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit(main()) diff --git a/test_env/bin/coverage b/test_env/bin/coverage deleted file mode 100755 index fcc21fa49..000000000 --- a/test_env/bin/coverage +++ /dev/null @@ -1,11 +0,0 @@ -#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python - -# -*- coding: utf-8 -*- -import re -import sys - -from coverage import main - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - 
sys.exit(main()) diff --git a/test_env/bin/coverage-2.7 b/test_env/bin/coverage-2.7 deleted file mode 100755 index fcc21fa49..000000000 --- a/test_env/bin/coverage-2.7 +++ /dev/null @@ -1,11 +0,0 @@ -#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python - -# -*- coding: utf-8 -*- -import re -import sys - -from coverage import main - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit(main()) diff --git a/test_env/bin/coverage2 b/test_env/bin/coverage2 deleted file mode 100755 index fcc21fa49..000000000 --- a/test_env/bin/coverage2 +++ /dev/null @@ -1,11 +0,0 @@ -#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python - -# -*- coding: utf-8 -*- -import re -import sys - -from coverage import main - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit(main()) diff --git a/test_env/bin/dotenv b/test_env/bin/dotenv deleted file mode 100755 index 118baf235..000000000 --- a/test_env/bin/dotenv +++ /dev/null @@ -1,11 +0,0 @@ -#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python - -# -*- coding: utf-8 -*- -import re -import sys - -from dotenv import cli - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit(cli.cli()) diff --git a/test_env/bin/easy_install b/test_env/bin/easy_install deleted file mode 100755 index e834ef311..000000000 --- a/test_env/bin/easy_install +++ /dev/null @@ -1,11 +0,0 @@ -#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python - -# -*- coding: utf-8 -*- -import re -import sys - -from setuptools.command.easy_install import main - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit(main()) diff --git a/test_env/bin/easy_install-2.7 b/test_env/bin/easy_install-2.7 deleted file mode 100755 index e834ef311..000000000 --- a/test_env/bin/easy_install-2.7 +++ /dev/null @@ -1,11 +0,0 @@ 
-#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python - -# -*- coding: utf-8 -*- -import re -import sys - -from setuptools.command.easy_install import main - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit(main()) diff --git a/test_env/bin/epylint b/test_env/bin/epylint deleted file mode 100755 index 169410962..000000000 --- a/test_env/bin/epylint +++ /dev/null @@ -1,11 +0,0 @@ -#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python - -# -*- coding: utf-8 -*- -import re -import sys - -from pylint import run_epylint - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit(run_epylint()) diff --git a/test_env/bin/isort b/test_env/bin/isort deleted file mode 100755 index 5a4b1e7f8..000000000 --- a/test_env/bin/isort +++ /dev/null @@ -1,11 +0,0 @@ -#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python - -# -*- coding: utf-8 -*- -import re -import sys - -from isort.main import main - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit(main()) diff --git a/test_env/bin/pbr b/test_env/bin/pbr deleted file mode 100755 index 17451cb19..000000000 --- a/test_env/bin/pbr +++ /dev/null @@ -1,11 +0,0 @@ -#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python - -# -*- coding: utf-8 -*- -import re -import sys - -from pbr.cmd.main import main - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit(main()) diff --git a/test_env/bin/pip b/test_env/bin/pip deleted file mode 100755 index ca137e6d7..000000000 --- a/test_env/bin/pip +++ /dev/null @@ -1,11 +0,0 @@ -#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python - -# -*- coding: utf-8 -*- -import re -import sys - -from pip import main - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - 
sys.exit(main()) diff --git a/test_env/bin/pip2 b/test_env/bin/pip2 deleted file mode 100755 index ca137e6d7..000000000 --- a/test_env/bin/pip2 +++ /dev/null @@ -1,11 +0,0 @@ -#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python - -# -*- coding: utf-8 -*- -import re -import sys - -from pip import main - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit(main()) diff --git a/test_env/bin/pip2.7 b/test_env/bin/pip2.7 deleted file mode 100755 index ca137e6d7..000000000 --- a/test_env/bin/pip2.7 +++ /dev/null @@ -1,11 +0,0 @@ -#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python - -# -*- coding: utf-8 -*- -import re -import sys - -from pip import main - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit(main()) diff --git a/test_env/bin/py.test b/test_env/bin/py.test deleted file mode 100755 index b2fbdb63f..000000000 --- a/test_env/bin/py.test +++ /dev/null @@ -1,11 +0,0 @@ -#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python - -# -*- coding: utf-8 -*- -import re -import sys - -from pytest import main - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit(main()) diff --git a/test_env/bin/pybabel b/test_env/bin/pybabel deleted file mode 100755 index 323f32e1f..000000000 --- a/test_env/bin/pybabel +++ /dev/null @@ -1,11 +0,0 @@ -#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python - -# -*- coding: utf-8 -*- -import re -import sys - -from babel.messages.frontend import main - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit(main()) diff --git a/test_env/bin/pygmentize b/test_env/bin/pygmentize deleted file mode 100755 index 59cfc88e9..000000000 --- a/test_env/bin/pygmentize +++ /dev/null @@ -1,11 +0,0 @@ -#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python - -# -*- 
coding: utf-8 -*- -import re -import sys - -from pygments.cmdline import main - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit(main()) diff --git a/test_env/bin/pylint b/test_env/bin/pylint deleted file mode 100755 index 076476a9d..000000000 --- a/test_env/bin/pylint +++ /dev/null @@ -1,11 +0,0 @@ -#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python - -# -*- coding: utf-8 -*- -import re -import sys - -from pylint import run_pylint - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit(run_pylint()) diff --git a/test_env/bin/pyreverse b/test_env/bin/pyreverse deleted file mode 100755 index 5d6dc2e31..000000000 --- a/test_env/bin/pyreverse +++ /dev/null @@ -1,11 +0,0 @@ -#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python - -# -*- coding: utf-8 -*- -import re -import sys - -from pylint import run_pyreverse - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit(run_pyreverse()) diff --git a/test_env/bin/pytest b/test_env/bin/pytest deleted file mode 100755 index b2fbdb63f..000000000 --- a/test_env/bin/pytest +++ /dev/null @@ -1,11 +0,0 @@ -#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python - -# -*- coding: utf-8 -*- -import re -import sys - -from pytest import main - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit(main()) diff --git a/test_env/bin/python b/test_env/bin/python deleted file mode 100755 index a18406382eba6fce8954e92dbc10f6d9806965d5..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 51712 zcmeHw2V7H0*Y^#Aq98>Pv7k}Gf=Ut)#kCOx2?`pC0v3oNM2LhWn*8ySA|QvUbgT&b>DfvRU2l`JV6D-}Ak9ew;aH+PQQ8b7#1BW^(4_ zr``Jrg0N!|M2&2Ms74TO5F9K7&l#u&5X7l54v36H&Nv1E1_1^E1_1^E1_1^E1_1^E z1_1^E1_1^E27&(~1kS#D^a(!lS@4mcZ3p$P1H`UQ5P1Zfyo(0l3xLF8AwME~aCl5V 
zGP;Tfa|d5^sOKtV6pIIh#}BBQoh;9;8b@EAoPcnEBqVfVu}(HhN6A#Us=gq1xEV;e z2seZd!&RI;k!=|ki?zCpByB~Mb^RRcQTj&FIm}^m0Y_RNKt?)BpEy;fP|GwBW34Zn z)_0Z8k72~U!u_d!QmsxSSEkSzto6lvQuP~HlgfZ$OIJlVB#6Zs%8_zqvRJMht|G%# z@!Qmt(uWg3=rC-pgG!-%Y}ZL-yN2-x@GVl z`gzg%meTq#Y^e|Lw?3VQOe{{8=p>a9*7sY=rt0TKgO6b|R+XhHC2R+(pGHOY?Gqjr zY3^fOx*dR%j^;u?Dv0%BQ6lL#hjD<+0v=-H2t3##Jp{tTpukcdH7khK^Fj(B2&}_^ z4v-(xoj?`>p+o&Y)R!elL9Djd~zc5Res!|f9?>}54NtcaOY0@~cGL2TQQffH? z{+&2<9@HCbq1P_pGBhiz@}A(nL|Iy$EZslUAMQ_{1ew8Mxl|x?eTa~77!qd_94a#> zN}i;VXtFrH%vaZPda5+CUaAZwJZMl0TlL~wP>M2qSNyafNK=vf;ct0zUxmFk4M@s%9ZMCl(J z3(T5isHw2q4XDi?5Qzsh1=1VQgWAH2jq^_ABoPpqw5o;zb{~6f}l;Au{R_LjvWLAP{;dFf$4CWDE&KQ&ngK~`c-&g zxD8Be1!>Xx4-cFxu#ME;3@h9>6Yi8h!Nj5)V4SaYJJHQ&Yzzvpx$@FJnM^#B(FlMD z^oQZ{_3$nY^;#j*-!k67MliJRNd@uph2&#`wKzbg`I2dddhzBg_l&Din<1z-r7DtE zRZPY5sn}3Db~MEDOagtGaRjErC1c4L<3M1M`lC=6FUT6De+?Q(!8fo`00|JJq4)|* z8G@QqT7#*qMjtA+JzbJ&HG%%R(aBu+9w=Ug>fS>8QQGH25Mt+H?Cpx!X;kcVj6Dsp zJZOYh#(h+}aUF21^e*Iu4&$&I&~>_mpUbEt$T@-?YY2ZRKY^df7bhGx;ukR4`C>(p z`d9k*&>vQl`h&&++YOZ?jMGt-q1zfN{y;_iXexd)L>Rk#Fq!b#dNqWfm{1N0gvqxS zT-)U9O!_XQ??(D{NZ*6>JxLz{HTgCq{l=u9;0*KhkeU`u?QP z1wYE*odY%Ei%h;>124uqB+Bo>^;$HWRgKg(Qm|EmLOLN{$U<3n5)*tDZ7I zR-?+4C~Oq$C6USo$kT0M-V1Jn9ja2L*(eqv(MVEcp%SI78kMmTQ!fC$ZdfXEm7OL zMl>`(+>p&AxfZ7b+3-N>G%8y<`}X47xSzgIXIuLEW~3*<45N(_!oR2ii5NDYF4a~k z#M7oajSQM#q>Uo|Gvv}Vd{qAWL5q>4!*YY|dWs~&B^tR6y&}2oXdPTmZW9b@_+Se) zvf(m~OewXQX#gx8g(u5xjRoXQL`$Tpa;2>YBwngYwiQ26Cs)XIP$do@I%=j|2fb0H zl-nLtEHcYALX}3xrebXMZHv4a$#NAtIx;MhEt2Un)HdL%%p9u7km;~}*}dd2IEIgs zrb?74pRYMS56}VA)oeaIpjw?oX$zprYxGnpR2os1Qu?>54^O*RrTBae!sID(okRgW z12n^Mrn5O@Rn{O(u1)(}7mv_Ll0J(UWKgGm;c75!0E%)Yn=ggO%2o%dtXZ@?MMM7U z`kPpaq#C(e$Hs@!=D(pz&GZ8v3CngG_@{+4`hWhHSebo*OauMS=I~ybDOM$y%KBx> zG*!VguO;+^?f_4=RF#gGf;J43OH!07E%d?|nHrv7SeEn`NVK6cnG*h;C}fdJXgRnj zF+?LvhW~c5WsT7L={xIh#zHF(_O_M9DH`3IfJbP*2dKG+|NdOBWYs23X|& z)Aa)gSmgO!hCm`VJVq4RuWxvaSU6~apkH4x9r5P`VN7^0$QBqN4uy3*rHW8f3rz4j z2JcryV<2>v96m_MwQhVtM5+mI&gf8v?>1OJvV-+e%tf%ifrw_X<^O!V1FEy>ilICv 
z&ZqfGns21}PMRO0`6-%Trui>4|DEP9Y5s}kj`X^99hx_$c}tpeY5o<>!)V@@=H^P9 z*S{0#cp1%AG#^d#Ni?5E^Y3VWmgbLXUJDi{@xEKpJe1~xX`VszX*AzR^V2l{o#xeH zp$_#lqj@))52m@6<}+x%f#yHZyo}~=Xzl@vtEjIn%|mIPK=bi5pGEWUXnu_5w`l&6 z=AI2HeO#K0Xs)99B$_Xw`EHuuqWL?TH-Lp#tQU{wi8P-~^BpvQK=VfM&|>~bny1rz z5zUX%{65W{V8IddccOU;&8N}4l;(G74p|w;AiyBNAiyBNAiyBNAiyBNAiyBNAiyAC z0|EEuUc)^2!$RwXh;i$>M4Bd+rlyHu%0-h*^w+o*hu|h#a+ACmR&Zb z#e~MoJwz2%1Z(K~9nB)M`1I za})$&VMHoc!Y*FSo6R!EKn`-#JVZ{ijKpogn6nroH8P1pp^{>FE{3%*nW@ag;8HTE zljwXR`}PY9@7Mt&*HV!hg^WaR6CG4a)76lSjwKeSr;4dv$1?a`fdreNJ~@$NqdEVt zqe_~8B!9}#3O>5Kg0|3b3Hj3t*a|?nvT)}w^*s*L68K$?pXQ|V1>Z94PlYS$*~uy# zVHF-^6;@b<$5@3YTZLy@g%??c*I0!&S%tS-g%4PTPg;eqT7~afg`Zf3Ut5K7<_-QT z@TqMT_OuG)93Hj-HU>5j?#;%Ze88G9xz`@HT@bjb1a^B6xNwXug+G0%15_8t0|=(x z2$<#~_upgtfeXR?|Nm~?sMyw13*!W=u!zGo8WsG&!#rrfxI+yWH~?7=6jcT!a)_uB zLk&@6RONVK4$>n<s#;CAP7y?U)6HWZEzmD*bB9Hs*z_ir;Zpf9e;f?UP?71UR+ferxx7-`2C$stev+dE29X_0k_5X6No* zJ6W{*QP~g8+;&V|n4Fp1YSU%>GVrlTrc%wEoZ?YXU#Ir zPQQ7k)kV*Hy*LF3LH{AbfRH099uNnNL%cW!p|ntu6$ z{MWTvHSCsVbF=MOES@X3cJ-RW@&zneHG4aj17Vp7N-fQGapUINyKr;uUM;O=$Fj3y zHNz6L9+m@?Na8thYgVr|(7t+IyMZEJU9LL@YSpbpE~T&|l~R9RORf(_I@R^2B74eU z(PcQjJA%#6fS;XcV9k-|$@Rc|)#}!!^2Lx#dVFq^`n7pH9yg!^uTx;>4!ptjYX@|+ z2y(~Uu7k6!Iyir!4uM=ASqEo7x~{Nd$`--;3@qZX|DxjDTvl^STmZ>xxhz)#;%nLE zvRK6OweDY=T;4jRz45I*Hzy%&(7wTq^jq)WeSD_#+!jHd1xuQJ+v?imh3u!v!H#Lp zYtG&jZuhQ!WXQ9Gh6@JB^0l0#yTSWdZpoVTvGNGXLN|w&w;$vkYUguv#GxLOKAhUn zCTdw^mizpS%)3o?p1V+UTa;+j`&(X_%ZgU63phD#&GvV5_H|x=x{=U*(zhcXJs5tH z-Dg9W_D|N|zkFg+d-n(c~O;_~M0we}}9_T#-bD&O^(wDr`AkjM24 zy!y}Hd-hKI?+%p@>YLD57TBuV`jizjdgWZCUXn7h^G@oBkZ_bGzuK3meSaQC0R zdgu6>IXUN=w73~OPb~SAXIG6yuvX-3=H_hVx&XZ1t}K?x-X0#Qd~RI~``SBmYgKn_ zTOFRM8r7<5zmA9=@0iH#h#8vNbGYrfZI`xL+A7~SRi{(KxHWPl zxg!9U1z~BABf<4Qsf43}C(mE1QCPMrJnhh`EJCgQ?o~{^5j*O6m96tTzbJD-lP&Vu z#||C-we7hlH4@Zsu6G?Qh`7;Nk$qBImegp+S0`5tZgRWex^M3p3!Z4*W@r;`zg#q~ z%r)oLx_JlVl?z|$iT!8$nd~mQeE8$VOHIRo%_m#Al&!ruuV}=*dG94B`aQ3|*(>EK zxAbVO$H&8RR_gaW%iIy%xS18ho5vFrRxprS;C==4V}B 
zvZ_nd{z+Y$KN}|++tBIo)99O#Vb3Q`I`h?#U5m=M>=+WW=IEt(y=KSNQ(M=1c?;+C z@V-7`WAK47S-kzyU9a=<>X)ZaKJuH_OwJWm%&K{te-2wvLp^;`%aJ!=MA*a4t+s+5 z5$vom`cI7r+>YE10YL%04&1EM5)4qxi@cc!hY*>u{CCK1914v!ME zCboajad}+Sz=wqm^kr}RDE_GNZF8=j(X0C#Va8!=0omWN_4*AKmQ{azBT%*r;$j_f>p!p-g<N3sZ_5x3ORUxS<_erQY6y#htRF=ULZ9-CnefY~Aqqr!ytHT&^WH zzWigilCQpd>FPpEciP;{(_eR~Ywa6Jp5LzcdU}Lt;KA=X{H<@^-&UW#w<0QhT+^sD z;cUMCh%U*=Z34d_a5D7f_=kh-1E!>IE%GjtX-z4Kvb~X4GnWK<$d-+qd7?el>2q15 zVHnJ`>h{$?*TZdg;4k*@To(T4@n+lSvK--x_CN&vuS%Niz~bgS=lYpP7(13{6YT%m zzZ?L#IfpAU+i`QYH_X`@BU8vFuqB5Lo1b*4axGh0@o!AVPRe3Slv!-GL<4(gBwB3- zEUmL8u+u^f8&R;#a3~`~QnGQ7fq#DR+efQ!z*Ys=6@WV7cb76H?9#L9n&j{`wBx*}_M>(@nzG>L zi;)YSXI>b|?RT(g-h{kH^SVWJdcJN!$kcW}Z7!~Jb^lB4g2xwjD>!dOBZefGN&`o}{;Ed1*az2Lo4H)yt!>b+On9K02)1G|Batwc)K6w@d!7 zdhwwrf7HRn=d0fvU+-uu&g~t?9}I|@JDJlWR(7}C?^^$DE#BGx{QAMtTbJfP zPjq}HE_k{*LXoj~^OW;-Z;w9sFo~~QJ)y(1w}W-boqG&Yo^4k;Y(kr^_sjZ;YrmM$ zS{0j9(t2AHuHE1IIyH#vj6)Ln9%IKPs=TGxe2vLv zH8zi7bwRx=kW-a0>?1d*Vpw8%wz2sRQ#mZiB<5FZ>*37#zjHVXLUfwi`*YiI{g$>~ z+A`mVMgU&b=*?aD7KghWaZ43$?82A0DzBV1vbmn9#uwi%t=~GSPaA1+bx(Uc__(S1 ztwRo9=>!kEExT#l+NZu05Wo^?uU7@6*YVn^I@W z`ZvGzF{CJHTWUzm9^a{_-klj+C-!RTxq%KPy*HOgKFk_BEm3p6^)~Oe>DT+DEOGd7 z@BX0PJrkRbnb>Lnr`?m=20F|XPT2Wr{*7vT3`x%$q>r3&JL7D}LA*rA^aNI8nJ8vP z=BS1Bo!pwt+}p7@wojP<+-s|O%bqqKK6BmxZQYXo!Fzp{FXeU_Ei2eiy0_lTMDDRg zEBx$l9XT5@F@4ELQ~2Sf4K{S$^Ig%*x4VuV|GK05BO`0~whTe;N4 z!^qYwZPe%P{^SqAwdb39@_D(e9q?Fgh0jNBPT@a3Oxjf$z%4&zE!qyg$Kb7iXgcPy z;?~(N&&$?``eeN;c)G&#-K>DEy<2M16rTRSxWzA;pE>+Stur;eM;~V`{Eb)ibfwR! 
zK8dpf?mj*Fv-k8<%N9F+G7YJF|9Tgvi;8WVMmTT$w&c^?Zw>Wptg#nAxfWgYO`)LS zYu>KOk#9V%*7830cGb9|5#PP_;BH=Rydx7#a5+2p=dL;XE~_StefV(tm=i^VUq9^D zz)KuvYkY3Tg_Q`KdwR8tixX)O8D`aPovSUULjLOslPPa zleEC#`;?owF&!sX7{?&MAiyBNAiyBNAiyBNAiyBNAiyBNAiyBNAiyBNAiyBNAiyBN zAiyBNAiyB---5sy%KksOMH06|;(p=Qu*n$g|HFvTYg^dNhdsoU5qUK0PwK2{-`>NQAVxvF2W^J{ zMAA7r5ZNf;w$Lxx@Sg%Yeu8{cKnG%0;bytPvJZiDF!S~=*}@lo#uh%9M9MBdzyu?l9Za`nMgTK?gC7&d{~c3$a-kp4p9h}yBnLP(i#gbnuD_n-#XiI3qMtdLY=m^21{B&|0DgBru6 z6QtaMl+Ti~4=I=8r3~#*&PEIH`n=X4^KKl=>k1ro;$`zfARsiApe`eayrx@Tsp~{N znQotop9qG$O_(Xx_z}LVCmeAjP5g^1s#CagFI+0mR9J!+)A>($=p1MKoysbh@X%fu zM=LPipd!K8fBVAj6OP*FnKCZyB;cY#r~%f_Q1TQ|h&3is84LGeRUfniC8?N=PBxVw zRz_(>ZK!;%6Mv*C9V;@GARasMCWYXPAK;@8z|cQ{m^?4qUQkdW< zf(C42kr+)TlFv{P4`l;lhAQ4b(_%BDia0VC`4koPCv#ytVXg}nxrURu$mgyok<8VY z&b7gu%Q2RBLu8y|4m!sg$5+TA-VNhODukT@E}H^M;F-XiiZ!O1GuoRn98D#7Ke0wp zMWmr%3*G@%gg?!2$~l5VLZWyGoC2VPbQmb&V#yIA z5pJ7jiZJ9CgDnKZRJ89OC+u&WV^(Ny$S)^TPSPpgm{Xh$`Hvt@Xw1iF>xf&T*nAcM z&PN|`8aL@6Jol_RFqz#l!%few0HRclxE25Z4d(!4$+VRR0l@oJ75o1XB*XBTLavYF3nEfISffUVGJL7QdL9ct zOtJKz-CS7LUJkSYXgg3T&`F@5fXaZ3K+l0b09A+eYd4^VKt4eJK*2zvKv6)kK)A*| z94H-VBv39;G0;4q5}+$UuYl^bf-}8=dIKc^eFHQPXgkm)pyxm?tzjJwC>$sWXbjK- zplv{>f$jm}LS18^K%m}0GN1yWML;D$XMyemIrtGo6QE$AAwVO6ih_`wkL=lKq{blK*xa|0Wpq2fI)ykfI)yk zfI)ykfI)ykfI)ykfI)yk;Gagoy}9$S*gDN-`(@X)Q?t}|{h>l&w%*oR0FJiK0(c2) zJfy?30Gz9y1>i<1syGY4gVJkh;v33%SzXP17C>VJYJEY1Sx3c)I80fd+%%of3+e3(U?`6K{5 zoxtKCfG7x3=K%N&>>C*$z~u(wmGR7iDkj4rZU7l)J`MnnF)*7CFNTKs51$24)NGw2 zKA&8eE;6450DB3ja{%0^eb|shPA0*W5DKL1PsunHMRc-|BP?V~d;bcHcuPC~lP%(B zTF92R{nuE;Z?cdr?fF~U@;_;j=c(N^4uk;K1gZt(1mp}<8wgg{89V(7=|5nnzp@Wj?S0tO@EDGY9{0Bl_Wzsg^@sek z_WGwK9=?9O@cx@EE8WiBeBQTva&}*rc2oO4u)DL`$7kNj2KT4TzTC6?)bhbo=f51* z((Cd|`48g-&L90wcUaZ!WL(`|-|rHvTG{`2q0C)7D(vf8Rr%`!i#&ACKT>?DaGD`Yr6@ z+B9_gMQh%Sy?(}C|G)VZkuMpxX6*IjAsvjpe#TxuW3T^TI0J~W*U#ANXYBP?J${6- z*U#AN$Gv@(XT1HN*y~>@Y25VN$^D{-rhN0;?}Tvo!;y*`MegxEKeTo~_U89F-i5!e zO&|RzFRR>l_U&0Ues0yd)00a<&r10$X{zw_FQ>;|Tkjv&_j$pRR;4FmJNXY=_-s;5ulGK>vAP8#mjp}4 
zcZhNS;g3u0Qzg00=giPY7wnebO#9wRRr0;4^@&qm0!sHz-RSL6YhA>)j=lS4%#j!7 zOi8~Gn0{_d;<|fV4}G(= (3, 2): - valid_opts.insert(-1, 'extension-suffix') - valid_opts.append('abiflags') -if sys.version_info >= (3, 3): - valid_opts.append('configdir') - - -def exit_with_usage(code=1): - sys.stderr.write("Usage: {0} [{1}]\n".format( - sys.argv[0], '|'.join('--'+opt for opt in valid_opts))) - sys.exit(code) - -try: - opts, args = getopt.getopt(sys.argv[1:], '', valid_opts) -except getopt.error: - exit_with_usage() - -if not opts: - exit_with_usage() - -pyver = sysconfig.get_config_var('VERSION') -getvar = sysconfig.get_config_var - -opt_flags = [flag for (flag, val) in opts] - -if '--help' in opt_flags: - exit_with_usage(code=0) - -for opt in opt_flags: - if opt == '--prefix': - print(sysconfig.get_config_var('prefix')) - - elif opt == '--exec-prefix': - print(sysconfig.get_config_var('exec_prefix')) - - elif opt in ('--includes', '--cflags'): - flags = ['-I' + sysconfig.get_path('include'), - '-I' + sysconfig.get_path('platinclude')] - if opt == '--cflags': - flags.extend(getvar('CFLAGS').split()) - print(' '.join(flags)) - - elif opt in ('--libs', '--ldflags'): - abiflags = getattr(sys, 'abiflags', '') - libs = ['-lpython' + pyver + abiflags] - libs += getvar('LIBS').split() - libs += getvar('SYSLIBS').split() - # add the prefix/lib/pythonX.Y/config dir, but only if there is no - # shared library in prefix/lib/. 
- if opt == '--ldflags': - if not getvar('Py_ENABLE_SHARED'): - libs.insert(0, '-L' + getvar('LIBPL')) - if not getvar('PYTHONFRAMEWORK'): - libs.extend(getvar('LINKFORSHARED').split()) - print(' '.join(libs)) - - elif opt == '--extension-suffix': - ext_suffix = sysconfig.get_config_var('EXT_SUFFIX') - if ext_suffix is None: - ext_suffix = sysconfig.get_config_var('SO') - print(ext_suffix) - - elif opt == '--abiflags': - if not getattr(sys, 'abiflags', None): - exit_with_usage() - print(sys.abiflags) - - elif opt == '--configdir': - print(sysconfig.get_config_var('LIBPL')) diff --git a/test_env/bin/python2 b/test_env/bin/python2 deleted file mode 120000 index d8654aa0e..000000000 --- a/test_env/bin/python2 +++ /dev/null @@ -1 +0,0 @@ -python \ No newline at end of file diff --git a/test_env/bin/python2.7 b/test_env/bin/python2.7 deleted file mode 120000 index d8654aa0e..000000000 --- a/test_env/bin/python2.7 +++ /dev/null @@ -1 +0,0 @@ -python \ No newline at end of file diff --git a/test_env/bin/rst2html.py b/test_env/bin/rst2html.py deleted file mode 100755 index 61577783b..000000000 --- a/test_env/bin/rst2html.py +++ /dev/null @@ -1,23 +0,0 @@ -#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python - -# $Id: rst2html.py 4564 2006-05-21 20:44:42Z wiemann $ -# Author: David Goodger -# Copyright: This module has been placed in the public domain. - -""" -A minimal front end to the Docutils Publisher, producing HTML. -""" - -try: - import locale - locale.setlocale(locale.LC_ALL, '') -except: - pass - -from docutils.core import publish_cmdline, default_description - - -description = ('Generates (X)HTML documents from standalone reStructuredText ' - 'sources. 
' + default_description) - -publish_cmdline(writer_name='html', description=description) diff --git a/test_env/bin/rst2html4.py b/test_env/bin/rst2html4.py deleted file mode 100755 index a1dcbdf0a..000000000 --- a/test_env/bin/rst2html4.py +++ /dev/null @@ -1,26 +0,0 @@ -#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python - -# $Id: rst2html4.py 7994 2016-12-10 17:41:45Z milde $ -# Author: David Goodger -# Copyright: This module has been placed in the public domain. - -""" -A minimal front end to the Docutils Publisher, producing (X)HTML. - -The output conforms to XHTML 1.0 transitional -and almost to HTML 4.01 transitional (except for closing empty tags). -""" - -try: - import locale - locale.setlocale(locale.LC_ALL, '') -except: - pass - -from docutils.core import publish_cmdline, default_description - - -description = ('Generates (X)HTML documents from standalone reStructuredText ' - 'sources. ' + default_description) - -publish_cmdline(writer_name='html4', description=description) diff --git a/test_env/bin/rst2html5.py b/test_env/bin/rst2html5.py deleted file mode 100755 index 40bad1c6f..000000000 --- a/test_env/bin/rst2html5.py +++ /dev/null @@ -1,35 +0,0 @@ -#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python -# -*- coding: utf8 -*- -# :Copyright: © 2015 Günter Milde. -# :License: Released under the terms of the `2-Clause BSD license`_, in short: -# -# Copying and distribution of this file, with or without modification, -# are permitted in any medium without royalty provided the copyright -# notice and this notice are preserved. -# This file is offered as-is, without any warranty. -# -# .. _2-Clause BSD license: http://www.spdx.org/licenses/BSD-2-Clause -# -# Revision: $Revision: 7847 $ -# Date: $Date: 2015-03-17 18:30:47 +0100 (Di, 17 Mär 2015) $ - -""" -A minimal front end to the Docutils Publisher, producing HTML 5 documents. - -The output also conforms to XHTML 1.0 transitional -(except for the doctype declaration). 
-""" - -try: - import locale # module missing in Jython - locale.setlocale(locale.LC_ALL, '') -except locale.Error: - pass - -from docutils.core import publish_cmdline, default_description - -description = (u'Generates HTML 5 documents from standalone ' - u'reStructuredText sources ' - + default_description) - -publish_cmdline(writer_name='html5', description=description) diff --git a/test_env/bin/rst2latex.py b/test_env/bin/rst2latex.py deleted file mode 100755 index 6c1c5bec2..000000000 --- a/test_env/bin/rst2latex.py +++ /dev/null @@ -1,26 +0,0 @@ -#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python - -# $Id: rst2latex.py 5905 2009-04-16 12:04:49Z milde $ -# Author: David Goodger -# Copyright: This module has been placed in the public domain. - -""" -A minimal front end to the Docutils Publisher, producing LaTeX. -""" - -try: - import locale - locale.setlocale(locale.LC_ALL, '') -except: - pass - -from docutils.core import publish_cmdline - -description = ('Generates LaTeX documents from standalone reStructuredText ' - 'sources. ' - 'Reads from (default is stdin) and writes to ' - ' (default is stdout). See ' - ' for ' - 'the full reference.') - -publish_cmdline(writer_name='latex', description=description) diff --git a/test_env/bin/rst2man.py b/test_env/bin/rst2man.py deleted file mode 100755 index 5db6ead2c..000000000 --- a/test_env/bin/rst2man.py +++ /dev/null @@ -1,26 +0,0 @@ -#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python - -# Author: -# Contact: grubert@users.sf.net -# Copyright: This module has been placed in the public domain. - -""" -man.py -====== - -This module provides a simple command line interface that uses the -man page writer to output from ReStructuredText source. -""" - -import locale -try: - locale.setlocale(locale.LC_ALL, '') -except: - pass - -from docutils.core import publish_cmdline, default_description -from docutils.writers import manpage - -description = ("Generates plain unix manual documents. 
" + default_description) - -publish_cmdline(writer=manpage.Writer(), description=description) diff --git a/test_env/bin/rst2odt.py b/test_env/bin/rst2odt.py deleted file mode 100755 index 5644184f5..000000000 --- a/test_env/bin/rst2odt.py +++ /dev/null @@ -1,30 +0,0 @@ -#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python - -# $Id: rst2odt.py 5839 2009-01-07 19:09:28Z dkuhlman $ -# Author: Dave Kuhlman -# Copyright: This module has been placed in the public domain. - -""" -A front end to the Docutils Publisher, producing OpenOffice documents. -""" - -import sys -try: - import locale - locale.setlocale(locale.LC_ALL, '') -except: - pass - -from docutils.core import publish_cmdline_to_binary, default_description -from docutils.writers.odf_odt import Writer, Reader - - -description = ('Generates OpenDocument/OpenOffice/ODF documents from ' - 'standalone reStructuredText sources. ' + default_description) - - -writer = Writer() -reader = Reader() -output = publish_cmdline_to_binary(reader=reader, writer=writer, - description=description) - diff --git a/test_env/bin/rst2odt_prepstyles.py b/test_env/bin/rst2odt_prepstyles.py deleted file mode 100755 index 57dcb5667..000000000 --- a/test_env/bin/rst2odt_prepstyles.py +++ /dev/null @@ -1,67 +0,0 @@ -#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python - -# $Id: rst2odt_prepstyles.py 5839 2009-01-07 19:09:28Z dkuhlman $ -# Author: Dave Kuhlman -# Copyright: This module has been placed in the public domain. - -""" -Fix a word-processor-generated styles.odt for odtwriter use: Drop page size -specifications from styles.xml in STYLE_FILE.odt. 
-""" - -# -# Author: Michael Schutte - -from lxml import etree -import sys -import zipfile -from tempfile import mkstemp -import shutil -import os - -NAMESPACES = { - "style": "urn:oasis:names:tc:opendocument:xmlns:style:1.0", - "fo": "urn:oasis:names:tc:opendocument:xmlns:xsl-fo-compatible:1.0" -} - -def prepstyle(filename): - - zin = zipfile.ZipFile(filename) - styles = zin.read("styles.xml") - - root = etree.fromstring(styles) - for el in root.xpath("//style:page-layout-properties", - namespaces=NAMESPACES): - for attr in el.attrib: - if attr.startswith("{%s}" % NAMESPACES["fo"]): - del el.attrib[attr] - - tempname = mkstemp() - zout = zipfile.ZipFile(os.fdopen(tempname[0], "w"), "w", - zipfile.ZIP_DEFLATED) - - for item in zin.infolist(): - if item.filename == "styles.xml": - zout.writestr(item, etree.tostring(root)) - else: - zout.writestr(item, zin.read(item.filename)) - - zout.close() - zin.close() - shutil.move(tempname[1], filename) - - -def main(): - args = sys.argv[1:] - if len(args) != 1: - print >> sys.stderr, __doc__ - print >> sys.stderr, "Usage: %s STYLE_FILE.odt\n" % sys.argv[0] - sys.exit(1) - filename = args[0] - prepstyle(filename) - -if __name__ == '__main__': - main() - - -# vim:tw=78:sw=4:sts=4:et: diff --git a/test_env/bin/rst2pseudoxml.py b/test_env/bin/rst2pseudoxml.py deleted file mode 100755 index 8d4711b24..000000000 --- a/test_env/bin/rst2pseudoxml.py +++ /dev/null @@ -1,23 +0,0 @@ -#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python - -# $Id: rst2pseudoxml.py 4564 2006-05-21 20:44:42Z wiemann $ -# Author: David Goodger -# Copyright: This module has been placed in the public domain. - -""" -A minimal front end to the Docutils Publisher, producing pseudo-XML. -""" - -try: - import locale - locale.setlocale(locale.LC_ALL, '') -except: - pass - -from docutils.core import publish_cmdline, default_description - - -description = ('Generates pseudo-XML from standalone reStructuredText ' - 'sources (for testing purposes). 
' + default_description) - -publish_cmdline(description=description) diff --git a/test_env/bin/rst2s5.py b/test_env/bin/rst2s5.py deleted file mode 100755 index b97a6643b..000000000 --- a/test_env/bin/rst2s5.py +++ /dev/null @@ -1,24 +0,0 @@ -#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python - -# $Id: rst2s5.py 4564 2006-05-21 20:44:42Z wiemann $ -# Author: Chris Liechti -# Copyright: This module has been placed in the public domain. - -""" -A minimal front end to the Docutils Publisher, producing HTML slides using -the S5 template system. -""" - -try: - import locale - locale.setlocale(locale.LC_ALL, '') -except: - pass - -from docutils.core import publish_cmdline, default_description - - -description = ('Generates S5 (X)HTML slideshow documents from standalone ' - 'reStructuredText sources. ' + default_description) - -publish_cmdline(writer_name='s5', description=description) diff --git a/test_env/bin/rst2xetex.py b/test_env/bin/rst2xetex.py deleted file mode 100755 index 869a93034..000000000 --- a/test_env/bin/rst2xetex.py +++ /dev/null @@ -1,27 +0,0 @@ -#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python - -# $Id: rst2xetex.py 7847 2015-03-17 17:30:47Z milde $ -# Author: Guenter Milde -# Copyright: This module has been placed in the public domain. - -""" -A minimal front end to the Docutils Publisher, producing Lua/XeLaTeX code. -""" - -try: - import locale - locale.setlocale(locale.LC_ALL, '') -except: - pass - -from docutils.core import publish_cmdline - -description = ('Generates LaTeX documents from standalone reStructuredText ' - 'sources for compilation with the Unicode-aware TeX variants ' - 'XeLaTeX or LuaLaTeX. ' - 'Reads from (default is stdin) and writes to ' - ' (default is stdout). 
See ' - ' for ' - 'the full reference.') - -publish_cmdline(writer_name='xetex', description=description) diff --git a/test_env/bin/rst2xml.py b/test_env/bin/rst2xml.py deleted file mode 100755 index 8d6c45db3..000000000 --- a/test_env/bin/rst2xml.py +++ /dev/null @@ -1,23 +0,0 @@ -#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python - -# $Id: rst2xml.py 4564 2006-05-21 20:44:42Z wiemann $ -# Author: David Goodger -# Copyright: This module has been placed in the public domain. - -""" -A minimal front end to the Docutils Publisher, producing Docutils XML. -""" - -try: - import locale - locale.setlocale(locale.LC_ALL, '') -except: - pass - -from docutils.core import publish_cmdline, default_description - - -description = ('Generates Docutils-native XML from standalone ' - 'reStructuredText sources. ' + default_description) - -publish_cmdline(writer_name='xml', description=description) diff --git a/test_env/bin/rstpep2html.py b/test_env/bin/rstpep2html.py deleted file mode 100755 index 753d78325..000000000 --- a/test_env/bin/rstpep2html.py +++ /dev/null @@ -1,25 +0,0 @@ -#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python - -# $Id: rstpep2html.py 4564 2006-05-21 20:44:42Z wiemann $ -# Author: David Goodger -# Copyright: This module has been placed in the public domain. - -""" -A minimal front end to the Docutils Publisher, producing HTML from PEP -(Python Enhancement Proposal) documents. -""" - -try: - import locale - locale.setlocale(locale.LC_ALL, '') -except: - pass - -from docutils.core import publish_cmdline, default_description - - -description = ('Generates (X)HTML from reStructuredText-format PEP files. 
' - + default_description) - -publish_cmdline(reader_name='pep', writer_name='pep_html', - description=description) diff --git a/test_env/bin/sphinx-apidoc b/test_env/bin/sphinx-apidoc deleted file mode 100755 index afd35fa1d..000000000 --- a/test_env/bin/sphinx-apidoc +++ /dev/null @@ -1,11 +0,0 @@ -#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python - -# -*- coding: utf-8 -*- -import re -import sys - -from sphinx.ext.apidoc import main - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit(main()) diff --git a/test_env/bin/sphinx-autogen b/test_env/bin/sphinx-autogen deleted file mode 100755 index c49b281f1..000000000 --- a/test_env/bin/sphinx-autogen +++ /dev/null @@ -1,11 +0,0 @@ -#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python - -# -*- coding: utf-8 -*- -import re -import sys - -from sphinx.ext.autosummary.generate import main - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit(main()) diff --git a/test_env/bin/sphinx-build b/test_env/bin/sphinx-build deleted file mode 100755 index 59d142cb9..000000000 --- a/test_env/bin/sphinx-build +++ /dev/null @@ -1,11 +0,0 @@ -#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python - -# -*- coding: utf-8 -*- -import re -import sys - -from sphinx.cmd.build import main - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit(main()) diff --git a/test_env/bin/sphinx-quickstart b/test_env/bin/sphinx-quickstart deleted file mode 100755 index 476d96286..000000000 --- a/test_env/bin/sphinx-quickstart +++ /dev/null @@ -1,11 +0,0 @@ -#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python - -# -*- coding: utf-8 -*- -import re -import sys - -from sphinx.cmd.quickstart import main - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit(main()) diff --git 
a/test_env/bin/symilar b/test_env/bin/symilar deleted file mode 100755 index a49fa4708..000000000 --- a/test_env/bin/symilar +++ /dev/null @@ -1,11 +0,0 @@ -#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python - -# -*- coding: utf-8 -*- -import re -import sys - -from pylint import run_symilar - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit(run_symilar()) diff --git a/test_env/bin/tox b/test_env/bin/tox deleted file mode 100755 index 91cad0cd3..000000000 --- a/test_env/bin/tox +++ /dev/null @@ -1,11 +0,0 @@ -#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python - -# -*- coding: utf-8 -*- -import re -import sys - -from tox import cmdline - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit(cmdline()) diff --git a/test_env/bin/tox-quickstart b/test_env/bin/tox-quickstart deleted file mode 100755 index ff9d96b14..000000000 --- a/test_env/bin/tox-quickstart +++ /dev/null @@ -1,11 +0,0 @@ -#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python - -# -*- coding: utf-8 -*- -import re -import sys - -from tox._quickstart import main - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit(main()) diff --git a/test_env/bin/virtualenv b/test_env/bin/virtualenv deleted file mode 100755 index f279ccd05..000000000 --- a/test_env/bin/virtualenv +++ /dev/null @@ -1,11 +0,0 @@ -#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python - -# -*- coding: utf-8 -*- -import re -import sys - -from virtualenv import main - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit(main()) diff --git a/test_env/bin/wheel b/test_env/bin/wheel deleted file mode 100755 index 3e42eeba9..000000000 --- a/test_env/bin/wheel +++ /dev/null @@ -1,11 +0,0 @@ -#!/Users/erikadsouza/workspace/public/python-sdk/test_env/bin/python - -# 
-*- coding: utf-8 -*- -import re -import sys - -from wheel.tool import main - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit(main()) diff --git a/test_env/include/python2.7 b/test_env/include/python2.7 deleted file mode 120000 index 3fe034fcc..000000000 --- a/test_env/include/python2.7 +++ /dev/null @@ -1 +0,0 @@ -/System/Library/Frameworks/Python.framework/Versions/2.7/include/python2.7 \ No newline at end of file diff --git a/test_env/pip-selfcheck.json b/test_env/pip-selfcheck.json deleted file mode 100644 index 5e657e400..000000000 --- a/test_env/pip-selfcheck.json +++ /dev/null @@ -1 +0,0 @@ -{"last_check":"2018-02-14T18:10:50Z","pypi_version":"9.0.1"} \ No newline at end of file From 885f6800979ff01c36c4bcbef3d3b1cac48cd109 Mon Sep 17 00:00:00 2001 From: Erika Dsouza Date: Fri, 2 Mar 2018 17:59:16 -0500 Subject: [PATCH 18/45] feat(SpeechToText): Add support for Web Sockets (#376) * Initial implementation of set using web sockets * Speech to text using web sockets * fix: Adding the pyOpenSSL * docs: Adding IBM copyright * refactor: Following clean coding practices better naming conventions, added method description in the method body * refactor(Better naming conventions): * refactor: removing the extra close signal * refactor: naming conventions, error message * Refactor: remove print * feat: Adding microphone example for speech to text --- examples/microphone-speech-to-text.py | 64 +++++++ examples/speech_to_text_v1.py | 36 +++- requirements-dev.txt | 6 + requirements.txt | 4 + test/integration/test_examples.py | 2 +- watson_developer_cloud/speech_to_text_v1.py | 97 ++++++++++- watson_developer_cloud/websocket/__init__.py | 18 ++ .../websocket/recognize_abstract_callback.py | 59 +++++++ .../speech_to_text_websocket_listener.py | 163 ++++++++++++++++++ 9 files changed, 445 insertions(+), 4 deletions(-) create mode 100644 examples/microphone-speech-to-text.py create mode 100644 
watson_developer_cloud/websocket/__init__.py create mode 100644 watson_developer_cloud/websocket/recognize_abstract_callback.py create mode 100644 watson_developer_cloud/websocket/speech_to_text_websocket_listener.py diff --git a/examples/microphone-speech-to-text.py b/examples/microphone-speech-to-text.py new file mode 100644 index 000000000..14e5c9a6e --- /dev/null +++ b/examples/microphone-speech-to-text.py @@ -0,0 +1,64 @@ +from __future__ import print_function +import pyaudio +import tempfile +from watson_developer_cloud import SpeechToTextV1 +from watson_developer_cloud.websocket import RecognizeCallback, RecognizeListener + +speech_to_text = SpeechToTextV1( + username='YOUR SERVICE USERNAME', + password='YOUR SERVICE PASSWORD', + url='https://stream.watsonplatform.net/speech-to-text/api') + +# Example using websockets +class MyRecognizeCallback(RecognizeCallback): + def __init__(self): + pass + + def on_transcription(self, transcript): + print(transcript) + + def on_connected(self): + print('Connection was successful') + + def on_error(self, error): + print('Error received: {}'.format(error)) + + def on_inactivity_timeout(self, error): + print('Inactivity timeout: {}'.format(error)) + + def on_listening(self): + print('Service is listening') + + def on_transcription_complete(self): + print('Transcription completed') + + def on_hypothesis(self, hypothesis): + print(hypothesis) + +mycallback = MyRecognizeCallback() +tmp = tempfile.NamedTemporaryFile() + +FORMAT = pyaudio.paInt16 +CHANNELS = 1 +RATE = 44100 +CHUNK = 1024 +RECORD_SECONDS = 5 + +audio = pyaudio.PyAudio() +stream = audio.open(format=FORMAT, channels=CHANNELS, + rate=RATE, input=True, + frames_per_buffer=CHUNK) + +print('recording....') +with open(tmp.name, 'w') as f: + for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)): + data = stream.read(CHUNK) + f.write(data) + +stream.stop_stream() +stream.close() +audio.terminate() +print('Done recording...') + +with open(tmp.name) as f: + 
speech_to_text.recognize_with_websocket(audio=f, recognize_callback=mycallback) \ No newline at end of file diff --git a/examples/speech_to_text_v1.py b/examples/speech_to_text_v1.py index b89e99d51..56f606aa6 100644 --- a/examples/speech_to_text_v1.py +++ b/examples/speech_to_text_v1.py @@ -2,10 +2,12 @@ import json from os.path import join, dirname from watson_developer_cloud import SpeechToTextV1 +from watson_developer_cloud.websocket import RecognizeCallback, RecognizeListener speech_to_text = SpeechToTextV1( username='YOUR SERVICE USERNAME', - password='YOUR SERVICE PASSWORD') + password='YOUR SERVICE PASSWORD', + url='https://stream.watsonplatform.net/speech-to-text/api') print(json.dumps(speech_to_text.list_models(), indent=2)) @@ -21,3 +23,35 @@ timestamps=True, word_confidence=True), indent=2)) + +# Example using websockets +class MyRecognizeCallback(RecognizeCallback): + def __init__(self): + pass + + def on_transcription(self, transcript): + print(transcript) + + def on_connected(self): + print('Connection was successful') + + def on_error(self, error): + print('Error received: {}'.format(error)) + + def on_inactivity_timeout(self, error): + print('Inactivity timeout: {}'.format(error)) + + def on_listening(self): + print('Service is listening') + + def on_transcription_complete(self): + print('Transcription completed') + + def on_hypothesis(self, hypothesis): + print(hypothesis) + +mycallback = MyRecognizeCallback() +with open(join(dirname(__file__), '../resources/speech.wav'), + 'rb') as audio_file: + speech_to_text.recognize_with_websocket( + audio=audio_file, recognize_callback=mycallback) diff --git a/requirements-dev.txt b/requirements-dev.txt index 26f018418..d799f934e 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -15,3 +15,9 @@ pytest-cov>=2.2.1 recommonmark>=0.2.0 Sphinx>=1.3.1 bumpversion>=0.5.3 + +# Web sockets +autobahn>=0.10.9 +Twisted>=13.2.0 +pyOpenSSL>=16.2.0 +service-identity>=17.0.0 diff --git a/requirements.txt 
b/requirements.txt index bc1b2947d..bc4a238b1 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,2 +1,6 @@ requests>=2.0,<3.0 python_dateutil>=2.5.3 +autobahn>=0.10.9 +Twisted>=13.2.0 +pyOpenSSL>=16.2.0 +service-identity>=17.0.0 \ No newline at end of file diff --git a/test/integration/test_examples.py b/test/integration/test_examples.py index 8c50fe682..6bd51b9fb 100644 --- a/test/integration/test_examples.py +++ b/test/integration/test_examples.py @@ -10,7 +10,7 @@ from glob import glob # tests to exclude -excludes = ['authorization_v1.py', 'discovery_v1.ipynb', '__init__.py'] +excludes = ['authorization_v1.py', 'discovery_v1.ipynb', '__init__.py', 'microphone-speech-to-text.py'] # examples path. /examples examples_path = join(dirname(__file__), '../', 'examples', '*.py') diff --git a/watson_developer_cloud/speech_to_text_v1.py b/watson_developer_cloud/speech_to_text_v1.py index 69a2d76c1..eb5c6ded8 100644 --- a/watson_developer_cloud/speech_to_text_v1.py +++ b/watson_developer_cloud/speech_to_text_v1.py @@ -25,9 +25,12 @@ from __future__ import absolute_import import json -from .watson_service import WatsonService +from .watson_service import WatsonService, _remove_null_values from .utils import deprecated - +from watson_developer_cloud.websocket import RecognizeCallback, RecognizeListener +from os.path import isfile +import base64 +import urllib ############################################################################## # Service ############################################################################## @@ -183,6 +186,96 @@ def recognize(self, accept_json=True) return response + def recognize_with_websocket(self, + audio=None, + content_type='audio/l16; rate=44100', + model='en-US_BroadbandModel', + recognize_callback=None, + customization_id=None, + acoustic_customization_id=None, + customization_weight=None, + version=None, + inactivity_timeout=None, + interim_results=True, + keywords=None, + keywords_threshold=None, + max_alternatives=1, + 
word_alternatives_threshold=None, + word_confidence=False, + timestamps=False, + profanity_filter=None, + smart_formatting=False, + speaker_labels=None): + """ + Sends audio for speech recognition using web sockets. + + :param str audio: Audio to transcribe in the format specified by the `Content-Type` header. + :param str content_type: The type of the input: audio/basic, audio/flac, audio/l16, audio/mp3, audio/mpeg, audio/mulaw, audio/ogg, audio/ogg;codecs=opus, audio/ogg;codecs=vorbis, audio/wav, audio/webm, audio/webm;codecs=opus, audio/webm;codecs=vorbis, or multipart/form-data. + :param str model: The identifier of the model to be used for the recognition request. + :param RecognizeCallback recognize_callback: The instance handling events returned from the service. + :param str customization_id: The GUID of a custom language model that is to be used with the request. The base model of the specified custom language model must match the model specified with the `model` parameter. You must make the request with service credentials created for the instance of the service that owns the custom model. By default, no custom language model is used. + :param str acoustic_customization_id: The GUID of a custom acoustic model that is to be used with the request. The base model of the specified custom acoustic model must match the model specified with the `model` parameter. You must make the request with service credentials created for the instance of the service that owns the custom model. By default, no custom acoustic model is used. + :param float customization_weight: If you specify a `customization_id` with the request, you can use the `customization_weight` parameter to tell the service how much weight to give to words from the custom language model compared to those from the base model for speech recognition. Specify a value between 0.0 and 1.0. Unless a different customization weight was specified for the custom model when it was trained, the default value is 0.3. 
A customization weight that you specify overrides a weight that was specified when the custom model was trained. The default value yields the best performance in general. Assign a higher value if your audio makes frequent use of OOV words from the custom model. Use caution when setting the weight: a higher value can improve the accuracy of phrases from the custom model's domain, but it can negatively affect performance on non-domain phrases. + :param str version: The version of the specified base `model` that is to be used for speech recognition. Multiple versions of a base model can exist when a model is updated for internal improvements. The parameter is intended primarily for use with custom models that have been upgraded for a new base model. The default value depends on whether the parameter is used with or without a custom model. For more information, see [Base model version](https://console.bluemix.net/docs/services/speech-to-text/input.html#version). + :param int inactivity_timeout: The time in seconds after which, if only silence (no speech) is detected in submitted audio, the connection is closed with a 400 error. Useful for stopping audio submission from a live microphone when a user simply walks away. Use `-1` for infinity. + :param bool interim_results: Send back non-final previews of each "sentence" as it is being processed. These results are ignored in text mode. + :param list[str] keywords: Array of keyword strings to spot in the audio. Each keyword string can include one or more tokens. Keywords are spotted only in the final hypothesis, not in interim results. If you specify any keywords, you must also specify a keywords threshold. Omit the parameter or specify an empty array if you do not need to spot keywords. + :param float keywords_threshold: Confidence value that is the lower bound for spotting a keyword. A word is considered to match a keyword if its confidence is greater than or equal to the threshold. 
Specify a probability between 0 and 1 inclusive. No keyword spotting is performed if you omit the parameter. If you specify a threshold, you must also specify one or more keywords. + :param int max_alternatives: Maximum number of alternative transcripts to be returned. By default, a single transcription is returned. + :param float word_alternatives_threshold: Confidence value that is the lower bound for identifying a hypothesis as a possible word alternative (also known as \"Confusion Networks\"). An alternative word is considered if its confidence is greater than or equal to the threshold. Specify a probability between 0 and 1 inclusive. No alternative words are computed if you omit the parameter. + :param bool word_confidence: If `true`, confidence measure per word is returned. + :param bool timestamps: If `true`, time alignment for each word is returned. + :param bool profanity_filter: If `true` (the default), filters profanity from all output except for keyword results by replacing inappropriate words with a series of asterisks. Set the parameter to `false` to return results with no censoring. Applies to US English transcription only. + :param bool smart_formatting: If `true`, converts dates, times, series of digits and numbers, phone numbers, currency values, and Internet addresses into more readable, conventional representations in the final transcript of a recognition request. If `false` (the default), no formatting is performed. Applies to US English transcription only. + :param bool speaker_labels: Indicates whether labels that identify which words were spoken by which participants in a multi-person exchange are to be included in the response. The default is `false`; no speaker labels are returned. Setting `speaker_labels` to `true` forces the `timestamps` parameter to be `true`, regardless of whether you specify `false` for the parameter. 
To determine whether a language model supports speaker labels, use the `GET /v1/models` method and check that the attribute `speaker_labels` is set to `true`. You can also refer to [Speaker labels](https://console.bluemix.net/docs/services/speech-to-text/output.html#speaker_labels). + :return: + """ + if audio is None: + raise ValueError('Audio must be provided') + if recognize_callback is None: + raise ValueError('Recognize callback must be provided') + if not isinstance(recognize_callback, RecognizeCallback): + raise Exception('Callback is not a derived class of RecognizeCallback') + + headers = {} + if self.default_headers is not None: + headers = self.default_headers.copy() + base64_authorization = base64.b64encode(self.username + ':' + self.password) + headers['Authorization'] = 'Basic {0}'.format(base64_authorization) + + url = self.url.replace('https:', 'wss:') + params = { + 'model': model, + 'customization_id': customization_id, + 'acoustic_customization_id': acoustic_customization_id, + 'customization_weight': customization_weight, + 'version': version + } + params = _remove_null_values(params) + url = url + '/v1/recognize?{0}'.format(urllib.urlencode(params)) + + options = { + 'content_type': content_type, + 'inactivity_timeout': inactivity_timeout, + 'interim_results': interim_results, + 'keywords': keywords, + 'keywords_threshold': keywords_threshold, + 'max_alternatives': max_alternatives, + 'word_alternatives_threshold': word_alternatives_threshold, + 'word_confidence': word_confidence, + 'timestamps': timestamps, + 'profanity_filter': profanity_filter, + 'smart_formatting': smart_formatting, + 'speaker_labels': speaker_labels + } + options = _remove_null_values(options) + + recognizeListener = RecognizeListener(audio, + options, + recognize_callback, + url, + headers) + ######################### # asynchronous ######################### diff --git a/watson_developer_cloud/websocket/__init__.py b/watson_developer_cloud/websocket/__init__.py new file 
mode 100644 index 000000000..ec29ef3ca --- /dev/null +++ b/watson_developer_cloud/websocket/__init__.py @@ -0,0 +1,18 @@ +# coding: utf-8 + +# Copyright 2018 IBM All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .recognize_abstract_callback import RecognizeCallback +from .speech_to_text_websocket_listener import RecognizeListener diff --git a/watson_developer_cloud/websocket/recognize_abstract_callback.py b/watson_developer_cloud/websocket/recognize_abstract_callback.py new file mode 100644 index 000000000..0b5c2f968 --- /dev/null +++ b/watson_developer_cloud/websocket/recognize_abstract_callback.py @@ -0,0 +1,59 @@ +# coding: utf-8 + +# Copyright 2018 IBM All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +class RecognizeCallback: + + def on_transcription(self, transcript): + """ + Called when an interim result is received + """ + pass + + def on_connected(self): + """ + Called when a WebSocket connection was made + """ + pass + + def on_error(self, error): + """ + Called when there is an error in the Web Socket connection. + """ + pass + + def on_inactivity_timeout(self): + """ + Called when there is an inactivity timeout. + """ + pass + + def on_listening(self): + """ + Called when the service is listening for audio. + """ + pass + + def on_transcription_complete(self): + """ + Called after the service returns the final result for the transcription. + """ + pass + + def on_hypothesis(self, hypothesis): + """ + Called when the service returns the final hypothesis + """ + pass \ No newline at end of file diff --git a/watson_developer_cloud/websocket/speech_to_text_websocket_listener.py b/watson_developer_cloud/websocket/speech_to_text_websocket_listener.py new file mode 100644 index 000000000..c317a8c95 --- /dev/null +++ b/watson_developer_cloud/websocket/speech_to_text_websocket_listener.py @@ -0,0 +1,163 @@ +# coding: utf-8 + +# Copyright 2018 IBM All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import sys +import os +import json + +# WebSockets +from autobahn.twisted.websocket import WebSocketClientProtocol, WebSocketClientFactory, connectWS +from twisted.internet import ssl, reactor + +ONE_KB = 1024 +TIMEOUT_PREFIX = "No speech detected for" +CLOSE_SIGNAL = 1000 +TEN_MILLISECONDS = 0.01 + +class RecognizeListener: + def __init__(self, audio, options, recognize_callback, url, headers): + self.audio = audio + self.options = options + self.callback = recognize_callback + self.url = url + self.headers = headers + + factory = self.WebSocketClientFactory(self.audio, self.options, + self.callback, self.url, self.headers) + factory.protocol = self.WebSocketClient + + if factory.isSecure: + contextFactory = ssl.ClientContextFactory() + else: + contextFactory = None + connectWS(factory, contextFactory) + + reactor.run() + + class WebSocketClient(WebSocketClientProtocol): + def __init__(self, factory, audio, options, callback): + self.factory = factory + self.audio = audio + self.options = options + self.callback = callback + self.isListening = False + self.bytes_sent = 0 + + super(self.__class__, self).__init__() + + def build_start_message(self, options): + options['action'] = 'start' + return options + + def build_close_message(self): + return json.dumps({'action': 'close'}) + + # helper method that sends a chunk of audio if needed (as required what the specified pacing is) + def send_audio(self, data): + def send_chunk(chunk, final=False): + self.bytes_sent += len(chunk) + self.sendMessage(chunk, isBinary=True) + if final: + self.sendMessage(b'', isBinary=True) + + if (self.bytes_sent + ONE_KB >= len(data)): + if (len(data) > self.bytes_sent): + send_chunk(data[self.bytes_sent:len(data)], True) + return + + send_chunk(data[self.bytes_sent:self.bytes_sent + ONE_KB]) + self.factory.reactor.callLater(TEN_MILLISECONDS, self.send_audio, data=data) + + def extract_transcripts(self, alternatives): + transcripts = [] + for alternative in alternatives: + transcript = 
{} + if 'confidence' in alternative: + transcript['confidence'] = alternative['confidence'] + transcript['transcript'] = alternative['transcript'] + transcripts.append(transcript) + return transcripts + + def onConnect(self, response): + self.callback.on_connected() + + def onOpen(self): + # send the initialization parameters + init_data = self.build_start_message(self.options) + self.sendMessage(json.dumps(init_data).encode('utf8')) + + # start sending audio right away (it will get buffered in the STT service) + self.send_audio(self.audio.read()) + + def onMessage(self, payload, isBinary): + json_object = json.loads(payload.decode('utf8')) + + if 'error' in json_object: + # Only call on_error() if a real error occurred. The STT service sends + # // {"error" : "No speech detected for 5s"} for valid timeouts, configured by + # options.inactivity_timeout + error = json_object['error'] + if error.startswith(TIMEOUT_PREFIX): + self.callback.on_inactivity_timeout(error) + else: + self.callback.on_error(error) + + # if uninitialized, receive the initialization response from the server + elif 'state' in json_object: + if not self.isListening: + self.isListening = True + else: + # close the connection + self.sendMessage(self.build_close_message()) + self.callback.on_transcription_complete() + self.sendClose(CLOSE_SIGNAL) + + # if in streaming + elif 'results' in json_object or 'speaker_labels' in json_object: + hypothesis = '' + # empty hypothesis + if len(json_object['results']) != 0: + hypothesis = json_object['results'][0]['alternatives'][0][ + 'transcript'] + b_final = (json_object['results'][0]['final'] == True) + transcripts = self.extract_transcripts( + json_object['results'][0]['alternatives']) + + if b_final: + self.callback.on_hypothesis(hypothesis) + else: + self.callback.on_transcription(transcripts) + + def onClose(self, wasClean, code, reason): + self.factory.endReactor() + + class WebSocketClientFactory(WebSocketClientFactory): + def __init__(self, audio, 
options, callback, url=None, headers=None): + WebSocketClientFactory.__init__(self, url=url, headers=headers) + self.audio = audio + self.options = options + self.callback = callback + self.SIX_SECONDS = 6 + self.openHandshakeTimeout = self.SIX_SECONDS + self.closeHandshakeTimeout = self.SIX_SECONDS + + def endReactor(self): + reactor.stop() + + # this function gets called every time connectWS is called (once per WebSocket connection/session) + def buildProtocol(self, addr): + return RecognizeListener.WebSocketClient( + self, self.audio, self.options, self.callback) From d487608c78fe05258cc2db0a06ab04f668d228d1 Mon Sep 17 00:00:00 2001 From: Erika Dsouza Date: Mon, 5 Mar 2018 17:48:27 -0500 Subject: [PATCH 19/45] fix(conversation): Adding two step process that supports special naming for nested models related to issue: https://github.com/watson-developer-cloud/python-sdk/issues/359 --- examples/conversation_v1.py | 25 ++++++++++ watson_developer_cloud/conversation_v1.py | 58 ++++++++++++----------- watson_developer_cloud/watson_service.py | 8 ++++ 3 files changed, 64 insertions(+), 27 deletions(-) diff --git a/examples/conversation_v1.py b/examples/conversation_v1.py index 1ac66200a..6c52dbc7e 100644 --- a/examples/conversation_v1.py +++ b/examples/conversation_v1.py @@ -134,6 +134,31 @@ values=values) print(json.dumps(response, indent=2)) +entities = [{ + 'entity': 'pattern_entity', + 'values': [{ + 'value': 'value0', 'patterns': ['\\d{6}\\w{1}\\d{7}'], 'value_type': 'patterns' + }, + {'value': 'value1', + 'patterns': ['[-9][0-9][0-9][0-9][0-9]~! 
[1-9][1-9][1-9][1-9][1-9][1-9]'], + 'value_type': 'patterns'}, + {'value': 'value2', + 'patterns': ['[a-z-9]{17}'], + 'value_type': 'patterns'}, + {'value': 'value3', + 'patterns': [ + '\\d{3}(\\ |-)\\d{3}(\\ |-)\\d{4}', + '\\(\\d{3}\\)(\\ |-)\\d{3}(\\ |-)\\d{4}'], + 'value_type': 'patterns'}, + {'value': 'value4', + 'patterns': ['\\b\\d{5}\\b'], + 'value_type': 'patterns'}] +}] +response = conversation.create_entity(workspace_id, + entity=entities[0]['entity'], + values=entities[0]['values']) +print(json.dumps(response, indent=2)) + response = conversation.get_entity(workspace_id=workspace_id, entity='test_entity', export=True) diff --git a/watson_developer_cloud/conversation_v1.py b/watson_developer_cloud/conversation_v1.py index f9c9de6a4..fd6c0a7e0 100644 --- a/watson_developer_cloud/conversation_v1.py +++ b/watson_developer_cloud/conversation_v1.py @@ -34,11 +34,6 @@ class ConversationV1(WatsonService): """The Conversation V1 service.""" default_url = 'https://gateway.watsonplatform.net/conversation/api' - VERSION_DATE_2017_05_26 = '2017-05-26' - VERSION_DATE_2017_04_21 = '2017-04-21' - VERSION_DATE_2017_02_03 = '2017-02-03' - VERSION_DATE_2016_09_20 = '2016-09-20' - VERSION_DATE_2016_07_11 = '2016-07-11' def __init__(self, version, url=default_url, username=None, password=None): """ @@ -115,13 +110,18 @@ def create_workspace(self, :rtype: dict """ if intents is not None: - intents = [self._convert_model(x) for x in intents] + intents = [self._convert_model(x, CreateIntent) for x in intents] if entities is not None: - entities = [self._convert_model(x) for x in entities] + entities = [self._convert_model(x, CreateEntity) for x in entities] if dialog_nodes is not None: - dialog_nodes = [self._convert_model(x) for x in dialog_nodes] + dialog_nodes = [ + self._convert_model(x, CreateDialogNode) for x in dialog_nodes + ] if counterexamples is not None: - counterexamples = [self._convert_model(x) for x in counterexamples] + counterexamples = [ + 
self._convert_model(x, CreateCounterexample) + for x in counterexamples + ] params = {'version': self.version} data = { 'name': name, @@ -238,13 +238,18 @@ def update_workspace(self, if workspace_id is None: raise ValueError('workspace_id must be provided') if intents is not None: - intents = [self._convert_model(x) for x in intents] + intents = [self._convert_model(x, CreateIntent) for x in intents] if entities is not None: - entities = [self._convert_model(x) for x in entities] + entities = [self._convert_model(x, CreateEntity) for x in entities] if dialog_nodes is not None: - dialog_nodes = [self._convert_model(x) for x in dialog_nodes] + dialog_nodes = [ + self._convert_model(x, CreateDialogNode) for x in dialog_nodes + ] if counterexamples is not None: - counterexamples = [self._convert_model(x) for x in counterexamples] + counterexamples = [ + self._convert_model(x, CreateCounterexample) + for x in counterexamples + ] params = {'version': self.version, 'append': append} data = { 'name': name, @@ -266,7 +271,6 @@ def update_workspace(self, # message ######################### - def message(self, workspace_id, input=None, @@ -293,15 +297,15 @@ def message(self, if workspace_id is None: raise ValueError('workspace_id must be provided') if input is not None: - input = self._convert_model(input) + input = self._convert_model(input, InputData) if context is not None: - context = self._convert_model(context) + context = self._convert_model(context, Context) if entities is not None: - entities = [self._convert_model(x) for x in entities] + entities = [self._convert_model(x, RuntimeEntity) for x in entities] if intents is not None: - intents = [self._convert_model(x) for x in intents] + intents = [self._convert_model(x, RuntimeIntent) for x in intents] if output is not None: - output = self._convert_model(output) + output = self._convert_model(output, OutputData) params = { 'version': self.version, 'nodes_visited_details': nodes_visited_details @@ -346,7 +350,7 @@ def 
create_intent(self, if intent is None: raise ValueError('intent must be provided') if examples is not None: - examples = [self._convert_model(x) for x in examples] + examples = [self._convert_model(x, CreateExample) for x in examples] params = {'version': self.version} data = { 'intent': intent, @@ -464,7 +468,7 @@ def update_intent(self, if intent is None: raise ValueError('intent must be provided') if new_examples is not None: - new_examples = [self._convert_model(x) for x in new_examples] + new_examples = [self._convert_model(x, CreateExample) for x in new_examples] params = {'version': self.version} data = { 'intent': new_intent, @@ -650,7 +654,7 @@ def create_entity(self, if entity is None: raise ValueError('entity must be provided') if values is not None: - values = [self._convert_model(x) for x in values] + values = [self._convert_model(x, CreateValue) for x in values] params = {'version': self.version} data = { 'entity': entity, @@ -773,7 +777,7 @@ def update_entity(self, if entity is None: raise ValueError('entity must be provided') if new_values is not None: - new_values = [self._convert_model(x) for x in new_values] + new_values = [self._convert_model(x, CreateValue) for x in new_values] params = {'version': self.version} data = { 'entity': new_entity, @@ -1182,9 +1186,9 @@ def create_dialog_node(self, if dialog_node is None: raise ValueError('dialog_node must be provided') if next_step is not None: - next_step = self._convert_model(next_step) + next_step = self._convert_model(next_step, DialogNodeNextStep) if actions is not None: - actions = [self._convert_model(x) for x in actions] + actions = [self._convert_model(x, DialogNodeAction) for x in actions] params = {'version': self.version} data = { 'dialog_node': dialog_node, @@ -1332,9 +1336,9 @@ def update_dialog_node(self, if new_dialog_node is None: raise ValueError('new_dialog_node must be provided') if new_next_step is not None: - new_next_step = self._convert_model(new_next_step) + new_next_step = 
self._convert_model(new_next_step, DialogNodeNextStep) if new_actions is not None: - new_actions = [self._convert_model(x) for x in new_actions] + new_actions = [self._convert_model(x, DialogNodeAction) for x in new_actions] params = {'version': self.version} data = { 'dialog_node': new_dialog_node, diff --git a/watson_developer_cloud/watson_service.py b/watson_developer_cloud/watson_service.py index a9cabf82d..8ed4e71d8 100755 --- a/watson_developer_cloud/watson_service.py +++ b/watson_developer_cloud/watson_service.py @@ -250,6 +250,14 @@ def _convert_model(val): return val._to_dict() return val + @staticmethod + def _convert_model(val, classname=None): + if classname is not None: + val = classname(**val) + if hasattr(val, "_to_dict"): + return val._to_dict() + return val + @staticmethod def _convert_list(val): if isinstance(val, list): From 29b04b8703c92fff84894d7b71b8c5070cc87c79 Mon Sep 17 00:00:00 2001 From: Erika Dsouza Date: Tue, 6 Mar 2018 17:48:46 -0500 Subject: [PATCH 20/45] fix(python sdk conversation): handling renamed props in nested json objects --- examples/conversation_v1.py | 88 ++++++- test/unit/test_conversation_v1.py | 2 +- watson_developer_cloud/conversation_v1.py | 282 +++++++++++++--------- watson_developer_cloud/watson_service.py | 7 +- 4 files changed, 262 insertions(+), 117 deletions(-) diff --git a/examples/conversation_v1.py b/examples/conversation_v1.py index 6c52dbc7e..9cbaadd7a 100644 --- a/examples/conversation_v1.py +++ b/examples/conversation_v1.py @@ -19,13 +19,50 @@ # workspaces ######################### -response = conversation.create_workspace(name='test_workspace', - description='Test workspace.', - language='en', - metadata={}) +create_workspace_data = { + "name": "test_workspace", + "description": "integration tests", + "language": "en", + "intents": [ + { + "intent": "hello", + "description": "string", + "examples": [ + { + "text": "good morning" + } + ] + } + ], + "entities": [ + { + "entity": "pizza_toppings", + 
"description": "Tasty pizza toppings", + "metadata": { + "property": "value" + } + } + ], + "counterexamples": [ + { + "text": "string" + } + ], + "metadata": {}, +} + + +response = conversation.create_workspace(name=create_workspace_data['name'], + description=create_workspace_data['description'], + language='en', + intents=create_workspace_data['intents'], + entities=create_workspace_data['entities'], + counterexamples=create_workspace_data['counterexamples'], + metadata=create_workspace_data['metadata']) print(json.dumps(response, indent=2)) workspace_id = response['workspace_id'] +print("Workspace id ".format(workspace_id)) response = conversation.get_workspace(workspace_id=workspace_id, export=True) print(json.dumps(response, indent=2)) @@ -48,9 +85,11 @@ # intents ######################### +examples = [{ "text": "good morning"}] response = conversation.create_intent(workspace_id=workspace_id, intent='test_intent', - description='Test intent.') + description='Test intent.', + examples=examples) print(json.dumps(response, indent=2)) response = conversation.get_intent(workspace_id=workspace_id, @@ -160,7 +199,7 @@ print(json.dumps(response, indent=2)) response = conversation.get_entity(workspace_id=workspace_id, - entity='test_entity', + entity=entities[0]['entity'], export=True) print(json.dumps(response, indent=2)) @@ -223,6 +262,43 @@ conversation.delete_entity(workspace_id, 'test_entity') +######################### +# Dialog nodes +######################### +create_dialog_node = { + "dialog_node": "greeting", + "description": "greeting messages", + "actions": [ + { + "name": "hello", + "type": "client", + "parameters": {}, + "result_variable": "string", + "credentials": "string" + } + ] +} +response = conversation.create_dialog_node(workspace_id, + create_dialog_node['dialog_node'], + create_dialog_node['description'], + actions=create_dialog_node['actions']) +print(json.dumps(response, indent=2)) + +response = conversation.get_dialog_node(workspace_id, + 
create_dialog_node['dialog_node']) +print(json.dumps(response, indent=2)) + +response = conversation.list_dialog_nodes(workspace_id) +print(json.dumps(response, indent=2)) + +response = conversation.update_dialog_node(workspace_id, + create_dialog_node['dialog_node'], + new_dialog_node='updated_node') +print(json.dumps(response, indent=2)) + +response = conversation.delete_dialog_node(workspace_id, 'updated_node') +print(json.dumps(response, indent=2)) + ######################### # logs ######################### diff --git a/test/unit/test_conversation_v1.py b/test/unit/test_conversation_v1.py index 9a851ac80..e7d1de553 100644 --- a/test/unit/test_conversation_v1.py +++ b/test/unit/test_conversation_v1.py @@ -899,7 +899,7 @@ def test_message(): message = conversation.message( workspace_id=workspace_id, input={'text': 'Turn on the lights'}, - context=json.dumps(message_ctx)) + context=json.dumps(message_ctx['context'])) assert message is not None assert responses.calls[1].request.url == message_url1 diff --git a/watson_developer_cloud/conversation_v1.py b/watson_developer_cloud/conversation_v1.py index fd6c0a7e0..d7385b6fe 100644 --- a/watson_developer_cloud/conversation_v1.py +++ b/watson_developer_cloud/conversation_v1.py @@ -155,7 +155,7 @@ def delete_workspace(self, workspace_id): self.request(method='DELETE', url=url, params=params, accept_json=True) return None - def get_workspace(self, workspace_id, export=None): + def get_workspace(self, workspace_id, export=None, include_audit=None): """ Get information about a workspace. @@ -163,12 +163,17 @@ def get_workspace(self, workspace_id, export=None): :param str workspace_id: The workspace ID. :param bool export: Whether to include all element content in the returned data. If export=`false`, the returned data includes only information about the element itself. If export=`true`, all content, including subelements, is included. The default value is `false`. 
+ :param bool include_audit: Whether to include the audit properties (`created` and `updated` timestamps) in the response. :return: A `dict` containing the `WorkspaceExport` response. :rtype: dict """ if workspace_id is None: raise ValueError('workspace_id must be provided') - params = {'version': self.version, 'export': export} + params = { + 'version': self.version, + 'export': export, + 'include_audit': include_audit + } url = '/v1/workspaces/{0}'.format(*self._encode_path_vars(workspace_id)) response = self.request( method='GET', url=url, params=params, accept_json=True) @@ -178,7 +183,8 @@ def list_workspaces(self, page_limit=None, include_count=None, sort=None, - cursor=None): + cursor=None, + include_audit=None): """ List workspaces. @@ -188,6 +194,7 @@ def list_workspaces(self, :param bool include_count: Whether to include information about the number of records returned. :param str sort: Sorts the response according to the value of the specified property, in ascending or descending order. :param str cursor: A token identifying the last value from the previous page of results. + :param bool include_audit: Whether to include the audit properties (`created` and `updated` timestamps) in the response. :return: A `dict` containing the `WorkspaceCollection` response. :rtype: dict """ @@ -196,7 +203,8 @@ def list_workspaces(self, 'page_limit': page_limit, 'include_count': include_count, 'sort': sort, - 'cursor': cursor + 'cursor': cursor, + 'include_audit': include_audit } url = '/v1/workspaces' response = self.request( @@ -383,7 +391,7 @@ def delete_intent(self, workspace_id, intent): self.request(method='DELETE', url=url, params=params, accept_json=True) return None - def get_intent(self, workspace_id, intent, export=None): + def get_intent(self, workspace_id, intent, export=None, include_audit=None): """ Get intent. @@ -392,6 +400,7 @@ def get_intent(self, workspace_id, intent, export=None): :param str workspace_id: The workspace ID. 
:param str intent: The intent name (for example, `pizza_order`). :param bool export: Whether to include all element content in the returned data. If export=`false`, the returned data includes only information about the element itself. If export=`true`, all content, including subelements, is included. The default value is `false`. + :param bool include_audit: Whether to include the audit properties (`created` and `updated` timestamps) in the response. :return: A `dict` containing the `IntentExport` response. :rtype: dict """ @@ -399,9 +408,13 @@ def get_intent(self, workspace_id, intent, export=None): raise ValueError('workspace_id must be provided') if intent is None: raise ValueError('intent must be provided') - params = {'version': self.version, 'export': export} - url = '/v1/workspaces/{0}/intents/{1}'.format(*self._encode_path_vars( - workspace_id, intent)) + params = { + 'version': self.version, + 'export': export, + 'include_audit': include_audit + } + url = '/v1/workspaces/{0}/intents/{1}'.format( + *self._encode_path_vars(workspace_id, intent)) response = self.request( method='GET', url=url, params=params, accept_json=True) return response @@ -412,7 +425,8 @@ def list_intents(self, page_limit=None, include_count=None, sort=None, - cursor=None): + cursor=None, + include_audit=None): """ List intents. @@ -424,6 +438,7 @@ def list_intents(self, :param bool include_count: Whether to include information about the number of records returned. :param str sort: Sorts the response according to the value of the specified property, in ascending or descending order. :param str cursor: A token identifying the last value from the previous page of results. + :param bool include_audit: Whether to include the audit properties (`created` and `updated` timestamps) in the response. :return: A `dict` containing the `IntentCollection` response. 
:rtype: dict """ @@ -435,7 +450,8 @@ def list_intents(self, 'page_limit': page_limit, 'include_count': include_count, 'sort': sort, - 'cursor': cursor + 'cursor': cursor, + 'include_audit': include_audit } url = '/v1/workspaces/{0}/intents'.format( *self._encode_path_vars(workspace_id)) @@ -468,7 +484,9 @@ def update_intent(self, if intent is None: raise ValueError('intent must be provided') if new_examples is not None: - new_examples = [self._convert_model(x, CreateExample) for x in new_examples] + new_examples = [ + self._convert_model(x, CreateExample) for x in new_examples + ] params = {'version': self.version} data = { 'intent': new_intent, @@ -534,7 +552,7 @@ def delete_example(self, workspace_id, intent, text): self.request(method='DELETE', url=url, params=params, accept_json=True) return None - def get_example(self, workspace_id, intent, text): + def get_example(self, workspace_id, intent, text, include_audit=None): """ Get user input example. @@ -543,6 +561,7 @@ def get_example(self, workspace_id, intent, text): :param str workspace_id: The workspace ID. :param str intent: The intent name (for example, `pizza_order`). :param str text: The text of the user input example. + :param bool include_audit: Whether to include the audit properties (`created` and `updated` timestamps) in the response. :return: A `dict` containing the `Example` response. :rtype: dict """ @@ -552,7 +571,7 @@ def get_example(self, workspace_id, intent, text): raise ValueError('intent must be provided') if text is None: raise ValueError('text must be provided') - params = {'version': self.version} + params = {'version': self.version, 'include_audit': include_audit} url = '/v1/workspaces/{0}/intents/{1}/examples/{2}'.format( *self._encode_path_vars(workspace_id, intent, text)) response = self.request( @@ -565,7 +584,8 @@ def list_examples(self, page_limit=None, include_count=None, sort=None, - cursor=None): + cursor=None, + include_audit=None): """ List user input examples. 
@@ -577,6 +597,7 @@ def list_examples(self, :param bool include_count: Whether to include information about the number of records returned. :param str sort: Sorts the response according to the value of the specified property, in ascending or descending order. :param str cursor: A token identifying the last value from the previous page of results. + :param bool include_audit: Whether to include the audit properties (`created` and `updated` timestamps) in the response. :return: A `dict` containing the `ExampleCollection` response. :rtype: dict """ @@ -589,7 +610,8 @@ def list_examples(self, 'page_limit': page_limit, 'include_count': include_count, 'sort': sort, - 'cursor': cursor + 'cursor': cursor, + 'include_audit': include_audit } url = '/v1/workspaces/{0}/intents/{1}/examples'.format( *self._encode_path_vars(workspace_id, intent)) @@ -689,7 +711,7 @@ def delete_entity(self, workspace_id, entity): self.request(method='DELETE', url=url, params=params, accept_json=True) return None - def get_entity(self, workspace_id, entity, export=None): + def get_entity(self, workspace_id, entity, export=None, include_audit=None): """ Get entity. @@ -698,6 +720,7 @@ def get_entity(self, workspace_id, entity, export=None): :param str workspace_id: The workspace ID. :param str entity: The name of the entity. :param bool export: Whether to include all element content in the returned data. If export=`false`, the returned data includes only information about the element itself. If export=`true`, all content, including subelements, is included. The default value is `false`. + :param bool include_audit: Whether to include the audit properties (`created` and `updated` timestamps) in the response. :return: A `dict` containing the `EntityExport` response. 
:rtype: dict """ @@ -705,9 +728,13 @@ def get_entity(self, workspace_id, entity, export=None): raise ValueError('workspace_id must be provided') if entity is None: raise ValueError('entity must be provided') - params = {'version': self.version, 'export': export} - url = '/v1/workspaces/{0}/entities/{1}'.format(*self._encode_path_vars( - workspace_id, entity)) + params = { + 'version': self.version, + 'export': export, + 'include_audit': include_audit + } + url = '/v1/workspaces/{0}/entities/{1}'.format( + *self._encode_path_vars(workspace_id, entity)) response = self.request( method='GET', url=url, params=params, accept_json=True) return response @@ -718,7 +745,8 @@ def list_entities(self, page_limit=None, include_count=None, sort=None, - cursor=None): + cursor=None, + include_audit=None): """ List entities. @@ -730,6 +758,7 @@ def list_entities(self, :param bool include_count: Whether to include information about the number of records returned. :param str sort: Sorts the response according to the value of the specified property, in ascending or descending order. :param str cursor: A token identifying the last value from the previous page of results. + :param bool include_audit: Whether to include the audit properties (`created` and `updated` timestamps) in the response. :return: A `dict` containing the `EntityCollection` response. :rtype: dict """ @@ -741,7 +770,8 @@ def list_entities(self, 'page_limit': page_limit, 'include_count': include_count, 'sort': sort, - 'cursor': cursor + 'cursor': cursor, + 'include_audit': include_audit } url = '/v1/workspaces/{0}/entities'.format( *self._encode_path_vars(workspace_id)) @@ -862,7 +892,12 @@ def delete_value(self, workspace_id, entity, value): self.request(method='DELETE', url=url, params=params, accept_json=True) return None - def get_value(self, workspace_id, entity, value, export=None): + def get_value(self, + workspace_id, + entity, + value, + export=None, + include_audit=None): """ Get entity value. 
@@ -872,6 +907,7 @@ def get_value(self, workspace_id, entity, value, export=None): :param str entity: The name of the entity. :param str value: The text of the entity value. :param bool export: Whether to include all element content in the returned data. If export=`false`, the returned data includes only information about the element itself. If export=`true`, all content, including subelements, is included. The default value is `false`. + :param bool include_audit: Whether to include the audit properties (`created` and `updated` timestamps) in the response. :return: A `dict` containing the `ValueExport` response. :rtype: dict """ @@ -881,7 +917,11 @@ def get_value(self, workspace_id, entity, value, export=None): raise ValueError('entity must be provided') if value is None: raise ValueError('value must be provided') - params = {'version': self.version, 'export': export} + params = { + 'version': self.version, + 'export': export, + 'include_audit': include_audit + } url = '/v1/workspaces/{0}/entities/{1}/values/{2}'.format( *self._encode_path_vars(workspace_id, entity, value)) response = self.request( @@ -895,7 +935,8 @@ def list_values(self, page_limit=None, include_count=None, sort=None, - cursor=None): + cursor=None, + include_audit=None): """ List entity values. @@ -908,6 +949,7 @@ def list_values(self, :param bool include_count: Whether to include information about the number of records returned. :param str sort: Sorts the response according to the value of the specified property, in ascending or descending order. :param str cursor: A token identifying the last value from the previous page of results. + :param bool include_audit: Whether to include the audit properties (`created` and `updated` timestamps) in the response. :return: A `dict` containing the `ValueCollection` response. 
:rtype: dict """ @@ -921,7 +963,8 @@ def list_values(self, 'page_limit': page_limit, 'include_count': include_count, 'sort': sort, - 'cursor': cursor + 'cursor': cursor, + 'include_audit': include_audit } url = '/v1/workspaces/{0}/entities/{1}/values'.format( *self._encode_path_vars(workspace_id, entity)) @@ -1033,7 +1076,12 @@ def delete_synonym(self, workspace_id, entity, value, synonym): self.request(method='DELETE', url=url, params=params, accept_json=True) return None - def get_synonym(self, workspace_id, entity, value, synonym): + def get_synonym(self, + workspace_id, + entity, + value, + synonym, + include_audit=None): """ Get entity value synonym. @@ -1043,6 +1091,7 @@ def get_synonym(self, workspace_id, entity, value, synonym): :param str entity: The name of the entity. :param str value: The text of the entity value. :param str synonym: The text of the synonym. + :param bool include_audit: Whether to include the audit properties (`created` and `updated` timestamps) in the response. :return: A `dict` containing the `Synonym` response. :rtype: dict """ @@ -1054,7 +1103,7 @@ def get_synonym(self, workspace_id, entity, value, synonym): raise ValueError('value must be provided') if synonym is None: raise ValueError('synonym must be provided') - params = {'version': self.version} + params = {'version': self.version, 'include_audit': include_audit} url = '/v1/workspaces/{0}/entities/{1}/values/{2}/synonyms/{3}'.format( *self._encode_path_vars(workspace_id, entity, value, synonym)) response = self.request( @@ -1068,7 +1117,8 @@ def list_synonyms(self, page_limit=None, include_count=None, sort=None, - cursor=None): + cursor=None, + include_audit=None): """ List entity value synonyms. @@ -1081,6 +1131,7 @@ def list_synonyms(self, :param bool include_count: Whether to include information about the number of records returned. :param str sort: Sorts the response according to the value of the specified property, in ascending or descending order. 
:param str cursor: A token identifying the last value from the previous page of results. + :param bool include_audit: Whether to include the audit properties (`created` and `updated` timestamps) in the response. :return: A `dict` containing the `SynonymCollection` response. :rtype: dict """ @@ -1095,7 +1146,8 @@ def list_synonyms(self, 'page_limit': page_limit, 'include_count': include_count, 'sort': sort, - 'cursor': cursor + 'cursor': cursor, + 'include_audit': include_audit } url = '/v1/workspaces/{0}/entities/{1}/values/{2}/synonyms'.format( *self._encode_path_vars(workspace_id, entity, value)) @@ -1188,7 +1240,9 @@ def create_dialog_node(self, if next_step is not None: next_step = self._convert_model(next_step, DialogNodeNextStep) if actions is not None: - actions = [self._convert_model(x, DialogNodeAction) for x in actions] + actions = [ + self._convert_model(x, DialogNodeAction) for x in actions + ] params = {'version': self.version} data = { 'dialog_node': dialog_node, @@ -1232,7 +1286,7 @@ def delete_dialog_node(self, workspace_id, dialog_node): self.request(method='DELETE', url=url, params=params, accept_json=True) return None - def get_dialog_node(self, workspace_id, dialog_node): + def get_dialog_node(self, workspace_id, dialog_node, include_audit=None): """ Get dialog node. @@ -1240,6 +1294,7 @@ def get_dialog_node(self, workspace_id, dialog_node): :param str workspace_id: The workspace ID. :param str dialog_node: The dialog node ID (for example, `get_order`). + :param bool include_audit: Whether to include the audit properties (`created` and `updated` timestamps) in the response. :return: A `dict` containing the `DialogNode` response. 
:rtype: dict """ @@ -1247,7 +1302,7 @@ def get_dialog_node(self, workspace_id, dialog_node): raise ValueError('workspace_id must be provided') if dialog_node is None: raise ValueError('dialog_node must be provided') - params = {'version': self.version} + params = {'version': self.version, 'include_audit': include_audit} url = '/v1/workspaces/{0}/dialog_nodes/{1}'.format( *self._encode_path_vars(workspace_id, dialog_node)) response = self.request( @@ -1259,7 +1314,8 @@ def list_dialog_nodes(self, page_limit=None, include_count=None, sort=None, - cursor=None): + cursor=None, + include_audit=None): """ List dialog nodes. @@ -1270,6 +1326,7 @@ def list_dialog_nodes(self, :param bool include_count: Whether to include information about the number of records returned. :param str sort: Sorts the response according to the value of the specified property, in ascending or descending order. :param str cursor: A token identifying the last value from the previous page of results. + :param bool include_audit: Whether to include the audit properties (`created` and `updated` timestamps) in the response. :return: A `dict` containing the `DialogNodeCollection` response. 
:rtype: dict """ @@ -1280,7 +1337,8 @@ def list_dialog_nodes(self, 'page_limit': page_limit, 'include_count': include_count, 'sort': sort, - 'cursor': cursor + 'cursor': cursor, + 'include_audit': include_audit } url = '/v1/workspaces/{0}/dialog_nodes'.format( *self._encode_path_vars(workspace_id)) @@ -1291,7 +1349,7 @@ def list_dialog_nodes(self, def update_dialog_node(self, workspace_id, dialog_node, - new_dialog_node, + new_dialog_node=None, new_description=None, new_conditions=None, new_parent=None, @@ -1333,12 +1391,13 @@ def update_dialog_node(self, raise ValueError('workspace_id must be provided') if dialog_node is None: raise ValueError('dialog_node must be provided') - if new_dialog_node is None: - raise ValueError('new_dialog_node must be provided') if new_next_step is not None: - new_next_step = self._convert_model(new_next_step, DialogNodeNextStep) + new_next_step = self._convert_model(new_next_step, + DialogNodeNextStep) if new_actions is not None: - new_actions = [self._convert_model(x, DialogNodeAction) for x in new_actions] + new_actions = [ + self._convert_model(x, DialogNodeAction) for x in new_actions + ] params = {'version': self.version} data = { 'dialog_node': new_dialog_node, @@ -1476,7 +1535,7 @@ def delete_counterexample(self, workspace_id, text): self.request(method='DELETE', url=url, params=params, accept_json=True) return None - def get_counterexample(self, workspace_id, text): + def get_counterexample(self, workspace_id, text, include_audit=None): """ Get counterexample. @@ -1485,6 +1544,7 @@ def get_counterexample(self, workspace_id, text): :param str workspace_id: The workspace ID. :param str text: The text of a user input counterexample (for example, `What are you wearing?`). + :param bool include_audit: Whether to include the audit properties (`created` and `updated` timestamps) in the response. :return: A `dict` containing the `Counterexample` response. 
:rtype: dict """ @@ -1492,7 +1552,7 @@ def get_counterexample(self, workspace_id, text): raise ValueError('workspace_id must be provided') if text is None: raise ValueError('text must be provided') - params = {'version': self.version} + params = {'version': self.version, 'include_audit': include_audit} url = '/v1/workspaces/{0}/counterexamples/{1}'.format( *self._encode_path_vars(workspace_id, text)) response = self.request( @@ -1504,7 +1564,8 @@ def list_counterexamples(self, page_limit=None, include_count=None, sort=None, - cursor=None): + cursor=None, + include_audit=None): """ List counterexamples. @@ -1516,6 +1577,7 @@ def list_counterexamples(self, :param bool include_count: Whether to include information about the number of records returned. :param str sort: Sorts the response according to the value of the specified property, in ascending or descending order. :param str cursor: A token identifying the last value from the previous page of results. + :param bool include_audit: Whether to include the audit properties (`created` and `updated` timestamps) in the response. :return: A `dict` containing the `CounterexampleCollection` response. :rtype: dict """ @@ -1526,7 +1588,8 @@ def list_counterexamples(self, 'page_limit': page_limit, 'include_count': include_count, 'sort': sort, - 'cursor': cursor + 'cursor': cursor, + 'include_audit': include_audit } url = '/v1/workspaces/{0}/counterexamples'.format( *self._encode_path_vars(workspace_id)) @@ -1564,6 +1627,7 @@ def update_counterexample(self, workspace_id, text, new_text=None): # Models ############################################################################## + class CaptureGroup(object): """ CaptureGroup. 
@@ -1985,6 +2049,8 @@ def _from_dict(cls, _dict): ] if 'title' in _dict: args['title'] = _dict['title'] + if 'node_type' in _dict: + args['node_type'] = _dict['node_type'] if 'type' in _dict: args['node_type'] = _dict['type'] if 'event_name' in _dict: @@ -2284,6 +2350,8 @@ def _from_dict(cls, _dict): args['synonyms'] = _dict['synonyms'] if 'patterns' in _dict: args['patterns'] = _dict['patterns'] + if 'value_type' in _dict: + args['value_type'] = _dict['value_type'] if 'type' in _dict: args['value_type'] = _dict['type'] return cls(**args) @@ -2323,18 +2391,18 @@ class DialogNode(object): DialogNode. :attr str dialog_node_id: The dialog node ID. - :attr str description: The description of the dialog node. - :attr str conditions: The condition that triggers the dialog node. - :attr str parent: The ID of the parent dialog node. - :attr str previous_sibling: The ID of the previous sibling dialog node. - :attr object output: The output of the dialog node. - :attr object context: The context (if defined) for the dialog node. - :attr object metadata: The metadata (if any) for the dialog node. - :attr DialogNodeNextStep next_step: The next step to execute following this dialog node. + :attr str description: (optional) The description of the dialog node. + :attr str conditions: (optional) The condition that triggers the dialog node. + :attr str parent: (optional) The ID of the parent dialog node. + :attr str previous_sibling: (optional) The ID of the previous sibling dialog node. + :attr object output: (optional) The output of the dialog node. + :attr object context: (optional) The context (if defined) for the dialog node. + :attr object metadata: (optional) The metadata (if any) for the dialog node. + :attr DialogNodeNextStep next_step: (optional) The next step to execute following this dialog node. :attr datetime created: The timestamp for creation of the dialog node. - :attr datetime updated: (optional) The timestamp for the most recent update to the dialog node. 
+ :attr datetime updated: The timestamp for the most recent update to the dialog node. :attr list[DialogNodeAction] actions: (optional) The actions for the dialog node. - :attr str title: The alias used to identify the dialog node. + :attr str title: (optional) The alias used to identify the dialog node. :attr str node_type: (optional) How the dialog node is processed. :attr str event_name: (optional) How an `event_handler` node is processed. :attr str variable: (optional) The location in the dialog context where output is stored. @@ -2342,18 +2410,18 @@ class DialogNode(object): def __init__(self, dialog_node_id, - description, - conditions, - parent, - previous_sibling, - output, - context, - metadata, - next_step, created, - title, - updated=None, + updated, + description=None, + conditions=None, + parent=None, + previous_sibling=None, + output=None, + context=None, + metadata=None, + next_step=None, actions=None, + title=None, node_type=None, event_name=None, variable=None): @@ -2361,18 +2429,18 @@ def __init__(self, Initialize a DialogNode object. :param str dialog_node_id: The dialog node ID. - :param str description: The description of the dialog node. - :param str conditions: The condition that triggers the dialog node. - :param str parent: The ID of the parent dialog node. - :param str previous_sibling: The ID of the previous sibling dialog node. - :param object output: The output of the dialog node. - :param object context: The context (if defined) for the dialog node. - :param object metadata: The metadata (if any) for the dialog node. - :param DialogNodeNextStep next_step: The next step to execute following this dialog node. :param datetime created: The timestamp for creation of the dialog node. - :param str title: The alias used to identify the dialog node. - :param datetime updated: (optional) The timestamp for the most recent update to the dialog node. + :param datetime updated: The timestamp for the most recent update to the dialog node. 
+ :param str description: (optional) The description of the dialog node. + :param str conditions: (optional) The condition that triggers the dialog node. + :param str parent: (optional) The ID of the parent dialog node. + :param str previous_sibling: (optional) The ID of the previous sibling dialog node. + :param object output: (optional) The output of the dialog node. + :param object context: (optional) The context (if defined) for the dialog node. + :param object metadata: (optional) The metadata (if any) for the dialog node. + :param DialogNodeNextStep next_step: (optional) The next step to execute following this dialog node. :param list[DialogNodeAction] actions: (optional) The actions for the dialog node. + :param str title: (optional) The alias used to identify the dialog node. :param str node_type: (optional) How the dialog node is processed. :param str event_name: (optional) How an `event_handler` node is processed. :param str variable: (optional) The location in the dialog context where output is stored. 
@@ -2398,6 +2466,8 @@ def __init__(self, def _from_dict(cls, _dict): """Initialize a DialogNode object from a json dictionary.""" args = {} + if 'dialog_node_id' in _dict: + args['dialog_node_id'] = _dict['dialog_node_id'] if 'dialog_node' in _dict: args['dialog_node_id'] = _dict['dialog_node'] else: @@ -2406,49 +2476,21 @@ def _from_dict(cls, _dict): ) if 'description' in _dict: args['description'] = _dict['description'] - else: - raise ValueError( - 'Required property \'description\' not present in DialogNode JSON' - ) if 'conditions' in _dict: args['conditions'] = _dict['conditions'] - else: - raise ValueError( - 'Required property \'conditions\' not present in DialogNode JSON' - ) if 'parent' in _dict: args['parent'] = _dict['parent'] - else: - raise ValueError( - 'Required property \'parent\' not present in DialogNode JSON') if 'previous_sibling' in _dict: args['previous_sibling'] = _dict['previous_sibling'] - else: - raise ValueError( - 'Required property \'previous_sibling\' not present in DialogNode JSON' - ) if 'output' in _dict: args['output'] = _dict['output'] - else: - raise ValueError( - 'Required property \'output\' not present in DialogNode JSON') if 'context' in _dict: args['context'] = _dict['context'] - else: - raise ValueError( - 'Required property \'context\' not present in DialogNode JSON') if 'metadata' in _dict: args['metadata'] = _dict['metadata'] - else: - raise ValueError( - 'Required property \'metadata\' not present in DialogNode JSON') if 'next_step' in _dict: args['next_step'] = DialogNodeNextStep._from_dict( _dict['next_step']) - else: - raise ValueError( - 'Required property \'next_step\' not present in DialogNode JSON' - ) if 'created' in _dict: args['created'] = string_to_datetime(_dict['created']) else: @@ -2456,15 +2498,17 @@ def _from_dict(cls, _dict): 'Required property \'created\' not present in DialogNode JSON') if 'updated' in _dict: args['updated'] = string_to_datetime(_dict['updated']) + else: + raise ValueError( + 
'Required property \'updated\' not present in DialogNode JSON') if 'actions' in _dict: args['actions'] = [ DialogNodeAction._from_dict(x) for x in _dict['actions'] ] if 'title' in _dict: args['title'] = _dict['title'] - else: - raise ValueError( - 'Required property \'title\' not present in DialogNode JSON') + if 'node_type' in _dict: + args['node_type'] = _dict['node_type'] if 'type' in _dict: args['node_type'] = _dict['type'] if 'event_name' in _dict: @@ -2568,6 +2612,8 @@ def _from_dict(cls, _dict): raise ValueError( 'Required property \'name\' not present in DialogNodeAction JSON' ) + if 'action_type' in _dict: + args['action_type'] = _dict['action_type'] if 'type' in _dict: args['action_type'] = _dict['type'] if 'parameters' in _dict: @@ -2679,8 +2725,8 @@ class DialogNodeNextStep(object): """ The next step to execute following this dialog node. - :attr str behavior: How the `next_step` reference is processed. - :attr str dialog_node: (optional) The ID of the dialog node to process next. + :attr str behavior: How the `next_step` reference is processed. If you specify `jump_to`, then you must also specify a value for the `dialog_node` property. + :attr str dialog_node: (optional) The ID of the dialog node to process next. This parameter is required if `behavior`=`jump_to`. :attr str selector: (optional) Which part of the dialog node to process next. """ @@ -2688,8 +2734,8 @@ def __init__(self, behavior, dialog_node=None, selector=None): """ Initialize a DialogNodeNextStep object. - :param str behavior: How the `next_step` reference is processed. - :param str dialog_node: (optional) The ID of the dialog node to process next. + :param str behavior: How the `next_step` reference is processed. If you specify `jump_to`, then you must also specify a value for the `dialog_node` property. + :param str dialog_node: (optional) The ID of the dialog node to process next. This parameter is required if `behavior`=`jump_to`. 
:param str selector: (optional) Which part of the dialog node to process next. """ self.behavior = behavior @@ -2830,6 +2876,8 @@ def __init__(self, def _from_dict(cls, _dict): """Initialize a Entity object from a json dictionary.""" args = {} + if 'entity_name' in _dict: + args['entity_name'] = _dict['entity_name'] if 'entity' in _dict: args['entity_name'] = _dict['entity'] else: @@ -2991,6 +3039,8 @@ def __init__(self, def _from_dict(cls, _dict): """Initialize a EntityExport object from a json dictionary.""" args = {} + if 'entity_name' in _dict: + args['entity_name'] = _dict['entity_name'] if 'entity' in _dict: args['entity_name'] = _dict['entity'] else: @@ -3079,6 +3129,8 @@ def __init__(self, example_text, created, updated): def _from_dict(cls, _dict): """Initialize a Example object from a json dictionary.""" args = {} + if 'example_text' in _dict: + args['example_text'] = _dict['example_text'] if 'text' in _dict: args['example_text'] = _dict['text'] else: @@ -3260,6 +3312,8 @@ def __init__(self, intent_name, created, updated, description=None): def _from_dict(cls, _dict): """Initialize a Intent object from a json dictionary.""" args = {} + if 'intent_name' in _dict: + args['intent_name'] = _dict['intent_name'] if 'intent' in _dict: args['intent_name'] = _dict['intent'] else: @@ -3405,6 +3459,8 @@ def __init__(self, def _from_dict(cls, _dict): """Initialize a IntentExport object from a json dictionary.""" args = {} + if 'intent_name' in _dict: + args['intent_name'] = _dict['intent_name'] if 'intent' in _dict: args['intent_name'] = _dict['intent'] else: @@ -4450,6 +4506,8 @@ def __init__(self, synonym_text, created, updated): def _from_dict(cls, _dict): """Initialize a Synonym object from a json dictionary.""" args = {} + if 'synonym_text' in _dict: + args['synonym_text'] = _dict['synonym_text'] if 'synonym' in _dict: args['synonym_text'] = _dict['synonym'] else: @@ -4656,6 +4714,8 @@ def __init__(self, def _from_dict(cls, _dict): """Initialize a Value object 
from a json dictionary.""" args = {} + if 'value_text' in _dict: + args['value_text'] = _dict['value_text'] if 'value' in _dict: args['value_text'] = _dict['value'] else: @@ -4677,6 +4737,8 @@ def _from_dict(cls, _dict): args['synonyms'] = _dict['synonyms'] if 'patterns' in _dict: args['patterns'] = _dict['patterns'] + if 'value_type' in _dict: + args['value_type'] = _dict['value_type'] if 'type' in _dict: args['value_type'] = _dict['type'] else: @@ -4824,6 +4886,8 @@ def __init__(self, def _from_dict(cls, _dict): """Initialize a ValueExport object from a json dictionary.""" args = {} + if 'value_text' in _dict: + args['value_text'] = _dict['value_text'] if 'value' in _dict: args['value_text'] = _dict['value'] else: @@ -4845,6 +4909,8 @@ def _from_dict(cls, _dict): args['synonyms'] = _dict['synonyms'] if 'patterns' in _dict: args['patterns'] = _dict['patterns'] + if 'value_type' in _dict: + args['value_type'] = _dict['value_type'] if 'type' in _dict: args['value_type'] = _dict['type'] else: diff --git a/watson_developer_cloud/watson_service.py b/watson_developer_cloud/watson_service.py index 8ed4e71d8..768f9a893 100755 --- a/watson_developer_cloud/watson_service.py +++ b/watson_developer_cloud/watson_service.py @@ -18,6 +18,7 @@ import sys from requests.structures import CaseInsensitiveDict import dateutil.parser as date_parser +import json try: from http.cookiejar import CookieJar # Python 3 @@ -252,8 +253,10 @@ def _convert_model(val): @staticmethod def _convert_model(val, classname=None): - if classname is not None: - val = classname(**val) + if classname is not None and not hasattr(val, "_from_dict"): + if type(val) == str: + val = json.loads(val) + val = classname._from_dict(dict(val)) if hasattr(val, "_to_dict"): return val._to_dict() return val From 598f8f224aa3cb463ac271d8de0089d6a6cc216d Mon Sep 17 00:00:00 2001 From: Erika Dsouza Date: Wed, 7 Mar 2018 13:15:21 -0500 Subject: [PATCH 21/45] feat(python discovery): regenerate discovery --- .env.enc | Bin 
2592 -> 2592 bytes examples/discovery_v1.py | 4 +- .../test_integration_discovery_v1.py | 78 +++++++++++++ test/unit/test_discovery_v1.py | 4 +- watson_developer_cloud/discovery_v1.py | 109 +++++++++++++----- 5 files changed, 161 insertions(+), 34 deletions(-) create mode 100644 test/integration/test_integration_discovery_v1.py diff --git a/.env.enc b/.env.enc index 5903c88749cdd1ae8aa30d7021ffb66353a8cdfe..6e1f6ead725c7eba24402eb89c43fe727b0eefe9 100644 GIT binary patch literal 2592 zcmV+*3g7kTb?Rs`XYL91^<~-c(NC63B%gTOTPL}#md~2+W^IB37TVsJCaVIvY|`An z6gF)xaAo`l>~gvrguH!^;O1ldObR@Gl2nEL23?ZPH9xpu+ETi_ATz~wRc{{(ni|Db%IsO zFSy&{dlHd@uNQi?fHziP@m2cqF-s7{RUI5$d%J>NJ2g^`6;{bre#N1C;P-dr{EhCL zz9kunE}{j){clwxqRWeI2;UiS(<6LJH3VvbQ&0FZnVs+|0_ajc_=5tu49JD4vnG`} z)M7=&_AMuOdDuT*-CPB^GIsIoM)C9yEmHgFBeMn$ao<~ZR{lv*#?}YEP<#xtn`3h> zbyx3bMr}(7dCsBU2eR}z{!CrSWQ!u;c)xq8)Y0OVx!U*6`^`tF7gI2PSjR!yD#{dj z-x0bNzaW~2T~EZ-Q_zZ3f;d`H(0POr(Nc|``YkQu%$Yw&gAay<6_Hi+_tYnu%8?y+ zO$0Oa(e6&AVCq%vN95>oktut6T{UO!HFGk`0W@NabuHJsl8q6Hx-R+fGq#yKAN5r# zKC;4866ISjwLv#x+Fpuc7{GITHn7|F-mio;nBez~kcxw{(uz&5f1OZQL>hihiq0^f zf>^VQNq+0N+RSl`*x%)p(c&$G^qxS<-aaD;-P!Ejz8bJh(&V^JPNNy1gGgO4KlAL`kj74(y}!i9(*!kY2mL|RA{%2;rST|H5@PM!A?DWn9ohu;T_z0yQi%p zY(nZaZV(@Pf_RT+PR~s6C7B><%N%icJDp?zr?UxfGCFU>^>|+d3e(RLT)C}Z)c+ju zfV}qY6JmM^n!w@rX)}DG>ibX&QC&LP4-_&M*Mho1rQrJn7I4zmVm~LblU;N`_akvy zH;!Q;P_yi6S+H%r?6lfE6ce&QH!{d1$LgcrPv$NH7@6&`z``d>i>}#FR^^F=aQ5li z?c199C6jy%u1v=cXuHAf%Hc<;D((h&C`_2QdIGuv`@T7SjDLXvxE+IljpOx_vfQ4X zY6rur=&VHFDTF5vm<0xv*>$ZoAXsWOtJM$snnz>HF#a%trMJ)yrOESB4#zL}uAX<8 z5YgA_z?bm~V97hW?Tg@Xv71;|wk1ezqc?O6G*D}a05Xa}s$s7U8b86CkjJXk@DbeTnzIki41 z2oya(Q^gX9x-+FK1cyg#;Vx95!|?hl&Z+a1&(H^t)!+yRg7VGd@5vMvarWu>*ag*u z1UTrTF5ix<$~&Zpno(;`*6RElTgd=8LWzOFkoQ6cr2>g9-F-HHY*uBX(x+V*ejb1p zb?r!TkclEsAp5=K!QO&@8>tj0E06VR!TxzNbHo+WR(Hlk*FJJwD}TC?gh<@nzNJ zlX}j?!h*0l8Raln!jC6y=x@{XQtXPV->HuL9BM=g2f}EBO6r=D1ftI}^gi^ywA!!m z3iCvu(*7*h9p0IK=3Q9IE^R&QV``t@5;J72CKvnTg6ShX@TN3FLqC)IlQ~gVPRW$bmW)3PoDGD|FtP93SeG1Bise 
zPB=#rr;`GJyN5`5h@mE^JQVybHoP;24^a+<4ro-(>`QSgmD%SOIx!(GXjHMoy6#d7 z|1gLH)r(QD)e{j#Q8!Up<}GXM33OqCAV9&2FBdeuyw;KprK1kCTxIT$+afWyPu?mP z=7xPO{z`i{{%tBZqLY(A-%bi!ppOz6^z_E9F3ZyK&Y^Ty&?Fj0`ue${PENikl5Lm=Dcz` zl&GhObhPASbFj-K6&QweGTPC6a8n|KF@GIH`6NHk3M-ti-}8n42v^?6)r2U6i38MW zLCRG70|n`Hv8e!2pi+EkY>^KxIJ*a*M@R4{GJy1QfksM|?23g#p0_lExx*ZkPHzP# z!1lLxNj&@{+1aQpxejt$7mU9_Dm#9lCb!KbExZ&LMcmw{e}u4J>s)}>LAg?bh=#Qo zrqECk&OpOY`9xB!%hf&?W;FUG#n151W(e-@%%ff`c55H8vr0*Xck>KkhhHZ% zAv`@`18RZ@m>VdX3T>-f(gpApn5l&uw~V*wIOi?I{nMa!6}T0?KQ=abnYHzWT@ONo z=}tw(e|8**#1)}{i$^S;{pIZhoG|VwQ30Z#`(k8wv72_dwf4tovu$m#3wI%xG<<7e zn{$;ZE@W6zYY+8!XhR|ZvIV>Muex-62EX8M_M5p->2kz)057_h2(1y<{|dHh;L@kp z5M~o+9h}qR3h8QBs&8Fh;~yQas@eLn2+gh zc+6F4mnSi1;KK^tK{b5yN44OLo$LmuQXL{otV+k?rLvD*{|0@p-MxH0zxU=uRa~E2 zK3*Q|z(w&1wxxTpBFpw|zE+b!^EiyIyOK$)ryN>^_VFi|xq)Sbkk;dqIb9RWGRi1o zyykDEZwz|f=AM4f7>7EK4fd&1#dW#2Lzz2?xspx!%=l+`Lg;5QCc24uQ_E{#1}%~I C!3;kD literal 2592 zcmV+*3g7i)Hd2VlS4F!~1T4eq37^*ZzGm7<9-GEY^z_E7*qQ zjg({kfYOzakHX0^Dm2B#v!0|21Ej*$(4;R^Upbo_Fs4dMHOy(f>WjO+BBM*}f${OQ zD|A7Qw_|goue)DHZeaIsX7RQB4Wjsdh?O+z?FT|m|3c%0&_Bs`#~V}_&zyBEgKyEe z3t97EWrBx=X1d>3FZO%BNHWiQwh?e_A+Csl^=$Uduvm}E&V7E-S+s5fRVp9XTRRcY$1MCPly9{YzX6*Z5fvR zXIz@2tJ5N)>6Ao=DcDZSooaNxYLG5I9*9K1S86XtnFI_%O>aGmca&w&-&11vrtCMR zQ;8#1Peo>XG8gl?uI8(90{#TN7KJK`YjUqHN+|DfBbVSD3sBsTwVbWA8U^?2yZ9>B zD?~@e%Z@1Z$|zv7mu-A(a(q>fF1%l5pOZR-)9Ev@lcoo)@}86SN}7i+3L1_^JWYi! 
zVDIUPI0C{#27-V_UUC?(#_Z*hp=W9K1DFCnJh9JcG!;7^P!E&q{b?%M%moc*_tv0( zL=0JhFAIzWnTkNV2ssRUV=2>ENlL@!XsYPwPT=k01s%HrThhpJcEDn>2* zZ_k|pA(PmCmjMIk68lftag&6^*k2OdVi;b2GcL9&A-AvFScS%N{F1F+FC~r3k6)Y` zdwV842T4#&)!QB*3d5T;#wP60!b`w5`sLP2OSy`Wt9Jj% z>sh{VzK?zlM+1jk8}og&yZNB(b=PcN6_Ys^EMn4$=fKHyh`48hyavN1zj(3+GSlvf z`EAGnWVxRv&_Z&N86;1brL$Z9UhYKX4G|{^sWYH=)5*~&dpv&jO9GilKT(oxc$Pi( z(?)RPQQ^ub;*tJP^t_oT<8YKUt9cGpeA++r(;VQl_-~r+sbkU^(UAJYx&0+0fix+y z&^3g68dZ9Z&{!ljbf-m1pS)kMq2p~kz|Nha(0HMkzF;Pt#rRvxqO9_5<+E<2L0`-fQeI2nkBllsulk z5kCVpE%r~q;D-KP522kkjA;7CZ-~(#$MR5c?crXS zAFj3&S~OoZ8DvUQC+eK5K(|g=xrw)s^y)LdIyHsnd#souCiEg)`JgLj)9HheS#-n# z@cw*VF77*e;sh;9SkL|l_MrHzgUA9}8Y0rnrbO|1Vs_l{lrBGW;TM&N9oc8o3vD{2 zE3IQk?fM?e)vbDY7@;7&4`zjcLlj)zF$ZD@CGz=dYTE-%xG>k}EYI(O8rJUNm?MUp zeA0L@-3V_!WH-l**=lbG6~~A>?ybY#j@N88q2if`{>u&Vi8d!S3WaFX600%|H7-Nq zf;-~8OBbEzHZMgk?2dq%HH+{^_ye@&W&l{^G(^>v46%)|Augv0r4?Pi*d?l=cYsS+ zqUT0j_0ahsESf5KpEdIl>**qF6;6^+aU+puu*)N~|7VWqaC|!HR4;IhH2oJ*B{;no zwb+bw5b^(F>90t*?-h2W>#La;WV8`5D_?vf+Ldjvi9c@tA%nN4*7+-=uePZ7D_9jG zmL%qWS&EbX+Wh~&TSfZ`y;Xrta=mtD87sd}>}<&EPO$;ccYM_CU+h`2U6^!yk(92i z?6PvTcm-kn-tVqgCJkYL&sNtK2$Q*P zNvxF;zW()AjH`hko|PifcNi?sUME4zpVQBw4yo4vCiPyT73m*G8v9r zHtIwXRX47ZfFx1gXyqu72$PAp7AQGAtuda9dn_8V0muDVbTFG%WTgrzz>uAnQ}W*G zZm5y2?uc_O$Szs@Noh~>u2C&GrK+NKziG8ys8|(PR?^-suZi>z9AyLz!+IhOK zpL~sm`pg;3*u0=d(snv5nJ5FUYtcN<+zph>-AO!dE9$b#4W-nLy&=vvm|7gDYMXYhw1fG06+2kFTgD(O~eDrs&<7@hUzMoD##X+Xkhv+)?nhUj^@9G}=G33p&hG2;fgBYXe^arPMrWUXFICgGqF2WU$i zTxUJh_%W3~XT&s~vh(07zTW+(>izRE65P|cu1Orr?PU@C>}Q1qJUUg9A1+1iq;I?q Cv=dkW diff --git a/examples/discovery_v1.py b/examples/discovery_v1.py index a791eac43..646d6a68c 100644 --- a/examples/discovery_v1.py +++ b/examples/discovery_v1.py @@ -22,10 +22,10 @@ environment_id=news_environment_id) print(json.dumps(configurations, indent=2)) -query_options = {'query': 'IBM'} query_results = discovery.query(news_environment_id, news_collections[0]['collection_id'], - query_options) + filter='extracted_metadata.sha1::f5*', + 
return_fields='extracted_metadata.sha1') print(json.dumps(query_results, indent=2)) # new_environment = discovery.create_environment(name="new env", description="bogus env") diff --git a/test/integration/test_integration_discovery_v1.py b/test/integration/test_integration_discovery_v1.py new file mode 100644 index 000000000..c011177d4 --- /dev/null +++ b/test/integration/test_integration_discovery_v1.py @@ -0,0 +1,78 @@ +from unittest import TestCase +import os +import watson_developer_cloud +import json + +class Discoveryv1(TestCase): + def setUp(self): + self.discovery = watson_developer_cloud.DiscoveryV1( + version='2017-10-16', + username=os.getenv('SPEECH_TO_TEXT_USERNAME'), + password=os.getenv('SPEECH_TO_TEXT_PASSWORD')) + self.discovery.set_default_headers({'X-Watson-Learning-Opt-Out': '1', 'X-Watson-Test': '1'}) + self.environment_id = 'e15f6424-f887-4f50-b4ea-68267c36fc9c' # This environment is created for integration testing + self.collection_id = self.discovery.list_collections(self.environment_id)['collections'][0]['collection_id'] + + def test_environments(self): + envs = self.discovery.list_environments() + assert envs is not None + env = self.discovery.get_environment(envs['environments'][0]['environment_id']) + assert env is not None + fields = self.discovery.list_fields(self.environment_id, self.collection_id) + assert fields is not None + + def test_configurations(self): + configs = self.discovery.list_configurations(self.environment_id) + assert configs is not None + new_configuration_id = self.discovery.create_configuration(self.environment_id, 'test', 'creating new config for python sdk')['configuration_id'] + assert new_configuration_id is not None + self.discovery.get_configuration(self.environment_id, new_configuration_id) + updated_config = self.discovery.update_configuration(self.environment_id, new_configuration_id, 'lala') + assert updated_config['name'] == 'lala' + deleted_config = 
self.discovery.delete_configuration(self.environment_id, new_configuration_id) + assert deleted_config['status'] == 'deleted' + + def test_collections_and_expansions(self): + new_collection_id = self.discovery.create_collection(self.environment_id, + name='Example collection for python', + description="Integration test for python sdk")['collection_id'] + assert new_collection_id is not None + self.discovery.get_collection(self.environment_id, new_collection_id) + updated_collection = self.discovery.update_collection(self.environment_id, new_collection_id, name='lala') + assert updated_collection['name'] == 'lala' + + self.discovery.create_expansions(self.environment_id, new_collection_id, [{'input_terms': ['a'], 'expanded_terms': ['aa']}]) + expansions = self.discovery.list_expansions(self.environment_id, new_collection_id) + assert len(expansions['expansions']) > 0 + self.discovery.delete_expansions(self.environment_id, new_collection_id) + + deleted_collection = self.discovery.delete_collection(self.environment_id, new_collection_id) + assert deleted_collection['status'] == 'deleted' + + def test_documents(self): + with open(os.path.join(os.path.dirname(__file__), '../../resources/simple.html'), 'r') as fileinfo: + add_doc = self.discovery.add_document(environment_id=self.environment_id, + collection_id=self.collection_id, + file=fileinfo) + add_doc['document_id'] is not None + + doc_status = self.discovery.get_document_status(self.environment_id, self.collection_id, add_doc['document_id']) + assert doc_status is not None + + with open(os.path.join(os.path.dirname(__file__), '../../resources/simple.html'), 'r') as fileinfo: + update_doc = self.discovery.update_document(self.environment_id, + self.collection_id, + add_doc['document_id'], + file=fileinfo, + filename='newname.html') + assert update_doc is not None + delete_doc = self.discovery.delete_document(self.environment_id, self.collection_id, add_doc['document_id']) + assert delete_doc['status'] == 'deleted' 
+ + + def test_queries(self): + query_results = self.discovery.query(self.environment_id, + self.collection_id, + filter='extracted_metadata.sha1::9181d244*', + return_fields='extracted_metadata.sha1') + assert query_results is not None \ No newline at end of file diff --git a/test/unit/test_discovery_v1.py b/test/unit/test_discovery_v1.py index c28240647..619b5ad2a 100644 --- a/test/unit/test_discovery_v1.py +++ b/test/unit/test_discovery_v1.py @@ -250,7 +250,7 @@ def test_query_relations(): discovery = watson_developer_cloud.DiscoveryV1( '2016-11-07', username='username', password='password') - discovery.query_relations('envid', 'collid', {'count': 10}) + discovery.query_relations('envid', 'collid', count=10) called_url = urlparse(responses.calls[0].request.url) test_url = urlparse(discovery_url) assert called_url.netloc == test_url.netloc @@ -792,7 +792,7 @@ def test_expansions(): discovery.list_expansions('envid', 'colid') assert responses.calls[0].response.json() == {"expansions": "results"} - discovery.create_expansions('envid', 'colid', { "expansions": [{"input_terms": "dumb"}] }) + discovery.create_expansions('envid', 'colid', [{"input_terms": "dumb", "expanded_terms": "dumb2"}]) assert responses.calls[1].response.json() == {"expansions": "success" } discovery.delete_expansions('envid', 'colid') diff --git a/watson_developer_cloud/discovery_v1.py b/watson_developer_cloud/discovery_v1.py index 3c77a7018..9a81dceeb 100644 --- a/watson_developer_cloud/discovery_v1.py +++ b/watson_developer_cloud/discovery_v1.py @@ -36,12 +36,6 @@ class DiscoveryV1(WatsonService): """The Discovery V1 service.""" default_url = 'https://gateway.watsonplatform.net/discovery/api' - VERSION_DATE_2017_11_07 = '2017-11-07' - VERSION_DATE_2017_09_01 = '2017-09-01' - VERSION_DATE_2017_08_01 = '2017-08-01' - VERSION_DATE_2017_07_19 = '2017-07-19' - VERSION_DATE_2017_06_25 = '2017-06-25' - VERSION_DATE_2016_12_01 = '2016-12-01' def __init__(self, version, url=default_url, username=None, 
password=None): """ @@ -163,10 +157,10 @@ def list_environments(self, name=None): def list_fields(self, environment_id, collection_ids): """ - List fields in specified collecitons. + List fields in specified collections. Gets a list of the unique fields (and their types) stored in the indexes of the - specified collecitons. + specified collections. :param str environment_id: The ID of the environment. :param list[str] collection_ids: A comma-separated list of collection IDs to be queried against. @@ -247,11 +241,16 @@ def create_configuration(self, if name is None: raise ValueError('name must be provided') if conversions is not None: - conversions = self._convert_model(conversions) + conversions = self._convert_model(conversions, Conversions) if enrichments is not None: - enrichments = [self._convert_model(x) for x in enrichments] + enrichments = [ + self._convert_model(x, Enrichment) for x in enrichments + ] if normalizations is not None: - normalizations = [self._convert_model(x) for x in normalizations] + normalizations = [ + self._convert_model(x, NormalizationOperation) + for x in normalizations + ] params = {'version': self.version} data = { 'name': name, @@ -369,11 +368,16 @@ def update_configuration(self, if name is None: raise ValueError('name must be provided') if conversions is not None: - conversions = self._convert_model(conversions) + conversions = self._convert_model(conversions, Conversions) if enrichments is not None: - enrichments = [self._convert_model(x) for x in enrichments] + enrichments = [ + self._convert_model(x, Enrichment) for x in enrichments + ] if normalizations is not None: - normalizations = [self._convert_model(x) for x in normalizations] + normalizations = [ + self._convert_model(x, NormalizationOperation) + for x in normalizations + ] params = {'version': self.version} data = { 'name': name, @@ -512,7 +516,7 @@ def create_expansions(self, environment_id, collection_id, expansions): raise ValueError('collection_id must be 
provided') if expansions is None: raise ValueError('expansions must be provided') - expansions = [self._convert_model(x) for x in expansions] + expansions = [self._convert_model(x, Expansion) for x in expansions] params = {'version': self.version} data = {'expansions': expansions} url = '/v1/environments/{0}/collections/{1}/expansions'.format( @@ -869,7 +873,10 @@ def federated_query(self, sort=None, highlight=None, deduplicate=None, - deduplicate_field=None): + deduplicate_field=None, + similar=None, + similar_document_ids=None, + similar_fields=None): """ Query documents in multiple collections. @@ -890,6 +897,9 @@ def federated_query(self, :param bool highlight: When true a highlight field is returned for each result which contains the fields that match the query with `` tags around the matching query terms. Defaults to false. :param bool deduplicate: When `true` and used with a Watson Discovery News collection, duplicate results (based on the contents of the `title` field) are removed. Duplicate comparison is limited to the current query only, `offset` is not considered. Defaults to `false`. This parameter is currently Beta functionality. :param str deduplicate_field: When specified, duplicate results based on the field specified are removed from the returned results. Duplicate comparison is limited to the current query only, `offset` is not considered. This parameter is currently Beta functionality. + :param bool similar: When `true`, results are returned based on their similarity to the document IDs specified in the `similar.document_ids` parameter. The default is `false`. + :param list[str] similar_document_ids: A comma-separated list of document IDs that will be used to find similar documents. **Note:** If the `natural_language_query` parameter is also specified, it will be used to expand the scope of the document similarity search to include the natural language query. 
Other query parameters, such as `filter` and `query` are subsequently applied and reduce the query scope. + :param list[str] similar_fields: A comma-separated list of field names that will be used as a basis for comparison to identify similar documents. If not specified, the entire document is used for comparison. :return: A `dict` containing the `QueryResponse` response. :rtype: dict """ @@ -910,7 +920,10 @@ def federated_query(self, 'sort': self._convert_list(sort), 'highlight': highlight, 'deduplicate': deduplicate, - 'deduplicate.field': deduplicate_field + 'deduplicate.field': deduplicate_field, + 'similar': similar, + 'similar.document_ids': self._convert_list(similar_document_ids), + 'similar.fields': self._convert_list(similar_fields) } url = '/v1/environments/{0}/query'.format( *self._encode_path_vars(environment_id)) @@ -930,7 +943,10 @@ def federated_query_notices(self, offset=None, sort=None, highlight=None, - deduplicate_field=None): + deduplicate_field=None, + similar=None, + similar_document_ids=None, + similar_fields=None): """ Query multiple collection system notices. @@ -952,6 +968,9 @@ def federated_query_notices(self, :param list[str] sort: A comma separated list of fields in the document to sort on. You can optionally specify a sort direction by prefixing the field with `-` for descending or `+` for ascending. Ascending is the default sort direction if no prefix is specified. :param bool highlight: When true a highlight field is returned for each result which contains the fields that match the query with `` tags around the matching query terms. Defaults to false. :param str deduplicate_field: When specified, duplicate results based on the field specified are removed from the returned results. Duplicate comparison is limited to the current query only, `offset` is not considered. This parameter is currently Beta functionality. 
+ :param bool similar: When `true`, results are returned based on their similarity to the document IDs specified in the `similar.document_ids` parameter. The default is `false`. + :param list[str] similar_document_ids: A comma-separated list of document IDs that will be used to find similar documents. **Note:** If the `natural_language_query` parameter is also specified, it will be used to expand the scope of the document similarity search to include the natural language query. Other query parameters, such as `filter` and `query` are subsequently applied and reduce the query scope. + :param list[str] similar_fields: A comma-separated list of field names that will be used as a basis for comparison to identify similar documents. If not specified, the entire document is used for comparison. :return: A `dict` containing the `QueryNoticesResponse` response. :rtype: dict """ @@ -971,7 +990,10 @@ def federated_query_notices(self, 'offset': offset, 'sort': self._convert_list(sort), 'highlight': highlight, - 'deduplicate.field': deduplicate_field + 'deduplicate.field': deduplicate_field, + 'similar': similar, + 'similar.document_ids': self._convert_list(similar_document_ids), + 'similar.fields': self._convert_list(similar_fields) } url = '/v1/environments/{0}/notices'.format( *self._encode_path_vars(environment_id)) @@ -996,7 +1018,10 @@ def query(self, passages_count=None, passages_characters=None, deduplicate=None, - deduplicate_field=None): + deduplicate_field=None, + similar=None, + similar_document_ids=None, + similar_fields=None): """ Query documents. @@ -1021,6 +1046,9 @@ def query(self, :param int passages_characters: The approximate number of characters that any one passage will have. The default is `400`. The minimum is `50`. The maximum is `2000`. :param bool deduplicate: When `true` and used with a Watson Discovery News collection, duplicate results (based on the contents of the `title` field) are removed. 
Duplicate comparison is limited to the current query only, `offset` is not considered. Defaults to `false`. This parameter is currently Beta functionality. :param str deduplicate_field: When specified, duplicate results based on the field specified are removed from the returned results. Duplicate comparison is limited to the current query only, `offset` is not considered. This parameter is currently Beta functionality. + :param bool similar: When `true`, results are returned based on their similarity to the document IDs specified in the `similar.document_ids` parameter. The default is `false`. + :param list[str] similar_document_ids: A comma-separated list of document IDs that will be used to find similar documents. **Note:** If the `natural_language_query` parameter is also specified, it will be used to expand the scope of the document similarity search to include the natural language query. Other query parameters, such as `filter` and `query` are subsequently applied and reduce the query scope. + :param list[str] similar_fields: A comma-separated list of field names that will be used as a basis for comparison to identify similar documents. If not specified, the entire document is used for comparison. :return: A `dict` containing the `QueryResponse` response. 
:rtype: dict """ @@ -1044,7 +1072,10 @@ def query(self, 'passages.count': passages_count, 'passages.characters': passages_characters, 'deduplicate': deduplicate, - 'deduplicate.field': deduplicate_field + 'deduplicate.field': deduplicate_field, + 'similar': similar, + 'similar.document_ids': self._convert_list(similar_document_ids), + 'similar.fields': self._convert_list(similar_fields) } url = '/v1/environments/{0}/collections/{1}/query'.format( *self._encode_path_vars(environment_id, collection_id)) @@ -1080,9 +1111,9 @@ def query_entities(self, if collection_id is None: raise ValueError('collection_id must be provided') if entity is not None: - entity = self._convert_model(entity) + entity = self._convert_model(entity, QueryEntitiesEntity) if context is not None: - context = self._convert_model(context) + context = self._convert_model(context, QueryEntitiesContext) params = {'version': self.version} data = { 'feature': feature, @@ -1112,7 +1143,10 @@ def query_notices(self, passages_fields=None, passages_count=None, passages_characters=None, - deduplicate_field=None): + deduplicate_field=None, + similar=None, + similar_document_ids=None, + similar_fields=None): """ Query system notices. @@ -1138,6 +1172,9 @@ def query_notices(self, :param int passages_count: The maximum number of passages to return. The search returns fewer passages if the requested total is not found. The default is `10`. The maximum is `100`. :param int passages_characters: The approximate number of characters that any one passage will have. The default is `400`. The minimum is `50`. The maximum is `2000`. :param str deduplicate_field: When specified, duplicate results based on the field specified are removed from the returned results. Duplicate comparison is limited to the current query only, `offset` is not considered. This parameter is currently Beta functionality. 
+ :param bool similar: When `true`, results are returned based on their similarity to the document IDs specified in the `similar.document_ids` parameter. The default is `false`. + :param list[str] similar_document_ids: A comma-separated list of document IDs that will be used to find similar documents. **Note:** If the `natural_language_query` parameter is also specified, it will be used to expand the scope of the document similarity search to include the natural language query. Other query parameters, such as `filter` and `query` are subsequently applied and reduce the query scope. + :param list[str] similar_fields: A comma-separated list of field names that will be used as a basis for comparison to identify similar documents. If not specified, the entire document is used for comparison. :return: A `dict` containing the `QueryNoticesResponse` response. :rtype: dict """ @@ -1160,7 +1197,10 @@ def query_notices(self, 'passages.fields': self._convert_list(passages_fields), 'passages.count': passages_count, 'passages.characters': passages_characters, - 'deduplicate.field': deduplicate_field + 'deduplicate.field': deduplicate_field, + 'similar': similar, + 'similar.document_ids': self._convert_list(similar_document_ids), + 'similar.fields': self._convert_list(similar_fields) } url = '/v1/environments/{0}/collections/{1}/notices'.format( *self._encode_path_vars(environment_id, collection_id)) @@ -1198,11 +1238,13 @@ def query_relations(self, if collection_id is None: raise ValueError('collection_id must be provided') if entities is not None: - entities = [self._convert_model(x) for x in entities] + entities = [ + self._convert_model(x, QueryRelationsEntity) for x in entities + ] if context is not None: - context = self._convert_model(context) + context = self._convert_model(context, QueryEntitiesContext) if filter is not None: - filter = self._convert_model(filter) + filter = self._convert_model(filter, QueryRelationsFilter) params = {'version': self.version} data = { 
'entities': entities, @@ -1244,7 +1286,9 @@ def add_training_data(self, if collection_id is None: raise ValueError('collection_id must be provided') if examples is not None: - examples = [self._convert_model(x) for x in examples] + examples = [ + self._convert_model(x, TrainingExample) for x in examples + ] params = {'version': self.version} data = { 'natural_language_query': natural_language_query, @@ -2681,6 +2725,8 @@ def _from_dict(cls, _dict): ) if 'overwrite' in _dict: args['overwrite'] = _dict['overwrite'] + if 'enrichment_name' in _dict: + args['enrichment_name'] = _dict['enrichment_name'] if 'enrichment' in _dict: args['enrichment_name'] = _dict['enrichment'] else: @@ -3078,8 +3124,12 @@ def __init__(self, field_name=None, field_type=None): def _from_dict(cls, _dict): """Initialize a Field object from a json dictionary.""" args = {} + if 'field_name' in _dict: + args['field_name'] = _dict['field_name'] if 'field' in _dict: args['field_name'] = _dict['field'] + if 'field_type' in _dict: + args['field_type'] = _dict['field_type'] if 'type' in _dict: args['field_type'] = _dict['type'] return cls(**args) @@ -4777,7 +4827,6 @@ def __ne__(self, other): return not self == other - class QueryNoticesResult(object): """ QueryNoticesResult. 
From 01de0d6f1793198a5868bda834159b65457723f5 Mon Sep 17 00:00:00 2001 From: Erika Dsouza Date: Wed, 7 Mar 2018 13:35:27 -0500 Subject: [PATCH 22/45] fix(python discovery): correcting the username and password --- test/integration/test_integration_discovery_v1.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/integration/test_integration_discovery_v1.py b/test/integration/test_integration_discovery_v1.py index c011177d4..144927e66 100644 --- a/test/integration/test_integration_discovery_v1.py +++ b/test/integration/test_integration_discovery_v1.py @@ -7,8 +7,8 @@ class Discoveryv1(TestCase): def setUp(self): self.discovery = watson_developer_cloud.DiscoveryV1( version='2017-10-16', - username=os.getenv('SPEECH_TO_TEXT_USERNAME'), - password=os.getenv('SPEECH_TO_TEXT_PASSWORD')) + username=os.getenv('DISCOVERY_TO_TEXT_USERNAME'), + password=os.getenv('DISCOVERY_TO_TEXT_PASSWORD')) self.discovery.set_default_headers({'X-Watson-Learning-Opt-Out': '1', 'X-Watson-Test': '1'}) self.environment_id = 'e15f6424-f887-4f50-b4ea-68267c36fc9c' # This environment is created for integration testing self.collection_id = self.discovery.list_collections(self.environment_id)['collections'][0]['collection_id'] From e067605a81de3f9960c0ca4183715ecf423799af Mon Sep 17 00:00:00 2001 From: Erika Dsouza Date: Wed, 7 Mar 2018 14:04:54 -0500 Subject: [PATCH 23/45] style(python language translator): regenerated service --- .../language_translator_v2.py | 23 ++++++++++++------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/watson_developer_cloud/language_translator_v2.py b/watson_developer_cloud/language_translator_v2.py index 678f55a55..1613518db 100644 --- a/watson_developer_cloud/language_translator_v2.py +++ b/watson_developer_cloud/language_translator_v2.py @@ -66,7 +66,7 @@ def __init__(self, url=default_url, username=None, password=None): use_vcap_services=True) ######################### - # translate + # Translate ######################### 
def translate(self, text, model_id=None, source=None, target=None): @@ -94,7 +94,7 @@ def translate(self, text, model_id=None, source=None, target=None): return response ######################### - # identify + # Identify ######################### def identify(self, text): @@ -133,7 +133,7 @@ def list_identifiable_languages(self): return response ######################### - # models + # Models ######################### def create_model(self, @@ -219,9 +219,10 @@ def delete_model(self, model_id): def get_model(self, model_id): """ - Get information about the given translation model, including training status. + Gets information about a translation model, including training status for custom + models. - :param str model_id: Model ID to use. + :param str model_id: Model ID of the model to get. :return: A `dict` containing the `TranslationModel` response. :rtype: dict """ @@ -233,15 +234,19 @@ def get_model(self, model_id): def list_models(self, source=None, target=None, default_models=None): """ - Lists available standard and custom models by source or target language. + Lists available translation models. :param str source: Specify a language code to filter results by source language. :param str target: Specify a language code to filter results by target language. - :param bool default_models: If the default_models parameter isn't specified, the service will return all models (default_models and non-default_models) for each language pair. To return only default_models models, set this to `true`. To return only non-default_models models, set this to `false`. + :param bool default_models: If the default parameter isn't specified, the service will return all models (default and non-default) for each language pair. To return only default models, set this to `true`. To return only non-default models, set this to `false`. :return: A `dict` containing the `TranslationModels` response. 
:rtype: dict """ - params = {'source': source, 'target': target, 'default': default_models} + params = { + 'source': source, + 'target': target, + 'default': default_models + } url = '/v2/models' response = self.request( method='GET', url=url, params=params, accept_json=True) @@ -543,6 +548,8 @@ def __init__(self, translation_output): def _from_dict(cls, _dict): """Initialize a Translation object from a json dictionary.""" args = {} + if 'translation_output' in _dict: + args['translation_output'] = _dict['translation_output'] if 'translation' in _dict: args['translation_output'] = _dict['translation'] else: From aa092b1e5f71f10f0d93dbf9ac3e854561401d3e Mon Sep 17 00:00:00 2001 From: Erika Dsouza Date: Wed, 7 Mar 2018 14:21:54 -0500 Subject: [PATCH 24/45] chore: regenerated natural language classifier --- .../natural_language_classifier_v1.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/watson_developer_cloud/natural_language_classifier_v1.py b/watson_developer_cloud/natural_language_classifier_v1.py index 9a3bc0051..6831c5de4 100644 --- a/watson_developer_cloud/natural_language_classifier_v1.py +++ b/watson_developer_cloud/natural_language_classifier_v1.py @@ -67,14 +67,13 @@ def __init__(self, url=default_url, username=None, password=None): use_vcap_services=True) ######################### - # naturallanguageclassifier + # Classify text ######################### def classify(self, classifier_id, text): """ Returns label information for the input. The status must be `Available` before you - can use the classifier to classify text. Use `Get information about a classifier` - to retrieve the status. + can use the classifier to classify text. :param str classifier_id: Classifier ID to use. :param str text: The submitted phrase. 
@@ -92,6 +91,10 @@ def classify(self, classifier_id, text): method='POST', url=url, json=data, accept_json=True) return response + ######################### + # Manage classifiers + ######################### + def create_classifier(self, metadata, training_data, @@ -103,8 +106,8 @@ def create_classifier(self, Sends data to create and train a classifier and returns information about the new classifier. - :param file metadata: Metadata in JSON format. The metadata identifies the language of the data, and an optional name to identify the classifier. For details, see the [API reference](https://www.ibm.com/watson/developercloud/natural-language-classifier/api/v1/#create_classifier). - :param file training_data: Training data in CSV format. Each text value must have at least one class. The data can include up to 15,000 records. For details, see [Using your own data](https://www.ibm.com/watson/developercloud/doc/natural-language-classifier/using-your-data.html). + :param file metadata: Metadata in JSON format. The metadata identifies the language of the data, and an optional name to identify the classifier. + :param file training_data: Training data in CSV format. Each text value must have at least one class. The data can include up to 15,000 records. For details, see [Using your own data](https://console.bluemix.net/docs/services/natural-language-classifier/using-your-data.html). :param str metadata_filename: The filename for training_metadata. :param str training_data_filename: The filename for training_data. :return: A `dict` containing the `Classifier` response. 
From dfa592a7bfa1ddf4bbaaf6b5965bf49e72c31b23 Mon Sep 17 00:00:00 2001 From: Erika Dsouza Date: Wed, 7 Mar 2018 14:47:37 -0500 Subject: [PATCH 25/45] test: Adding random generator --- .pytest_cache/v/cache/lastfailed | 30 +++++++++++++++++++ .../test_integration_discovery_v1.py | 5 +++- 2 files changed, 34 insertions(+), 1 deletion(-) create mode 100644 .pytest_cache/v/cache/lastfailed diff --git a/.pytest_cache/v/cache/lastfailed b/.pytest_cache/v/cache/lastfailed new file mode 100644 index 000000000..89a8a7ffc --- /dev/null +++ b/.pytest_cache/v/cache/lastfailed @@ -0,0 +1,30 @@ +{ + "test/integration/test_integration_discovery_v1.py": true, + "test/integration/test_integration_discovery_v1.py::Discoveryv1": true, + "test/integration/test_integration_discovery_v1.py::TestCase": true, + "test/integration/test_integration_speech_to_text_v1.py": true, + "test/integration/test_integration_speech_to_text_v1.py::TestCase": true, + "test/integration/test_integration_speech_to_text_v1.py::TestSpeechToTextV1": true, + "test/integration/test_integration_text_to_speech_v1.py": true, + "test/integration/test_integration_text_to_speech_v1.py::TestIntegrationTextToSpeechV1": true, + "test/integration/test_integration_visual_recognition.py": true, + "test/integration/test_integration_visual_recognition.py::IntegrationTestVisualRecognitionV3": true, + "test/integration/test_integration_visual_recognition.py::TestCase": true, + "test/unit/test_conversation_v1.py": true, + "test/unit/test_discovery_v1.py": true, + "test/unit/test_language_translator_v2.py": true, + "test/unit/test_natural_language_classifier_v1.py": true, + "test/unit/test_natural_language_understanding.py": true, + "test/unit/test_natural_language_understanding.py::TestCase": true, + "test/unit/test_natural_language_understanding.py::TestFeatures": true, + "test/unit/test_natural_language_understanding.py::TestNaturalLanguageUnderstanding": true, + "test/unit/test_personality_insights_v2.py": true, + 
"test/unit/test_personality_insights_v3.py": true, + "test/unit/test_speech_to_text_v1.py": true, + "test/unit/test_text_to_speech_v1.py": true, + "test/unit/test_tone_analyzer_v3.py": true, + "test/unit/test_visual_recognition_v3.py": true, + "test/unit/test_visual_recognition_v3.py::TestCase": true, + "test/unit/test_visual_recognition_v3.py::TestVisualRecognitionV3": true, + "test/unit/test_watson_service.py": true +} \ No newline at end of file diff --git a/test/integration/test_integration_discovery_v1.py b/test/integration/test_integration_discovery_v1.py index 144927e66..7982696f0 100644 --- a/test/integration/test_integration_discovery_v1.py +++ b/test/integration/test_integration_discovery_v1.py @@ -1,7 +1,10 @@ +# coding: utf-8 + from unittest import TestCase import os import watson_developer_cloud import json +import random class Discoveryv1(TestCase): def setUp(self): @@ -34,7 +37,7 @@ def test_configurations(self): def test_collections_and_expansions(self): new_collection_id = self.discovery.create_collection(self.environment_id, - name='Example collection for python', + name='Example collection for python' + random.choice('ABCDEFGHIJKLMNOPQ'), description="Integration test for python sdk")['collection_id'] assert new_collection_id is not None self.discovery.get_collection(self.environment_id, new_collection_id) From e31cbbf51e17aa0110f784648e19125f369fd34b Mon Sep 17 00:00:00 2001 From: Erika Dsouza Date: Wed, 7 Mar 2018 14:53:18 -0500 Subject: [PATCH 26/45] style: Adding # coding: utf-8 --- test/integration/__init__.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/integration/__init__.py b/test/integration/__init__.py index b8c9cd96b..57a3e4a29 100644 --- a/test/integration/__init__.py +++ b/test/integration/__init__.py @@ -1,3 +1,5 @@ +# coding: utf-8 + # Copyright 2015 IBM All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); From 76c09281670baa798b5c078ceffcb155d97dd3c3 Mon Sep 17 00:00:00 2001 From: Erika Dsouza Date: Wed, 7 Mar 2018 14:59:57 -0500 Subject: [PATCH 27/45] chore: regenerate nlu --- watson_developer_cloud/natural_language_understanding_v1.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/watson_developer_cloud/natural_language_understanding_v1.py b/watson_developer_cloud/natural_language_understanding_v1.py index ff87cc011..c145f2b7a 100644 --- a/watson_developer_cloud/natural_language_understanding_v1.py +++ b/watson_developer_cloud/natural_language_understanding_v1.py @@ -76,7 +76,6 @@ class NaturalLanguageUnderstandingV1(WatsonService): """The Natural Language Understanding V1 service.""" default_url = 'https://gateway.watsonplatform.net/natural-language-understanding/api' - VERSION_DATE_2017_02_27 = '2017-02-27' def __init__(self, version, url=default_url, username=None, password=None): """ @@ -155,7 +154,7 @@ def analyze(self, """ if features is None: raise ValueError('features must be provided') - features = self._convert_model(features) + features = self._convert_model(features, Features) params = {'version': self.version} data = { 'features': features, From d226c9ef79c1196b5ec434d51c5469b09759ee48 Mon Sep 17 00:00:00 2001 From: Erika Dsouza Date: Wed, 7 Mar 2018 15:14:57 -0500 Subject: [PATCH 28/45] style: regenerate personality insights --- .../personality_insights_v3.py | 38 ++----------------- 1 file changed, 4 insertions(+), 34 deletions(-) diff --git a/watson_developer_cloud/personality_insights_v3.py b/watson_developer_cloud/personality_insights_v3.py index 581289514..8e5aacf54 100755 --- a/watson_developer_cloud/personality_insights_v3.py +++ b/watson_developer_cloud/personality_insights_v3.py @@ -1,6 +1,6 @@ # coding: utf-8 -# Copyright 2017 IBM All Rights Reserved. +# Copyright 2018 IBM All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,8 +15,7 @@ # limitations under the License. """ ### Service Overview -The IBM Watson Personality Insights service provides a Representational State Transfer -(REST) Application Programming Interface (API) that enables applications to derive +The IBM Watson Personality Insights service enables applications to derive insights from social media, enterprise data, or other digital communications. The service uses linguistic analytics to infer individuals' intrinsic personality characteristics, including Big Five, Needs, and Values, from digital communications such as email, text @@ -24,35 +23,6 @@ noisy social media, portraits of individuals that reflect their personality characteristics. The service can report consumption preferences based on the results of its analysis, and for JSON content that is timestamped, it can report temporal behavior. -### API Usage -The following information provides details about using the service to obtain a personality -profile: -* **The profile method:** The service offers a single `/v3/profile` method that accepts up -to 20 MB of input data and produces results in JSON or CSV format. The service accepts -input in Arabic, English, Japanese, Korean, or Spanish and can produce output in a variety -of languages. -* **Authentication:** You authenticate to the service by using your service credentials. -You can use your credentials to authenticate via a proxy server that resides in Bluemix, -or you can use your credentials to obtain a token and contact the service directly. See -[Service credentials for Watson -services](https://console.bluemix.net/docs/services/watson/getting-started-credentials.html) -and [Tokens for -authentication](https://console.bluemix.net/docs/services/watson/getting-started-tokens.html). -* **Request Logging:** By default, all Watson services log requests and their results. 
-Data is collected only to improve the Watson services. If you do not want to share your -data, set the header parameter `X-Watson-Learning-Opt-Out` to `true` for each request. -Data is collected for any request that omits this header. See [Controlling request logging -for Watson -services](https://console.bluemix.net/docs/services/watson/getting-started-logging.html). - -For more information about the service, see [About Personality -Insights](https://console.bluemix.net/docs/services/personality-insights/index.html). For -information about calling the service and the responses it can generate, see [Requesting a -profile](https://console.bluemix.net/docs/services/personality-insights/input.html), -[Understanding a JSON -profile](https://console.bluemix.net/docs/services/personality-insights/output.html), and -[Understanding a CSV -profile](https://console.bluemix.net/docs/services/personality-insights/output-csv.html). """ from __future__ import absolute_import @@ -733,7 +703,7 @@ class Trait(object): :attr str category: The category of the characteristic: * `personality` for Big Five personality characteristics * `needs` for Needs * `values` for Values. :attr float percentile: The normalized percentile score for the characteristic. The range is 0 to 1. For example, if the percentage for Openness is 0.60, the author scored in the 60th percentile; the author is more open than 59 percent of the population and less open than 39 percent of the population. :attr float raw_score: (optional) The raw score for the characteristic. The range is 0 to 1. A higher score generally indicates a greater likelihood that the author has that characteristic, but raw scores must be considered in aggregate: The range of values in practice might be much smaller than 0 to 1, so an individual score must be considered in the context of the overall scores and their range. 
The raw score is computed based on the input and the service model; it is not normalized or compared with a sample population. The raw score enables comparison of the results against a different sampling population and with a custom normalization approach. - :attr bool significant: (optional) **`2017-10-13`**: Indicates whether the characteristic is meaningful for the input language. The field is always `true` for all characteristics of English, Spanish, and Japanese input. The field is `false` for the subset of characteristics of Arabic and Korean input for which the service's models are unable to generate meaningful results. **`2016-10-20`**: Not returned. + :attr bool significant: (optional) **`2017-10-13`**: Indicates whether the characteristic is meaningful for the input language. The field is always `true` for all characteristics of English, Spanish, and Japanese input. The field is `false` for the subset of characteristics of Arabic and Korean input for which the service's models are unable to generate meaningful results. **`2016-10-19`**: Not returned. :attr list[Trait] children: (optional) For `personality` (Big Five) dimensions, more detailed results for the facets of each dimension as inferred from the input text. """ @@ -753,7 +723,7 @@ def __init__(self, :param str category: The category of the characteristic: * `personality` for Big Five personality characteristics * `needs` for Needs * `values` for Values. :param float percentile: The normalized percentile score for the characteristic. The range is 0 to 1. For example, if the percentage for Openness is 0.60, the author scored in the 60th percentile; the author is more open than 59 percent of the population and less open than 39 percent of the population. :param float raw_score: (optional) The raw score for the characteristic. The range is 0 to 1. 
A higher score generally indicates a greater likelihood that the author has that characteristic, but raw scores must be considered in aggregate: The range of values in practice might be much smaller than 0 to 1, so an individual score must be considered in the context of the overall scores and their range. The raw score is computed based on the input and the service model; it is not normalized or compared with a sample population. The raw score enables comparison of the results against a different sampling population and with a custom normalization approach. - :param bool significant: (optional) **`2017-10-13`**: Indicates whether the characteristic is meaningful for the input language. The field is always `true` for all characteristics of English, Spanish, and Japanese input. The field is `false` for the subset of characteristics of Arabic and Korean input for which the service's models are unable to generate meaningful results. **`2016-10-20`**: Not returned. + :param bool significant: (optional) **`2017-10-13`**: Indicates whether the characteristic is meaningful for the input language. The field is always `true` for all characteristics of English, Spanish, and Japanese input. The field is `false` for the subset of characteristics of Arabic and Korean input for which the service's models are unable to generate meaningful results. **`2016-10-19`**: Not returned. :param list[Trait] children: (optional) For `personality` (Big Five) dimensions, more detailed results for the facets of each dimension as inferred from the input text. 
""" self.trait_id = trait_id From f8d655ba7ef78ecf2cc33aaa1e5e5d14b1a44e8e Mon Sep 17 00:00:00 2001 From: Erika Dsouza Date: Wed, 7 Mar 2018 15:40:40 -0500 Subject: [PATCH 29/45] Remove pytest_cache --- .pytest_cache/v/cache/lastfailed | 30 ------------------------------ 1 file changed, 30 deletions(-) delete mode 100644 .pytest_cache/v/cache/lastfailed diff --git a/.pytest_cache/v/cache/lastfailed b/.pytest_cache/v/cache/lastfailed deleted file mode 100644 index 89a8a7ffc..000000000 --- a/.pytest_cache/v/cache/lastfailed +++ /dev/null @@ -1,30 +0,0 @@ -{ - "test/integration/test_integration_discovery_v1.py": true, - "test/integration/test_integration_discovery_v1.py::Discoveryv1": true, - "test/integration/test_integration_discovery_v1.py::TestCase": true, - "test/integration/test_integration_speech_to_text_v1.py": true, - "test/integration/test_integration_speech_to_text_v1.py::TestCase": true, - "test/integration/test_integration_speech_to_text_v1.py::TestSpeechToTextV1": true, - "test/integration/test_integration_text_to_speech_v1.py": true, - "test/integration/test_integration_text_to_speech_v1.py::TestIntegrationTextToSpeechV1": true, - "test/integration/test_integration_visual_recognition.py": true, - "test/integration/test_integration_visual_recognition.py::IntegrationTestVisualRecognitionV3": true, - "test/integration/test_integration_visual_recognition.py::TestCase": true, - "test/unit/test_conversation_v1.py": true, - "test/unit/test_discovery_v1.py": true, - "test/unit/test_language_translator_v2.py": true, - "test/unit/test_natural_language_classifier_v1.py": true, - "test/unit/test_natural_language_understanding.py": true, - "test/unit/test_natural_language_understanding.py::TestCase": true, - "test/unit/test_natural_language_understanding.py::TestFeatures": true, - "test/unit/test_natural_language_understanding.py::TestNaturalLanguageUnderstanding": true, - "test/unit/test_personality_insights_v2.py": true, - 
"test/unit/test_personality_insights_v3.py": true, - "test/unit/test_speech_to_text_v1.py": true, - "test/unit/test_text_to_speech_v1.py": true, - "test/unit/test_tone_analyzer_v3.py": true, - "test/unit/test_visual_recognition_v3.py": true, - "test/unit/test_visual_recognition_v3.py::TestCase": true, - "test/unit/test_visual_recognition_v3.py::TestVisualRecognitionV3": true, - "test/unit/test_watson_service.py": true -} \ No newline at end of file From c309df6a5b367cb4e54dd1d754aaaf6f914e3c4f Mon Sep 17 00:00:00 2001 From: Erika Dsouza Date: Wed, 7 Mar 2018 15:54:57 -0500 Subject: [PATCH 30/45] refactor(python regenerate): regenerate tone analyzer --- .pytest_cache/v/cache/lastfailed | 17 +++++++++++ test/unit/test_tone_analyzer_v3.py | 6 ++-- watson_developer_cloud/tone_analyzer_v3.py | 34 +++++++++------------- 3 files changed, 34 insertions(+), 23 deletions(-) create mode 100644 .pytest_cache/v/cache/lastfailed diff --git a/.pytest_cache/v/cache/lastfailed b/.pytest_cache/v/cache/lastfailed new file mode 100644 index 000000000..eba068d48 --- /dev/null +++ b/.pytest_cache/v/cache/lastfailed @@ -0,0 +1,17 @@ +{ + "test/integration/test_integration_speech_to_text_v1.py": true, + "test/integration/test_integration_text_to_speech_v1.py": true, + "test/integration/test_integration_visual_recognition.py": true, + "test/unit/test_conversation_v1.py": true, + "test/unit/test_discovery_v1.py": true, + "test/unit/test_language_translator_v2.py": true, + "test/unit/test_natural_language_classifier_v1.py": true, + "test/unit/test_natural_language_understanding.py": true, + "test/unit/test_personality_insights_v2.py": true, + "test/unit/test_personality_insights_v3.py": true, + "test/unit/test_speech_to_text_v1.py": true, + "test/unit/test_text_to_speech_v1.py": true, + "test/unit/test_tone_analyzer_v3.py": true, + "test/unit/test_visual_recognition_v3.py": true, + "test/unit/test_watson_service.py": true +} \ No newline at end of file diff --git 
a/test/unit/test_tone_analyzer_v3.py b/test/unit/test_tone_analyzer_v3.py index 456009715..e078f0b81 100755 --- a/test/unit/test_tone_analyzer_v3.py +++ b/test/unit/test_tone_analyzer_v3.py @@ -22,7 +22,7 @@ def test_tone(): with open(os.path.join(os.path.dirname(__file__), '../../resources/personality.txt')) as tone_text: tone_analyzer = watson_developer_cloud.ToneAnalyzerV3("2016-05-19", username="username", password="password") - tone_analyzer.tone(tone_text.read()) + tone_analyzer.tone(tone_text.read(), 'application/json') assert responses.calls[0].request.url == tone_url + tone_args assert responses.calls[0].response.text == tone_response @@ -46,7 +46,7 @@ def test_tone_with_args(): with open(os.path.join(os.path.dirname(__file__), '../../resources/personality.txt')) as tone_text: tone_analyzer = watson_developer_cloud.ToneAnalyzerV3("2016-05-19", username="username", password="password") - tone_analyzer.tone(tone_text.read(), sentences=False) + tone_analyzer.tone(tone_text.read(), 'application/json', sentences=False) assert responses.calls[0].request.url.split('?')[0] == tone_url # Compare args. Order is not deterministic! @@ -134,7 +134,7 @@ def test_error(): username="username", password="password") text = "Team, I know that times are tough!" try: - tone_analyzer.tone(text) + tone_analyzer.tone(text, 'application/json') except WatsonException as ex: assert len(responses.calls) == 1 assert isinstance(ex, WatsonApiException) diff --git a/watson_developer_cloud/tone_analyzer_v3.py b/watson_developer_cloud/tone_analyzer_v3.py index f688fdae1..d419e512f 100755 --- a/watson_developer_cloud/tone_analyzer_v3.py +++ b/watson_developer_cloud/tone_analyzer_v3.py @@ -14,7 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. """ -### Service Overview The IBM Watson Tone Analyzer service uses linguistic analysis to detect emotional and language tones in written text. 
The service can analyze tone at both the document and sentence levels. You can use the service to understand how your written communications are @@ -81,12 +80,12 @@ def __init__(self, version, url=default_url, username=None, password=None): self.version = version ######################### - # tone + # Methods ######################### def tone(self, tone_input, - content_type='application/json', + content_type, sentences=None, tones=None, content_language=None, @@ -98,8 +97,8 @@ def tone(self, :param str content_type: The type of the input: application/json, text/plain, or text/html. A character encoding can be specified by including a `charset` parameter. For example, 'text/plain;charset=utf-8'. :param bool sentences: Indicates whether the service is to return an analysis of each individual sentence in addition to its analysis of the full document. If `true` (the default), the service returns results for each sentence. :param list[str] tones: **`2017-09-21`:** Deprecated. The service continues to accept the parameter for backward-compatibility, but the parameter no longer affects the response. **`2016-05-19`:** A comma-separated list of tones for which the service is to return its analysis of the input; the indicated tones apply both to the full document and to individual sentences of the document. You can specify one or more of the valid values. Omit the parameter to request results for all three tones. - :param str content_language: The language of the input text for the request: English or French. Regional variants are treated as their parent language; for example, `en-US` is interpreted as `en`. The input content must match the specified language. Do not submit content that contains both languages. You can specify any combination of languages for `content_language` and `Accept-Language`. * **`2017-09-21`:** Accepts `en` or `fr`. * **`2016-05-19`:** Accepts only `en`. - :param str accept_language: The desired language of the response. 
For two-character arguments, regional variants are treated as their parent language; for example, `en-US` is interpreted as `en`. You can specify any combination of languages for `Content-Language` and `accept_language`. + :param str content_language: The language of the input text for the request: English or French. Regional variants are treated as their parent language; for example, `en-US` is interpreted as `en`. The input content must match the specified language. Do not submit content that contains both languages. You can specify any combination of languages for `Content-Language` and `Accept-Language`. * **`2017-09-21`:** Accepts `en` or `fr`. * **`2016-05-19`:** Accepts only `en`. + :param str accept_language: The desired language of the response. For two-character arguments, regional variants are treated as their parent language; for example, `en-US` is interpreted as `en`. You can specify any combination of languages for `Content-Language` and `Accept-Language`. :return: A `dict` containing the `ToneAnalysis` response. :rtype: dict """ @@ -131,30 +130,26 @@ def tone(self, accept_json=True) return response - def tone_chat(self, utterances, accept_language=None): + def tone_chat(self, + utterances, + content_language=None, + accept_language=None): """ Analyze customer engagement tone. - Use the customer engagement endpoint to analyze the tone of customer service and - customer support conversations. For each utterance of a conversation, the method - reports the most prevalent subset of the following seven tones: sad, frustrated, - satisfied, excited, polite, impolite, and sympathetic. If you submit more than - 50 utterances, the service returns a warning for the overall content and analyzes - only the first 50 utterances. If you submit a single utterance that contains more - than 500 characters, the service returns an error for that utterance and does not - analyze the utterance. The request fails if all utterances have more than 500 - characters. 
Per the JSON specification, the default character encoding for JSON - content is effectively always UTF-8. - :param list[Utterance] utterances: An array of `Utterance` objects that provides the input content that the service is to analyze. + :param str content_language: The language of the input text for the request: English or French. Regional variants are treated as their parent language; for example, `en-US` is interpreted as `en`. The input content must match the specified language. Do not submit content that contains both languages. You can specify any combination of languages for `Content-Language` and `Accept-Language`. * **`2017-09-21`:** Accepts `en` or `fr`. * **`2016-05-19`:** Accepts only `en`. :param str accept_language: The desired language of the response. For two-character arguments, regional variants are treated as their parent language; for example, `en-US` is interpreted as `en`. :return: A `dict` containing the `UtteranceAnalyses` response. :rtype: dict """ if utterances is None: raise ValueError('utterances must be provided') - utterances = [self._convert_model(x) for x in utterances] - headers = {'Accept-Language': accept_language} + utterances = [self._convert_model(x, Utterance) for x in utterances] + headers = { + 'Content-Language': content_language, + 'Accept-Language': accept_language + } params = {'version': self.version} data = {'utterances': utterances} url = '/v3/tone_chat' @@ -167,7 +162,6 @@ def tone_chat(self, utterances, accept_language=None): accept_json=True) return response - ############################################################################## # Models ############################################################################## From 25108e291b1ca64d42efb7a5978af1fccb6cde07 Mon Sep 17 00:00:00 2001 From: Erika Dsouza Date: Wed, 7 Mar 2018 15:56:19 -0500 Subject: [PATCH 31/45] Remove file --- .pytest_cache/v/cache/lastfailed | 17 ----------------- 1 file changed, 17 deletions(-) delete mode 100644 
.pytest_cache/v/cache/lastfailed diff --git a/.pytest_cache/v/cache/lastfailed b/.pytest_cache/v/cache/lastfailed deleted file mode 100644 index eba068d48..000000000 --- a/.pytest_cache/v/cache/lastfailed +++ /dev/null @@ -1,17 +0,0 @@ -{ - "test/integration/test_integration_speech_to_text_v1.py": true, - "test/integration/test_integration_text_to_speech_v1.py": true, - "test/integration/test_integration_visual_recognition.py": true, - "test/unit/test_conversation_v1.py": true, - "test/unit/test_discovery_v1.py": true, - "test/unit/test_language_translator_v2.py": true, - "test/unit/test_natural_language_classifier_v1.py": true, - "test/unit/test_natural_language_understanding.py": true, - "test/unit/test_personality_insights_v2.py": true, - "test/unit/test_personality_insights_v3.py": true, - "test/unit/test_speech_to_text_v1.py": true, - "test/unit/test_text_to_speech_v1.py": true, - "test/unit/test_tone_analyzer_v3.py": true, - "test/unit/test_visual_recognition_v3.py": true, - "test/unit/test_watson_service.py": true -} \ No newline at end of file From cc90b0b78c5f9079dee5309bd8f273d8087d88f9 Mon Sep 17 00:00:00 2001 From: Erika Dsouza Date: Wed, 7 Mar 2018 16:44:52 -0500 Subject: [PATCH 32/45] Regenerate text to speech --- test/unit/test_text_to_speech_v1.py | 4 +- watson_developer_cloud/text_to_speech_v1.py | 103 +++++--------------- 2 files changed, 25 insertions(+), 82 deletions(-) diff --git a/test/unit/test_text_to_speech_v1.py b/test/unit/test_text_to_speech_v1.py index bb18e4e46..3ed6d3c83 100644 --- a/test/unit/test_text_to_speech_v1.py +++ b/test/unit/test_text_to_speech_v1.py @@ -99,7 +99,7 @@ def test_success(): assert responses.calls[1].request.url == voice_url assert responses.calls[1].response.text == json.dumps(voice_response) - text_to_speech.synthesize('hello') + text_to_speech.synthesize('hello', 'audio/basic') assert responses.calls[2].request.url == synthesize_url assert responses.calls[2].response.text == synthesize_response_body @@ -220,7 
+220,7 @@ def test_custom_words(): text_to_speech.list_words(customization_id="custid") text_to_speech.add_words( - customization_id="custid", words=["one", "two", "three"]) + customization_id="custid", words=[{"word": "one", "translation": "one"}, {"word": "two", "translation": "two"}]) text_to_speech.get_word(customization_id="custid", word="word") text_to_speech.add_word( customization_id='custid', word="word", translation="I'm translated") diff --git a/watson_developer_cloud/text_to_speech_v1.py b/watson_developer_cloud/text_to_speech_v1.py index 258b08e05..c1fcade98 100644 --- a/watson_developer_cloud/text_to_speech_v1.py +++ b/watson_developer_cloud/text_to_speech_v1.py @@ -65,7 +65,7 @@ def __init__(self, url=default_url, username=None, password=None): use_vcap_services=True) ######################### - # voices + # Voices ######################### def get_voice(self, voice, customization_id=None): @@ -106,14 +106,14 @@ def voices(self): def synthesize(self, text, - accept=None, + accept, voice=None, customization_id=None): """ Streaming speech synthesis of the text in the body parameter. Synthesizes text to spoken audio, returning the synthesized audio stream as an array of bytes. :param str text: The text to synthesize. - :param str accept: The requested audio format (MIME type) of the audio. You can use this header or the `accept` query parameter to specify the audio format. (For the `audio/l16` format, you can optionally specify `endianness=big-endian` or `endianness=little-endian`; the default is little endian.). + :param str accept: The type of the response: audio/basic, audio/flac, audio/l16;rate=nnnn, audio/ogg, audio/ogg;codecs=opus, audio/ogg;codecs=vorbis, audio/mp3, audio/mpeg, audio/mulaw;rate=nnnn, audio/wav, audio/webm, audio/webm;codecs=opus, or audio/webm;codecs=vorbis. :param str voice: The voice to use for synthesis. Retrieve available voices with the `GET /v1/voices` method. 
:param str customization_id: The GUID of a custom voice model to use for the synthesis. If a custom voice model is specified, it is guaranteed to work only if it matches the language of the indicated voice. You must make the request with service credentials created for the instance of the service that owns the custom model. Omit the parameter to use the specified voice with no customization. :return: A `Response ` object representing the response. @@ -121,6 +121,8 @@ def synthesize(self, """ if text is None: raise ValueError('text must be provided') + if accept is None: + raise ValueError('accept must be provided') headers = {'Accept': accept} params = { 'voice': voice, @@ -139,7 +141,7 @@ def synthesize(self, return response ######################### - # pronunciation + # Pronunciation ######################### def get_pronunciation(self, @@ -181,7 +183,7 @@ def pronunciation(self, text, voice=None, pronunciation_format='ipa'): return self.get_pronunciation(text, voice, pronunciation_format) ######################### - # customVoiceModels + # Custom voice models ######################### def create_voice_model(self, name, language=None, description=None): @@ -239,7 +241,10 @@ def get_voice_model(self, customization_id): Lists all information about the custom voice model with the specified `customization_id`. In addition to metadata such as the name and description of the voice model, the output includes the words in the model and their translations - as defined in the model. **Note:** This method is currently a beta release. + as defined in the model. To see just the metadata for a voice model, use the `GET + /v1/customizations` method. You must use credentials for the instance of the + service that owns a model to list information about it. **Note:** This method is + currently a beta release. :param str customization_id: The GUID of the custom voice model that is to be queried. 
You must make the request with service credentials created for the instance of the service that owns the custom model. :return: A `dict` containing the `VoiceModel` response. @@ -258,8 +263,13 @@ def get_customization(self, customization_id): def list_voice_models(self, language=None): """ - Lists all available custom voice models for a language or for all languages. - **Note:** This method is currently a beta release. + Lists metadata such as the name and description for the custom voice models that + you own. Use the `language` query parameter to list the voice models that you own + for the specified language only. Omit the parameter to see all voice models that + you own for all languages. To see the words in addition to the metadata for a + specific voice model, use the `GET /v1/customizations/{customization_id}` method. + You must use credentials for the instance of the service that owns a model to list + information about it. **Note:** This method is currently a beta release. :param str language: The language for which custom voice models that are owned by the requesting service credentials are to be returned. Omit the parameter to see all custom voice models that are owned by the requester. :return: A `dict` containing the `VoiceModels` response. @@ -295,13 +305,13 @@ def update_voice_model(self, :param str customization_id: The GUID of the custom voice model that is to be updated. You must make the request with service credentials created for the instance of the service that owns the custom model. :param str name: A new name for the custom voice model. :param str description: A new description for the custom voice model. - :param list[CustomWord] words: An array of words and their translations that are to be added or updated for the custom voice model. Pass an empty array to make no additions or updates. + :param list[Word] words: An array of words and their translations that are to be added or updated for the custom voice model. 
Pass an empty array to make no additions or updates. :rtype: None """ if customization_id is None: raise ValueError('customization_id must be provided') if words is not None: - words = [self._convert_model(x) for x in words] + words = [self._convert_model(x, Word) for x in words] data = {'name': name, 'description': description, 'words': words} url = '/v1/customizations/{0}'.format( *self._encode_path_vars(customization_id)) @@ -314,7 +324,7 @@ def update_customization(self, customization_id, name=None, return self.update_voice_model(customization_id, name, description, words) ######################### - # customWords + # Custom words ######################### def add_word(self, customization_id, word, translation, @@ -363,14 +373,14 @@ def add_words(self, customization_id, words): method is currently a beta release. :param str customization_id: The GUID of the custom voice model that is to be updated. You must make the request with service credentials created for the instance of the service that owns the custom model. - :param list[CustomWord] words: An array of `CustomWord` objects that provides information about the words and their translations that are to be added or updated for the custom voice model. + :param list[Word] words: An array of words and their translations from the custom voice model. The words are listed in alphabetical order, with uppercase letters listed before lowercase letters. The array is empty if the custom model contains no words. 
:rtype: None """ if customization_id is None: raise ValueError('customization_id must be provided') if words is None: raise ValueError('words must be provided') - words = [self._convert_model(x) for x in words] + words = [self._convert_model(x, Word) for x in words] data = {'words': words} url = '/v1/customizations/{0}/words'.format( *self._encode_path_vars(customization_id)) @@ -464,73 +474,6 @@ def get_customization_words(self, customization_id): # Models ############################################################################## - -class CustomWord(object): - """ - CustomWord. - - :attr str word: A word that is to be added or updated for the custom voice model. - :attr str translation: The phonetic or sounds-like translation for the word. A phonetic translation is based on the SSML format for representing the phonetic string of a word either as an IPA or IBM SPR translation. A sounds-like translation consists of one or more words that, when combined, sound like the word. - :attr str part_of_speech: (optional) **Japanese only.** The part of speech for the word. The service uses the value to produce the correct intonation for the word. You can create only a single entry, with or without a single part of speech, for any word; you cannot create multiple entries with different parts of speech for the same word. For more information, see [Working with Japanese entries](https://console.bluemix.net/docs/services/text-to-speech/custom-rules.html#jaNotes). - """ - - def __init__(self, word, translation, part_of_speech=None): - """ - Initialize a CustomWord object. - - :param str word: A word that is to be added or updated for the custom voice model. - :param str translation: The phonetic or sounds-like translation for the word. A phonetic translation is based on the SSML format for representing the phonetic string of a word either as an IPA or IBM SPR translation. A sounds-like translation consists of one or more words that, when combined, sound like the word. 
- :param str part_of_speech: (optional) **Japanese only.** The part of speech for the word. The service uses the value to produce the correct intonation for the word. You can create only a single entry, with or without a single part of speech, for any word; you cannot create multiple entries with different parts of speech for the same word. For more information, see [Working with Japanese entries](https://console.bluemix.net/docs/services/text-to-speech/custom-rules.html#jaNotes). - """ - self.word = word - self.translation = translation - self.part_of_speech = part_of_speech - - @classmethod - def _from_dict(cls, _dict): - """Initialize a CustomWord object from a json dictionary.""" - args = {} - if 'word' in _dict: - args['word'] = _dict['word'] - else: - raise ValueError( - 'Required property \'word\' not present in CustomWord JSON') - if 'translation' in _dict: - args['translation'] = _dict['translation'] - else: - raise ValueError( - 'Required property \'translation\' not present in CustomWord JSON' - ) - if 'part_of_speech' in _dict: - args['part_of_speech'] = _dict['part_of_speech'] - return cls(**args) - - def _to_dict(self): - """Return a json dictionary representing this model.""" - _dict = {} - if hasattr(self, 'word') and self.word is not None: - _dict['word'] = self.word - if hasattr(self, 'translation') and self.translation is not None: - _dict['translation'] = self.translation - if hasattr(self, 'part_of_speech') and self.part_of_speech is not None: - _dict['part_of_speech'] = self.part_of_speech - return _dict - - def __str__(self): - """Return a `str` version of this CustomWord object.""" - return json.dumps(self._to_dict(), indent=2) - - def __eq__(self, other): - """Return `true` when self and other are equal, false otherwise.""" - if not isinstance(other, self.__class__): - return False - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Return `true` when self and other are not equal, false otherwise.""" - return not self 
== other - - class Pronunciation(object): """ Pronunciation. From 1c3d728c8272a18f3232e84e1f2c47e6284b2d1e Mon Sep 17 00:00:00 2001 From: Erika Dsouza Date: Wed, 7 Mar 2018 17:22:31 -0500 Subject: [PATCH 33/45] Regenerate speech to text --- test/unit/test_speech_to_text_v1.py | 2 +- watson_developer_cloud/speech_to_text_v1.py | 39 +++++++++++++-------- 2 files changed, 25 insertions(+), 16 deletions(-) diff --git a/test/unit/test_speech_to_text_v1.py b/test/unit/test_speech_to_text_v1.py index 7165d9aab..50a06c248 100755 --- a/test/unit/test_speech_to_text_v1.py +++ b/test/unit/test_speech_to_text_v1.py @@ -119,7 +119,7 @@ def test_recognitions(): with open( os.path.join(os.path.dirname(__file__), '../../resources/speech.wav'), 'rb') as audio_file: - speech_to_text.create_job(audio=audio_file) + speech_to_text.create_job(audio=audio_file, content_type='audio/basic') assert responses.calls[2].response.json() == {'status': 'waiting'} speech_to_text.delete_job('jobid') diff --git a/watson_developer_cloud/speech_to_text_v1.py b/watson_developer_cloud/speech_to_text_v1.py index eb5c6ded8..3d684514f 100644 --- a/watson_developer_cloud/speech_to_text_v1.py +++ b/watson_developer_cloud/speech_to_text_v1.py @@ -72,7 +72,7 @@ def __init__(self, url=default_url, username=None, password=None): use_vcap_services=True) ######################### - # models + # Models ######################### def get_model(self, model_id): @@ -113,7 +113,7 @@ def models(self): return self.list_models ######################### - # recognize + # Sessionless ######################### def recognize(self, @@ -123,7 +123,7 @@ def recognize(self, customization_weight=None, version=None, audio=None, - content_type='audio/basic', + content_type=None, inactivity_timeout=None, keywords=None, keywords_threshold=None, @@ -277,7 +277,7 @@ def recognize_with_websocket(self, headers) ######################### - # asynchronous + # Asynchronous ######################### def check_job(self, id): @@ -307,12 +307,13 
@@ def check_jobs(self): def create_job(self, audio, - content_type='audio/basic', + content_type, + transfer_encoding=None, + model=None, callback_url=None, events=None, user_token=None, results_ttl=None, - model=None, customization_id=None, acoustic_customization_id=None, customization_weight=None, @@ -332,11 +333,12 @@ def create_job(self, :param str audio: Audio to transcribe in the format specified by the `Content-Type` header. :param str content_type: The type of the input: audio/basic, audio/flac, audio/l16, audio/mp3, audio/mpeg, audio/mulaw, audio/ogg, audio/ogg;codecs=opus, audio/ogg;codecs=vorbis, audio/wav, audio/webm, audio/webm;codecs=opus, or audio/webm;codecs=vorbis. + :param str transfer_encoding: Set to `chunked` to send the audio in streaming mode. The data does not need to exist fully before being streamed to the service. + :param str model: The identifier of the model to be used for the recognition request. (Use `GET /v1/models` for a list of available models.). :param str callback_url: A URL to which callback notifications are to be sent. The URL must already be successfully white-listed by using the `POST /v1/register_callback` method. Omit the parameter to poll the service for job completion and results. You can include the same callback URL with any number of job creation requests. Use the `user_token` query parameter to specify a unique user-specified string with each job to differentiate the callback notifications for the jobs. :param str events: If the job includes a callback URL, a comma-separated list of notification events to which to subscribe. Valid events are: `recognitions.started` generates a callback notification when the service begins to process the job. `recognitions.completed` generates a callback notification when the job is complete; you must use the `GET /v1/recognitions/{id}` method to retrieve the results before they time out or are deleted. 
`recognitions.completed_with_results` generates a callback notification when the job is complete; the notification includes the results of the request. `recognitions.failed` generates a callback notification if the service experiences an error while processing the job. Omit the parameter to subscribe to the default events: `recognitions.started`, `recognitions.completed`, and `recognitions.failed`. The `recognitions.completed` and `recognitions.completed_with_results` events are incompatible; you can specify only of the two events. If the job does not include a callback URL, omit the parameter. :param str user_token: If the job includes a callback URL, a user-specified string that the service is to include with each callback notification for the job; the token allows the user to maintain an internal mapping between jobs and notification events. If the job does not include a callback URL, omit the parameter. :param int results_ttl: The number of minutes for which the results are to be available after the job has finished. If not delivered via a callback, the results must be retrieved within this time. Omit the parameter to use a time to live of one week. The parameter is valid with or without a callback URL. - :param str model: The identifier of the model to be used for the recognition request. (Use `GET /v1/models` for a list of available models.). :param str customization_id: The GUID of a custom language model that is to be used with the request. The base model of the specified custom language model must match the model specified with the `model` parameter. You must make the request with service credentials created for the instance of the service that owns the custom model. By default, no custom language model is used. :param str acoustic_customization_id: The GUID of a custom acoustic model that is to be used with the request. The base model of the specified custom acoustic model must match the model specified with the `model` parameter. 
You must make the request with service credentials created for the instance of the service that owns the custom model. By default, no custom acoustic model is used. :param float customization_weight: If you specify a `customization_id` with the request, you can use the `customization_weight` parameter to tell the service how much weight to give to words from the custom language model compared to those from the base model for speech recognition. Specify a value between 0.0 and 1.0. Unless a different customization weight was specified for the custom model when it was trained, the default value is 0.3. A customization weight that you specify overrides a weight that was specified when the custom model was trained. The default value yields the best performance in general. Assign a higher value if your audio makes frequent use of OOV words from the custom model. Use caution when setting the weight: a higher value can improve the accuracy of phrases from the custom model's domain, but it can negatively affect performance on non-domain phrases. 
@@ -358,13 +360,16 @@ def create_job(self, raise ValueError('audio must be provided') if content_type is None: raise ValueError('content_type must be provided') - headers = {'Content-Type': content_type} + headers = { + 'Content-Type': content_type, + 'Transfer-Encoding': transfer_encoding + } params = { + 'model': model, 'callback_url': callback_url, 'events': events, 'user_token': user_token, 'results_ttl': results_ttl, - 'model': model, 'customization_id': customization_id, 'acoustic_customization_id': acoustic_customization_id, 'customization_weight': customization_weight, @@ -447,7 +452,7 @@ def unregister_callback(self, callback_url): return None ######################### - # customLanguageModels + # Custom language models ######################### def create_language_model(self, @@ -631,7 +636,7 @@ def upgrade_language_model(self, customization_id): return None ######################### - # customCorpora + # Custom corpora ######################### def add_corpus(self, @@ -734,7 +739,7 @@ def list_corpora(self, customization_id): return response ######################### - # customWords + # Custom words ######################### def add_word(self, @@ -785,7 +790,7 @@ def add_words(self, customization_id, words): raise ValueError('customization_id must be provided') if words is None: raise ValueError('words must be provided') - words = [self._convert_model(x) for x in words] + words = [self._convert_model(x, CustomWord) for x in words] data = {'words': words} url = '/v1/customizations/{0}/words'.format( *self._encode_path_vars(customization_id)) @@ -1031,14 +1036,14 @@ def upgrade_acoustic_model(self, return None ######################### - # customAudioResources + # Custom audio resources ######################### def add_audio(self, customization_id, audio_name, audio_resource, - content_type='application/zip', + content_type, contained_content_type=None, allow_overwrite=None): """ @@ -2382,6 +2387,8 @@ def _from_dict(cls, _dict): raise ValueError( 
'Required property \'confidence\' not present in SpeakerLabelsResult JSON' ) + if 'final_results' in _dict: + args['final_results'] = _dict['final_results'] if 'final' in _dict: args['final_results'] = _dict['final'] else: @@ -2696,6 +2703,8 @@ def __init__(self, def _from_dict(cls, _dict): """Initialize a SpeechRecognitionResult object from a json dictionary.""" args = {} + if 'final_results' in _dict: + args['final_results'] = _dict['final_results'] if 'final' in _dict: args['final_results'] = _dict['final'] else: From c5aef0406221d58409d264f1ce6ff5a7a276a74c Mon Sep 17 00:00:00 2001 From: Erika Dsouza Date: Wed, 7 Mar 2018 18:20:07 -0500 Subject: [PATCH 34/45] Regenerate visual recognition --- .../visual_recognition_v3.py | 64 ++++++++++++++----- 1 file changed, 49 insertions(+), 15 deletions(-) diff --git a/watson_developer_cloud/visual_recognition_v3.py b/watson_developer_cloud/visual_recognition_v3.py index 9cb698f4a..e5e0e6edc 100644 --- a/watson_developer_cloud/visual_recognition_v3.py +++ b/watson_developer_cloud/visual_recognition_v3.py @@ -41,7 +41,6 @@ class VisualRecognitionV3(WatsonService): """The Visual Recognition V3 service.""" default_url = 'https://gateway-a.watsonplatform.net/visual-recognition/api' - VERSION_DATE_2016_05_20 = '2016-05-20' def __init__(self, version, url=default_url, api_key=None): """ @@ -75,7 +74,7 @@ def __init__(self, version, url=default_url, api_key=None): self.version = version ######################### - # general + # General ######################### def classify(self, @@ -83,17 +82,25 @@ def classify(self, parameters=None, accept_language=None, images_file_content_type=None, - images_filename=None): + images_filename=None, + url=None, + threshold=None, + owners=None, + classifier_ids=None): """ Classify images. Classify images with built-in or custom classifiers. :param file images_file: An image file (.jpg, .png) or .zip file with images. Maximum image size is 10 MB. 
Include no more than 20 images and limit the .zip file to 100 MB. Encode the image and .zip file names in UTF-8 if they contain non-ASCII characters. The service assumes UTF-8 encoding if it encounters non-ASCII characters. You can also include images with the `url` property in the **parameters** object. - :param str parameters: A JSON object that specifies additional request options. The parameter can be sent as a string or a file, and can include these inputs: - **url**: A string with the image URL to analyze. Must be in .jpg, or .png format. The minimum recommended pixel density is 32X32 pixels per inch, and the maximum image size is 10 MB. You can also include images in the **images_file** parameter. - **threshold**: A floating point value that specifies the minimum score a class must have to be displayed in the response. The default threshold for returning scores from a classifier is `0.5`. Set the threshold to `0.0` to ignore the classification score and return all values. - **owners**: An array of the categories of classifiers to apply. Use `IBM` to classify against the `default` general classifier, and use `me` to classify against your custom classifiers. To analyze the image against both classifier categories, set the value to both `IBM` and `me`. The built-in `default` classifier is used if both **classifier_ids** and **owners** parameters are empty. The **classifier_ids** parameter overrides **owners**, so make sure that **classifier_ids** is empty. - **classifier_ids**: Specifies which classifiers to apply and overrides the **owners** parameter. You can specify both custom and built-in classifiers. The built-in `default` classifier is used if both **classifier_ids** and **owners** parameters are empty. The following built-in classifier IDs require no training: - `default`: Returns classes from thousands of general tags. - `food`: (Beta) Enhances specificity and accuracy for images of food items. 
- `explicit`: (Beta) Evaluates whether the image might be pornographic. Example: `{\"classifier_ids\":[\"CarsvsTrucks_1479118188\",\"explicit\"],\"threshold\":0.6}`. + :param str parameters: (Deprecated) A JSON object that specifies additional request options. The parameter can be sent as a string or a file, and can include these inputs: - **url**: A string with the image URL to analyze. Must be in .jpg, or .png format. The minimum recommended pixel density is 32X32 pixels per inch, and the maximum image size is 10 MB. You can also include images in the **images_file** parameter. - **threshold**: A floating point value that specifies the minimum score a class must have to be displayed in the response. The default threshold for returning scores from a classifier is `0.5`. Set the threshold to `0.0` to ignore the classification score and return all values. - **owners**: An array of the categories of classifiers to apply. Use `IBM` to classify against the `default` general classifier, and use `me` to classify against your custom classifiers. To analyze the image against both classifier categories, set the value to both `IBM` and `me`. The built-in `default` classifier is used if both **classifier_ids** and **owners** parameters are empty. The **classifier_ids** parameter overrides **owners**, so make sure that **classifier_ids** is empty. - **classifier_ids**: Specifies which classifiers to apply and overrides the **owners** parameter. You can specify both custom and built-in classifiers. The built-in `default` classifier is used if both **classifier_ids** and **owners** parameters are empty. The following built-in classifier IDs require no training: - `default`: Returns classes from thousands of general tags. - `food`: (Beta) Enhances specificity and accuracy for images of food items. - `explicit`: (Beta) Evaluates whether the image might be pornographic. Example: `{\"classifier_ids\":[\"CarsvsTrucks_1479118188\",\"explicit\"],\"threshold\":0.6}`. 
:param str accept_language: Specifies the language of the output class names. Can be `en` (English), `ar` (Arabic), `de` (German), `es` (Spanish), `it` (Italian), `ja` (Japanese), or `ko` (Korean). Classes for which no translation is available are omitted. The response might not be in the specified language under these conditions: - English is returned when the requested language is not supported. - Classes are not returned when there is no translation for them. - Custom classifiers returned with this method return tags in the language of the custom classifier. :param str images_file_content_type: The content type of images_file. :param str images_filename: The filename for images_file. + :param str url: A string with the image URL to analyze. Must be in .jpg, or .png format. The minimum recommended pixel density is 32X32 pixels per inch, and the maximum image size is 10 MB. You can also include images in the **images_file** parameter. + :param float threshold: A floating point value that specifies the minimum score a class must have to be displayed in the response. The default threshold for returning scores from a classifier is `0.5`. Set the threshold to `0.0` to ignore the classification score and return all values. + :param list[str] owners: An array of the categories of classifiers to apply. Use `IBM` to classify against the `default` general classifier, and use `me` to classify against your custom classifiers. To analyze the image against both classifier categories, set the value to both `IBM` and `me`. The built-in `default` classifier is used if both **classifier_ids** and **owners** parameters are empty. The **classifier_ids** parameter overrides **owners**, so make sure that **classifier_ids** is empty. + :param list[str] classifier_ids: The **classifier_ids** parameter overrides **owners**, so make sure that **classifier_ids** is empty. - **classifier_ids**: Specifies which classifiers to apply and overrides the **owners** parameter. 
You can specify both custom and built-in classifiers. The built-in `default` classifier is used if both **classifier_ids** and **owners** parameters are empty. The following built-in classifier IDs require no training: - `default`: Returns classes from thousands of general tags. - `food`: (Beta) Enhances specificity and accuracy for images of food items. - `explicit`: (Beta) Evaluates whether the image might be pornographic. Example: `\"classifier_ids=\"CarsvsTrucks_1479118188\",\"explicit\"`. :return: A `dict` containing the `ClassifiedImages` response. :rtype: dict """ @@ -105,9 +112,24 @@ def classify(self, images_filename = images_file.name mime_type = images_file_content_type or 'application/octet-stream' images_file_tuple = (images_filename, images_file, mime_type) + parameters_tuple = None - if parameters: + if parameters is not None: parameters_tuple = (None, parameters, 'text/plain') + + url_tuple = None + if url is not None: + url_tuple = (None, url, 'text/plain') + threshold_tuple = None + if threshold is not None: + threshold_tuple = (None, threshold, 'application/json') + owners_tuple = None + if owners is not None: + owners_tuple = (None, owners, 'application/json') + classifier_ids_tuple = None + if classifier_ids is not None: + classifier_ids_tuple = (None, classifier_ids, 'application/json') + url = '/v3/classify' response = self.request( method='POST', @@ -115,21 +137,26 @@ def classify(self, headers=headers, params=params, files={ + 'parameters': parameters_tuple, 'images_file': images_file_tuple, - 'parameters': parameters_tuple + 'url': url_tuple, + 'threshold': threshold_tuple, + 'owners': owners_tuple, + 'classifier_ids': classifier_ids_tuple }, accept_json=True) return response ######################### - # face + # Face ######################### def detect_faces(self, images_file=None, parameters=None, images_file_content_type=None, - images_filename=None): + images_filename=None, + url=None): """ Detect faces in images. 
@@ -139,9 +166,10 @@ def detect_faces(self, does not support general biometric facial recognition. :param file images_file: An image file (.jpg, .png) or .zip file with images. Include no more than 15 images. You can also include images with the `url` property in the **parameters** object. All faces are detected, but if there are more than 10 faces in an image, age and gender confidence scores might return scores of 0. - :param str parameters: A JSON object that specifies a single image (.jpg, .png) to analyze by URL. The parameter can be sent as a string or a file. Example: `{\"url\":\"http://www.example.com/images/myimage.jpg\"}`. + :param str parameters: (Deprecated) A JSON object that specifies a single image (.jpg, .png) to analyze by URL. The parameter can be sent as a string or a file. Example: `{\"url\":\"http://www.example.com/images/myimage.jpg\"}`. :param str images_file_content_type: The content type of images_file. :param str images_filename: The filename for images_file. + :param str url: A string with the image URL to analyze. :return: A `dict` containing the `DetectedFaces` response. 
:rtype: dict """ @@ -153,22 +181,24 @@ def detect_faces(self, mime_type = images_file_content_type or 'application/octet-stream' images_file_tuple = (images_filename, images_file, mime_type) parameters_tuple = None - if parameters: + if parameters is not None: parameters_tuple = (None, parameters, 'text/plain') + url_tuple = None + if url is not None: + url_tuple = (None, url, 'text/plain') url = '/v3/detect_faces' response = self.request( method='POST', url=url, params=params, - files={ - 'images_file': images_file_tuple, - 'parameters': parameters_tuple - }, + files={'images_file': images_file_tuple, + 'parameters': parameters_tuple, + 'url': url_tuple}, accept_json=True) return response ######################### - # custom + # Custom ######################### def create_classifier(self, @@ -290,6 +320,8 @@ def __init__(self, class_name): def _from_dict(cls, _dict): """Initialize a Class object from a json dictionary.""" args = {} + if 'class_name' in _dict: + args['class_name'] = _dict['class_name'] if 'class' in _dict: args['class_name'] = _dict['class'] else: @@ -344,6 +376,8 @@ def __init__(self, class_name, score=None, type_hierarchy=None): def _from_dict(cls, _dict): """Initialize a ClassResult object from a json dictionary.""" args = {} + if 'class_name' in _dict: + args['class_name'] = _dict['class_name'] if 'class' in _dict: args['class_name'] = _dict['class'] else: From 19df9ba9c4f8016a0169bee06ad764ce629df253 Mon Sep 17 00:00:00 2001 From: Erika Dsouza Date: Wed, 7 Mar 2018 18:24:20 -0500 Subject: [PATCH 35/45] Remove transfer encoding --- watson_developer_cloud/speech_to_text_v1.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/watson_developer_cloud/speech_to_text_v1.py b/watson_developer_cloud/speech_to_text_v1.py index 3d684514f..c23d7f41c 100644 --- a/watson_developer_cloud/speech_to_text_v1.py +++ b/watson_developer_cloud/speech_to_text_v1.py @@ -308,7 +308,6 @@ def check_jobs(self): def create_job(self, audio, 
content_type, - transfer_encoding=None, model=None, callback_url=None, events=None, @@ -333,7 +332,6 @@ def create_job(self, :param str audio: Audio to transcribe in the format specified by the `Content-Type` header. :param str content_type: The type of the input: audio/basic, audio/flac, audio/l16, audio/mp3, audio/mpeg, audio/mulaw, audio/ogg, audio/ogg;codecs=opus, audio/ogg;codecs=vorbis, audio/wav, audio/webm, audio/webm;codecs=opus, or audio/webm;codecs=vorbis. - :param str transfer_encoding: Set to `chunked` to send the audio in streaming mode. The data does not need to exist fully before being streamed to the service. :param str model: The identifier of the model to be used for the recognition request. (Use `GET /v1/models` for a list of available models.). :param str callback_url: A URL to which callback notifications are to be sent. The URL must already be successfully white-listed by using the `POST /v1/register_callback` method. Omit the parameter to poll the service for job completion and results. You can include the same callback URL with any number of job creation requests. Use the `user_token` query parameter to specify a unique user-specified string with each job to differentiate the callback notifications for the jobs. :param str events: If the job includes a callback URL, a comma-separated list of notification events to which to subscribe. Valid events are: `recognitions.started` generates a callback notification when the service begins to process the job. `recognitions.completed` generates a callback notification when the job is complete; you must use the `GET /v1/recognitions/{id}` method to retrieve the results before they time out or are deleted. `recognitions.completed_with_results` generates a callback notification when the job is complete; the notification includes the results of the request. `recognitions.failed` generates a callback notification if the service experiences an error while processing the job. 
Omit the parameter to subscribe to the default events: `recognitions.started`, `recognitions.completed`, and `recognitions.failed`. The `recognitions.completed` and `recognitions.completed_with_results` events are incompatible; you can specify only of the two events. If the job does not include a callback URL, omit the parameter. @@ -361,8 +359,7 @@ def create_job(self, if content_type is None: raise ValueError('content_type must be provided') headers = { - 'Content-Type': content_type, - 'Transfer-Encoding': transfer_encoding + 'Content-Type': content_type } params = { 'model': model, From 84ee57aaace5709448b877850a5f0663bed844e3 Mon Sep 17 00:00:00 2001 From: Erika Dsouza Date: Wed, 7 Mar 2018 18:43:37 -0500 Subject: [PATCH 36/45] accept is optional --- test/unit/test_text_to_speech_v1.py | 2 +- watson_developer_cloud/text_to_speech_v1.py | 4 +--- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/test/unit/test_text_to_speech_v1.py b/test/unit/test_text_to_speech_v1.py index 3ed6d3c83..4e21033b5 100644 --- a/test/unit/test_text_to_speech_v1.py +++ b/test/unit/test_text_to_speech_v1.py @@ -99,7 +99,7 @@ def test_success(): assert responses.calls[1].request.url == voice_url assert responses.calls[1].response.text == json.dumps(voice_response) - text_to_speech.synthesize('hello', 'audio/basic') + text_to_speech.synthesize('hello') assert responses.calls[2].request.url == synthesize_url assert responses.calls[2].response.text == synthesize_response_body diff --git a/watson_developer_cloud/text_to_speech_v1.py b/watson_developer_cloud/text_to_speech_v1.py index c1fcade98..3ed4de239 100644 --- a/watson_developer_cloud/text_to_speech_v1.py +++ b/watson_developer_cloud/text_to_speech_v1.py @@ -106,7 +106,7 @@ def voices(self): def synthesize(self, text, - accept, + accept=None, voice=None, customization_id=None): """ @@ -121,8 +121,6 @@ def synthesize(self, """ if text is None: raise ValueError('text must be provided') - if accept is None: - raise 
ValueError('accept must be provided') headers = {'Accept': accept} params = { 'voice': voice, From 0447f90e60d5c7a8e8f875f32c97292c52e29762 Mon Sep 17 00:00:00 2001 From: Erika Dsouza Date: Wed, 7 Mar 2018 21:04:54 -0500 Subject: [PATCH 37/45] Taking care of pylon issues --- examples/conversation_v1.py | 295 ++++++++++-------- examples/microphone-speech-to-text.py | 18 +- examples/speech_to_text_v1.py | 4 +- test/integration/__init__.py | 2 +- .../test_integration_discovery_v1.py | 97 +++--- .../test_integration_speech_to_text_v1.py | 17 +- .../test_integration_text_to_speech_v1.py | 28 +- .../test_integration_visual_recognition.py | 35 ++- test/unit/__init__.py | 2 +- test/unit/test_conversation_v1.py | 9 +- test/unit/test_discovery_v1.py | 9 +- test/unit/test_speech_to_text_v1.py | 13 +- watson_developer_cloud/conversation_v1.py | 4 +- watson_developer_cloud/speech_to_text_v1.py | 130 ++++---- watson_developer_cloud/utils.py | 23 +- watson_developer_cloud/watson_service.py | 11 +- .../websocket/recognize_abstract_callback.py | 47 +-- .../speech_to_text_websocket_listener.py | 16 +- 18 files changed, 399 insertions(+), 361 deletions(-) diff --git a/examples/conversation_v1.py b/examples/conversation_v1.py index 9cbaadd7a..acbe49d27 100644 --- a/examples/conversation_v1.py +++ b/examples/conversation_v1.py @@ -1,6 +1,5 @@ from __future__ import print_function import json -import os from watson_developer_cloud import ConversationV1 conversation = ConversationV1( @@ -20,63 +19,60 @@ ######################### create_workspace_data = { - "name": "test_workspace", - "description": "integration tests", - "language": "en", - "intents": [ - { - "intent": "hello", - "description": "string", - "examples": [ - { - "text": "good morning" + "name": + "test_workspace", + "description": + "integration tests", + "language": + "en", + "intents": [{ + "intent": "hello", + "description": "string", + "examples": [{ + "text": "good morning" + }] + }], + "entities": [{ + "entity": 
"pizza_toppings", + "description": "Tasty pizza toppings", + "metadata": { + "property": "value" } - ] - } - ], - "entities": [ - { - "entity": "pizza_toppings", - "description": "Tasty pizza toppings", - "metadata": { - "property": "value" - } - } - ], - "counterexamples": [ - { - "text": "string" - } - ], - "metadata": {}, + }], + "counterexamples": [{ + "text": "string" + }], + "metadata": {}, } - -response = conversation.create_workspace(name=create_workspace_data['name'], - description=create_workspace_data['description'], - language='en', - intents=create_workspace_data['intents'], - entities=create_workspace_data['entities'], - counterexamples=create_workspace_data['counterexamples'], - metadata=create_workspace_data['metadata']) +response = conversation.create_workspace( + name=create_workspace_data['name'], + description=create_workspace_data['description'], + language='en', + intents=create_workspace_data['intents'], + entities=create_workspace_data['entities'], + counterexamples=create_workspace_data['counterexamples'], + metadata=create_workspace_data['metadata']) print(json.dumps(response, indent=2)) workspace_id = response['workspace_id'] -print("Workspace id ".format(workspace_id)) +print('Workspace id {0}'.format(workspace_id)) response = conversation.get_workspace(workspace_id=workspace_id, export=True) print(json.dumps(response, indent=2)) # message -response = conversation.message(workspace_id=workspace_id, input={ - 'text': 'What\'s the weather like?'}) +response = conversation.message( + workspace_id=workspace_id, input={ + 'text': 'What\'s the weather like?' 
+ }) print(json.dumps(response, indent=2)) response = conversation.list_workspaces() print(json.dumps(response, indent=2)) -response = conversation.update_workspace(workspace_id=workspace_id, - description='Updated test workspace.') +response = conversation.update_workspace( + workspace_id=workspace_id, description='Updated test workspace.') print(json.dumps(response, indent=2)) # see cleanup section below for delete_workspace example @@ -85,26 +81,26 @@ # intents ######################### -examples = [{ "text": "good morning"}] -response = conversation.create_intent(workspace_id=workspace_id, - intent='test_intent', - description='Test intent.', - examples=examples) +examples = [{"text": "good morning"}] +response = conversation.create_intent( + workspace_id=workspace_id, + intent='test_intent', + description='Test intent.', + examples=examples) print(json.dumps(response, indent=2)) -response = conversation.get_intent(workspace_id=workspace_id, - intent='test_intent', - export=True) +response = conversation.get_intent( + workspace_id=workspace_id, intent='test_intent', export=True) print(json.dumps(response, indent=2)) -response = conversation.list_intents(workspace_id=workspace_id, - export=True) +response = conversation.list_intents(workspace_id=workspace_id, export=True) print(json.dumps(response, indent=2)) -response = conversation.update_intent(workspace_id=workspace_id, - intent='test_intent', - new_intent='updated_test_intent', - new_description='Updated test intent.') +response = conversation.update_intent( + workspace_id=workspace_id, + intent='test_intent', + new_intent='updated_test_intent', + new_description='Updated test intent.') print(json.dumps(response, indent=2)) # see cleanup section below for delete_intent example @@ -113,53 +109,58 @@ # examples ######################### -response = conversation.create_example(workspace_id=workspace_id, - intent='updated_test_intent', - text='Gimme a pizza with pepperoni') +response = 
conversation.create_example( + workspace_id=workspace_id, + intent='updated_test_intent', + text='Gimme a pizza with pepperoni') print(json.dumps(response, indent=2)) -response = conversation.get_example(workspace_id=workspace_id, - intent='updated_test_intent', - text='Gimme a pizza with pepperoni') +response = conversation.get_example( + workspace_id=workspace_id, + intent='updated_test_intent', + text='Gimme a pizza with pepperoni') print(json.dumps(response, indent=2)) -response = conversation.list_examples(workspace_id=workspace_id, - intent='updated_test_intent') +response = conversation.list_examples( + workspace_id=workspace_id, intent='updated_test_intent') print(json.dumps(response, indent=2)) -response = conversation.update_example(workspace_id=workspace_id, - intent='updated_test_intent', - text='Gimme a pizza with pepperoni', - new_text='Gimme a pizza with pepperoni') +response = conversation.update_example( + workspace_id=workspace_id, + intent='updated_test_intent', + text='Gimme a pizza with pepperoni', + new_text='Gimme a pizza with pepperoni') print(json.dumps(response, indent=2)) -response = conversation.delete_example(workspace_id=workspace_id, - intent='updated_test_intent', - text='Gimme a pizza with pepperoni') +response = conversation.delete_example( + workspace_id=workspace_id, + intent='updated_test_intent', + text='Gimme a pizza with pepperoni') print(json.dumps(response, indent=2)) ######################### # counterexamples ######################### -response = conversation.create_counterexample(workspace_id=workspace_id, - text='I want financial advice today.') +response = conversation.create_counterexample( + workspace_id=workspace_id, text='I want financial advice today.') print(json.dumps(response, indent=2)) -response = conversation.get_counterexample(workspace_id=workspace_id, - text='I want financial advice today.') +response = conversation.get_counterexample( + workspace_id=workspace_id, text='I want financial advice today.') 
print(json.dumps(response, indent=2)) response = conversation.list_counterexamples(workspace_id=workspace_id) print(json.dumps(response, indent=2)) -response = conversation.update_counterexample(workspace_id=workspace_id, - text='I want financial advice today.', - new_text='I want financial advice today.') +response = conversation.update_counterexample( + workspace_id=workspace_id, + text='I want financial advice today.', + new_text='I want financial advice today.') print(json.dumps(response, indent=2)) -response = conversation.delete_counterexample(workspace_id=workspace_id, - text='I want financial advice today.') +response = conversation.delete_counterexample( + workspace_id=workspace_id, text='I want financial advice today.') print(json.dumps(response, indent=2)) ######################### @@ -167,52 +168,65 @@ ######################### values = [{"value": "juice"}] -response = conversation.create_entity(workspace_id=workspace_id, - entity='test_entity', - description='A test entity.', - values=values) +response = conversation.create_entity( + workspace_id=workspace_id, + entity='test_entity', + description='A test entity.', + values=values) print(json.dumps(response, indent=2)) entities = [{ - 'entity': 'pattern_entity', + 'entity': + 'pattern_entity', 'values': [{ - 'value': 'value0', 'patterns': ['\\d{6}\\w{1}\\d{7}'], 'value_type': 'patterns' - }, - {'value': 'value1', - 'patterns': ['[-9][0-9][0-9][0-9][0-9]~! [1-9][1-9][1-9][1-9][1-9][1-9]'], - 'value_type': 'patterns'}, - {'value': 'value2', - 'patterns': ['[a-z-9]{17}'], - 'value_type': 'patterns'}, - {'value': 'value3', - 'patterns': [ - '\\d{3}(\\ |-)\\d{3}(\\ |-)\\d{4}', - '\\(\\d{3}\\)(\\ |-)\\d{3}(\\ |-)\\d{4}'], - 'value_type': 'patterns'}, - {'value': 'value4', - 'patterns': ['\\b\\d{5}\\b'], - 'value_type': 'patterns'}] + 'value': 'value0', + 'patterns': ['\\d{6}\\w{1}\\d{7}'], + 'value_type': 'patterns' + }, { + 'value': + 'value1', + 'patterns': + ['[-9][0-9][0-9][0-9][0-9]~! 
[1-9][1-9][1-9][1-9][1-9][1-9]'], + 'value_type': + 'patterns' + }, { + 'value': 'value2', + 'patterns': ['[a-z-9]{17}'], + 'value_type': 'patterns' + }, { + 'value': + 'value3', + 'patterns': [ + '\\d{3}(\\ |-)\\d{3}(\\ |-)\\d{4}', + '\\(\\d{3}\\)(\\ |-)\\d{3}(\\ |-)\\d{4}' + ], + 'value_type': + 'patterns' + }, { + 'value': 'value4', + 'patterns': ['\\b\\d{5}\\b'], + 'value_type': 'patterns' + }] }] -response = conversation.create_entity(workspace_id, - entity=entities[0]['entity'], - values=entities[0]['values']) +response = conversation.create_entity( + workspace_id, entity=entities[0]['entity'], values=entities[0]['values']) print(json.dumps(response, indent=2)) -response = conversation.get_entity(workspace_id=workspace_id, - entity=entities[0]['entity'], - export=True) +response = conversation.get_entity( + workspace_id=workspace_id, entity=entities[0]['entity'], export=True) print(json.dumps(response, indent=2)) response = conversation.list_entities(workspace_id=workspace_id) print(json.dumps(response, indent=2)) -response = conversation.update_entity(workspace_id=workspace_id, - entity='test_entity', - new_description='An updated test entity.') +response = conversation.update_entity( + workspace_id=workspace_id, + entity='test_entity', + new_description='An updated test entity.') print(json.dumps(response, indent=2)) -response = conversation.delete_entity(workspace_id=workspace_id, - entity='test_entity') +response = conversation.delete_entity( + workspace_id=workspace_id, entity='test_entity') print(json.dumps(response, indent=2)) ######################### @@ -222,19 +236,23 @@ values = [{"value": "orange juice"}] conversation.create_entity(workspace_id, 'beverage', values=values) -response = conversation.create_synonym(workspace_id, 'beverage', 'orange juice', 'oj') +response = conversation.create_synonym(workspace_id, 'beverage', + 'orange juice', 'oj') print(json.dumps(response, indent=2)) -response = conversation.get_synonym(workspace_id, 'beverage', 
'orange juice', 'oj') +response = conversation.get_synonym(workspace_id, 'beverage', 'orange juice', + 'oj') print(json.dumps(response, indent=2)) response = conversation.list_synonyms(workspace_id, 'beverage', 'orange juice') print(json.dumps(response, indent=2)) -response = conversation.update_synonym(workspace_id, 'beverage', 'orange juice', 'oj', 'OJ') +response = conversation.update_synonym(workspace_id, 'beverage', + 'orange juice', 'oj', 'OJ') print(json.dumps(response, indent=2)) -response = conversation.delete_synonym(workspace_id, 'beverage', 'orange juice', 'OJ') +response = conversation.delete_synonym(workspace_id, 'beverage', + 'orange juice', 'OJ') print(json.dumps(response, indent=2)) conversation.delete_entity(workspace_id, 'beverage') @@ -254,7 +272,8 @@ response = conversation.list_values(workspace_id, 'test_entity') print(json.dumps(response, indent=2)) -response = conversation.update_value(workspace_id, 'test_entity', 'test', 'example') +response = conversation.update_value(workspace_id, 'test_entity', 'test', + 'example') print(json.dumps(response, indent=2)) response = conversation.delete_value(workspace_id, 'test_entity', 'example') @@ -266,22 +285,23 @@ # Dialog nodes ######################### create_dialog_node = { - "dialog_node": "greeting", - "description": "greeting messages", - "actions": [ - { - "name": "hello", - "type": "client", - "parameters": {}, - "result_variable": "string", - "credentials": "string" - } - ] + "dialog_node": + "greeting", + "description": + "greeting messages", + "actions": [{ + "name": "hello", + "type": "client", + "parameters": {}, + "result_variable": "string", + "credentials": "string" + }] } -response = conversation.create_dialog_node(workspace_id, - create_dialog_node['dialog_node'], - create_dialog_node['description'], - actions=create_dialog_node['actions']) +response = conversation.create_dialog_node( + workspace_id, + create_dialog_node['dialog_node'], + create_dialog_node['description'], + 
actions=create_dialog_node['actions']) print(json.dumps(response, indent=2)) response = conversation.get_dialog_node(workspace_id, @@ -291,9 +311,10 @@ response = conversation.list_dialog_nodes(workspace_id) print(json.dumps(response, indent=2)) -response = conversation.update_dialog_node(workspace_id, - create_dialog_node['dialog_node'], - new_dialog_node='updated_node') +response = conversation.update_dialog_node( + workspace_id, + create_dialog_node['dialog_node'], + new_dialog_node='updated_node') print(json.dumps(response, indent=2)) response = conversation.delete_dialog_node(workspace_id, 'updated_node') @@ -310,8 +331,8 @@ # clean-up ######################### -response = conversation.delete_intent(workspace_id=workspace_id, - intent='updated_test_intent') +response = conversation.delete_intent( + workspace_id=workspace_id, intent='updated_test_intent') print(json.dumps(response, indent=2)) response = conversation.delete_workspace(workspace_id=workspace_id) diff --git a/examples/microphone-speech-to-text.py b/examples/microphone-speech-to-text.py index 14e5c9a6e..58ae329cd 100644 --- a/examples/microphone-speech-to-text.py +++ b/examples/microphone-speech-to-text.py @@ -2,17 +2,18 @@ import pyaudio import tempfile from watson_developer_cloud import SpeechToTextV1 -from watson_developer_cloud.websocket import RecognizeCallback, RecognizeListener +from watson_developer_cloud.websocket import RecognizeCallback speech_to_text = SpeechToTextV1( username='YOUR SERVICE USERNAME', password='YOUR SERVICE PASSWORD', url='https://stream.watsonplatform.net/speech-to-text/api') + # Example using websockets class MyRecognizeCallback(RecognizeCallback): def __init__(self): - pass + RecognizeCallback.__init__(self) def on_transcription(self, transcript): print(transcript) @@ -35,6 +36,7 @@ def on_transcription_complete(self): def on_hypothesis(self, hypothesis): print(hypothesis) + mycallback = MyRecognizeCallback() tmp = tempfile.NamedTemporaryFile() @@ -45,9 +47,12 @@ def 
on_hypothesis(self, hypothesis): RECORD_SECONDS = 5 audio = pyaudio.PyAudio() -stream = audio.open(format=FORMAT, channels=CHANNELS, - rate=RATE, input=True, - frames_per_buffer=CHUNK) +stream = audio.open( + format=FORMAT, + channels=CHANNELS, + rate=RATE, + input=True, + frames_per_buffer=CHUNK) print('recording....') with open(tmp.name, 'w') as f: @@ -61,4 +66,5 @@ def on_hypothesis(self, hypothesis): print('Done recording...') with open(tmp.name) as f: - speech_to_text.recognize_with_websocket(audio=f, recognize_callback=mycallback) \ No newline at end of file + speech_to_text.recognize_with_websocket( + audio=f, recognize_callback=mycallback) diff --git a/examples/speech_to_text_v1.py b/examples/speech_to_text_v1.py index 56f606aa6..5f36a1d18 100644 --- a/examples/speech_to_text_v1.py +++ b/examples/speech_to_text_v1.py @@ -2,7 +2,7 @@ import json from os.path import join, dirname from watson_developer_cloud import SpeechToTextV1 -from watson_developer_cloud.websocket import RecognizeCallback, RecognizeListener +from watson_developer_cloud.websocket import RecognizeCallback speech_to_text = SpeechToTextV1( username='YOUR SERVICE USERNAME', @@ -27,7 +27,7 @@ # Example using websockets class MyRecognizeCallback(RecognizeCallback): def __init__(self): - pass + RecognizeCallback.__init__(self) def on_transcription(self, transcript): print(transcript) diff --git a/test/integration/__init__.py b/test/integration/__init__.py index 57a3e4a29..b4a466217 100644 --- a/test/integration/__init__.py +++ b/test/integration/__init__.py @@ -16,4 +16,4 @@ from dotenv import load_dotenv, find_dotenv # load the .env file containing your environment variables for the required -load_dotenv(find_dotenv()) \ No newline at end of file +load_dotenv(find_dotenv()) diff --git a/test/integration/test_integration_discovery_v1.py b/test/integration/test_integration_discovery_v1.py index 7982696f0..287c76d2c 100644 --- a/test/integration/test_integration_discovery_v1.py +++ 
b/test/integration/test_integration_discovery_v1.py @@ -3,79 +3,104 @@ from unittest import TestCase import os import watson_developer_cloud -import json import random + class Discoveryv1(TestCase): def setUp(self): self.discovery = watson_developer_cloud.DiscoveryV1( version='2017-10-16', username=os.getenv('DISCOVERY_TO_TEXT_USERNAME'), password=os.getenv('DISCOVERY_TO_TEXT_PASSWORD')) - self.discovery.set_default_headers({'X-Watson-Learning-Opt-Out': '1', 'X-Watson-Test': '1'}) - self.environment_id = 'e15f6424-f887-4f50-b4ea-68267c36fc9c' # This environment is created for integration testing - self.collection_id = self.discovery.list_collections(self.environment_id)['collections'][0]['collection_id'] + self.discovery.set_default_headers({ + 'X-Watson-Learning-Opt-Out': '1', + 'X-Watson-Test': '1' + }) + self.environment_id = 'e15f6424-f887-4f50-b4ea-68267c36fc9c' # This environment is created for integration testing + self.collection_id = self.discovery.list_collections( + self.environment_id)['collections'][0]['collection_id'] def test_environments(self): envs = self.discovery.list_environments() assert envs is not None - env = self.discovery.get_environment(envs['environments'][0]['environment_id']) + env = self.discovery.get_environment( + envs['environments'][0]['environment_id']) assert env is not None - fields = self.discovery.list_fields(self.environment_id, self.collection_id) + fields = self.discovery.list_fields(self.environment_id, + self.collection_id) assert fields is not None def test_configurations(self): configs = self.discovery.list_configurations(self.environment_id) assert configs is not None - new_configuration_id = self.discovery.create_configuration(self.environment_id, 'test', 'creating new config for python sdk')['configuration_id'] + new_configuration_id = self.discovery.create_configuration( + self.environment_id, 'test', + 'creating new config for python sdk')['configuration_id'] assert new_configuration_id is not None - 
self.discovery.get_configuration(self.environment_id, new_configuration_id) - updated_config = self.discovery.update_configuration(self.environment_id, new_configuration_id, 'lala') + self.discovery.get_configuration(self.environment_id, + new_configuration_id) + updated_config = self.discovery.update_configuration( + self.environment_id, new_configuration_id, 'lala') assert updated_config['name'] == 'lala' - deleted_config = self.discovery.delete_configuration(self.environment_id, new_configuration_id) + deleted_config = self.discovery.delete_configuration( + self.environment_id, new_configuration_id) assert deleted_config['status'] == 'deleted' def test_collections_and_expansions(self): - new_collection_id = self.discovery.create_collection(self.environment_id, - name='Example collection for python' + random.choice('ABCDEFGHIJKLMNOPQ'), - description="Integration test for python sdk")['collection_id'] + new_collection_id = self.discovery.create_collection( + self.environment_id, + name='Example collection for python' + + random.choice('ABCDEFGHIJKLMNOPQ'), + description="Integration test for python sdk")['collection_id'] assert new_collection_id is not None self.discovery.get_collection(self.environment_id, new_collection_id) - updated_collection = self.discovery.update_collection(self.environment_id, new_collection_id, name='lala') + updated_collection = self.discovery.update_collection( + self.environment_id, new_collection_id, name='lala') assert updated_collection['name'] == 'lala' - self.discovery.create_expansions(self.environment_id, new_collection_id, [{'input_terms': ['a'], 'expanded_terms': ['aa']}]) - expansions = self.discovery.list_expansions(self.environment_id, new_collection_id) - assert len(expansions['expansions']) > 0 - self.discovery.delete_expansions(self.environment_id, new_collection_id) + self.discovery.create_expansions(self.environment_id, + new_collection_id, [{ + 'input_terms': ['a'], + 'expanded_terms': ['aa'] + }]) + expansions = 
self.discovery.list_expansions(self.environment_id, + new_collection_id) + assert expansions['expansions'] + self.discovery.delete_expansions(self.environment_id, + new_collection_id) - deleted_collection = self.discovery.delete_collection(self.environment_id, new_collection_id) + deleted_collection = self.discovery.delete_collection( + self.environment_id, new_collection_id) assert deleted_collection['status'] == 'deleted' def test_documents(self): with open(os.path.join(os.path.dirname(__file__), '../../resources/simple.html'), 'r') as fileinfo: - add_doc = self.discovery.add_document(environment_id=self.environment_id, - collection_id=self.collection_id, - file=fileinfo) - add_doc['document_id'] is not None + add_doc = self.discovery.add_document( + environment_id=self.environment_id, + collection_id=self.collection_id, + file=fileinfo) + assert add_doc['document_id'] is not None - doc_status = self.discovery.get_document_status(self.environment_id, self.collection_id, add_doc['document_id']) + doc_status = self.discovery.get_document_status( + self.environment_id, self.collection_id, add_doc['document_id']) assert doc_status is not None with open(os.path.join(os.path.dirname(__file__), '../../resources/simple.html'), 'r') as fileinfo: - update_doc = self.discovery.update_document(self.environment_id, - self.collection_id, - add_doc['document_id'], - file=fileinfo, - filename='newname.html') + update_doc = self.discovery.update_document( + self.environment_id, + self.collection_id, + add_doc['document_id'], + file=fileinfo, + filename='newname.html') assert update_doc is not None - delete_doc = self.discovery.delete_document(self.environment_id, self.collection_id, add_doc['document_id']) + delete_doc = self.discovery.delete_document( + self.environment_id, self.collection_id, add_doc['document_id']) assert delete_doc['status'] == 'deleted' - def test_queries(self): - query_results = self.discovery.query(self.environment_id, - self.collection_id, - 
filter='extracted_metadata.sha1::9181d244*', - return_fields='extracted_metadata.sha1') - assert query_results is not None \ No newline at end of file + query_results = self.discovery.query( + self.environment_id, + self.collection_id, + filter='extracted_metadata.sha1::9181d244*', + return_fields='extracted_metadata.sha1') + assert query_results is not None diff --git a/test/integration/test_integration_speech_to_text_v1.py b/test/integration/test_integration_speech_to_text_v1.py index e33281011..e55810bf6 100644 --- a/test/integration/test_integration_speech_to_text_v1.py +++ b/test/integration/test_integration_speech_to_text_v1.py @@ -1,8 +1,6 @@ from unittest import TestCase -import pytest import os import watson_developer_cloud -from watson_developer_cloud.speech_to_text_v1 import SpeechRecognitionResults class TestSpeechToTextV1(TestCase): @@ -10,7 +8,12 @@ def setUp(self): self.speech_to_text = watson_developer_cloud.SpeechToTextV1( username=os.getenv('SPEECH_TO_TEXT_USERNAME'), password=os.getenv('SPEECH_TO_TEXT_PASSWORD')) - self.speech_to_text.set_default_headers({'X-Watson-Learning-Opt-Out': '1', 'X-Watson-Test': '1'}) + self.speech_to_text.set_default_headers({ + 'X-Watson-Learning-Opt-Out': + '1', + 'X-Watson-Test': + '1' + }) self.custom_models = self.speech_to_text.list_language_models() self.create_custom_model = self.speech_to_text.create_language_model( name="integration_test_model", @@ -37,10 +40,7 @@ def test_create_custom_model(self): self.custom_models['customizations']) >= 1 def test_recognize(self): - with open( - os.path.join( - os.path.dirname(__file__), '../../resources/speech.wav'), - 'rb') as audio_file: + with open(os.path.join(os.path.dirname(__file__), '../../resources/speech.wav'), 'rb') as audio_file: output = self.speech_to_text.recognize( audio=audio_file, content_type='audio/l16; rate=44100') assert output['results'][0]['alternatives'][0][ @@ -70,4 +70,5 @@ def test_acoustic_model(self): self.speech_to_text.reset_acoustic_model( 
get_acoustic_model['customization_id']) - self.speech_to_text.delete_acoustic_model(get_acoustic_model['customization_id']) + self.speech_to_text.delete_acoustic_model( + get_acoustic_model['customization_id']) diff --git a/test/integration/test_integration_text_to_speech_v1.py b/test/integration/test_integration_text_to_speech_v1.py index 209eab9ec..fed7921c6 100644 --- a/test/integration/test_integration_text_to_speech_v1.py +++ b/test/integration/test_integration_text_to_speech_v1.py @@ -1,12 +1,17 @@ -import pytest + import unittest import watson_developer_cloud -import os + class TestIntegrationTextToSpeechV1(unittest.TestCase): def setUp(self): self.text_to_speech = watson_developer_cloud.TextToSpeechV1() - self.text_to_speech.set_default_headers({'X-Watson-Learning-Opt-Out': '1', 'X-Watson-Test': '1'}) + self.text_to_speech.set_default_headers({ + 'X-Watson-Learning-Opt-Out': + '1', + 'X-Watson-Test': + '1' + }) self.original_customizations = self.text_to_speech.list_voice_models() self.created_customization = self.text_to_speech.create_voice_model( name="test_integration_customization", @@ -27,11 +32,11 @@ def test_speak(self): text="my voice is my passport", accept='audio/wav', voice='en-US_AllisonVoice') - output.content is not None + assert output.content is not None def test_pronunciation(self): output = self.text_to_speech.get_pronunciation('hello') - output['pronunciation'] is not None + assert output['pronunciation'] is not None def test_customizations(self): old_length = len(self.original_customizations['customizations']) @@ -44,16 +49,9 @@ def test_custom_words(self): words = self.text_to_speech.list_words(customization_id)['words'] assert len(words) == 0 self.text_to_speech.add_word( - customization_id, - word="ACLs", - translation="ackles") - - words = [ - { - "word": "MACLs", - "translation": "mackles" - } - ] + customization_id, word="ACLs", translation="ackles") + + words = [{"word": "MACLs", "translation": "mackles"}] 
self.text_to_speech.add_words(customization_id, words) self.text_to_speech.delete_word(customization_id, 'ACLs') diff --git a/test/integration/test_integration_visual_recognition.py b/test/integration/test_integration_visual_recognition.py index 80a25da0a..2987f54cd 100644 --- a/test/integration/test_integration_visual_recognition.py +++ b/test/integration/test_integration_visual_recognition.py @@ -6,29 +6,46 @@ import json pytestmark = pytest.mark.skip('Run These Manually, they are destructive') -class IntegrationTestVisualRecognitionV3(TestCase): + +class IntegrationTestVisualRecognitionV3(TestCase): def setUp(self): - self.visual_recognition = watson_developer_cloud.VisualRecognitionV3('2016-05-20', api_key=os.environ.get( - 'YOUR API KEY')) - self.visual_recognition.set_default_headers({'X-Watson-Learning-Opt-Out': '1', 'X-Watson-Test': '1'}) + self.visual_recognition = watson_developer_cloud.VisualRecognitionV3( + '2016-05-20', api_key=os.environ.get('YOUR API KEY')) + self.visual_recognition.set_default_headers({ + 'X-Watson-Learning-Opt-Out': + '1', + 'X-Watson-Test': + '1' + }) def test_classify(self): car_path = join(dirname(__file__), '../../resources/cars.zip') with open(car_path, 'rb') as images_file: - parameters = json.dumps({'threshold': 0.1, 'classifier_ids': ['CarsvsTrucks_1479118188', 'default']}) - car_results = self.visual_recognition.classify(images_file=images_file, - parameters=parameters) + parameters = json.dumps({ + 'threshold': + 0.1, + 'classifier_ids': ['CarsvsTrucks_1479118188', 'default'] + }) + car_results = self.visual_recognition.classify( + images_file=images_file, parameters=parameters) assert car_results is not None def test_detect_faces(self): - output = self.visual_recognition.detect_faces(parameters=json.dumps({'url': 'https://www.ibm.com/ibm/ginni/images/ginni_bio_780x981_v4_03162016.jpg'})) + output = self.visual_recognition.detect_faces( + parameters=json.dumps({ + 'url': + 
'https://www.ibm.com/ibm/ginni/images/ginni_bio_780x981_v4_03162016.jpg' + })) assert output is not None def test_custom_classifier(self): with open(os.path.join(os.path.dirname(__file__), '../../resources/cars.zip'), 'rb') as cars, \ open(os.path.join(os.path.dirname(__file__), '../../resources/trucks.zip'), 'rb') as trucks: - classifier = self.visual_recognition.create_classifier('Cars vs Trucks', cars_positive_examples=cars, negative_examples=trucks) + classifier = self.visual_recognition.create_classifier( + 'Cars vs Trucks', + cars_positive_examples=cars, + negative_examples=trucks) assert classifier is not None diff --git a/test/unit/__init__.py b/test/unit/__init__.py index b8c9cd96b..db5d6b2c5 100644 --- a/test/unit/__init__.py +++ b/test/unit/__init__.py @@ -14,4 +14,4 @@ from dotenv import load_dotenv, find_dotenv # load the .env file containing your environment variables for the required -load_dotenv(find_dotenv()) \ No newline at end of file +load_dotenv(find_dotenv()) diff --git a/test/unit/test_conversation_v1.py b/test/unit/test_conversation_v1.py index e7d1de553..dbb8e5aa2 100644 --- a/test/unit/test_conversation_v1.py +++ b/test/unit/test_conversation_v1.py @@ -1478,8 +1478,7 @@ def test_dialog_nodes(): status=200, content_type='application/json') - conversation = watson_developer_cloud.ConversationV1('2017-05-26', - username="username", password="password") + conversation = watson_developer_cloud.ConversationV1('2017-05-26', username="username", password="password") conversation.create_dialog_node('id', 'location-done') assert responses.calls[0].response.json()['application/json']['dialog_node'] == 'location-done' @@ -1488,9 +1487,9 @@ def test_dialog_nodes(): assert responses.calls[1].response.json() == {"description": "deleted successfully"} conversation.get_dialog_node('id', 'location-done') - assert responses.calls[2].response.json() == { "application/json": { "dialog_node": "location-atm" }} + assert responses.calls[2].response.json() == 
{"application/json": {"dialog_node": "location-atm"}} conversation.list_dialog_nodes('id') - assert responses.calls[3].response.json() == { "application/json": { "dialog_node": "location-atm" }} + assert responses.calls[3].response.json() == {"application/json": {"dialog_node": "location-atm"}} - assert len(responses.calls) == 4 \ No newline at end of file + assert len(responses.calls) == 4 diff --git a/test/unit/test_discovery_v1.py b/test/unit/test_discovery_v1.py index 619b5ad2a..4902e983f 100644 --- a/test/unit/test_discovery_v1.py +++ b/test/unit/test_discovery_v1.py @@ -786,16 +786,15 @@ def test_expansions(): status=200, content_type='application_json') - discovery = watson_developer_cloud.DiscoveryV1('2017-11-07', - username="username", password="password") + discovery = watson_developer_cloud.DiscoveryV1('2017-11-07', username="username", password="password") discovery.list_expansions('envid', 'colid') assert responses.calls[0].response.json() == {"expansions": "results"} discovery.create_expansions('envid', 'colid', [{"input_terms": "dumb", "expanded_terms": "dumb2"}]) - assert responses.calls[1].response.json() == {"expansions": "success" } + assert responses.calls[1].response.json() == {"expansions": "success"} discovery.delete_expansions('envid', 'colid') - assert responses.calls[2].response.json() == {"description": "success" } + assert responses.calls[2].response.json() == {"description": "success"} - assert len(responses.calls) == 3 \ No newline at end of file + assert len(responses.calls) == 3 diff --git a/test/unit/test_speech_to_text_v1.py b/test/unit/test_speech_to_text_v1.py index 50a06c248..1c4e435dc 100755 --- a/test/unit/test_speech_to_text_v1.py +++ b/test/unit/test_speech_to_text_v1.py @@ -1,7 +1,6 @@ # coding=utf-8 import os import json -import pytest import responses import watson_developer_cloud from watson_developer_cloud.speech_to_text_v1 import CustomWord @@ -39,9 +38,7 @@ def test_success(): status=200, 
content_type='application/json') - with open( - os.path.join(os.path.dirname(__file__), '../../resources/speech.wav'), - 'rb') as audio_file: + with open(os.path.join(os.path.dirname(__file__), '../../resources/speech.wav'), 'rb') as audio_file: speech_to_text.recognize( audio=audio_file, content_type='audio/l16; rate=44100') @@ -116,9 +113,7 @@ def test_recognitions(): speech_to_text.check_job('jobid') assert responses.calls[1].response.json() == {'status': 'waiting'} - with open( - os.path.join(os.path.dirname(__file__), '../../resources/speech.wav'), - 'rb') as audio_file: + with open(os.path.join(os.path.dirname(__file__), '../../resources/speech.wav'), 'rb') as audio_file: speech_to_text.create_job(audio=audio_file, content_type='audio/basic') assert responses.calls[2].response.json() == {'status': 'waiting'} @@ -482,9 +477,7 @@ def test_custom_audio_resources(): speech_to_text = watson_developer_cloud.SpeechToTextV1( username="username", password="password") - with open( - os.path.join(os.path.dirname(__file__), '../../resources/speech.wav'), - 'rb') as audio_file: + with open(os.path.join(os.path.dirname(__file__), '../../resources/speech.wav'), 'rb') as audio_file: speech_to_text.add_audio( customization_id='custid', audio_name="hiee", diff --git a/watson_developer_cloud/conversation_v1.py b/watson_developer_cloud/conversation_v1.py index d7385b6fe..4940e8e8c 100644 --- a/watson_developer_cloud/conversation_v1.py +++ b/watson_developer_cloud/conversation_v1.py @@ -484,9 +484,7 @@ def update_intent(self, if intent is None: raise ValueError('intent must be provided') if new_examples is not None: - new_examples = [ - self._convert_model(x, CreateExample) for x in new_examples - ] + new_examples = [self._convert_model(x, CreateExample) for x in new_examples] params = {'version': self.version} data = { 'intent': new_intent, diff --git a/watson_developer_cloud/speech_to_text_v1.py b/watson_developer_cloud/speech_to_text_v1.py index c23d7f41c..9c75da3fd 100644 --- 
a/watson_developer_cloud/speech_to_text_v1.py +++ b/watson_developer_cloud/speech_to_text_v1.py @@ -28,9 +28,9 @@ from .watson_service import WatsonService, _remove_null_values from .utils import deprecated from watson_developer_cloud.websocket import RecognizeCallback, RecognizeListener -from os.path import isfile import base64 import urllib + ############################################################################## # Service ############################################################################## @@ -187,25 +187,25 @@ def recognize(self, return response def recognize_with_websocket(self, - audio=None, - content_type='audio/l16; rate=44100', - model='en-US_BroadbandModel', - recognize_callback=None, - customization_id=None, - acoustic_customization_id=None, - customization_weight=None, - version=None, - inactivity_timeout=None, - interim_results=True, - keywords=None, - keywords_threshold=None, - max_alternatives=1, - word_alternatives_threshold=None, - word_confidence=False, - timestamps=False, - profanity_filter=None, - smart_formatting=False, - speaker_labels=None): + audio=None, + content_type='audio/l16; rate=44100', + model='en-US_BroadbandModel', + recognize_callback=None, + customization_id=None, + acoustic_customization_id=None, + customization_weight=None, + version=None, + inactivity_timeout=None, + interim_results=True, + keywords=None, + keywords_threshold=None, + max_alternatives=1, + word_alternatives_threshold=None, + word_confidence=False, + timestamps=False, + profanity_filter=None, + smart_formatting=False, + speaker_labels=None): """ Sends audio for speech recognition using web sockets. 
@@ -235,46 +235,44 @@ def recognize_with_websocket(self, if recognize_callback is None: raise ValueError('Recognize callback must be provided') if not isinstance(recognize_callback, RecognizeCallback): - raise Exception('Callback is not a derived class of RecognizeCallback') + raise Exception( + 'Callback is not a derived class of RecognizeCallback') headers = {} if self.default_headers is not None: headers = self.default_headers.copy() - base64_authorization = base64.b64encode(self.username + ':' + self.password) + base64_authorization = base64.b64encode( + self.username + ':' + self.password) headers['Authorization'] = 'Basic {0}'.format(base64_authorization) url = self.url.replace('https:', 'wss:') params = { - 'model': model, - 'customization_id': customization_id, - 'acoustic_customization_id': acoustic_customization_id, - 'customization_weight': customization_weight, - 'version': version - } + 'model': model, + 'customization_id': customization_id, + 'acoustic_customization_id': acoustic_customization_id, + 'customization_weight': customization_weight, + 'version': version + } params = _remove_null_values(params) url = url + '/v1/recognize?{0}'.format(urllib.urlencode(params)) options = { - 'content_type': content_type, - 'inactivity_timeout': inactivity_timeout, - 'interim_results': interim_results, - 'keywords': keywords, - 'keywords_threshold': keywords_threshold, - 'max_alternatives': max_alternatives, - 'word_alternatives_threshold': word_alternatives_threshold, - 'word_confidence': word_confidence, - 'timestamps': timestamps, - 'profanity_filter': profanity_filter, - 'smart_formatting': smart_formatting, - 'speaker_labels': speaker_labels - } + 'content_type': content_type, + 'inactivity_timeout': inactivity_timeout, + 'interim_results': interim_results, + 'keywords': keywords, + 'keywords_threshold': keywords_threshold, + 'max_alternatives': max_alternatives, + 'word_alternatives_threshold': word_alternatives_threshold, + 'word_confidence': 
word_confidence, + 'timestamps': timestamps, + 'profanity_filter': profanity_filter, + 'smart_formatting': smart_formatting, + 'speaker_labels': speaker_labels + } options = _remove_null_values(options) - recognizeListener = RecognizeListener(audio, - options, - recognize_callback, - url, - headers) + RecognizeListener(audio, options, recognize_callback, url, headers) ######################### # Asynchronous @@ -358,9 +356,7 @@ def create_job(self, raise ValueError('audio must be provided') if content_type is None: raise ValueError('content_type must be provided') - headers = { - 'Content-Type': content_type - } + headers = {'Content-Type': content_type} params = { 'model': model, 'callback_url': callback_url, @@ -484,10 +480,7 @@ def create_language_model(self, } url = '/v1/customizations' response = self.request( - method='POST', - url=url, - json=data, - accept_json=True) + method='POST', url=url, json=data, accept_json=True) return response @deprecated('Use create_language_model() instead.') @@ -495,7 +488,8 @@ def create_custom_model(self, name, description="", base_model="en-US_BroadbandModel"): - return self.create_language_model(name, base_model, description=description) + return self.create_language_model( + name, base_model, description=description) def delete_language_model(self, customization_id): """ @@ -764,11 +758,7 @@ def add_word(self, } url = '/v1/customizations/{0}/words/{1}'.format( *self._encode_path_vars(customization_id, word_name)) - self.request( - method='PUT', - url=url, - json=data, - accept_json=True) + self.request(method='PUT', url=url, json=data, accept_json=True) return None @deprecated('Use add_word instead.') @@ -791,11 +781,7 @@ def add_words(self, customization_id, words): data = {'words': words} url = '/v1/customizations/{0}/words'.format( *self._encode_path_vars(customization_id)) - self.request( - method='POST', - url=url, - json=data, - accept_json=True) + self.request(method='POST', url=url, json=data, accept_json=True) 
return None @deprecated('Use add_words() instead.') @@ -883,10 +869,7 @@ def list_custom_words(self, customization_id, word_type=None, sort=None): # customAcousticModels ######################### - def create_acoustic_model(self, - name, - base_model_name, - description=None): + def create_acoustic_model(self, name, base_model_name, description=None): """ Creates a custom acoustic model. @@ -912,10 +895,7 @@ def create_acoustic_model(self, } url = '/v1/acoustic_customizations' response = self.request( - method='POST', - url=url, - json=data, - accept_json=True) + method='POST', url=url, json=data, accept_json=True) return response def delete_acoustic_model(self, customization_id): @@ -1607,7 +1587,7 @@ def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'total_minutes_of_audio' - ) and self.total_minutes_of_audio is not None: + ) and self.total_minutes_of_audio is not None: _dict['total_minutes_of_audio'] = self.total_minutes_of_audio if hasattr(self, 'audio') and self.audio is not None: _dict['audio'] = [x._to_dict() for x in self.audio] @@ -1745,7 +1725,7 @@ def _to_dict(self): if hasattr(self, 'total_words') and self.total_words is not None: _dict['total_words'] = self.total_words if hasattr(self, 'out_of_vocabulary_words' - ) and self.out_of_vocabulary_words is not None: + ) and self.out_of_vocabulary_words is not None: _dict['out_of_vocabulary_words'] = self.out_of_vocabulary_words if hasattr(self, 'status') and self.status is not None: _dict['status'] = self.status @@ -2875,7 +2855,7 @@ def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'custom_language_model' - ) and self.custom_language_model is not None: + ) and self.custom_language_model is not None: _dict['custom_language_model'] = self.custom_language_model if hasattr(self, 'speaker_labels') and self.speaker_labels is not None: _dict['speaker_labels'] = self.speaker_labels diff --git 
a/watson_developer_cloud/utils.py b/watson_developer_cloud/utils.py index 92ac98bf5..e2900407c 100644 --- a/watson_developer_cloud/utils.py +++ b/watson_developer_cloud/utils.py @@ -1,12 +1,17 @@ import warnings + def deprecated(message): - def deprecated_decorator(func): - def deprecated_func(*args, **kwargs): - warnings.simplefilter('always', DeprecationWarning) - warnings.warn("{} is a deprecated function. {}".format(func.__name__, message), - category=DeprecationWarning, - stacklevel=2) - return func(*args, **kwargs) - return deprecated_func - return deprecated_decorator \ No newline at end of file + def deprecated_decorator(func): + def deprecated_func(*args, **kwargs): + warnings.simplefilter('always', DeprecationWarning) + warnings.warn( + "{} is a deprecated function. {}".format( + func.__name__, message), + category=DeprecationWarning, + stacklevel=2) + return func(*args, **kwargs) + + return deprecated_func + + return deprecated_decorator diff --git a/watson_developer_cloud/watson_service.py b/watson_developer_cloud/watson_service.py index 768f9a893..907ffb5dc 100755 --- a/watson_developer_cloud/watson_service.py +++ b/watson_developer_cloud/watson_service.py @@ -18,7 +18,6 @@ import sys from requests.structures import CaseInsensitiveDict import dateutil.parser as date_parser -import json try: from http.cookiejar import CookieJar # Python 3 @@ -245,17 +244,11 @@ def unpack_id(dictionary, label_id): return dictionary[label_id] return dictionary - @staticmethod - def _convert_model(val): - if hasattr(val, "_to_dict"): - return val._to_dict() - return val - @staticmethod def _convert_model(val, classname=None): if classname is not None and not hasattr(val, "_from_dict"): - if type(val) == str: - val = json.loads(val) + if isinstance(val, str): + val = json_import.loads(val) val = classname._from_dict(dict(val)) if hasattr(val, "_to_dict"): return val._to_dict() diff --git a/watson_developer_cloud/websocket/recognize_abstract_callback.py 
b/watson_developer_cloud/websocket/recognize_abstract_callback.py index 0b5c2f968..f58ca920a 100644 --- a/watson_developer_cloud/websocket/recognize_abstract_callback.py +++ b/watson_developer_cloud/websocket/recognize_abstract_callback.py @@ -14,46 +14,49 @@ # See the License for the specific language governing permissions and # limitations under the License. -class RecognizeCallback: - def on_transcription(self, transcript): - """ +class RecognizeCallback(object): + def __init__(self): + pass + + def on_transcription(self, transcript): + """ Called when an interim result is received """ - pass + pass - def on_connected(self): - """ + def on_connected(self): + """ Called when a WebSocket connection was made """ - pass + pass - def on_error(self, error): - """ + def on_error(self, error): + """ Called when there is an error in the Web Socket connection. """ - pass + pass - def on_inactivity_timeout(self): - """ + def on_inactivity_timeout(self, error): + """ Called when there is an inactivity timeout. """ - pass + pass - def on_listening(self): - """ + def on_listening(self): + """ Called when the service is listening for audio. """ - pass + pass - def on_transcription_complete(self): - """ + def on_transcription_complete(self): + """ Called after the service returns the final result for the transcription. """ - pass + pass - def on_hypothesis(self, hypothesis): - """ + def on_hypothesis(self, hypothesis): + """ Called when the service returns the final hypothesis """ - pass \ No newline at end of file + pass diff --git a/watson_developer_cloud/websocket/speech_to_text_websocket_listener.py b/watson_developer_cloud/websocket/speech_to_text_websocket_listener.py index c317a8c95..e87d4addc 100644 --- a/watson_developer_cloud/websocket/speech_to_text_websocket_listener.py +++ b/watson_developer_cloud/websocket/speech_to_text_websocket_listener.py @@ -14,8 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import sys -import os import json # WebSockets @@ -27,7 +25,8 @@ CLOSE_SIGNAL = 1000 TEN_MILLISECONDS = 0.01 -class RecognizeListener: + +class RecognizeListener(object): def __init__(self, audio, options, recognize_callback, url, headers): self.audio = audio self.options = options @@ -35,8 +34,8 @@ def __init__(self, audio, options, recognize_callback, url, headers): self.url = url self.headers = headers - factory = self.WebSocketClientFactory(self.audio, self.options, - self.callback, self.url, self.headers) + factory = self.WebSocketClientFactory( + self.audio, self.options, self.callback, self.url, self.headers) factory.protocol = self.WebSocketClient if factory.isSecure: @@ -73,13 +72,14 @@ def send_chunk(chunk, final=False): if final: self.sendMessage(b'', isBinary=True) - if (self.bytes_sent + ONE_KB >= len(data)): - if (len(data) > self.bytes_sent): + if self.bytes_sent + ONE_KB >= len(data): + if len(data) > self.bytes_sent: send_chunk(data[self.bytes_sent:len(data)], True) return send_chunk(data[self.bytes_sent:self.bytes_sent + ONE_KB]) - self.factory.reactor.callLater(TEN_MILLISECONDS, self.send_audio, data=data) + self.factory.reactor.callLater( + TEN_MILLISECONDS, self.send_audio, data=data) def extract_transcripts(self, alternatives): transcripts = [] From 5847d7a13495f7b91dc693091599b93ef5dfa775 Mon Sep 17 00:00:00 2001 From: German Attanasio Date: Wed, 7 Mar 2018 22:35:53 -0500 Subject: [PATCH 38/45] fix(lint): update code based on pylint --- .../tone_conversation_integration.v1.py | 2 +- test/integration/__init__.py | 20 +++++-------------- test/integration/test_examples.py | 13 ------------ .../test_integration_discovery_v1.py | 1 - .../test_integration_speech_to_text_v1.py | 3 ++- .../test_integration_text_to_speech_v1.py | 4 ++-- .../test_integration_visual_recognition.py | 1 + test/unit/__init__.py | 20 ++++++------------- test/unit/test_conversation_v1.py | 15 +------------- test/unit/test_discovery_v1.py | 1 + 
test/unit/test_language_translator_v2.py | 14 ------------- .../test_natural_language_classifier_v1.py | 1 + .../test_natural_language_understanding.py | 1 + test/unit/test_personality_insights_v2.py | 1 + test/unit/test_personality_insights_v3.py | 1 + test/unit/test_tone_analyzer_v3.py | 1 + test/unit/test_visual_recognition_v3.py | 1 + tox.ini | 2 +- watson_developer_cloud/__init__.py | 1 + watson_developer_cloud/authorization_v1.py | 1 + .../features/v1/__init__.py | 1 + watson_developer_cloud/utils.py | 1 - watson_developer_cloud/watson_service.py | 1 + .../speech_to_text_websocket_listener.py | 10 +++++----- 24 files changed, 35 insertions(+), 82 deletions(-) diff --git a/examples/conversation_tone_analyzer_integration/tone_conversation_integration.v1.py b/examples/conversation_tone_analyzer_integration/tone_conversation_integration.v1.py index 60724f721..c0cecc45f 100644 --- a/examples/conversation_tone_analyzer_integration/tone_conversation_integration.v1.py +++ b/examples/conversation_tone_analyzer_integration/tone_conversation_integration.v1.py @@ -58,7 +58,7 @@ def invokeToneConversation(payload, maintainToneHistoryInContext): with application-specific code to process the err or data object returned by the Conversation Service. """ - tone = tone_analyzer.tone(tone_input=payload['input']['text']) + tone = tone_analyzer.tone(tone_input=payload['input']['text'], content_type='application/json') conversation_payload = tone_detection.\ updateUserTone(payload, tone, maintainToneHistoryInContext) response = conversation.message(workspace_id=workspace_id, diff --git a/test/integration/__init__.py b/test/integration/__init__.py index b4a466217..161119efe 100644 --- a/test/integration/__init__.py +++ b/test/integration/__init__.py @@ -1,19 +1,9 @@ # coding: utf-8 - -# Copyright 2015 IBM All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +from __future__ import print_function from dotenv import load_dotenv, find_dotenv # load the .env file containing your environment variables for the required -load_dotenv(find_dotenv()) +try: + load_dotenv(find_dotenv()) +except: + print('warning: no .env file loaded') diff --git a/test/integration/test_examples.py b/test/integration/test_examples.py index 6bd51b9fb..9e5e7d634 100644 --- a/test/integration/test_examples.py +++ b/test/integration/test_examples.py @@ -15,19 +15,6 @@ # examples path. /examples examples_path = join(dirname(__file__), '../', 'examples', '*.py') -# environment variables -try: - from dotenv import load_dotenv # pylint: disable=C0413 -except: - print ('warning: dotenv module could not be imported') - -try: - dotenv_path = join(dirname(__file__), '../', '.env') - load_dotenv(dotenv_path) -except: - print ('warning: no .env file loaded') - - @pytest.mark.skipif(os.getenv('VCAP_SERVICES') is None, reason='requires VCAP_SERVICES') def test_examples(): diff --git a/test/integration/test_integration_discovery_v1.py b/test/integration/test_integration_discovery_v1.py index 287c76d2c..fbd7c214a 100644 --- a/test/integration/test_integration_discovery_v1.py +++ b/test/integration/test_integration_discovery_v1.py @@ -1,5 +1,4 @@ # coding: utf-8 - from unittest import TestCase import os import watson_developer_cloud diff --git a/test/integration/test_integration_speech_to_text_v1.py b/test/integration/test_integration_speech_to_text_v1.py index e55810bf6..7cb2f433b 100644 --- a/test/integration/test_integration_speech_to_text_v1.py +++ 
b/test/integration/test_integration_speech_to_text_v1.py @@ -1,3 +1,4 @@ +# coding: utf-8 from unittest import TestCase import os import watson_developer_cloud @@ -52,7 +53,7 @@ def test_recognitions(self): def test_custom_corpora(self): output = self.speech_to_text.list_corpora(self.customization_id) - assert len(output['corpora']) == 0 + assert output['corpora'] def test_acoustic_model(self): list_models = self.speech_to_text.list_acoustic_models() diff --git a/test/integration/test_integration_text_to_speech_v1.py b/test/integration/test_integration_text_to_speech_v1.py index fed7921c6..c056ddde7 100644 --- a/test/integration/test_integration_text_to_speech_v1.py +++ b/test/integration/test_integration_text_to_speech_v1.py @@ -1,4 +1,4 @@ - +# coding: utf-8 import unittest import watson_developer_cloud @@ -47,7 +47,7 @@ def test_customizations(self): def test_custom_words(self): customization_id = self.created_customization['customization_id'] words = self.text_to_speech.list_words(customization_id)['words'] - assert len(words) == 0 + assert words self.text_to_speech.add_word( customization_id, word="ACLs", translation="ackles") diff --git a/test/integration/test_integration_visual_recognition.py b/test/integration/test_integration_visual_recognition.py index 2987f54cd..28c6b7c6c 100644 --- a/test/integration/test_integration_visual_recognition.py +++ b/test/integration/test_integration_visual_recognition.py @@ -1,3 +1,4 @@ +# coding: utf-8 import pytest import watson_developer_cloud import os diff --git a/test/unit/__init__.py b/test/unit/__init__.py index db5d6b2c5..161119efe 100644 --- a/test/unit/__init__.py +++ b/test/unit/__init__.py @@ -1,17 +1,9 @@ -# Copyright 2015 IBM All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +# coding: utf-8 +from __future__ import print_function from dotenv import load_dotenv, find_dotenv # load the .env file containing your environment variables for the required -load_dotenv(find_dotenv()) +try: + load_dotenv(find_dotenv()) +except: + print('warning: no .env file loaded') diff --git a/test/unit/test_conversation_v1.py b/test/unit/test_conversation_v1.py index dbb8e5aa2..40ce0e8c8 100644 --- a/test/unit/test_conversation_v1.py +++ b/test/unit/test_conversation_v1.py @@ -1,17 +1,4 @@ -# Copyright 2017 IBM All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- +# coding: utf-8 import json import datetime from dateutil.tz import tzutc diff --git a/test/unit/test_discovery_v1.py b/test/unit/test_discovery_v1.py index 4902e983f..60ab381b9 100644 --- a/test/unit/test_discovery_v1.py +++ b/test/unit/test_discovery_v1.py @@ -1,3 +1,4 @@ +# coding: utf-8 import responses import os import json diff --git a/test/unit/test_language_translator_v2.py b/test/unit/test_language_translator_v2.py index c9f9a3784..4e45c98d3 100644 --- a/test/unit/test_language_translator_v2.py +++ b/test/unit/test_language_translator_v2.py @@ -1,19 +1,5 @@ # coding=utf-8 -# Copyright 2017 IBM All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- import json import responses import watson_developer_cloud diff --git a/test/unit/test_natural_language_classifier_v1.py b/test/unit/test_natural_language_classifier_v1.py index cf5236bec..edb1a501a 100644 --- a/test/unit/test_natural_language_classifier_v1.py +++ b/test/unit/test_natural_language_classifier_v1.py @@ -1,3 +1,4 @@ +# coding: utf-8 import os import responses import watson_developer_cloud diff --git a/test/unit/test_natural_language_understanding.py b/test/unit/test_natural_language_understanding.py index 290edaf5c..9b9ae6164 100644 --- a/test/unit/test_natural_language_understanding.py +++ b/test/unit/test_natural_language_understanding.py @@ -1,3 +1,4 @@ +# coding: utf-8 from unittest import TestCase from watson_developer_cloud import NaturalLanguageUnderstandingV1 from watson_developer_cloud.natural_language_understanding_v1 import \ diff --git a/test/unit/test_personality_insights_v2.py b/test/unit/test_personality_insights_v2.py index 2095f217b..dc27c77f2 100644 --- a/test/unit/test_personality_insights_v2.py +++ b/test/unit/test_personality_insights_v2.py @@ -1,3 +1,4 @@ +# coding: utf-8 import responses import watson_developer_cloud import os diff --git a/test/unit/test_personality_insights_v3.py b/test/unit/test_personality_insights_v3.py index 00120b32a..f0b152730 100755 --- a/test/unit/test_personality_insights_v3.py +++ b/test/unit/test_personality_insights_v3.py @@ -1,3 +1,4 @@ +# coding: utf-8 import responses import watson_developer_cloud import os diff --git a/test/unit/test_tone_analyzer_v3.py b/test/unit/test_tone_analyzer_v3.py index e078f0b81..d41097eef 100755 --- a/test/unit/test_tone_analyzer_v3.py +++ b/test/unit/test_tone_analyzer_v3.py @@ -1,3 +1,4 @@ +# coding: utf-8 import responses import watson_developer_cloud from watson_developer_cloud import WatsonException diff --git a/test/unit/test_visual_recognition_v3.py b/test/unit/test_visual_recognition_v3.py index 11e964f7e..cef145ec5 100644 --- 
a/test/unit/test_visual_recognition_v3.py +++ b/test/unit/test_visual_recognition_v3.py @@ -1,3 +1,4 @@ +# coding: utf-8 import responses import watson_developer_cloud import json diff --git a/tox.ini b/tox.ini index 0c10c7d98..ab7f5c85d 100644 --- a/tox.ini +++ b/tox.ini @@ -4,7 +4,7 @@ envlist = lint, py27, py34, py35, py36 [testenv:lint] basepython = python2.7 deps = pylint -commands = pylint watson_developer_cloud +commands = pylint watson_developer_cloud test examples [testenv] passenv = TOXENV CI TRAVIS* diff --git a/watson_developer_cloud/__init__.py b/watson_developer_cloud/__init__.py index 96c1f3bdd..d44373e18 100755 --- a/watson_developer_cloud/__init__.py +++ b/watson_developer_cloud/__init__.py @@ -1,3 +1,4 @@ +# coding: utf-8 # Copyright 2016 IBM All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/watson_developer_cloud/authorization_v1.py b/watson_developer_cloud/authorization_v1.py index c3dc4ef67..f1e15aaae 100644 --- a/watson_developer_cloud/authorization_v1.py +++ b/watson_developer_cloud/authorization_v1.py @@ -1,3 +1,4 @@ +# coding: utf-8 # Copyright 2016 IBM All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/watson_developer_cloud/natural_language_understanding/features/v1/__init__.py b/watson_developer_cloud/natural_language_understanding/features/v1/__init__.py index ba1b10bbe..21324dba0 100644 --- a/watson_developer_cloud/natural_language_understanding/features/v1/__init__.py +++ b/watson_developer_cloud/natural_language_understanding/features/v1/__init__.py @@ -1,3 +1,4 @@ +# coding: utf-8 class Feature(object): def toDict(self): res = {} diff --git a/watson_developer_cloud/utils.py b/watson_developer_cloud/utils.py index e2900407c..bb709d4ff 100644 --- a/watson_developer_cloud/utils.py +++ b/watson_developer_cloud/utils.py @@ -1,6 +1,5 @@ import warnings - def deprecated(message): def deprecated_decorator(func): def deprecated_func(*args, **kwargs): diff --git a/watson_developer_cloud/watson_service.py b/watson_developer_cloud/watson_service.py index 907ffb5dc..cc0f83458 100755 --- a/watson_developer_cloud/watson_service.py +++ b/watson_developer_cloud/watson_service.py @@ -1,3 +1,4 @@ +# coding: utf-8 # Copyright 2017 IBM All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/watson_developer_cloud/websocket/speech_to_text_websocket_listener.py b/watson_developer_cloud/websocket/speech_to_text_websocket_listener.py index e87d4addc..c1b11adad 100644 --- a/watson_developer_cloud/websocket/speech_to_text_websocket_listener.py +++ b/watson_developer_cloud/websocket/speech_to_text_websocket_listener.py @@ -44,7 +44,7 @@ def __init__(self, audio, options, recognize_callback, url, headers): contextFactory = None connectWS(factory, contextFactory) - reactor.run() + reactor.run() # pylint: disable=E1101 class WebSocketClient(WebSocketClientProtocol): def __init__(self, factory, audio, options, callback): @@ -55,7 +55,7 @@ def __init__(self, factory, audio, options, callback): self.isListening = False self.bytes_sent = 0 - super(self.__class__, self).__init__() + super(self.__class__, self).__init__() # pylint: disable=E1003 def build_start_message(self, options): options['action'] = 'start' @@ -129,10 +129,10 @@ def onMessage(self, payload, isBinary): elif 'results' in json_object or 'speaker_labels' in json_object: hypothesis = '' # empty hypothesis - if len(json_object['results']) != 0: + if json_object['results']: hypothesis = json_object['results'][0]['alternatives'][0][ 'transcript'] - b_final = (json_object['results'][0]['final'] == True) + b_final = (json_object['results'][0]['final'] is True) transcripts = self.extract_transcripts( json_object['results'][0]['alternatives']) @@ -155,7 +155,7 @@ def __init__(self, audio, options, callback, url=None, headers=None): self.closeHandshakeTimeout = self.SIX_SECONDS def endReactor(self): - reactor.stop() + reactor.stop() # pylint: disable=E1101 # this function gets called every time connectWS is called (once per WebSocket connection/session) def buildProtocol(self, addr): From c52250df4e29d82932c20f1a96de1f9373ed1257 Mon Sep 17 00:00:00 2001 From: German Attanasio Date: Wed, 7 Mar 2018 22:51:38 -0500 Subject: [PATCH 
39/45] fix(STT,TTS): fix lint --- .pylintrc | 2 +- test/integration/test_integration_speech_to_text_v1.py | 2 +- test/integration/test_integration_text_to_speech_v1.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.pylintrc b/.pylintrc index 2e4b9f262..18e99f8d5 100644 --- a/.pylintrc +++ b/.pylintrc @@ -1,4 +1,4 @@ # lint Python modules using external checkers. [MASTER] ignore=SVN -disable=R0903,R0912,R0913,R0914,R0915,W0141,C0111,C0103,W0603,W0703,R0911,C0301,C0302,R0902,R0904,W0142,W0212,E1101,E1103,R0201,W0201,W0122,W0232,RP0001,RP0003,RP0101,RP0002,RP0401,RP0701,RP0801,F0401,E0611,R0801,I0011,F0401,E0611,E1004,C0111,I0011,I0012,W0704,W0142,W0212,W0232,W0613,W0702,R0201,W0614,R0914,R0912,R0915,R0913,R0904,R0801,C0301,C0411,R0204,W0622 +disable=R0903,R0912,R0913,R0914,R0915,W0141,C0111,C0103,W0603,W0703,R0911,C0301,C0302,R0902,R0904,W0142,W0212,E1101,E1103,R0201,W0201,W0122,W0232,RP0001,RP0003,RP0101,RP0002,RP0401,RP0701,RP0801,F0401,E0611,R0801,I0011,F0401,E0611,E1004,C0111,I0011,I0012,W0704,W0142,W0212,W0232,W0613,W0702,R0201,W0614,R0914,R0912,R0915,R0913,R0904,R0801,C0301,C0411,R0204,W0622,inconsistent-return-statements diff --git a/test/integration/test_integration_speech_to_text_v1.py b/test/integration/test_integration_speech_to_text_v1.py index 7cb2f433b..5f7884115 100644 --- a/test/integration/test_integration_speech_to_text_v1.py +++ b/test/integration/test_integration_speech_to_text_v1.py @@ -53,7 +53,7 @@ def test_recognitions(self): def test_custom_corpora(self): output = self.speech_to_text.list_corpora(self.customization_id) - assert output['corpora'] + assert len(output['corpora']) == 0 # pylint: disable=len-as-condition def test_acoustic_model(self): list_models = self.speech_to_text.list_acoustic_models() diff --git a/test/integration/test_integration_text_to_speech_v1.py b/test/integration/test_integration_text_to_speech_v1.py index c056ddde7..ba1fbdffc 100644 --- a/test/integration/test_integration_text_to_speech_v1.py 
+++ b/test/integration/test_integration_text_to_speech_v1.py @@ -47,7 +47,7 @@ def test_customizations(self): def test_custom_words(self): customization_id = self.created_customization['customization_id'] words = self.text_to_speech.list_words(customization_id)['words'] - assert words + assert len(words) == 0 # pylint: disable=len-as-condition self.text_to_speech.add_word( customization_id, word="ACLs", translation="ackles") From 7d6d8fa477e90591058c6cb44ceae4f85f1ee7a4 Mon Sep 17 00:00:00 2001 From: German Attanasio Date: Wed, 7 Mar 2018 22:51:43 -0500 Subject: [PATCH 40/45] fix(Travis): update encrypted file --- .env.enc | Bin 2592 -> 2592 bytes .travis.yml | 7 ++++--- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/.env.enc b/.env.enc index 6e1f6ead725c7eba24402eb89c43fe727b0eefe9..0785febc1cbd5b0b184e5fa38a3f0bb1b00fd15a 100644 GIT binary patch literal 2592 zcmV+*3g7kMVtvp@FO@T)HIXz-=x8I=%jk7(Jm${GNU4kgY<|P5Df$lBHbWdcec%7@e0Cnj)b>{ zIu2)N@y>V}=2iGx%9$r}+`v|12s3Y5tx%lss4R|7C<|=LN<{^Y3R{!|+>SWU%lqvmpuevpAEKp~27l zL|t5byy`Y25xz$m9G$9xK-u%PGmV%ChT3|8_`g zQ`AF2Tgbo}VbQ1e#ri^Hks7xdx!4Nx#hdgx_qtZ`@duL0@NZDxA$?oGg!e8d5urmr zO7TTMM9IxSWb#EtcX;bvSq+3V#fteU)HquRE;=LuZ(@I7agz_}*uo)y!Z^NztOY`v z10&*e!$pqE?ER$?jjKw=G^moYB@P=_EA1k#@$8}`2!`KDojb&L>c20z{w7r-J$Cw; zfm;q#{G$NQu4z``7)76ibCPcH1<-ylM)Pj)$Ta)cZSZ|%lto>-^QrvHGHNx5Bni|* z=iVH{_@+bjU>?~n)~>KaSRzj{T3U(XK+9=*SXi7mAS3bz!2hhPM{<;X!3W{TVX1A0 zu&-jL^&Kf_!6ELr9i0g|BjmkhX}i~nL2;?!g=j{$@inq;WNbz0C+06>0>-PHCTYibRoVe{#{+gor#IQ{T-dTDvahEb^gkt6d>NdB4FsyA@bxhfIUP^p zfPA5CmVDGz_EMwb=-fo6S>z2`Jrr#yQWH*E+7AsGw(hOZg%a1BB9(x!PR2cd(Bm8_) zVTt9(mPF6#-Y)pqv4ytEzUNGwd5^WkXtX#xwv<==KErcQOaI-h&LO?yfir!wI600m zmL0MFnw3X9WexCM4t%=AB@j|a6uxkuUO2LarrrdTku(|uMHVKDu3 zdE9dM3_-T_U$kLJcZqKUxS>6}mGeq5fub<^3aFf0XI;FgCGfqPEo43?Y4479rKeC6vImXc%V>vNZf)o zsp(MPqi)MpD0Q3sE%v~GvFkcn8(HNsi$(_HgW>xkPcHCz`%n&(MR*gqe+y7DAJLLkXO@dg&P`|)mlNiPw1x^)=tF1EH)h| zo6P^)zAqaS9H3mgpHFeWrMv5{U?i#9fc#73=tJPGD*n{s9DN~F%K9V*Z2Jg;Lf@R) 
z3$2wC;M-=8XN7MJ!2BDgmWw$oUhkB1Q)I;4ZdU&Km!rr{k;Ig#;+S(8SGLGRNjZ60DL9U(4Wo!HDA}*uEIhsE8TG!; z7z~%;H}Rls&r-bB0En0fGP2;wAqgEk$=`hUcdU!Ac`L_K5wgN1q-y1u#>@a99^4Wq zJyE|oSq2M#$KOlRC+J!eG`c#hzy4x|oGu{)b4kF=tr6{HfW2VOoZ)ci>ZLrMG0S`5 zK>!81{E;Mnhl(bfj;NEpeHxCUa@hwAY;NxZ(P4Xrn(QE zrR;6b!QQ4@TVbD8hm341lvze)`XE&+gKCTVlpDN@uM_%h*-=c>{tB-Sy}MNQPNms~ z&ISYVyof`yI;i9R!qcsfDI5q`Eu;}4cjh^xCGr*~AspZRY}gA|@keGRs<_%O)8L{r zIMV$#T7`*mi)lh0{kmT?8K%8j+xyUqn*ex4K05<2ZNe_X7_5PGp}ko};Xg@CS=igt zrY)uRiwrEKc{h*a>M(H!Ta1j@zgd3K$%k{m3!^N2GN7ONuYDGLsT%G{qZqpz;L478 z)>(4{_*`8Y9=Wfo*@7+v!Ar4{85d`%i0zXe4r>4~e_S>n@FW@hz#ISUImt97VgvMx ziJNDrv%IjVl$V!Hu~-#tT%n9Tkh`Cy)4G&`FbbDQv6<*dvd2(+gWEi!%N^tAPU^fu z+{s9i%WF!R4kQ$-mF?O%Jz+YTq=>>kgK#F?&zUQR0I1P_Si?*xzF1O)@eu86fOkst z2C%|TomZ24lNdQJRufb%w?#I9u@-yjr>*p({589oR0J84C+Re1$h?~k^6}iKYa8Pd zp$;bzu51mk&oB-|MV3EpeR81W4))s~*n25PjL|(vYENkq^c~STN2_q=USX2fO?u-< z8P6MY&{ECa6G-pf1;50nW=dd({3PU08CFhi&imZ=Xd1>%C_@Cm%^_DN z!p${Ih`5&d7qgAXQ39T>GjRqwR@<|Ln1NB~fLj%SMY{)n+ zlA)uBs`*&2HO%ZVD(rUVN{dK9s6-Q!Hk&~&pwr^1IMU!ISe(qJ7}g3$h%Gm4ECY2g CgcK+M literal 2592 zcmV+*3g7kTb?Rs`XYL91^<~-c(NC63B%gTOTPL}#md~2+W^IB37TVsJCaVIvY|`An z6gF)xaAo`l>~gvrguH!^;O1ldObR@Gl2nEL23?ZPH9xpu+ETi_ATz~wRc{{(ni|Db%IsO zFSy&{dlHd@uNQi?fHziP@m2cqF-s7{RUI5$d%J>NJ2g^`6;{bre#N1C;P-dr{EhCL zz9kunE}{j){clwxqRWeI2;UiS(<6LJH3VvbQ&0FZnVs+|0_ajc_=5tu49JD4vnG`} z)M7=&_AMuOdDuT*-CPB^GIsIoM)C9yEmHgFBeMn$ao<~ZR{lv*#?}YEP<#xtn`3h> zbyx3bMr}(7dCsBU2eR}z{!CrSWQ!u;c)xq8)Y0OVx!U*6`^`tF7gI2PSjR!yD#{dj z-x0bNzaW~2T~EZ-Q_zZ3f;d`H(0POr(Nc|``YkQu%$Yw&gAay<6_Hi+_tYnu%8?y+ zO$0Oa(e6&AVCq%vN95>oktut6T{UO!HFGk`0W@NabuHJsl8q6Hx-R+fGq#yKAN5r# zKC;4866ISjwLv#x+Fpuc7{GITHn7|F-mio;nBez~kcxw{(uz&5f1OZQL>hihiq0^f zf>^VQNq+0N+RSl`*x%)p(c&$G^qxS<-aaD;-P!Ejz8bJh(&V^JPNNy1gGgO4KlAL`kj74(y}!i9(*!kY2mL|RA{%2;rST|H5@PM!A?DWn9ohu;T_z0yQi%p zY(nZaZV(@Pf_RT+PR~s6C7B><%N%icJDp?zr?UxfGCFU>^>|+d3e(RLT)C}Z)c+ju zfV}qY6JmM^n!w@rX)}DG>ibX&QC&LP4-_&M*Mho1rQrJn7I4zmVm~LblU;N`_akvy zH;!Q;P_yi6S+H%r?6lfE6ce&QH!{d1$LgcrPv$NH7@6&`z``d>i>}#FR^^F=aQ5li 
z?c199C6jy%u1v=cXuHAf%Hc<;D((h&C`_2QdIGuv`@T7SjDLXvxE+IljpOx_vfQ4X zY6rur=&VHFDTF5vm<0xv*>$ZoAXsWOtJM$snnz>HF#a%trMJ)yrOESB4#zL}uAX<8 z5YgA_z?bm~V97hW?Tg@Xv71;|wk1ezqc?O6G*D}a05Xa}s$s7U8b86CkjJXk@DbeTnzIki41 z2oya(Q^gX9x-+FK1cyg#;Vx95!|?hl&Z+a1&(H^t)!+yRg7VGd@5vMvarWu>*ag*u z1UTrTF5ix<$~&Zpno(;`*6RElTgd=8LWzOFkoQ6cr2>g9-F-HHY*uBX(x+V*ejb1p zb?r!TkclEsAp5=K!QO&@8>tj0E06VR!TxzNbHo+WR(Hlk*FJJwD}TC?gh<@nzNJ zlX}j?!h*0l8Raln!jC6y=x@{XQtXPV->HuL9BM=g2f}EBO6r=D1ftI}^gi^ywA!!m z3iCvu(*7*h9p0IK=3Q9IE^R&QV``t@5;J72CKvnTg6ShX@TN3FLqC)IlQ~gVPRW$bmW)3PoDGD|FtP93SeG1Bise zPB=#rr;`GJyN5`5h@mE^JQVybHoP;24^a+<4ro-(>`QSgmD%SOIx!(GXjHMoy6#d7 z|1gLH)r(QD)e{j#Q8!Up<}GXM33OqCAV9&2FBdeuyw;KprK1kCTxIT$+afWyPu?mP z=7xPO{z`i{{%tBZqLY(A-%bi!ppOz6^z_E9F3ZyK&Y^Ty&?Fj0`ue${PENikl5Lm=Dcz` zl&GhObhPASbFj-K6&QweGTPC6a8n|KF@GIH`6NHk3M-ti-}8n42v^?6)r2U6i38MW zLCRG70|n`Hv8e!2pi+EkY>^KxIJ*a*M@R4{GJy1QfksM|?23g#p0_lExx*ZkPHzP# z!1lLxNj&@{+1aQpxejt$7mU9_Dm#9lCb!KbExZ&LMcmw{e}u4J>s)}>LAg?bh=#Qo zrqECk&OpOY`9xB!%hf&?W;FUG#n151W(e-@%%ff`c55H8vr0*Xck>KkhhHZ% zAv`@`18RZ@m>VdX3T>-f(gpApn5l&uw~V*wIOi?I{nMa!6}T0?KQ=abnYHzWT@ONo z=}tw(e|8**#1)}{i$^S;{pIZhoG|VwQ30Z#`(k8wv72_dwf4tovu$m#3wI%xG<<7e zn{$;ZE@W6zYY+8!XhR|ZvIV>Muex-62EX8M_M5p->2kz)057_h2(1y<{|dHh;L@kp z5M~o+9h}qR3h8QBs&8Fh;~yQas@eLn2+gh zc+6F4mnSi1;KK^tK{b5yN44OLo$LmuQXL{otV+k?rLvD*{|0@p-MxH0zxU=uRa~E2 zK3*Q|z(w&1wxxTpBFpw|zE+b!^EiyIyOK$)ryN>^_VFi|xq)Sbkk;dqIb9RWGRi1o zyykDEZwz|f=AM4f7>7EK4fd&1#dW#2Lzz2?xspx!%=l+`Lg;5QCc24uQ_E{#1}%~I C!3;kD diff --git a/.travis.yml b/.travis.yml index cbc1b4be4..f4881da02 100644 --- a/.travis.yml +++ b/.travis.yml @@ -7,10 +7,11 @@ python: - '3.6' cache: pip before_install: -- '[ "${TRAVIS_PULL_REQUEST}" = "false" ] && openssl aes-256-cbc -K $encrypted_cebf25e6c525_key - -iv $encrypted_cebf25e6c525_iv -in .env.enc -out .env -d || true' +- '[ "${TRAVIS_PULL_REQUEST}" = "false" ] && openssl aes-256-cbc -K $encrypted_6e98b3e8e789_key -iv $encrypted_6e98b3e8e789_iv -in .env.enc -out .env -d || true' install: pip install 
tox-travis -script: tox +script: +- pip install -U python-dotenv +- tox before_deploy: - pip install -r requirements.txt - pip install -r requirements-dev.txt From 15f6f0b1fec9e8263b512592207a5f181c2882a2 Mon Sep 17 00:00:00 2001 From: Erika Dsouza Date: Thu, 8 Mar 2018 09:30:31 -0500 Subject: [PATCH 41/45] refactor: renaming integration tests --- .../{test_integration_discovery_v1.py => test_discovery_v1.py} | 0 ...integration_speech_to_text_v1.py => test_speech_to_text_v1.py} | 0 ...integration_text_to_speech_v1.py => test_text_to_speech_v1.py} | 0 ...tegration_visual_recognition.py => test_visual_recognition.py} | 0 4 files changed, 0 insertions(+), 0 deletions(-) rename test/integration/{test_integration_discovery_v1.py => test_discovery_v1.py} (100%) rename test/integration/{test_integration_speech_to_text_v1.py => test_speech_to_text_v1.py} (100%) rename test/integration/{test_integration_text_to_speech_v1.py => test_text_to_speech_v1.py} (100%) rename test/integration/{test_integration_visual_recognition.py => test_visual_recognition.py} (100%) diff --git a/test/integration/test_integration_discovery_v1.py b/test/integration/test_discovery_v1.py similarity index 100% rename from test/integration/test_integration_discovery_v1.py rename to test/integration/test_discovery_v1.py diff --git a/test/integration/test_integration_speech_to_text_v1.py b/test/integration/test_speech_to_text_v1.py similarity index 100% rename from test/integration/test_integration_speech_to_text_v1.py rename to test/integration/test_speech_to_text_v1.py diff --git a/test/integration/test_integration_text_to_speech_v1.py b/test/integration/test_text_to_speech_v1.py similarity index 100% rename from test/integration/test_integration_text_to_speech_v1.py rename to test/integration/test_text_to_speech_v1.py diff --git a/test/integration/test_integration_visual_recognition.py b/test/integration/test_visual_recognition.py similarity index 100% rename from 
test/integration/test_integration_visual_recognition.py rename to test/integration/test_visual_recognition.py From 18f6c7cbc443c32a694f8d75764a52968ec10491 Mon Sep 17 00:00:00 2001 From: Erika Dsouza Date: Thu, 8 Mar 2018 09:35:59 -0500 Subject: [PATCH 42/45] Instructions for pyaudio --- examples/microphone-speech-to-text.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/examples/microphone-speech-to-text.py b/examples/microphone-speech-to-text.py index 58ae329cd..ebb6c8156 100644 --- a/examples/microphone-speech-to-text.py +++ b/examples/microphone-speech-to-text.py @@ -1,3 +1,6 @@ +# You need to install pyaudio to run this example +# pip install pyaudio + from __future__ import print_function import pyaudio import tempfile From 0cb0c1822339b0b89ac5c10bf6e448f2aa695e6a Mon Sep 17 00:00:00 2001 From: Erika Dsouza Date: Thu, 8 Mar 2018 12:20:49 -0500 Subject: [PATCH 43/45] docs(python sdk): Updating the setup.py with new packages --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 7a4e5b883..40d8866c6 100644 --- a/setup.py +++ b/setup.py @@ -64,7 +64,7 @@ def run_tests(self): version=__version__, description='Client library to use the IBM Watson Services', license='Apache 2.0', - install_requires=['requests>=2.0, <3.0', 'python_dateutil>=2.5.3'], + install_requires=['requests>=2.0, <3.0', 'python_dateutil>=2.5.3', 'autobahn>=0.10.9', 'Twisted>=13.2.0', 'pyOpenSSL>=16.2.0', 'service-identity>=17.0.0'], tests_require=['responses', 'pytest', 'python_dotenv', 'pytest-rerunfailures', 'tox'], cmdclass={'test': PyTest}, author='Jeffrey Stylos', From 3c102ad8728181d89080bd6d73eb86e734a49182 Mon Sep 17 00:00:00 2001 From: Erika Dsouza Date: Thu, 8 Mar 2018 13:32:54 -0500 Subject: [PATCH 44/45] Updating readme with dependencies --- README.md | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 5a9504e2c..348345254 100755 --- a/README.md +++ b/README.md @@ -66,7 +66,7 @@ 
Version 1.0 focuses on the move to programmatically-generated code for many of t ## Migration This version includes many breaking changes as a result of standardizing behavior across the new generated services. Full details on migration from previous versions can be found [here](https://github.com/watson-developer-cloud/python-sdk/wiki/Migration). -## Configuring the http client +## Configuring the http client (Supported from v1.1.0) To set client configs like timeout use the `with_http_config()` function and pass it a dictionary of configs. ```python @@ -88,6 +88,11 @@ print(json.dumps(response, indent=2)) * [requests] * `python_dateutil` >= 2.5.3 * [responses] for testing +* Following for web sockets support in speech to text + * autobahn>=0.10.9 + * Twisted>=13.2.0 + * pyOpenSSL>=16.2.0 +service-identity>=17.0.0 ## Contributing From 843edebfce54a9a6dff3a826820c2b27214681d9 Mon Sep 17 00:00:00 2001 From: Erika Dsouza Date: Thu, 8 Mar 2018 14:08:43 -0500 Subject: [PATCH 45/45] Formatting the readme --- README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 348345254..b8b668a10 100755 --- a/README.md +++ b/README.md @@ -89,10 +89,10 @@ print(json.dumps(response, indent=2)) * `python_dateutil` >= 2.5.3 * [responses] for testing * Following for web sockets support in speech to text - * autobahn>=0.10.9 - * Twisted>=13.2.0 - * pyOpenSSL>=16.2.0 -service-identity>=17.0.0 + * `autobahn` >= 0.10.9 + * `Twisted` >= 13.2.0 + * `pyOpenSSL` >= 16.2.0 + * `service-identity` >= 17.0.0 ## Contributing