diff --git a/.github/workflows/continous-integration.yml b/.github/workflows/continous-integration.yml index 97fce5644301..6b7d199a0006 100644 --- a/.github/workflows/continous-integration.yml +++ b/.github/workflows/continous-integration.yml @@ -39,8 +39,8 @@ jobs: - name: Run Swagger 🕵️‍♀️ run: | npm install -g swagger-cli - swagger-cli validate docs/_static/spec/action-server.yml - swagger-cli validate docs/_static/spec/rasa.yml + swagger-cli validate newdocs/static/spec/action-server.yml + swagger-cli validate newdocs/static/spec/rasa.yml quality: name: Code Quality diff --git a/.gitignore b/.gitignore index af807c259979..4f2a3c2a24a6 100644 --- a/.gitignore +++ b/.gitignore @@ -18,8 +18,6 @@ venv .pytype dist/ pip-wheel-metadata -docs/nlu/_build -docs/_build server/ scala/ mongodb/ @@ -47,14 +45,11 @@ tmp_training_data.json models/ .mypy_cache/ *.tar.gz -docs/nlu/key -docs/nlu/key.pub secrets.tar .pytest_cache test_download.zip bower_components/ build/lib/ -docs/core/_build /models/ node_modules/ npm-debug.log @@ -72,8 +67,6 @@ examples/formbot/models* examples/concertbot/data* examples/concertbot/models* examples/moodbot/models* -docs/core/key -docs/core/key.pub failed_stories.md errors.json pip-wheel-metadata/* diff --git a/Makefile b/Makefile index face21fa7a70..74681e9015b2 100644 --- a/Makefile +++ b/Makefile @@ -35,7 +35,7 @@ clean: rm -rf build/ rm -rf .pytype/ rm -rf dist/ - rm -rf docs/_build + rm -rf docs/build install: poetry run python -m pip install -U pip diff --git a/README.md b/README.md index 5ec79e8c6ad7..04b3ca146428 100644 --- a/README.md +++ b/README.md @@ -258,12 +258,11 @@ make types ### Deploying documentation updates -We use `sphinx-versioning` to build docs for tagged versions and for the master branch. -The static site that gets built is pushed to the `docs` branch of this repo, which doesn't contain -any code, only the site. +We use `Docusaurus v2` to build docs for tagged versions and for the master branch. 
+The static site that gets built is pushed to the `documentation` branch of this repo. -We host the site on netlify. On master branch builds (see `.github/workflows/documentation.yml`), we push the built docs to the `docs` -branch. Netlify automatically re-deploys the docs pages whenever there is a change to that branch. +We host the site on netlify. On master branch builds (see `.github/workflows/documentation.yml`), we push the built docs to +the `documentation` branch. Netlify automatically re-deploys the docs pages whenever there is a change to that branch. ## License diff --git a/docs/Makefile b/docs/Makefile deleted file mode 100644 index 0e3f943b4e84..000000000000 --- a/docs/Makefile +++ /dev/null @@ -1,242 +0,0 @@ -# Makefile for Sphinx documentation -# - -# You can set these variables from the command line. -SPHINXOPTS = -SPHINXBUILD = sphinx-build -SPHINXABUILD = sphinx-autobuild -PAPER = -BUILDDIR = _build - -# User-friendly check for sphinx-build -ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) - $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don\'t have Sphinx installed, grab it from http://sphinx-doc.org/) -endif - -# Internal variables. -PAPEROPT_a4 = -D latex_paper_size=a4 -PAPEROPT_letter = -D latex_paper_size=letter -ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . -# the i18n builder cannot share the environment and doctrees with the others -I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
- -.PHONY: help -help: - @echo "Please use \`make ' where is one of" - @echo " html to make standalone HTML files" - @echo " dirhtml to make HTML files named index.html in directories" - @echo " singlehtml to make a single large HTML file" - @echo " pickle to make pickle files" - @echo " json to make JSON files" - @echo " htmlhelp to make HTML files and a HTML help project" - @echo " qthelp to make HTML files and a qthelp project" - @echo " applehelp to make an Apple Help Book" - @echo " devhelp to make HTML files and a Devhelp project" - @echo " epub to make an epub" - @echo " epub3 to make an epub3" - @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" - @echo " latexpdf to make LaTeX files and run them through pdflatex" - @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" - @echo " text to make text files" - @echo " man to make manual pages" - @echo " texinfo to make Texinfo files" - @echo " info to make Texinfo files and run them through makeinfo" - @echo " gettext to make PO message catalogs" - @echo " changes to make an overview of all changed/added/deprecated items" - @echo " xml to make Docutils-native XML files" - @echo " pseudoxml to make pseudoxml-XML files for display purposes" - @echo " linkcheck to check all external links for integrity" - @echo " doctest to run all doctests embedded in the documentation (if enabled)" - @echo " coverage to run coverage check of the documentation (if enabled)" - @echo " dummy to check syntax errors of document sources" - -.PHONY: clean -clean: - rm -rf $(BUILDDIR)/* - -.PHONY: html -html: - poetry run $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 
- -.PHONY: markdown -markdown: - rm -rf build/markdown - sphinx-build -M markdown ./ build -v - cp -r _static/images/* ../newdocs/static/img - cp -r build/markdown/* ../newdocs/docs - -.PHONY: dirhtml -dirhtml: - poetry run $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." - -.PHONY: singlehtml -singlehtml: - poetry run $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml - @echo - @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." - -.PHONY: pickle -pickle: - poetry run $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle - @echo - @echo "Build finished; now you can process the pickle files." - -.PHONY: json -json: - poetry run $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json - @echo - @echo "Build finished; now you can process the JSON files." - -.PHONY: htmlhelp -htmlhelp: - poetry run $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp - @echo - @echo "Build finished; now you can run HTML Help Workshop with the" \ - ".hhp project file in $(BUILDDIR)/htmlhelp." - -.PHONY: qthelp -qthelp: - poetry run $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp - @echo - @echo "Build finished; now you can run "qcollectiongenerator" with the" \ - ".qhcp project file in $(BUILDDIR)/qthelp, like this:" - @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/rasa_nlu.qhcp" - @echo "To view the help file:" - @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/rasa_nlu.qhc" - -.PHONY: applehelp -applehelp: - poetry run $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp - @echo - @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." - @echo "N.B. You won't be able to view it unless you put it in" \ - "~/Library/Documentation/Help or install it in your application" \ - "bundle." 
- -.PHONY: devhelp -devhelp: - poetry run $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp - @echo - @echo "Build finished." - @echo "To view the help file:" - @echo "# mkdir -p $$HOME/.local/share/devhelp/rasa_nlu" - @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/rasa_nlu" - @echo "# devhelp" - -.PHONY: epub -epub: - poetry run $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub - @echo - @echo "Build finished. The epub file is in $(BUILDDIR)/epub." - -.PHONY: epub3 -epub3: - poetry run $(SPHINXBUILD) -b epub3 $(ALLSPHINXOPTS) $(BUILDDIR)/epub3 - @echo - @echo "Build finished. The epub3 file is in $(BUILDDIR)/epub3." - -.PHONY: latex -latex: - poetry run $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo - @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." - @echo "Run \`make' in that directory to run these through (pdf)latex" \ - "(use \`make latexpdf' here to do that automatically)." - -.PHONY: latexpdf -latexpdf: - poetry run $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo "Running LaTeX files through pdflatex..." - $(MAKE) -C $(BUILDDIR)/latex all-pdf - @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." - -.PHONY: latexpdfja -latexpdfja: - poetry run $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo "Running LaTeX files through platex and dvipdfmx..." - $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja - @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." - -.PHONY: text -text: - poetry run $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text - @echo - @echo "Build finished. The text files are in $(BUILDDIR)/text." - -.PHONY: man -man: - poetry run $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man - @echo - @echo "Build finished. The manual pages are in $(BUILDDIR)/man." - -.PHONY: texinfo -texinfo: - poetry run $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo - @echo - @echo "Build finished. 
The Texinfo files are in $(BUILDDIR)/texinfo." - @echo "Run \`make' in that directory to run these through makeinfo" \ - "(use \`make info' here to do that automatically)." - -.PHONY: info -info: - poetry run $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo - @echo "Running Texinfo files through makeinfo..." - make -C $(BUILDDIR)/texinfo info - @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." - -.PHONY: gettext -gettext: - poetry run $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale - @echo - @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." - -.PHONY: changes -changes: - poetry run $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes - @echo - @echo "The overview file is in $(BUILDDIR)/changes." - -.PHONY: linkcheck -linkcheck: - poetry run $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck - @echo - @echo "Link check complete; look for any errors in the above output " \ - "or in $(BUILDDIR)/linkcheck/output.txt." - -.PHONY: doctest -doctest: - poetry run $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest - @echo "Testing of doctests in the sources finished, look at the " \ - "results in $(BUILDDIR)/doctest/output.txt." - -.PHONY: coverage -coverage: - poetry run $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage - @echo "Testing of coverage in the sources finished, look at the " \ - "results in $(BUILDDIR)/coverage/python.txt." - -.PHONY: xml -xml: - poetry run $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml - @echo - @echo "Build finished. The XML files are in $(BUILDDIR)/xml." - -.PHONY: pseudoxml -pseudoxml: - poetry run $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml - @echo - @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." - -.PHONY: dummy -dummy: - poetry run $(SPHINXBUILD) -b dummy $(ALLSPHINXOPTS) $(BUILDDIR)/dummy - @echo - @echo "Build finished. Dummy builder generates no files." 
- -.PHONY: livehtml -livehtml: - poetry run $(SPHINXABUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html diff --git a/docs/_static/css/custom.css b/docs/_static/css/custom.css deleted file mode 100644 index 77cdf280191d..000000000000 --- a/docs/_static/css/custom.css +++ /dev/null @@ -1,23 +0,0 @@ -dl.glossary dt { - margin-top: 20px; - margin-bottom: 0px; -} - -dl.glossary dd { - margin-top: 2px; -} - -.toggle .header { - display: block; - clear: both; -} - -.toggle .header:after { - content: "▶ show"; - cursor: pointer; -} - -.toggle .header.open:after { - content: "▼ hide"; - cursor: pointer; -} \ No newline at end of file diff --git a/docs/_static/images/component_lifecycle.png b/docs/_static/images/component_lifecycle.png deleted file mode 100644 index 5f73eec61da5..000000000000 Binary files a/docs/_static/images/component_lifecycle.png and /dev/null differ diff --git a/docs/_static/images/contextual_interjection.png b/docs/_static/images/contextual_interjection.png deleted file mode 100644 index 901782a1b63b..000000000000 Binary files a/docs/_static/images/contextual_interjection.png and /dev/null differ diff --git a/docs/_static/images/dialogflow_export.png b/docs/_static/images/dialogflow_export.png deleted file mode 100644 index 41e0d20bbbd5..000000000000 Binary files a/docs/_static/images/dialogflow_export.png and /dev/null differ diff --git a/docs/_static/images/dialogflow_export_2.png b/docs/_static/images/dialogflow_export_2.png deleted file mode 100644 index 4b0e9407e15a..000000000000 Binary files a/docs/_static/images/dialogflow_export_2.png and /dev/null differ diff --git a/docs/_static/images/generic_interjection.png b/docs/_static/images/generic_interjection.png deleted file mode 100644 index 7ef99728a685..000000000000 Binary files a/docs/_static/images/generic_interjection.png and /dev/null differ diff --git a/docs/_static/images/generic_interjection_handled.png b/docs/_static/images/generic_interjection_handled.png deleted file mode 100644 index 
a764f72616cd..000000000000 Binary files a/docs/_static/images/generic_interjection_handled.png and /dev/null differ diff --git a/docs/_static/images/greet_interjection.png b/docs/_static/images/greet_interjection.png deleted file mode 100644 index 3e0654c3fa45..000000000000 Binary files a/docs/_static/images/greet_interjection.png and /dev/null differ diff --git a/docs/_static/images/intent_mappings.png b/docs/_static/images/intent_mappings.png deleted file mode 100644 index 06d8eb333f5a..000000000000 Binary files a/docs/_static/images/intent_mappings.png and /dev/null differ diff --git a/docs/_static/images/intents-user-goals-dialogue-elements.png b/docs/_static/images/intents-user-goals-dialogue-elements.png deleted file mode 100644 index 7db87c347721..000000000000 Binary files a/docs/_static/images/intents-user-goals-dialogue-elements.png and /dev/null differ diff --git a/docs/_static/images/interactive_learning_graph.gif b/docs/_static/images/interactive_learning_graph.gif deleted file mode 100644 index e0c4df4f2fe0..000000000000 Binary files a/docs/_static/images/interactive_learning_graph.gif and /dev/null differ diff --git a/docs/_static/images/knowledge-base-example.png b/docs/_static/images/knowledge-base-example.png deleted file mode 100644 index fcbf165fd002..000000000000 Binary files a/docs/_static/images/knowledge-base-example.png and /dev/null differ diff --git a/docs/_static/images/luis_export.png b/docs/_static/images/luis_export.png deleted file mode 100644 index f250e2fbedad..000000000000 Binary files a/docs/_static/images/luis_export.png and /dev/null differ diff --git a/docs/_static/images/memoization_policy_convo.png b/docs/_static/images/memoization_policy_convo.png deleted file mode 100644 index fe46270e2e77..000000000000 Binary files a/docs/_static/images/memoization_policy_convo.png and /dev/null differ diff --git a/docs/_static/images/mood_bot.png b/docs/_static/images/mood_bot.png deleted file mode 100644 index 3100011bb7cb..000000000000 
Binary files a/docs/_static/images/mood_bot.png and /dev/null differ diff --git a/docs/_static/images/rasa-message-processing.png b/docs/_static/images/rasa-message-processing.png deleted file mode 100644 index 387783b5803b..000000000000 Binary files a/docs/_static/images/rasa-message-processing.png and /dev/null differ diff --git a/docs/_static/spec/action-server.yml b/docs/_static/spec/action-server.yml deleted file mode 100644 index 67fabda8ab0d..000000000000 --- a/docs/_static/spec/action-server.yml +++ /dev/null @@ -1,128 +0,0 @@ -openapi: "3.0.2" -info: - title: "Rasa SDK - Action Server Endpoint" - version: "0.0.0" - description: >- - API of the action server which is used by Rasa - to execute custom actions. -servers: - - url: "http://localhost:5055/webhook" - description: "Local development action server" -paths: - /: - post: - summary: Core request to execute a custom action - description: >- - Rasa Core sends a request to the action server to execute a - certain custom action. As a response to the action call from Core, - you can modify the tracker, e.g. by setting slots and send responses - back to the user. - operationId: call_action - requestBody: - description: >- - Describes the action to be called and provides information on the - current state of the conversation. - required: true - content: - application/json: - schema: - type: object - properties: - next_action: - description: The name of the action which should be executed. - type: string - sender_id: - description: >- - Unique id of the user who is having the - current conversation. - type: string - tracker: - $ref: "./rasa.yml#/components/schemas/Tracker" - domain: - $ref: "./rasa.yml#/components/schemas/Domain" - responses: - 200: - description: Action was executed succesfully. - content: - application/json: - schema: - type: object - properties: - events: - description: Events returned by the action. 
- type: array - items: - $ref: "./rasa.yml#/components/schemas/Event" - responses: - description: >- - List of responses which should be sent to the user - type: array - items: - $ref: "#/components/schemas/Response" - 400: - description: >- - Action execution was rejected. This is the same as returning - an `ActionExecutionRejected` event. - content: - application/json: - schema: - type: object - properties: - action_name: - type: string - description: >- - Name of the action which rejected its execution. - error: - type: string - description: The error message. - 500: - description: >- - The action server encountered an exception while running the action. -components: - schemas: - Response: - oneOf: - - $ref: '#/components/schemas/TextResponse' - - $ref: '#/components/schemas/TemplateResponse' - - $ref: '#/components/schemas/ButtonResponse' - TextResponse: - description: Text which the bot should utter. - type: object - properties: - text: - description: The text which should be uttered. - type: string - required: ["text"] - TemplateResponse: - description: Response template the bot should utter. - type: object - properties: - template: - description: Name of the template - type: string - additionalProperties: - description: Keyword argument to fill the template - type: string - required: ["template"] - ButtonResponse: - description: Text with buttons which should be sent to the user. - type: object - properties: - text: - type: string - description: Message - buttons: - type: array - items: - $ref: '#/components/schemas/Button' - Button: - description: >- - A button which can be clicked by the user in the conversation. - type: object - properties: - title: - type: string - description: The text on the button - payload: - type: string - description: Payload which is sent if the button is pressed. 
diff --git a/docs/_static/spec/rasa.yml b/docs/_static/spec/rasa.yml deleted file mode 100644 index 4afaa495ea8f..000000000000 --- a/docs/_static/spec/rasa.yml +++ /dev/null @@ -1,1666 +0,0 @@ -openapi: 3.0.1 -info: - title: "Rasa - Server Endpoints" - version: "1.0.0" - description: >- - The Rasa server provides endpoints to retrieve trackers of - conversations as well as endpoints to modify them. Additionally, - endpoints for training and testing models are provided. -servers: - - url: "http://localhost:5005" - description: "Local development server" - -paths: - /: - get: - tags: - - Server Information - summary: Health endpoint of Rasa Server - operationId: getHealth - description: >- - This URL can be used as an endpoint to run - health checks against. When the server is running - this will return 200. - responses: - 200: - description: Up and running - content: - text/plain: - schema: - type: string - description: Welcome text of Rasa Server - example: >- - Hello from Rasa: 1.0.0 - - /version: - get: - tags: - - Server Information - operationId: getVersion - summary: Version of Rasa - description: >- - Returns the version of Rasa. - responses: - 200: - description: Version of Rasa - content: - application/json: - schema: - type: object - properties: - version: - type: string - description: >- - Rasa version number - minimum_compatible_version: - type: string - description: >- - Minimum version this Rasa version is - able to load models from - example: - version: 1.0.0 - minimum_compatible_version: 1.0.0 - - /status: - get: - security: - - TokenAuth: [] - - JWT: [] - operationId: getStatus - tags: - - Server Information - summary: Status of the Rasa server - description: >- - Information about the server and the currently loaded Rasa model. 
- responses: - 200: - description: Success - content: - application/json: - schema: - type: object - properties: - fingerprint: - type: object - description: Fingerprint of the loaded model - example: - config: - - 7625d69d93053ac8520a544d0852c626 - domain: - - 229b51e41876bbcbbbfbeddf79548d5a - messages: - - cf7eda7edcae128a75ee8c95d3bbd680 - stories: - - b5facea681fd00bc7ecc6818c70d9639 - trained_at: 1556527123.42784 - version: 1.0.0 - model_file: - type: string - description: Path of the loaded model - example: 20190429-103105.tar.gz - num_active_training_jobs: - type: integer - description: Number of running training processes - example: 2 - 401: - $ref: '#/components/responses/401NotAuthenticated' - 403: - $ref: '#/components/responses/403NotAuthorized' - 409: - $ref: '#/components/responses/409Conflict' - - - /conversations/{conversation_id}/tracker: - get: - security: - - TokenAuth: [] - - JWT: [] - operationId: getConversationTracker - tags: - - Tracker - summary: Retrieve a conversations tracker - description: >- - The tracker represents the state of the conversation. - The state of the tracker is created by applying a - sequence of events, which modify the state. These - events can optionally be included in the response. 
- parameters: - - $ref: '#/components/parameters/conversation_id' - - $ref: '#/components/parameters/include_events' - - $ref: '#/components/parameters/until' - responses: - 200: - $ref: '#/components/responses/200Tracker' - 400: - $ref: '#/components/responses/400BadRequest' - 401: - $ref: '#/components/responses/401NotAuthenticated' - 403: - $ref: '#/components/responses/403NotAuthorized' - 409: - $ref: '#/components/responses/409Conflict' - 500: - $ref: '#/components/responses/500ServerError' - - /conversations/{conversation_id}/tracker/events: - post: - security: - - TokenAuth: [] - - JWT: [] - operationId: addConversationTrackerEvents - tags: - - Tracker - summary: Append events to a tracker - description: >- - Appends one or multiple new events to the tracker state of the conversation. - Any existing events will be kept and the new events will be - appended, updating the existing state. - parameters: - - $ref: '#/components/parameters/conversation_id' - - $ref: '#/components/parameters/include_events' - requestBody: - required: true - content: - application/json: - schema: - oneOf: - - $ref: '#/components/schemas/Event' - - type: array - items: - $ref: '#/components/schemas/Event' - responses: - 200: - $ref: '#/components/responses/200Tracker' - 400: - $ref: '#/components/responses/400BadRequest' - 401: - $ref: '#/components/responses/401NotAuthenticated' - 403: - $ref: '#/components/responses/403NotAuthorized' - 409: - $ref: '#/components/responses/409Conflict' - 500: - $ref: '#/components/responses/500ServerError' - - put: - security: - - TokenAuth: [] - - JWT: [] - operationId: replaceConversationTrackerEvents - tags: - - Tracker - summary: Replace a trackers events - description: >- - Replaces all events of a tracker with the passed - list of events. This endpoint should not be used to - modify trackers in a production setup, but rather - for creating training data. 
- parameters: - - $ref: '#/components/parameters/conversation_id' - - $ref: '#/components/parameters/include_events' - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/EventList' - responses: - 200: - $ref: '#/components/responses/200Tracker' - 400: - $ref: '#/components/responses/400BadRequest' - 401: - $ref: '#/components/responses/401NotAuthenticated' - 403: - $ref: '#/components/responses/403NotAuthorized' - 409: - $ref: '#/components/responses/409Conflict' - 500: - $ref: '#/components/responses/500ServerError' - - /conversations/{conversation_id}/story: - get: - security: - - TokenAuth: [] - - JWT: [] - operationId: getConversationStory - tags: - - Tracker - summary: Retrieve an end-to-end story corresponding to a conversation - description: >- - The story represents the whole conversation in end-to-end - format. This can be posted to the '/test/stories' endpoint and used - as a test. - parameters: - - $ref: '#/components/parameters/conversation_id' - - $ref: '#/components/parameters/until' - responses: - 200: - $ref: '#/components/responses/200Story' - 401: - $ref: '#/components/responses/401NotAuthenticated' - 403: - $ref: '#/components/responses/403NotAuthorized' - 409: - $ref: '#/components/responses/409Conflict' - 500: - $ref: '#/components/responses/500ServerError' - - /conversations/{conversation_id}/execute: - post: - security: - - TokenAuth: [] - - JWT: [] - operationId: executeConversationAction - tags: - - Tracker - summary: Run an action in a conversation - deprecated: true - description: >- - DEPRECATED. Runs the action, calling the action server if necessary. - Any responses sent by the executed action will be forwarded - to the channel specified in the output_channel parameter. - If no output channel is specified, any messages that should be - sent to the user will be included in the response of this endpoint. 
- parameters: - - $ref: '#/components/parameters/conversation_id' - - $ref: '#/components/parameters/include_events' - - $ref: '#/components/parameters/output_channel' - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/ActionRequest' - responses: - 200: - description: Success - content: - application/json: - schema: - type: object - properties: - tracker: - $ref: '#/components/schemas/Tracker' - messages: - type: array - items: - $ref: '#/components/schemas/BotMessage' - 400: - $ref: '#/components/responses/400BadRequest' - 401: - $ref: '#/components/responses/401NotAuthenticated' - 403: - $ref: '#/components/responses/403NotAuthorized' - 409: - $ref: '#/components/responses/409Conflict' - 500: - $ref: '#/components/responses/500ServerError' - - /conversations/{conversation_id}/trigger_intent: - post: - security: - - TokenAuth: [] - - JWT: [] - operationId: triggerConversationIntent - tags: - - Tracker - summary: Inject an intent into a conversation - description: >- - Sends a specified intent and list of entities in place of a - user message. The bot then predicts and executes a response action. - Any responses sent by the executed action will be forwarded - to the channel specified in the ``output_channel`` parameter. - If no output channel is specified, any messages that should be - sent to the user will be included in the response of this endpoint. 
- parameters: - - $ref: '#/components/parameters/conversation_id' - - $ref: '#/components/parameters/include_events' - - $ref: '#/components/parameters/output_channel' - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/IntentTriggerRequest' - responses: - 200: - description: Success - content: - application/json: - schema: - type: object - properties: - tracker: - $ref: '#/components/schemas/Tracker' - messages: - type: array - items: - $ref: '#/components/schemas/BotMessage' - 400: - $ref: '#/components/responses/400BadRequest' - 401: - $ref: '#/components/responses/401NotAuthenticated' - 403: - $ref: '#/components/responses/403NotAuthorized' - 409: - $ref: '#/components/responses/409Conflict' - 500: - $ref: '#/components/responses/500ServerError' - - /conversations/{conversation_id}/predict: - post: - security: - - TokenAuth: [] - - JWT: [] - operationId: predictConversationAction - tags: - - Tracker - summary: Predict the next action - description: >- - Runs the conversations tracker through the model's - policies to predict the scores of all actions present - in the model's domain. Actions are returned in the - 'scores' array, sorted on their 'score' values. - The state of the tracker is not modified. - parameters: - - $ref: '#/components/parameters/conversation_id' - responses: - 200: - description: Success - content: - application/json: - schema: - $ref: '#/components/schemas/PredictResult' - 400: - $ref: '#/components/responses/400BadRequest' - 401: - $ref: '#/components/responses/401NotAuthenticated' - 403: - $ref: '#/components/responses/403NotAuthorized' - 409: - $ref: '#/components/responses/409Conflict' - 500: - $ref: '#/components/responses/500ServerError' - - - /conversations/{conversation_id}/messages: - post: - security: - - TokenAuth: [] - - JWT: [] - operationId: addConversationMessage - tags: - - Tracker - summary: Add a message to a tracker - description: >- - Adds a message to a tracker. 
This doesn't trigger - the prediction loop. It will log the message - on the tracker and return, no actions will be - predicted or run. This is often used together with the - predict endpoint. - parameters: - - $ref: '#/components/parameters/conversation_id' - - $ref: '#/components/parameters/include_events' - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/Message' - responses: - 200: - $ref: '#/components/responses/200Tracker' - 400: - $ref: '#/components/responses/400BadRequest' - 401: - $ref: '#/components/responses/401NotAuthenticated' - 403: - $ref: '#/components/responses/403NotAuthorized' - 409: - $ref: '#/components/responses/409Conflict' - 500: - $ref: '#/components/responses/500ServerError' - - /model/train: - post: - security: - - TokenAuth: [] - - JWT: [] - operationId: trainModel - tags: - - Model - summary: Train a Rasa model - description: >- - Trains a new Rasa model. Depending on the data given only a dialogue model, - only a NLU model, or a model combining a trained dialogue model with an - NLU model will be trained. The new model is not loaded by default. - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/TrainingRequest' - responses: - 200: - description: Zipped Rasa model - headers: - filename: - schema: - type: string - description: File name of the trained model. - content: - application/octet-stream: - schema: - $ref: '#/components/schemas/TrainingResult' - 400: - $ref: '#/components/responses/400BadRequest' - 401: - $ref: '#/components/responses/401NotAuthenticated' - 403: - $ref: '#/components/responses/403NotAuthorized' - 500: - $ref: '#/components/responses/500ServerError' - - /model/test/stories: - post: - security: - - TokenAuth: [] - - JWT: [] - operationId: testModelStories - tags: - - Model - summary: Evaluate stories - description: >- - Evaluates one or multiple stories against the currently - loaded Rasa model. 
- parameters: - - $ref: '#/components/parameters/e2e' - requestBody: - required: true - content: - text/markdown: - schema: - $ref: '#/components/schemas/StoriesTrainingData' - responses: - 200: - description: Success - content: - application/json: - schema: - $ref: '#/components/schemas/EvaluationStoriesResult' - 400: - $ref: '#/components/responses/400BadRequest' - 401: - $ref: '#/components/responses/401NotAuthenticated' - 403: - $ref: '#/components/responses/403NotAuthorized' - 409: - $ref: '#/components/responses/409Conflict' - 500: - $ref: '#/components/responses/500ServerError' - - /model/test/intents: - post: - security: - - TokenAuth: [] - - JWT: [] - operationId: testModelIntent - tags: - - Model - summary: Perform an intent evaluation - description: >- - Evaluates intents against the currently loaded Rasa model or the model specified in the query. - parameters: - - $ref: '#/components/parameters/model' - requestBody: - required: true - content: - text/markdown: - schema: - $ref: '#/components/schemas/NLUTrainingData' - responses: - 200: - description: Intent evaluation result - content: - application/json: - schema: - $ref: '#/components/schemas/EvaluationIntentsResult' - 400: - $ref: '#/components/responses/400BadRequest' - 401: - $ref: '#/components/responses/401NotAuthenticated' - 403: - $ref: '#/components/responses/403NotAuthorized' - 409: - $ref: '#/components/responses/409Conflict' - 500: - $ref: '#/components/responses/500ServerError' - - /model/predict: - post: - security: - - TokenAuth: [] - - JWT: [] - operationId: predictModelAction - tags: - - Model - summary: Predict an action on a temporary state - description: >- - Predicts the next action on the tracker state as it is - posted to this endpoint. Rasa will create a temporary - tracker from the provided events and will use it to - predict an action. No messages will be sent and no - action will be run. 
- parameters: - - $ref: '#/components/parameters/include_events' - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/EventList' - responses: - 200: - description: Success - content: - application/json: - schema: - $ref: '#/components/schemas/PredictResult' - 400: - $ref: '#/components/responses/400BadRequest' - 401: - $ref: '#/components/responses/401NotAuthenticated' - 403: - $ref: '#/components/responses/403NotAuthorized' - 409: - $ref: '#/components/responses/409Conflict' - 500: - $ref: '#/components/responses/500ServerError' - - /model/parse: - post: - security: - - TokenAuth: [] - - JWT: [] - operationId: parseModelMessage - tags: - - Model - summary: Parse a message using the Rasa model - description: >- - Predicts the intent and entities of the message - posted to this endpoint. No messages will be stored - to a conversation and no action will be run. This will - just retrieve the NLU parse results. - parameters: - - $ref: '#/components/parameters/emulation_mode' - requestBody: - required: true - content: - application/json: - schema: - type: object - properties: - text: - type: string - description: Message to be parsed - example: "Hello, I am Rasa!" - message_id: - type: string - description: Optional ID for message to be parsed - example: "b2831e73-1407-4ba0-a861-0f30a42a2a5a" - responses: - 200: - description: Success - content: - application/json: - schema: - $ref: '#/components/schemas/ParseResult' - 400: - $ref: '#/components/responses/400BadRequest' - 401: - $ref: '#/components/responses/401NotAuthenticated' - 403: - $ref: '#/components/responses/403NotAuthorized' - 500: - $ref: '#/components/responses/500ServerError' - - /model: - put: - security: - - TokenAuth: [] - - JWT: [] - operationId: replaceModel - tags: - - Model - summary: Replace the currently loaded model - description: >- - Updates the currently loaded model. - First, tries to load the model from the local storage system. 
- Secondly, tries to load the model from the provided model server configuration. - Last, tries to load the model from the provided remote storage. - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/ModelRequest' - responses: - 204: - description: Model was successfully replaced. - 400: - $ref: '#/components/responses/400BadRequest' - 401: - $ref: '#/components/responses/401NotAuthenticated' - 403: - $ref: '#/components/responses/403NotAuthorized' - 500: - $ref: '#/components/responses/500ServerError' - - delete: - security: - - TokenAuth: [] - - JWT: [] - operationId: unloadModel - tags: - - Model - summary: Unload the trained model - description: >- - Unloads the currently loaded trained model from the server. - responses: - 204: - description: Model was sucessfully unloaded. - 401: - $ref: '#/components/responses/401NotAuthenticated' - 403: - $ref: '#/components/responses/403NotAuthorized' - - /domain: - get: - security: - - TokenAuth: [] - - JWT: [] - operationId: getDomain - tags: - - Domain - summary: Retrieve the loaded domain - description: >- - Returns the domain specification the currently loaded - model is using. - responses: - 200: - description: Domain was successfully retrieved. 
- content: - application/json: - schema: - $ref: '#/components/schemas/Domain' - application/yaml: - schema: - $ref: '#/components/schemas/Domain' - 401: - $ref: '#/components/responses/401NotAuthenticated' - 403: - $ref: '#/components/responses/403NotAuthorized' - 406: - $ref: '#/components/responses/406InvalidHeader' - 500: - $ref: '#/components/responses/500ServerError' - - -components: - - securitySchemes: - - TokenAuth: - type: apiKey - in: query - name: token - JWT: - type: http - scheme: bearer - bearerFormat: JWT - - - parameters: - - conversation_id: - in: path - name: conversation_id - description: Id of the conversation - example: default - schema: - type: string - required: true - batch_size: - in: query - name: batch_size - description: Batch size to use for training. - example: 5 - schema: - type: number - default: 5 - required: false - epochs: - in: query - name: epochs - description: Number of epochs to train. - example: 30 - schema: - type: number - default: 30 - required: false - e2e: - in: query - name: e2e - description: Perform an end-to-end evaluation on the posted stories. - example: false - schema: - type: boolean - default: false - required: false - model: - in: query - name: model - description: >- - Model that should be used for evaluation. - If the parameter is set, the model will be - fetched with the currently loaded configuration - setup. However, the currently loaded model - will not be updated. The state of the server - will not change. If the parameter is not set, - the currently loaded model will be used for - the evaluation. - example: rasa-model.tar.gz - schema: - type: string - required: false - include_events: - in: query - name: include_events - description: >- - Specify which events of the tracker the response - should contain. 
- example: AFTER_RESTART - schema: - type: string - default: AFTER_RESTART - enum: - - AFTER_RESTART - - ALL - - APPLIED - - NONE - emulation_mode: - in: query - name: emulation_mode - description: >- - Specify the emulation mode. - example: LUIS - schema: - type: string - enum: - - WIT - - LUIS - - DIALOGFLOW - until: - in: query - name: until - description: >- - All events previous to the passed timestamp will be replayed. - Events that occur exactly at the target time will be included. - example: 1559744410 - schema: - type: number - default: None - required: false - output_channel: - in: query - name: output_channel - description: >- - The bot's utterances will be forwarded to this channel. It uses the credentials - listed in `credentials.yml` to connect. In case the channel does - not support this, the utterances will be returned in the response body. Use - `latest` to try to send the messages to the latest channel the user used. - Currently supported channels are listed in the permitted values for the - parameter. - example: "slack" - schema: - type: string - enum: - - latest - - slack - - callback - - facebook - - rocketchat - - telegram - - twilio - - webexteams - - socketio - - responses: - - 200Tracker: - description: Success - content: - application/json: - schema: - $ref: '#/components/schemas/Tracker' - 200Story: - description: Success - content: - text/markdown: - example: >- - ## story_00055028 - - * greet: hello - - utter_ask_howcanhelp - * inform: I'm looking for a [moderately priced](price:moderate) - [Indian](cuisine) restaurant for [two](people) people - - utter_on_it - - utter_ask_location - 400BadRequest: - description: Bad Request - content: - application/json: - schema: - $ref: '#/components/schemas/Error' - example: - version: "1.0.0" - status: "failure" - reason: "BadRequest" - code: 400 - 401NotAuthenticated: - description: User is not authenticated. 
- content: - application/json: - schema: - $ref: '#/components/schemas/Error' - example: - version: "1.0.0" - status: "failure" - reason: "NotAuthenticated" - message: >- - User is not authenticated to access resource. - code: 401 - 403NotAuthorized: - description: User has insufficient permission. - content: - application/json: - schema: - $ref: '#/components/schemas/Error' - example: - version: "1.0.0" - status: "failure" - reason: "NotAuthorized" - message: >- - User has insufficient permission to access resource. - code: 403 - 406InvalidHeader: - description: Invalid header provided. - content: - application/json: - schema: - $ref: '#/components/schemas/Error' - example: - version: "1.0.0" - status: "failure" - reason: "InvalidHeader" - message: >- - Invalid header was provided with the request. - code: 406 - 409Conflict: - description: The request conflicts with the currently loaded model. - content: - application/json: - schema: - $ref: '#/components/schemas/Error' - example: - version: "1.0.0" - status: "failure" - reason: "Conflict" - message: >- - The request conflicts with the currently loaded model. - code: 409 - 500ServerError: - description: An unexpected error occurred. - content: - application/json: - schema: - $ref: '#/components/schemas/Error' - example: - version: "1.0.0" - status: "ServerError" - message: >- - An unexpected error occurred. - code: 500 - - - schemas: - - ModelRequest: - type: object - properties: - model_file: - type: string - description: Path to model file - example: "/models/20190512.tar.gz" - model_server: - $ref: '#/components/schemas/EndpointConfig' - remote_storage: - description: Name of remote storage system - type: string - example: "aws" - enum: - - aws - - gcs - - azure - - ActionRequest: - type: object - properties: - name: - description: Name of the action to be executed. - type: string - example: utter_greet - policy: - description: Name of the policy that predicted the action. 
- type: string - nullable: true - confidence: - description: Confidence of the prediction. - type: number - nullable: true - example: 0.987232 - required: ["name"] - - IntentTriggerRequest: - type: object - properties: - name: - description: Name of the intent to be executed. - type: string - example: greet - entities: - description: Entities to be passed on. - type: object - nullable: true - example: {"temperature": "high"} - required: ["name"] - - Message: - type: object - properties: - text: - type: string - description: >- - Message text - example: Hello! - sender: - type: string - description: >- - Origin of the message - who sent it - example: user - enum: - - user - parse_data: - $ref: '#/components/schemas/ParseResult' - required: ["text", "sender"] - - Entity: - type: object - description: Entities within a message - properties: - start: - type: integer - description: Char offset of the start - end: - type: integer - description: Char offset of the end - value: - type: string - description: Found value for entity - entity: - type: string - description: Type of the entity - confidence: - type: number - required: ["start", "end", "value", "entity"] - - Intent: - type: object - description: Intent of the text - properties: - confidence: - type: number - description: Confidence of the intent - example: 0.6323 - name: - type: string - description: Intent name - example: greet - required: ["confidence", "name"] - - ParseResult: - type: object - properties: - entities: - type: array - description: Parsed entities - items: - $ref: '#/components/schemas/Entity' - intent: - $ref: '#/components/schemas/Intent' - intent_ranking: - type: array - description: Scores of all intents - items: - $ref: '#/components/schemas/Intent' - text: - type: string - description: Text of the message - example: "Hello!" - description: >- - NLU parser information. If set, message - will not be passed through NLU, but instead - this parsing information will be used. 
- required: ["text"] - - Event: - type: object - properties: - event: - type: string - description: Event name - example: "slot" - timestamp: - type: integer - description: Time of application - example: 1559744410 - required: ["event"] - - EventList: - type: array - items: - $ref: '#/components/schemas/Event' - - Domain: - type: object - description: The bot's domain. - properties: - config: - type: object - description: Addional option - properties: - store_entities_as_slots: - type: boolean - description: Store all entites as slot when found - example: false - intents: - type: array - description: All intent names and properties - items: - $ref: '#/components/schemas/IntentDescription' - entities: - type: array - description: All entity names - items: - type: string - example: ['person', 'location'] - slots: - description: Slot names and configuration - type: object - additionalProperties: - $ref: '#/components/schemas/SlotDescription' - responses: - description: Bot response templates - type: object - additionalProperties: - $ref: '#/components/schemas/TemplateDescription' - actions: - description: Available action names - type: array - items: - type: string - example: ['action_greet', 'action_goodbye', 'action_listen'] - - BotMessage: - type: object - properties: - recipient_id: - type: string - description: Id of the message receiver - text: - type: string - description: Message - image: - type: string - description: Image URL - buttons: - type: array - description: Quick reply buttons - items: - type: object - properties: - title: - type: string - description: Button caption - payload: - type: string - description: Payload to be sent if button is clicked - attachement: - type: array - description: Additional information - items: - type: object - properties: - title: - type: string - description: Attachement caption - payload: - type: string - description: Attachement payload - - Tracker: - type: object - description: Conversation tracker which stores the 
conversation state. - properties: - conversation_id: - type: string - description: Id of the conversation - example: default - slots: - type: array - description: Slot values - items: - $ref: '#/components/schemas/Slot' - latest_message: - $ref: '#/components/schemas/ParseResult' - latest_event_time: - type: number - description: Most recent event time - example: 1537645578.314389 - followup_action: - type: string - description: Deterministic scheduled next action - paused: - type: boolean - description: Bot is pasued - example: false - events: - type: array - description: Event history - items: - $ref: '#/components/schemas/Event' - latest_input_channel: - type: string - description: Communication channel - example: rest - latest_action_name: - type: string - description: Name of last bot action - example: action_listen - active_form: - type: object - description: Name of the active form - properties: - name: - type: string - description: Name of the acive form - example: restaurant_form - - Error: - type: object - properties: - version: - type: string - description: Rasa version - status: - type: string - enum: ["failure"] - description: Status of the requested action - message: - type: string - description: Error message - reason: - type: string - description: Error category - details: - type: object - description: Additional error information - help: - type: string - description: Optional URL to additonal material - code: - type: number - description: HTTP status code - - PredictResult: - type: object - properties: - scores: - type: array - description: Prediction results - items: - type: object - properties: - action: - type: string - description: Action name - example: utter_greet - score: - type: number - description: Assigned score - example: 1.0 - policy: - type: string - description: >- - Policy which predicted the most likely action - example: policy_2_TEDPolicy - tracker: - $ref: '#/components/schemas/Tracker' - - EndpointConfig: - type: object - 
properties: - url: - type: string - description: URL pointing to model - params: - type: object - description: Parameters of request - headers: - type: object - description: HTTP headers - basic_auth: - description: Basic authentification data - type: object - token: - description: Token - type: string - token_name: - description: Name of token - type: string - wait_time_between_pulls: - type: integer - description: Time to wait between pulls from model server - - TrainingRequest: - type: object - properties: - domain: - $ref: '#/components/schemas/DomainFile' - config: - $ref: '#/components/schemas/ConfigFile' - nlu: - $ref: '#/components/schemas/NLUTrainingData' - responses: - $ref: '#/components/schemas/RetrievalIntentsTrainingData' - stories: - $ref: '#/components/schemas/StoriesTrainingData' - force: - type: boolean - description: >- - Force a model training even if the data has not changed - example: false - save_to_default_model_directory: - type: boolean - description: >- - If `true` (default) the trained model will be saved in the default model - directory, if `false` it will be saved in a temporary directory - required: ["config"] - - NLUTrainingData: - type: string - description: Rasa NLU training data in markdown format - example: >- - ## intent:greet - - - hey - - - hello - - - hi - - ## intent:goodbye - - - bye - - - goodbye - - - have a nice day - - - see you - - ## intent:affirm - - - yes - - - indeed - - ## intent:deny - - - no - - - never - - ## intent:mood_great - - - perfect - - - very good - - - great - - ## intent:mood_unhappy - - - sad - - - not good - - - unhappy - - RetrievalIntentsTrainingData: - type: string - description: Rasa response texts for retrieval intents in markdown format - example: >- - ## ask name - * chitchat/ask_name - - my name is Sara, Rasa's documentation bot! 
- - ## ask weather - * chitchat/ask_weather - - it's always sunny where I live - - StoriesTrainingData: - type: string - description: Rasa Core stories in markdown format - example: >- - ## happy path - - * greet - - - utter_greet - - * mood_great - - - utter_happy - - ## sad path 1 - - * greet - - - utter_greet - - * mood_unhappy - - - utter_cheer_up - - - utter_did_that_help - - * affirm - - - utter_happy - - ## sad path 2 - - * greet - - - utter_greet - - * mood_unhappy - - - utter_cheer_up - - - utter_did_that_help - - * deny - - - utter_goodbye - - ## say goodbye - - * goodbye - - - utter_goodbye - - DomainFile: - type: string - description: Rasa domain in plain text - example: >- - intents: - - greet - - goodbye - - affirm - - deny - - mood_great - - mood_unhappy - - responses: - utter_greet: - - text: "Hey! How are you?" - - utter_cheer_up: - - text: "Here is something to cheer you up:" - image: "https://i.imgur.com/nGF1K8f.jpg" - - utter_did_that_help: - - text: "Did that help you?" - - utter_happy: - - text: "Great carry on!" 
- - utter_goodbye: - - text: "Bye" - - ConfigFile: - type: string - description: Rasa config in plain text - example: >- - language: en - - pipeline: supervised_embeddings - - policies: - - name: MemoizationPolicy - - name: TEDPolicy - - TrainingResult: - type: string - format: binary - - EvaluationIntentsResult: - type: object - properties: - intent_evaluation: - type: object - description: Rasa NLU intent evaluation - properties: - report: - type: string - example: >- - # intent evaluation report - precision recall f1-score support - - goodbye 1.00 1.00 1.00 4 - greet 1.00 1.00 1.00 6 - affirm 1.00 1.00 1.00 5 - deny 1.00 1.00 1.00 6 - mood_great 1.00 1.00 1.00 8 - mood_unhappy 1.00 1.00 1.00 10 - - micro avg 1.00 1.00 1.00 39 - macro avg 1.00 1.00 1.00 39 - weighted avg 1.00 1.00 1.00 39 - accuracy: - type: number - example: 0.19047619047619047 - f1_score: - type: number - example: 0.06095238095238095 - precision: - type: number - example: 0.036281179138321996 - predictions: - type: array - items: - type: object - properties: - intent: - type: string - example: greet - predicted: - type: string - example: greet - text: - type: string - example: "hey" - confidence: - type: number - example: 0.9973567 - entity_evaluation: - type: object - description: Rasa NLU entity evaluation - properties: - extractor_name: - type: object - description: Entity evaluation result for entity extractor - example: CRFEntityExtractor - properties: - report: - type: string - example: >- - # entity evaluation report - precision recall f1-score support - - no_entity 1.00 1.00 1.00 70 - - micro avg 1.00 1.00 1.00 70 - macro avg 1.00 1.00 1.00 70 - weighted avg 1.00 1.00 1.00 70 - precision: - type: number - example: 0.9769792 - f1_score: - type: number - example: 0.967869 - accuracy: - type: number - example: 0.978567 - - EvaluationStoriesResult: - type: object - properties: - actions: - type: array - items: - type: object - properties: - action: - type: string - description: Name of the 
actual action - example: utter_ask_howcanhelp - predicted: - type: string - description: Name of the predicted action - example: utter_ask_howcanhelp - policy: - type: string - description: Machine-learning policy used in the prediction - example: policy_0_MemoizationPolicy - confidence: - type: string - description: Confidence score of the prediction - example: 1.0 - description: >- - Accuracy of the classification, - http://scikit-learn.org/stable/modules/generated/sklearn.metrics.accuracy_score.html - is_end_to_end_evaluation: - type: boolean - description: True if evaluation is end-to-end, false otherwise - example: true - precision: - type: number - description: >- - Precision of the classification, see - http://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_score.html - example: 1.0 - f1: - type: number - description: >- - F1 score of the classification, - http://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_score.html - example: 0.9333333333333333 - accuracy: - type: number - description: >- - Accuracy of the classification, - http://scikit-learn.org/stable/modules/generated/sklearn.metrics.accuracy_score.html - example: 0.9 - in_training_data_fraction: - type: number - description: >- - Fraction of stories that are present in the training data of the - model loaded at evaluation time. 
- example: 0.8571428571428571 - report: - type: string - description: >- - Sklearn classifcation report, see - http://scikit-learn.org/stable/modules/generated/sklearn.metrics.classification_report.html - example: >- - # classification report - precision recall f1-score support - action_listen 1.00 1.00 1.00 3 - greet 1.00 1.00 1.00 1 - inform 1.00 1.00 1.00 2 - utter_ask_howcanhelp 1.00 1.00 1.00 1 - utter_ask_location 1.00 1.00 1.00 1 - utter_ask_numpeople 0.00 0.00 0.00 0 - utter_on_it 1.00 0.50 0.67 2 - - avg / total 1.00 0.90 0.93 10 - - Slot: - type: object - additionalProperties: - $ref: '#/components/schemas/SlotValue' - example: - slot_name: slot_value - - SlotValue: - oneOf: - - type: string - - type: array - items: - type: string - - SlotDescription: - type: object - properties: - auto_fill: - type: boolean - initial_value: - type: string - nullable: true - type: - type: string - values: - type: array - items: - type: string - required: ['type', 'auto_fill'] - - TemplateDescription: - type: object - properties: - text: - type: string - description: Template text - required: ['text'] - - IntentDescription: - type: object - additionalProperties: - type: object - properties: - use_entities: - type: boolean diff --git a/docs/_templates/page.html b/docs/_templates/page.html deleted file mode 100644 index 65f134ac0dd4..000000000000 --- a/docs/_templates/page.html +++ /dev/null @@ -1,15 +0,0 @@ -{% extends "!page.html" %} - -{% block footer %} -{{ super() }} - -{% endblock %} diff --git a/docs/api/action-server.rst b/docs/api/action-server.rst deleted file mode 100644 index e0b4a0be9b51..000000000000 --- a/docs/api/action-server.rst +++ /dev/null @@ -1,14 +0,0 @@ -:desc: Check out the API docs for open source chatbot framework Rasa's - action server, which allows you to define your own custom actions. - -:pagetype: full - -.. _action-server: - -Action Server -============= - -.. 
raw:: html - - - diff --git a/docs/api/agent.rst b/docs/api/agent.rst deleted file mode 100644 index a1a658f7273a..000000000000 --- a/docs/api/agent.rst +++ /dev/null @@ -1,15 +0,0 @@ -:desc: The Agent class provides a central interface for performing crucial - operations like training, handling messages, loading a model, and - action prediction. - -.. _agent: - -Agent -===== - -.. edit-link:: - :url: https://github.com/RasaHQ/rasa/edit/master/rasa/core/agent.py - :text: SUGGEST DOCSTRING EDITS - -.. autoclass:: rasa.core.agent.Agent - :members: diff --git a/docs/api/core-featurization.rst b/docs/api/core-featurization.rst deleted file mode 100644 index 2267b6882f80..000000000000 --- a/docs/api/core-featurization.rst +++ /dev/null @@ -1,119 +0,0 @@ -:desc: Find out how to apply machine learning algorithms to conversational AI - using vector representations of conversations with Rasa. - -.. _featurization_conversations: - -Featurization of Conversations -============================== - -.. edit-link:: - -In order to apply machine learning algorithms to conversational AI, we need -to build up vector representations of conversations. - -Each story corresponds to a tracker which consists of the states of the -conversation just before each action was taken. - - -State Featurizers -^^^^^^^^^^^^^^^^^ -Every event in a trackers history creates a new state (e.g. running a bot -action, receiving a user message, setting slots). Featurizing a single state -of the tracker has a couple steps: - -1. **Tracker provides a bag of active features**: - - features indicating intents and entities, if this is the first - state in a turn, e.g. it's the first action we will take after - parsing the user's message. (e.g. - ``[intent_restaurant_search, entity_cuisine]`` ) - - features indicating which slots are currently defined, e.g. - ``slot_location`` if the user previously mentioned the area - they're searching for restaurants. 
- - features indicating the results of any API calls stored in - slots, e.g. ``slot_matches`` - - features indicating what the last action was (e.g. - ``prev_action_listen``) - -2. **Convert all the features into numeric vectors**: - - We use the ``X, y`` notation that's common for supervised learning, - where ``X`` is an array of shape - ``(num_data_points, time_dimension, num_input_features)``, - and ``y`` is an array of shape ``(num_data_points, num_bot_features)`` - or ``(num_data_points, time_dimension, num_bot_features)`` - containing the target class labels encoded as one-hot vectors. - - The target labels correspond to actions taken by the bot. - To convert the features into vector format, there are different - featurizers available: - - - ``BinarySingleStateFeaturizer`` creates a binary one-hot encoding: - The vectors ``X, y`` indicate a presence of a certain intent, - entity, previous action or slot e.g. ``[0 0 1 0 0 1 ...]``. - - - ``LabelTokenizerSingleStateFeaturizer`` creates a vector - based on the feature label: - All active feature labels (e.g. ``prev_action_listen``) are split - into tokens and represented as a bag-of-words. For example, actions - ``utter_explain_details_hotel`` and - ``utter_explain_details_restaurant`` will have 3 features in - common, and differ by a single feature indicating a domain. - - Labels for user inputs (intents, entities) and bot actions - are featurized separately. Each label in the two categories - is tokenized on a special character ``split_symbol`` - (e.g. ``action_search_restaurant = {action, search, restaurant}``), - creating two vocabularies. A bag-of-words representation - is then created for each label using the appropriate vocabulary. - The slots are featurized as binary vectors, indicating - their presence or absence at each step of the dialogue. - - -.. 
note:: - - If the domain defines the possible ``actions``, - ``[ActionGreet, ActionGoodbye]``, - ``4`` additional default actions are added: - ``[ActionListen(), ActionRestart(), - ActionDefaultFallback(), ActionDeactivateForm()]``. - Therefore, label ``0`` indicates default action listen, label ``1`` - default restart, label ``2`` a greeting and ``3`` indicates goodbye. - - -Tracker Featurizers -^^^^^^^^^^^^^^^^^^^ - -It's often useful to include a bit more history than just the current state -when predicting an action. The ``TrackerFeaturizer`` iterates over tracker -states and calls a ``SingleStateFeaturizer`` for each state. There are two -different tracker featurizers: - -1. Full Dialogue ----------------- - -``FullDialogueTrackerFeaturizer`` creates numerical representation of -stories to feed to a recurrent neural network where the whole dialogue -is fed to a network and the gradient is backpropagated from all time steps. -Therefore, ``X`` is an array of shape -``(num_stories, max_dialogue_length, num_input_features)`` and -``y`` is an array of shape -``(num_stories, max_dialogue_length, num_bot_features)``. -The smaller dialogues are padded with ``-1`` for all features, indicating -no values for a policy. - -2. Max History --------------- - -``MaxHistoryTrackerFeaturizer`` creates an array of previous tracker -states for each bot action or utterance, with the parameter -``max_history`` defining how many states go into each row in ``X``. -Deduplication is performed to filter out duplicated turns (bot actions -or bot utterances) in terms of their previous states. Hence ``X`` -has shape ``(num_unique_turns, max_history, num_input_features)`` -and ``y`` is an array of shape ``(num_unique_turns, num_bot_features)``. - -For some algorithms a flat feature vector is needed, so ``X`` -should be reshaped to -``(num_unique_turns, max_history * num_input_features)``. If numeric -target class labels are needed instead of one-hot vectors, use -``y.argmax(axis=-1)``. 
diff --git a/docs/api/custom-nlu-components.rst b/docs/api/custom-nlu-components.rst deleted file mode 100644 index aed371dae47b..000000000000 --- a/docs/api/custom-nlu-components.rst +++ /dev/null @@ -1,72 +0,0 @@ -:desc: Create custom components to create additional features like sentiment - analysis to integrate with open source bot framework Rasa. - -.. _custom-nlu-components: - -Custom NLU Components -===================== - -.. edit-link:: - -You can create a custom component to perform a specific task which NLU doesn't currently offer (for example, sentiment analysis). -Below is the specification of the :class:`rasa.nlu.components.Component` class with the methods you'll need to implement. - -.. note:: - There is a detailed tutorial on building custom components `here - `_. - - -You can add a custom component to your pipeline by adding the module path. -So if you have a module called ``sentiment`` -containing a ``SentimentAnalyzer`` class: - - .. code-block:: yaml - - pipeline: - - name: "sentiment.SentimentAnalyzer" - - -Also be sure to read the section on the :ref:`component-lifecycle`. - -To get started, you can use this skeleton that contains the most important -methods that you should implement: - -.. literalinclude:: ../../tests/nlu/example_component.py - :language: python - :linenos: - -.. note:: - If you create a custom tokenizer you should implement the methods of ``rasa.nlu.tokenizers.tokenizer.Tokenizer``. - The ``train`` and ``process`` methods are already implemented and you simply need to overwrite the ``tokenize`` - method. - -.. note:: - If you create a custom featurizer you can return two different kind of features: sequence features and sentence - features. The sequence features are a matrix of size ``(number-of-tokens x feature-dimension)``, e.g. - the matrix contains a feature vector for every token in the sequence. - The sentence features are represented by a matrix of size ``(1 x feature-dimension)``. - -Component -^^^^^^^^^ - -.. 
autoclass:: rasa.nlu.components.Component - - .. automethod:: required_components - - .. automethod:: required_packages - - .. automethod:: create - - .. automethod:: provide_context - - .. automethod:: train - - .. automethod:: process - - .. automethod:: persist - - .. automethod:: prepare_partial_processing - - .. automethod:: partially_process - - .. automethod:: can_handle_language diff --git a/docs/api/event-brokers.rst b/docs/api/event-brokers.rst deleted file mode 100644 index c75ece399b36..000000000000 --- a/docs/api/event-brokers.rst +++ /dev/null @@ -1,284 +0,0 @@ -:desc: Find out how open source chatbot framework Rasa allows - you to stream events to a message broker. - -.. _event-brokers: - -Event Brokers -============= - -.. edit-link:: - -An event broker allows you to connect your running assistant to other services that process the data coming -in from conversations. For example, you could `connect your live assistant to -Rasa X `_ -to review and annotate conversations or forward messages to an external analytics -service. The event broker publishes messages to a message streaming service, -also known as a message broker, to forward Rasa :ref:`events` from the Rasa server to other services. - -.. contents:: - :local: - :depth: 1 - -Format ------- - -All events are streamed to the broker as serialized dictionaries every time -the tracker updates its state. An example event emitted from the ``default`` -tracker looks like this: - -.. code-block:: json - - { - "sender_id": "default", - "timestamp": 1528402837.617099, - "event": "bot", - "text": "what your bot said", - "data": "some data about e.g. attachments" - "metadata" { - "a key": "a value", - } - } - -The ``event`` field takes the event's ``type_name`` (for more on event -types, check out the :ref:`events` docs). - - -.. 
_event-brokers-pika: - -Pika Event Broker ------------------ - -The example implementation we're going to show you here uses -`Pika `_ , the Python client library for -`RabbitMQ `_. - -.. contents:: - :local: - -Adding a Pika Event Broker Using the Endpoint Configuration -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -You can instruct Rasa to stream all events to your Pika event broker by adding an ``event_broker`` section to your -``endpoints.yml``: - -.. literalinclude:: ../../data/test_endpoints/event_brokers/pika_endpoint.yml - -Rasa will automatically start streaming events when you restart the Rasa server. - - -Adding a Pika Event Broker in Python -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Here is how you add it using Python code: - -.. code-block:: python - - from rasa.core.brokers.pika import PikaEventBroker - from rasa.core.tracker_store import InMemoryTrackerStore - - pika_broker = PikaEventBroker('localhost', - 'username', - 'password', - queues=['rasa_events']) - - tracker_store = InMemoryTrackerStore(domain=domain, event_broker=pika_broker) - - -Implementing a Pika Event Consumer -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -You need to have a RabbitMQ server running, as well as another application -that consumes the events. This consumer to needs to implement Pika's -``start_consuming()`` method with a ``callback`` action. Here's a simple -example: - -.. code-block:: python - - import json - import pika - - - def _callback(self, ch, method, properties, body): - # Do something useful with your incoming message body here, e.g. 
- # saving it to a database - print('Received event {}'.format(json.loads(body))) - - if __name__ == '__main__': - - # RabbitMQ credentials with username and password - credentials = pika.PlainCredentials('username', 'password') - - # Pika connection to the RabbitMQ host - typically 'rabbit' in a - # docker environment, or 'localhost' in a local environment - connection = pika.BlockingConnection( - pika.ConnectionParameters('rabbit', credentials=credentials)) - - # start consumption of channel - channel = connection.channel() - channel.basic_consume(_callback, - queue='rasa_events', - no_ack=True) - channel.start_consuming() - -Kafka Event Broker ------------------- - -It is possible to use `Kafka `_ as main broker for your -events. In this example we are going to use the `python-kafka `_ library, a Kafka client written in Python. - -.. contents:: - :local: - -Adding a Kafka Event Broker Using the Endpoint Configuration -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -You can instruct Rasa to stream all events to your Kafka event broker by adding an ``event_broker`` section to your -``endpoints.yml``. - -Using ``SASL_PLAINTEXT`` protocol the endpoints file must have the following entries: - -.. literalinclude:: ../../data/test_endpoints/event_brokers/kafka_plaintext_endpoint.yml - -If using SSL protocol, the endpoints file should look like: - -.. literalinclude:: ../../data/test_endpoints/event_brokers/kafka_ssl_endpoint.yml - -Adding a Kafka Broker in Python -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The code below shows an example on how to instantiate a Kafka producer in you script. - -.. 
code-block:: python - - from rasa.core.brokers.kafka import KafkaEventBroker - from rasa.core.tracker_store import InMemoryTrackerStore - - kafka_broker = KafkaEventBroker(host='localhost:9092', - topic='rasa_events') - - tracker_store = InMemoryTrackerStore(domain=domain, event_broker=kafka_broker) - - -The host variable can be either a list of brokers addresses or a single one. -If only one broker address is available, the client will connect to it and -request the cluster Metadata. -Therefore, the remain brokers in the cluster can be discovered -automatically through the data served by the first connected broker. - -To pass more than one broker address as argument, they must be passed in a -list of strings. e.g.: - -.. code-block:: python - - kafka_broker = KafkaEventBroker(host=['kafka_broker_1:9092', - 'kafka_broker_2:2030', - 'kafka_broker_3:9092'], - topic='rasa_events') - -Authentication and Authorization -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Rasa's Kafka producer accepts two types of security protocols - ``SASL_PLAINTEXT`` and ``SSL``. - -For development environment, or if the brokers servers and clients are located -into the same machine, you can use simple authentication with ``SASL_PLAINTEXT``. -By using this protocol, the credentials and messages exchanged between the clients and servers -will be sent in plaintext. Thus, this is not the most secure approach, but since it's simple -to configure, it is useful for simple cluster configurations. -``SASL_PLAINTEXT`` protocol requires the setup of the ``username`` and ``password`` -previously configured in the broker server. - -.. 
code-block:: python - - kafka_broker = KafkaEventBroker(host='kafka_broker:9092', - sasl_plain_username='kafka_username', - sasl_plain_password='kafka_password', - security_protocol='SASL_PLAINTEXT', - topic='rasa_events') - - -If the clients or the brokers in the kafka cluster are located in different -machines, it's important to use ssl protocol to assure encryption of data and client -authentication. After generating valid certificates for the brokers and the -clients, the path to the certificate and key generated for the producer must -be provided as arguments, as well as the CA's root certificate. - -.. code-block:: python - - kafka_broker = KafkaEventBroker(host='kafka_broker:9092', - ssl_cafile='CARoot.pem', - ssl_certfile='certificate.pem', - ssl_keyfile='key.pem', - ssl_check_hostname=True, - security_protocol='SSL', - topic='rasa_events') - -If the ``ssl_check_hostname`` parameter is enabled, the clients will verify -if the broker's hostname matches the certificate. It's used on client's connections -and inter-broker connections to prevent man-in-the-middle attacks. - - -Implementing a Kafka Event Consumer -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The parameters used to create a Kafka consumer are the same used on the producer creation, -according to the security protocol being used. The following implementation shows an example: - -.. code-block:: python - - from kafka import KafkaConsumer - from json import loads - - consumer = KafkaConsumer('rasa_events', - bootstrap_servers=['localhost:29093'], - value_deserializer=lambda m: json.loads(m.decode('utf-8')), - security_protocol='SSL', - ssl_check_hostname=False, - ssl_cafile='CARoot.pem', - ssl_certfile='certificate.pem', - ssl_keyfile='key.pem') - - for message in consumer: - print(message.value) - -SQL Event Broker ----------------- - -It is possible to use an SQL database as an event broker. 
Connections to databases are established using -`SQLAlchemy `_, a Python library which can interact with many -different types of SQL databases, such as `SQLite `_, -`PostgreSQL `_ and more. The default Rasa installation allows connections to SQLite -and PostgreSQL databases, to see other options, please see the -`SQLAlchemy documentation on SQL dialects `_. - - -Adding a SQL Event Broker Using the Endpoint Configuration -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -To instruct Rasa to save all events to your SQL event broker, add an ``event_broker`` section to your -``endpoints.yml``. For example, a valid SQLite configuration -could look like the following: - -.. code-block:: yaml - - event_broker: - type: SQL - dialect: sqlite - db: events.db - -PostgreSQL databases can be used as well: - -.. code-block:: yaml - - event_broker: - type: SQL - url: 127.0.0.1 - port: 5432 - dialect: postgresql - username: myuser - password: mypassword - db: mydatabase - -With this configuration applied, Rasa will create a table called ``events`` on the database, -where all events will be added. diff --git a/docs/api/events.rst b/docs/api/events.rst deleted file mode 100644 index 7bf83d67dd9e..000000000000 --- a/docs/api/events.rst +++ /dev/null @@ -1,320 +0,0 @@ -:desc: Use events in open source library Rasa Core to support functionalities - like resetting slots, scheduling reminder or pausing a conversation. - -.. _events: - -Events -====== - -.. edit-link:: - -Conversations in Rasa are represented as a sequence of events. -This page lists the event types defined in Rasa Core. - -.. note:: - If you are using the Rasa SDK to write custom actions in python, - you need to import the events from ``rasa_sdk.events``, not from - ``rasa.core.events``. If you are writing actions in another language, - your events should be formatted like the JSON objects on this page. - - - -.. 
contents:: - :local: - -General Purpose Events ----------------------- - -Set a Slot -~~~~~~~~~~ - -:Short: Event to set a slot on a tracker -:JSON: - .. literalinclude:: ../../tests/core/test_events.py - :start-after: # DOCS MARKER SetSlot - :dedent: 4 - :end-before: # DOCS END -:Class: - .. autoclass:: rasa.core.events.SlotSet - -:Effect: - When added to a tracker, this is the code used to update the tracker: - - .. literalinclude:: ../../rasa/core/events/__init__.py - :dedent: 4 - :pyobject: SlotSet.apply_to - - -Restart a conversation -~~~~~~~~~~~~~~~~~~~~~~ - -:Short: Resets anything logged on the tracker. -:JSON: - .. literalinclude:: ../../tests/core/test_events.py - :start-after: # DOCS MARKER Restarted - :dedent: 4 - :end-before: # DOCS END -:Class: - .. autoclass:: rasa.core.events.Restarted - -:Effect: - When added to a tracker, this is the code used to update the tracker: - - .. literalinclude:: ../../rasa/core/events/__init__.py - :dedent: 4 - :pyobject: Restarted.apply_to - - -Reset all Slots -~~~~~~~~~~~~~~~ - -:Short: Resets all the slots of a conversation. -:JSON: - .. literalinclude:: ../../tests/core/test_events.py - :start-after: # DOCS MARKER AllSlotsReset - :dedent: 4 - :end-before: # DOCS END -:Class: - .. autoclass:: rasa.core.events.AllSlotsReset - -:Effect: - When added to a tracker, this is the code used to update the tracker: - - .. literalinclude:: ../../rasa/core/events/__init__.py - :dedent: 4 - :pyobject: AllSlotsReset.apply_to - - -Schedule a reminder -~~~~~~~~~~~~~~~~~~~ - -:Short: Schedule an intent to be triggered in the future. -:JSON: - .. literalinclude:: ../../tests/core/test_events.py - :lines: 1- - :start-after: # DOCS MARKER ReminderScheduled - :dedent: 4 - :end-before: # DOCS END -:Class: - .. autoclass:: rasa.core.events.ReminderScheduled - -:Effect: - When added to a tracker, Rasa Core will schedule the intent (and entities) to be - triggered in the future, in place of a user input. 
You can link - this intent to an action of your choice using the :ref:`mapping-policy`. - - -Cancel a reminder -~~~~~~~~~~~~~~~~~~~ - -:Short: Cancel one or more reminders. -:JSON: - .. literalinclude:: ../../tests/core/test_events.py - :lines: 1- - :start-after: # DOCS MARKER ReminderCancelled - :dedent: 4 - :end-before: # DOCS END -:Class: - .. autoclass:: rasa.core.events.ReminderCancelled - -:Effect: - When added to a tracker, Rasa Core will cancel any outstanding reminders that - match the ``ReminderCancelled`` event. For example, - - - ``ReminderCancelled(intent="greet")`` cancels all reminders with intent ``greet`` - - ``ReminderCancelled(entities={...})`` cancels all reminders with the given entities - - ``ReminderCancelled("...")`` cancels the one unique reminder with the given name - - ``ReminderCancelled()`` cancels all reminders - - -Pause a conversation -~~~~~~~~~~~~~~~~~~~~ - -:Short: Stops the bot from responding to messages. Action prediction - will be halted until resumed. -:JSON: - .. literalinclude:: ../../tests/core/test_events.py - :start-after: # DOCS MARKER ConversationPaused - :dedent: 4 - :end-before: # DOCS END -:Class: - .. autoclass:: rasa.core.events.ConversationPaused - -:Effect: - When added to a tracker, this is the code used to update the tracker: - - .. literalinclude:: ../../rasa/core/events/__init__.py - :dedent: 4 - :pyobject: ConversationPaused.apply_to - - -Resume a conversation -~~~~~~~~~~~~~~~~~~~~~ - -:Short: Resumes a previously paused conversation. The bot will start - predicting actions again. -:JSON: - .. literalinclude:: ../../tests/core/test_events.py - :start-after: # DOCS MARKER ConversationResumed - :dedent: 4 - :end-before: # DOCS END -:Class: - .. autoclass:: rasa.core.events.ConversationResumed - -:Effect: - When added to a tracker, this is the code used to update the tracker: - - .. 
literalinclude:: ../../rasa/core/events/__init__.py - :dedent: 4 - :pyobject: ConversationResumed.apply_to - - -Force a followup action -~~~~~~~~~~~~~~~~~~~~~~~ - -:Short: Instead of predicting the next action, force the next action - to be a fixed one. -:JSON: - .. literalinclude:: ../../tests/core/test_events.py - :start-after: # DOCS MARKER FollowupAction - :dedent: 4 - :end-before: # DOCS END -:Class: - .. autoclass:: rasa.core.events.FollowupAction - -:Effect: - When added to a tracker, this is the code used to update the tracker: - - .. literalinclude:: ../../rasa/core/events/__init__.py - :dedent: 4 - :pyobject: FollowupAction.apply_to - - -Automatically tracked events ----------------------------- - - -User sent message -~~~~~~~~~~~~~~~~~ - -:Short: Message a user sent to the bot. -:JSON: - .. literalinclude:: ../../tests/core/test_events.py - :lines: 1- - :start-after: # DOCS MARKER UserUttered - :dedent: 4 - :end-before: # DOCS END -:Class: - .. autoclass:: rasa.core.events.UserUttered - -:Effect: - When added to a tracker, this is the code used to update the tracker: - - .. literalinclude:: ../../rasa/core/events/__init__.py - :dedent: 4 - :pyobject: UserUttered.apply_to - - -Bot responded message -~~~~~~~~~~~~~~~~~~~~~ - -:Short: Message a bot sent to the user. -:JSON: - .. literalinclude:: ../../tests/core/test_events.py - :start-after: # DOCS MARKER BotUttered - :dedent: 4 - :end-before: # DOCS END -:Class: - .. autoclass:: rasa.core.events.BotUttered - -:Effect: - When added to a tracker, this is the code used to update the tracker: - - .. literalinclude:: ../../rasa/core/events/__init__.py - :dedent: 4 - :pyobject: BotUttered.apply_to - - -Undo a user message -~~~~~~~~~~~~~~~~~~~ - -:Short: Undoes all side effects that happened after the last user message - (including the ``user`` event of the message). -:JSON: - .. 
literalinclude:: ../../tests/core/test_events.py - :start-after: # DOCS MARKER UserUtteranceReverted - :dedent: 4 - :end-before: # DOCS END -:Class: - .. autoclass:: rasa.core.events.UserUtteranceReverted - -:Effect: - When added to a tracker, this is the code used to update the tracker: - - .. literalinclude:: ../../rasa/core/events/__init__.py - :dedent: 4 - :pyobject: UserUtteranceReverted.apply_to - - -Undo an action -~~~~~~~~~~~~~~ - -:Short: Undoes all side effects that happened after the last action - (including the ``action`` event of the action). -:JSON: - .. literalinclude:: ../../tests/core/test_events.py - :start-after: # DOCS MARKER ActionReverted - :dedent: 4 - :end-before: # DOCS END -:Class: - .. autoclass:: rasa.core.events.ActionReverted - -:Effect: - When added to a tracker, this is the code used to update the tracker: - - .. literalinclude:: ../../rasa/core/events/__init__.py - :dedent: 4 - :pyobject: ActionReverted.apply_to - - -Log an executed action -~~~~~~~~~~~~~~~~~~~~~~ - -:Short: Logs an action the bot executed to the conversation. Events that - action created are logged separately. -:JSON: - .. literalinclude:: ../../tests/core/test_events.py - :start-after: # DOCS MARKER ActionExecuted - :dedent: 4 - :end-before: # DOCS END -:Class: - .. autoclass:: rasa.core.events.ActionExecuted - -:Effect: - When added to a tracker, this is the code used to update the tracker: - - .. literalinclude:: ../../rasa/core/events/__init__.py - :dedent: 4 - :pyobject: ActionExecuted.apply_to - -Start a new conversation session -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -:Short: Marks the beginning of a new conversation session. Resets the tracker and - triggers an ``ActionSessionStart`` which by default applies the existing - ``SlotSet`` events to the new session. - -:JSON: - .. literalinclude:: ../../tests/core/test_events.py - :start-after: # DOCS MARKER SessionStarted - :dedent: 4 - :end-before: # DOCS END -:Class: - .. 
autoclass:: rasa.core.events.SessionStarted - -:Effect: - When added to a tracker, this is the code used to update the tracker: - - .. literalinclude:: ../../rasa/core/events/__init__.py - :dedent: 4 - :pyobject: SessionStarted.apply_to diff --git a/docs/api/http-api.rst b/docs/api/http-api.rst deleted file mode 100644 index 9bdf0adb9c99..000000000000 --- a/docs/api/http-api.rst +++ /dev/null @@ -1,14 +0,0 @@ -:desc: Read about Rasa's HTTP API that has endpoints for conversations, - training models, and configuring your bot. - -:pagetype: full - -.. _http-api: - -HTTP API -======== - -.. raw:: html - - - \ No newline at end of file diff --git a/docs/api/jupyter-notebooks.rst b/docs/api/jupyter-notebooks.rst deleted file mode 100644 index dab21d0f5e38..000000000000 --- a/docs/api/jupyter-notebooks.rst +++ /dev/null @@ -1,143 +0,0 @@ -:desc: Learn how to integrate open source chatbot platform Rasa into - Jupyter notebooks, alongside all your machine learning code. - -.. _jupyter-notebooks: - -Jupyter Notebooks -================= - -.. edit-link:: - -This page contains the most important methods for using Rasa in a Jupyter notebook. - -Running asynchronous Rasa code in Jupyter Notebooks requires an extra requirement, -since Jupyter Notebooks already run on event loops. Install this requirement in -the command line before launching jupyter: - -.. code-block:: bash - - pip install nest_asyncio - -Then in the first cell of your notebook, include: - -.. runnable:: - - import nest_asyncio - - nest_asyncio.apply() - print("Event loop ready.") - - -First, you need to create a project if you don't already have one. -To do this, run this cell, which will create the ``test-project`` directory and make it -your working directory: - -.. 
runnable:: - - from rasa.cli.scaffold import create_initial_project - import os - - project = "test-project" - create_initial_project(project) - - # move into project directory and show files - os.chdir(project) - print(os.listdir(".")) - - -To train a model, you will have to tell the ``train`` function -where to find the relevant files. -To define variables that contain these paths, run: - - -.. runnable:: - - config = "config.yml" - training_files = "data/" - domain = "domain.yml" - output = "models/" - print(config, training_files, domain, output) - - - - -Train a Model -~~~~~~~~~~~~~ - -Now we can train a model by passing in the paths to the ``rasa.train`` function. -Note that the training files are passed as a list. -When training has finished, ``rasa.train`` returns the path where the trained model has been saved. - - - -.. runnable:: - - import rasa - - model_path = rasa.train(domain, config, [training_files], output) - print(model_path) - - - - -Chat with your assistant -~~~~~~~~~~~~~~~~~~~~~~~~ - -To start chatting to an assistant, call the ``chat`` function, passing -in the path to your saved model: - - -.. runnable:: - - from rasa.jupyter import chat - chat(model_path) - - - -Evaluate your model against test data -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Rasa has a convenience function for getting your training data. -Rasa's ``get_core_nlu_directories`` is a function which -recursively finds all the stories and NLU data files in a directory -and copies them into two temporary directories. -The return values are the paths to these newly created directories. - -.. runnable:: - - import rasa.data as data - stories_directory, nlu_data_directory = data.get_core_nlu_directories(training_files) - print(stories_directory, nlu_data_directory) - - - -To test your model, call the ``test`` function, passing in the path -to your saved model and directories containing the stories and nlu data -to evaluate on. - -.. 
runnable:: - - rasa.test(model_path, stories_directory, nlu_data_directory) - print("Done testing.") - - -The results of the core evaluation will be written to a file called ``results``. -NLU errors will be reported to ``errors.json``. -Together, they contain information about the accuracy of your model's -predictions and other metrics. - -.. runnable:: - - if os.path.isfile("errors.json"): - print("NLU Errors:") - print(open("errors.json").read()) - else: - print("No NLU errors.") - - if os.path.isdir("results"): - print("\n") - print("Core Errors:") - print(open("results/failed_stories.md").read()) - -.. juniper:: - :language: python diff --git a/docs/api/lock-stores.rst b/docs/api/lock-stores.rst deleted file mode 100644 index be72e4486a25..000000000000 --- a/docs/api/lock-stores.rst +++ /dev/null @@ -1,69 +0,0 @@ -:desc: Messages that are being processed lock Rasa for a given conversation ID to - ensure that multiple incoming messages for that conversation do not interfere with - each other. Rasa provides multiple implementations to maintain conversation locks. - -.. _lock-stores: - -Lock Stores -=========== - -.. edit-link:: - -Rasa uses a ticket lock mechanism to ensure that incoming messages for a given -conversation ID are processed in the right order, and locks conversations while -messages are actively processed. This means multiple Rasa servers can -be run in parallel as replicated services, and clients do not necessarily need to -address the same node when sending messages for a given conversation ID. - -.. contents:: - -InMemoryLockStore (default) -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -:Description: - ``InMemoryLockStore`` is the default lock store. It maintains conversation locks - within a single process. - - .. note:: - This lock store should not be used when multiple Rasa servers are run - parallel. - -:Configuration: - To use the ``InMemoryTrackerStore`` no configuration is needed. 
- -RedisLockStore -~~~~~~~~~~~~~~ - -:Description: - ``RedisLockStore`` maintains conversation locks using Redis as a persistence layer. - This is the recommended lock store for running a replicated set of Rasa servers. - -:Configuration: - To set up Rasa with Redis the following steps are required: - - 1. Start your Redis instance - 2. Add required configuration to your ``endpoints.yml`` - - .. code-block:: yaml - - lock_store: - type: "redis" - url: - port: - password: - db: - - 3. To start the Rasa Core server using your Redis backend, add the ``--endpoints`` - flag, e.g.: - - .. code-block:: bash - - rasa run -m models --endpoints endpoints.yml - -:Parameters: - - ``url`` (default: ``localhost``): The url of your redis instance - - ``port`` (default: ``6379``): The port which redis is running on - - ``db`` (default: ``1``): The number of your redis database - - ``password`` (default: ``None``): Password used for authentication - (``None`` equals no authentication) - - ``use_ssl`` (default: ``False``): Whether or not the communication is encrypted diff --git a/docs/api/rasa-sdk.rst b/docs/api/rasa-sdk.rst deleted file mode 100644 index 962510479557..000000000000 --- a/docs/api/rasa-sdk.rst +++ /dev/null @@ -1,216 +0,0 @@ -:desc: Extend your Rasa conversational AI assistant using Rasa-SDK to connect to - external APIs or improve dialogue with custom actions written in Python. - -.. _rasa-sdk: - -Rasa SDK -======== - -.. edit-link:: - -Rasa SDK provides the tools you need to write custom actions in python. - -.. contents:: - :local: - -Installation ------------- - -Use ``pip`` to install ``rasa-sdk`` on your action server. - -.. code-block:: bash - - pip install rasa-sdk - -.. note:: - - You do not need to install ``rasa`` for your action server. - E.g. if you are running Rasa in a docker container, it is recommended to - create a separate container for your action server. In this - separate container, you only need to install ``rasa-sdk``. 
- -Running the Action Server -------------------------- - -If you have ``rasa`` installed, run this command to start your action server: - -.. code-block:: bash - - rasa run actions - -Otherwise, if you do not have ``rasa`` installed, run this command: - -.. code-block:: bash - - python -m rasa_sdk --actions actions - -You can verify that the action server is up and running with the command: - -.. code-block:: bash - - curl http://localhost:5055/health - -You can get the list of registered custom actions with the command: - -.. code-block:: bash - - curl http://localhost:5055/actions - - -The file that contains your custom actions should be called ``actions.py``. -Alternatively, you can use a package directory called ``actions`` or else -manually specify an actions module or package with the ``--actions`` flag. - -The full list of options for running the action server with either command is: - -.. program-output:: rasa run actions --help - -Actions -------- - -The ``Action`` class is the base class for any custom action. It has two methods -that both need to be overwritten, ``name()`` and ``run()``. - -.. _custom_action_example: - -In a restaurant bot, if the user says "show me a Mexican restaurant", -your bot could execute the action ``ActionCheckRestaurants``, -which might look like this: - -.. testcode:: - - from rasa_sdk import Action - from rasa_sdk.events import SlotSet - - class ActionCheckRestaurants(Action): - def name(self) -> Text: - return "action_check_restaurants" - - def run(self, - dispatcher: CollectingDispatcher, - tracker: Tracker, - domain: Dict[Text, Any]) -> List[Dict[Text, Any]]: - - cuisine = tracker.get_slot('cuisine') - q = "select * from restaurants where cuisine='{0}' limit 1".format(cuisine) - result = db.query(q) - - return [SlotSet("matches", result if result is not None else [])] - - -You should add the action name ``action_check_restaurants`` to -the actions in your domain file. 
The action's ``run()`` method receives -three arguments. You can access the values of slots and the latest message -sent by the user using the ``tracker`` object, and you can send messages -back to the user with the ``dispatcher`` object, by calling -``dispatcher.utter_message``. - -Details of the ``run()`` method: - -.. automethod:: rasa_sdk.Action.run - -Details of the ``dispatcher.utter_message()`` method: - -.. automethod:: rasa_sdk.executor.CollectingDispatcher.utter_message - - -.. _custom_session_start: - -Customizing the session start action -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The default behavior of the session start action is to take all existing slots and to -carry them over into the next session. Let's say you do not want to carry over all -slots, but only a user's name and their phone number. To do that, you'd override the -``action_session_start`` with a custom action that might look like this: - -.. testcode:: - - from typing import Text, List, Dict, Any - - from rasa_sdk import Action, Tracker - from rasa_sdk.events import SlotSet, SessionStarted, ActionExecuted, EventType - from rasa_sdk.executor import CollectingDispatcher - - - class ActionSessionStart(Action): - def name(self) -> Text: - return "action_session_start" - - @staticmethod - def fetch_slots(tracker: Tracker) -> List[EventType]: - """Collect slots that contain the user's name and phone number.""" - - slots = [] - - for key in ("name", "phone_number"): - value = tracker.get_slot(key) - if value is not None: - slots.append(SlotSet(key=key, value=value)) - - return slots - - async def run( - self, - dispatcher: CollectingDispatcher, - tracker: Tracker, - domain: Dict[Text, Any], - ) -> List[EventType]: - - # the session should begin with a `session_started` event - events = [SessionStarted(metadata=self.metadata)] - - # any slots that should be carried over should come after the - # `session_started` event - events.extend(self.fetch_slots(tracker)) - - # an `action_listen` should be 
added at the end as a user message follows - events.append(ActionExecuted("action_listen")) - - return events - -.. note:: - - You need to explicitly add ``action_session_start`` to your domain to override this - custom action. - -Events ------- - -An action's ``run()`` method returns a list of events. For more information on -the different types of events, see :ref:`Events`. There is an example of a ``SlotSet`` event -:ref:`above `. The action itself will automatically be added to the -tracker as an ``ActionExecuted`` event. If the action should not trigger any -other events, it should return an empty list. - -Tracker -------- - -The ``rasa_sdk.Tracker`` lets you access the bot's memory in your custom -actions. You can get information about past events and the current state of the -conversation through ``Tracker`` attributes and methods. - -The following are available as attributes of a ``Tracker`` object: - -- ``sender_id`` - The unique ID of person talking to the bot. -- ``slots`` - The list of slots that can be filled as defined in the - "ref"`domains`. -- ``latest_message`` - A dictionary containing the attributes of the latest - message: ``intent``, ``entities`` and ``text``. -- ``events`` - A list of all previous events. -- ``active_form`` - The name of the currently active form. -- ``latest_action_name`` - The name of the last action the bot executed. - -The available methods from the ``Tracker`` are: - -.. automethod:: rasa_sdk.interfaces.Tracker.current_state - -.. automethod:: rasa_sdk.interfaces.Tracker.is_paused - -.. automethod:: rasa_sdk.interfaces.Tracker.get_latest_entity_values - -.. automethod:: rasa_sdk.interfaces.Tracker.get_latest_input_channel - -.. automethod:: rasa_sdk.interfaces.Tracker.events_after_latest_restart - -.. 
automethod:: rasa_sdk.interfaces.Tracker.get_slot diff --git a/docs/api/tensorflow_usage.rst b/docs/api/tensorflow_usage.rst deleted file mode 100644 index ba82509cde6c..000000000000 --- a/docs/api/tensorflow_usage.rst +++ /dev/null @@ -1,61 +0,0 @@ -:desc: Find out how to configure your environment for efficient usage of TensorFlow inside Rasa Open Source. - -.. _tensorflow_usage: - -TensorFlow Configuration -======================== - -TensorFlow allows configuring options in the runtime environment via -`TF Config submodule `_. Rasa Open Source supports a smaller subset of these -configuration options and makes appropriate calls to the ``tf.config`` submodule. -This smaller subset comprises of configurations that developers frequently use with Rasa Open Source. -All configuration options are specified using environment variables as shown in subsequent sections. - -Optimizing CPU Performance --------------------------- - -.. note:: - We recommend that you configure these options only if you are an advanced TensorFlow user and understand the - implementation of the machine learning components in your pipeline. These options affect how operations are carried - out under the hood in Tensorflow. Leaving them at their default values is fine. - -Depending on the TensorFlow operations a NLU component or Core policy uses, you can leverage multi-core CPU -parallelism by tuning these options. - -Parallelizing One Operation -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Set ``TF_INTRA_OP_PARALLELISM_THREADS`` as an environment variable to specify the maximum number of threads that can be used -to parallelize the execution of one operation. For example, operations like ``tf.matmul()`` and ``tf.reduce_sum`` can be executed -on multiple threads running in parallel. The default value for this variable is ``0`` which means TensorFlow would -allocate one thread per CPU core. 
- -Parallelizing Multiple Operations -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Set ``TF_INTER_OP_PARALLELISM_THREADS`` as an environment variable to specify the maximum number of threads that can be used -to parallelize the execution of multiple **non-blocking** operations. These would include operations that do not have a -directed path between them in the TensorFlow graph. In other words, the computation of one operation does not affect the -computation of the other operation. The default value for this variable is ``0`` which means TensorFlow would allocate one thread per CPU core. - -To understand more about how these two options differ from each other, refer to this -`stackoverflow thread `_. - - -Optimizing GPU Performance --------------------------- - -Limiting GPU Memory Growth -^^^^^^^^^^^^^^^^^^^^^^^^^^ - -TensorFlow by default blocks all the available GPU memory for the running process. This can be limiting if you are running -multiple TensorFlow processes and want to distribute memory across them. To prevent Rasa Open Source from blocking all -of the available GPU memory, set the environment variable ``TF_FORCE_GPU_ALLOW_GROWTH`` to ``True``. - -Restricting Absolute GPU Memory Available -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -You may want to limit the absolute amount of GPU memory that can be used by a Rasa Open Source process. - -For example, say you have two visible GPUs(``GPU:0`` and ``GPU:1``) and you want to allocate 1024 MB from the first GPU -and 2048 MB from the second GPU. You can do this by setting the environment variable ``TF_GPU_MEMORY_ALLOC`` to ``"0:1024, 1:2048"``. diff --git a/docs/api/tracker-stores.rst b/docs/api/tracker-stores.rst deleted file mode 100644 index 602dc5eda3ef..000000000000 --- a/docs/api/tracker-stores.rst +++ /dev/null @@ -1,381 +0,0 @@ -:desc: All conversations are stored within a tracker store. Read how Rasa Open Source - provides implementations for different store types out of the box. - -.. 
_tracker-stores: - -Tracker Stores -============== - -.. edit-link:: - -All conversations are stored within a tracker store. -Rasa Open Source provides implementations for different store types out of the box. -If you want to use another store, you can also build a custom tracker store by -extending the ``TrackerStore`` class. - -.. contents:: - -InMemoryTrackerStore (default) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -:Description: - ``InMemoryTrackerStore`` is the default tracker store. It is used if no other - tracker store is configured. It stores the conversation history in memory. - - .. note:: As this store keeps all history in memory, the entire history is lost if you restart the Rasa server. - -:Configuration: - To use the ``InMemoryTrackerStore`` no configuration is needed. - -.. _sql-tracker-store: - -SQLTrackerStore -~~~~~~~~~~~~~~~ - -:Description: - ``SQLTrackerStore`` can be used to store the conversation history in an SQL database. - Storing your trackers this way allows you to query the event database by sender_id, timestamp, action name, - intent name and typename. - -:Configuration: - To set up Rasa Open Source with SQL the following steps are required: - - #. Add required configuration to your ``endpoints.yml``: - - .. code-block:: yaml - - tracker_store: - type: SQL - dialect: "postgresql" # the dialect used to interact with the db - url: "" # (optional) host of the sql db, e.g. "localhost" - db: "rasa" # path to your db - username: # username used for authentication - password: # password used for authentication - query: # optional dictionary to be added as a query string to the connection URL - driver: my-driver - - #. To start the Rasa server using your SQL backend, - add the ``--endpoints`` flag, e.g.: - - .. code-block:: bash - - rasa run -m models --endpoints endpoints.yml - - #. If deploying your model in Docker Compose, add the service to your ``docker-compose.yml``: - - .. 
code-block:: yaml - - postgres: - image: postgres:latest - - To route requests to the new service, make sure that the ``url`` in your ``endpoints.yml`` - references the service name: - - .. code-block:: yaml - :emphasize-lines: 4 - - tracker_store: - type: SQL - dialect: "postgresql" # the dialect used to interact with the db - url: "postgres" - db: "rasa" # path to your db - username: # username used for authentication - password: # password used for authentication - query: # optional dictionary to be added as a query string to the connection URL - driver: my-driver - - -:Parameters: - - ``domain`` (default: ``None``): Domain object associated with this tracker store - - ``dialect`` (default: ``sqlite``): The dialect used to communicate with your SQL backend. Consult the `SQLAlchemy docs `_ for available dialects. - - ``url`` (default: ``None``): URL of your SQL server - - ``port`` (default: ``None``): Port of your SQL server - - ``db`` (default: ``rasa.db``): The path to the database to be used - - ``username`` (default: ``None``): The username which is used for authentication - - ``password`` (default: ``None``): The password which is used for authentication - - ``event_broker`` (default: ``None``): Event broker to publish events to - - ``login_db`` (default: ``None``): Alternative database name to which initially connect, and create the database specified by ``db`` (PostgreSQL only) - - ``query`` (default: ``None``): Dictionary of options to be passed to the dialect and/or the DBAPI upon connect - - -:Officially Compatible Databases: - - PostgreSQL - - Oracle > 11.0 - - SQLite - -:Oracle Configuration: - To use the SQLTrackerStore with Oracle, there are a few additional steps. - First, create a database ``tracker`` in your Oracle database and create a user with access to it. - Create a sequence in the database with the following command, where username is the user you created - (read more about creating sequences `here `__): - - .. 
code-block:: sql - - CREATE SEQUENCE username.events_seq; - - Next you have to extend the Rasa Open Source image to include the necessary drivers and clients. - First download the Oracle Instant Client from `here `__, - rename it to ``oracle.rpm`` and store it in the directory from where you'll be building the docker image. - Copy the following into a file called ``Dockerfile``: - - .. parsed-literal:: - - FROM rasa/rasa:\ |release|-full - - # Switch to root user to install packages - USER root - - RUN apt-get update -qq \ - && apt-get install -y --no-install-recommends \ - alien \ - libaio1 \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* - - # Copy in oracle instaclient - # https://www.oracle.com/database/technologies/instant-client/linux-x86-64-downloads.html - COPY oracle.rpm oracle.rpm - - # Install the Python wrapper library for the Oracle drivers - RUN pip install cx-Oracle - - # Install Oracle client libraries - RUN alien -i oracle.rpm - - USER 1001 - - Then build the docker image: - - .. parsed-literal:: - - docker build . -t rasa-oracle:\ |release|-oracle-full - - Now you can configure the tracker store in the ``endpoints.yml`` as described above, - and start the container. The ``dialect`` parameter with this setup will be ``oracle+cx_oracle``. - Read more about :ref:`deploying-your-rasa-assistant`. - -.. _redis-tracker-store: - -RedisTrackerStore -~~~~~~~~~~~~~~~~~~ - -:Description: - ``RedisTrackerStore`` can be used to store the conversation history in `Redis `_. - Redis is a fast in-memory key-value store which can optionally also persist data. - -:Configuration: - To set up Rasa Open Source with Redis the following steps are required: - - #. Start your Redis instance - #. Add required configuration to your ``endpoints.yml``: - - .. code-block:: yaml - - tracker_store: - type: redis - url: - port: - db: - password: - use_ssl: - - #. 
To start the Rasa server using your configured Redis instance, - add the ``--endpoints`` flag, e.g.: - - .. code-block:: bash - - rasa run -m models --endpoints endpoints.yml - - #. If deploying your model in Docker Compose, add the service to your ``docker-compose.yml``: - - .. code-block:: yaml - - redis: - image: redis:latest - - To route requests to the new service, make sure that the ``url`` in your ``endpoints.yml`` - references the service name: - - .. code-block:: yaml - :emphasize-lines: 3 - - tracker_store: - type: redis - url: - port: - db: - password: - use_ssl: - -:Parameters: - - ``url`` (default: ``localhost``): The url of your redis instance - - ``port`` (default: ``6379``): The port which redis is running on - - ``db`` (default: ``0``): The number of your redis database - - ``password`` (default: ``None``): Password used for authentication - (``None`` equals no authentication) - - ``record_exp`` (default: ``None``): Record expiry in seconds - - ``use_ssl`` (default: ``False``): whether or not to use SSL for transit encryption - -.. _mongo-tracker-store: - -MongoTrackerStore -~~~~~~~~~~~~~~~~~ - -:Description: - ``MongoTrackerStore`` can be used to store the conversation history in `Mongo `_. - MongoDB is a free and open-source cross-platform document-oriented NoSQL database. - -:Configuration: - #. Start your MongoDB instance. - #. Add required configuration to your ``endpoints.yml`` - - .. code-block:: yaml - - tracker_store: - type: mongod - url: - db: - username: - password: - auth_source: - - You can also add more advanced configurations (like enabling ssl) by appending - a parameter to the url field, e.g. mongodb://localhost:27017/?ssl=true - - #. To start the Rasa server using your configured MongoDB instance, - add the ``--endpoints`` flag, e.g.: - - .. code-block:: bash - - rasa run -m models --endpoints endpoints.yml - - #. If deploying your model in Docker Compose, add the service to your ``docker-compose.yml``: - - .. 
code-block:: yaml - - mongo: - image: mongo - environment: - MONGO_INITDB_ROOT_USERNAME: rasa - MONGO_INITDB_ROOT_PASSWORD: example - mongo-express: # this service is a MongoDB UI, and is optional - image: mongo-express - ports: - - 8081:8081 - environment: - ME_CONFIG_MONGODB_ADMINUSERNAME: rasa - ME_CONFIG_MONGODB_ADMINPASSWORD: example - - To route requests to this database, make sure to set the ``url`` in your ``endpoints.yml`` as the service name, - and specify the user and password: - - .. code-block:: yaml - :emphasize-lines: 3, 5-6 - - tracker_store: - type: mongod - url: mongodb://mongo:27017 - db: - username: - password: - auth_source: - - -:Parameters: - - ``url`` (default: ``mongodb://localhost:27017``): URL of your MongoDB - - ``db`` (default: ``rasa``): The database name which should be used - - ``username`` (default: ``0``): The username which is used for authentication - - ``password`` (default: ``None``): The password which is used for authentication - - ``auth_source`` (default: ``admin``): database name associated with the user’s credentials. - - ``collection`` (default: ``conversations``): The collection name which is - used to store the conversations - - -.. _tracker-stores-dynamo: - -DynamoTrackerStore -~~~~~~~~~~~~~~~~~~ - -:Description: - ``DynamoTrackerStore`` can be used to store the conversation history in - `DynamoDB `_. DynamoDB is a hosted NoSQL - database offered by Amazon Web Services (AWS). - -:Configuration: - #. Start your DynamoDB instance. - #. Add required configuration to your ``endpoints.yml``: - - .. code-block:: yaml - - tracker_store: - type: dynamo - tablename: - region: - - #. To start the Rasa server using your configured ``DynamoDB`` instance, - add the ``--endpoints`` flag, e.g.: - - .. 
code-block:: bash - - rasa run -m models --endpoints endpoints.yml - -:Parameters: - - ``tablename`` (default: ``states``): name of the DynamoDB table - - ``region`` (default: ``us-east-1``): name of the region associated with the client - - -.. _custom-tracker-store: - -Custom Tracker Store -~~~~~~~~~~~~~~~~~~~~ - -:Description: - If you require a tracker store which is not available out of the box, you can implement your own. - This is done by extending the base class ``TrackerStore``. - - .. autoclass:: rasa.core.tracker_store.TrackerStore - -:Steps: - #. Extend the ``TrackerStore`` base class. Note that your constructor has to - provide a parameter ``url``. - #. In your ``endpoints.yml`` put in the module path to your custom tracker store - and the parameters you require: - - .. code-block:: yaml - - tracker_store: - type: path.to.your.module.Class - url: localhost - a_parameter: a value - another_parameter: another value - - #. If you are deploying in Docker Compose, you have two options to add this store to Rasa Open Source: - - - extending the Rasa image to include the module - - mounting the module as volume - - Make sure to add the corresponding service as well. For example, mounting it as a volume would look like so: - - ``docker-compose.yml``: - - .. code-block:: yaml - :emphasize-lines: 5-7 - - rasa: - - volumes: - - - - ./path/to/your/module.py:/app/path/to/your/module.py - custom-tracker-store: - image: custom-image:tag - - ``endpoints.yml``: - - .. code-block:: yaml - :emphasize-lines: 3 - - tracker_store: - type: path.to.your.module.Class - url: custom-tracker-store - a_parameter: a value - another_parameter: another value diff --git a/docs/api/tracker.rst b/docs/api/tracker.rst deleted file mode 100644 index 208457947ad8..000000000000 --- a/docs/api/tracker.rst +++ /dev/null @@ -1,21 +0,0 @@ -:desc: Trackers maintain the state of the a dialogue and can be - featurized for machine learning algorithms right out of - the box. - -.. 
_tracker: - -Tracker -======= - -.. edit-link:: - -Trackers maintain the state of a dialogue between the assistant and the user in the form -of conversation sessions. To learn more about how to configure the session behavior, -check out the docs on :ref:`session_config`. - -.. edit-link:: - :url: https://github.com/RasaHQ/rasa/edit/master/rasa/core/trackers.py - :text: SUGGEST DOCSTRING EDITS - -.. autoclass:: rasa.core.trackers.DialogueStateTracker - :members: diff --git a/docs/api/training-data-importers.rst b/docs/api/training-data-importers.rst deleted file mode 100644 index 7900c2b72863..000000000000 --- a/docs/api/training-data-importers.rst +++ /dev/null @@ -1,232 +0,0 @@ -:desc: Change the way Rasa imports training data by replacing the default importer or - writing your own importer. - -.. _training-data-importers: - -Training Data Importers -======================= - -.. edit-link:: - -.. contents:: - :local: - -By default, you can use command line arguments to specify where Rasa should look -for training data on your disk. Rasa then loads any potential training files and uses -them to train your assistant. - -If needed, you can also customize `how` Rasa imports training data. -Potential use cases for this might be: - -- using a custom parser to load training data in other formats -- using different approaches to collect training data (e.g. loading them from different resources) - -You can instruct Rasa to load and use your custom importer by adding the section -``importers`` to the Rasa configuration file and specifying the importer with its -full class path: - -.. code-block:: yaml - - importers: - - name: "module.CustomImporter" - parameter1: "value" - parameter2: "value2" - - name: "module.AnotherCustomImporter" - -The ``name`` key is used to determine which importer should be loaded. Any extra -parameters are passed as constructor arguments to the loaded importer. - -.. note:: - - You can specify multiple importers. 
Rasa will automatically merge their results. - - -RasaFileImporter (default) -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -By default Rasa uses the importer ``RasaFileImporter``. If you want to use it on its -own, you don't have to specify anything in your configuration file. -If you want to use it together with other importers, add it to your -configuration file: - -.. code-block:: yaml - - importers: - - name: "RasaFileImporter" - -MultiProjectImporter (experimental) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. warning:: - - This feature is currently experimental and might change or be removed in the future. - Please share your feedback on it in the `forum `_ to help - us making this feature ready for production. - -With this importer you can build a contextual AI assistant by combining multiple -reusable Rasa projects. -You might, for example, handle chitchat with one project and greet your users with -another. These projects can be developed in isolation, and then combined at train time -to create your assistant. - -An example directory structure could look like this: - -.. code-block:: bash - - . - ├── config.yml - └── projects - ├── GreetBot - │   ├── data - │   │   ├── nlu.md - │   │   └── stories.md - │   └── domain.yml - └── ChitchatBot - ├── config.yml - ├── data - │   ├── nlu.md - │   └── stories.md - └── domain.yml - -In this example the contextual AI assistant imports the ``ChitchatBot`` project which in turn -imports the ``GreetBot`` project. Project imports are defined in the configuration files of -each project. -To instruct Rasa to use the ``MultiProjectImporter`` module, put this section in the config -file of your root project: - -.. code-block:: yaml - - importers: - - name: MultiProjectImporter - - -Then specify which projects you want to import. -In our example, the ``config.yml`` in the root project would look like this: - -.. 
code-block:: yaml - - imports: - - projects/ChitchatBot - -The configuration file of the ``ChitchatBot`` in turn references the ``GreetBot``: - -.. code-block:: yaml - - imports: - - ../GreetBot - -The ``GreetBot`` project does not specify further projects so the ``config.yml`` can be -omitted. - -Rasa uses relative paths from the referencing configuration file to import projects. -These can be anywhere on your file system as long as the file access is permitted. - -During the training process Rasa will import all required training files, combine -them, and train a unified AI assistant. The merging of the training data happens during -runtime, so no additional files with training data are created or visible. - -.. note:: - - Rasa will use the policy and NLU pipeline configuration of the root project - directory during training. **Policy or NLU configurations of imported projects - will be ignored.** - -.. note:: - - Equal intents, entities, slots, responses, actions and forms will be merged, - e.g. if two projects have training data for an intent ``greet``, - their training data will be combined. - -Writing a Custom Importer -~~~~~~~~~~~~~~~~~~~~~~~~~ -If you are writing a custom importer, this importer has to implement the interface of -:ref:`training-data-importers-trainingFileImporter`: - -.. 
code-block:: python - - from typing import Optional, Text, Dict, List, Union - - import rasa - from rasa.core.domain import Domain - from rasa.core.interpreter import RegexInterpreter, NaturalLanguageInterpreter - from rasa.core.training.structures import StoryGraph - from rasa.importers.importer import TrainingDataImporter - from rasa.nlu.training_data import TrainingData - - - class MyImporter(TrainingDataImporter): - """Example implementation of a custom importer component.""" - - def __init__( - self, - config_file: Optional[Text] = None, - domain_path: Optional[Text] = None, - training_data_paths: Optional[Union[List[Text], Text]] = None, - **kwargs: Dict - ): - """Constructor of your custom file importer. - - Args: - config_file: Path to configuration file from command line arguments. - domain_path: Path to domain file from command line arguments. - training_data_paths: Path to training files from command line arguments. - **kwargs: Extra parameters passed through configuration in configuration file. 
- """ - - pass - - async def get_domain(self) -> Domain: - path_to_domain_file = self._custom_get_domain_file() - return Domain.load(path_to_domain_file) - - def _custom_get_domain_file(self) -> Text: - pass - - async def get_stories( - self, - interpreter: "NaturalLanguageInterpreter" = RegexInterpreter(), - template_variables: Optional[Dict] = None, - use_e2e: bool = False, - exclusion_percentage: Optional[int] = None, - ) -> StoryGraph: - from rasa.core.training.dsl import StoryFileReader - - path_to_stories = self._custom_get_story_file() - return await StoryFileReader.read_from_file(path_to_stories, await self.get_domain()) - - def _custom_get_story_file(self) -> Text: - pass - - async def get_config(self) -> Dict: - path_to_config = self._custom_get_config_file() - return rasa.utils.io.read_config_file(path_to_config) - - def _custom_get_config_file(self) -> Text: - pass - - async def get_nlu_data(self, language: Optional[Text] = "en") -> TrainingData: - from rasa.nlu.training_data import loading - - path_to_nlu_file = self._custom_get_nlu_file() - return loading.load_data(path_to_nlu_file) - - def _custom_get_nlu_file(self) -> Text: - pass - - - -.. _training-data-importers-trainingFileImporter: - -TrainingDataImporter -~~~~~~~~~~~~~~~~~~~~ - - -.. autoclass:: rasa.importers.importer.TrainingDataImporter - - .. automethod:: get_domain - - .. automethod:: get_config - - .. automethod:: get_nlu_data - - .. automethod:: get_stories diff --git a/docs/changelog.rst b/docs/changelog.rst deleted file mode 100644 index 76888fe1bf10..000000000000 --- a/docs/changelog.rst +++ /dev/null @@ -1,3 +0,0 @@ -.. _changelog: - -.. 
include:: ../CHANGELOG.rst diff --git a/docs/conf.py b/docs/conf.py deleted file mode 100644 index ec24e782492b..000000000000 --- a/docs/conf.py +++ /dev/null @@ -1,430 +0,0 @@ -# -# -- General configuration ------------------------------------------------ -import re -import sys - -nitpicky = True -linkcheck_anchors_ignore = [".*"] -linkcheck_ignore = [ - r"http://localhost:\d+/", - r"https://github.com/mit-nlp/MITIE/releases/download/", - r"https://github.com/rasahq/rasa/issues/.*", # due to rate limiting - r"https://github.com/RasaHQ/rasa/issues/.*", # due to rate limiting -] -linkcheck_retries = 2 -linkcheck_timeout = 5 -# If your documentation needs a minimal Sphinx version, state it here. -# needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - "sphinx_markdown_builder", - "sphinx.ext.autodoc", - "sphinx_autodoc_typehints", - "sphinx.ext.napoleon", - "sphinx.ext.mathjax", - "sphinx.ext.doctest", - "sphinx.ext.extlinks", - "sphinx_tabs.tabs", - "sphinxcontrib.programoutput", - "sphinxcontrib.httpdomain", - "rasabaster.button", - "rasabaster.card", - "rasabaster.chatbubble", - "rasabaster.copyable", - "rasabaster.editlink", - "rasabaster.runnable", - "rasabaster.conversations", -] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ["_templates"] - -# The suffix(es) of source filenames. -# You can specify multiple suffix as a list of string: -# source_suffix = ['.rst', '.md'] -source_suffix = ".rst" - -# The encoding of source files. -# source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = "index" - -# General information about the project. 
-project = "Rasa" -copyright = "2020, Rasa Technologies" -author = "Rasa Technologies" - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -__version__ = None -exec(open("../rasa/version.py").read()) -version = ".".join(__version__.split(".")[:2]) -# The full version, including alpha/beta/rc tags. -release = __version__ - -# Variables (formatted as `|variable|`) to be replaced in the text -# Sphinx replaces `|release|` and `|version|` by default, -# but we have to add an `rst_epilog` to replace our own variables -# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information -# https://www.sphinx-doc.org/en/master/usage/configuration.html#confval-rst_epilog -from rasa_sdk import __version__ as rasa_sdk_version - -# type : Dict[Text, Text] -variables_to_replace = {"rasa_sdk_version": rasa_sdk_version} - -rst_epilog = "" -for name, value in variables_to_replace.items(): - rst_epilog += f".. |{name}| replace:: {value}\n" - - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# -# This is also used if you do content translation via gettext catalogs. -# Usually you set "language" from the command line for these cases. -language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. 
-# This patterns also effect to html_static_path and html_extra_path -exclude_patterns = [ - "build/*", - "_build", - "Thumbs.db", - ".DS_Store", - # ignore doc pages that we don't show to appease keep_warnings - "core/old-core-change-log.rst", - "core/old-core-migration-guide.rst", - "nlu/old-nlu-change-log.rst", - "nlu/old-nlu-migration-guide.rst", -] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -# add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -# show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = "sphinx" - -# A list of ignored prefixes for module index sorting. -# modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -# keep_warnings = False - -# If true, `todo` and `todoList` produce output, else they produce nothing. -todo_include_todos = False - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -# html_theme = 'default' - -html_theme = "rasabaster" - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. 
-html_theme_options = { - "description": "Rasa", - "github_user": "RasaHQ", - "github_repo": "rasa_nlu", - "fixed_sidebar": True, - "product": "Rasa", - "base_url": "https://rasa.com/docs/rasa/", - "canonical_url": "https://rasa.com/docs/rasa/", -} -# html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] - -# The name for this set of Sphinx documents. -# " v documentation" by default. -html_title = "" - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (relative to this directory) to use as a favicon of -# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ["_static"] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. -# html_extra_path = [] - -# If not None, a 'Last updated on:' timestamp is inserted at every page -# bottom, using the given strftime format. -# The empty string is equivalent to '%b %d, %Y'. -# html_last_updated_fmt = None - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -html_sidebars = {"**": ["simpletoc.html"]} - -# Additional templates that should be rendered to pages, maps page names to -# template names. 
-# html_additional_pages = {} - -# If false, no module index is generated. -# html_domain_indices = True - -# If false, no index is generated. -# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -# html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -# html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = None - -# Language to be used for generating the HTML full-text search index. -# Sphinx supports the following languages: -# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' -# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh' -# html_search_language = 'en' - -# A dictionary with options for the search language support, empty by default. -# 'ja' uses this config value. -# 'zh' user can custom change `jieba` dictionary path. -# html_search_options = {'type': 'default'} - -# The name of a javascript file (relative to the configuration directory) that -# implements a search results scorer. If empty, the default will be used. -# html_search_scorer = 'scorer.js' - -# Output file base name for HTML help builder. -htmlhelp_basename = "rasa_doc" - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - # 'papersize': 'letterpaper', - # The font size ('10pt', '11pt' or '12pt'). - # 'pointsize': '10pt', - # Additional stuff for the LaTeX preamble. 
- # 'preamble': '', - # Latex figure (float) alignment - # 'figure_align': 'htbp', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - (master_doc, "rasa_nlu.tex", "rasa\\_nlu Documentation", "Alan Nichol", "manual") -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# If true, show page references after internal links. -# latex_show_pagerefs = False - -# If true, show URL addresses after external links. -# latex_show_urls = False - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_domain_indices = True - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [(master_doc, "rasa_nlu", "rasa_nlu Documentation", [author], 1)] - -# If true, show URL addresses after external links. -# man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ( - master_doc, - "rasa", - "rasa Documentation", - author, - "rasa", - "One line description of project.", - "Miscellaneous", - ) -] - -# Documents to append as an appendix to all manuals. -# texinfo_appendices = [] - -# If false, no module index is generated. -# texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. 
-# texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. -# texinfo_no_detailmenu = False - -import os - -doctest_path = [os.path.abspath("..")] - -# Make sure we are using the project root as the working directory instead of /docs -doctest_global_setup = r""" -import os -os.chdir(os.path.abspath('..')) -""" - -# extlinks configuration - -extlinks = {"gh-code": (f"https://github.com/RasaHQ/rasa/tree/{release}/%s", "github ")} - -# Sphinxcontrib configuration -scv_priority = "tags" -scv_show_banner = True -# scv_banner_greatest_tag = True -scv_banner_main_ref = "1.10.8" -scv_sort = ("semver",) -scv_whitelist_branches = (re.compile("^master$"),) -# scv_whitelist_tags = ('None',) -scv_grm_exclude = ("README.md", ".gitignore", ".nojekyll", "CNAME") -scv_whitelist_tags = ( - re.compile(r"^[2-9]+\.\d+\.\d+$"), - re.compile(r"^2\.[0-9]+\.[0-9]+(\w+)?$"), - re.compile(r"^1\.[1-9][0-9]+\.\d+$"), - re.compile(r"^1\.9\.7$"), - re.compile(r"^1\.8\.3$"), - re.compile(r"^1\.7\.4$"), - re.compile(r"^1\.6\.2$"), - re.compile(r"^1\.5\.3$"), - re.compile(r"^1\.4\.6$"), - re.compile(r"^1\.3\.10$"), - re.compile(r"^1\.2\.9$"), - re.compile(r"^1\.1\.8$"), - re.compile(r"^1\.0\.9$"), -) -# scv_greatest_tag = True -scv_root_ref = "1.10.8" - -# type classes for nitpicky to ignore -nitpick_ignore = [ - # non-rasa typing - ("py:class", "str"), - ("py:class", "bool"), - ("py:class", "int"), - ("py:class", "Any"), - ("py:class", "dict"), - ("py:class", "Dict"), - ("py:class", "List"), - ("py:class", "Text"), - ("py:class", "Optional"), - ("py:class", "Iterator"), - ("py:class", "typing.Any"), - ("py:class", "typing.Dict"), - ("py:class", "typing.List"), - ("py:class", "typing.Optional"), - ("py:class", "typing.Generator"), - ("py:class", "typing.Iterator"), - ("py:class", "typing.Type"), - ("py:class", "collections.deque"), - ("py:class", "sanic.app.Sanic"), - ("py:data", "typing.Any"), - ("py:data", "typing.Dict"), - ("py:data", "typing.List"), 
- ("py:data", "typing.Optional"), - ("py:data", "typing.Iterator"), - ("py:obj", "None"), - # rasa typing - ("py:class", "CollectingDispatcher"), - ("py:class", "Tracker"), - ("py:class", "rasa.core.agent.Agent"), - ("py:class", "rasa.core.conversation.Dialogue"), - ("py:class", "rasa.core.domain.Domain"), - ("py:class", "rasa.core.policies.Policy"), - ("py:class", "rasa.core.events.Event"), - ("py:class", "rasa.core.events.SlotSet"), - ("py:class", "rasa.core.processor.MessageProcessor"), - ("py:class", "rasa.core.training.structures.StoryGraph"), - ("py:class", "rasa.nlu.components.Component"), - ("py:class", "rasa.nlu.training_data.message.Message"), - ("py:class", "rasa.nlu.training_data.training_data.TrainingData"), -] - - -def setup(sphinx): - sphinx.add_stylesheet("css/custom.css") - - try: - utils_path = os.path.abspath(os.path.join(__file__, "..", "utils")) - sys.path.insert(0, utils_path) - from StoryLexer import StoryLexer - - sphinx.add_lexer("story", StoryLexer()) - except ImportError: - print("No Story Lexer :( Sad times!") diff --git a/docs/core/about.rst b/docs/core/about.rst deleted file mode 100644 index ccb32e6b0dcf..000000000000 --- a/docs/core/about.rst +++ /dev/null @@ -1,55 +0,0 @@ -:desc: Get started with machine learning dialogue management to scale your bot - development using Rasa as a conversational AI platform. - -.. _about-rasa-core: - -The Rasa Core Dialogue Engine -============================= - -.. chat-bubble:: - :text: What am I looking at? - :sender: bot - - -.. chat-bubble:: - :text: Rasa Core is a dialogue engine for building AI assistants. - :sender: user - -.. chat-bubble:: - :text: It's part of the open source Rasa framework. - :sender: user - -.. chat-bubble:: - :text: What's cool about it? - :sender: bot - -.. chat-bubble:: - :text: Rather than a bunch of if/else statements, it uses a machine learning model trained on example conversations to decide what to do next. - :sender: user - -.. 
chat-bubble:: - :text: That sounds harder than writing a few if statements. - :sender: bot - - -.. chat-bubble:: - :text: In the beginning of a project, it seems easier to just hard-code some logic. - :sender: user - -.. chat-bubble:: - :text: Rasa helps you when you want to go past that and create a bot that can handle more complexity. - This blog post explains the philosophy behind Rasa Core. - :sender: user - - -.. chat-bubble:: - :text: Can I see it in action? - :sender: bot - -.. chat-bubble:: - :text: We thought you'd never ask! - :sender: user - -.. chat-bubble:: - :text: Head over to the Rasa Tutorial for an interactive example. - :sender: user diff --git a/docs/core/actions.rst b/docs/core/actions.rst deleted file mode 100644 index 7eec704d2191..000000000000 --- a/docs/core/actions.rst +++ /dev/null @@ -1,168 +0,0 @@ -:desc: Learn about about how to write your own custom actions with the - open source Rasa framework to be able to interact with the external - world - ranging from databases to third-party APIs. - -.. _actions: - -Actions -======= - -.. edit-link:: - -Actions are the things your bot runs in response to user input. -There are four kinds of actions in Rasa: - - 1. **Utterance actions**: start with ``utter_`` and send a specific message - to the user. - 2. **Retrieval actions**: start with ``respond_`` and send a message selected by a retrieval model. - 3. **Custom actions**: run arbitrary code and send any number of messages (or none). - 4. **Default actions**: e.g. ``action_listen``, ``action_restart``, - ``action_default_fallback``. - -.. contents:: - :local: - -Utterance Actions ------------------ - -To define an utterance action (``ActionUtterTemplate``), add a response to the domain file -that starts with ``utter_``: - -.. code-block:: yaml - - responses: - utter_my_message: - - "this is what I want my action to say!" - -It is conventional to start the name of an utterance action with ``utter_``. 
-If this prefix is missing, you can still use the response in your custom -actions, but the response can not be directly predicted as its own action. -See :ref:`responses` for more details. - -If you use an external NLG service, you don't need to specify the -responses in the domain, but you still need to add the utterance names -to the actions list of the domain. - - -Retrieval Actions ------------------ - -Retrieval actions make it easier to work with a large number of similar intents like chitchat and FAQs. -See :ref:`retrieval-actions` to learn more. - -.. _custom-actions: - -Custom Actions --------------- - -An action can run any code you want. Custom actions can turn on the lights, -add an event to a calendar, check a user's bank balance, or anything -else you can imagine. - -Rasa will call an endpoint you can specify, when a custom action is -predicted. This endpoint should be a webserver that reacts to this -call, runs the code and optionally returns information to modify -the dialogue state. - -To specify, your action server use the ``endpoints.yml``: - -.. code-block:: yaml - - action_endpoint: - url: "http://localhost:5055/webhook" - -And pass it to the scripts using ``--endpoints endpoints.yml``. - -You can create an action server in node.js, .NET, java, or any -other language and define your actions there - but we provide -a small python SDK to make development there even easier. - -.. note:: - - Rasa uses a ticket lock mechanism to ensure incoming messages from the same - conversation ID do not interfere with each other and are processed in the right - order. If you expect your custom action to take more than 60 seconds to run, please - set the ``TICKET_LOCK_LIFETIME`` environment variable to your expected value. - -Custom Actions Written in Python -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -For actions written in python, we have a convenient :ref:`rasa-sdk` which starts -this action server for you. 
- -Execute Actions in Other Code ------------------------------ - -Rasa will send an HTTP ``POST`` request to your server containing -information on which action to run. Furthermore, this request will contain all -information about the conversation. :ref:`action-server` shows the detailed API spec. - -As a response to the action call from Rasa, you can modify the tracker, -e.g. by setting slots and send responses back to the user. -All of the modifications are done using events. -There is a list of all possible event types in :ref:`events`. - -.. _default-actions: - -Default Actions ---------------- - -The available default actions are: - -+-----------------------------------+------------------------------------------------+ -| ``action_listen`` | Stop predicting more actions and wait for user | -| | input. | -+-----------------------------------+------------------------------------------------+ -| ``action_restart`` | Reset the whole conversation. Can be triggered | -| | during a conversation by entering ``/restart`` | -| | if the :ref:`mapping-policy` is included in | -| | the policy configuration. | -+-----------------------------------+------------------------------------------------+ -| ``action_session_start`` | Start a new conversation session. Take all set | -| | slots, mark the beginning of a new conversation| -| | session and re-apply the existing ``SlotSet`` | -| | events. This action is triggered automatically | -| | after an inactivity period defined by the | -| | ``session_expiration_time`` parameter in the | -| | domain's :ref:`session_config`. Can be | -| | triggered manually during a conversation by | -| | entering ``/session_start``. All conversations | -| | begin with an ``action_session_start``. 
| -+-----------------------------------+------------------------------------------------+ -| ``action_default_fallback`` | Undo the last user message (as if the user did | -| | not send it and the bot did not react) and | -| | utter a message that the bot did not | -| | understand. See :ref:`fallback-actions`. | -+-----------------------------------+------------------------------------------------+ -| ``action_deactivate_form`` | Deactivate the active form and reset the | -| | requested slot. | -| | See also :ref:`section_unhappy`. | -+-----------------------------------+------------------------------------------------+ -| ``action_revert_fallback_events`` | Revert events that occurred during the | -| | TwoStageFallbackPolicy. | -| | See :ref:`fallback-actions`. | -+-----------------------------------+------------------------------------------------+ -| ``action_default_ask_affirmation``| Ask the user to affirm their intent. | -| | It is suggested to overwrite this default | -| | action with a custom action to have more | -| | meaningful prompts. | -+-----------------------------------+------------------------------------------------+ -| ``action_default_ask_rephrase`` | Ask the user to rephrase their intent. | -+-----------------------------------+------------------------------------------------+ -| ``action_back`` | Undo the last user message (as if the user did | -| | not send it and the bot did not react). | -| | Can be triggered during a conversation by | -| | entering ``/back`` if the MappingPolicy is | -| | included in the policy configuration. | -+-----------------------------------+------------------------------------------------+ - -All the default actions can be overridden. To do so, add the action name -to the list of actions in your domain: - -.. code-block:: yaml - - actions: - - action_default_ask_affirmation - -Rasa will then call your action endpoint and treat it as every other -custom action. 
diff --git a/docs/core/domains.rst b/docs/core/domains.rst deleted file mode 100644 index 527ce16e7630..000000000000 --- a/docs/core/domains.rst +++ /dev/null @@ -1,375 +0,0 @@ -:desc: Define intents, entities, slots and actions in Rasa to build contextual - AI Assistants and chatbots using open source bot framework Rasa. - -.. _domains: - -Domains -======= - -.. edit-link:: - -The ``Domain`` defines the universe in which your assistant operates. -It specifies the ``intents``, ``entities``, ``slots``, and ``actions`` -your bot should know about. Optionally, it can also include ``responses`` -for the things your bot can say. - -.. contents:: - :local: - - -An example of a Domain ----------------------- - -As an example, the domain created by ``rasa init`` has the following yaml definition: - - -.. literalinclude:: ../../rasa/cli/initial_project/domain.yml - :language: yaml - -**What does this mean?** - -Your NLU model will define the ``intents`` and ``entities`` that you -need to include in the domain. The ``entities`` section lists all entities -extracted by any :ref:`entity extractor` in your -NLU pipeline. - -For example: - -.. code-block:: yaml - - entities: - - PERSON # entity extracted by SpacyEntityExtractor - - time # entity extracted by DucklingHTTPExtractor - - membership_type # custom entity extracted by CRFEntityExtractor - - priority # custom entity extracted by CRFEntityExtractor - - -:ref:`slots` hold information you want to keep track of during a conversation. -A categorical slot called ``risk_level`` would be -defined like this: - -.. code-block:: yaml - - slots: - risk_level: - type: categorical - values: - - low - - medium - - high - - -:ref:`Here ` you can find the full list of slot types defined by -Rasa Core, along with syntax for including them in your domain file. - - -:ref:`actions` are the things your bot can actually do. 
-For example, an action could: - -* respond to a user, -* make an external API call, -* query a database, or -* just about anything! - -Custom Actions and Slots ------------------------- - -To reference slots in your domain, you need to reference them by -their **module path**. To reference custom actions, use their **name**. -For example, if you have a module called ``my_actions`` containing -a class ``MyAwesomeAction``, and module ``my_slots`` containing -``MyAwesomeSlot``, you would add these lines to the domain file: - -.. code-block:: yaml - - actions: - - my_custom_action - ... - - slots: - - my_slots.MyAwesomeSlot - - -The ``name`` function of ``MyAwesomeAction`` needs to return -``my_custom_action`` in this example (for more details, -see :ref:`custom-actions`). - -.. _domain-responses: - -Responses ---------- - -Responses are messages the bot will send back to the user. There are -two ways to use these responses: - -1. If the name of the response starts with ``utter_``, the response can - directly be used as an action. You would add the response - to the domain: - - .. code-block:: yaml - - responses: - utter_greet: - - text: "Hey! How are you?" - - Afterwards, you can use the response as an action in the - stories: - - .. code-block:: story - - ## greet the user - * intent_greet - - utter_greet - - When ``utter_greet`` is run as an action, it will send the message from - the response back to the user. - -2. You can use the responses to generate response messages from your - custom actions using the dispatcher: - ``dispatcher.utter_message(template="utter_greet")``. - This allows you to separate the logic of generating - the messages from the actual copy. In your custom action code, you can - send a message based on the response like this: - - .. 
code-block:: python - - from rasa_sdk.actions import Action - - class ActionGreet(Action): - def name(self): - return 'action_greet' - - def run(self, dispatcher, tracker, domain): - dispatcher.utter_message(template="utter_greet") - return [] - -Images and Buttons ------------------- - -Responses defined in a domain's yaml file can contain images and -buttons as well: - -.. code-block:: yaml - - responses: - utter_greet: - - text: "Hey! How are you?" - buttons: - - title: "great" - payload: "great" - - title: "super sad" - payload: "super sad" - utter_cheer_up: - - text: "Here is something to cheer you up:" - image: "https://i.imgur.com/nGF1K8f.jpg" - -.. note:: - - Please keep in mind that it is up to the implementation of the output - channel on how to display the defined buttons. The command line, for - example, can't display buttons or images, but tries to mimic them by - printing the options. - -Custom Output Payloads ----------------------- - -You can also send any arbitrary output to the output channel using the -``custom:`` key. Note that since the domain is in yaml format, the json -payload should first be converted to yaml format. - -For example, although date pickers are not a defined parameter in responses -because they are not supported by most channels, a Slack date picker -can be sent like so: - -.. code-block:: yaml - - responses: - utter_take_bet: - - custom: - blocks: - - type: section - text: - text: "Make a bet on when the world will end:" - type: mrkdwn - accessory: - type: datepicker - initial_date: '2019-05-21' - placeholder: - type: plain_text - text: Select a date - - -Channel-Specific Responses --------------------------- - -For each response, you can have multiple **response variations** (see :ref:`variations`). -If you have certain response variations that you would like sent only to specific -channels, you can specify this with the ``channel:`` key. 
The value should match -the name defined in the ``name()`` method of the channel's ``OutputChannel`` -class. Channel-specific responses are especially useful if creating custom -output payloads that will only work in certain channels. - - -.. code-block:: yaml - - responses: - utter_ask_game: - - text: "Which game would you like to play?" - channel: "slack" - custom: - - # payload for Slack dropdown menu to choose a game - - text: "Which game would you like to play?" - buttons: - - title: "Chess" - payload: '/inform{"game": "chess"}' - - title: "Checkers" - payload: '/inform{"game": "checkers"}' - - title: "Fortnite" - payload: '/inform{"game": "fortnite"}' - -Each time your bot looks for responses, it will first check to see if there -are any channel-specific response variations for the connected channel. If there are, it -will choose **only** from these response variations. If no channel-specific response variations are -found, it will choose from any response variations that do not have a defined ``channel``. -Therefore, it is good practice to always have at least one response variation for each -response that has no ``channel`` specified so that your bot can respond in all -environments, including in the shell and in interactive learning. - -Variables ---------- - -You can also use **variables** in your responses to insert information -collected during the dialogue. You can either do that in your custom python -code or by using the automatic slot filling mechanism. For example, if you -have a response like this: - -.. code-block:: yaml - - responses: - utter_greet: - - text: "Hey, {name}. How are you?" - -Rasa will automatically fill that variable with a value found in a slot called -``name``. - -In custom code, you can retrieve a response by using: - -.. testsetup:: - - from rasa_sdk.actions import Action - -.. 
testcode:: - - class ActionCustom(Action): - def name(self): - return "action_custom" - - def run(self, dispatcher, tracker, domain): - # send utter default response to user - dispatcher.utter_message(template="utter_default") - # ... other code - return [] - -If the response contains variables denoted with ``{my_variable}`` -you can supply values for the fields by passing them as keyword -arguments to ``utter_message``: - -.. code-block:: python - - dispatcher.utter_message(template="utter_greet", my_variable="my text") - -.. _variations: - -Variations ----------- - -If you want to randomly vary the response sent to the user, you can list -multiple **response variations** and Rasa will randomly pick one of them, e.g.: - -.. code-block:: yaml - - responses: - utter_greeting: - - text: "Hey, {name}. How are you?" - - text: "Hey, {name}. How is your day going?" - -.. _use_entities: - -Ignoring entities for certain intents -------------------------------------- - -If you want all entities to be ignored for certain intents, you can -add the ``use_entities: []`` parameter to the intent in your domain -file like this: - -.. code-block:: yaml - - intents: - - greet: - use_entities: [] - -To ignore some entities or explicitly take only certain entities -into account you can use this syntax: - -.. code-block:: yaml - - intents: - - greet: - use_entities: - - name - - first_name - ignore_entities: - - location - - age - -This means that excluded entities for those intents will be unfeaturized and therefore -will not impact the next action predictions. This is useful when you have -an intent where you don't care about the entities being picked up. If you list -your intents as normal without this parameter, the entities will be -featurized as normal. - -.. note:: - - If you really want these entities not to influence action prediction we - suggest you make the slots with the same name of type ``unfeaturized``. - -.. 
_session_config: - -Session configuration ---------------------- - -A conversation session represents the dialogue between the assistant and the user. -Conversation sessions can begin in three ways: - - 1. the user begins the conversation with the assistant, - 2. the user sends their first message after a configurable period of inactivity, or - 3. a manual session start is triggered with the ``/session_start`` intent message. - -You can define the period of inactivity after which a new conversation -session is triggered in the domain under the ``session_config`` key. -``session_expiration_time`` defines the time of inactivity in minutes after which a -new session will begin. ``carry_over_slots_to_new_session`` determines whether -existing set slots should be carried over to new sessions. - -The default session configuration looks as follows: - -.. code-block:: yaml - - session_config: - session_expiration_time: 60 # value in minutes, 0 means infinitely long - carry_over_slots_to_new_session: true # set to false to forget slots between sessions - -This means that if a user sends their first message after 60 minutes of inactivity, a -new conversation session is triggered, and that any existing slots are carried over -into the new session. Setting the value of ``session_expiration_time`` to 0 means -that sessions will not end (note that the ``action_session_start`` action will still -be triggered at the very beginning of conversations). - -.. note:: - - A session start triggers the default action ``action_session_start``. Its default - implementation moves all existing slots into the new session. Note that all - conversations begin with an ``action_session_start``. Overriding this action could - for instance be used to initialize the tracker with slots from an external API - call, or to start the conversation with a bot message. The docs on - :ref:`custom_session_start` shows you how to do that. 
diff --git a/docs/core/fallback-actions.rst b/docs/core/fallback-actions.rst deleted file mode 100644 index 84419887ecc8..000000000000 --- a/docs/core/fallback-actions.rst +++ /dev/null @@ -1,128 +0,0 @@ -:desc: Define custom fallback actions with thresholds for NLU and Core for letting - your conversation fail gracefully with open source dialogue management. - -.. _fallback-actions: - -Fallback Actions -================ - -.. edit-link:: - -Sometimes you want to revert to a fallback action, such as replying, -`"Sorry, I didn't understand that"`. You can handle fallback cases by adding -either the ``FallbackPolicy`` or the ``TwoStageFallbackPolicy`` to your -policy ensemble. - -Fallback Policy ---------------- - - -The ``FallbackPolicy`` has one fallback action, which will -be executed if the intent recognition has a confidence below ``nlu_threshold`` -or if none of the dialogue policies predict an action with -confidence higher than ``core_threshold`` or if the highest ranked intent differs in -confidence with the second highest ranked intent by less than ``ambiguity_threshold``. - -The thresholds and fallback action can be adjusted in the policy configuration -file as parameters of the ``FallbackPolicy``. - -.. code-block:: yaml - - policies: - - name: "FallbackPolicy" - nlu_threshold: 0.4 - core_threshold: 0.3 - ambiguity_threshold: 0.1 - fallback_action_name: "action_default_fallback" - -``action_default_fallback`` is a default action in Rasa Core which sends the -``utter_default`` response to the user. Make sure to specify -the ``utter_default`` in your domain file. It will also revert back to the -state of the conversation before the user message that caused the -fallback, so that it will not influence the prediction of future actions. -You can take a look at the source of the action below: - -.. 
autoclass:: rasa.core.actions.action.ActionDefaultFallback - - -You can also create your own custom action to use as a fallback (see -:ref:`custom actions ` for more info on custom actions). If you -do, make sure to pass the custom fallback action to ``FallbackPolicy`` inside -your policy configuration file. For example: - -.. code-block:: yaml - - policies: - - name: "FallbackPolicy" - nlu_threshold: 0.4 - core_threshold: 0.3 - ambiguity_threshold: 0.1 - fallback_action_name: "my_fallback_action" - - -.. note:: - If your custom fallback action does not return a ``UserUtteranceReverted`` event, - the next predictions of your bot may become inaccurate, as it is very likely that - the fallback action is not present in your stories. - -If you have a specific intent, let's say it's called ``out_of_scope``, that -should always trigger the fallback action, you should add this as a story: - -.. code-block:: story - - ## fallback story - * out_of_scope - - action_default_fallback - - -Two-stage Fallback Policy -------------------------- - -The ``TwoStageFallbackPolicy`` handles low NLU confidence in multiple stages -by trying to disambiguate the user input (low core confidence is handled in -the same manner as the ``FallbackPolicy``). - -- If a NLU prediction has a low confidence score, the user is asked to affirm - the classification of the intent. (Default action: - ``action_default_ask_affirmation``) - - - If they affirm, the story continues as if the intent was classified - with high confidence from the beginning. - - If they deny, the user is asked to rephrase their message. - -- Rephrasing (default action: ``action_default_ask_rephrase``) - - - If the classification of the rephrased intent was confident, the story - continues as if the user had this intent from the beginning. - - If the rephrased intent was not classified with high confidence, the user - is asked to affirm the classified intent. 
- -- Second affirmation (default action: ``action_default_ask_affirmation``) - - - If the user affirms the intent, the story continues as if the user had - this intent from the beginning. - - If the user denies, the original intent is classified as the specified - ``deny_suggestion_intent_name``, and an ultimate fallback action - ``fallback_nlu_action_name`` is triggered (e.g. a handoff to a human). - -Rasa Core provides the default implementations of -``action_default_ask_affirmation`` and ``action_default_ask_rephrase``. -The default implementation of ``action_default_ask_rephrase`` action utters -the response ``utter_ask_rephrase``, so be sure to specify this -response in your domain file. -The implementation of both actions can be overwritten with :ref:`custom actions `. - -You can specify the core fallback action as well as the ultimate NLU -fallback action as parameters to ``TwoStageFallbackPolicy`` in your -policy configuration file. - -.. code-block:: yaml - - policies: - - name: TwoStageFallbackPolicy - nlu_threshold: 0.3 - core_threshold: 0.3 - ambiguity_threshold: 0.1 - fallback_core_action_name: "action_default_fallback" - fallback_nlu_action_name: "action_default_fallback" - deny_suggestion_intent_name: "out_of_scope" diff --git a/docs/core/forms.rst b/docs/core/forms.rst deleted file mode 100644 index 81752dcec7f7..000000000000 --- a/docs/core/forms.rst +++ /dev/null @@ -1,366 +0,0 @@ -:desc: Follow a rule-based process of information gathering using FormActions - in open source bot framework Rasa. - -.. _forms: - -Forms -===== - -.. edit-link:: - -.. note:: - There is an in-depth tutorial `here `_ about how to use Rasa Forms for slot filling. - -.. contents:: - :local: - -One of the most common conversation patterns is to collect a few pieces of -information from a user in order to do something (book a restaurant, call an -API, search a database, etc.). This is also called **slot filling**. 
- - -If you need to collect multiple pieces of information in a row, we recommended -that you create a ``FormAction``. This is a single action which contains the -logic to loop over the required slots and ask the user for this information. -There is a full example using forms in the ``examples/formbot`` directory of -Rasa Core. - - -When you define a form, you need to add it to your domain file. -If your form's name is ``restaurant_form``, your domain would look like this: - -.. code-block:: yaml - - forms: - - restaurant_form - actions: - ... - -See ``examples/formbot/domain.yml`` for an example. - -Configuration File ------------------- - -To use forms, you also need to include the ``FormPolicy`` in your policy -configuration file. For example: - -.. code-block:: yaml - - policies: - - name: "FormPolicy" - -see ``examples/formbot/config.yml`` for an example. - -Form Basics ------------ - -Using a ``FormAction``, you can describe *all* of the happy paths with a single -story. By "happy path", we mean that whenever you ask a user for some information, -they respond with the information you asked for. - -If we take the example of the restaurant bot, this single story describes all of the -happy paths. - -.. code-block:: story - - ## happy path - * request_restaurant - - restaurant_form - - form{"name": "restaurant_form"} - - form{"name": null} - -In this story the user intent is ``request_restaurant``, which is followed by -the form action ``restaurant_form``. With ``form{"name": "restaurant_form"}`` the -form is activated and with ``form{"name": null}`` the form is deactivated again. -As shown in the section :ref:`section_unhappy` the bot can execute any kind of -actions outside the form while the form is still active. On the "happy path", -where the user is cooperating well and the system understands the user input correctly, -the form is filling all requested slots without interruption. - -The ``FormAction`` will only request slots which haven't already been set. 
-If a user starts the conversation with -`I'd like a vegetarian Chinese restaurant for 8 people`, then they won't be -asked about the ``cuisine`` and ``num_people`` slots. - -Note that for this story to work, your slots should be :ref:`unfeaturized -`. If any of these slots are featurized, your story needs to -include ``slot{}`` events to show these slots being set. In that case, the -easiest way to create valid stories is to use :ref:`interactive-learning`. - -In the story above, ``restaurant_form`` is the name of our form action. -Here is an example of what it looks like. -You need to define three methods: - -- ``name``: the name of this action -- ``required_slots``: a list of slots that need to be filled for the ``submit`` method to work. -- ``submit``: what to do at the end of the form, when all the slots have been filled. - -.. literalinclude:: ../../examples/formbot/actions.py - :dedent: 4 - :pyobject: RestaurantForm.name - -.. literalinclude:: ../../examples/formbot/actions.py - :dedent: 4 - :pyobject: RestaurantForm.required_slots - -.. literalinclude:: ../../examples/formbot/actions.py - :dedent: 4 - :pyobject: RestaurantForm.submit - -Once the form action gets called for the first time, -the form gets activated and the ``FormPolicy`` jumps in. -The ``FormPolicy`` is extremely simple and just always predicts the form action. -See :ref:`section_unhappy` for how to work with unexpected user input. - -Every time the form action gets called, it will ask the user for the next slot in -``required_slots`` which is not already set. -It does this by looking for a response called ``utter_ask_{slot_name}``, -so you need to define these in your domain file for each required slot. - -Once all the slots are filled, the ``submit()`` method is called, where you can -use the information you've collected to do something for the user, for example -querying a restaurant API. -If you don't want your form to do anything at the end, just use ``return []`` -as your submit method. 
-After the submit method is called, the form is deactivated, -and other policies in your Core model will be used to predict the next action. - -Custom slot mappings --------------------- - -If you do not define slot mappings, slots will be only filled by entities -with the same name as the slot that are picked up from the user input. -Some slots, like ``cuisine``, can be picked up using a single entity, but a -``FormAction`` can also support yes/no questions and free-text input. -The ``slot_mappings`` method defines how to extract slot values from user responses. - -Here's an example for the restaurant bot: - -.. literalinclude:: ../../examples/formbot/actions.py - :dedent: 4 - :pyobject: RestaurantForm.slot_mappings - -The predefined functions work as follows: - -- ``self.from_entity(entity=entity_name, intent=intent_name, role=role_name, group=group_name)`` - will look for an entity called ``entity_name`` to fill a slot - ``slot_name`` regardless of user intent if ``intent_name`` is ``None`` - else only if the users intent is ``intent_name``. If ``role_name`` and/or ``group_name`` - are provided, the role/group label of the entity also needs to match the given values. -- ``self.from_intent(intent=intent_name, value=value)`` - will fill slot ``slot_name`` with ``value`` if user intent is ``intent_name``. - To make a boolean slot, take a look at the definition of ``outdoor_seating`` - above. Note: Slot will not be filled with user intent of message triggering - the form action. Use ``self.from_trigger_intent`` below. -- ``self.from_trigger_intent(intent=intent_name, value=value)`` - will fill slot ``slot_name`` with ``value`` if form was triggered with user - intent ``intent_name``. -- ``self.from_text(intent=intent_name)`` will use the next - user utterance to fill the text slot ``slot_name`` regardless of user intent - if ``intent_name`` is ``None`` else only if user intent is ``intent_name``. 
-- If you want to allow a combination of these, provide them as a list as in the - example above - - -Validating user input ---------------------- - -After extracting a slot value from user input, the form will try to validate the -value of the slot. Note that by default, validation only happens if the form -action is executed immediately after user input. This can be changed in the -``_validate_if_required()`` function of the ``FormAction`` class in Rasa SDK. -Any required slots that were filled before the initial activation of a form -are validated upon activation as well. - -By default, validation only checks if the requested slot was successfully -extracted from the slot mappings. If you want to add custom validation, for -example to check a value against a database, you can do this by writing a helper -validation function with the name ``validate_{slot-name}``. - -Here is an example, ``validate_cuisine()``, which checks if the extracted cuisine slot -belongs to a list of supported cuisines. - -.. literalinclude:: ../../examples/formbot/actions.py - :pyobject: RestaurantForm.cuisine_db - -.. literalinclude:: ../../examples/formbot/actions.py - :pyobject: RestaurantForm.validate_cuisine - -As the helper validation functions return dictionaries of slot names and values -to set, you can set more slots than just the one you are validating from inside -a helper validation method. However, you are responsible for making sure that -those extra slot values are valid. - -In case the slot is filled with something that you are certain can't be handled -and you want to deactivate the form directly, -you can overwrite the ``request_next_slot()`` method to do so. The example below -checks the value of the ``cuisine`` slot directly, but you could use any logic -you'd like to trigger deactivation: - -.. 
code-block:: python - - def request_next_slot( - self, - dispatcher: "CollectingDispatcher", - tracker: "Tracker", - domain: Dict[Text, Any], - ) -> Optional[List[EventType]]: - """Request the next slot and utter template if needed, - else return None""" - for slot in self.required_slots(tracker): - if self._should_request_slot(tracker, slot): - - ## Condition of validated slot that triggers deactivation - if slot == "cuisine" and tracker.get_slot("cuisine") == "caribbean": - dispatcher.utter_message(text="Sorry, I can't help you with that") - return self.deactivate() - - ## For all other slots, continue as usual - logger.debug(f"Request next slot '{slot}'") - dispatcher.utter_message( - template=f"utter_ask_{slot}", **tracker.slots - ) - return [SlotSet(REQUESTED_SLOT, slot)] - return None - - -If nothing is extracted from the user's utterance for any of the required slots, an -``ActionExecutionRejection`` error will be raised, meaning the action execution -was rejected and therefore Core will fall back onto a different policy to -predict another action. - -.. _section_unhappy: - -Handling unhappy paths ----------------------- - -Of course your users will not always respond with the information you ask of them. -Typically, users will ask questions, make chitchat, change their mind, or otherwise -stray from the happy path. The way this works with forms is that a form will raise -an ``ActionExecutionRejection`` if the user didn't provide the requested information. -You need to handle events that might cause ``ActionExecutionRejection`` errors -in your stories. For example, if you expect your users to chitchat with your bot, -you could add a story like this: - -.. 
code-block:: story - - ## chitchat - * request_restaurant - - restaurant_form - - form{"name": "restaurant_form"} - * chitchat - - utter_chitchat - - restaurant_form - - form{"name": null} - -In some situations, users may change their mind in the middle of form action -and decide not to go forward with their initial request. In cases like this, the -assistant should stop asking for the requested slots. You can handle such situations -gracefully using a default action ``action_deactivate_form`` which will deactivate -the form and reset the requested slot. An example story of such conversation could -look as follows: - -.. code-block:: story - - ## chitchat - * request_restaurant - - restaurant_form - - form{"name": "restaurant_form"} - * stop - - utter_ask_continue - * deny - - action_deactivate_form - - form{"name": null} - - -It is **strongly** recommended that you build these stories using interactive learning. -If you write these stories by hand you will likely miss important things. -Please read :ref:`section_interactive_learning_forms` -on how to use interactive learning with forms. - -The requested_slot slot ------------------------ - -The slot ``requested_slot`` is automatically added to the domain as an -unfeaturized slot. If you want to make it featurized, you need to add it -to your domain file as a categorical slot. You might want to do this if you -want to handle your unhappy paths differently depending on what slot is -currently being asked from the user. For example, say your users respond -to one of the bot's questions with another question, like *why do you need to know that?* -The response to this ``explain`` intent depends on where we are in the story. -In the restaurant case, your stories would look something like this: - -.. 
code-block:: story - - ## explain cuisine slot - * request_restaurant - - restaurant_form - - form{"name": "restaurant_form"} - - slot{"requested_slot": "cuisine"} - * explain - - utter_explain_cuisine - - restaurant_form - - slot{"cuisine": "greek"} - ( ... all other slots the form set ... ) - - form{"name": null} - - ## explain num_people slot - * request_restaurant - - restaurant_form - - form{"name": "restaurant_form"} - - slot{"requested_slot": "num_people"} - * explain - - utter_explain_num_people - - restaurant_form - - slot{"cuisine": "greek"} - ( ... all other slots the form set ... ) - - form{"name": null} - -Again, it is **strongly** recommended that you use interactive -learning to build these stories. -Please read :ref:`section_interactive_learning_forms` -on how to use interactive learning with forms. - -.. _conditional-logic: - -Handling conditional slot logic -------------------------------- - -Many forms require more logic than just requesting a list of fields. -For example, if someone requests ``greek`` as their cuisine, you may want to -ask if they are looking for somewhere with outside seating. - -You can achieve this by writing some logic into the ``required_slots()`` method, -for example: - -.. code-block:: python - - @staticmethod - def required_slots(tracker) -> List[Text]: - """A list of required slots that the form has to fill""" - - if tracker.get_slot('cuisine') == 'greek': - return ["cuisine", "num_people", "outdoor_seating", - "preferences", "feedback"] - else: - return ["cuisine", "num_people", - "preferences", "feedback"] - -This mechanism is quite general and you can use it to build many different -kinds of logic into your forms. - - - -Debugging ---------- - -The first thing to try is to run your bot with the ``--debug`` flag, see :ref:`command-line-interface` for details. -If you are just getting started, you probably only have a few hand-written stories. 
-This is a great starting point, but -you should give your bot to people to test **as soon as possible**. One of the guiding principles -behind Rasa Core is: - -.. pull-quote:: Learning from real conversations is more important than designing hypothetical ones - -So don't try to cover every possibility in your hand-written stories before giving it to testers. -Real user behavior will always surprise you! diff --git a/docs/core/interactive-learning.rst b/docs/core/interactive-learning.rst deleted file mode 100644 index e024a8679cd0..000000000000 --- a/docs/core/interactive-learning.rst +++ /dev/null @@ -1,275 +0,0 @@ -:desc: Use Interactive learning to continuously validate and improve the - performance of your AI Assistant using machine learning based - open source dialogue management. - -.. _interactive-learning: - -Interactive Learning -==================== - -.. edit-link:: - -This page shows how to use interactive learning on the command line. - -In interactive learning mode, you provide feedback to your bot while you talk -to it. This is a powerful way -to explore what your bot can do, and the easiest way to fix any mistakes -it makes. One advantage of machine learning-based dialogue is that when -your bot doesn't know how to do something yet, you can just teach it! -Some people call this `Software 2.0 `_. - - -.. note:: - - Rasa X provides a UI for interactive learning, and you can use any user conversation - as a starting point. See - `Talk to Your Bot `_ - in the Rasa X docs. - -.. contents:: - :local: - -Running Interactive Learning -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Run the following command to start interactive learning: - -.. code-block:: bash - - rasa run actions --actions actions& - - rasa interactive \ - -m models/20190515-135859.tar.gz \ - --endpoints endpoints.yml - -The first command starts the action server (see :ref:`custom-actions`). - -The second command starts interactive learning mode. 
- -In interactive mode, Rasa will ask you to confirm every prediction -made by NLU and Core before proceeding. -Here's an example: - -.. code-block:: text - - Bot loaded. Type a message and press enter (use '/stop' to exit). - - ? Next user input: hello - - ? Is the NLU classification for 'hello' with intent 'hello' correct? Yes - - ------ - Chat History - - # Bot You - ──────────────────────────────────────────── - 1 action_listen - ──────────────────────────────────────────── - 2 hello - intent: hello 1.00 - ------ - - ? The bot wants to run 'utter_greet', correct? (Y/n) - - -The chat history and slot values are printed to the screen, which -should be all the information your need to decide what the correct -next action is. - -In this case, the bot chose the -right action (``utter_greet``), so we type ``y``. -Then we type ``y`` again, because ``action_listen`` is the correct -action after greeting. We continue this loop, chatting with the bot, -until the bot chooses the wrong action. - -Providing feedback on errors -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -For this example we are going to use the ``concertbot`` example, -so make sure you have the domain & data for it. You can download -the data from our `github repo -`_. - -If you ask ``/search_concerts``, the bot should suggest -``action_search_concerts`` and then ``action_listen`` (the confidence at which -the policy selected its next action will be displayed next to the action name). -Now let's enter ``/compare_reviews`` as the next user message. -The bot *might* choose the wrong one out of the two -possibilities (depending on the training run, it might also be correct): - -.. 
code-block:: text - - ------ - Chat History - - # Bot You - ─────────────────────────────────────────────────────────────── - 1 action_listen - ─────────────────────────────────────────────────────────────── - 2 /search_concerts - intent: search_concerts 1.00 - ─────────────────────────────────────────────────────────────── - 3 action_search_concerts 0.72 - action_listen 0.78 - ─────────────────────────────────────────────────────────────── - 4 /compare_reviews - intent: compare_reviews 1.00 - - - Current slots: - concerts: None, venues: None - - ------ - ? The bot wants to run 'action_show_venue_reviews', correct? No - - -Now we type ``n``, because it chose the wrong action, and we get a new -prompt asking for the correct one. This also shows the probabilities the -model has assigned to each of the actions: - -.. code-block:: text - - ? What is the next action of the bot? (Use arrow keys) - ❯ 0.53 action_show_venue_reviews - 0.46 action_show_concert_reviews - 0.00 utter_goodbye - 0.00 action_search_concerts - 0.00 utter_greet - 0.00 action_search_venues - 0.00 action_listen - 0.00 utter_youarewelcome - 0.00 utter_default - 0.00 action_default_fallback - 0.00 action_restart - - - -In this case, the bot should ``action_show_concert_reviews`` (rather than venue -reviews!) so we select that action. - -Now we can keep talking to the bot for as long as we like to create a longer -conversation. At any point you can press ``Ctrl-C`` and the bot will -provide you with exit options. You can write your newly-created stories and NLU -data to files. You can also go back a step if you made a mistake when providing -feedback. - -Make sure to combine the dumped stories and NLU examples with your original -training data for the next training. - -Visualization of conversations -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -During the interactive learning, Rasa will plot the current conversation -and a few similar conversations from the training data to help you -keep track of where you are. 
- -You can view the visualization at http://localhost:5005/visualization.html -as soon as you've started interactive learning. - -To skip the visualization, run ``rasa interactive --skip-visualization``. - -.. image:: /_static/images/interactive_learning_graph.gif - -.. _section_interactive_learning_forms: - -Interactive Learning with Forms -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -If you're using a FormAction, there are some additional things to keep in mind -when using interactive learning. - -The ``form:`` prefix -~~~~~~~~~~~~~~~~~~~~ - -The form logic is described by your ``FormAction`` class, and not by the stories. -The machine learning policies should not have to learn this behavior, and should -not get confused if you later change your form action, for example by adding or -removing a required slot. -When you use interactive learning to generate stories containing a form, -the conversation steps handled by the form -get a :code:`form:` prefix. This tells Rasa Core to ignore these steps when training -your other policies. There is nothing special you have to do here, all of the form's -happy paths are still covered by the basic story given in :ref:`forms`. - -Here is an example: - -.. code-block:: story - - * request_restaurant - - restaurant_form - - form{"name": "restaurant_form"} - - slot{"requested_slot": "cuisine"} - * form: inform{"cuisine": "mexican"} - - slot{"cuisine": "mexican"} - - form: restaurant_form - - slot{"cuisine": "mexican"} - - slot{"requested_slot": "num_people"} - * form: inform{"number": "2"} - - form: restaurant_form - - slot{"num_people": "2"} - - form{"name": null} - - slot{"requested_slot": null} - - utter_slots_values - - -Input validation -~~~~~~~~~~~~~~~~ - -Every time the user responds with something *other* than the requested slot or -any of the required slots, -you will be asked whether you want the form action to try and extract a slot -from the user's message when returning to the form. 
This is best explained with -an example: - -.. code-block:: text - - 7 restaurant_form 1.00 - slot{"num_people": "3"} - slot{"requested_slot": "outdoor_seating"} - do you want to sit outside? - action_listen 1.00 - ───────────────────────────────────────────────────────────────────────────────────── - 8 /stop - intent: stop 1.00 - ───────────────────────────────────────────────────────────────────────────────────── - 9 utter_ask_continue 1.00 - do you want to continue? - action_listen 1.00 - ───────────────────────────────────────────────────────────────────────────────────── - 10 /affirm - intent: affirm 1.00 - - - Current slots: - cuisine: greek, feedback: None, num_people: 3, outdoor_seating: None, - preferences: None, requested_slot: outdoor_seating - - ------ - 2018-11-05 21:36:53 DEBUG rasa.core.tracker_store - Recreating tracker for id 'default' - ? The bot wants to run 'restaurant_form', correct? Yes - 2018-11-05 21:37:08 DEBUG rasa.core.tracker_store - Recreating tracker for id 'default' - ? Should 'restaurant_form' validate user input to fill the slot 'outdoor_seating'? (Y/n) - -Here the user asked to stop the form, and the bot asks the user whether they're sure -they don't want to continue. The user says they want to continue (the ``/affirm`` intent). -Here ``outdoor_seating`` has a ``from_intent`` slot mapping (mapping -the ``/affirm`` intent to ``True``), so this user input could be used to fill -that slot. However, in this case the user is just responding to the -"do you want to continue?" question and so you select ``n``, the user input -should not be validated. The bot will then continue to ask for the -``outdoor_seating`` slot again. - -.. warning:: - - If there is a conflicting story in your training data, i.e. 
you just chose - to validate the input (meaning it will be printed with the ``forms:`` prefix), - but your stories file contains the same story where you don't validate - the input (meaning it's without the ``forms:`` prefix), you will need to make - sure to remove this conflicting story. When this happens, there is a warning - prompt that reminds you to do this: - - **WARNING: FormPolicy predicted no form validation based on previous training - stories. Make sure to remove contradictory stories from training data** - - Once you've removed that story, you can press enter and continue with - interactive learning diff --git a/docs/core/knowledge-bases.rst b/docs/core/knowledge-bases.rst deleted file mode 100644 index 355bda83a128..000000000000 --- a/docs/core/knowledge-bases.rst +++ /dev/null @@ -1,563 +0,0 @@ -:desc: Leverage information from knowledge bases inside conversations using ActionQueryKnowledgeBase - in open source bot framework Rasa. - -.. _knowledge_base_actions: - -Knowledge Base Actions -====================== - -.. edit-link:: - -.. warning:: - This feature is experimental. - We introduce experimental features to get feedback from our community, so we encourage you to try it out! - However, the functionality might be changed or removed in the future. - If you have feedback (positive or negative) please share it with us on the `forum `_. - -.. contents:: - :local: - -Knowledge base actions enable you to handle the following kind of conversations: - -.. image:: ../_static/images/knowledge-base-example.png - -A common problem in conversational AI is that users do not only refer to certain objects by their names, -but also use reference terms such as "the first one" or "it". -We need to keep track of the information that was presented to resolve these mentions to -the correct object. 
- -In addition, users may want to obtain detailed information about objects during a conversation -- -for example, whether a restaurant has outside seating, or how expensive it is. -In order to respond to those user requests, knowledge about the restaurant domain is needed. -Since the information is subject to change, hard-coding the information isn't the solution. - - -To handle the above challenges, Rasa can be integrated with knowledge bases. To use this integration, you can create a -custom action that inherits from ``ActionQueryKnowledgeBase``, a pre-written custom action that contains -the logic to query a knowledge base for objects and their attributes. - -You can find a complete example in ``examples/knowledgebasebot`` -(`knowledge base bot `_), as well as instructions -for implementing this custom action below. - - -Using ``ActionQueryKnowledgeBase`` ----------------------------------- - -.. _create_knowledge_base: - -Create a Knowledge Base -~~~~~~~~~~~~~~~~~~~~~~~ - -The data used to answer the user's requests will be stored in a knowledge base. -A knowledge base can be used to store complex data structures. -We suggest you get started by using the ``InMemoryKnowledgeBase``. -Once you want to start working with a large amount of data, you can switch to a custom knowledge base -(see :ref:`custom_knowledge_base`). - -To initialize an ``InMemoryKnowledgeBase``, you need to provide the data in a json file. -The following example contains data about restaurants and hotels. -The json structure should contain a key for every object type, i.e. ``"restaurant"`` and ``"hotel"``. -Every object type maps to a list of objects -- here we have a list of 3 restaurants and a list of 3 hotels. - -.. 
code-block:: json - - { - "restaurant": [ - { - "id": 0, - "name": "Donath", - "cuisine": "Italian", - "outside-seating": true, - "price-range": "mid-range" - }, - { - "id": 1, - "name": "Berlin Burrito Company", - "cuisine": "Mexican", - "outside-seating": false, - "price-range": "cheap" - }, - { - "id": 2, - "name": "I due forni", - "cuisine": "Italian", - "outside-seating": true, - "price-range": "mid-range" - } - ], - "hotel": [ - { - "id": 0, - "name": "Hilton", - "price-range": "expensive", - "breakfast-included": true, - "city": "Berlin", - "free-wifi": true, - "star-rating": 5, - "swimming-pool": true - }, - { - "id": 1, - "name": "Hilton", - "price-range": "expensive", - "breakfast-included": true, - "city": "Frankfurt am Main", - "free-wifi": true, - "star-rating": 4, - "swimming-pool": false - }, - { - "id": 2, - "name": "B&B", - "price-range": "mid-range", - "breakfast-included": false, - "city": "Berlin", - "free-wifi": false, - "star-rating": 1, - "swimming-pool": false - } - ] - } - - -Once the data is defined in a json file, called, for example, ``data.json``, you will be able to use this data file to create your -``InMemoryKnowledgeBase``, which will be passed to the action that queries the knowledge base. - -Every object in your knowledge base should have at least the ``"name"`` and ``"id"`` fields to use the default implementation. -If it doesn't, you'll have to :ref:`customize your InMemoryKnowledgeBase `. - - -Define the NLU Data -~~~~~~~~~~~~~~~~~~~ - -In this section: - -- we will introduce a new intent, ``query_knowledge_base`` -- we will annotate ``mention`` entities so that our model detects indirect mentions of objects like "the - first one" -- we will use :ref:`synonyms ` extensively - -For the bot to understand that the user wants to retrieve information from the knowledge base, you need to define -a new intent. We will call it ``query_knowledge_base``. 
- -We can split requests that ``ActionQueryKnowledgeBase`` can handle into two categories: -(1) the user wants to obtain a list of objects of a specific type, or (2) the user wants to know about a certain -attribute of an object. The intent should contain lots of variations of both of these requests: - -.. code-block:: md - - ## intent:query_knowledge_base - - what [restaurants](object_type:restaurant) can you recommend? - - list some [restaurants](object_type:restaurant) - - can you name some [restaurants](object_type:restaurant) please? - - can you show me some [restaurant](object_type:restaurant) options - - list [German](cuisine) [restaurants](object_type:restaurant) - - do you have any [mexican](cuisine) [restaurants](object_type:restaurant)? - - do you know the [price range](attribute:price-range) of [that one](mention)? - - what [cuisine](attribute) is [it](mention)? - - do you know what [cuisine](attribute) the [last one](mention:LAST) has? - - does the [first one](mention:1) have [outside seating](attribute:outside-seating)? - - what is the [price range](attribute:price-range) of [Berlin Burrito Company](restaurant)? - - what about [I due forni](restaurant)? - - can you tell me the [price range](attribute) of [that restaurant](mention)? - - what [cuisine](attribute) do [they](mention) have? - ... - -The above example just shows examples related to the restaurant domain. -You should add examples for every object type that exists in your knowledge base to the same ``query_knowledge_base`` intent. - -In addition to adding a variety of training examples for each query type, -you need to specify and annotate the following entities in your training examples: - -- ``object_type``: Whenever a training example references a specific object type from your knowledge base, the object type should - be marked as an entity. Use :ref:`synonyms ` to map e.g. ``restaurants`` to ``restaurant``, the correct - object type listed as a key in the knowledge base. 
-- ``mention``: If the user refers to an object via "the first one", "that one", or "it", you should mark those terms - as ``mention``. We also use synonyms to map some of the mentions to symbols. You can learn about that - in :ref:`resolving mentions `. -- ``attribute``: All attribute names defined in your knowledge base should be identified as ``attribute`` in the - NLU data. Again, use synonyms to map variations of an attribute name to the one used in the - knowledge base. - -Remember to add those entities to your domain file (as entities and slots): - -.. code-block:: yaml - - entities: - - object_type - - mention - - attribute - - slots: - object_type: - type: unfeaturized - mention: - type: unfeaturized - attribute: - type: unfeaturized - - -.. _create_action_query_knowledge_base: - - -Create an Action to Query your Knowledge Base -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To create your own knowledge base action, you need to inherit ``ActionQueryKnowledgeBase`` and pass the knowledge -base to the constructor of ``ActionQueryKnowledgeBase``. - -.. code-block:: python - - from rasa_sdk.knowledge_base.storage import InMemoryKnowledgeBase - from rasa_sdk.knowledge_base.actions import ActionQueryKnowledgeBase - - class MyKnowledgeBaseAction(ActionQueryKnowledgeBase): - def __init__(self): - knowledge_base = InMemoryKnowledgeBase("data.json") - super().__init__(knowledge_base) - -Whenever you create an ``ActionQueryKnowledgeBase``, you need to pass a ``KnowledgeBase`` to the constructor. -It can be either an ``InMemoryKnowledgeBase`` or your own implementation of a ``KnowledgeBase`` -(see :ref:`custom_knowledge_base`). -You can only pull information from one knowledge base, as the usage of multiple knowledge bases at the same time is not supported. - -This is the entirety of the code for this action! The name of the action is ``action_query_knowledge_base``. -Don't forget to add it to your domain file: - -.. 
code-block:: yaml - - actions: - - action_query_knowledge_base - -.. note:: - If you overwrite the default action name ``action_query_knowledge_base``, you need to add the following three - unfeaturized slots to your domain file: ``knowledge_base_objects``, ``knowledge_base_last_object``, and - ``knowledge_base_last_object_type``. - The slots are used internally by ``ActionQueryKnowledgeBase``. - If you keep the default action name, those slots will be automatically added for you. - -You also need to make sure to add a story to your stories file that includes the intent ``query_knowledge_base`` and -the action ``action_query_knowledge_base``. For example: - -.. code-block:: md - - ## Happy Path - * greet - - utter_greet - * query_knowledge_base - - action_query_knowledge_base - * goodbye - - utter_goodbye - -The last thing you need to do is to define the response ``utter_ask_rephrase`` in your domain file. -If the action doesn't know how to handle the user's request, it will use this response to ask the user to rephrase. -For example, add the following responses to your domain file: - -.. code-block:: md - - utter_ask_rephrase: - - text: "Sorry, I'm not sure I understand. Could you rephrase it?" - - text: "Could you please rephrase your message? I didn't quite get that." - -After adding all the relevant pieces, the action is now able to query the knowledge base. - -How It Works ------------- - -``ActionQueryKnowledgeBase`` looks at both the entities that were picked up in the request as well as the -previously set slots to decide what to query for. - -Query the Knowledge Base for Objects -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -In order to query the knowledge base for any kind of object, the user's request needs to include the object type. -Let's look at an example: - - `Can you please name some restaurants?` - -This question includes the object type of interest: "restaurant." 
-The bot needs to pick up on this entity in order to formulate a query -- otherwise the action would not know what objects the user is interested in. - -When the user says something like: - - `What Italian restaurant options in Berlin do I have?` - -The user wants to obtain a list of restaurants that (1) have Italian cuisine and (2) are located in -Berlin. If the NER detects those attributes in the request of the user, the action will use those to filter the -restaurants found in the knowledge base. - -In order for the bot to detect these attributes, you need to mark "Italian" and "Berlin" as entities in the NLU data: - -.. code-block:: md - - What [Italian](cuisine) [restaurant](object_type) options in [Berlin](city) do I have?. - -The names of the attributes, "cuisine" and "city," should be equal to the ones used in the knowledge base. -You also need to add those as entities and slots to the domain file. - -Query the Knowledge Base for an Attribute of an Object -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -If the user wants to obtain specific information about an object, the request should include both the object and -attribute of interest. -For example, if the user asks something like: - - `What is the cuisine of Berlin Burrito Company?` - -The user wants to obtain the "cuisine" (attribute of interest) for the restaurant "Berlin Burrito Company" (object of -interest). - -The attribute and object of interest should be marked as entities in the NLU training data: - -.. code-block:: md - - What is the [cuisine](attribute) of [Berlin Burrito Company](restaurant)? - -Make sure to add the object type, "restaurant," to the domain file as entity and slot. - - -.. _resolve_mentions: - -Resolve Mentions -~~~~~~~~~~~~~~~~ - -Following along from the above example, users may not always refer to restaurants by their names. -Users can either refer to the object of interest by its name, e.g. 
"Berlin Burrito Company" (representation string -of the object), or they may refer to a previously listed object via a mention, for example: - - `What is the cuisine of the second restaurant you mentioned?` - -Our action is able to resolve these mentions to the actual object in the knowledge base. -More specifically, it can resolve two mention types: (1) ordinal mentions, such as "the first one", and (2) -mentions such as "it" or "that one". - -**Ordinal Mentions** - -When a user refers to an object by its position in a list, it is called an ordinal mention. Here's an example: - -- User: `What restaurants in Berlin do you know?` -- Bot: `Found the following objects of type 'restaurant': 1: I due forni 2: PastaBar 3: Berlin Burrito Company` -- User: `Does the first one have outside seating?` - -The user referred to "I due forni" by the term "the first one". -Other ordinal mentions might include "the second one," "the last one," "any," or "3". - -Ordinal mentions are typically used when a list of objects was presented to the user. -To resolve those mentions to the actual object, we use an ordinal mention mapping which is set in the -``KnowledgeBase`` class. -The default mapping looks like: - - .. code-block:: python - - { - "1": lambda l: l[0], - "2": lambda l: l[1], - "3": lambda l: l[2], - "4": lambda l: l[3], - "5": lambda l: l[4], - "6": lambda l: l[5], - "7": lambda l: l[6], - "8": lambda l: l[7], - "9": lambda l: l[8], - "10": lambda l: l[9], - "ANY": lambda l: random.choice(l), - "LAST": lambda l: l[-1], - } - -The ordinal mention mapping maps a string, such as "1", to the object in a list, e.g. ``lambda l: l[0]``, meaning the -object at index ``0``. - -As the ordinal mention mapping does not, for example, include an entry for "the first one", -it is important that you use :ref:`entity_synonyms` to map "the first one" in your NLU data to "1": - -.. code-block:: md - - Does the [first one](mention:1) have [outside seating](attribute:outside-seating)? 
- -The NER detects "first one" as a ``mention`` entity, but puts "1" into the ``mention`` slot. -Thus, our action can take the ``mention`` slot together with the ordinal mention mapping to resolve "first one" to -the actual object "I due forni". - -You can overwrite the ordinal mention mapping by calling the function ``set_ordinal_mention_mapping()`` on your -``KnowledgeBase`` implementation (see :ref:`customize_in_memory_knowledge_base`). - -**Other Mentions** - -Take a look at the following conversation: - -- User: `What is the cuisine of PastaBar?` -- Bot: `PastaBar has an Italian cuisine.` -- User: `Does it have wifi?` -- Bot: `Yes.` -- User: `Can you give me an address?` - -In the question "Does it have wifi?", the user refers to "PastaBar" by the word "it". -If the NER detected "it" as the entity ``mention``, the knowledge base action would resolve it to the last mentioned -object in the conversation, "PastaBar". - -In the next input, the user refers indirectly to the object "PastaBar" instead of mentioning it explicitly. -The knowledge base action would detect that the user wants to obtain the value of a specific attribute, in this case, the address. -If no mention or object was detected by the NER, the action assumes the user is referring to the most recently -mentioned object, "PastaBar". - -You can disable this behavior by setting ``use_last_object_mention`` to ``False`` when initializing the action. - - -Customization -------------- - -Customizing ``ActionQueryKnowledgeBase`` -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -You can overwrite the following two functions of ``ActionQueryKnowledgeBase`` if you'd like to customize what the bot -says to the user: - -- ``utter_objects()`` -- ``utter_attribute_value()`` - -``utter_objects()`` is used when the user has requested a list of objects. 
-Once the bot has retrieved the objects from the knowledge base, it will respond to the user by default with a message, formatted like: - - `Found the following objects of type 'restaurant':` - `1: I due forni` - `2: PastaBar` - `3: Berlin Burrito Company` - -Or, if no objects are found, - - `I could not find any objects of type 'restaurant'.` - -If you want to change the utterance format, you can overwrite the method ``utter_objects()`` in your action. - -The function ``utter_attribute_value()`` determines what the bot utters when the user is asking for specific information about -an object. - -If the attribute of interest was found in the knowledge base, the bot will respond with the following utterance: - - `'Berlin Burrito Company' has the value 'Mexican' for attribute 'cuisine'.` - -If no value for the requested attribute was found, the bot will respond with - - `Did not find a valid value for attribute 'cuisine' for object 'Berlin Burrito Company'.` - -If you want to change the bot utterance, you can overwrite the method ``utter_attribute_value()``. - -.. note:: - There is a `tutorial `_ on our blog about - how to use knowledge bases in custom actions. The tutorial explains the implementation behind - ``ActionQueryKnowledgeBase`` in detail. - - -Creating Your Own Knowledge Base Actions -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -``ActionQueryKnowledgeBase`` should allow you to easily get started with integrating knowledge bases into your actions. -However, the action can only handle two kind of user requests: - -- the user wants to get a list of objects from the knowledge base -- the user wants to get the value of an attribute for a specific object - -The action is not able to compare objects or consider relations between objects in your knowledge base. -Furthermore, resolving any mention to the last mentioned object in the conversation might not always be optimal. - -If you want to tackle more complex use cases, you can write your own custom action. 
-We added some helper functions to ``rasa_sdk.knowledge_base.utils`` -(`link to code `_ ) -to help you when implement your own solution. -We recommend using ``KnowledgeBase`` interface so that you can still use the ``ActionQueryKnowledgeBase`` -alongside your new custom action. - -If you write a knowledge base action that tackles one of the above use cases or a new one, be sure to tell us about -it on the `forum `_! - - -.. _customize_in_memory_knowledge_base: - -Customizing the ``InMemoryKnowledgeBase`` -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The class ``InMemoryKnowledgeBase`` inherits ``KnowledgeBase``. -You can customize your ``InMemoryKnowledgeBase`` by overwriting the following functions: - -- ``get_key_attribute_of_object()``: To keep track of what object the user was talking about last, we store the value - of the key attribute in a specific slot. Every object should have a key attribute that is unique, - similar to the primary key in a relational database. By default, the name of the key attribute for every object type - is set to ``id``. You can overwrite the name of the key attribute for a specific object type by calling - ``set_key_attribute_of_object()``. -- ``get_representation_function_of_object()``: Let's focus on the following restaurant: - - .. code-block:: json - - { - "id": 0, - "name": "Donath", - "cuisine": "Italian", - "outside-seating": true, - "price-range": "mid-range" - } - - When the user asks the bot to list any Italian restaurant, it doesn't need all of the details of the restaurant. - Instead, you want to provide a meaningful name that identifies the restaurant -- in most cases, the name of the object will do. - The function ``get_representation_function_of_object()`` returns a lambda function that maps the - above restaurant object to its name. - - .. 
code-block:: python - - lambda obj: obj["name"] - - This function is used whenever the bot is talking about a specific object, so that the user is presented a meaningful - name for the object. - - By default, the lambda function returns the value of the ``"name"`` attribute of the object. - If your object does not have a ``"name"`` attribute , or the ``"name"`` of an object is - ambiguous, you should set a new lambda function for that object type by calling - ``set_representation_function_of_object()``. -- ``set_ordinal_mention_mapping()``: The ordinal mention mapping is needed to resolve an ordinal mention, such as - "second one," to an object in a list. By default, the ordinal mention mapping looks like this: - - .. code-block:: python - - { - "1": lambda l: l[0], - "2": lambda l: l[1], - "3": lambda l: l[2], - "4": lambda l: l[3], - "5": lambda l: l[4], - "6": lambda l: l[5], - "7": lambda l: l[6], - "8": lambda l: l[7], - "9": lambda l: l[8], - "10": lambda l: l[9], - "ANY": lambda l: random.choice(l), - "LAST": lambda l: l[-1], - } - - You can overwrite it by calling the function ``set_ordinal_mention_mapping()``. - If you want to learn more about how this mapping is used, check out :ref:`resolve_mentions`. - - -See the `example bot `_ for an -example implementation of an ``InMemoryKnowledgeBase`` that uses the method ``set_representation_function_of_object()`` -to overwrite the default representation of the object type "hotel." -The implementation of the ``InMemoryKnowledgeBase`` itself can be found in the -`rasa-sdk `_ package. - - -.. _custom_knowledge_base: - -Creating Your Own Knowledge Base -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -If you have more data or if you want to use a more complex data structure that, for example, involves relations between -different objects, you can create your own knowledge base implementation. -Just inherit ``KnowledgeBase`` and implement the methods ``get_objects()``, ``get_object()``, and -``get_attributes_of_object()``. 
The `knowledge base code `_ -provides more information on what those methods should do. - -You can also customize your knowledge base further, by adapting the methods mentioned in the section -:ref:`customize_in_memory_knowledge_base`. - -.. note:: - We wrote a `blog post `_ - that explains how you can set up your own knowledge base. diff --git a/docs/core/old-core-change-log.rst b/docs/core/old-core-change-log.rst deleted file mode 100644 index 6ef81dd2a22e..000000000000 --- a/docs/core/old-core-change-log.rst +++ /dev/null @@ -1,1025 +0,0 @@ -:desc: Rasa Core Changelog - -.. _old-core-change-log: - -Core Change Log -=============== - -All notable changes to this project will be documented in this file. -This project adheres to `Semantic Versioning`_ starting with version 0.2.0. - -[0.14.4] - 2019-05-13 -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- correctly process form actions in core evaluations - -[0.14.3] - 2019-05-07 -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- fixed interactive learning history printing - -[0.14.2] - 2019-05-07 -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- fixed required version of ``rasa_core_sdk`` during installation - -[0.14.1] - 2019-05-02 -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- fixed MappingPolicy bug upon prediction of ACTION_LISTEN after mapped action - -[0.14.0] - 2019-04-23 -^^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- ``tf.ConfigProto`` configuration can now be specified - for tensorflow based pipelines -- open api spec for the Rasa Core SDK action server -- documentation about early deactivation of a form in validation -- Added max_event_history in tracker_store to set this value in DialogueStateTracker -- utility functions for colored logging -- open webbrowser when visualizing stories -- added ``/parse`` endpoint to query for NLU results -- File based event store -- ability to configure event store using the endpoints file -- added ability to use multiple env vars per line in yaml files -- added ``priority`` property of policies to 
influence best policy in - the case of equal confidence -- **support for python 3.7** -- ``Tracker.active_form`` now includes ``trigger_message`` attribute to allow - access to message triggering the form -- ``MappingPolicy`` which can be used to directly map an intent to an action - by adding the ``triggers`` keyword to an intent in the domain. -- default action ``action_back``, which when triggered with ``/back`` allows - the user to undo their previous message - -Changed -------- -- starter packs are now tested in parallel with the unittests, - and only on master and branches ending in ``.x`` (i.e. new version releases) -- renamed ``train_dialogue_model`` to ``train`` -- renamed ``rasa_core.evaluate`` to ``rasa_core.test`` -- ``event_broker.publish`` receives the event as a dict instead of text -- configuration key ``store_type`` of the tracker store endpoint configuration - has been renamed to ``type`` to allow usage across endpoints -- renamed ``policy_metadata.json`` to ``metadata.json`` for persisted models -- ``scores`` array returned by the ``/conversations/{sender_id}/predict`` - endpoint is now sorted according to the actions' scores. -- now randomly created augmented stories are subsampled during training and marked, - so that memo policies can ignore them -- changed payloads from "text" to "message" in files: server.yml, docs/connectors.rst, - rasa_core/server.py, rasa_core/training/interactive.py, tests/test_interactive.py -- dialogue files in ``/data/test_dialogues`` were updated with conversations - from the bots in ``/examples`` -- updated to tensorflow 1.13 - -Removed -------- -- removed ``admin_token`` from ``RasaChatInput`` since it wasn't used - -Fixed ------ -- When a ``fork`` is used in interactive learning, every forked - storyline is saved (not just the last) -- Handles slot names which contain characters that are invalid as python - variable name (e.g. 
dot) in a template - -[0.13.8] - 2019-04-16 -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- Message parse data no longer passed to graph node label in interactive - learning visualization - -[0.13.7] - 2019-04-01 -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- correctly process form actions in end-to-end evaluations - -[0.13.6] - 2019-03-28 -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- correctly process intent messages in end-to-end evaluations - -[Unreleased 0.13.8.aX] -^^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- Message parse data no longer passed to graph node label in interactive - learning visualization - -[0.13.7] - 2019-04-01 -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- correctly process form actions in end-to-end evaluations - -[0.13.6] - 2019-03-28 -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- correctly process intent messages in end-to-end evaluations - -[0.13.4] - 2019-03-19 -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- properly tag docker image as ``stable`` (instead of tagging alpha tags) - -[0.13.3] - 2019-03-04 -^^^^^^^^^^^^^^^^^^^^^ - -Changed -------- -- Tracker Store Mongo DB's documentation now has ``auth_source`` parameter, - which is used for passing database name associated with the user's - credentials. - -[0.13.2] - 2019-02-06 -^^^^^^^^^^^^^^^^^^^^^ - -Changed -------- -- ``MessageProcessor`` now also passes ``message_id`` to the interpreter - when parsing with a ``RasaNLUHttpInterpreter`` - -[0.13.1] - 2019-01-29 -^^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- ``message_id`` can now be passed in the payload to the - ``RasaNLUHttpInterpreter`` - -Fixed ------ -- fixed domain persistence after exiting interactive learning -- fix form validation question error in interactive learning - -.. 
_corev0-13-0: - -[0.13.0] - 2019-01-23 -^^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- A support for session persistence mechanism in the ``SocketIOInput`` - compatible with the example SocketIO WebChat + short explanation on - how session persistence should be implemented in a frontend -- ``TwoStageFallbackPolicy`` which asks the user for their affirmation - if the NLU confidence is low for an intent, for rephrasing the intent - if they deny the suggested intent, and does finally an ultimate fallback - if it does not get the intent right -- Additional checks in PolicyEnsemble to ensure that custom Policy - classes' ``load`` function returns the correct type -- Travis script now clones and tests the Rasa stack starter pack -- Entries for tensorflow and sklearn versions to the policy metadata -- SlackInput wont ignore ``app_mention`` event anymore. - Will handle messages containing @mentions to bots and will respond to these - (as long as the event itself is enabled in the application hosting the bot) -- Added sanitization mechanism for SlackInput that (in its current shape and form) - strips bot's self mentions from messages posted using the said @mentions. -- Added sanitization mechanism for SlackInput that (in its current - shape and form) strips bot's self mentions from messages posted using - the said @mentions. -- Added random seed option for KerasPolicy and EmbeddingPolicy - to allow for reproducible training results -- ``InvalidPolicyConfig`` error if policy in policy configuration could not be - loaded, or if ``policies`` key is empty or not provided -- Added a unique identifier to ``UserMessage`` and the ``UserUttered`` event. 
- -Removed -------- -- removed support for deprecated intents/entities format - -Changed -------- -- replaced ``pytest-pep8`` with ``pytest-pycodestyle`` -- switch from ``PyInquirer`` to ``questionary`` for the display of - commandline interface (to avoid prompt toolkit 2 version issues) -- if NLU classification returned ``None`` in interactive training, - directly ask a user for a correct intent -- trigger ``fallback`` on low nlu confidence - only if previous action is ``action_listen`` -- updated docs for interactive learning to inform users of the - ``--core`` flag -- Change memoization policies confidence score to 1.1 to override ML policies -- replaced flask server with async sanic - -Fixed ------ -- fix error during interactive learning which was caused by actions which - dispatched messages using ``dispatcher.utter_custom_message`` -- re-added missing ``python-engineio`` dependency -- fixed not working examples in ``examples/`` -- strip newlines from messages so you don't have something like "\n/restart\n" -- properly reload domain when using ``/model`` endpoint to upload new model -- updated documentation for custom channels to use the ``credentials.yml`` - -[0.12.3] - 2018-12-03 -^^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- added ``scipy`` dependency (previously pulled in through keras) -- added element representation for command-line output - -Changed -------- -- improved button representation for custom buttons in command-line - -Changed -------- -- randomized initial sender_id during interactive training to avoid - loading previous sessions from persistent tracker stores - -Removed -------- -- removed keras dependency, since ``keras_policy`` uses ``tf.keras`` - - -[0.12.2] - 2018-11-20 -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- argument handling on evaluate script -- added basic sanitization during visualization - - -[0.12.1] - 2018-11-11 -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- fixed interactive learning to properly submit executed actions to the action - 
server -- allow the specification of the policy configuration while using the - visualization script -- use default configuration if no policy configuration is passed -- fixed html delivery from interactive server script (package compatible) -- ``SlackBot`` when created in ``SlackInputChannel`` inherits the - ``slack_channel`` property, allowing Slack bots to post to any channel - instead of only back to the user -- fix writing of new domain file from interactive learning -- fix reading of state featurizers from yaml -- fix reading of batch_size parameter in keras policy - - -.. _corev0-12-0: - -[0.12.0] - 2018-11-11 -^^^^^^^^^^^^^^^^^^^^^ - -.. warning:: - - This is major new version with a lot of changes under the hood as well - as on the API level. Please take a careful look at the - :ref:`migration-guide` guide before updating. **You need to retrain your models.** - -Added ------ -- new connector for the Cisco Webex Teams chat -- openapi documentation of server API -- NLU data learned through interactive learning will now be stored in a - separate markdown-format file (any previous NLU data is merged) -- Command line interface for interactive learning now displays policy - confidence alongside the action name -- added action prediction confidence & policy to ``ActionExecuted`` event -- the Core policy configuration can now be set in a config.yaml file. - This makes training custom policies possible. 
-- both the date and the time at which a model was trained are now - included in the policy's metadata when it is persisted -- show visualization of conversation while doing interactive learning -- option for end-to-end evaluation of Rasa Core and NLU examples in - ``evaluate.py`` script -- `/conversations/{sender_id}/story` endpoint for returning - the end-to-end story describing a conversation -- docker-compose file to start a rasa core server together with nlu, - an action server, and duckling -- http server (``rasa_core.run --enable-api``) evaluation endpoint -- ability to add tracker_store using endpoints.yml -- ability load custom tracker store modules using the endpoints.yml -- ability to add an event broker using an endpoint configuration file -- raise an exception when ``server.py`` is used instead of - ``rasa_core.run --enable-api`` -- add documentation on how to configure endpoints within a configuration file -- ``auth_source`` parameter in ``MongoTrackerStore`` defining the database to - authenticate against -- missing instructions on setting up the facebook connector -- environment variables specified with ``${env_variable}`` in a yaml - configuration file are now replaced with the value of the - environment variable -- detailed documentation on how to deploy Rasa with Docker -- make ``wait_time_between_pulls`` configurable through endpoint - configuration -- add ``FormPolicy`` to handle form action prediction -- add ``ActionExecutionRejection`` exception and - ``ActionExecutionRejected`` event -- add default action ``ActionDeactivateForm()`` -- add ``formbot`` example -- add ability to turn off auto slot filling with entity for each - slot in domain.yml -- add ``InvalidDomain`` exception -- add ``active_form_...`` to state dictionary -- add ``active_form`` and ``latest_action_name`` properties to - ``DialogueStateTracker`` -- add ``Form`` and ``FormValidation`` events -- add ``REQUESTED_SLOT`` constant -- add ability to read ``action_listen`` from 
stories -- added train/eval scripts to compare policies - -Changed -------- -- improved response format for ``/predict`` endpoint -- all error messages from the server are now in json format -- ``agent.log_message`` now returns a tracker instead of the trackers state -- the core container does not load the nlu model by default anymore. - Instead it can be connected to a nlu server. -- stories are now visualized as ``.html`` page instead of an image -- move and deduplicate restaurantbot nlu data from ``franken_data.json`` - to ``nlu_data.md`` -- forms were completely reworked, see changelog in ``rasa_core_sdk`` -- state featurization if some form is active changed -- ``Domain`` raises ``InvalidDomain`` exception -- interactive learning is now started with rasa_core.train interactive -- passing a policy config file to train a model is now required -- flags for output of evaluate script have been merged to one flag ``--output`` - where you provide a folder where any output from the script should be stored - -Removed -------- -- removed graphviz dependency -- policy config related flags in training script (see migration guide) - - -Fixed ------ -- fixed an issue with boolean slots where False and None had the same value - (breaking model compatibility with models that use a boolean slot) -- use utf8 everywhere when handling file IO -- argument ``--connector`` on run script accepts custom channel module names -- properly handle non ascii categorical slot values, e.g. ``大于100亿元`` -- fixed HTTP server attempting to authenticate based on incorrect path to - the correct JWT data field -- all sender ids from channels are now handled as `str`. - Sender ids from old messages with an `int` id are converted to `str`. 
-- legacy pep8 errors - - -[0.11.12] - 2018-10-11 -^^^^^^^^^^^^^^^^^^^^^^ - -Changed -------- -- Remove livechat widget from docs - - -[0.11.11] - 2018-10-05 -^^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- Add missing name() to facebook Messenger class - - -[0.11.10] - 2018-10-05 -^^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- backport fix to JWT schema - - -[0.11.9] - 2018-10-04 -^^^^^^^^^^^^^^^^^^^^^ - -Changed -------- -- pin tensorflow 1.10.0 - -[0.11.8] - 2018-09-28 -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- cancel reminders if there has been a restarted event after the reminder - -Changed -------- -- JWT authentication now checks user roles. The ``admin`` role may access all - endpoints. For endpoints which contain a ``sender_id`` parameter, users - with the ``user`` role may only call endpoints where the ``sender_id`` - matches the user's ``username``. - -[0.11.7] - 2018-09-26 -^^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- custom message method in rocketchat channel - -Fixed ------ -- don't fail if rasa and rest input channels are used together -- wrong parameter name in rocketchat channel methods -- Software 2.0 link on interactive learning documentation page went to - Tesla's homepage, now it links to Karpathy blogpost - -[0.11.6] - 2018-09-20 -^^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- ``UserMessage`` and ``UserUttered`` classes have a new attribute - ``input_channel`` that stores the name of the ``InputChannel`` - through which the message was received - -[0.11.5] - 2018-09-20 -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- numpy version incompatibility between rasa core and tensorflow - -[0.11.4] - 2018-09-19 -^^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- a flag ``--fail_on_prediction_errors`` to the ``evaluate.py`` script - - if used when running the evaluation, the script will fail with a non - 0 exit code if there is at least one prediction error. This can be - used on CIs to validate models against test stories. 
-- JWT support: parameters to allow clients to authenticate requests to - the rasa_core.server using JWT's in addition to normal token based auth -- added socket.io input / output channel -- ``UserMessage`` and ``UserUttered`` classes have a new attribute - ``input_channel`` that stores the name of the ``InputChannel`` - through which the message was received - -Changed -------- -- dump failed stories after evaluation in the normal story format instead of - as a text file -- do not run actions during evaluation. instead, action are only predicted - and validated against the gold story. -- improved the online learning experience on the CLI -- made finetuning during online learning optional (use ``--finetune`` if - you want to enable it) - -Removed -------- -- package pytest-services since it wasn't necessary - -Fixed ------ -- fixed an issue with the followup (there was a name confusion, sometimes - the followup action would be set to the non existent ``follow_up_action`` - attribute instead of ``followup_action``) - -[0.11.3] - 2018-09-04 -^^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- callback output channel, receives messages and uses a REST endpoint to - respond with messages - -Changed -------- -- channel input creation moved to the channel, every channel can now - customize how it gets created from the credentials file - -[0.11.2] - 2018-09-04 -^^^^^^^^^^^^^^^^^^^^^ - -Changed -------- -- improved documentation for events (e.g. including json serialization) - -Removed -------- -- outdated documentation for removed endpoints in the server - (``/parse`` & ``/continue``) - -Fixed ------ -- read in fallback command line args - -[0.11.1] - 2018-08-30 -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- increased minimal compatible model version to 0.11.0 - -.. _corev0-11-0: - -[0.11.0] - 2018-08-30 -^^^^^^^^^^^^^^^^^^^^^ - -.. warning:: - - This is major new version with a lot of changes under the hood as well - as on the API level. 
Please take a careful look at the - :ref:`migration-guide` guide before updating. You need to retrain your models. - - -Added ------ -- added microsoft botframework input and output channels -- added rocket chat input and output channels -- script parameter ``--quiet`` to set the log level to ``WARNING`` -- information about the python version a model has been trained with to the - model metadata -- more emoji support for PY2 -- intent confidence support in RegexInterpreter -- added parameter to train script to pull training data from an url instead - of a stories file -- added new policy: :ref:`embedding_policy` implemented in tensorflow - -Changed -------- -- default log level for all scripts has been changed from ``WARNING`` to - ``INFO``. -- format of the credentials file to allow specifying the credentials for - multiple channels -- webhook URLs for the input channels have changed and need to be reset -- deprecated using ``rasa_core.server`` as a script - use - ``rasa_core.run --enable_api`` instead -- collecting output channel will no properly collect events for images, - buttons, and attachments - -Removed -------- -- removed the deprecated ``TopicSet`` event -- removed ``tracker.follow_up_action`` - use the ``FollowupAction`` - event instead -- removed ``action_factory: remote`` from domain file - the domain is - always run over http -- removed ``OnlineLearningPolicy`` - use the ``training.online`` - script instead - -Fixed -------- -- lots of type annotations -- some invalid documentation references -- changed all ``logger.warn`` to ``logger.warning`` - -[0.10.4] - 2018-08-08 -^^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- more emoji support for PY2 -- intent confidence support in RegexInterpreter - -[0.10.3] - 2018-08-03 -^^^^^^^^^^^^^^^^^^^^^ - -Changed -------- -- updated to Rasa NLU 0.13 -- improved documentation quickstart - -Fixed ------ -- server request argument handling on python 3 -- creation of training data story graph - removes more nodes and speeds 
up - the training - -[0.10.2] - 2018-07-24 -^^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- new ``RasaChatInput`` channel -- option to ignore entities for certain intents - -Fixed ------ -- loading of NLU model - -[0.10.1] - 2018-07-18 -^^^^^^^^^^^^^^^^^^^^^ - -Changed -------- - -- documentation changes - -.. _corev0-10-0: - -[0.10.0] - 2018-07-17 -^^^^^^^^^^^^^^^^^^^^^ - -.. warning:: - - This is a major new release with backward incompatible changes. Old trained - models can not be read with the new version - you need to retrain your model. - View the :ref:`migration-guide` for details. - -Added ------ -- allow bot responses to be managed externally (instead of putting them into - the ``domain.yml``) -- options to prevent slack from making re-deliver message upon meeting failure condition. - the default is to ignore ``http_timeout``. -- added ability to create domain from yaml string and export a domain to a yaml string -- added server endpoint to fetch domain as json or yaml -- new default action ActionDefaultFallback -- event streaming to a ``RabbitMQ`` message broker using ``Pika`` -- docs section on event brokers -- ``Agent()`` class supports a ``model_server`` ``EndpointConfig``, which it regularly queries to fetch dialogue models -- this can be used with ``rasa_core.server`` with the ``--endpoint`` option (the key for this the model server config is ``model``) -- docs on model fetching from a URL - -Changed -------- -- changed the logic inside AugmentedMemoizationPolicy to recall actions only if they are the same in training stories -- moved AugmentedMemoizationPolicy to memoization.py -- wrapped initialization of BackgroundScheduler in try/except to allow running on jupyterhub / binderhub/ colaboratory -- fixed order of events logged on a tracker: action executed is now always - logged before bot utterances that action created - -Removed -------- -- removed support for topics - -[0.9.6] - 2018-06-18 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- fixed fallback policy 
data generation - -[0.9.5] - 2018-06-14 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- handling of max history configuration in policies -- fixed instantiation issues of fallback policy - -[0.9.4] - 2018-06-07 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- fixed evaluation script -- fixed story file loading (previously some story files with checkpoints could - create wrong training data) -- improved speed of data loading - -[0.9.3] - 2018-05-30 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- added token auth to all endpoints of the core server - - -[0.9.2] - 2018-05-30 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- fix handling of max_history parameter in AugmentedMemoizationPolicy - -[0.9.1] - 2018-05-29 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- persistence of training data collected during online learning if default - file path is used -- the ``agent()`` method used in some ``rasa_core.server`` endpoints is - re-run at every new call of the ``ensure_loaded_agent`` decorator -- fixed OR usage of intents - -.. _corev0-9-0: - -[0.9.0] - 2018-05-24 -^^^^^^^^^^^^^^^^^^^^ - -.. warning:: - - This is a major new release with backward incompatible changes. Old trained - models can not be read with the new version - you need to retrain your model. - -Added ------ -- supported loading training data from a folder - loads all stories from - all files in that directory -- parameter to specify NLU project when instantiating a ``RasaNLUInterpreter`` -- simple ``/respond`` endpoint to get bot response to a user message -- ``/conversations`` endpoint for listing sender ids of running conversations -- added a Mattermost channel that allows Rasa Core to communicate via a Mattermost app -- added a Twilio channel that allows Rasa Core to communicate via SMS -- ``FallbackPolicy`` for executing a default message if NLU or core model confidence is low. -- ``FormAction`` class to make it easier to collect multiple pieces of information with fewer stories. 
-- Dockerfile for ``rasa_core.server`` with a dialogue and Rasa NLU model - -Changed -------- -- moved server from klein to flask -- updated dependency fbmessenger from 4.3.1 to 5.0.0 -- updated Rasa NLU to 0.12.x -- updated all the dependencies to the latest versions - -Fixed ------ -- List slot is now populated with a list -- Slack connector: ``slack_channel`` kwarg is used to send messages either back to the user or to a static channel -- properly log to a file when using the ``run`` script -- documentation fix on stories - - -[0.8.6] - 2018-04-18 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- pin rasa nlu version to 0.11.4 (0.12.x only works with master) - -[0.8.5] - 2018-03-19 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- updated google analytics docs survey code - - -[0.8.4] - 2018-03-14 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- pin ``pykwalify<=1.6.0`` as update to ``1.6.1`` breaks compatibility - -[0.8.3] - 2018-02-28 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- pin ``fbmessenger`` version to avoid major update - -[0.8.2] - 2018-02-13 -^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- script to reload a dumped trackers state and to continue the conversation - at the end of the stored dialogue - -Changed -------- -- minor updates to dependencies - -Fixed ------ -- fixed datetime serialization of reminder event - -[0.8.1] - 2018-02-01 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- removed deque to support python 3.5 -- Documentation improvements to tutorials -- serialization of date time value for ``ReminderScheduled`` event - -.. _corev0-8-0: - -[0.8.0] - 2018-01-30 -^^^^^^^^^^^^^^^^^^^^ - -This is a major version change. Make sure to take a look at the -:ref:`migration-guide` in the documentation for advice on how to -update existing projects. 
- -Added ------ -- ``--debug`` and ``--verbose`` flags to scripts (train.py, run.py, server.py) - to set the log level -- support for story cycles when using checkpoints -- added a new machine learning policy `SklearnPolicy` that uses an sklearn - classifier to predict actions (logistic regression by default) -- warn if action emits events when using a model that it did never emit in - any of the stories the model was trained on -- support for event pushing and endpoints to retrieve the tracker state from the server -- Timestamp to every event -- added a Slack channel that allows Rasa Core to communicate via a Slack app -- added a Telegram channel that allows Rasa Core to communicate via a Telegram bot - -Changed -------- -- rewrite of the whole FB connector: replaced pymessenger library with fbmessenger -- story file utterance format changed from ``* _intent_greet[name=Rasa]`` - to ``* intent_greet{"name": "Rasa"}`` (old format is still supported but - deprecated) -- persist action names in domain during model persistence -- improved travis build speed by not using miniconda -- don't fail with an exception but with a helpful error message if an - utterance template contains a variable that can not be filled -- domain doesn't fail on unknown actions but emits a warning instead. this is to support reading - logs from older conversation if one recently removed an action from the domain - -Fixed ------ -- proper evaluation of stories with checkpoints -- proper visualization of stories with checkpoints -- fixed float slot min max value handling -- fixed non integer feature decoding, e.g. 
used for memoization policy -- properly log to specified file when starting Rasa Core server -- properly calculate offset of last reset event after loading tracker from - tracker store -- UserUtteranceReverted action incorrectly triggered actions to be replayed - - -[0.7.9] - 2017-11-29 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- visualization using Networkx version 2.x -- add output about line of failing intent when parsing story files - -[0.7.8] - 2017-11-27 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- Pypi readme rendering - -[0.7.7] - 2017-11-24 -^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- log bot utterances to tracker - -Fixed ------ -- documentation improvements in README -- renamed interpreter argument to rasa core server - -[0.7.6] - 2017-11-15 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- moodbot example train command in docs - - -[0.7.5] - 2017-11-14 -^^^^^^^^^^^^^^^^^^^^ - -Changed -------- -- "sender_id" (and "DEFAULT_SENDER_ID") keyword consistency issue #56 - -Fixed ------ -- improved moodbot example - more nlu examples as well as better fitting of dialogue model - - -[0.7.4] - 2017-11-09 -^^^^^^^^^^^^^^^^^^^^ - -Changed -------- - -- added method to tracker to retrieve the latest entities #68 - -[0.7.3] - 2017-10-31 -^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- parameter to specify font size when rendering story visualization - -Fixed ------ -- fixed documentation of story visualization - -[0.7.2] - 2017-10-30 -^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- added facebook bot example -- added support for conditional checkpoints. a checkpoint can be restricted to - only allow one to use it if certain slots are set. 
see docs for details -- utterance templates in domain yaml support buttons and images -- validate domain yaml and raise exception on invalid file -- ``run`` script to load models and handle messages from an input channel - -Changed -------- -- small dropout in standard keras model to decrease reliance on exact intents -- a LOT of documentation improvements - -Fixed ------ -- fixed http error if action listen is not confirmed. #42 - -[0.7.1] - 2017-10-06 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- issues with restart events. They created wrong a messed up history leading to - wrong predictions - - -.. _corev0-7-0: - -[0.7.0] - 2017-10-04 -^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- support for Rasa Core usage as a server with remote action execution - -Changed -------- -- switched to max code line length 80 -- removed action id - use ``action.name()`` instead. if an action implementation overrides the name, it should include the ``action_`` prefix (as it is not automatically added anymore) -- renamed ``rasa_dm.util`` to ``rasa_dm.utils`` -- renamed the whole package to ``rasa_core`` (so ``rasa_dm`` is gone!) -- renamed ``Reminder`` attribute ``id`` to ``name`` -- a lot of documentation improvements. docs are now at https://rasa.com/docs/core -- use hashing when writing memorized turns into persistence - requires retraining of all models that are trained with a version prior to this -- changed ``agent.handle_message(...)`` interface for easier usage - -.. _corev0-6-0: - -[0.6.0] - 2017-08-27 -^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- support for multiple policies (e.g. one memoization and a Keras policy at the same time) -- loading domains from yaml files instead of defining them with python code -- added an api layer (called ``Agent``) for you to use for 95% of the things you want to do (training, persistence, loading models) -- support for reminders - -Changed -------- -- large refactoring of code base - -.. 
_corev0-5-0: - -[0.5.0] - 2017-06-18 -^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- ``ScoringPolicy`` added to policy implementations (less strict than standard default policy) -- ``RasaNLUInterpreter`` to run a nlu instance within dm (instead of using the http interface) -- more tests - -Changed -------- -- ``UserUtterance`` now holds the complete parse data from nlu (e.g. to access attributes other than entities or intent) -- ``Turn`` has a reference to a ``UserUtterance`` instead of directly storing intent & entities (allows access to other data) -- Simplified interface of output channels -- order of actions in the DefaultPolicy in ``possible_actions`` (``ActionListen`` now always has index 0) - -Fixed ------ -- ``RedisTrackerStore`` checks if tracker is stored before accessing it (otherwise a ``None`` access exception is thrown) -- ``RegexInterpreter`` checks if the regex actually matches the message instead of assuming it always does -- ``str`` implementation for all events -- ``Controller`` can be started without an input channel (e.g. messages need to be fed into the queue manually) - -.. _corev0-2-0: - -[0.2.0] - 2017-05-18 -^^^^^^^^^^^^^^^^^^^^ -First released version. - - -.. _`master`: https://github.com/RasaHQ/rasa_core/ - -.. _`Semantic Versioning`: http://semver.org/ diff --git a/docs/core/old-core-migration-guide.rst b/docs/core/old-core-migration-guide.rst deleted file mode 100644 index cab6e31abf65..000000000000 --- a/docs/core/old-core-migration-guide.rst +++ /dev/null @@ -1,429 +0,0 @@ -:desc: Information about changes between major versions of chatbot framework - Rasa Core and how you can migrate from one version to another. - -.. _old-core-migration-guide: - -Migration Guide -=============== -This page contains information about changes between major versions and -how you can migrate from one version to another. - -.. _migration-to-0-14-0: - -0.13.x to 0.14.0 - -General -~~~~~~~ - -- The python package has a new name, as does the module. 
You should install - the package using ``pip install rasa`` (instead of ``rasa_core``). - - The code moved from ``rasa_core`` to ``rasa.core`` - best way to fix is a - search and replace for the two most common usages: - ``from rasa_core`` and ``import rasa_core``. - - We have added a backwards compatibility package to still allow you to import - from ``rasa_core``, this will emit a warning but all imports will still - work. Nevertheless, you should do the above renaming of any access - to ``rasa_core``. - --The `MappingPolicy` is now included in `default_config.yml`. If you are using - a custom policy configuration make sure to update it appropriately. - -- deprecated ``remote.py`` got removed - the API should be consumed directly - instead or with the help of the ``rasa_core_sdk``. - -Asynchronous First -~~~~~~~~~~~~~~~~~~ -- **No more flask.** The flask webserver has been replaced with an asynchronous - webserver called Sanic. If you run the server in production using a wsgi - runner, there are instructions here on how to recreate that with the - sanic webserver: - https://sanic.readthedocs.io/en/latest/sanic/deploying.html#running-via-gunicorn -- **Agent**: some of the method signatures changed from normal functions to - async coroutines. These functions need to be awaited when called, e.g. - ``await agent.handle_message(...)``. Changed functions include - - ``handle_message`` - - ``handle_text`` - - ``log_message`` - - ``execute_action`` - - ``load_data`` - - ``visualize`` - -Custom Input / Output Channels -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -If you wrote your own input output channels, there are a couple of changes -necessary to make the channels work properly with the asyncio server operation: - -- **Need to provide Sanic blueprints.** To make the server fully asynchronous - the input channels need to provide Sanic blueprints instead of flask - blueprints. 
Imports should change from - ``from flask import Blueprint, request`` to - ``from sanic import Blueprint, response``. All route functions, e.g. - ``def webhook(...)`` need to be async and accept a request parameter as - their first argument, e.g. ``async def webhook(request, ...)``. - - Calls to ``on_new_message(...)`` need to be awaited: - ``await on_new_message(...)``. - - All output channel functions need to be async: - ``send_text_message``, ``send_image_url``, ``send_attachment``, - ``send_response``, ``send_text_with_buttons`` and ``send_custom_message``. - And all internal calls to these methods need to be awaited. - - For inspiration, feel free to check the code of the existing channels. - -Function Naming -~~~~~~~~~~~~~~~ -- renamed ``train_dialogue_model`` to ``train``. Please use ``train`` from - now on. -- renamed ``rasa_core.evaluate`` to ``rasa_core.test``. Please use ``test`` - from now on. - -.. _migration-to-0-13-0: - -0.12.x to 0.13.0 ----------------- - -.. warning:: - - Python 2 support has now been completely dropped: to upgrade to - this version you **must use Python 3**. As always, **make sure** - **you retrain your models when switching to this version** - -General -~~~~~~~ - -- Support for Python 2 has now been completely removed from Rasa Core, please - upgrade to Python 3.5 or 3.6 to continue using the software -- If you were using the deprecated intent/entity format (``_intent[entity1=val1, entity=val2]``), - then you will have to update your training data to the standard format - (``/intent{"entity1": val1, "entity2": val2``} because it is no longer supported - -.. _migration-to-0-12-0: - -0.11.x to 0.12.0 ----------------- - -.. warning:: - - This is major new version with a lot of changes under the hood as well - as on the API level. Please take a careful look at the mentioned - before updating. Please make sure to - **retrain your models when switching to this version**. 
- -Train script -~~~~~~~~~~~~ - -- You **must** pass a policy config flag with ``-c/--config`` now when training - a model, see :ref:`policy_file`. -- Interactive learning is now started with - ``python -m rasa_core.train interactive`` rather than the - ``--interactive`` flag -- All policy configuration related flags have been removed (``--epochs``, - ``--max_history``, ``--validation_split``, ``--batch_size``, - ``--nlu_threshold``, ``--core_threshold``, - ``--fallback_action_name``), specify these in the policy config file instead, - see :ref:`policy_file` - -Visualization script -~~~~~~~~~~~~~~~~~~~~ - -- You **must** pass a policy config flag with ``-c/--config`` now, - see :ref:`policy_file`. - -Evaluation script -~~~~~~~~~~~~~~~~~ - -- The ``--output`` flag now takes one argument: the name of the folder - any files generated from the script should be written to -- The ``--failed`` flag was removed, as this is part of the ``--output`` - flag now - -Forms -~~~~~ - -- Forms were completely reworked, please follow :ref:`forms` - for instructions how to use them. -- ``FormField`` class and its subclasses were removed, - overwrite ``FormAction.slot_mapping()`` method to specify the mapping between - user input and requested slot in the form - utilizing helper methods ``FormAction.from_entity(...)``, - ``FormAction.from_intent(...)`` and ``FormAction.from_text(...)`` -- stories for forms need to be written differently, - it is recommended to use interactive learning to create form stories -- functionality of ``FormAction.get_other_slots(...)`` was moved to - ``FormAction.extract_other_slots(...)`` -- functionality of ``FormAction.get_requested_slot(...)`` was moved to - ``FormAction.extract_requested_slot(...)`` -- overwrite ``FormAction.validate(...)`` method to validate user input against - the slot requested by the form - -.. _migration-to-0-11-0: - -0.10.x to 0.11.0 ----------------- - -.. 
warning:: - - This is major new version with a lot of changes under the hood as well - as on the API level. Please take a careful look at the mentioned - before updating. Please make sure to - **retrain your models when switching to this version**. - -General -~~~~~~~ -.. note:: - - TL;DR these are the most important surface changes. But if you have - a second please take a minute to read all of them. - -- If you have custom actions, you now need to run a separate server to execute - them. If your actions are written in python (in a file called actions.py) you - can do this by running ``python -m rasa_core_sdk.endpoint --actions actions`` - and specifying the action endpoint in the ``endpoints.yml`` - For more information please read :ref:`custom actions `. -- For your custom actions, the imports have changed from - ``from rasa_core.actions import Action`` to ``from rasa_core_sdk import Action`` and - from ``from rasa_core.events import *`` to ``from rasa_core_sdk.events import *`` -- The actions list in the domain now needs to always contain the actions names - instead of the classpath (e.g. change ``actions.ActionExample`` to ``action_example``) -- utter templates that should be used as actions, now need to start with - ``utter_``, otherwise the bot won't be able to find the action - -HTTP Server endpoints -~~~~~~~~~~~~~~~~~~~~~ -- We removed ``/parse`` and ``/continue`` endpoints used for running actions - remotely. This has been replaced by the action server that allows you - to run your action code in any language. There are no replacement endpoints - for these two, as the flow of information has been changed: Instead of you - calling Rasa Core to update the tracker and receive the next action to be - executed, Rasa Core will call your action server once it predicted an action. - More information can be found in the updated docs for :ref:`custom actions `. - - -Webhooks -~~~~~~~~ -- The endpoints for the webhooks changed. 
All webhooks are now at - ``/webhooks/CHANNEL_NAME/webhook``. For example, the webhook - to receive facebook messages on a local instance is now - ``http://localhost:5005/webhooks/facebook/webhook``. -- format of the ``credentials.yml`` used in the ``run`` and ``server`` scripts - has changed to allow for multiple channels in one file: - - The new format now contains the channels name first, e.g. for facebook: - - .. code-block:: yaml - - facebook: - verify: "rasa-bot" - secret: "3e34709d01ea89032asdebfe5a74518" - page-access-token: "EAAbHPa7H9rEBAAuFk4Q3gPKbDedQnx4djJJ1JmQ7CAqO4iJKrQcNT0wtD" - -Changes to Input and Output Channels -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- ``ConsoleOutputChannel`` and ``ConsoleInputChannel`` have been removed. Either - use the `run script `_ - to run your bot on the cmdline, or adapt the ``serve_application`` - `function `_ - to run from a python script. -- ``rasa_core.channels.direct`` output channel package removed. - ``CollectingOutputChannel`` moved to ``rasa_core.channels.channel`` -- ``HttpInputComponent`` renamed to ``InputChannel`` & moved to - ``rasa_core.channels.channel.InputChannel`` -- If you wrote your own custom input channel, make sure to inherit from - ``InputChannel`` instead of ``HttpInputComponent``. -- ``CollectingOutput`` channel will no properly collect events for images, - buttons, and attachments. The content of the collected messages has changed, - ``data`` is now called ``buttons``. -- removed package ``rasa_core.channels.rest``, - please use ``rasa_core.channels.RestInput`` instead -- remove file input channel ``rasa_core.channels.file.FileInputChannel`` -- signature of ``agent.handle_channel`` got renamed - and the signature changed. here is an up to date example: - - .. 
code-block:: python - - from rasa_core.channels.facebook import FacebookInput - - input_channel = FacebookInput(fb_verify="VERIFY", - fb_secret="SECRET", - fb_access_token="ACCESS_TOKEN") - agent.handle_channels([input_channel], port=5005, serve_forever=True) -- If you wrote your own custom output channel, make sure to split messages - on double new lines if you like (the ``InputChannel`` you inherit from - doesn't do this anymore), e.g.: - - .. code-block:: python - - def send_text_message(self, recipient_id: Text, message: Text) -> None: - """Send a message through this channel.""" - - for message_part in message.split("\n\n"): - # self.send would be the actual communication to e.g. facebook - self.send(recipient_id, message_part) - - -.. _migration-to-0-10-0: - -0.9.x to 0.10.0 ---------------- -.. warning:: - - This is a release **breaking backwards compatibility**. - You can no longer load old models with this version, due to the addition of - the default action ``ActionDefaultFallback``. Please make sure to retrain - your model before using this version - -There have been some API changes to classes and methods: - -- if you use ``dispatcher.utter_template`` or - ``dispatcher.utter_button_template`` in your custom actions run code, - they now need the ``tracker`` as a second argument, e.g. - ``dispatcher.utter_template("utter_greet", tracker)`` - -- all input and output channels should have a ``name``. If you are using a - custom channel, make sure to implement a class method that returns - the name. The name needs to be added to the - **input channel and the output channel**. You can find examples - in ``rasa_core.channels.direct.CollectingOutputChannel``: - - .. code-block:: python - - @classmethod - def name(cls): - """Every channel needs a name""" - return "collector" - -- the ``RasaNLUHttpInterpreter`` when created now needs to be passed an - instance of ``EndpointConfig`` instead of ``server`` and ``token``, e.g.: - - .. 
code-block:: python - - from rasa_core.utils import EndpointConfig - - endpoint = EndpointConfig("http://localhost:500", token="mytoken") - interpreter = RasaNLUHttpInterpreter("mymodelname", endpoint) - -.. _migration-to-0-9-0: - -0.8.x to 0.9.0 --------------- - -.. warning:: - - This is a release **breaking backwards compatibility**. - Unfortunately, it is not possible to load - previously trained models (as the stored file formats have changed as - well as the configuration and metadata). Please make sure to retrain - a model before trying to use it with this improved version. - -- loading data should be done either using: - - .. code-block:: python - - from rasa_core import training - - training_data = training.load_data(...) - - or using an agent instance: - - .. code-block:: python - - training_data = agent.load_data(...) - agent.train(training_data, ...) - - It is deprecated to pass the training data file directly to ``agent.train``. - Instead, the data should be loaded in one of the above ways and then passed - to train. - -- ``ScoringPolicy`` got removed and replaced by ``AugmentedMemoizationPolicy`` - which is similar, but is able to match more states to states it has seen - during trainer (e.g. it is able to handle slots better) - -- if you use custom featurizers, you need to - **pass them directly to the policy** that should use them. - This allows the policies to use different featurizers. Passing a featurizer - is **optional**. Accordingly, the ``max_history`` parameter moved to that - featurizer: - - .. code-block:: python - - from rasa_core.featurizers import (MaxHistoryTrackerFeaturizer, - BinarySingleStateFeaturizer) - - featurizer = MaxHistoryTrackerFeaturizer(BinarySingleStateFeaturizer(), - max_history=5) - - agent = Agent(domain_file, - policies=[MemoizationPolicy(max_history=5), - KerasPolicy(featurizer)]) - - If no featurizer is passed during policy creation, the policies default - featurizer will be used. 
The `MemoizationPolicy` allows passing in the - `max_history` parameter directly, without creating a featurizer. - -- the ListSlot now stores a list of entities (with the same name) - present in an utterance - - -.. _migration-to-0-8-0: - -0.7.x to 0.8.0 --------------- - -- Credentials for the facebook connector changed. Instead of providing: - - .. code-block:: yaml - - # OLD FORMAT - verify: "rasa-bot" - secret: "3e34709d01ea89032asdebfe5a74518" - page-tokens: - 1730621093913654: "EAAbHPa7H9rEBAAuFk4Q3gPKbDedQnx4djJJ1JmQ7CAqO4iJKrQcNT0wtD" - - you should now pass the configuration parameters like this: - - .. code-block:: yaml - - # NEW FORMAT - verify: "rasa-bot" - secret: "3e34709d01ea89032asdebfe5a74518" - page-access-token: "EAAbHPa7H9rEBAAuFk4Q3gPKbDedQnx4djJJ1JmQ7CAqO4iJKrQcNT0wtD" - - As you can see, the new facebook connector only supports a single page. Same - change happened to the in code arguments for the connector which should be - changed to: - - .. code-block:: python - - from rasa_core.channels.facebook import FacebookInput - - FacebookInput( - credentials.get("verify"), - credentials.get("secret"), - credentials.get("page-access-token")) - -- Story file format changed from ``* _intent_greet[name=Rasa]`` - to ``* intent_greet{"name": "Rasa"}`` (old format is still supported but - deprecated). Instead of writing: - - .. code-block:: story - - ## story_07715946 - * _greet - - action_ask_howcanhelp - * _inform[location=rome,price=cheap] - - action_on_it - - action_ask_cuisine - - The new format looks like this: - - .. 
code-block:: story - - ## story_07715946 - * greet - - action_ask_howcanhelp - * inform{"location": "rome", "price": "cheap"} - - action_on_it - - action_ask_cuisine diff --git a/docs/core/policies.rst b/docs/core/policies.rst deleted file mode 100644 index 4d3c28209a5a..000000000000 --- a/docs/core/policies.rst +++ /dev/null @@ -1,612 +0,0 @@ -:desc: Define and train customized policy configurations to optimize your - contextual assistant for longer contexts or unseen utterances which - require generalization. - -.. _policies: - -Policies -======== - -.. edit-link:: - -.. contents:: - :local: - - -.. _policy_file: - -Configuring Policies -^^^^^^^^^^^^^^^^^^^^ - -The :class:`rasa.core.policies.Policy` class decides which action to take -at every step in the conversation. - -There are different policies to choose from, and you can include -multiple policies in a single :class:`rasa.core.agent.Agent`. - -.. note:: - - Per default a maximum of 10 next actions can be predicted - by the agent after every user message. To update this value - you can set the environment variable ``MAX_NUMBER_OF_PREDICTIONS`` - to the desired number of maximum predictions. - - -Your project's ``config.yml`` file takes a ``policies`` key -which you can use to customize the policies your assistant uses. -In the example below, the last two lines show how to use a custom -policy class and pass arguments to it. - -.. code-block:: yaml - - policies: - - name: "TEDPolicy" - featurizer: - - name: MaxHistoryTrackerFeaturizer - max_history: 5 - state_featurizer: - - name: BinarySingleStateFeaturizer - - name: "MemoizationPolicy" - max_history: 5 - - name: "FallbackPolicy" - nlu_threshold: 0.4 - core_threshold: 0.3 - fallback_action_name: "my_fallback_action" - - name: "path.to.your.policy.class" - arg1: "..." - - -Max History ------------ - -One important hyperparameter for Rasa Core policies is the ``max_history``. 
-This controls how much dialogue history the model looks at to decide which -action to take next. - -You can set the ``max_history`` by passing it to your policy's ``Featurizer`` -in the policy configuration yaml file. - -.. note:: - - Only the ``MaxHistoryTrackerFeaturizer`` uses a max history, - whereas the ``FullDialogueTrackerFeaturizer`` always looks at - the full conversation history. See :ref:`featurization_conversations` for details. - -As an example, let's say you have an ``out_of_scope`` intent which -describes off-topic user messages. If your bot sees this intent multiple -times in a row, you might want to tell the user what you `can` help them -with. So your story might look like this: - -.. code-block:: story - - * out_of_scope - - utter_default - * out_of_scope - - utter_default - * out_of_scope - - utter_help_message - -For Rasa Core to learn this pattern, the ``max_history`` -has to be `at least` 4. - -If you increase your ``max_history``, your model will become bigger and -training will take longer. If you have some information that should -affect the dialogue very far into the future, you should store it as a -slot. Slot information is always available for every featurizer. - - -Data Augmentation ------------------ - -When you train a model, by default Rasa Core will create -longer stories by randomly gluing together -the ones in your stories files. -This is because if you have stories like: - -.. code-block:: story - - # thanks - * thankyou - - utter_youarewelcome - - # bye - * goodbye - - utter_goodbye - - -You actually want to teach your policy to **ignore** the dialogue history -when it isn't relevant and just respond with the same action no matter -what happened before. - -You can alter this behavior with the ``--augmentation`` flag. -Which allows you to set the ``augmentation_factor``. -The ``augmentation_factor`` determines how many augmented stories are -subsampled during training. 
The augmented stories are subsampled before training -since their number can quickly become very large, and we want to limit it. -The number of sampled stories is ``augmentation_factor`` x10. -By default augmentation is set to 20, resulting in a maximum of 200 augmented stories. - -``--augmentation 0`` disables all augmentation behavior. -The memoization based policies are not affected by augmentation -(independent of the ``augmentation_factor``) and will automatically -ignore all augmented stories. - -Action Selection -^^^^^^^^^^^^^^^^ - -At every turn, each policy defined in your configuration will -predict a next action with a certain confidence level. For more information -about how each policy makes its decision, read into the policy's description below. -The bot's next action is then decided by the policy that predicts with the highest confidence. - -In the case that two policies predict with equal confidence (for example, the Memoization -and Mapping Policies always predict with confidence of either 0 or 1), the priority of the -policies is considered. Rasa policies have default priorities that are set to ensure the -expected outcome in the case of a tie. They look like this, where higher numbers have higher priority: - -#. ``TEDPolicy`` and ``SklearnPolicy`` -#. ``MappingPolicy`` -#. ``MemoizationPolicy`` and ``AugmentedMemoizationPolicy`` -#. ``FallbackPolicy`` and ``TwoStageFallbackPolicy`` -#. ``FormPolicy`` - -This priority hierarchy ensures that, for example, if there is an intent with a mapped action, but the NLU confidence is not -above the ``nlu_threshold``, the bot will still fall back. In general, it is not recommended to have more -than one policy per priority level, and some policies on the same priority level, such as the two -fallback policies, strictly cannot be used in tandem. - -If you create your own policy, use these priorities as a guide for figuring out the priority of your policy. 
-If your policy is a machine learning policy, it should most likely have priority 1, the same as the Rasa machine -learning policies. - -.. warning:: - All policy priorities are configurable via the ``priority:`` parameter in the configuration, - but we **do not recommend** changing them outside of specific cases such as custom policies. - Doing so can lead to unexpected and undesired bot behavior. - -.. _embedding_policy: - -Embedding Policy -^^^^^^^^^^^^^^^^ - - .. warning:: - - ``EmbeddingPolicy`` was renamed to ``TEDPolicy``. Please use :ref:`ted_policy` instead of ``EmbeddingPolicy`` - in your policy configuration. The functionality of the policy stayed the same. - -.. _ted_policy: - -TED Policy -^^^^^^^^^^ - -The Transformer Embedding Dialogue (TED) Policy is described in -`our paper `__. - -This policy has a pre-defined architecture, which comprises the -following steps: - - - concatenate user input (user intent and entities), previous system actions, slots and active forms for each time - step into an input vector to pre-transformer embedding layer; - - feed it to transformer; - - apply a dense layer to the output of the transformer to get embeddings of a dialogue for each time step; - - apply a dense layer to create embeddings for system actions for each time step; - - calculate the similarity between the dialogue embedding and embedded system actions. - This step is based on the `StarSpace `_ idea. - -It is recommended to use ``state_featurizer=LabelTokenizerSingleStateFeaturizer(...)`` -(see :ref:`featurization_conversations` for details). - -**Configuration:** - - Configuration parameters can be passed as parameters to the ``TEDPolicy`` within the configuration file. - If you want to adapt your model, start by modifying the following parameters: - - - ``epochs``: - This parameter sets the number of times the algorithm will see the training data (default: ``1``). 
- One ``epoch`` is equals to one forward pass and one backward pass of all the training examples. - Sometimes the model needs more epochs to properly learn. - Sometimes more epochs don't influence the performance. - The lower the number of epochs the faster the model is trained. - - ``hidden_layers_sizes``: - This parameter allows you to define the number of feed forward layers and their output - dimensions for dialogues and intents (default: ``dialogue: [], label: []``). - Every entry in the list corresponds to a feed forward layer. - For example, if you set ``dialogue: [256, 128]``, we will add two feed forward layers in front of - the transformer. The vectors of the input tokens (coming from the dialogue) will be passed on to those - layers. The first layer will have an output dimension of 256 and the second layer will have an output - dimension of 128. If an empty list is used (default behavior), no feed forward layer will be - added. - Make sure to use only positive integer values. Usually, numbers of power of two are used. - Also, it is usual practice to have decreasing values in the list: next value is smaller or equal to the - value before. - - ``number_of_transformer_layers``: - This parameter sets the number of transformer layers to use (default: ``1``). - The number of transformer layers corresponds to the transformer blocks to use for the model. - - ``transformer_size``: - This parameter sets the number of units in the transformer (default: ``128``). - The vectors coming out of the transformers will have the given ``transformer_size``. - - ``weight_sparsity``: - This parameter defines the fraction of kernel weights that are set to 0 for all feed forward layers - in the model (default: ``0.8``). The value should be between 0 and 1. If you set ``weight_sparsity`` - to 0, no kernel weights will be set to 0, the layer acts as a standard feed forward layer. You should not - set ``weight_sparsity`` to 1 as this would result in all kernel weights being 0, i.e. 
the model is not able - to learn. - - .. warning:: - - Pass an appropriate number, for example 50, of ``epochs`` to the ``TEDPolicy``, otherwise the policy will - be trained only for ``1`` epoch. - - .. warning:: - - Default ``max_history`` for this policy is ``None`` which means it'll use the - ``FullDialogueTrackerFeaturizer``. We recommend to set ``max_history`` to some finite value in order to - use ``MaxHistoryTrackerFeaturizer`` for **faster training**. See :ref:`featurization_conversations` for - details. We recommend to increase ``batch_size`` for ``MaxHistoryTrackerFeaturizer`` - (e.g. ``"batch_size": [32, 64]``) - - .. container:: toggle - - .. container:: header - - .. container:: block - - The above configuration parameters are the ones you should configure to fit your model to your data. - However, additional parameters exist that can be adapted. - - .. code-block:: none - - +---------------------------------+------------------+--------------------------------------------------------------+ - | Parameter | Default Value | Description | - +=================================+==================+==============================================================+ - | hidden_layers_sizes | dialogue: [] | Hidden layer sizes for layers before the embedding layers | - | | label: [] | for dialogue and labels. The number of hidden layers is | - | | | equal to the length of the corresponding. | - +---------------------------------+------------------+--------------------------------------------------------------+ - | transformer_size | 128 | Number of units in transformer. | - +---------------------------------+------------------+--------------------------------------------------------------+ - | number_of_transformer_layers | 1 | Number of transformer layers. | - +---------------------------------+------------------+--------------------------------------------------------------+ - | number_of_attention_heads | 4 | Number of attention heads in transformer. 
| - +---------------------------------+------------------+--------------------------------------------------------------+ - | use_key_relative_attention | False | If 'True' use key relative embeddings in attention. | - +---------------------------------+------------------+--------------------------------------------------------------+ - | use_value_relative_attention | False | If 'True' use value relative embeddings in attention. | - +---------------------------------+------------------+--------------------------------------------------------------+ - | max_relative_position | None | Maximum position for relative embeddings. | - +---------------------------------+------------------+--------------------------------------------------------------+ - | batch_size | [8, 32] | Initial and final value for batch sizes. | - | | | Batch size will be linearly increased for each epoch. | - +---------------------------------+------------------+--------------------------------------------------------------+ - | batch_strategy | "balanced" | Strategy used when creating batches. | - | | | Can be either 'sequence' or 'balanced'. | - +---------------------------------+------------------+--------------------------------------------------------------+ - | epochs | 1 | Number of epochs to train. | - +---------------------------------+------------------+--------------------------------------------------------------+ - | random_seed | None | Set random seed to any 'int' to get reproducible results. | - +---------------------------------+------------------+--------------------------------------------------------------+ - | embedding_dimension | 20 | Dimension size of embedding vectors. | - +---------------------------------+------------------+--------------------------------------------------------------+ - | number_of_negative_examples | 20 | The number of incorrect labels. The algorithm will minimize | - | | | their similarity to the user input during training. 
| - +---------------------------------+------------------+--------------------------------------------------------------+ - | similarity_type | "auto" | Type of similarity measure to use, either 'auto' or 'cosine' | - | | | or 'inner'. | - +---------------------------------+------------------+--------------------------------------------------------------+ - | loss_type | "softmax" | The type of the loss function, either 'softmax' or 'margin'. | - +---------------------------------+------------------+--------------------------------------------------------------+ - | ranking_length | 10 | Number of top actions to normalize scores for loss type | - | | | 'softmax'. Set to 0 to turn off normalization. | - +---------------------------------+------------------+--------------------------------------------------------------+ - | maximum_positive_similarity | 0.8 | Indicates how similar the algorithm should try to make | - | | | embedding vectors for correct labels. | - | | | Should be 0.0 < ... < 1.0 for 'cosine' similarity type. | - +---------------------------------+------------------+--------------------------------------------------------------+ - | maximum_negative_similarity | -0.2 | Maximum negative similarity for incorrect labels. | - | | | Should be -1.0 < ... < 1.0 for 'cosine' similarity type. | - +---------------------------------+------------------+--------------------------------------------------------------+ - | use_maximum_negative_similarity | True | If 'True' the algorithm only minimizes maximum similarity | - | | | over incorrect intent labels, used only if 'loss_type' is | - | | | set to 'margin'. | - +---------------------------------+------------------+--------------------------------------------------------------+ - | scale_loss | True | Scale loss inverse proportionally to confidence of correct | - | | | prediction. 
| - +---------------------------------+------------------+--------------------------------------------------------------+ - | regularization_constant | 0.001 | The scale of regularization. | - +---------------------------------+------------------+--------------------------------------------------------------+ - | negative_margin_scale | 0.8 | The scale of how important it is to minimize the maximum | - | | | similarity between embeddings of different labels. | - +---------------------------------+------------------+--------------------------------------------------------------+ - | drop_rate_dialogue | 0.1 | Dropout rate for embedding layers of dialogue features. | - | | | Value should be between 0 and 1. | - | | | The higher the value the higher the regularization effect. | - +---------------------------------+------------------+--------------------------------------------------------------+ - | drop_rate_label | 0.0 | Dropout rate for embedding layers of label features. | - | | | Value should be between 0 and 1. | - | | | The higher the value the higher the regularization effect. | - +---------------------------------+------------------+--------------------------------------------------------------+ - | drop_rate_attention | 0.0 | Dropout rate for attention. Value should be between 0 and 1. | - | | | The higher the value the higher the regularization effect. | - +---------------------------------+------------------+--------------------------------------------------------------+ - | weight_sparsity | 0.8 | Sparsity of the weights in dense layers. | - | | | Value should be between 0 and 1. | - +---------------------------------+------------------+--------------------------------------------------------------+ - | evaluate_every_number_of_epochs | 20 | How often to calculate validation accuracy. | - | | | Set to '-1' to evaluate just once at the end of training. 
| - +---------------------------------+------------------+--------------------------------------------------------------+ - | evaluate_on_number_of_examples | 0 | How many examples to use for hold out validation set. | - | | | Large values may hurt performance, e.g. model accuracy. | - +---------------------------------+------------------+--------------------------------------------------------------+ - | tensorboard_log_directory | None | If you want to use tensorboard to visualize training | - | | | metrics, set this option to a valid output directory. You | - | | | can view the training metrics after training in tensorboard | - | | | via 'tensorboard --logdir '. | - +---------------------------------+------------------+--------------------------------------------------------------+ - | tensorboard_log_level | "epoch" | Define when training metrics for tensorboard should be | - | | | logged. Either after every epoch ('epoch') or for every | - | | | training step ('minibatch'). | - +---------------------------------+------------------+--------------------------------------------------------------+ - - .. warning:: - - If ``evaluate_on_number_of_examples`` is non zero, random examples will be picked by stratified split and - used as **hold out** validation set, so they will be excluded from training data. - We suggest to set it to zero if data set contains a lot of unique examples of dialogue turns. - - .. note:: - - For ``cosine`` similarity ``maximum_positive_similarity`` and ``maximum_negative_similarity`` should - be between ``-1`` and ``1``. - - .. note:: - - There is an option to use linearly increasing batch size. The idea comes from - ``_. In order to do it pass a list to ``batch_size``, e.g. - ``"batch_size": [8, 32]`` (default behavior). If constant ``batch_size`` is required, pass an ``int``, - e.g. ``"batch_size": 8``. - - .. 
note:: - - The parameter ``maximum_negative_similarity`` is set to a negative value to mimic the original - starspace algorithm in the case ``maximum_negative_similarity = maximum_positive_similarity`` and - ``use_maximum_negative_similarity = False``. See `starspace paper `_ - for details. - - -.. _mapping-policy: - -Mapping Policy -^^^^^^^^^^^^^^ - -The ``MappingPolicy`` can be used to directly map intents to actions. The -mappings are assigned by giving an intent the property ``triggers``, e.g.: - -.. code-block:: yaml - - intents: - - ask_is_bot: - triggers: action_is_bot - -An intent can only be mapped to at most one action. The bot will run -the mapped action once it receives a message of the triggering intent. Afterwards, -it will listen for the next message. With the next -user message, normal prediction will resume. - -If you do not want your intent-action mapping to affect the dialogue -history, the mapped action must return a ``UserUtteranceReverted()`` -event. This will delete the user's latest message, along with any events that -happened after it, from the dialogue history. This means you should not -include the intent-action interaction in your stories. - -For example, if a user asks "Are you a bot?" off-topic in the middle of the -flow, you probably want to answer without that interaction affecting the next -action prediction. A triggered custom action can do anything, but here's a -simple example that dispatches a bot utterance and then reverts the interaction: - -.. code-block:: python - - class ActionIsBot(Action): - """Revertible mapped action for utter_is_bot""" - - def name(self): - return "action_is_bot" - - def run(self, dispatcher, tracker, domain): - dispatcher.utter_template(template="utter_is_bot") - return [UserUtteranceReverted()] - -.. note:: - - If you use the ``MappingPolicy`` to predict bot utterance actions directly (e.g. 
- ``triggers: utter_{}``), these interactions must go in your stories, as in this - case there is no ``UserUtteranceReverted()`` and the - intent and the mapped response action will appear in the dialogue history. - -.. note:: - - The MappingPolicy is also responsible for executing the default actions ``action_back`` - and ``action_restart`` in response to ``/back`` and ``/restart``. If it is not included - in your policy example these intents will not work. - -Memoization Policy -^^^^^^^^^^^^^^^^^^ - -The ``MemoizationPolicy`` just memorizes the conversations in your -training data. It predicts the next action with confidence ``1.0`` -if this exact conversation exists in the training data, otherwise it -predicts ``None`` with confidence ``0.0``. - -Augmented Memoization Policy -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The ``AugmentedMemoizationPolicy`` remembers examples from training -stories for up to ``max_history`` turns, just like the ``MemoizationPolicy``. -Additionally, it has a forgetting mechanism that will forget a certain amount -of steps in the conversation history and try to find a match in your stories -with the reduced history. It predicts the next action with confidence ``1.0`` -if a match is found, otherwise it predicts ``None`` with confidence ``0.0``. - -.. note:: - - If you have dialogues where some slots that are set during - prediction time might not be set in training stories (e.g. in training - stories starting with a reminder not all previous slots are set), - make sure to add the relevant stories without slots to your training - data as well. - -.. _fallback-policy: - -Fallback Policy -^^^^^^^^^^^^^^^ - -The ``FallbackPolicy`` invokes a :ref:`fallback action -` if at least one of the following occurs: - -1. The intent recognition has a confidence below ``nlu_threshold``. -2. The highest ranked intent differs in confidence with the second highest - ranked intent by less than ``ambiguity_threshold``. -3. 
None of the dialogue policies predict an action with confidence higher than ``core_threshold``. - -**Configuration:** - - The thresholds and fallback action can be adjusted in the policy configuration - file as parameters of the ``FallbackPolicy``: - - .. code-block:: yaml - - policies: - - name: "FallbackPolicy" - nlu_threshold: 0.3 - ambiguity_threshold: 0.1 - core_threshold: 0.3 - fallback_action_name: 'action_default_fallback' - - +----------------------------+---------------------------------------------+ - | ``nlu_threshold`` | Min confidence needed to accept an NLU | - | | prediction | - +----------------------------+---------------------------------------------+ - | ``ambiguity_threshold`` | Min amount by which the confidence of the | - | | top intent must exceed that of the second | - | | highest ranked intent. | - +----------------------------+---------------------------------------------+ - | ``core_threshold`` | Min confidence needed to accept an action | - | | prediction from Rasa Core | - +----------------------------+---------------------------------------------+ - | ``fallback_action_name`` | Name of the :ref:`fallback action | - | | ` | - | | to be called if the confidence of intent | - | | or action is below the respective threshold | - +----------------------------+---------------------------------------------+ - - You can also configure the ``FallbackPolicy`` in your python code: - - .. code-block:: python - - from rasa.core.policies.fallback import FallbackPolicy - from rasa.core.policies.keras_policy import TEDPolicy - from rasa.core.agent import Agent - - fallback = FallbackPolicy(fallback_action_name="action_default_fallback", - core_threshold=0.3, - nlu_threshold=0.3, - ambiguity_threshold=0.1) - - agent = Agent("domain.yml", policies=[TEDPolicy(), fallback]) - - .. note:: - - You can include either the ``FallbackPolicy`` or the - ``TwoStageFallbackPolicy`` in your configuration, but not both. - -.. 
_two-stage-fallback-policy: - -Two-Stage Fallback Policy -^^^^^^^^^^^^^^^^^^^^^^^^^ - -The ``TwoStageFallbackPolicy`` handles low NLU confidence in multiple stages -by trying to disambiguate the user input. - -- If an NLU prediction has a low confidence score or is not significantly higher - than the second highest ranked prediction, the user is asked to affirm - the classification of the intent. - - - If they affirm, the story continues as if the intent was classified - with high confidence from the beginning. - - If they deny, the user is asked to rephrase their message. - -- Rephrasing - - - If the classification of the rephrased intent was confident, the story - continues as if the user had this intent from the beginning. - - If the rephrased intent was not classified with high confidence, the user - is asked to affirm the classified intent. - -- Second affirmation - - - If the user affirms the intent, the story continues as if the user had - this intent from the beginning. - - If the user denies, the original intent is classified as the specified - ``deny_suggestion_intent_name``, and an ultimate fallback action - is triggered (e.g. a handoff to a human). - -**Configuration:** - - To use the ``TwoStageFallbackPolicy``, include the following in your - policy configuration. - - .. code-block:: yaml - - policies: - - name: TwoStageFallbackPolicy - nlu_threshold: 0.3 - ambiguity_threshold: 0.1 - core_threshold: 0.3 - fallback_core_action_name: "action_default_fallback" - fallback_nlu_action_name: "action_default_fallback" - deny_suggestion_intent_name: "out_of_scope" - - +-------------------------------+------------------------------------------+ - | ``nlu_threshold`` | Min confidence needed to accept an NLU | - | | prediction | - +-------------------------------+------------------------------------------+ - | ``ambiguity_threshold`` | Min amount by which the confidence of the| - | | top intent must exceed that of the second| - | | highest ranked intent. 
| - +-------------------------------+------------------------------------------+ - | ``core_threshold`` | Min confidence needed to accept an action| - | | prediction from Rasa Core | - +-------------------------------+------------------------------------------+ - | ``fallback_core_action_name`` | Name of the :ref:`fallback action | - | | ` | - | | to be called if the confidence of Rasa | - | | Core action prediction is below the | - | | ``core_threshold``. This action is | - | | to propose the recognized intents | - +-------------------------------+------------------------------------------+ - | ``fallback_nlu_action_name`` | Name of the :ref:`fallback action | - | | ` | - | | to be called if the confidence of Rasa | - | | NLU intent classification is below the | - | | ``nlu_threshold``. This action is called | - | | when the user denies the second time | - +-------------------------------+------------------------------------------+ - |``deny_suggestion_intent_name``| The name of the intent which is used to | - | | detect that the user denies the suggested| - | | intents | - +-------------------------------+------------------------------------------+ - - .. note:: - - You can include either the ``FallbackPolicy`` or the - ``TwoStageFallbackPolicy`` in your configuration, but not both. - - -.. _form-policy: - -Form Policy -^^^^^^^^^^^ - -The ``FormPolicy`` is an extension of the ``MemoizationPolicy`` which -handles the filling of forms. Once a ``FormAction`` is called, the -``FormPolicy`` will continually predict the ``FormAction`` until all required -slots in the form are filled. For more information, see :ref:`forms`. diff --git a/docs/core/reminders-and-external-events.rst b/docs/core/reminders-and-external-events.rst deleted file mode 100644 index c731b8473546..000000000000 --- a/docs/core/reminders-and-external-events.rst +++ /dev/null @@ -1,169 +0,0 @@ -:desc: Learn how to use external events and schedule reminders. - -.. 
_reminders-and-external-events: - -Reminders and External Events -============================= - -.. edit-link:: - -The ``ReminderScheduled`` event and the -`trigger_intent endpoint <../../api/http-api/#operation/triggerConversationIntent>`_ let your assistant remind you -about things after a given period of time, or to respond to external events (other applications, sensors, etc.). -You can find a full example assistant that implements these features -`here `_. - -.. contents:: - :local: - -.. _reminders: - -Reminders ---------- - -Instead of an external sensor, you might just want to be reminded about something after a certain amount of time. -For this, Rasa provides the special event ``ReminderScheduled``, and another event, ``ReminderCancelled``, to unschedule a reminder. - -.. _scheduling-reminders-guide: - -Scheduling Reminders -^^^^^^^^^^^^^^^^^^^^ - -Let's say you want your assistant to remind you to call a friend in 5 seconds. -(You probably want some longer time span, but for the sake of testing, let it be 5 seconds.) -Thus, we define an intent ``ask_remind_call`` with some NLU data, - -.. code-block:: md - - ## intent:ask_remind_call - - remind me to call [Albert](name) - - remind me to call [Susan](name) - - later I have to call [Daksh](name) - - later I have to call [Anna](name) - ... - -and connect this intent with a new custom action ``action_set_reminder``. -We could make this connection by providing training stories (recommended for more complex assistants), or using the :ref:`mapping-policy`. - -The custom action ``action_set_reminder`` should schedule a reminder that, 5 seconds later, triggers an intent ``EXTERNAL_reminder`` with all the entities that the user provided in his/her last message (similar to an external event): - -.. literalinclude:: ../../examples/reminderbot/actions.py - :pyobject: ActionSetReminder - -Note that this requires the ``datetime`` and ``rasa_sdk.events`` packages. 
- -Finally, we define another custom action ``action_react_to_reminder`` and link it to the ``EXTERNAL_reminder`` intent: - -.. code-block:: md - - - EXTERNAL_reminder: - triggers: action_react_to_reminder - -where the ``action_react_to_reminder`` is - -.. literalinclude:: ../../examples/reminderbot/actions.py - :pyobject: ActionReactToReminder - -Instead of a custom action, we could also have used a simple response template. -But here we want to make use of the fact that the reminder can carry entities, and we can process the entities in this custom action. - -.. warning:: - - Reminders are cancelled whenever you shutdown your Rasa server. - -.. warning:: - - Reminders currently (Rasa 1.8) don't work in `rasa shell`. - You have to test them with a - `running Rasa X server `_ instead. - -.. note:: - - Proactively reaching out to the user is dependent on the abilities of a channel and - hence not supported by every channel. If your channel does not support it, consider - using the :ref:`callbackInput` channel to send messages to a `webhook `_. - -.. _cancelling-reminders-guide: - -Cancelling Reminders -^^^^^^^^^^^^^^^^^^^^ - -Sometimes the user may want to cancel a reminder that he has scheduled earlier. -A simple way of adding this functionality to your assistant is to create an intent ``ask_forget_reminders`` and let your assistant respond to it with a custom action such as - -.. literalinclude:: ../../examples/reminderbot/actions.py - :pyobject: ForgetReminders - -Here, ``ReminderCancelled()`` simply cancels all the reminders that are currently scheduled. -Alternatively, you may provide some parameters to narrow down the types of reminders that you want to cancel. 
-For example, - - - ``ReminderCancelled(intent="greet")`` cancels all reminders with intent ``greet`` - - ``ReminderCancelled(entities={...})`` cancels all reminders with the given entities - - ``ReminderCancelled("...")`` cancels the one unique reminder with the given name "``...``" that you supplied - during its creation - -.. _external-event-guide: - -External Events ---------------- - -Let's say you want to send a message from some other device to change the course of an ongoing conversation. -For example, some moisture-sensor attached to a Raspberry Pi should inform your personal assistant that your favorite -plant needs watering, and your assistant should then relay this message to you. - -To do this, your Raspberry Pi needs to send a message to the `trigger_intent endpoint <../../api/http-api/#operation/triggerConversationIntent>`_ of your conversation. -As the name says, this injects a user intent (possibly with entities) into your conversation. -So for Rasa it is almost as if you had entered a message that got classified with this intent and these entities. -Rasa then needs to respond to this input with an action such as ``action_warn_dry``. -The easiest and most reliable way to connect this action with the intent is via the :ref:`mapping-policy`. - -.. _getting-conversation-id: - -Getting the Conversation ID -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The first thing we need is the Session ID of the conversation that your sensor should send a notification to. -An easy way to get this is to define a custom action (see :ref:`custom-actions`) that displays the ID in the conversation. -For example: - -.. literalinclude:: ../../examples/reminderbot/actions.py - :pyobject: ActionTellID - -In addition, we also declare an intent ``ask_id``, define some NLU data for it, and add both ``action_tell_id`` and -``ask_id`` to the domain file, where we specify that one should trigger the other: - -.. 
code-block:: md - - intents: - - ask_id: - triggers: action_tell_id - -Now, when you ask "What is the ID of this conversation?", the assistant replies with something like "The ID of this -conversation is: 38cc25d7e23e4dde800353751b7c2d3e". - -If you want your assistant to link to the Raspberry Pi automatically, you will have to write a custom action that -informs the Pi about the conversation id when your conversation starts (see :ref:`custom_session_start`). - -.. _responding_to_external_events: - -Responding to External Events -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Now that we have our Session ID, we need to prepare the assistant so it responds to messages from the sensor. -To this end, we define a new intent ``EXTERNAL_dry_plant`` without any NLU data. -This intent will later be triggered by the external sensor. -Here, we start the intent name with ``EXTERNAL_`` to indicate that this is not something the user would say, but you can name the intent however you like. - -In the domain file, we now connect the intent ``EXTERNAL_dry_plant`` with another custom action ``action_warn_dry``, e.g. - -.. literalinclude:: ../../examples/reminderbot/actions.py - :pyobject: ActionWarnDry - -Now, when you are in a conversation with id ``38cc25d7e23e4dde800353751b7c2d3e``, then running - -.. code-block:: shell - - curl -H "Content-Type: application/json" -X POST -d '{"name": "EXTERNAL_dry_plant", "entities": {"plant": "Orchid"}}' http://localhost:5005/conversations/38cc25d7e23e4dde800353751b7c2d3e/trigger_intent - -in the terminal will cause your assistant to say "Your Orchid needs some water!". diff --git a/docs/core/responses.rst b/docs/core/responses.rst deleted file mode 100644 index cd7e27d363d0..000000000000 --- a/docs/core/responses.rst +++ /dev/null @@ -1,170 +0,0 @@ -:desc: Read how to define assistant responses or use a service to generate the - responses using Rasa as an open source chat assistant platform. - -.. _responses: - -Responses -========= - -.. 
edit-link:: - -If you want your assistant to respond to user messages, you need to manage -these responses. In the training data for your bot, -your stories, you specify the actions your bot -should execute. These actions -can use responses to send messages back to the user. - -There are three ways to manage these responses: - -1. Responses are normally stored in your domain file, see :ref:`here ` -2. Retrieval action responses are part of the training data, see :ref:`here ` -3. You can also create a custom NLG service to generate responses, see :ref:`here ` - -.. _in-domain-responses: - -Including the responses in the domain --------------------------------------- - -The default format is to include the responses in your domain file. -This file then contains references to all your custom actions, -available entities, slots and intents. - -.. literalinclude:: ../../data/test_domains/default_with_slots.yml - :language: yaml - -In this example domain file, the section ``responses`` contains the -responses the assistant uses to send messages to the user. - -.. note:: - - If you want to change the text, or any other part of the bots response, - you need to retrain the assistant before these changes will be picked up. - -.. note:: - - Responses that are used in a story should be listed in the ``stories`` - section of the domain.yml file. In this example, the ``utter_channel`` - response is not used in a story so it is not listed in that section. - -More details about the format of these responses can be found in the -documentation about the domain file format: :ref:`domain-responses`. - -.. _custom-nlg-service: - -Creating your own NLG service for bot responses ------------------------------------------------ - -Retraining the bot just to change the text copy can be suboptimal for -some workflows. That's why Core also allows you to outsource the -response generation and separate it from the dialogue learning. 
- -The assistant will still learn to predict actions and to react to user input -based on past dialogues, but the responses it sends back to the user -are generated outside of Rasa Core. - -If the assistant wants to send a message to the user, it will call an -external HTTP server with a ``POST`` request. To configure this endpoint, -you need to create an ``endpoints.yml`` and pass it either to the ``run`` -or ``server`` script. The content of the ``endpoints.yml`` should be - -.. literalinclude:: ../../data/test_endpoints/example_endpoints.yml - :language: yaml - -Then pass the ``enable-api`` flag to the ``rasa run`` command when starting -the server: - -.. code-block:: shell - - $ rasa run \ - --enable-api \ - -m examples/babi/models \ - --log-file out.log \ - --endpoints endpoints.yml - - -The body of the ``POST`` request sent to the endpoint will look -like this: - -.. code-block:: json - - { - "tracker": { - "latest_message": { - "text": "/greet", - "intent_ranking": [ - { - "confidence": 1.0, - "name": "greet" - } - ], - "intent": { - "confidence": 1.0, - "name": "greet" - }, - "entities": [] - }, - "sender_id": "22ae96a6-85cd-11e8-b1c3-f40f241f6547", - "paused": false, - "latest_event_time": 1531397673.293572, - "slots": { - "name": null - }, - "events": [ - { - "timestamp": 1531397673.291998, - "event": "action", - "name": "action_listen" - }, - { - "timestamp": 1531397673.293572, - "parse_data": { - "text": "/greet", - "intent_ranking": [ - { - "confidence": 1.0, - "name": "greet" - } - ], - "intent": { - "confidence": 1.0, - "name": "greet" - }, - "entities": [] - }, - "event": "user", - "text": "/greet" - } - ] - }, - "arguments": {}, - "template": "utter_greet", - "channel": { - "name": "collector" - } - } - -The endpoint then needs to respond with the generated response: - -.. 
code-block:: json - - { - "text": "hey there", - "buttons": [], - "image": null, - "elements": [], - "attachments": [] - } - -Rasa will then use this response and sent it back to the user. - - -.. _external-events: - -Proactively Reaching Out to the User with External Events ---------------------------------------------------------- - -You may want to proactively reach out to the user, -for example to display the output of a long running background operation -or notify the user of an external event. -To learn more, check out `reminderbot `_ in -the Rasa examples directory or look into :ref:`reminders-and-external-events`. diff --git a/docs/core/retrieval-actions.rst b/docs/core/retrieval-actions.rst deleted file mode 100644 index db729c9abdbf..000000000000 --- a/docs/core/retrieval-actions.rst +++ /dev/null @@ -1,242 +0,0 @@ -:desc: Use a retrieval model to select chatbot responses - in open source bot framework Rasa. - -.. _retrieval-actions: - -Retrieval Actions -================= - -.. edit-link:: - -.. warning:: - This feature is experimental. - We introduce experimental features to get feedback from our community, so we encourage you to try it out! - However, the functionality might be changed or removed in the future. - If you have feedback (positive or negative) please share it with us on the `forum `_. - Also, currently we do not support adding new annotations in Rasa X if your training data contains retrieval actions. - Once we have gathered enough feedback and we're happy with the training data format, we'll add support for training response retrieval models in Rasa X. - -.. note:: - There is an in-depth blog post `here `_ about how to use retrieval - actions for handling single turn interactions. - -.. contents:: - :local: - -About -^^^^^ - -Retrieval actions are designed to make it simpler to work with :ref:`small-talk` and :ref:`simple-questions` . 
-For example, if your assistant can handle 100 FAQs and 50 different small talk intents, you can use a single retrieval -action to cover all of these. -From a dialogue perspective, these single-turn exchanges can all be treated equally, so this simplifies your stories. - -Instead of having a lot of stories like: - -.. code-block:: story - - ## weather - * ask_weather - - utter_ask_weather - - ## introduction - * ask_name - - utter_introduce_myself - - ... - - -You can cover all of these with a single story where the above intents are grouped under a common ``chitchat`` intent: - - -.. code-block:: story - - ## chitchat - * chitchat - - respond_chitchat - -A retrieval action uses the output of a :ref:`response-selector` component from NLU which learns a -retrieval model to predict the correct response from a list of candidate responses given a user message text. - - -.. _retrieval-training-data: - -Training Data -^^^^^^^^^^^^^ - -Like the name suggests, retrieval actions learn to select the correct response from a list of candidates. -As with other NLU data, you need to include examples of what your users will say in your NLU file: - -.. code-block:: md - - ## intent: chitchat/ask_name - - what's your name - - who are you? - - what are you called? - - ## intent: chitchat/ask_weather - - how's weather? - - is it sunny where you are? - -First, all of these examples will be combined into a single ``chitchat`` retrieval intent that NLU will predict. -All retrieval intents have a suffix added to them which identifies a particular response text for your assistant, in the -above example - ``ask_name`` and ``ask_weather``. The suffix is separated from the intent name by a ``/`` delimiter - -Next, include response texts for all retrieval intents in a **separate** training data file as ``responses.md``: - -.. code-block:: md - - ## ask name - * chitchat/ask_name - - my name is Sara, Rasa's documentation bot! 
- - ## ask weather - * chitchat/ask_weather - - it's always sunny where I live - -The retrieval model is trained separately as part of the NLU training pipeline to select the correct response. -One important thing to remember is that the retrieval model uses the text of the response messages -to select the correct one. If you change the text of these responses, you have to retrain your retrieval model! -This is a key difference to the responses defined in your domain file. - -.. note:: - The file containing response texts must exist as a separate file inside the training data directory passed - to the training process. The contents of it cannot be a part of the file which contains training data for other - components of NLU. - -.. note:: - As shown in the above examples, ``/`` symbol is reserved as a delimiter to separate retrieval intents from response text identifier. Make sure not to - use it in the name of your intents. - -Config File -^^^^^^^^^^^ - -You need to include the :ref:`response-selector` component in your config. The component needs a tokenizer, a featurizer and an -intent classifier to operate on the user message before it can predict a response and hence these -components should be placed before ``ResponseSelector`` in the NLU configuration. An example: - -.. code-block:: yaml - - language: "en" - - pipeline: - - name: "WhitespaceTokenizer" - intent_split_symbol: "_" - - name: "CountVectorsFeaturizer" - - name: "DIETClassifier" - - name: "ResponseSelector" - -Domain -^^^^^^ - -Rasa uses a naming convention to match the intent names like ``chitchat/ask_name`` -to the retrieval action. -The correct action name in this case is ``respond_chitchat``. The prefix ``respond_`` is mandatory to identify it as a -retrieval action. Another example - correct action name for ``faq/ask_policy`` would be ``respond_faq`` -To include this in your domain, add it to the list of actions: - -.. code-block:: yaml - - actions: - ... 
- - respond_chitchat - - respond_faq - - -A simple way to ensure that the retrieval action is predicted after the chitchat -intent is to use the :ref:`mapping-policy`. -However, you can also include this action in your stories. -For example, if you want to repeat a question after handling chitchat -(see :ref:`unhappy-paths` ) - -.. code-block:: story - - ## interruption - * search_restaurant - - utter_ask_cuisine - * chitchat - - respond_chitchat - - utter_ask_cuisine - -Multiple Retrieval Actions -^^^^^^^^^^^^^^^^^^^^^^^^^^ - -If your assistant includes both FAQs **and** chitchat, it is possible to -separate these into separate retrieval actions, for example having intents -like ``chitchat/ask_weather`` and ``faq/returns_policy``. -Rasa supports adding multiple ``RetrievalActions`` like ``respond_chitchat`` and ``respond_returns_policy`` -To train separate retrieval models for each of the intents, you need to include a separate ``ResponseSelector`` -component in the config: - -.. code-block:: yaml - - language: "en" - - pipeline: - - name: "WhitespaceTokenizer" - intent_split_symbol: "_" - - name: "CountVectorsFeaturizer" - - name: "DIETClassifier" - - name: "ResponseSelector" - retrieval_intent: chitchat - - name: "ResponseSelector" - retrieval_intent: faq - -You could still have two separate retrieval actions but both actions can share the same retrieval model by specifying a single ``ResponseSelector`` component and leaving the ``retrieval_intent`` to its default value(None): - -.. code-block:: yaml - - language: "en" - - pipeline: - - name: "WhitespaceTokenizer" - intent_split_symbol: "_" - - name: "CountVectorsFeaturizer" - - name: "DIETClassifier" - - name: "ResponseSelector" - - -In this case, the response selector will be trained on examples from both ``chitchat/{x}`` and ``faq/{x}`` and will be -identified by the name ``default`` the NLU parsed output. 
- -In our experiments so far, having separate retrieval models does **not** make any difference to the accuracy of each -retrieval action. So for simplicity, we recommend you use a single retrieval -model for both chitchat and FAQs. -If you get different results, please let us know in the `forum `_ ! - - -Parsing Response Selector Output -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The parsed output from NLU will have a property named ``response_selector`` containing the output for -each response selector. Each response selector is identified by the ``retrieval_intent`` parameter of that response selector -and stores two properties - - - - ``response``: The predicted response text and the prediction confidence. - - ``ranking``: Ranking with confidences of top 10 candidate responses. - -Example result: - -.. code-block:: json - - { - "text": "What is the recommend python version to install?", - "entities": [], - "intent": {"confidence": 0.6485910906220309, "name": "faq"}, - "intent_ranking": [ - {"confidence": 0.6485910906220309, "name": "faq"}, - {"confidence": 0.1416153159565678, "name": "greet"} - ], - "response_selector": { - "faq": { - "response": {"confidence": 0.7356462617, "name": "Supports 3.5, 3.6 and 3.7, recommended version is 3.6"}, - "ranking": [ - {"confidence": 0.7356462617, "name": "Supports 3.5, 3.6 and 3.7, recommended version is 3.6"}, - {"confidence": 0.2134543431, "name": "You can ask me about how to get started"} - ] - } - } - } - -If the ``retrieval_intent`` parameter of a particular response selector was left to its default value, -the corresponding response selector will be identified as ``default`` in the returned output.
diff --git a/docs/core/slots.rst b/docs/core/slots.rst deleted file mode 100644 index 25aec4dd97a5..000000000000 --- a/docs/core/slots.rst +++ /dev/null @@ -1,358 +0,0 @@ -:desc: Store information the user provided as well as information from database - queries in slots to influence how the machine learning based dialogue - continues. - -.. _slots: - -Slots -===== - -.. edit-link:: - -.. contents:: - :local: - -What are slots? ---------------- - -**Slots are your bot's memory.** They act as a key-value store -which can be used to store information the user provided (e.g their home city) -as well as information gathered about the outside world (e.g. the result of a -database query). - -Most of the time, you want slots to influence how the dialogue progresses. -There are different slot types for different behaviors. - -For example, if your user has provided their home city, you might -have a ``text`` slot called ``home_city``. If the user asks for the -weather, and you *don't* know their home city, you will have to ask -them for it. A ``text`` slot only tells Rasa Core whether the slot -has a value. The specific value of a ``text`` slot (e.g. Bangalore -or New York or Hong Kong) doesn't make any difference. - -If the value itself is important, use a ``categorical`` or a ``bool`` slot. -There are also ``float``, and ``list`` slots. -If you just want to store some data, but don't want it to affect the flow -of the conversation, use an ``unfeaturized`` slot. - - -How Rasa Uses Slots -------------------- - -The ``Policy`` doesn't have access to the -value of your slots. It receives a featurized representation. -As mentioned above, for a ``text`` slot the value is irrelevant. -The policy just sees a ``1`` or ``0`` depending on whether it is set. - -**You should choose your slot types carefully!** - -How Slots Get Set ------------------ - -You can provide an initial value for a slot in your domain file: - -.. 
code-block:: yaml - - slots: - name: - type: text - initial_value: "human" - -You can get the value of a slot using ``.get_slot()`` inside ``actions.py`` for example: - -.. code-block:: python - - data = tracker.get_slot("slot-name") - - - -There are multiple ways that slots are set during a conversation: - -Slots Set from NLU -~~~~~~~~~~~~~~~~~~ - -If your NLU model picks up an entity, and your domain contains a -slot with the same name, the slot will be set automatically. For example: - -.. code-block:: story - - # story_01 - * greet{"name": "Ali"} - - slot{"name": "Ali"} - - utter_greet - -In this case, you don't have to include the ``- slot{}`` part in the -story, because it is automatically picked up. - -To disable this behavior for a particular slot, you can set the -``auto_fill`` attribute to ``False`` in the domain file: - -.. code-block:: yaml - - slots: - name: - type: text - auto_fill: False - - -Slots Set By Clicking Buttons -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -You can use buttons as a shortcut. -Rasa Core will send messages starting with a ``/`` to the -``RegexInterpreter``, which expects NLU input in the same format -as in story files, e.g. ``/intent{entities}``. For example, if you let -users choose a color by clicking a button, the button payloads might -be ``/choose{"color": "blue"}`` and ``/choose{"color": "red"}``. - -You can specify this in your domain file like this: -(see details in :ref:`domains`) - -.. code-block:: yaml - - utter_ask_color: - - text: "what color would you like?" - buttons: - - title: "blue" - payload: '/choose{"color": "blue"}' - - title: "red" - payload: '/choose{"color": "red"}' - - -Slots Set by Actions -~~~~~~~~~~~~~~~~~~~~ - -The second option is to set slots by returning events in :ref:`custom actions `. -In this case, your stories need to include the slots. -For example, you have a custom action to fetch a user's profile, and -you have a ``categorical`` slot called ``account_type``. 
-When the ``fetch_profile`` action is run, it returns a -:class:`rasa.core.events.SlotSet` event: - -.. code-block:: yaml - - slots: - account_type: - type: categorical - values: - - premium - - basic - -.. code-block:: python - - from rasa_sdk.actions import Action - from rasa_sdk.events import SlotSet - import requests - - class FetchProfileAction(Action): - def name(self): - return "fetch_profile" - - def run(self, dispatcher, tracker, domain): - url = "http://myprofileurl.com" - data = requests.get(url).json - return [SlotSet("account_type", data["account_type"])] - - -.. code-block:: story - - # story_01 - * greet - - action_fetch_profile - - slot{"account_type" : "premium"} - - utter_welcome_premium - - # story_02 - * greet - - action_fetch_profile - - slot{"account_type" : "basic"} - - utter_welcome_basic - - -In this case you **do** have to include the ``- slot{}`` part in your stories. -Rasa Core will learn to use this information to decide on the correct action to -take (in this case, ``utter_welcome_premium`` or ``utter_welcome_basic``). - -.. note:: - It is **very easy** to forget about slots if you are writing - stories by hand. We strongly recommend that you build up these - stories using :ref:`section_interactive_learning_forms` rather than writing them. - - -.. _slot-classes: - -Slot Types ----------- - -Text Slot -~~~~~~~~~ - -.. option:: text - - :Use For: User preferences where you only care whether or not they've - been specified. - :Example: - .. sourcecode:: yaml - - slots: - cuisine: - type: text - :Description: - Results in the feature of the slot being set to ``1`` if any value is set. - Otherwise the feature will be set to ``0`` (no value is set). - -Boolean Slot -~~~~~~~~~~~~ - -.. option:: bool - - :Use For: True or False - :Example: - .. sourcecode:: yaml - - slots: - is_authenticated: - type: bool - :Description: - Checks if slot is set and if True - -Categorical Slot -~~~~~~~~~~~~~~~~ - -.. 
option:: categorical - - :Use For: Slots which can take one of N values - :Example: - .. sourcecode:: yaml - - slots: - risk_level: - type: categorical - values: - - low - - medium - - high - - :Description: - Creates a one-hot encoding describing which of the ``values`` matched. - A default value ``__other__`` is automatically added to the user-defined - values. All values encountered which are not explicitly defined in the - domain are mapped to ``__other__`` for featurization. The value - ``__other__`` should not be used as a user-defined value; if it is, it - will still behave as the default to which all unseen values are mapped. - -Float Slot -~~~~~~~~~~ - -.. option:: float - - :Use For: Continuous values - :Example: - .. sourcecode:: yaml - - slots: - temperature: - type: float - min_value: -100.0 - max_value: 100.0 - - :Defaults: ``max_value=1.0``, ``min_value=0.0`` - :Description: - All values below ``min_value`` will be treated as ``min_value``, the same - happens for values above ``max_value``. Hence, if ``max_value`` is set to - ``1``, there is no difference between the slot values ``2`` and ``3.5`` in - terms of featurization (e.g. both values will influence the dialogue in - the same way and the model can not learn to differentiate between them). - -List Slot -~~~~~~~~~ - -.. option:: list - - :Use For: Lists of values - :Example: - .. sourcecode:: yaml - - slots: - shopping_items: - type: list - :Description: - The feature of this slot is set to ``1`` if a value with a list is set, - where the list is not empty. If no value is set, or the empty list is the - set value, the feature will be ``0``. The **length of the list stored in - the slot does not influence the dialogue**. - -.. _unfeaturized-slot: - -Unfeaturized Slot -~~~~~~~~~~~~~~~~~ - -.. option:: unfeaturized - - :Use For: Data you want to store which shouldn't influence the dialogue flow - :Example: - .. 
sourcecode:: yaml - - slots: - internal_user_id: - type: unfeaturized - :Description: - There will not be any featurization of this slot, hence its value does - not influence the dialogue flow and is ignored when predicting the next - action the bot should run. - -Custom Slot Types ------------------ - -Maybe your restaurant booking system can only handle bookings -for up to 6 people. In this case you want the *value* of the -slot to influence the next selected action (and not just whether -it's been specified). You can do this by defining a custom slot class. - -In the code below, we define a slot class called ``NumberOfPeopleSlot``. -The featurization defines how the value of this slot gets converted to a vector -to our machine learning model can deal with. -Our slot has three possible "values", which we can represent with -a vector of length ``2``. - -+---------------+------------------------------------------+ -| ``(0,0)`` | not yet set | -+---------------+------------------------------------------+ -| ``(1,0)`` | between 1 and 6 | -+---------------+------------------------------------------+ -| ``(0,1)`` | more than 6 | -+---------------+------------------------------------------+ - - -.. testcode:: - - from rasa.core.slots import Slot - - class NumberOfPeopleSlot(Slot): - - def feature_dimensionality(self): - return 2 - - def as_feature(self): - r = [0.0] * self.feature_dimensionality() - if self.value: - if self.value <= 6: - r[0] = 1.0 - else: - r[1] = 1.0 - return r - -Now we also need some training stories, so that Rasa Core -can learn from these how to handle the different situations: - - -.. code-block:: story - - # story1 - ... - * inform{"people": "3"} - - action_book_table - ... 
- # story2 - * inform{"people": "9"} - - action_explain_table_limit diff --git a/docs/core/stories.rst b/docs/core/stories.rst deleted file mode 100644 index 366386f1051f..000000000000 --- a/docs/core/stories.rst +++ /dev/null @@ -1,218 +0,0 @@ -:desc: Stories are used to teach Rasa real conversation designs to learn - from, providing the basis for a scalable machine learning dialogue management. - -.. _stories: - -Stories ======= - -.. edit-link:: - -.. contents:: - :local: - -Rasa stories are a form of training data used to train Rasa's dialogue management models. - -A story is a representation of a conversation between a user and an AI assistant, converted into a specific format where user inputs are expressed as corresponding intents (and entities where necessary) while the responses of an assistant are expressed as corresponding action names. - -A training example for the Rasa Core dialogue system is called a **story**. -This is a guide to the story data format. - -.. note:: - You can also **spread your stories across multiple files** and specify the - folder containing the files for most of the scripts (e.g. training, - visualization). The stories will be treated as if they had - been part of one large file. - - -Format ------ - -Here's an example of a dialogue in the Rasa story format: - -.. code-block:: story - - ## greet + location/price + cuisine + num people - * greet - - action_ask_howcanhelp - * inform{"location": "rome", "price": "cheap"} - - action_on_it - - action_ask_cuisine - * inform{"cuisine": "spanish"} - - action_ask_numpeople - * inform{"people": "six"} - - action_ack_dosearch - - -What makes up a story? -~~~~~~~~~~~~~~~~~~~~~~ - -- A story starts with a name preceded by two hashes ``## story_03248462``. - You can call the story anything you like, but it can be very useful for - debugging to give them descriptive names! -- The end of a story is denoted by a newline, and then a new story - starts again with ``##``.
-- Messages sent by the user are shown as lines starting with ``*`` - in the format ``intent{"entity1": "value", "entity2": "value"}``. -- Actions executed by the bot are shown as lines starting with ``-`` - and contain the name of the action. -- Events returned by an action are on lines immediately after that action. - For example, if an action returns a ``SlotSet`` event, this is shown as - ``slot{"slot_name": "value"}``. - - -User Messages -~~~~~~~~~~~~~ -While writing stories, you do not have to deal with the specific contents of -the messages that the users send. Instead, you can take advantage of the output -from the NLU pipeline, which lets you use just the combination of an intent and -entities to refer to all the possible messages the users can send to mean the -same thing. - -It is important to include the entities here as well because the policies learn -to predict the next action based on a *combination* of both the intent and -entities (you can, however, change this behavior using the -:ref:`use_entities ` attribute). - -.. warning:: - ``/`` symbol is reserved as a delimiter to separate retrieval intents from response text identifiers. - Refer to ``Training Data Format`` section of :ref:`retrieval-actions` for more details on this format. - If any of the intent names contain the delimiter, the file containing these stories will be considered as a training - file for :ref:`response-selector` model and will be ignored for training Core models. - -Actions -~~~~~~~ -While writing stories, you will encounter two types of actions: utterance actions -and custom actions. Utterance actions are hardcoded messages that a bot can respond -with. Custom actions, on the other hand, involve custom code being executed. - -All actions (both utterance actions and custom actions) executed by the bot are shown -as lines starting with ``-`` followed by the name of the action. 
- -The responses for utterance actions must begin with the prefix ``utter_``, and must match the name -of the response defined in the domain. - -For custom actions, the action name is the string you choose to return from -the ``name`` method of the custom action class. Although there is no restriction -on naming your custom actions (unlike utterance actions), the best practice here is to -prefix the name with ``action_``. - -Events -~~~~~~ -Events such as setting a slot or activating/deactivating a form have to be -explicitly written out as part of the stories. Having to include the events -returned by a custom action separately, when that custom action is already -part of a story might seem redundant. However, since Rasa cannot -determine this fact during training, this step is necessary. - -You can read more about events :ref:`here `. - -Slot Events -*********** -Slot events are written as ``- slot{"slot_name": "value"}``. If this slot is set -inside a custom action, it is written on the line immediately following the -custom action event. If your custom action resets a slot value to `None`, the -corresponding event for that would be ``-slot{"slot_name": null}``. - -Form Events -*********** -There are three kinds of events that need to be kept in mind while dealing with -forms in stories. - -- A form action event (e.g. ``- restaurant_form``) is used in the beginning when first starting a form, and also while resuming the form action when the form is already active. -- A form activation event (e.g. ``- form{"name": "restaurant_form"}``) is used right after the first form action event. -- A form deactivation event (e.g. ``- form{"name": null}``), which is used to deactivate the form. - - -.. note:: - In order to get around the pitfall of forgetting to add events, the recommended - way to write these stories is to use :ref:`interactive learning `. 
- - -Checkpoints and OR statements ------------------------------ - -Checkpoints and OR statements should both be used with caution, if at all. -There is usually a better way to achieve what you want by using forms and/or -retrieval actions. - - -Checkpoints -~~~~~~~~~~~ - -You can use ``> checkpoints`` to modularize and simplify your training -data. Checkpoints can be useful, but **do not overuse them**. Using -lots of checkpoints can quickly make your example stories hard to -understand. It makes sense to use them if a story block is repeated -very often in different stories, but stories *without* checkpoints -are easier to read and write. Here is an example story file which -contains checkpoints (note that you can attach more than one checkpoint -at a time): - -.. code-block:: story - - ## first story - * greet - - action_ask_user_question - > check_asked_question - - ## user affirms question - > check_asked_question - * affirm - - action_handle_affirmation - > check_handled_affirmation - - ## user denies question - > check_asked_question - * deny - - action_handle_denial - > check_handled_denial - - ## user leaves - > check_handled_denial - > check_handled_affirmation - * goodbye - - utter_goodbye - -.. note:: - Unlike regular stories, checkpoints are not restricted to starting with an - input from the user. As long as the checkpoint is inserted at the right points - in the main stories, the first event can be a custom action or a response action - as well. - - -OR Statements -~~~~~~~~~~~~~ - -Another way to write shorter stories, or to handle multiple intents -the same way, is to use an ``OR`` statement. For example, if you ask -the user to confirm something, and you want to treat the ``affirm`` -and ``thankyou`` intents in the same way. The story below will be -converted into two stories at training time: - - -.. code-block:: story - - ## story - ... 
- - utter_ask_confirm - * affirm OR thankyou - - action_handle_affirmation - -Just like checkpoints, ``OR`` statements can be useful, but if you are using a -lot of them, it is probably better to restructure your domain and/or intents. - - -.. warning:: - Overusing these features (both checkpoints and OR statements) - will slow down training. - - -End-to-End Story Evaluation Format ----------------------------------- - -The end-to-end story format is a format that combines both NLU and Core training data -into a single file for evaluation. Read more about :ref:`testing-your-assistant` - -.. warning:: - This format is only used for end-to-end evaluation and cannot be used for training. diff --git a/docs/dialogue-elements/completing-tasks.rst b/docs/dialogue-elements/completing-tasks.rst deleted file mode 100644 index d0ee4f04fac0..000000000000 --- a/docs/dialogue-elements/completing-tasks.rst +++ /dev/null @@ -1,121 +0,0 @@ -:desc: Read about common dialogue patterns encountered by task-oriented - bots and how best to handle them using Rasa's open source dialogue - management system. - -.. _completing-tasks: - -================ -Completing Tasks -================ - -.. edit-link:: - -.. contents:: - :local: - -.. _simple-questions: - -Simple Questions ----------------- - -Simple questions, or FAQs, should receive the same answer -no matter what happened previously in the conversation. -Users will often ask a basic set of questions -and your assistant should answer them reliably. - -.. conversations:: - examples: - - - - what's your email address? - - ( it's contact@example.com - - - - do you have a loyalty program? - - ( unfortunately we don't - -Just like greetings and goodbyes, you can use the mapping policy to achieve this. -See :ref:`greetings`. - -Business Logic --------------- - -.. note:: - There is an in-depth tutorial `here `_ about how to use Rasa Forms for slot filling and business logic. 
- -Your AI assistant will often have to follow some pre-defined business logic. -To figure out how to help users, your assistant will often have to ask a few questions. -The answers you get will impact the rest of the conversation; for example, some products might -be limited to users in a certain country or above a certain age. It is good practice to -implement that logic inside a form, separating it from the learned behavior. A single form -can cover all the happy paths (e.g. all the ways that a user can provide the required information). -You can read more about forms in `this tutorial `_. - -.. conversations:: - examples: - - - - I'd like to apply for a loan - - ( I'd love to help. Which state are you in? - - Alaska - - ( Unfortunately, we only operate in the continental U.S. - - - - I'd like to apply for a loan - - ( I'd love to help. Which state are you in? - - California - - ( Thanks. Do you know what your credit score is? - - -See :ref:`conditional-logic` for details on how to use forms to implement business logic. - -Contextual Questions --------------------- - -Unlike answers to FAQs, correct responses to contextual questions depend on the conversation history. -These include questions which refer to something earlier in the conversation and are ambiguous -on their own. -Real users will often ask questions like "which is better?" and "why?". -It is frustrating for users if your assistant doesn't understand this, -and can only answer full questions like "which of your savings accounts has a better interest rate?" -Understanding contextual questions is a key difference between -`level 2 and level 3 assistants `_. - - -.. conversations:: - examples: - - - - ( what's your email address? - - why do you need to know that? - - ( I need your email so I can send you a confirmation - - - - ( are you currently a premium customer? - - what do you mean? - - ( We have different memberships. Check your statement to see if you are a premium member. - -.. 
_unhappy-paths: - -Unhappy Paths -------------- - -When your assistant asks a user for information, you will often get responses other -than the information you asked for. For example, the user might refuse to provide this information, -they might correct something they said earlier, or interrupt with chitchat. -It is important that your assistant can handle these edge cases. There -are so many things a user might say other than provide you the information you asked for, -and a simple interruption shouldn't throw off your assistant completely. -This is a key reason for building an assistant that can learn from real data. - -The best way to collect training data for unhappy paths is to use -:ref:`interactive-learning`. - -.. conversations:: - examples: - - - - ( what's your email address? - - no. - - ( I will need your email address in order to create an account. - - ( what's your email address? - - - - ( what's your email address? - - work@example.com - - ( thanks, and your phone number? - - no wait, please use personal@example.com - - ( ok, I'll use that email. - - ( thanks, and your phone number? diff --git a/docs/dialogue-elements/dialogue-elements.rst b/docs/dialogue-elements/dialogue-elements.rst deleted file mode 100644 index 340bcbe7edc4..000000000000 --- a/docs/dialogue-elements/dialogue-elements.rst +++ /dev/null @@ -1,34 +0,0 @@ -:desc: Dialogue elements are an abstraction layer for your conversational AI platform - which describe common, recurring patterns in chatbot conversations. - -.. _dialogue-elements: - -Dialogue Elements -================= - -.. edit-link:: - -Dialogue elements are common conversation patterns. -We use three different levels of abstraction to discuss AI assistants. -This can be helpful in a product team, so that you have a common language -which designers, developers, and product owners can use to discuss -issues and new features. 
- -- highest level: user goals -- middle level: dialogue elements -- lowest level: intents, entities, actions, slots, and responses. - - - -.. note:: - Some chatbot tools use the word ``intent`` to refer to the user - goal. This is confusing because only some messages tell you what a user's - goal is. If a user says "I want to open an account" (``intent: open_account``), - that is clearly their goal. But most user messages ("yes", "what does that mean?", "I don't know") - aren't specific to one goal. In Rasa, every message has an intent, - and a user goal describes what a person wants to achieve. - - -.. image:: /_static/images/intents-user-goals-dialogue-elements.png - - diff --git a/docs/dialogue-elements/guiding-users.rst b/docs/dialogue-elements/guiding-users.rst deleted file mode 100644 index 06b0c7c4b78b..000000000000 --- a/docs/dialogue-elements/guiding-users.rst +++ /dev/null @@ -1,193 +0,0 @@ -:desc: Read about dialogue patterns you can use to deliver a friendlier user - experience with your bot using Rasa's open source dialogue chat - assistant platform. - -.. _guiding-users: - -============= -Guiding Users -============= - -.. edit-link:: - -.. contents:: - :local: - -.. _implicit-confirmation: - -Implicit Confirmation ---------------------- - -Implicit confirmation involves repeating details back to the user to reassure -them that they were understood correctly. -This also gives the user a chance to intervene if your assistant misunderstood. - -.. conversations:: - examples: - - - - Can I get a large hawaiian and bbq pizza - - ( Sure, that's one large hawaiian and one regular bbq pizza. - - ( Anything else? - - No they should both be large! - - -.. _explicit-confirmation: - -Explicit Confirmation ---------------------- - -Explicit confirmation means asking the user to clarify how you should help them. -An important thing to remember about AI assistants is that **the user is never wrong**. 
-When a user tells you something like `I just moved`, they are being perfectly clear, -even if your assistant is not sure how to help them. -If the user goal is ambiguous to your assistant, ask for clarification. - - -.. conversations:: - examples: - - - - I just moved - - ( I'm not sure I understood you correctly. Do you mean ... - - ^ I want to cancel my contract - - ^ I want to update my personal details - - -You can configure the ``TwoStageFallbackPolicy`` to ask your user to clarify, -and present them with quick replies for the most likely intents. -To do this, configure the policy as in this example: - -.. code-block:: yaml - - policies: - - name: TwoStageFallbackPolicy - nlu_threshold: 0.3 - core_threshold: 0.3 - fallback_core_action_name: "action_default_fallback" - fallback_nlu_action_name: "action_default_fallback" - deny_suggestion_intent_name: "out_of_scope" - ... - -.. _explaining-possibilities: - -Explaining Possibilities ------------------------- - -AI assistants are always limited to helping users with a specific set of -tasks, and should be able to explain to a user what they can do. -That includes coherently responding to requests that are out of scope. - - -.. conversations:: - examples: - - - - What can you do? - - ( I can help you update your personal details, change your plan, and answer any questions you have about our products. - - - - Can you get me a pizza? - - ( I'm afraid I can't help with that. - - ( I can help you update your personal details, change your plan, and answer any questions you have about our products. - - -When to explain the limitations of your assistant is specific to your application, -but these example stories show some common cases: - -.. 
code-block:: story - - ## user asks whats possible - * ask_whatspossible - - utter_explain_whatspossible - - ## user asks for something out of scope - * out_of_scope - - utter_cannot_help - - utter_explain_whatspossible - - -Collecting User Feedback ------------------------- - -Asking for feedback is one of the best tools you have to understand -your users and determine whether you solved their problem! -Storing this feedback is a powerful way to figure out how you can improve your assistant. - -.. conversations:: - examples: - - - - ( Was that helpful? - - no. - - ( Thanks. Why wasn't I able to help? - - ^ you didn't understand me correctly - - ^ you understood me, but your answers weren't very helpful. - - -Use a form to collect user feedback. To do this, define a custom form action -(see :ref:`forms` for more details about forms). - -.. code-block:: python - - from rasa_sdk.action import FormAction - - class FeedbackForm(FormAction): - - def name(self): - return "feedback_form" - - @staticmethod - def required_slots(tracker): - return ["feedback", "negative_feedback_reason"] - - -Add the form and slots to your domain: - - -.. code-block:: yaml - - forms: - - feedback_form - slots: - feedback: - type: bool - feedback_reason: - type: text - requested_slot: - type: text - -And make sure the ``FormPolicy`` is present in your configuration file: - -.. code-block:: yaml - - policies: - - FormPolicy - ... - - - -Handing off to a Human ----------------------- - -Users will be very frustrated if your assistant cannot help them and there is no way to reroute -the conversation to a human agent. There should always be a way to break out of a conversation! -There are multiple reasons why you might trigger a human handoff: - -* the user asks to speak to a human -* the assistant is struggling to understand the user -* the assistant understands what the user wants, but a human is required to resolve the issue. - - -.. 
conversations:: - examples: - - - - let me speak to a human - - ( let me put you in touch with someone. - - - - I want to cancel - - ( I'm afraid I can't help you with that. - - ( let me put you in touch with someone. - - -The direct request to speak with a human can be handled using the mapping policy: - -.. code-block:: yaml - - intents: - - request_human: {"triggers": "action_human_handoff"} diff --git a/docs/dialogue-elements/small-talk.rst b/docs/dialogue-elements/small-talk.rst deleted file mode 100644 index 98d315ca9cc8..000000000000 --- a/docs/dialogue-elements/small-talk.rst +++ /dev/null @@ -1,184 +0,0 @@ -:desc: Learn to handle greetings, off-topic chitchat, and other small talk - in your bot using features provided by Rasa's open source chat assistant - platform. - -.. _small-talk: - -========== -Small Talk -========== - -.. edit-link:: - -Small talk includes the back-and-forth that makes conversations natural, -but doesn’t directly relate to the user's goal. This includes greetings, -acknowledgements, reactions, and off-topic chitchat. - -.. contents:: - :local: - -.. _greetings: - -Greetings ---------- - -Greetings and goodbyes are some of the simplest interactions. Just about every system needs them. - -.. conversations:: - examples: - - - - hello - - ( hi, how are you? - - - - how are you? - - ( I am well, and you? - - - - goodbye - - ( bye bye! - - -To respond correctly to greetings and goodbyes, you need to define responses -for each of these. If you always want the same responses, you can use the ``MappingPolicy`` -to trigger these responses when the corresponding intent is predicted. - -In your domain file, add the ``triggers`` metadata to the relevant intents: - -.. code-block:: yaml - - intents: - - greet: {triggers: utter_greet} - - goodbye: {triggers: utter_goodbye} - -And make sure the mapping policy is present in your ``config.yml``: - -.. code-block:: yaml - - policies: - - name: "MappingPolicy" - ... 
- -If you want to implement less rigid behavior, use regular stories -instead of the mapping policy. For example, if you want to send a special -response if the user says goodbye immediately after saying hello, remove the -``triggers`` metadata from the domain file, and include relevant stories in your -training data: - -.. code-block:: story - - * greet - - utter_greet - * goodbye - - utter_ask_why_leaving - - -Acknowledgements ----------------- - -Your users will often react to the things your assistant says, and will expect an acknowledgement. -Acknowledgements can be as simple as a thumbs up. -They reassure the user that their message has been received. -For the most common reactions, it is worth implementing specific responses. - -.. conversations:: - examples: - - - - woah that's expensive! - - ( we offer good value. - - ( would you like to continue getting a quote? - - - - that's awesome! - - ( glad you think so :) - - -First, you need NLU data for reactions and acknowledgements: - -.. code-block:: md - - ## intent:acknowledge - - ok - - got it - - understood - - k - - ## intent:opinion+positive - - nice! - - excellent - - that's awesome - - ## intent:opinion+negative - - ugh - - that sucks - - woah! that's [expensive](price) - - -And then you need training stories to teach Rasa how to respond: - -.. code-block:: story - - ## price reaction - * opinion+negative{"price": "expensive"} - - utter_good_value - - utter_ask_continue - - ## simple acknowledgement - * opinion+positive - - utter_positive_feedback_reaction - - -Chitchat --------- - -Your assistant will often receive unexpected or unprompted input. -We call this chitchat. -While it's not possible to coherently respond to everything a user -might say, you can at least acknowledge that the message was received. -One strategy is to collect training data from your users and define intents -and responses for some of the more common topics. 
-See :ref:`explaining-possibilities` for how to handle out-of-scope input. - -.. conversations:: - examples: - - - - will you marry me? - - ( no - - - - I can juggle 7 oranges - - ( wow! - - - - aahhh - - ( I feel you - - -Insults -------- - -Unfortunately users will often abuse your assistant. You should acknowledge the nature of their -comment and respond in a way that reflects your assistant's persona. -Responding with a joke can encourage users to continue sending abuse, so consider your responses carefully. -You can read more about this topic in `this paper `_. - - -.. conversations:: - examples: - - - - stupid bot - - ( that's not very nice - - -The simplest approach is to create a single ``insult`` intent and use the mapping policy -to respond to it: - -In your domain file: - -.. code-block:: yaml - - intents: - - insult: {triggers: utter_respond_insult} - -And in your configuration file: - -.. code-block:: yaml - - policies: - - name: "MappingPolicy" - ... diff --git a/docs/glossary.rst b/docs/glossary.rst deleted file mode 100644 index 2f5ffa24ffbf..000000000000 --- a/docs/glossary.rst +++ /dev/null @@ -1,97 +0,0 @@ -:desc: Glossary for all Rasa-related terms - -.. _glossary: - -Glossary -======== - -.. glossary:: - - :ref:`Action ` - A single step that a bot takes in a conversation (e.g. calling an API or sending a response back to the user). - - Annotation - Adding labels to messages and conversations so that they can be used to train a model. - - CMS - A Content Management System (CMS) can be used to store bot responses externally instead of directly including it as part of the domain. This provides more flexibility in changing them as they are not tightly-coupled with the training data. - - :ref:`Custom Action ` - An action written by a Rasa developer that can run arbitrary code mainly to interact with the outside world. - - :ref:`Default Action ` - A built-in action that comes with predefined functionality. 
- - :ref:`Domain ` - Defines the inputs and outputs of an assistant. - - It includes a list of all the intents, entities, slots, actions, and forms that the assistant knows about. - - :ref:`Entity ` - Structured information that can be extracted from a user message. - - For example a telephone number, a person's name, a location, the name of a product - - :ref:`Event ` - All conversations in Rasa are represented as a sequence of events. For instance, a ``UserUttered`` represents a user entering a message, and an ``ActionExecuted`` represents the assistant executing an action. You can learn more about them :ref:`here `. - - :ref:`Form ` - A type of custom action that asks the user for multiple pieces of information. - - For example, if you need a city, a cuisine, and a price range to recommend a restaurant, you can create a restaurant form to do that. You can describe any business logic inside a form. For example, if you want to ask for a particular neighbourhood if a user mentions a large city like Los Angeles, you can write that logic inside the form. - - Happy / Unhappy Paths - If your assistant asks a user for some information and the user provides it, we call that a happy path. Unhappy paths are all the possible edge cases of a bot. For example, the user refusing to give some input, changing the topic of conversation, or correcting something they said earlier. - - Intent - Something that a user is trying to convey or accomplish (e,g., greeting, specifying a location). - - :ref:`Interactive Learning ` - A mode of training the bot where the user provides feedback to the bot while talking to it. - - This is a powerful way to write complicated stories by enabling users to explore what a bot can do and easily fix any mistakes it makes. - - Minimum viable assistant - A basic assistant that can handle the most important happy path stories. - - NLG - Natural Language Generation (NLG) is the process of generating natural language messages to send to a user. 
- - Rasa uses a simple template-based approach for NLG. Data-driven approaches (such as neural NLG) can be implemented by creating a custom NLG component. - - :ref:`Rasa NLU ` - Natural Language Understanding (NLU) deals with parsing and understanding human language into a structured format. - - Rasa NLU is the part of Rasa that performs intent classification and entity extraction. - - :ref:`Pipeline ` - A Rasa bot's NLU system is defined by a pipeline, which is a list of NLU components (see "Rasa NLU Component") in a particular order. A user input is processed by each component one by one before finally giving out the structured output. - - :ref:`Policy ` - Policies make decisions on how conversation flow should proceed. At every turn, the policy which predicts the next action with the highest confidence will be used. A Core model can have multiple policies included, and the policy whose prediction has the highest confidence decides the next action to be taken. - - :ref:`Rasa Core ` - The dialogue engine that decides on what to do next in a conversation based on the context. - - :ref:`Rasa NLU Component ` - An element in the Rasa NLU pipeline (see "Pipeline"). - - Incoming messages are processed by a sequence of components called a pipeline. A component can perform tasks ranging from entity extraction to intent classification to pre-processing. - - :ref:`Slot ` - A key-value store that Rasa uses to track information over the course of a conversation. - - :ref:`Story ` - A conversation between a user and a bot annotated with the intent / entities of the users' messages as well as the sequence of actions to be performed by the bot - - :ref:`Template / Response / Utterance ` - A message template that is used to respond to a user. This can include text, buttons, images, and other attachments. - - User Goal - A goal that a user wants to achieve. - - For example, a user may have the goal of booking a table at a restaurant. Another user may just want to make small talk. 
Sometimes, the user expresses their goal with a single message, e.g. "I want to book a table at a restaurant". Other times the assistant may have to ask a few questions to understand how to help the user. Note: Many other places refer to the user goal as the "intent", but in Rasa terminology, an intent is associated with every user message. - - Word embedding / Word vector - A vector of floating point numbers which represent the meaning of a word. Words which have similar meanings should have vectors which point in almost the same direction. Word embeddings are often used as an input to machine learning algorithms. - diff --git a/docs/index.rst b/docs/index.rst deleted file mode 100644 index 2c70887d363c..000000000000 --- a/docs/index.rst +++ /dev/null @@ -1,117 +0,0 @@ -:desc: Learn more about open-source natural language processing library Rasa NLU - for intent classification and entity extraction in on premise chatbots. - -.. _index: - -Build contextual chatbots and AI assistants with Rasa -===================================================== - -.. note:: - These docs are for Rasa 1.0 and later. Docs for older versions are at https://legacy-docs.rasa.com. - - This is the documentation for version |release| of Rasa. Please make sure you are reading the documentation - that matches the version you have installed. - - -Rasa is an open source machine learning framework for automated text and voice-based conversations. -Understand messages, hold conversations, and connect to messaging channels and APIs. - - -.. toctree:: - :maxdepth: 1 - :caption: User Guide - :hidden: - - user-guide/installation - user-guide/rasa-tutorial - user-guide/building-assistants - user-guide/command-line-interface - user-guide/architecture - user-guide/messaging-and-voice-channels - user-guide/testing-your-assistant - user-guide/setting-up-ci-cd - user-guide/validate-files - user-guide/configuring-http-api - user-guide/how-to-deploy - user-guide/cloud-storage - -.. 
toctree:: - :maxdepth: 1 - :caption: NLU - :hidden: - - About - nlu/using-nlu-only - nlu/training-data-format - nlu/language-support - nlu/choosing-a-pipeline - nlu/components - nlu/entity-extraction - -.. toctree:: - :maxdepth: 1 - :caption: Core - :hidden: - - About - core/stories - core/domains - core/responses - core/actions - core/reminders-and-external-events - core/policies - core/slots - core/forms - core/retrieval-actions - core/interactive-learning - core/fallback-actions - core/knowledge-bases - -.. toctree:: - :maxdepth: 1 - :caption: Conversation Design - :hidden: - - dialogue-elements/dialogue-elements - dialogue-elements/small-talk - dialogue-elements/completing-tasks - dialogue-elements/guiding-users - -.. toctree:: - :maxdepth: 1 - :hidden: - :caption: API Reference - - api/action-server - api/http-api - api/jupyter-notebooks - api/agent - api/custom-nlu-components - api/rasa-sdk - api/events - api/tracker - api/tracker-stores - api/event-brokers - api/lock-stores - api/training-data-importers - api/core-featurization - api/tensorflow_usage - migration-guide - changelog - -.. toctree:: - :maxdepth: 1 - :hidden: - :caption: Migrate from (beta) - - Dialogflow - Wit.ai - LUIS - IBM Watson - -.. toctree:: - :maxdepth: 1 - :hidden: - :caption: Reference - - glossary diff --git a/docs/migrate-from/facebook-wit-ai-to-rasa.rst b/docs/migrate-from/facebook-wit-ai-to-rasa.rst deleted file mode 100644 index ad435f69788d..000000000000 --- a/docs/migrate-from/facebook-wit-ai-to-rasa.rst +++ /dev/null @@ -1,100 +0,0 @@ -:desc: Open source alternative to Facebook's Wit.ai for conversational bots and NLP - -.. _facebook-wit-ai-to-rasa: - -Rasa as open source alternative to Facebook's Wit.ai - Migration Guide -====================================================================== - -.. edit-link:: - -This guide shows you how to migrate your application built with Facebook's Wit.ai to Rasa. 
Here are a few reasons why we see developers switching: - -* **Faster**: Runs locally - no http requests and server round trips required -* **Customizable**: Tune models and get higher accuracy with your data set -* **Open source**: No risk of vendor lock-in - Rasa is under the Apache 2.0 license and you can use it in commercial projects - - -.. raw:: html - - In addition, our open source tools allow developers to build contextual AI assistants and manage dialogues with machine learning instead of rules - learn more in this blog post. - - -Let's get started with migrating your application from Wit.ai to Rasa: - - -Step 1: Export your Training Data from Wit.ai -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Navigate to your app's setting page by clicking the **Settings** icon in the upper right corner. Scroll down to **Export your data** and hit the button **Download .zip with your data**. - -This will download a file with a ``.zip`` extension. Unzip this file to create a folder. The file you want from your download is called ``expressions.json`` - -Step 2: Create a Rasa Project -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -To create a Rasa project, run: - -.. code-block:: bash - - rasa init - -This will create a directory called ``data``. -Remove the files in this directory, and -move the expressions.json file into this directory. - -.. code-block:: bash - - rm -r data/* - mv /path/to/expressions.json data/ - - - -Step 3: Train your NLU model -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -To train a model using your Wit data, run: - -.. code-block:: bash - - rasa train nlu - -Step 4: Test your NLU model -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Let's see how your NLU model will interpret some test messages. -To start a testing session, run: - -.. code-block:: bash - - rasa shell nlu - -This will prompt your for input. -Type a test message and press 'Enter'. -The output of your NLU model will be printed to the screen. -You can keep entering messages and test as many as you like. 
-Press 'control + C' to quit. - - -Step 5: Start a Server with your NLU Model -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -To start a server with your NLU model, run: - -.. code-block:: bash - - rasa run nlu - -This will start a server listening on port 5005. - -To send a request to the server, run: - -.. copyable:: - - curl 'localhost:5005/model/parse?emulation_mode=wit' -d '{"text": "hello"}' - -The ``emulation_mode`` parameter tells Rasa that you want your json -response to have the same format as you would get from wit.ai. -You can also leave it out to get the result in the usual Rasa format. - - -Join the `Rasa Community Forum `_ and let us know how your migration went! diff --git a/docs/migrate-from/google-dialogflow-to-rasa.rst b/docs/migrate-from/google-dialogflow-to-rasa.rst deleted file mode 100644 index 818ba776d2c4..000000000000 --- a/docs/migrate-from/google-dialogflow-to-rasa.rst +++ /dev/null @@ -1,121 +0,0 @@ -:desc: Open source alternative to Google Dialogflow for conversational bots and NLP - -.. _google-dialogflow-to-rasa: - -Rasa as open source alternative to Google Dialogflow - Migration Guide -====================================================================== - -.. edit-link:: - -This guide shows you how to migrate your application built with Google Dialogflow to Rasa. Here are a few reasons why we see developers switching: - -* **Faster**: Runs locally - no http requests and server round trips required -* **Customizable**: Tune models and get higher accuracy with your data set -* **Open source**: No risk of vendor lock-in - Rasa is under the Apache 2.0 license and you can use it in commercial projects - - -.. raw:: html - - In addition, our open source tools allow developers to build contextual AI assistants and manage dialogues with machine learning instead of rules - learn more in this blog post. -
-
- -.. raw:: html - - Let's get started with migrating your application from Dialogflow to Rasa (you can find a more detailed tutorial here): - - - - - -Step 1: Export your data from Dialogflow -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Navigate to your agent's settings by clicking the gear icon. - -.. image:: ../_static/images/dialogflow_export.png - :width: 240 - :alt: Dialogflow Export - -Click on the 'Export and Import' tab and click on the 'Export as ZIP' button. - -.. image:: ../_static/images/dialogflow_export_2.png - :width: 675 - :alt: Dialogflow Export 2 - - -This will download a file with a ``.zip`` extension. Unzip this file to create a folder. - -Step 2: Create a Rasa Project -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -To create a Rasa project, run: - -.. code-block:: bash - - rasa init - -This will create a directory called ``data``. -Remove the files in this directory, and -move your unzipped folder into this directory. - -.. code-block:: bash - - rm -r data/* - mv testagent data/ - -Step 3: Train your NLU model -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -To train a model using your dialogflow data, run: - -.. code-block:: bash - - rasa train nlu - -Step 4: Test your NLU model -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Let's see how your NLU model will interpret some test messages. -To start a testing session, run: - -.. code-block:: bash - - rasa shell nlu - -This will prompt your for input. -Type a test message and press 'Enter'. -The output of your NLU model will be printed to the screen. -You can keep entering messages and test as many as you like. -Press 'control + C' to quit. - - -Step 5: Start a Server with your NLU Model -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -To start a server with your NLU model, run: - -.. code-block:: bash - - rasa run - -This will start a server listening on port 5005. - -To send a request to the server, run: - -.. 
copyable:: - - curl 'localhost:5005/model/parse?emulation_mode=dialogflow' -d '{"text": "hello"}' - -The ``emulation_mode`` parameter tells Rasa that you want your json -response to have the same format as you would get from dialogflow. -You can also leave it out to get the result in the usual Rasa format. - -Terminology: -^^^^^^^^^^^^ - -The words ``intent``, ``entity``, and ``utterance`` have the same meaning in Rasa as they do in Dialogflow. -In Dialogflow, there is a concept called ``Fulfillment``. In Rasa we call this a `Custom Action `_. - - -Join the `Rasa Community Forum `_ and let us know how your migration went! diff --git a/docs/migrate-from/ibm-watson-to-rasa.rst b/docs/migrate-from/ibm-watson-to-rasa.rst deleted file mode 100644 index 85719e107488..000000000000 --- a/docs/migrate-from/ibm-watson-to-rasa.rst +++ /dev/null @@ -1,31 +0,0 @@ -:desc: Open source alternative to IBM Watson for conversational bots and NLP - -.. _ibm-watson-to-rasa: - -Rasa as open source alternative to IBM Watson - Migration Tips -============================================================== - -.. edit-link:: - - - -.. raw:: html - -
There is no support for IBM Watson yet. However, a group of community members is working on a way to use exported IBM Watson workspaces in Rasa. If you're interested in that, check out our Community Forum. - - -At Rasa, we hear a few different reasons why developers switch from cloud-based tools like IBM Watson: - -* **Faster**: Runs locally - no https requests and server round trips required -* **Customizable**: Tune models and get higher accuracy with your data set -* **Open source**: No risk of vendor lock-in - Rasa comes with an Apache 2.0 license and you can use it in commercial projects - - -.. raw:: html - - In addition, our open source tools allow developers to build contextual AI assistants and manage dialogues with machine learning instead of rules - learn more in this blog post. - - -.. button:: - :link: https://rasa.com/docs/getting-started/ - :text: Learn more about Rasa diff --git a/docs/migrate-from/microsoft-luis-to-rasa.rst b/docs/migrate-from/microsoft-luis-to-rasa.rst deleted file mode 100644 index 4e57bf737efe..000000000000 --- a/docs/migrate-from/microsoft-luis-to-rasa.rst +++ /dev/null @@ -1,111 +0,0 @@ -:desc: Open source alternative to Microsoft LUIS for conversational bots and NLP - -.. _microsoft-luis-to-rasa: - -Rasa as open source alternative to Microsoft LUIS - Migration Guide -=================================================================== - -.. edit-link:: - -This guide shows you how to migrate your application built with Microsoft LUIS to Rasa. Here are a few reasons why we see developers switching: - -* **Faster**: Runs locally - no http requests and server round trips required -* **Customizable**: Tune models and get higher accuracy with your data set -* **Open source**: No risk of vendor lock-in - Rasa is under the Apache 2.0 license and you can use it in commercial projects - - -.. 
raw:: html - - In addition, our open source tools allow developers to build contextual AI assistants and manage dialogues with machine learning instead of rules - learn more in this blog post. - - -Let's get started with migrating your application from LUIS to Rasa: - - -Step 1: Export your Training Data from LUIS -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Go to your list of `LUIS applications `_ and click -on the three dots menu next to the app you want to export. - -.. image:: ../_static/images/luis_export.png - :width: 240 - :alt: LUIS Export - -Select 'Export App'. This will download a file with a ``.json`` extension that can be imported directly into Rasa. - -Step 2: Create a Rasa Project -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -To create a Rasa project, run: - -.. code-block:: bash - - rasa init - -This will create a directory called ``data``. -Remove the files in this directory, and -move your json file into this directory. - -.. code-block:: bash - - rm -r data/* - mv /path/to/file.json data/ - -Step 3: Train your NLU model -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -To train a model using your LUIS data, run: - -.. code-block:: bash - - rasa train nlu - -Step 4: Test your NLU model -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Let's see how your NLU model will interpret some test messages. -To start a testing session, run: - -.. code-block:: bash - - rasa shell nlu - -This will prompt your for input. -Type a test message and press 'Enter'. -The output of your NLU model will be printed to the screen. -You can keep entering messages and test as many as you like. -Press 'control + C' to quit. - - -Step 5: Start a Server with your NLU Model -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -To start a server with your NLU model, run: - -.. code-block:: bash - - rasa run nlu - -This will start a server listening on port 5005. - -To send a request to the server, run: - -.. 
copyable:: - - curl 'localhost:5005/model/parse?emulation_mode=luis' -d '{"text": "hello"}' - -The ``emulation_mode`` parameter tells Rasa that you want your json -response to have the same format as you would get from LUIS. -You can also leave it out to get the result in the usual Rasa format. - -Terminology: -^^^^^^^^^^^^ - -The words ``intent``, ``entity``, and ``utterance`` have the same meaning in Rasa as they do -in LUIS. -LUIS's ``patterns`` feature is very similar to Rasa NLU's `regex features `_ -LUIS's ``phrase lists`` feature does not currently have an equivalent in Rasa NLU. - - -Join the `Rasa Community Forum `_ and let us know how your migration went! diff --git a/docs/migration-guide.rst b/docs/migration-guide.rst deleted file mode 100644 index da01311f6db9..000000000000 --- a/docs/migration-guide.rst +++ /dev/null @@ -1,307 +0,0 @@ -:desc: Information about changes between major versions of chatbot framework - Rasa Core and how you can migrate from one version to another. - -.. _migration-guide: - -Migration Guide -=============== - -.. edit-link:: - -This page contains information about changes between major versions and -how you can migrate from one version to another. - -.. _migration-to-rasa-2.0: - -Rasa 1.10 to Rasa 2.0 ---------------------- - -General -~~~~~~~ -- The deprecated brokers ``FileProducer``, ``KafkaProducer``, ``PikaProducer`` - and the ``SQLProducer`` have been removed. If you used these brokers in your - ``endpoints.yml`` make sure to use the renamed variants instead: - - ``FileProducer`` became ``FileEventBroker`` - - ``KafkaProducer`` became ``KafkaEventBroker`` - - ``PikaProducer`` became ``PikaEventBroker`` - - ``SQLProducer`` became ``SQLEventBroker`` - -- The deprecated ``EmbeddingIntentClassifier`` has been removed. If you used this - component in your ``pipeline`` configuration (``config.yml``) you can replace it - with ``DIETClassifier``. It accepts the same configuration parameters. 
- -- The deprecated ``KerasPolicy`` has been removed. If you used this - component in your ``policies`` configuration (``config.yml``) you can replace it - with ``TEDPolicy``. It accepts the same configuration parameters. - - -.. _migration-to-rasa-1.8: - -Rasa 1.7 to Rasa 1.8 --------------------- -.. warning:: - - This is a release **breaking backwards compatibility**. - It is not possible to load previously trained models. Please make sure to retrain a - model before trying to use it with this improved version. - -General -~~~~~~~ -- The :ref:`ted_policy` replaced the ``keras_policy`` as recommended machine - learning policy. New projects generated with ``rasa init`` will automatically use - this policy. In case you want to change your existing model configuration to use the - :ref:`ted_policy` add this to the ``policies`` section in your ``config.yml`` - and remove potentially existing ``KerasPolicy`` entries: - - .. code-block:: yaml - - policies: - # - ... other policies - - name: TEDPolicy - max_history: 5 - epochs: 100 - - The given snippet specifies default values for the parameters ``max_history`` and - ``epochs``. ``max_history`` is particularly important and strongly depends on your stories. - Please see the docs of the :ref:`ted_policy` if you want to customize them. - -- All pre-defined pipeline templates are deprecated. **Any templates you use will be - mapped to the new configuration, but the underlying architecture is the same**. - Take a look at :ref:`choosing-a-pipeline` to decide on what components you should use - in your configuration file. - -- The :ref:`embedding_policy` was renamed to :ref:`ted_policy`. The functionality of the policy stayed the same. - Please update your configuration files to use ``TEDPolicy`` instead of ``EmbeddingPolicy``. - -- Most of the model options for ``EmbeddingPolicy``, ``EmbeddingIntentClassifier``, and ``ResponseSelector`` got - renamed. 
Please update your configuration files using the following mapping: - - ============================= ======================================================= - Old model option New model option - ============================= ======================================================= - hidden_layers_sizes_a dictionary "hidden_layers_sizes" with key "text" - hidden_layers_sizes_b dictionary "hidden_layers_sizes" with key "label" - hidden_layers_sizes_pre_dial dictionary "hidden_layers_sizes" with key "dialogue" - hidden_layers_sizes_bot dictionary "hidden_layers_sizes" with key "label" - num_transformer_layers number_of_transformer_layers - num_heads number_of_attention_heads - max_seq_length maximum_sequence_length - dense_dim dense_dimension - embed_dim embedding_dimension - num_neg number_of_negative_examples - mu_pos maximum_positive_similarity - mu_neg maximum_negative_similarity - use_max_sim_neg use_maximum_negative_similarity - C2 regularization_constant - C_emb negative_margin_scale - droprate_a droprate_dialogue - droprate_b droprate_label - evaluate_every_num_epochs evaluate_every_number_of_epochs - evaluate_on_num_examples evaluate_on_number_of_examples - ============================= ======================================================= - - Old configuration options will be mapped to the new names, and a warning will be thrown. - However, these will be deprecated in a future release. - -- The Embedding Intent Classifier is now deprecated and will be replaced by :ref:`DIETClassifier ` - in the future. - ``DIETClassfier`` performs intent classification as well as entity recognition. - If you want to get the same model behavior as the current ``EmbeddingIntentClassifier``, you can use - the following configuration of ``DIETClassifier``: - - .. code-block:: yaml - - pipeline: - # - ... 
other components - - name: DIETClassifier - hidden_layers_sizes: - text: [256, 128] - number_of_transformer_layers: 0 - weight_sparsity: 0 - intent_classification: True - entity_recognition: False - use_masked_language_model: False - BILOU_flag: False - # ... any other parameters - - See :ref:`DIETClassifier ` for more information about the new component. - Specifying ``EmbeddingIntentClassifier`` in the configuration maps to the above component definition, the - behavior is unchanged from previous versions. - -- ``CRFEntityExtractor`` is now deprecated and will be replaced by ``DIETClassifier`` in the future. If you want to - get the same model behavior as the current ``CRFEntityExtractor``, you can use the following configuration: - - .. code-block:: yaml - - pipeline: - # - ... other components - - name: LexicalSyntacticFeaturizer - features: [ - ["low", "title", "upper"], - [ - "BOS", - "EOS", - "low", - "prefix5", - "prefix2", - "suffix5", - "suffix3", - "suffix2", - "upper", - "title", - "digit", - ], - ["low", "title", "upper"], - ] - - name: DIETClassifier - intent_classification: False - entity_recognition: True - use_masked_language_model: False - number_of_transformer_layers: 0 - # ... any other parameters - - ``CRFEntityExtractor`` featurizes user messages on its own, it does not depend on any featurizer. - We extracted the featurization from the component into the new featurizer :ref:`LexicalSyntacticFeaturizer`. Thus, - in order to obtain the same results as before, you need to add this featurizer to your pipeline before the - :ref:`diet-classifier`. - Specifying ``CRFEntityExtractor`` in the configuration maps to the above component definition, the behavior - is unchanged from previous versions. - -- If your pipeline contains ``CRFEntityExtractor`` and ``EmbeddingIntentClassifier`` you can substitute both - components with :ref:`DIETClassifier `. You can use the following pipeline for that: - - .. code-block:: yaml - - pipeline: - # - ... 
other components - - name: LexicalSyntacticFeaturizer - features: [ - ["low", "title", "upper"], - [ - "BOS", - "EOS", - "low", - "prefix5", - "prefix2", - "suffix5", - "suffix3", - "suffix2", - "upper", - "title", - "digit", - ], - ["low", "title", "upper"], - ] - - name: DIETClassifier - number_of_transformer_layers: 0 - # ... any other parameters - -.. _migration-to-rasa-1.7: - -Rasa 1.6 to Rasa 1.7 --------------------- - -General -~~~~~~~ -- By default, the ``EmbeddingIntentClassifier``, ``EmbeddingPolicy``, and ``ResponseSelector`` will - now normalize the top 10 confidence results if the ``loss_type`` is ``"softmax"`` (which has been - default since 1.3, see :ref:`migration-to-rasa-1.3`). This is configurable via the ``ranking_length`` - configuration parameter; to turn off normalization to match the previous behavior, set ``ranking_length: 0``. - -.. _migration-to-rasa-1.3: - -Rasa 1.2 to Rasa 1.3 --------------------- -.. warning:: - - This is a release **breaking backwards compatibility**. - It is not possible to load previously trained models. Please make sure to retrain a - model before trying to use it with this improved version. - -General -~~~~~~~ -- Default parameters of ``EmbeddingIntentClassifier`` are changed. See :ref:`components` for details. - Architecture implementation is changed as well, so **old trained models cannot be loaded**. - Default parameters and architecture for ``EmbeddingPolicy`` are changed. See :ref:`policies` for details. - It uses transformer instead of lstm. **Old trained models cannot be loaded**. - They use ``inner`` similarity and ``softmax`` loss by default instead of - ``cosine`` similarity and ``margin`` loss (can be set in config file). - They use ``balanced`` batching strategy by default to counteract class imbalance problem. - The meaning of ``evaluate_on_num_examples`` is changed. 
If it is non zero, random examples will be - picked by stratified split and used as **hold out** validation set, so they will be excluded from training data. - We suggest to set it to zero (default) if data set contains a lot of unique examples of dialogue turns. - Removed ``label_tokenization_flag`` and ``label_split_symbol`` from component. Instead moved intent splitting to ``Tokenizer`` components via ``intent_tokenization_flag`` and ``intent_split_symbol`` flag. -- Default ``max_history`` for ``EmbeddingPolicy`` is ``None`` which means it'll use - the ``FullDialogueTrackerFeaturizer``. We recommend to set ``max_history`` to - some finite value in order to use ``MaxHistoryTrackerFeaturizer`` - for **faster training**. See :ref:`featurization_conversations` for details. - We recommend to increase ``batch_size`` for ``MaxHistoryTrackerFeaturizer`` - (e.g. ``"batch_size": [32, 64]``) -- **Compare** mode of ``rasa train core`` allows the whole core config comparison. - Therefore, we changed the naming of trained models. They are named by config file - name instead of policy name. Old naming style will not be read correctly when - creating **compare** plots (``rasa test core``). Please remove old trained models - in comparison folder and retrain. Normal core training is unaffected. -- We updated the **evaluation metric** for our **NER**. We report the weighted precision and f1-score. - So far we included ``no-entity`` in this report. However, as most of the tokens actually don't have - an entity set, this will influence the weighted precision and f1-score quite a bit. From now on we - exclude ``no-entity`` from the evaluation. The overall metrics now only include proper entities. You - might see a drop in the performance scores when running the evaluation again. -- ``/`` is reserved as a delimiter token to distinguish between retrieval intent and the corresponding response text - identifier. Make sure you don't include ``/`` symbol in the name of your intents. - -.. 
_migration-to-rasa-1.0: - -Rasa NLU 0.14.x and Rasa Core 0.13.x to Rasa 1.0 ------------------------------------------------- -.. warning:: - - This is a release **breaking backwards compatibility**. - It is not possible to load previously trained models. Please make sure to retrain a - model before trying to use it with this improved version. - -General -~~~~~~~ - -- The scripts in ``rasa.core`` and ``rasa.nlu`` can no longer be executed. To train, test, run, ... an NLU or Core - model, you should now use the command line interface ``rasa``. The functionality is, for the most part, the same as before. - Some changes in commands reflect the combined training and running of NLU and Core models, but NLU and Core can still - be trained and used individually. If you attempt to run one of the old scripts in ``rasa.core`` or ``rasa.nlu``, - an error is thrown that points you to the command you - should use instead. See all the new commands at :ref:`command-line-interface`. - -- If you have written a custom output channel, all ``send_`` methods subclassed - from the ``OutputChannel`` class need to take an additional ``**kwargs`` - argument. You can use these keyword args from your custom action code or the - templates in your domain file to send any extra parameters used in your - channel's send methods. - -- If you were previously importing the ``Button`` or ``Element`` classes from - ``rasa_core.dispatcher``, these are now to be imported from ``rasa_sdk.utils``. - -- Rasa NLU and Core previously used `separate configuration files - `_. - These two files should be merged into a single file either named ``config.yml``, or passed via the ``--config`` parameter. - -Script parameters -~~~~~~~~~~~~~~~~~ -- All script parameter names have been unified to follow the same schema. - Any underscores (``_``) in arguments have been replaced with dashes (``-``). - For example: ``--max_history`` has been changed to ``--max-history``. 
You can - see all of the script parameters in the ``--help`` output of the commands - in the :ref:`command-line-interface`. - -- The ``--num_threads`` parameter was removed from the ``run`` command. The - server will always run single-threaded, but will now run asynchronously. If you want to - make use of multiple processes, feel free to check out the `Sanic server - documentation `_. - -- To avoid conflicts in script parameter names, connectors in the ``run`` command now need to be specified with - ``--connector``, as ``-c`` is no longer supported. The maximum history in the ``rasa visualize`` command needs to be - defined with ``--max-history``. Output paths and log files cannot be specified with ``-o`` anymore; ``--out`` and - ``--log-file`` should be used. NLU data has been standarized to be ``--nlu`` and the name of - any kind of data files or directory to be ``--data``. - -HTTP API -~~~~~~~~ -- There are numerous HTTP API endpoint changes which can be found `here `_. diff --git a/docs/nlu/about.rst b/docs/nlu/about.rst deleted file mode 100644 index 65d6f408a54e..000000000000 --- a/docs/nlu/about.rst +++ /dev/null @@ -1,29 +0,0 @@ -:desc: Learn more about open-source natural language processing library Rasa NLU - for intent classification and entity extraction in on premise chatbots. - -.. _about-rasa-nlu: - -Rasa NLU: Language Understanding for Chatbots and AI assistants -=============================================================== - - -Rasa NLU is an open-source natural language processing tool for intent classification, response retrieval and -entity extraction in chatbots. For example, taking a sentence like - -.. code-block:: console - - "I am looking for a Mexican restaurant in the center of town" - -and returning structured data like - -.. code-block:: json - - { - "intent": "search_restaurant", - "entities": { - "cuisine" : "Mexican", - "location" : "center" - } - } - -If you want to use Rasa NLU on its own, see :ref:`using-nlu-only`. 
diff --git a/docs/nlu/choosing-a-pipeline.rst b/docs/nlu/choosing-a-pipeline.rst deleted file mode 100644 index 8630339dcac2..000000000000 --- a/docs/nlu/choosing-a-pipeline.rst +++ /dev/null @@ -1,490 +0,0 @@ -:desc: Set up a pipeline of components. - -.. _choosing-a-pipeline: - -Choosing a Pipeline -=================== - -.. edit-link:: - -In Rasa Open Source, incoming messages are processed by a sequence of components. -These components are executed one after another in a so-called processing ``pipeline`` defined in your ``config.yml``. -Choosing an NLU pipeline allows you to customize your model and finetune it on your dataset. - - -.. contents:: - :local: - :depth: 2 - -.. note:: - With Rasa 1.8.0 we updated some components and deprecated all existing pipeline templates. - However, **any of the old terminology will still behave the same way as it did before**! - -.. warning:: - We deprecated all existing pipeline templates (e.g. ``supervised_embeddings``). Please list any - components you want to use directly in the configuration file. See - :ref:`how-to-choose-a-pipeline` for recommended starting configurations, or - :ref:`pipeline-templates` for more information. - - -.. _how-to-choose-a-pipeline: - -How to Choose a Pipeline ------------------------- - -The Short Answer -**************** - -If your training data is in English, a good starting point is the following pipeline: - - .. literalinclude:: ../../data/configs_for_docs/default_english_config.yml - :language: yaml - -If your training data is not in English, start with the following pipeline: - - .. literalinclude:: ../../data/configs_for_docs/default_config.yml - :language: yaml - - -A Longer Answer -*************** - -.. _recommended-pipeline-english: - -We recommend using following pipeline, if your training data is in English: - - .. 
literalinclude:: ../../data/configs_for_docs/default_english_config.yml - :language: yaml - -The pipeline contains the :ref:`ConveRTFeaturizer` that provides pre-trained word embeddings of the user utterance. -Pre-trained word embeddings are helpful as they already encode some kind of linguistic knowledge. -For example, if you have a sentence like "I want to buy apples" in your training data, and Rasa is asked to predict -the intent for "get pears", your model already knows that the words "apples" and "pears" are very similar. -This is especially useful if you don’t have enough training data. -The advantage of the :ref:`ConveRTFeaturizer` is that it doesn't treat each word of the user message independently, but -creates a contextual vector representation for the complete sentence. -However, ``ConveRT`` is only available in English. - - -.. _recommended-pipeline-pretrained-non-english: - -If your training data is not in English, but you still want to use pre-trained word embeddings, we recommend using -the following pipeline: - - .. literalinclude:: ../../data/configs_for_docs/default_spacy_config.yml - :language: yaml - -It uses the :ref:`SpacyFeaturizer` instead of the :ref:`ConveRTFeaturizer`. -:ref:`SpacyFeaturizer` provides pre-trained word embeddings from either GloVe or fastText in many different languages -(see :ref:`pretrained-word-vectors`). - - -.. _recommended-pipeline-non-english: - -If you don't use any pre-trained word embeddings inside your pipeline, you are not bound to a specific language -and can train your model to be more domain specific. -If there are no word embeddings for your language or you have very domain specific terminology, -we recommend using the following pipeline: - - .. literalinclude:: ../../data/configs_for_docs/default_config.yml - :language: yaml - -.. note:: - We encourage everyone to define their own pipeline by listing the names of the components you want to use. 
- You can find the details of each component in :ref:`components`. - If you want to use custom components in your pipeline, see :ref:`custom-nlu-components`. - -Choosing the Right Components -***************************** - -There are components for entity extraction, for intent classification, response selection, -pre-processing, and others. You can learn more about any specific component on the :ref:`components` page. -If you want to add your own component, for example to run a spell-check or to -do sentiment analysis, check out :ref:`custom-nlu-components`. - -A pipeline usually consists of three main parts: - -.. contents:: - :local: - :depth: 1 - - -Tokenization -~~~~~~~~~~~~ - -For tokenization of English input, we recommend the :ref:`ConveRTTokenizer`. -You can process other whitespace-tokenized (words are separated by spaces) languages -with the :ref:`WhitespaceTokenizer`. If your language is not whitespace-tokenized, you should use a different tokenizer. -We support a number of different :ref:`tokenizers `, or you can -create your own :ref:`custom tokenizer `. - -.. note:: - Some components further down the pipeline may require a specific tokenizer. You can find those requirements - on the individual components in :ref:`components`. If a required component is missing inside the pipeline, an - error will be thrown. - - -Featurization -~~~~~~~~~~~~~ - -You need to decide whether to use components that provide pre-trained word embeddings or not. We recommend in cases -of small amounts of training data to start with pre-trained word embeddings. Once you have a larger amount of data -and ensure that most relevant words will be in your data and therefore will have a word embedding, supervised -embeddings, which learn word meanings directly from your training data, can make your model more specific to your domain. -If you can't find a pre-trained model for your language, you should use supervised embeddings. - -.. 
contents:: - :local: - -Pre-trained Embeddings -^^^^^^^^^^^^^^^^^^^^^^ - -The advantage of using pre-trained word embeddings in your pipeline is that if you have a training example like: -"I want to buy apples", and Rasa is asked to predict the intent for "get pears", your model already knows that the -words "apples" and "pears" are very similar. This is especially useful if you don't have enough training data. -We support a few components that provide pre-trained word embeddings: - -1. :ref:`MitieFeaturizer` -2. :ref:`SpacyFeaturizer` -3. :ref:`ConveRTFeaturizer` -4. :ref:`LanguageModelFeaturizer` - -If your training data is in English, we recommend using the :ref:`ConveRTFeaturizer`. -The advantage of the :ref:`ConveRTFeaturizer` is that it doesn't treat each word of the user message independently, but -creates a contextual vector representation for the complete sentence. For example, if you -have a training example, like: "Can I book a car?", and Rasa is asked to predict the intent for "I need a ride from -my place", since the contextual vector representation for both examples are already very similar, the intent classified -for both is highly likely to be the same. This is also useful if you don't have enough training data. - -An alternative to :ref:`ConveRTFeaturizer` is the :ref:`LanguageModelFeaturizer` which uses pre-trained language -models such as BERT, GPT-2, etc. to extract similar contextual vector representations for the complete sentence. See -:ref:`HFTransformersNLP` for a full list of supported language models. - -If your training data is not in English you can also use a different variant of a language model which -is pre-trained in the language specific to your training data. -For example, there are chinese (``bert-base-chinese``) and japanese (``bert-base-japanese``) variants of the BERT model. -A full list of different variants of -these language models is available in the -`official documentation of the Transformers library `_. 
- -:ref:`SpacyFeaturizer` also provides word embeddings in many different languages (see :ref:`pretrained-word-vectors`), -so you can use this as another alternative, depending on the language of your training data. - -Supervised Embeddings -^^^^^^^^^^^^^^^^^^^^^ - -If you don't use any pre-trained word embeddings inside your pipeline, you are not bound to a specific language -and can train your model to be more domain specific. For example, in general English, the word "balance" is closely -related to "symmetry", but very different to the word "cash". In a banking domain, "balance" and "cash" are closely -related and you'd like your model to capture that. -You should only use featurizers from the category :ref:`sparse featurizers `, such as -:ref:`CountVectorsFeaturizer`, :ref:`RegexFeaturizer` or :ref:`LexicalSyntacticFeaturizer`, if you don't want to use -pre-trained word embeddings. - -Entity Recognition / Intent Classification / Response Selectors -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Depending on your data you may want to only perform intent classification, entity recognition or response selection. -Or you might want to combine multiple of those tasks. -We support several components for each of the tasks. All of them are listed in :ref:`components`. -We recommend using :ref:`diet-classifier` for intent classification and entity recognition -and :ref:`response-selector` for response selection. - -By default all of these components consume all available features produced in the pipeline. -However, sometimes it makes sense to restrict the features that are used by a specific component. -For example, :ref:`response-selector` is likely to perform better if no features from the -:ref:`RegexFeaturizer` or :ref:`LexicalSyntacticFeaturizer` are used. -To achieve that, you can do the following: -Set an alias for every featurizer in your pipeline via the option ``alias``. 
-By default the alias is set the the full featurizer class name, for example, ``RegexFeaturizer``. -You can then specify, for example, on the :ref:`response-selector` via the option ``featurizers`` what features from -which featurizers should be used. -If you don't set the option ``featurizers`` all available features will be used. -To check which components have the option ``featurizers`` available, see :ref:`components`. - -Here is an example configuration file where the ``DIETClassifier`` is using all available features and the -``ResponseSelector`` is just using the features from the ``ConveRTFeaturizer`` and the ``CountVectorsFeaturizer``. - -.. literalinclude:: ../../data/configs_for_docs/config_featurizers.yml - :language: yaml - -Multi-Intent Classification -*************************** - -You can use Rasa Open Source components to split intents into multiple labels. For example, you can predict -multiple intents (``thank+goodbye``) or model hierarchical intent structure (``feedback+positive`` being more similar -to ``feedback+negative`` than ``chitchat``). -To do this, you need to use the :ref:`diet-classifier` in your pipeline. -You'll also need to define these flags in whichever tokenizer you are using: - - - ``intent_tokenization_flag``: Set it to ``True``, so that intent labels are tokenized. - - ``intent_split_symbol``: Set it to the delimiter string that splits the intent labels. In this case ``+``, default ``_``. - -Read a `tutorial `__ -on how to use multiple intents in Rasa. - -Here's an example configuration: - - .. code-block:: yaml - - language: "en" - - pipeline: - - name: "WhitespaceTokenizer" - intent_tokenization_flag: True - intent_split_symbol: "_" - - name: "CountVectorsFeaturizer" - - name: "DIETClassifier" - - -Comparing Pipelines -------------------- - -Rasa gives you the tools to compare the performance of multiple pipelines on your data directly. -See :ref:`comparing-nlu-pipelines` for more information. - -.. 
note:: - - Intent classification is independent of entity extraction. So sometimes - NLU will get the intent right but entities wrong, or the other way around. - You need to provide enough data for both intents and entities. - - -Handling Class Imbalance ------------------------- - -Classification algorithms often do not perform well if there is a large `class imbalance`, -for example if you have a lot of training data for some intents and very little training data for others. -To mitigate this problem, you can use a ``balanced`` batching strategy. -This algorithm ensures that all classes are represented in every batch, or at least in -as many subsequent batches as possible, still mimicking the fact that some classes are more frequent than others. -Balanced batching is used by default. In order to turn it off and use a classic batching strategy include -``batch_strategy: sequence`` in your config file. - - .. code-block:: yaml - - language: "en" - - pipeline: - # - ... other components - - name: "DIETClassifier" - batch_strategy: sequence - - -.. _component-lifecycle: - -Component Lifecycle -------------------- - -Each component processes an input and/or creates an output. The order of the components is determined by -the order they are listed in the ``config.yml``; the output of a component can be used by any other component that -comes after it in the pipeline. Some components only produce information used by other components -in the pipeline. Other components produce ``output`` attributes that are returned after -the processing has finished. - -For example, for the sentence ``"I am looking for Chinese food"``, the output is: - - .. 
code-block:: json - - { - "text": "I am looking for Chinese food", - "entities": [ - { - "start": 8, - "end": 15, - "value": "chinese", - "entity": "cuisine", - "extractor": "DIETClassifier", - "confidence": 0.864 - } - ], - "intent": {"confidence": 0.6485910906220309, "name": "restaurant_search"}, - "intent_ranking": [ - {"confidence": 0.6485910906220309, "name": "restaurant_search"}, - {"confidence": 0.1416153159565678, "name": "affirm"} - ] - } - -This is created as a combination of the results of the different components in the following pipeline: - - .. code-block:: yaml - - pipeline: - - name: WhitespaceTokenizer - - name: RegexFeaturizer - - name: LexicalSyntacticFeaturizer - - name: CountVectorsFeaturizer - - name: CountVectorsFeaturizer - analyzer: "char_wb" - min_ngram: 1 - max_ngram: 4 - - name: DIETClassifier - - name: EntitySynonymMapper - - name: ResponseSelector - -For example, the ``entities`` attribute here is created by the ``DIETClassifier`` component. - -Every component can implement several methods from the ``Component`` base class; in a pipeline these different methods -will be called in a specific order. Assuming we added the following pipeline to our ``config.yml``: - - .. code-block:: yaml - - pipeline: - - name: "Component A" - - name: "Component B" - - name: "Last Component" - -The image below shows the call order during the training of this pipeline: - -.. image:: /_static/images/component_lifecycle.png - -Before the first component is created using the ``create`` function, a so -called ``context`` is created (which is nothing more than a python dict). -This context is used to pass information between the components. For example, -one component can calculate feature vectors for the training data, store -that within the context and another component can retrieve these feature -vectors from the context and do intent classification. - -Initially the context is filled with all configuration values. 
The arrows -in the image show the call order and visualize the path of the passed -context. After all components are trained and persisted, the -final context dictionary is used to persist the model's metadata. - -.. _pipeline-templates: - -Pipeline Templates (deprecated) -------------------------------- - -A template is just a shortcut for a full list of components. For example, this pipeline template: - - .. code-block:: yaml - - language: "en" - pipeline: "pretrained_embeddings_spacy" - -is equivalent to this pipeline: - - .. code-block:: yaml - - language: "en" - pipeline: - - name: "SpacyNLP" - - name: "SpacyTokenizer" - - name: "SpacyFeaturizer" - - name: "RegexFeaturizer" - - name: "CRFEntityExtractor" - - name: "EntitySynonymMapper" - - name: "SklearnIntentClassifier" - -Pipeline templates are deprecated as of Rasa 1.8. To find sensible configurations to get started, -check out :ref:`how-to-choose-a-pipeline`. For more information about a deprecated pipeline template, -expand it below. - - - .. container:: toggle - - .. container:: header - - ``pretrained_embeddings_spacy`` - - .. _section_pretrained_embeddings_spacy_pipeline: - - The advantage of ``pretrained_embeddings_spacy`` pipeline is that if you have a training example like: - "I want to buy apples", and Rasa is asked to predict the intent for "get pears", your model - already knows that the words "apples" and "pears" are very similar. This is especially useful - if you don't have enough training data. - - See :ref:`pretrained-word-vectors` for more information about loading spacy language models. - To use the components and configure them separately: - - .. literalinclude:: ../../data/configs_for_docs/pretrained_embeddings_spacy_config.yml - :language: yaml - - .. container:: toggle - - .. container:: header - - ``pretrained_embeddings_convert`` - - .. _section_pretrained_embeddings_convert_pipeline: - - .. 
note:: - Since ``ConveRT`` model is trained only on an **English** corpus of conversations, this pipeline should only - be used if your training data is in English language. - - This pipeline uses the `ConveRT `_ model to extract a vector representation of - a sentence and feeds them to the ``DIETClassifier`` for intent classification. - The advantage of using the ``pretrained_embeddings_convert`` pipeline is that it doesn't treat each word of the user - message independently, but creates a contextual vector representation for the complete sentence. For example, if you - have a training example, like: "can I book a car?", and Rasa is asked to predict the intent for "I need a ride from - my place", since the contextual vector representation for both examples are already very similar, the intent classified - for both is highly likely to be the same. This is also useful if you don't have enough training data. - - .. note:: - To use ``pretrained_embeddings_convert`` pipeline, you should install Rasa with ``pip install rasa[convert]``. - Please also note that one of the dependencies(``tensorflow-text``) is currently only supported on Linux - platforms. - - To use the components and configure them separately: - - .. literalinclude:: ../../data/configs_for_docs/pretrained_embeddings_convert_config.yml - :language: yaml - - .. container:: toggle - - .. container:: header - - ``supervised_embeddings`` - - .. _section_supervised_embeddings_pipeline: - - The advantage of the ``supervised_embeddings`` pipeline is that your word vectors will be customized - for your domain. For example, in general English, the word "balance" is closely related to "symmetry", - but very different to the word "cash". In a banking domain, "balance" and "cash" are closely related - and you'd like your model to capture that. This pipeline doesn't use a language-specific model, - so it will work with any language that you can tokenize (on whitespace or using a custom tokenizer). 
- - You can read more about this topic `in this blog post `__ . - - The ``supervised_embeddings`` pipeline supports any language that can be whitespace tokenized. By default it uses - whitespace for tokenization. You can customize the setup of this pipeline by adding or changing components. Here are - the default components that make up the ``supervised_embeddings`` pipeline: - - .. literalinclude:: ../../data/configs_for_docs/supervised_embeddings_config.yml - :language: yaml - - So for example, if your chosen language is not whitespace-tokenized (words are not separated by spaces), you - can replace the ``WhitespaceTokenizer`` with your own tokenizer. We support a number of different :ref:`tokenizers `, - or you can :ref:`create your own `. - - The pipeline uses two instances of ``CountVectorsFeaturizer``. The first one - featurizes text based on words. The second one featurizes text based on character - n-grams, preserving word boundaries. We empirically found the second featurizer - to be more powerful, but we decided to keep the first featurizer as well to make - featurization more robust. - - .. _section_mitie_pipeline: - - .. container:: toggle - - .. container:: header - - ``MITIE pipeline`` - - You can also use MITIE as a source of word vectors in your pipeline. - The MITIE backend performs well for small datasets, but training can take very long if you have more than a couple - of hundred examples. - - However, we do not recommend that you use it as mitie support is likely to be deprecated in a future release. - - To use the MITIE pipeline, you will have to train word vectors from a corpus. Instructions can be found - :ref:`here `. This will give you the file path to pass to the ``model`` parameter. - - .. literalinclude:: ../../data/configs_for_docs/pretrained_embeddings_mitie_config_1.yml - :language: yaml - - Another version of this pipeline uses MITIE's featurizer and also its multi-class classifier. 
- Training can be quite slow, so this is not recommended for large datasets. - - .. literalinclude:: ../../data/configs_for_docs/pretrained_embeddings_mitie_config_2.yml - :language: yaml diff --git a/docs/nlu/components.rst b/docs/nlu/components.rst deleted file mode 100644 index 4372f8f16fe5..000000000000 --- a/docs/nlu/components.rst +++ /dev/null @@ -1,1605 +0,0 @@ -:desc: Customize the components and parameters of Rasa's Machine Learning based - Natural Language Understanding pipeline - -.. _components: - -Components -========== - -.. edit-link:: - -This is a reference of the configuration options for every built-in -component in Rasa Open Source. If you want to build a custom component, check -out :ref:`custom-nlu-components`. - -.. contents:: - :local: - - -Word Vector Sources -------------------- - -The following components load pre-trained models that are needed if you want to use pre-trained -word vectors in your pipeline. - -.. _MitieNLP: - -MitieNLP -~~~~~~~~ - -:Short: MITIE initializer -:Outputs: Nothing -:Requires: Nothing -:Description: - Initializes MITIE structures. Every MITIE component relies on this, - hence this should be put at the beginning - of every pipeline that uses any MITIE components. -:Configuration: - The MITIE library needs a language model file, that **must** be specified in - the configuration: - - .. code-block:: yaml - - pipeline: - - name: "MitieNLP" - # language model to load - model: "data/total_word_feature_extractor.dat" - - For more information where to get that file from, head over to - :ref:`installing MITIE `. - -.. _SpacyNLP: - -SpacyNLP -~~~~~~~~ - -:Short: spaCy language initializer -:Outputs: Nothing -:Requires: Nothing -:Description: - Initializes spaCy structures. Every spaCy component relies on this, hence this should be put at the beginning - of every pipeline that uses any spaCy components. -:Configuration: - You need to specify the language model to use. 
- By default the language configured in the pipeline will be used as the language model name. - If the spaCy model to be used has a name that is different from the language tag (``"en"``, ``"de"``, etc.), - the model name can be specified using the configuration variable ``model``. - The name will be passed to ``spacy.load(name)``. - - .. code-block:: yaml - - pipeline: - - name: "SpacyNLP" - # language model to load - model: "en_core_web_md" - - # when retrieving word vectors, this will decide if the casing - # of the word is relevant. E.g. `hello` and `Hello` will - # retrieve the same vector, if set to `False`. For some - # applications and models it makes sense to differentiate - # between these two words, therefore setting this to `True`. - case_sensitive: False - - For more information on how to download the spaCy models, head over to - :ref:`installing SpaCy `. - -.. _HFTransformersNLP: - -HFTransformersNLP -~~~~~~~~~~~~~~~~~ - -:Short: HuggingFace's Transformers based pre-trained language model initializer -:Outputs: Nothing -:Requires: Nothing -:Description: - Initializes specified pre-trained language model from HuggingFace's `Transformers library - `__. The component applies language model specific tokenization and - featurization to compute sequence and sentence level representations for each example in the training data. - Include :ref:`LanguageModelTokenizer` and :ref:`LanguageModelFeaturizer` to utilize the output of this - component for downstream NLU models. - - .. note:: To use ``HFTransformersNLP`` component, install Rasa Open Source with ``pip install rasa[transformers]``. - -:Configuration: - You should specify what language model to load via the parameter ``model_name``. See the below table for the - available language models. - Additionally, you can also specify the architecture variation of the chosen language model by specifying the - parameter ``model_weights``. - The full list of supported architectures can be found - `here `__. 
- If left empty, it uses the default model architecture that original Transformers library loads (see table below). - - .. code-block:: none - - +----------------+--------------+-------------------------+ - | Language Model | Parameter | Default value for | - | | "model_name" | "model_weights" | - +----------------+--------------+-------------------------+ - | BERT | bert | bert-base-uncased | - +----------------+--------------+-------------------------+ - | GPT | gpt | openai-gpt | - +----------------+--------------+-------------------------+ - | GPT-2 | gpt2 | gpt2 | - +----------------+--------------+-------------------------+ - | XLNet | xlnet | xlnet-base-cased | - +----------------+--------------+-------------------------+ - | DistilBERT | distilbert | distilbert-base-uncased | - +----------------+--------------+-------------------------+ - | RoBERTa | roberta | roberta-base | - +----------------+--------------+-------------------------+ - - The following configuration loads the language model BERT: - - .. code-block:: yaml - - pipeline: - - name: HFTransformersNLP - # Name of the language model to use - model_name: "bert" - # Pre-Trained weights to be loaded - model_weights: "bert-base-uncased" - - # An optional path to a specific directory to download and cache the pre-trained model weights. - # The `default` cache_dir is the same as https://huggingface.co/transformers/serialization.html#cache-directory . - cache_dir: null - -.. _tokenizers: - -Tokenizers ----------- - -Tokenizers split text into tokens. -If you want to split intents into multiple labels, e.g. for predicting multiple intents or for -modeling hierarchical intent structure, use the following flags with any tokenizer: - -- ``intent_tokenization_flag`` indicates whether to tokenize intent labels or not. Set it to ``True``, so that intent - labels are tokenized. -- ``intent_split_symbol`` sets the delimiter string to split the intent labels, default is underscore - (``_``). - - -.. 
_WhitespaceTokenizer: - -WhitespaceTokenizer -~~~~~~~~~~~~~~~~~~~ - -:Short: Tokenizer using whitespaces as a separator -:Outputs: ``tokens`` for user messages, responses (if present), and intents (if specified) -:Requires: Nothing -:Description: - Creates a token for every whitespace separated character sequence. -:Configuration: - - .. code-block:: yaml - - pipeline: - - name: "WhitespaceTokenizer" - # Flag to check whether to split intents - "intent_tokenization_flag": False - # Symbol on which intent should be split - "intent_split_symbol": "_" - # Regular expression to detect tokens - "token_pattern": None - - -JiebaTokenizer -~~~~~~~~~~~~~~ - -:Short: Tokenizer using Jieba for Chinese language -:Outputs: ``tokens`` for user messages, responses (if present), and intents (if specified) -:Requires: Nothing -:Description: - Creates tokens using the Jieba tokenizer specifically for Chinese - language. It will only work for the Chinese language. - - .. note:: - To use ``JiebaTokenizer`` you need to install Jieba with ``pip install jieba``. - -:Configuration: - User's custom dictionary files can be auto loaded by specifying the files' directory path via ``dictionary_path``. - If the ``dictionary_path`` is ``None`` (the default), then no custom dictionary will be used. - - .. code-block:: yaml - - pipeline: - - name: "JiebaTokenizer" - dictionary_path: "path/to/custom/dictionary/dir" - # Flag to check whether to split intents - "intent_tokenization_flag": False - # Symbol on which intent should be split - "intent_split_symbol": "_" - # Regular expression to detect tokens - "token_pattern": None - - -MitieTokenizer -~~~~~~~~~~~~~~ - -:Short: Tokenizer using MITIE -:Outputs: ``tokens`` for user messages, responses (if present), and intents (if specified) -:Requires: :ref:`MitieNLP` -:Description: Creates tokens using the MITIE tokenizer. -:Configuration: - - .. 
code-block:: yaml - - pipeline: - - name: "MitieTokenizer" - # Flag to check whether to split intents - "intent_tokenization_flag": False - # Symbol on which intent should be split - "intent_split_symbol": "_" - # Regular expression to detect tokens - "token_pattern": None - -SpacyTokenizer -~~~~~~~~~~~~~~ - -:Short: Tokenizer using spaCy -:Outputs: ``tokens`` for user messages, responses (if present), and intents (if specified) -:Requires: :ref:`SpacyNLP` -:Description: - Creates tokens using the spaCy tokenizer. -:Configuration: - - .. code-block:: yaml - - pipeline: - - name: "SpacyTokenizer" - # Flag to check whether to split intents - "intent_tokenization_flag": False - # Symbol on which intent should be split - "intent_split_symbol": "_" - # Regular expression to detect tokens - "token_pattern": None - -.. _ConveRTTokenizer: - -ConveRTTokenizer -~~~~~~~~~~~~~~~~ - -:Short: Tokenizer using `ConveRT `__ model. -:Outputs: ``tokens`` for user messages, responses (if present), and intents (if specified) -:Requires: Nothing -:Description: - Creates tokens using the ConveRT tokenizer. Must be used whenever the :ref:`ConveRTFeaturizer` is used. - - .. note:: - Since ``ConveRT`` model is trained only on an English corpus of conversations, this tokenizer should only - be used if your training data is in English language. - - .. note:: - To use ``ConveRTTokenizer``, install Rasa Open Source with ``pip install rasa[convert]``. - - -:Configuration: - - .. code-block:: yaml - - pipeline: - - name: "ConveRTTokenizer" - # Flag to check whether to split intents - "intent_tokenization_flag": False - # Symbol on which intent should be split - "intent_split_symbol": "_" - # Regular expression to detect tokens - "token_pattern": None - -.. 
_LanguageModelTokenizer: - -LanguageModelTokenizer -~~~~~~~~~~~~~~~~~~~~~~ - -:Short: Tokenizer from pre-trained language models -:Outputs: ``tokens`` for user messages, responses (if present), and intents (if specified) -:Requires: :ref:`HFTransformersNLP` -:Description: - Creates tokens using the pre-trained language model specified in upstream :ref:`HFTransformersNLP` component. - Must be used whenever the :ref:`LanguageModelFeaturizer` is used. -:Configuration: - - .. code-block:: yaml - - pipeline: - - name: "LanguageModelTokenizer" - # Flag to check whether to split intents - "intent_tokenization_flag": False - # Symbol on which intent should be split - "intent_split_symbol": "_" - - -.. _text-featurizers: - -Text Featurizers ----------------- - -Text featurizers are divided into two different categories: sparse featurizers and dense featurizers. -Sparse featurizers are featurizers that return feature vectors with a lot of missing values, e.g. zeros. -As those feature vectors would normally take up a lot of memory, we store them as sparse features. -Sparse features only store the values that are non zero and their positions in the vector. -Thus, we save a lot of memory and are able to train on larger datasets. - -All featurizers can return two different kind of features: sequence features and sentence features. -The sequence features are a matrix of size ``(number-of-tokens x feature-dimension)``. -The matrix contains a feature vector for every token in the sequence. -This allows us to train sequence models. -The sentence features are represented by a matrix of size ``(1 x feature-dimension)``. -It contains the feature vector for the complete utterance. -The sentence features can be used in any bag-of-words model. -The corresponding classifier can therefore decide what kind of features to use. -Note: The ``feature-dimension`` for sequence and sentence features does not have to be the same. - -.. 
_MitieFeaturizer: - -MitieFeaturizer -~~~~~~~~~~~~~~~ - -:Short: - Creates a vector representation of user message and response (if specified) using the MITIE featurizer. -:Outputs: ``dense_features`` for user messages and responses -:Requires: :ref:`MitieNLP` -:Type: Dense featurizer -:Description: - Creates features for entity extraction, intent classification, and response classification using the MITIE - featurizer. - - .. note:: - - NOT used by the ``MitieIntentClassifier`` component. But can be used by any component later in the pipeline - that makes use of ``dense_features``. - -:Configuration: - The sentence vector, i.e. the vector of the complete utterance, can be calculated in two different ways, either via - mean or via max pooling. You can specify the pooling method in your configuration file with the option ``pooling``. - The default pooling method is set to ``mean``. - - .. code-block:: yaml - - pipeline: - - name: "MitieFeaturizer" - # Specify what pooling operation should be used to calculate the vector of - # the complete utterance. Available options: 'mean' and 'max'. - "pooling": "mean" - - -.. _SpacyFeaturizer: - -SpacyFeaturizer -~~~~~~~~~~~~~~~ - -:Short: - Creates a vector representation of user message and response (if specified) using the spaCy featurizer. -:Outputs: ``dense_features`` for user messages and responses -:Requires: :ref:`SpacyNLP` -:Type: Dense featurizer -:Description: - Creates features for entity extraction, intent classification, and response classification using the spaCy - featurizer. -:Configuration: - The sentence vector, i.e. the vector of the complete utterance, can be calculated in two different ways, either via - mean or via max pooling. You can specify the pooling method in your configuration file with the option ``pooling``. - The default pooling method is set to ``mean``. - - .. 
code-block:: yaml - - pipeline: - - name: "SpacyFeaturizer" - # Specify what pooling operation should be used to calculate the vector of - # the complete utterance. Available options: 'mean' and 'max'. - "pooling": "mean" - - -.. _ConveRTFeaturizer: - -ConveRTFeaturizer -~~~~~~~~~~~~~~~~~ - -:Short: - Creates a vector representation of user message and response (if specified) using - `ConveRT `__ model. -:Outputs: ``dense_features`` for user messages and responses -:Requires: :ref:`ConveRTTokenizer` -:Type: Dense featurizer -:Description: - Creates features for entity extraction, intent classification, and response selection. - It uses the `default signature `_ to compute vector - representations of input text. - - .. note:: - Since ``ConveRT`` model is trained only on an English corpus of conversations, this featurizer should only - be used if your training data is in English language. - - .. note:: - To use ``ConveRTTokenizer``, install Rasa Open Source with ``pip install rasa[convert]``. - -:Configuration: - - .. code-block:: yaml - - pipeline: - - name: "ConveRTFeaturizer" - - -.. _LanguageModelFeaturizer: - -LanguageModelFeaturizer -~~~~~~~~~~~~~~~~~~~~~~~ - -:Short: - Creates a vector representation of user message and response (if specified) using a pre-trained language model. -:Outputs: ``dense_features`` for user messages and responses -:Requires: :ref:`HFTransformersNLP` and :ref:`LanguageModelTokenizer` -:Type: Dense featurizer -:Description: - Creates features for entity extraction, intent classification, and response selection. - Uses the pre-trained language model specified in upstream :ref:`HFTransformersNLP` component to compute vector - representations of input text. - - .. note:: - Please make sure that you use a language model which is pre-trained on the same language corpus as that of your - training data. - -:Configuration: - - Include :ref:`HFTransformersNLP` and :ref:`LanguageModelTokenizer` components before this component. 
Use - :ref:`LanguageModelTokenizer` to ensure tokens are correctly set for all components throughout the pipeline. - - .. code-block:: yaml - - pipeline: - - name: "LanguageModelFeaturizer" - - -.. _RegexFeaturizer: - -RegexFeaturizer -~~~~~~~~~~~~~~~ - -:Short: Creates a vector representation of user message using regular expressions. -:Outputs: ``sparse_features`` for user messages and ``tokens.pattern`` -:Requires: ``tokens`` -:Type: Sparse featurizer -:Description: - Creates features for entity extraction and intent classification. - During training the ``RegexFeaturizer`` creates a list of regular expressions defined in the training - data format. - For each regex, a feature will be set marking whether this expression was found in the user message or not. - All features will later be fed into an intent classifier / entity extractor to simplify classification (assuming - the classifier has learned during the training phase, that this set feature indicates a certain intent / entity). - Regex features for entity extraction are currently only supported by the :ref:`CRFEntityExtractor` and the - :ref:`diet-classifier` components! - -:Configuration: - Make the featurizer case insensitive by adding the ``case_sensitive: False`` option, the default being - ``case_sensitive: True``. - - .. code-block:: yaml - - pipeline: - - name: "RegexFeaturizer" - # Text will be processed with case sensitive as default - "case_sensitive": True - -.. _CountVectorsFeaturizer: - -CountVectorsFeaturizer -~~~~~~~~~~~~~~~~~~~~~~ - -:Short: Creates bag-of-words representation of user messages, intents, and responses. -:Outputs: ``sparse_features`` for user messages, intents, and responses -:Requires: ``tokens`` -:Type: Sparse featurizer -:Description: - Creates features for intent classification and response selection. - Creates bag-of-words representation of user message, intent, and response using - `sklearn's CountVectorizer `_. - All tokens which consist only of digits (e.g. 
123 and 99 but not a123d) will be assigned to the same feature. - -:Configuration: - See `sklearn's CountVectorizer docs `_ - for detailed description of the configuration parameters. - - This featurizer can be configured to use word or character n-grams, using the ``analyzer`` configuration parameter. - By default ``analyzer`` is set to ``word`` so word token counts are used as features. - If you want to use character n-grams, set ``analyzer`` to ``char`` or ``char_wb``. - The lower and upper boundaries of the n-grams can be configured via the parameters ``min_ngram`` and ``max_ngram``. - By default both of them are set to ``1``. - - .. note:: - Option ``char_wb`` creates character n-grams only from text inside word boundaries; - n-grams at the edges of words are padded with space. - This option can be used to create `Subword Semantic Hashing `_. - - .. note:: - For character n-grams do not forget to increase ``min_ngram`` and ``max_ngram`` parameters. - Otherwise the vocabulary will contain only single letters. - - Handling Out-Of-Vocabulary (OOV) words: - - .. note:: Enabled only if ``analyzer`` is ``word``. - - Since the training is performed on limited vocabulary data, it cannot be guaranteed that during prediction - an algorithm will not encounter an unknown word (a word that were not seen during training). - In order to teach an algorithm how to treat unknown words, some words in training data can be substituted - by generic word ``OOV_token``. - In this case during prediction all unknown words will be treated as this generic word ``OOV_token``. - - For example, one might create separate intent ``outofscope`` in the training data containing messages of - different number of ``OOV_token`` s and maybe some additional general words. - Then an algorithm will likely classify a message with unknown words as this intent ``outofscope``. 
- - You can either set the ``OOV_token`` or a list of words ``OOV_words``: - - - ``OOV_token`` set a keyword for unseen words; if training data contains ``OOV_token`` as words in some - messages, during prediction the words that were not seen during training will be substituted with - provided ``OOV_token``; if ``OOV_token=None`` (default behavior) words that were not seen during - training will be ignored during prediction time; - - ``OOV_words`` set a list of words to be treated as ``OOV_token`` during training; if a list of words - that should be treated as Out-Of-Vocabulary is known, it can be set to ``OOV_words`` instead of manually - changing it in training data or using custom preprocessor. - - .. note:: - This featurizer creates a bag-of-words representation by **counting** words, - so the number of ``OOV_token`` in the sentence might be important. - - .. note:: - Providing ``OOV_words`` is optional, training data can contain ``OOV_token`` input manually or by custom - additional preprocessor. - Unseen words will be substituted with ``OOV_token`` **only** if this token is present in the training - data or ``OOV_words`` list is provided. - - If you want to share the vocabulary between user messages and intents, you need to set the option - ``use_shared_vocab`` to ``True``. In that case a common vocabulary set between tokens in intents and user messages - is build. - - .. code-block:: yaml - - pipeline: - - name: "CountVectorsFeaturizer" - # Analyzer to use, either 'word', 'char', or 'char_wb' - "analyzer": "word" - # Set the lower and upper boundaries for the n-grams - "min_ngram": 1 - "max_ngram": 1 - # Set the out-of-vocabulary token - "OOV_token": "_oov_" - # Whether to use a shared vocab - "use_shared_vocab": False - - .. container:: toggle - - .. container:: header - - The above configuration parameters are the ones you should configure to fit your model to your data. - However, additional parameters exist that can be adapted. - - .. 
code-block:: none - - +-------------------+-------------------------+--------------------------------------------------------------+ - | Parameter | Default Value | Description | - +===================+=========================+==============================================================+ - | use_shared_vocab | False | If set to 'True' a common vocabulary is used for labels | - | | | and user message. | - +-------------------+-------------------------+--------------------------------------------------------------+ - | analyzer | word | Whether the features should be made of word n-gram or | - | | | character n-grams. Option ‘char_wb’ creates character | - | | | n-grams only from text inside word boundaries; | - | | | n-grams at the edges of words are padded with space. | - | | | Valid values: 'word', 'char', 'char_wb'. | - +-------------------+-------------------------+--------------------------------------------------------------+ - | strip_accents | None | Remove accents during the pre-processing step. | - | | | Valid values: 'ascii', 'unicode', 'None'. | - +-------------------+-------------------------+--------------------------------------------------------------+ - | stop_words | None | A list of stop words to use. | - | | | Valid values: 'english' (uses an internal list of | - | | | English stop words), a list of custom stop words, or | - | | | 'None'. | - +-------------------+-------------------------+--------------------------------------------------------------+ - | min_df | 1 | When building the vocabulary ignore terms that have a | - | | | document frequency strictly lower than the given threshold. | - +-------------------+-------------------------+--------------------------------------------------------------+ - | max_df | 1 | When building the vocabulary ignore terms that have a | - | | | document frequency strictly higher than the given threshold | - | | | (corpus-specific stop words). 
| - +-------------------+-------------------------+--------------------------------------------------------------+ - | min_ngram | 1 | The lower boundary of the range of n-values for different | - | | | word n-grams or char n-grams to be extracted. | - +-------------------+-------------------------+--------------------------------------------------------------+ - | max_ngram | 1 | The upper boundary of the range of n-values for different | - | | | word n-grams or char n-grams to be extracted. | - +-------------------+-------------------------+--------------------------------------------------------------+ - | max_features | None | If not 'None', build a vocabulary that only consider the top | - | | | max_features ordered by term frequency across the corpus. | - +-------------------+-------------------------+--------------------------------------------------------------+ - | lowercase | True | Convert all characters to lowercase before tokenizing. | - +-------------------+-------------------------+--------------------------------------------------------------+ - | OOV_token | None | Keyword for unseen words. | - +-------------------+-------------------------+--------------------------------------------------------------+ - | OOV_words | [] | List of words to be treated as 'OOV_token' during training. | - +-------------------+-------------------------+--------------------------------------------------------------+ - | alias | CountVectorFeaturizer | Alias name of featurizer. | - +-------------------+-------------------------+--------------------------------------------------------------+ - - -.. _LexicalSyntacticFeaturizer: - -LexicalSyntacticFeaturizer -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -:Short: Creates lexical and syntactic features for a user message to support entity extraction. -:Outputs: ``sparse_features`` for user messages -:Requires: ``tokens`` -:Type: Sparse featurizer -:Description: - Creates features for entity extraction. 
- Moves with a sliding window over every token in the user message and creates features according to the - configuration (see below). As a default configuration is present, you don't need to specify a configuration. -:Configuration: - You can configure what kind of lexical and syntactic features the featurizer should extract. - The following features are available: - - .. code-block:: none - - ============== ========================================================================================== - Feature Name Description - ============== ========================================================================================== - BOS Checks if the token is at the beginning of the sentence. - EOS Checks if the token is at the end of the sentence. - low Checks if the token is lower case. - upper Checks if the token is upper case. - title Checks if the token starts with an uppercase character and all remaining characters are - lowercased. - digit Checks if the token contains just digits. - prefix5 Take the first five characters of the token. - prefix2 Take the first two characters of the token. - suffix5 Take the last five characters of the token. - suffix3 Take the last three characters of the token. - suffix2 Take the last two characters of the token. - suffix1 Take the last character of the token. - pos Take the Part-of-Speech tag of the token (``SpacyTokenizer`` required). - pos2 Take the first two characters of the Part-of-Speech tag of the token - (``SpacyTokenizer`` required). - ============== ========================================================================================== - - As the featurizer is moving over the tokens in a user message with a sliding window, you can define features for - previous tokens, the current token, and the next tokens in the sliding window. - You define the features as a [before, token, after] array. 
- If you want to define features for the token before, the current token, and the token after, - your features configuration would look like this: - - .. code-block:: yaml - - pipeline: - - name: LexicalSyntacticFeaturizer - "features": [ - ["low", "title", "upper"], - ["BOS", "EOS", "low", "upper", "title", "digit"], - ["low", "title", "upper"], - ] - - This configuration is also the default configuration. - - .. note:: If you want to make use of ``pos`` or ``pos2`` you need to add ``SpacyTokenizer`` to your pipeline. - - -Intent Classifiers ------------------- - -Intent classifiers assign one of the intents defined in the domain file to incoming user messages. - -MitieIntentClassifier -~~~~~~~~~~~~~~~~~~~~~ - -:Short: - MITIE intent classifier (using a - `text categorizer `_) -:Outputs: ``intent`` -:Requires: ``tokens`` for user message and :ref:`MitieNLP` -:Output-Example: - - .. code-block:: json - - { - "intent": {"name": "greet", "confidence": 0.98343} - } - -:Description: - This classifier uses MITIE to perform intent classification. The underlying classifier - is using a multi-class linear SVM with a sparse linear kernel (see - `MITIE trainer code `_). - - .. note:: This classifier does not rely on any featurizer as it extracts features on its own. - -:Configuration: - - .. code-block:: yaml - - pipeline: - - name: "MitieIntentClassifier" - -SklearnIntentClassifier -~~~~~~~~~~~~~~~~~~~~~~~ - -:Short: Sklearn intent classifier -:Outputs: ``intent`` and ``intent_ranking`` -:Requires: ``dense_features`` for user messages -:Output-Example: - - .. code-block:: json - - { - "intent": {"name": "greet", "confidence": 0.78343}, - "intent_ranking": [ - { - "confidence": 0.1485910906220309, - "name": "goodbye" - }, - { - "confidence": 0.08161531595656784, - "name": "restaurant_search" - } - ] - } - -:Description: - The sklearn intent classifier trains a linear SVM which gets optimized using a grid search. It also provides - rankings of the labels that did not "win". 
The ``SklearnIntentClassifier`` needs to be preceded by a dense - featurizer in the pipeline. This dense featurizer creates the features used for the classification. - For more information about the algorithm itself, take a look at the - `GridSearchCV `__ - documentation. - -:Configuration: - During the training of the SVM a hyperparameter search is run to find the best parameter set. - In the configuration you can specify the parameters that will get tried. - - .. code-block:: yaml - - pipeline: - - name: "SklearnIntentClassifier" - # Specifies the list of regularization values to - # cross-validate over for C-SVM. - # This is used with the ``kernel`` hyperparameter in GridSearchCV. - C: [1, 2, 5, 10, 20, 100] - # Specifies the kernel to use with C-SVM. - # This is used with the ``C`` hyperparameter in GridSearchCV. - kernels: ["linear"] - # Gamma parameter of the C-SVM. - "gamma": [0.1] - # We try to find a good number of cross folds to use during - # intent training, this specifies the max number of folds. - "max_cross_validation_folds": 5 - # Scoring function used for evaluating the hyper parameters. - # This can be a name or a function. - "scoring_function": "f1_weighted" - -.. _keyword_intent_classifier: - -KeywordIntentClassifier -~~~~~~~~~~~~~~~~~~~~~~~ - -:Short: Simple keyword matching intent classifier, intended for small, short-term projects. -:Outputs: ``intent`` -:Requires: Nothing - -:Output-Example: - - .. code-block:: json - - { - "intent": {"name": "greet", "confidence": 1.0} - } - -:Description: - This classifier works by searching a message for keywords. - The matching is case sensitive by default and searches only for exact matches of the keyword-string in the user - message. - The keywords for an intent are the examples of that intent in the NLU training data. - This means the entire example is the keyword, not the individual words in the example. - - .. note:: This classifier is intended only for small projects or to get started. 
If - you have few NLU training data, you can take a look at the recommended pipelines in - :ref:`choosing-a-pipeline`. - -:Configuration: - - .. code-block:: yaml - - pipeline: - - name: "KeywordIntentClassifier" - case_sensitive: True - - -DIETClassifier -~~~~~~~~~~~~~~ - -:Short: Dual Intent Entity Transformer (DIET) used for intent classification and entity extraction -:Description: - You can find the detailed description of the :ref:`diet-classifier` under the section - `Combined Entity Extractors and Intent Classifiers`. - -.. _EntityExtractors: - -Entity Extractors ------------------ - -Entity extractors extract entities, such as person names or locations, from the user message. - -MitieEntityExtractor -~~~~~~~~~~~~~~~~~~~~ - -:Short: MITIE entity extraction (using a `MITIE NER trainer `_) -:Outputs: ``entities`` -:Requires: :ref:`MitieNLP` and ``tokens`` -:Output-Example: - - .. code-block:: json - - { - "entities": [{ - "value": "New York City", - "start": 20, - "end": 33, - "confidence": null, - "entity": "city", - "extractor": "MitieEntityExtractor" - }] - } - -:Description: - ``MitieEntityExtractor`` uses the MITIE entity extraction to find entities in a message. The underlying classifier - is using a multi class linear SVM with a sparse linear kernel and custom features. - The MITIE component does not provide entity confidence values. - - .. note:: This entity extractor does not rely on any featurizer as it extracts features on its own. - -:Configuration: - - .. code-block:: yaml - - pipeline: - - name: "MitieEntityExtractor" - -.. _SpacyEntityExtractor: - -SpacyEntityExtractor -~~~~~~~~~~~~~~~~~~~~ - -:Short: spaCy entity extraction -:Outputs: ``entities`` -:Requires: :ref:`SpacyNLP` -:Output-Example: - - .. 
code-block:: json - - { - "entities": [{ - "value": "New York City", - "start": 20, - "end": 33, - "confidence": null, - "entity": "city", - "extractor": "SpacyEntityExtractor" - }] - } - -:Description: - Using spaCy this component predicts the entities of a message. spaCy uses a statistical BILOU transition model. - As of now, this component can only use the spaCy builtin entity extraction models and can not be retrained. - This extractor does not provide any confidence scores. - -:Configuration: - Configure which dimensions, i.e. entity types, the spaCy component - should extract. A full list of available dimensions can be found in - the `spaCy documentation `_. - Leaving the dimensions option unspecified will extract all available dimensions. - - .. code-block:: yaml - - pipeline: - - name: "SpacyEntityExtractor" - # dimensions to extract - dimensions: ["PERSON", "LOC", "ORG", "PRODUCT"] - - -EntitySynonymMapper -~~~~~~~~~~~~~~~~~~~ - -:Short: Maps synonymous entity values to the same value. -:Outputs: Modifies existing entities that previous entity extraction components found. -:Requires: An extractor from :ref:`EntityExtractors` -:Description: - If the training data contains defined synonyms, this component will make sure that detected entity values will - be mapped to the same value. For example, if your training data contains the following examples: - - .. code-block:: json - - [ - { - "text": "I moved to New York City", - "intent": "inform_relocation", - "entities": [{ - "value": "nyc", - "start": 11, - "end": 24, - "entity": "city", - }] - }, - { - "text": "I got a new flat in NYC.", - "intent": "inform_relocation", - "entities": [{ - "value": "nyc", - "start": 20, - "end": 23, - "entity": "city", - }] - } - ] - - This component will allow you to map the entities ``New York City`` and ``NYC`` to ``nyc``. The entity - extraction will return ``nyc`` even though the message contains ``NYC``. 
When this component changes an - existing entity, it appends itself to the processor list of this entity. - -:Configuration: - - .. code-block:: yaml - - pipeline: - - name: "EntitySynonymMapper" - - .. note:: - - When using the ``EntitySynonymMapper`` as part of an NLU pipeline, it will need to be placed - below any entity extractors in the configuration file. - -.. _CRFEntityExtractor: - -CRFEntityExtractor -~~~~~~~~~~~~~~~~~~ - -:Short: Conditional random field (CRF) entity extraction -:Outputs: ``entities`` -:Requires: ``tokens`` and ``dense_features`` (optional) -:Output-Example: - - .. code-block:: json - - { - "entities": [{ - "value": "New York City", - "start": 20, - "end": 33, - "entity": "city", - "confidence": 0.874, - "extractor": "CRFEntityExtractor" - }] - } - -:Description: - This component implements a conditional random fields (CRF) to do named entity recognition. - CRFs can be thought of as an undirected Markov chain where the time steps are words - and the states are entity classes. Features of the words (capitalization, POS tagging, - etc.) give probabilities to certain entity classes, as are transitions between - neighbouring entity tags: the most likely set of tags is then calculated and returned. - -:Configuration: - ``CRFEntityExtractor`` has a list of default features to use. - However, you can overwrite the default configuration. - The following features are available: - - .. code-block:: none - - ============== ========================================================================================== - Feature Name Description - ============== ========================================================================================== - low Checks if the token is lower case. - upper Checks if the token is upper case. - title Checks if the token starts with an uppercase character and all remaining characters are - lowercased. - digit Checks if the token contains just digits. - prefix5 Take the first five characters of the token. 
- prefix2 Take the first two characters of the token. - suffix5 Take the last five characters of the token. - suffix3 Take the last three characters of the token. - suffix2 Take the last two characters of the token. - suffix1 Take the last character of the token. - pos Take the Part-of-Speech tag of the token (``SpacyTokenizer`` required). - pos2 Take the first two characters of the Part-of-Speech tag of the token - (``SpacyTokenizer`` required). - pattern Take the patterns defined by ``RegexFeaturizer``. - bias Add an additional "bias" feature to the list of features. - ============== ========================================================================================== - - As the featurizer is moving over the tokens in a user message with a sliding window, you can define features for - previous tokens, the current token, and the next tokens in the sliding window. - You define the features as [before, token, after] array. - - Additional you can set a flag to determine whether to use the BILOU tagging schema or not. - - - ``BILOU_flag`` determines whether to use BILOU tagging or not. Default ``True``. - - .. code-block:: yaml - - pipeline: - - name: "CRFEntityExtractor" - # BILOU_flag determines whether to use BILOU tagging or not. - "BILOU_flag": True - # features to extract in the sliding window - "features": [ - ["low", "title", "upper"], - [ - "bias", - "low", - "prefix5", - "prefix2", - "suffix5", - "suffix3", - "suffix2", - "upper", - "title", - "digit", - "pattern", - ], - ["low", "title", "upper"], - ] - # The maximum number of iterations for optimization algorithms. - "max_iterations": 50 - # weight of the L1 regularization - "L1_c": 0.1 - # weight of the L2 regularization - "L2_c": 0.1 - # Name of dense featurizers to use. - # If list is empty all available dense features are used. - "featurizers": [] - - .. note:: - If POS features are used (``pos`` or ``pos2``), you need to have ``SpacyTokenizer`` in your pipeline. - - .. 
note:: - If ``pattern`` features are used, you need to have ``RegexFeaturizer`` in your pipeline. - -.. _DucklingHTTPExtractor: - -DucklingHTTPExtractor -~~~~~~~~~~~~~~~~~~~~~ - -:Short: Duckling lets you extract common entities like dates, - amounts of money, distances, and others in a number of languages. -:Outputs: ``entities`` -:Requires: Nothing -:Output-Example: - - .. code-block:: json - - { - "entities": [{ - "end": 53, - "entity": "time", - "start": 48, - "value": "2017-04-10T00:00:00.000+02:00", - "confidence": 1.0, - "extractor": "DucklingHTTPExtractor" - }] - } - -:Description: - To use this component you need to run a duckling server. The easiest - option is to spin up a docker container using - ``docker run -p 8000:8000 rasa/duckling``. - - Alternatively, you can `install duckling directly on your - machine `_ and start the server. - - Duckling allows to recognize dates, numbers, distances and other structured entities - and normalizes them. - Please be aware that duckling tries to extract as many entity types as possible without - providing a ranking. For example, if you specify both ``number`` and ``time`` as dimensions - for the duckling component, the component will extract two entities: ``10`` as a number and - ``in 10 minutes`` as a time from the text ``I will be there in 10 minutes``. In such a - situation, your application would have to decide which entity type is be the correct one. - The extractor will always return `1.0` as a confidence, as it is a rule - based system. - -:Configuration: - Configure which dimensions, i.e. entity types, the duckling component - should extract. A full list of available dimensions can be found in - the `duckling documentation `_. - Leaving the dimensions option unspecified will extract all available dimensions. - - .. 
code-block:: yaml - - pipeline: - - name: "DucklingHTTPExtractor" - # url of the running duckling server - url: "http://localhost:8000" - # dimensions to extract - dimensions: ["time", "number", "amount-of-money", "distance"] - # allows you to configure the locale, by default the language is - # used - locale: "de_DE" - # if not set the default timezone of Duckling is going to be used - # needed to calculate dates from relative expressions like "tomorrow" - timezone: "Europe/Berlin" - # Timeout for receiving response from http url of the running duckling server - # if not set the default timeout of duckling http url is set to 3 seconds. - timeout : 3 - -DIETClassifier -~~~~~~~~~~~~~~ - -:Short: Dual Intent Entity Transformer (DIET) used for intent classification and entity extraction -:Description: - You can find the detailed description of the :ref:`diet-classifier` under the section - `Combined Entity Extractors and Intent Classifiers`. - - -Selectors ----------- - -Selectors predict a bot response from a set of candidate responses. - -.. _response-selector: - -ResponseSelector -~~~~~~~~~~~~~~~~ - -:Short: Response Selector -:Outputs: A dictionary with key as ``direct_response_intent`` and value containing ``response`` and ``ranking`` -:Requires: ``dense_features`` and/or ``sparse_features`` for user messages and response - -:Output-Example: - - .. code-block:: json - - { - "response_selector": { - "faq": { - "response": {"confidence": 0.7356462617, "name": "Supports 3.5, 3.6 and 3.7, recommended version is 3.6"}, - "ranking": [ - {"confidence": 0.7356462617, "name": "Supports 3.5, 3.6 and 3.7, recommended version is 3.6"}, - {"confidence": 0.2134543431, "name": "You can ask me about how to get started"} - ] - } - } - } - -:Description: - - Response Selector component can be used to build a response retrieval model to directly predict a bot response from - a set of candidate responses. The prediction of this model is used by :ref:`retrieval-actions`. 
- It embeds user inputs and response labels into the same space and follows the exact same - neural network architecture and optimization as the :ref:`diet-classifier`. - - .. note:: If during prediction time a message contains **only** words unseen during training - and no Out-Of-Vocabulary preprocessor was used, an empty response ``None`` is predicted with confidence - ``0.0``. This might happen if you only use the :ref:`CountVectorsFeaturizer` with a ``word`` analyzer - as featurizer. If you use the ``char_wb`` analyzer, you should always get a response with a confidence - value ``> 0.0``. - -:Configuration: - - The algorithm includes almost all the hyperparameters that :ref:`diet-classifier` uses. - If you want to adapt your model, start by modifying the following parameters: - - - ``epochs``: - This parameter sets the number of times the algorithm will see the training data (default: ``300``). - One ``epoch`` is equals to one forward pass and one backward pass of all the training examples. - Sometimes the model needs more epochs to properly learn. - Sometimes more epochs don't influence the performance. - The lower the number of epochs the faster the model is trained. - - ``hidden_layers_sizes``: - This parameter allows you to define the number of feed forward layers and their output - dimensions for user messages and intents (default: ``text: [256, 128], label: [256, 128]``). - Every entry in the list corresponds to a feed forward layer. - For example, if you set ``text: [256, 128]``, we will add two feed forward layers in front of - the transformer. The vectors of the input tokens (coming from the user message) will be passed on to those - layers. The first layer will have an output dimension of 256 and the second layer will have an output - dimension of 128. If an empty list is used (default behavior), no feed forward layer will be - added. - Make sure to use only positive integer values. Usually, numbers of power of two are used. 
- Also, it is usual practice to have decreasing values in the list: next value is smaller or equal to the - value before. - - ``embedding_dimension``: - This parameter defines the output dimension of the embedding layers used inside the model (default: ``20``). - We are using multiple embeddings layers inside the model architecture. - For example, the vector of the complete utterance and the intent is passed on to an embedding layer before - they are compared and the loss is calculated. - - ``number_of_transformer_layers``: - This parameter sets the number of transformer layers to use (default: ``0``). - The number of transformer layers corresponds to the transformer blocks to use for the model. - - ``transformer_size``: - This parameter sets the number of units in the transformer (default: ``None``). - The vectors coming out of the transformers will have the given ``transformer_size``. - - ``weight_sparsity``: - This parameter defines the fraction of kernel weights that are set to 0 for all feed forward layers - in the model (default: ``0.8``). The value should be between 0 and 1. If you set ``weight_sparsity`` - to 0, no kernel weights will be set to 0, the layer acts as a standard feed forward layer. You should not - set ``weight_sparsity`` to 1 as this would result in all kernel weights being 0, i.e. the model is not able - to learn. - - | - - In addition, the component can also be configured to train a response selector for a particular retrieval intent. - The parameter ``retrieval_intent`` sets the name of the intent for which this response selector model is trained. - Default is ``None``, i.e. the model is trained for all retrieval intents. - - | - - .. container:: toggle - - .. container:: header - - The above configuration parameters are the ones you should configure to fit your model to your data. - However, additional parameters exist that can be adapted. - - .. 
code-block:: none - - +---------------------------------+-------------------+--------------------------------------------------------------+ - | Parameter | Default Value | Description | - +=================================+===================+==============================================================+ - | hidden_layers_sizes | text: [256, 128] | Hidden layer sizes for layers before the embedding layers | - | | label: [256, 128] | for user messages and labels. The number of hidden layers is | - | | | equal to the length of the corresponding. | - +---------------------------------+-------------------+--------------------------------------------------------------+ - | share_hidden_layers | False | Whether to share the hidden layer weights between user | - | | | messages and labels. | - +---------------------------------+-------------------+--------------------------------------------------------------+ - | transformer_size | None | Number of units in transformer. | - +---------------------------------+-------------------+--------------------------------------------------------------+ - | number_of_transformer_layers | 0 | Number of transformer layers. | - +---------------------------------+-------------------+--------------------------------------------------------------+ - | number_of_attention_heads | 4 | Number of attention heads in transformer. | - +---------------------------------+-------------------+--------------------------------------------------------------+ - | use_key_relative_attention | False | If 'True' use key relative embeddings in attention. | - +---------------------------------+-------------------+--------------------------------------------------------------+ - | use_value_relative_attention | False | If 'True' use value relative embeddings in attention. 
| - +---------------------------------+-------------------+--------------------------------------------------------------+ - | max_relative_position | None | Maximum position for relative embeddings. | - +---------------------------------+-------------------+--------------------------------------------------------------+ - | unidirectional_encoder | False | Use a unidirectional or bidirectional encoder. | - +---------------------------------+-------------------+--------------------------------------------------------------+ - | batch_size | [64, 256] | Initial and final value for batch sizes. | - | | | Batch size will be linearly increased for each epoch. | - +---------------------------------+-------------------+--------------------------------------------------------------+ - | batch_strategy | "balanced" | Strategy used when creating batches. | - | | | Can be either 'sequence' or 'balanced'. | - +---------------------------------+-------------------+--------------------------------------------------------------+ - | epochs | 300 | Number of epochs to train. | - +---------------------------------+-------------------+--------------------------------------------------------------+ - | random_seed | None | Set random seed to any 'int' to get reproducible results. | - +---------------------------------+-------------------+--------------------------------------------------------------+ - | learning_rate | 0.001 | Initial learning rate for the optimizer. | - +---------------------------------+-------------------+--------------------------------------------------------------+ - | embedding_dimension | 20 | Dimension size of embedding vectors. | - +---------------------------------+-------------------+--------------------------------------------------------------+ - | dense_dimension | text: 512 | Dense dimension for sparse features to use if no dense | - | | label: 512 | features are present. 
| - +---------------------------------+-------------------+--------------------------------------------------------------+ - | concat_dimension | text: 512 | Concat dimension for sequence and sentence features. | - | | label: 512 | | - +---------------------------------+-------------------+--------------------------------------------------------------+ - | number_of_negative_examples | 20 | The number of incorrect labels. The algorithm will minimize | - | | | their similarity to the user input during training. | - +---------------------------------+-------------------+--------------------------------------------------------------+ - | similarity_type | "auto" | Type of similarity measure to use, either 'auto' or 'cosine' | - | | | or 'inner'. | - +---------------------------------+-------------------+--------------------------------------------------------------+ - | loss_type | "softmax" | The type of the loss function, either 'softmax' or 'margin'. | - +---------------------------------+-------------------+--------------------------------------------------------------+ - | ranking_length | 10 | Number of top actions to normalize scores for loss type | - | | | 'softmax'. Set to 0 to turn off normalization. | - +---------------------------------+-------------------+--------------------------------------------------------------+ - | maximum_positive_similarity | 0.8 | Indicates how similar the algorithm should try to make | - | | | embedding vectors for correct labels. | - | | | Should be 0.0 < ... < 1.0 for 'cosine' similarity type. | - +---------------------------------+-------------------+--------------------------------------------------------------+ - | maximum_negative_similarity | -0.4 | Maximum negative similarity for incorrect labels. | - | | | Should be -1.0 < ... < 1.0 for 'cosine' similarity type. 
| - +---------------------------------+-------------------+--------------------------------------------------------------+ - | use_maximum_negative_similarity | True | If 'True' the algorithm only minimizes maximum similarity | - | | | over incorrect intent labels, used only if 'loss_type' is | - | | | set to 'margin'. | - +---------------------------------+-------------------+--------------------------------------------------------------+ - | scale_loss | True | Scale loss inverse proportionally to confidence of correct | - | | | prediction. | - +---------------------------------+-------------------+--------------------------------------------------------------+ - | regularization_constant | 0.002 | The scale of regularization. | - +---------------------------------+-------------------+--------------------------------------------------------------+ - | negative_margin_scale | 0.8 | The scale of how important is to minimize the maximum | - | | | similarity between embeddings of different labels. | - +---------------------------------+-------------------+--------------------------------------------------------------+ - | weight_sparsity | 0.8 | Sparsity of the weights in dense layers. | - | | | Value should be between 0 and 1. | - +---------------------------------+-------------------+--------------------------------------------------------------+ - | drop_rate | 0.2 | Dropout rate for encoder. Value should be between 0 and 1. | - | | | The higher the value the higher the regularization effect. | - +---------------------------------+-------------------+--------------------------------------------------------------+ - | drop_rate_attention | 0.0 | Dropout rate for attention. Value should be between 0 and 1. | - | | | The higher the value the higher the regularization effect. 
| - +---------------------------------+-------------------+--------------------------------------------------------------+ - | use_sparse_input_dropout | False | If 'True' apply dropout to sparse input tensors. | - +---------------------------------+-------------------+--------------------------------------------------------------+ - | use_dense_input_dropout | False | If 'True' apply dropout to dense input tensors. | - +---------------------------------+-------------------+--------------------------------------------------------------+ - | evaluate_every_number_of_epochs | 20 | How often to calculate validation accuracy. | - | | | Set to '-1' to evaluate just once at the end of training. | - +---------------------------------+-------------------+--------------------------------------------------------------+ - | evaluate_on_number_of_examples | 0 | How many examples to use for hold out validation set. | - | | | Large values may hurt performance, e.g. model accuracy. | - +---------------------------------+-------------------+--------------------------------------------------------------+ - | use_masked_language_model | False | If 'True' random tokens of the input message will be masked | - | | | and the model should predict those tokens. | - +---------------------------------+-------------------+--------------------------------------------------------------+ - | retrieval_intent | None | Name of the intent for which this response selector model is | - | | | trained. | - +---------------------------------+-------------------+--------------------------------------------------------------+ - | tensorboard_log_directory | None | If you want to use tensorboard to visualize training | - | | | metrics, set this option to a valid output directory. You | - | | | can view the training metrics after training in tensorboard | - | | | via 'tensorboard --logdir '. 
| - +---------------------------------+-------------------+--------------------------------------------------------------+ - | tensorboard_log_level | "epoch" | Define when training metrics for tensorboard should be | - | | | logged. Either after every epoch ("epoch") or for every | - | | | training step ("minibatch"). | - +---------------------------------+-------------------+--------------------------------------------------------------+ - | featurizers | [] | List of featurizer names (alias names). Only features | - | | | coming from the listed names are used. If list is empty | - | | | all available features are used. | - +---------------------------------+-------------------+--------------------------------------------------------------+ - - .. note:: For ``cosine`` similarity ``maximum_positive_similarity`` and ``maximum_negative_similarity`` should - be between ``-1`` and ``1``. - - .. note:: There is an option to use linearly increasing batch size. The idea comes from - ``_. - In order to do it pass a list to ``batch_size``, e.g. ``"batch_size": [64, 256]`` (default behavior). - If constant ``batch_size`` is required, pass an ``int``, e.g. ``"batch_size": 64``. - - .. note:: Parameter ``maximum_negative_similarity`` is set to a negative value to mimic the original - starspace algorithm in the case ``maximum_negative_similarity = maximum_positive_similarity`` - and ``use_maximum_negative_similarity = False``. - See `starspace paper `_ for details. - - -Combined Entity Extractors and Intent Classifiers -------------------------------------------------- - -.. _diet-classifier: - -DIETClassifier -~~~~~~~~~~~~~~ - -:Short: Dual Intent Entity Transformer (DIET) used for intent classification and entity extraction -:Outputs: ``entities``, ``intent`` and ``intent_ranking`` -:Requires: ``dense_features`` and/or ``sparse_features`` for user message and optionally the intent -:Output-Example: - - .. 
code-block:: json - - { - "intent": {"name": "greet", "confidence": 0.8343}, - "intent_ranking": [ - { - "confidence": 0.385910906220309, - "name": "goodbye" - }, - { - "confidence": 0.28161531595656784, - "name": "restaurant_search" - } - ], - "entities": [{ - "end": 53, - "entity": "time", - "start": 48, - "value": "2017-04-10T00:00:00.000+02:00", - "confidence": 1.0, - "extractor": "DIETClassifier" - }] - } - -:Description: - DIET (Dual Intent and Entity Transformer) is a multi-task architecture for intent classification and entity - recognition. The architecture is based on a transformer which is shared for both tasks. - A sequence of entity labels is predicted through a Conditional Random Field (CRF) tagging layer on top of the - transformer output sequence corresponding to the input sequence of tokens. - For the intent labels the transformer output for the complete utterance and intent labels are embedded into a - single semantic vector space. We use the dot-product loss to maximize the similarity with the target label and - minimize similarities with negative samples. - - If you want to learn more about the model, please take a look at our - `videos `__ where we explain the model - architecture in detail. - - .. note:: If during prediction time a message contains **only** words unseen during training - and no Out-Of-Vocabulary preprocessor was used, an empty intent ``None`` is predicted with confidence - ``0.0``. This might happen if you only use the :ref:`CountVectorsFeaturizer` with a ``word`` analyzer - as featurizer. If you use the ``char_wb`` analyzer, you should always get an intent with a confidence - value ``> 0.0``. - -:Configuration: - - If you want to use the ``DIETClassifier`` just for intent classification, set ``entity_recognition`` to ``False``. - If you want to do only entity recognition, set ``intent_classification`` to ``False``. - By default ``DIETClassifier`` does both, i.e. 
``entity_recognition`` and ``intent_classification`` are set to - ``True``. - - You can define a number of hyperparameters to adapt the model. - If you want to adapt your model, start by modifying the following parameters: - - - ``epochs``: - This parameter sets the number of times the algorithm will see the training data (default: ``300``). - One ``epoch`` is equals to one forward pass and one backward pass of all the training examples. - Sometimes the model needs more epochs to properly learn. - Sometimes more epochs don't influence the performance. - The lower the number of epochs the faster the model is trained. - - ``hidden_layers_sizes``: - This parameter allows you to define the number of feed forward layers and their output - dimensions for user messages and intents (default: ``text: [], label: []``). - Every entry in the list corresponds to a feed forward layer. - For example, if you set ``text: [256, 128]``, we will add two feed forward layers in front of - the transformer. The vectors of the input tokens (coming from the user message) will be passed on to those - layers. The first layer will have an output dimension of 256 and the second layer will have an output - dimension of 128. If an empty list is used (default behavior), no feed forward layer will be - added. - Make sure to use only positive integer values. Usually, numbers of power of two are used. - Also, it is usual practice to have decreasing values in the list: next value is smaller or equal to the - value before. - - ``embedding_dimension``: - This parameter defines the output dimension of the embedding layers used inside the model (default: ``20``). - We are using multiple embeddings layers inside the model architecture. - For example, the vector of the complete utterance and the intent is passed on to an embedding layer before - they are compared and the loss is calculated. - - ``number_of_transformer_layers``: - This parameter sets the number of transformer layers to use (default: ``2``). 
- The number of transformer layers corresponds to the transformer blocks to use for the model. - - ``transformer_size``: - This parameter sets the number of units in the transformer (default: ``256``). - The vectors coming out of the transformers will have the given ``transformer_size``. - - ``weight_sparsity``: - This parameter defines the fraction of kernel weights that are set to 0 for all feed forward layers - in the model (default: ``0.8``). The value should be between 0 and 1. If you set ``weight_sparsity`` - to 0, no kernel weights will be set to 0, the layer acts as a standard feed forward layer. You should not - set ``weight_sparsity`` to 1 as this would result in all kernel weights being 0, i.e. the model is not able - to learn. - - .. container:: toggle - - .. container:: header - - The above configuration parameters are the ones you should configure to fit your model to your data. - However, additional parameters exist that can be adapted. - - .. code-block:: none - - +---------------------------------+------------------+--------------------------------------------------------------+ - | Parameter | Default Value | Description | - +=================================+==================+==============================================================+ - | hidden_layers_sizes | text: [] | Hidden layer sizes for layers before the embedding layers | - | | label: [] | for user messages and labels. The number of hidden layers is | - | | | equal to the length of the corresponding. | - +---------------------------------+------------------+--------------------------------------------------------------+ - | share_hidden_layers | False | Whether to share the hidden layer weights between user | - | | | messages and labels. | - +---------------------------------+------------------+--------------------------------------------------------------+ - | transformer_size | 256 | Number of units in transformer. 
| - +---------------------------------+------------------+--------------------------------------------------------------+ - | number_of_transformer_layers | 2 | Number of transformer layers. | - +---------------------------------+------------------+--------------------------------------------------------------+ - | number_of_attention_heads | 4 | Number of attention heads in transformer. | - +---------------------------------+------------------+--------------------------------------------------------------+ - | use_key_relative_attention | False | If 'True' use key relative embeddings in attention. | - +---------------------------------+------------------+--------------------------------------------------------------+ - | use_value_relative_attention | False | If 'True' use value relative embeddings in attention. | - +---------------------------------+------------------+--------------------------------------------------------------+ - | max_relative_position | None | Maximum position for relative embeddings. | - +---------------------------------+------------------+--------------------------------------------------------------+ - | unidirectional_encoder | False | Use a unidirectional or bidirectional encoder. | - +---------------------------------+------------------+--------------------------------------------------------------+ - | batch_size | [64, 256] | Initial and final value for batch sizes. | - | | | Batch size will be linearly increased for each epoch. | - +---------------------------------+------------------+--------------------------------------------------------------+ - | batch_strategy | "balanced" | Strategy used when creating batches. | - | | | Can be either 'sequence' or 'balanced'. | - +---------------------------------+------------------+--------------------------------------------------------------+ - | epochs | 300 | Number of epochs to train. 
| - +---------------------------------+------------------+--------------------------------------------------------------+ - | random_seed | None | Set random seed to any 'int' to get reproducible results. | - +---------------------------------+------------------+--------------------------------------------------------------+ - | learning_rate | 0.001 | Initial learning rate for the optimizer. | - +---------------------------------+------------------+--------------------------------------------------------------+ - | embedding_dimension | 20 | Dimension size of embedding vectors. | - +---------------------------------+------------------+--------------------------------------------------------------+ - | dense_dimension | text: 512 | Dense dimension for sparse features to use if no dense | - | | label: 20 | features are present. | - +---------------------------------+------------------+--------------------------------------------------------------+ - | concat_dimension | text: 512 | Concat dimension for sequence and sentence features. | - | | label: 20 | | - +---------------------------------+------------------+--------------------------------------------------------------+ - | number_of_negative_examples | 20 | The number of incorrect labels. The algorithm will minimize | - | | | their similarity to the user input during training. | - +---------------------------------+------------------+--------------------------------------------------------------+ - | similarity_type | "auto" | Type of similarity measure to use, either 'auto' or 'cosine' | - | | | or 'inner'. | - +---------------------------------+------------------+--------------------------------------------------------------+ - | loss_type | "softmax" | The type of the loss function, either 'softmax' or 'margin'. 
| - +---------------------------------+------------------+--------------------------------------------------------------+ - | ranking_length | 10 | Number of top actions to normalize scores for loss type | - | | | 'softmax'. Set to 0 to turn off normalization. | - +---------------------------------+------------------+--------------------------------------------------------------+ - | maximum_positive_similarity | 0.8 | Indicates how similar the algorithm should try to make | - | | | embedding vectors for correct labels. | - | | | Should be 0.0 < ... < 1.0 for 'cosine' similarity type. | - +---------------------------------+------------------+--------------------------------------------------------------+ - | maximum_negative_similarity | -0.4 | Maximum negative similarity for incorrect labels. | - | | | Should be -1.0 < ... < 1.0 for 'cosine' similarity type. | - +---------------------------------+------------------+--------------------------------------------------------------+ - | use_maximum_negative_similarity | True | If 'True' the algorithm only minimizes maximum similarity | - | | | over incorrect intent labels, used only if 'loss_type' is | - | | | set to 'margin'. | - +---------------------------------+------------------+--------------------------------------------------------------+ - | scale_loss | False | Scale loss inverse proportionally to confidence of correct | - | | | prediction. | - +---------------------------------+------------------+--------------------------------------------------------------+ - | regularization_constant | 0.002 | The scale of regularization. | - +---------------------------------+------------------+--------------------------------------------------------------+ - | negative_margin_scale | 0.8 | The scale of how important it is to minimize the maximum | - | | | similarity between embeddings of different labels. 
| - +---------------------------------+------------------+--------------------------------------------------------------+ - | weight_sparsity | 0.8 | Sparsity of the weights in dense layers. | - | | | Value should be between 0 and 1. | - +---------------------------------+------------------+--------------------------------------------------------------+ - | drop_rate | 0.2 | Dropout rate for encoder. Value should be between 0 and 1. | - | | | The higher the value the higher the regularization effect. | - +---------------------------------+------------------+--------------------------------------------------------------+ - | drop_rate_attention | 0.0 | Dropout rate for attention. Value should be between 0 and 1. | - | | | The higher the value the higher the regularization effect. | - +---------------------------------+------------------+--------------------------------------------------------------+ - | use_sparse_input_dropout | True | If 'True' apply dropout to sparse input tensors. | - +---------------------------------+------------------+--------------------------------------------------------------+ - | use_dense_input_dropout | True | If 'True' apply dropout to dense input tensors. | - +---------------------------------+------------------+--------------------------------------------------------------+ - | evaluate_every_number_of_epochs | 20 | How often to calculate validation accuracy. | - | | | Set to '-1' to evaluate just once at the end of training. | - +---------------------------------+------------------+--------------------------------------------------------------+ - | evaluate_on_number_of_examples | 0 | How many examples to use for hold out validation set. | - | | | Large values may hurt performance, e.g. model accuracy. 
| - +---------------------------------+------------------+--------------------------------------------------------------+ - | intent_classification | True | If 'True' intent classification is trained and intents are | - | | | predicted. | - +---------------------------------+------------------+--------------------------------------------------------------+ - | entity_recognition | True | If 'True' entity recognition is trained and entities are | - | | | extracted. | - +---------------------------------+------------------+--------------------------------------------------------------+ - | use_masked_language_model | False | If 'True' random tokens of the input message will be masked | - | | | and the model has to predict those tokens. It acts like a | - | | | regularizer and should help to learn a better contextual | - | | | representation of the input. | - +---------------------------------+------------------+--------------------------------------------------------------+ - | tensorboard_log_directory | None | If you want to use tensorboard to visualize training | - | | | metrics, set this option to a valid output directory. You | - | | | can view the training metrics after training in tensorboard | - | | | via 'tensorboard --logdir '. | - +---------------------------------+------------------+--------------------------------------------------------------+ - | tensorboard_log_level | "epoch" | Define when training metrics for tensorboard should be | - | | | logged. Either after every epoch ('epoch') or for every | - | | | training step ('minibatch'). | - +---------------------------------+------------------+--------------------------------------------------------------+ - | featurizers | [] | List of featurizer names (alias names). Only features | - | | | coming from the listed names are used. If list is empty | - | | | all available features are used. 
| - +---------------------------------+------------------+--------------------------------------------------------------+ - - .. note:: For ``cosine`` similarity ``maximum_positive_similarity`` and ``maximum_negative_similarity`` should - be between ``-1`` and ``1``. - - .. note:: There is an option to use linearly increasing batch size. The idea comes from - ``_. - In order to do it pass a list to ``batch_size``, e.g. ``"batch_size": [64, 256]`` (default behavior). - If constant ``batch_size`` is required, pass an ``int``, e.g. ``"batch_size": 64``. - - .. note:: Parameter ``maximum_negative_similarity`` is set to a negative value to mimic the original - starspace algorithm in the case ``maximum_negative_similarity = maximum_positive_similarity`` - and ``use_maximum_negative_similarity = False``. - See `starspace paper `_ for details. diff --git a/docs/nlu/entity-extraction.rst b/docs/nlu/entity-extraction.rst deleted file mode 100644 index 1605aff60c0f..000000000000 --- a/docs/nlu/entity-extraction.rst +++ /dev/null @@ -1,247 +0,0 @@ -:desc: Use open source named entity recognition like Spacy or Duckling - and customize them according to your needs to build contextual - AI assistants - -.. _entity-extraction: - -Entity Extraction -================= - -.. edit-link:: - -Entity extraction involves parsing user messages for required pieces of information. Rasa Open Source -provides entity extractors for custom entities as well as pre-trained ones like dates and locations. 
-Here is a summary of the available extractors and what they are used for: - -========================= ================= ======================== ================================= -Component Requires Model Notes -========================= ================= ======================== ================================= -``CRFEntityExtractor`` sklearn-crfsuite conditional random field good for training custom entities -``SpacyEntityExtractor`` spaCy averaged perceptron provides pre-trained entities -``DucklingHTTPExtractor`` running duckling context-free grammar provides pre-trained entities -``MitieEntityExtractor`` MITIE structured SVM good for training custom entities -``EntitySynonymMapper`` existing entities N/A maps known synonyms -``DIETClassifier`` conditional random field - on top of a transformer good for training custom entities -========================= ================= ======================== ================================= - -.. contents:: - :local: - -The "entity" Object -^^^^^^^^^^^^^^^^^^^ - -After parsing, an entity is returned as a dictionary. There are two fields that show information -about how the pipeline impacted the entities returned: the ``extractor`` field -of an entity tells you which entity extractor found this particular entity, and -the ``processors`` field contains the name of components that altered this -specific entity. - -The use of synonyms can cause the ``value`` field not match the ``text`` -exactly. Instead it will return the trained synonym. - -.. code-block:: json - - { - "text": "show me chinese restaurants", - "intent": "restaurant_search", - "entities": [ - { - "start": 8, - "end": 15, - "value": "chinese", - "entity": "cuisine", - "extractor": "CRFEntityExtractor", - "confidence": 0.854, - "processors": [] - } - ] - } - -.. note:: - - The ``confidence`` will be set by the ``CRFEntityExtractor`` and the ``DIETClassifier`` component. The - ``DucklingHTTPExtractor`` will always return ``1``. 
The ``SpacyEntityExtractor`` extractor - does not provide this information and returns ``null``. - - -Some extractors, like ``duckling``, may include additional information. For example: - -.. code-block:: json - - { - "additional_info":{ - "grain":"day", - "type":"value", - "value":"2018-06-21T00:00:00.000-07:00", - "values":[ - { - "grain":"day", - "type":"value", - "value":"2018-06-21T00:00:00.000-07:00" - } - ] - }, - "confidence":1.0, - "end":5, - "entity":"time", - "extractor":"DucklingHTTPExtractor", - "start":0, - "text":"today", - "value":"2018-06-21T00:00:00.000-07:00" - } - - -Custom Entities -^^^^^^^^^^^^^^^ - -Almost every chatbot and voice app will have some custom entities. -A restaurant assistant should understand ``chinese`` as a cuisine, -but to a language-learning assistant it would mean something very different. -The ``CRFEntityExtractor`` and the ``DIETClassifier`` component can learn custom entities in any language, given -some training data. -See :ref:`training-data-format` for details on how to include entities in your training data. - - -.. _entities-roles-groups: - -Entities Roles and Groups -^^^^^^^^^^^^^^^^^^^^^^^^^ - -.. warning:: - This feature is experimental. - We introduce experimental features to get feedback from our community, so we encourage you to try it out! - However, the functionality might be changed or removed in the future. - If you have feedback (positive or negative) please share it with us on the `forum `_. - -Assigning custom entity labels to words, allow you to define certain concepts in the data. -For example, we can define what a `city` is: - -.. code-block:: none - - I want to fly from [Berlin](city) to [San Francisco](city). - -However, sometimes you want to specify entities even further. -Let's assume we want to build an assistant that should book a flight for us. -The assistant needs to know which of the two cities in the example above is the departure city and which is the -destination city. 
-``Berlin`` and ``San Francisco`` are still cities, but they play a different role in our example. -To distinguish between the different roles, you can assign a role label in addition to the entity label. - -.. code-block:: none - - - I want to fly from [Berlin]{"entity": "city", "role": "departure"} to [San Francisco]{"entity": "city", "role": "destination"}. - -You can also group different entities by specifying a group label next to the entity label. -The group label can, for example, be used to define different orders. -In the following example we use the group label to reference what toppings goes with which pizza and -what size which pizza has. - -.. code-block:: none - - Give me a [small]{"entity": "size", "group": "1"} pizza with [mushrooms]{"entity": "topping", "group": "1"} and - a [large]{"entity": "size", "group": "2"} [pepperoni]{"entity": "topping", "group": "2"} - -See :ref:`training-data-format` for details on how to define entities with roles and groups in your training data. - -The entity object returned by the extractor will include the detected role/group label. - -.. code-block:: json - - { - "text": "Book a flight from Berlin to SF", - "intent": "book_flight", - "entities": [ - { - "start": 19, - "end": 25, - "value": "Berlin", - "entity": "city", - "role": "departure", - "extractor": "DIETClassifier", - }, - { - "start": 29, - "end": 31, - "value": "San Francisco", - "entity": "city", - "role": "destination", - "extractor": "DIETClassifier", - } - ] - } - -.. note:: - - Composite entities are currently only supported by the :ref:`diet-classifier` and :ref:`CRFEntityExtractor`. - -In order to properly train your model with entities that have roles/groups, make sure to include enough training data -examples for every combination of entity and role/group label. -Also make sure to have some variations in your training data, so that the model is able to generalize. 
-For example, you should not only have example like ``fly FROM x TO y``, but also include examples like -``fly TO y FROM x``. - -To fill slots from entities with a specific role/group, you need to either define a custom slot mappings using -:ref:`forms` or use :ref:`custom-actions` to extract the corresponding entity directly from the tracker. - - -Extracting Places, Dates, People, Organizations -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -spaCy has excellent pre-trained named-entity recognizers for a few different languages. -You can test them out in this -`interactive demo `_. -We don't recommend that you try to train your own NER using spaCy, -unless you have a lot of data and know what you are doing. -Note that some spaCy models are highly case-sensitive. - -Dates, Amounts of Money, Durations, Distances, Ordinals -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The `duckling `_ library does a great job -of turning expressions like "next Thursday at 8pm" into actual datetime -objects that you can use, e.g. - -.. code-block:: python - - "next Thursday at 8pm" - => {"value":"2018-05-31T20:00:00.000+01:00"} - - -The list of supported languages can be found `here -`_. -Duckling can also handle durations like "two hours", -amounts of money, distances, and ordinals. -Fortunately, there is a duckling docker container ready to use, -that you just need to spin up and connect to Rasa NLU -(see :ref:`DucklingHTTPExtractor`). - - -Regular Expressions (regex) -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -You can use regular expressions to help the CRF model learn to recognize entities. -In your training data (see :ref:`training-data-format`) you can provide a list of regular expressions, each of which provides -the ``CRFEntityExtractor`` with an extra binary feature, which says if the regex was found (1) or not (0). - -For example, the names of German streets often end in ``strasse``. 
By adding this as a regex, -we are telling the model to pay attention to words ending this way, and will quickly learn to -associate that with a location entity. - -If you just want to match regular expressions exactly, you can do this in your code, -as a postprocessing step after receiving the response from Rasa NLU. - - -.. _entity-extraction-custom-features: - -Passing Custom Features to ``CRFEntityExtractor`` -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -If you want to pass custom features, such as pre-trained word embeddings, to ``CRFEntityExtractor``, you can -add any dense featurizer to the pipeline before the ``CRFEntityExtractor``. -``CRFEntityExtractor`` automatically finds the additional dense features and checks if the dense features are an -iterable of ``len(tokens)``, where each entry is a vector. -A warning will be shown in case the check fails. -However, ``CRFEntityExtractor`` will continue to train just without the additional custom features. -In case dense features are present, ``CRFEntityExtractor`` will pass the dense features to ``sklearn_crfsuite`` -and use them for training. diff --git a/docs/nlu/language-support.rst b/docs/nlu/language-support.rst deleted file mode 100644 index 5bd5067407f7..000000000000 --- a/docs/nlu/language-support.rst +++ /dev/null @@ -1,88 +0,0 @@ -:desc: Support all languages via custom domain-trained embeddings or pre-trained embeddings - with open source chatbot framework Rasa. - -.. _language-support: - -Language Support -================ - -.. edit-link:: - -You can use Rasa to build assistants in any language you want! Rasa's -``supervised_embeddings`` pipeline can be used on training data in **any language**. -This pipeline creates word embeddings from scratch with the data you provide. - -In addition, we also support pre-trained word embeddings such as spaCy. For information on -what pipeline is best for your use case, check out :ref:`choosing-a-pipeline`. - -.. 
contents:: - :local: - - -Training a Model in Any Language --------------------------------- - -Rasa's ``supervised_embeddings`` pipeline can be used to train models in any language, because -it uses your own training data to create custom word embeddings. This means that the vector -representation of any specific word will depend on its relationship with the other words in your -training data. This customization also means that the pipeline is great for use cases that hinge -on domain-specific data, such as those that require picking up on specific product names. - -To train a Rasa model in your preferred language, define the -``supervised_embeddings`` pipeline as your pipeline in your ``config.yml`` or other configuration file -via the instructions :ref:`here `. - -After you define the ``supervised_embeddings`` processing pipeline and generate some :ref:`NLU training data ` -in your chosen language, train the model with ``rasa train nlu``. Once the training is finished, you can test your model's -language skills. See how your model interprets different input messages via: - -.. code-block:: bash - - rasa shell nlu - -.. note:: - - Even more so when training word embeddings from scratch, more training data will lead to a - better model! If you find your model is having trouble discerning your inputs, try training - with more example sentences. - -.. _pretrained-word-vectors: - -Pre-trained Word Vectors ------------------------- - -If you can find them in your language, pre-trained word vectors are a great way to get started with less data, -as the word vectors are trained on large amounts of data such as Wikipedia. - -spaCy -~~~~~ - -With the ``pretrained_embeddings_spacy`` :ref:`pipeline `, you can use spaCy's -`pre-trained language models `_ or load fastText vectors, which are available -for `hundreds of languages `_. If you want -to incorporate a custom model you've found into spaCy, check out their page on -`adding languages `_. 
As described in the documentation, you need to -register your language model and link it to the language identifier, which will allow Rasa to load and use your new language -by passing in your language identifier as the ``language`` option. - -.. _mitie: - -MITIE -~~~~~ - -You can also pre-train your own word vectors from a language corpus using :ref:`MITIE `. To do so: - -1. Get a clean language corpus (a Wikipedia dump works) as a set of text files. -2. Build and run `MITIE Wordrep Tool`_ on your corpus. - This can take several hours/days depending on your dataset and your workstation. - You'll need something like 128GB of RAM for wordrep to run -- yes, that's a lot: try to extend your swap. -3. Set the path of your new ``total_word_feature_extractor.dat`` as the ``model`` parameter in your - :ref:`configuration `. - -For a full example of how to train MITIE word vectors, check out -`this blogpost `_ -of creating a MITIE model from a Chinese Wikipedia dump. - - -.. _`MITIE Wordrep Tool`: https://github.com/mit-nlp/MITIE/tree/master/tools/wordrep - diff --git a/docs/nlu/old-nlu-change-log.rst b/docs/nlu/old-nlu-change-log.rst deleted file mode 100644 index c6eb86d72f0f..000000000000 --- a/docs/nlu/old-nlu-change-log.rst +++ /dev/null @@ -1,862 +0,0 @@ -:desc: Rasa NLU Changelog - -.. _old-nlu-change-log: - -NLU Change Log -============== - -All notable changes to this project will be documented in this file. -This project adheres to `Semantic Versioning`_ starting with version 0.7.0. - -[0.15.1] - Unreleased -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- fixed bug in rasa_nlu.test script that appeared if no intent classifier was present - -[0.15.0] - 2019-04-23 -^^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- Added a detailed warning showing which entities are overlapping -- Authentication token can be also set with env variable ``RASA_NLU_TOKEN``. 
-- ``SpacyEntityExtractor`` supports same entity filtering as ``DucklingHTTPExtractor`` -- **added support for python 3.7** - -Changed -------- -- validate training data only if used for training -- applied spacy guidelines on how to disable pipeline components -- starter packs now also tested when attempting to merge a branch to master -- new consistent naming scheme for pipelines: - - ``tensorflow_embedding`` pipeline template renamed to ``supervised_embeddings`` - - ``spacy_sklearn`` pipeline template renamed to ``pretrained_embeddings_spacy`` - - requirements files, sample configs, and dockerfiles renamed accordingly -- ``/train`` endpoint now returns a zipfile of the trained model. -- pipeline components in the config file should be provided - with their class name -- persisted components file name changed -- replace pep8 with pycodestyle -- ``Component.name`` property returns component's class name -- Components ``load(...)``, ``create(...)`` and ``cache_key(...)`` methods - additionally take component's meta/config dicts -- Components ``persist(...)`` method additionally takes file name prefix -- renamed ``rasa_nlu.evaluate`` to ``rasa_nlu.test`` -- renamed ``rasa_nlu.test.run_cv_evaluation`` to - ``rasa_nlu.test.cross_validate`` -- renamed ``rasa_nlu.train.do_train()`` to ``rasa_nlu.train.train()`` -- train command can now also load config from file -- updated to tensorflow 1.13 - -Removed -------- -- **removed python 2.7 support** - -Fixed ------ -- ``RegexFeaturizer`` detects all regex in user message (not just first) -- do_extractors_support_overlap now correctly throws an exception only if no extractors are - passed or if extractors that do not support overlapping entities are used. 
-- Docs entry for pretrained embeddings pipeline is now consistent with the - code in ``registry.py`` - - -[0.14.6] - 2019-03-20 -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- fixed Changelog dates (dates had the wrong year attached) - -[0.14.5] - 2019-03-19 -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- properly tag ``stable`` docker image (instead of alpha) - -[0.14.3] - 2019-02-01 -^^^^^^^^^^^^^^^^^^^^^ -- - -Changed -------- -- starter packs are now tested in parallel with the unittests, - and only on branches ending in ``.x`` (i.e. new version releases) -- pinned ``coloredlogs``, ``future`` and ``packaging`` - -[0.14.2] - 2019-01-29 -^^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- ``rasa_nlu.evaluate`` now exports reports into a folder and also - includes the entity extractor reports - -Changed -------- -- updated requirements to match Core and SDK -- pinned keras dependencies - -[0.14.1] - 2019-01-23 -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- scikit-learn is a global requirement - -.. _nluv0-14-0: - -[0.14.0] - 2019-01-23 -^^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- Ability to save successful predictions and classification results to a JSON - file from ``rasa_nlu.evaluate`` -- environment variables specified with ``${env_variable}`` in a yaml - configuration file are now replaced with the value of the environment - variable -- more documentation on how to run NLU with Docker -- ``analyzer`` parameter to ``intent_featurizer_count_vectors`` featurizer to - configure whether to use word or character n-grams -- Travis script now clones and tests the Rasa NLU starter pack - -Changed -------- -- ``EmbeddingIntentClassifier`` has been refactored, including changes to the - config parameters as well as comments and types for all class functions. 
-- the http server's ``POST /evaluate`` endpoint returns evaluation results - for both entities and intents -- replaced ``yaml`` with ``ruamel.yaml`` -- updated spacy version to 2.0.18 -- updated TensorFlow version to 1.12.0 -- updated scikit-learn version to 0.20.2 -- updated cloudpickle version to 0.6.1 -- updated requirements to match Core and SDK -- pinned keras dependencies - -Removed -------- -- ``/config`` endpoint -- removed pinning of ``msgpack`` and unused package ``python-msgpack`` -- removed support for ``ner_duckling``. Now supports only ``ner_duckling_http`` - -Fixed ------ -- Should loading jieba custom dictionaries only once. -- Set attributes of custom components correctly if they defer from the default -- NLU Server can now handle training data mit emojis in it -- If the ``token_name`` is not given in the endpoint configuration, the default - value is ``token`` instead of ``None`` -- Throws error only if ``ner_crf`` picks up overlapping entities. If the - entity extractor supports overlapping entities no error is thrown. -- Updated CORS support for the server. - Added the ``Access-Control-Allow-Headers`` and ``Content-Type`` headers - for nlu server -- parsing of emojis which are sent within jsons -- Bad input shape error from ``sklearn_intent_classifier`` when using - ``scikit-learn==0.20.2`` - -[0.13.8] - 2018-11-21 -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- pinned spacy version to ``spacy<=2.0.12,>2.0`` to avoid dependency conflicts - with tensorflow - -[0.13.7] - 2018-10-11 -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- ``rasa_nlu.server`` allowed more than ``max_training_processes`` - to be trained if they belong to different projects. - ``max_training_processes`` is now a global parameter, regardless of what - project the training process belongs to. 
- - -[0.13.6] - 2018-10-04 -^^^^^^^^^^^^^^^^^^^^^ - -Changed -------- -- ``boto3`` is now loaded lazily in ``AWSPersistor`` and is not - included in ``requirements_bare.txt`` anymore - -Fixed ------ -- Allow training of pipelines containing ``EmbeddingIntentClassifier`` in - a separate thread on python 3. This makes http server calls to ``/train`` - non-blocking -- require ``scikit-learn<0.20`` in setup py to avoid corrupted installations - with the most recent scikit learn - - -[0.13.5] - 2018-09-28 -^^^^^^^^^^^^^^^^^^^^^ - -Changed -------- -- Training data is now validated after loading from files in ``loading.py`` - instead of on initialization of ``TrainingData`` object - -Fixed ------ -- ``Project`` set up to pull models from a remote server only use - the pulled model instead of searching for models locally - -[0.13.4] - 2018-09-19 -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- pinned matplotlib to 2.x (not ready for 3.0 yet) -- pytest-services since it wasn't used and caused issues on Windows - -[0.13.3] - 2018-08-28 -^^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- ``EndpointConfig`` class that handles authenticated requests - (ported from Rasa Core) -- ``DataRouter()`` class supports a ``model_server`` ``EndpointConfig``, - which it regularly queries to fetch NLU models -- this can be used with ``rasa_nlu.server`` with the ``--endpoint`` option - (the key for this the model server config is ``model``) -- docs on model fetching from a URL -- ability to specify lookup tables in training data - -Changed -------- -- loading training data from a URL requires an instance of ``EndpointConfig`` - -- Changed evaluate behavior to plot two histogram bars per bin. - Plotting confidence of right predictions in a wine-ish color - and wrong ones in a blue-ish color. 
- -Removed -------- - -Fixed ------ -- re-added support for entity names with special characters in markdown format - -[0.13.2] - 2018-08-28 -^^^^^^^^^^^^^^^^^^^^^ - -Changed -------- -- added information about migrating the CRF component from 0.12 to 0.13 - -Fixed ------ -- pipelines containing the ``EmbeddingIntentClassifier`` are not trained in a - separate thread, as this may lead to freezing during training - -[0.13.1] - 2018-08-07 -^^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- documentation example for creating a custom component - -Fixed ------ -- correctly pass reference time in milliseconds to duckling_http - -.. _nluv0-13-0: - -[0.13.0] - 2018-08-02 -^^^^^^^^^^^^^^^^^^^^^ - -.. warning:: - - This is a release **breaking backwards compatibility**. - Unfortunately, it is not possible to load previously trained models as - the parameters for the tensorflow and CRF models changed. - -Added ------ -- support for `tokenizer_jieba` load custom dictionary from config -- allow pure json including pipeline configuration on train endpoint -- doc link to a community contribution for Rasa NLU in Chinese -- support for component ``count_vectors_featurizer`` use ``tokens`` - feature provide by tokenizer -- 2-character and a 5-character prefix features to ``ner_crf`` -- ``ner_crf`` with whitespaced tokens to ``tensorflow_embedding`` pipeline -- predict empty string instead of None for intent name -- update default parameters for tensorflow embedding classifier -- do not predict anything if feature vector contains only zeros - in tensorflow embedding classifier -- change persistence keywords in tensorflow embedding classifier - (make previously trained models impossible to load) -- intent_featurizer_count_vectors adds features to text_features - instead of overwriting them -- add basic OOV support to intent_featurizer_count_vectors (make - previously trained models impossible to load) -- add a feature for each regex in the training set for crf_entity_extractor -- Current 
training processes count for server and projects. -- the ``/version`` endpoint returns a new field ``minimum_compatible_version`` -- added logging of intent prediction errors to evaluation script -- added histogram of confidence scores to evaluation script -- documentation for the ``ner_duckling_http`` component - -Changed -------- -- renamed CRF features ``wordX`` to ``suffixX`` and ``preX`` to ``suffixX`` -- L1 and L2 regularization defaults in ``ner_crf`` both set to 0.1 -- ``whitespace_tokenizer`` ignores punctuation ``.,!?`` before - whitespace or end of string -- Allow multiple training processes per project -- Changed AlreadyTrainingError to MaxTrainingError. The first one was used - to indicate that the project was already training. The latest will show - an error when the server isn't able to training more models. -- ``Interpreter.ensure_model_compatibility`` takes a new parameters for - the version to compare the model version against -- confusion matrix plot gets saved to file automatically during evaluation - -Removed -------- -- dependence on spaCy when training ``ner_crf`` without POS features -- documentation for the ``ner_duckling`` component - facebook doesn't maintain - the underlying clojure version of duckling anymore. component will be - removed in the next release. - -Fixed ------ -- Fixed Luis emulation output to add start, end position and - confidence for each entity. -- Fixed byte encoding issue where training data could not be - loaded by URL in python 3. - -[0.12.3] - 2018-05-02 -^^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- Returning used model name and project name in the response - of ``GET /parse`` and ``POST /parse`` as ``model`` and ``project`` - respectively. 
- -Fixed ------ -- re-added possibility to set fixed model name from http train endpoint - - -[0.12.2] - 2018-04-20 -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- fixed duckling text extraction for ner_duckling_http - - -[0.12.1] - 2018-04-18 -^^^^^^^^^^^^^^^^^^^^^ -Added ------ -- support for retrieving training data from a URL - -Fixed ------ -- properly set duckling http url through environment setting -- improvements and fixes to the configuration and pipeline - documentation - -.. _nluv0-12-0: - -[0.12.0] - 2018-04-17 -^^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- support for inline entity synonyms in markdown training format -- support for regex features in markdown training format -- support for splitting and training data into multiple and mixing formats -- support for markdown files containing regex-features or synonyms only -- added ability to list projects in cloud storage services for model loading -- server evaluation endpoint at ``POST /evaluate`` -- server endpoint at ``DELETE /models`` to unload models from server memory -- CRF entity recognizer now returns a confidence score when extracting entities -- added count vector featurizer to create bag of words representation -- added embedding intent classifier implemented in tensorflow -- added tensorflow requirements -- added docs blurb on handling contextual dialogue -- distribute package as wheel file in addition to source - distribution (faster install) -- allow a component to specify which languages it supports -- support for persisting models to Azure Storage -- added tokenizer for CHINESE (``zh``) as well as instructions on how to load - MITIE model - -Changed -------- -- model configuration is separated from server / train configuration. This is a - **breaking change** and models need to be retrained. See migrations guide. -- Regex features are now sorted internally. 
- **retrain your model if you use regex features** -- The keyword intent classifier now returns ``null`` instead - of ``"None"`` as intent name in the json result if there's no match -- in the evaluation results, replaced ``O`` with the string - ``no_entity`` for better understanding -- The ``CRFEntityExtractor`` now only trains entity examples that have - ``"extractor": "ner_crf"`` or no extractor at all -- Ignore hidden files when listing projects or models -- Docker Images now run on python 3.6 for better non-latin character set support -- changed key name for a file in ngram featurizer -- changed ``jsonObserver`` to generate logs without a record separator -- Improve jsonschema validation: text attribute of training data samples - can not be empty -- made the NLU server's ``/evaluate`` endpoint asynchronous - -Fixed ------ -- fixed certain command line arguments not getting passed into - the ``data_router`` - -[0.11.4] - 2018-03-19 -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- google analytics docs survey code - - -[0.11.3] - 2018-02-13 -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- capitalization issues during spacy named entity recognition - - -[0.11.2] - 2018-02-06 -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- Formatting of tokens without assigned entities in evaluation - - -[0.11.1] - 2018-02-02 -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- Changelog doc formatting -- fixed project loading for newly added projects to a running server -- fixed certain command line arguments not getting passed into the data_router - -.. _nluv0-11-0: - -[0.11.0] - 2018-01-30 -^^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- non ascii character support for anything that gets json dumped (e.g. 
- training data received over HTTP endpoint) -- evaluation of entity extraction performance in ``evaluation.py`` -- support for spacy 2.0 -- evaluation of intent classification with crossvalidation in ``evaluation.py`` -- support for splitting training data into multiple files - (markdown and JSON only) - -Changed -------- -- removed ``-e .`` from requirements files - if you want to install - the app use ``pip install -e .`` -- fixed http duckling parsing for non ``en`` languages -- fixed parsing of entities from markdown training data files - - -[0.10.6] - 2018-01-02 -^^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- support asterisk style annotation of examples in markdown format - -Fixed ------ -- Preventing capitalized entities from becoming synonyms of the form - lower-cased → capitalized - - -[0.10.5] - 2017-12-01 -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- read token in server from config instead of data router -- fixed reading of models with none date name prefix in server - - -[0.10.4] - 2017-10-27 -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- docker image build - - -[0.10.3] - 2017-10-26 -^^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- support for new dialogflow data format (previously api.ai) -- improved support for custom components (components are - stored by class name in stored metadata to allow for components - that are not mentioned in the Rasa NLU registry) -- language option to convert script - -Fixed ------ -- Fixed loading of default model from S3. Fixes #633 -- fixed permanent training status when training fails #652 -- quick fix for None "_formatter_parser" bug - - -[0.10.1] - 2017-10-06 -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- readme issues -- improved setup py welcome message - -.. _nluv0-10-0: - -[0.10.0] - 2017-09-27 -^^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- Support for training data in Markdown format -- Cors support. You can now specify allowed cors origins - within your configuration file. 
-- The HTTP server is now backed by Klein (Twisted) instead of Flask. - The server is now asynchronous but is no more WSGI compatible -- Improved Docker automated builds -- Rasa NLU now works with projects instead of models. A project can - be the basis for a restaurant search bot in German or a customer - service bot in English. A model can be seen as a snapshot of a project. - -Changed -------- -- Root project directories have been slightly rearranged to - clean up new docker support -- use ``Interpreter.create(metadata, ...)`` to create interpreter - from dict and ``Interpreter.load(file_name, ...)`` to create - interpreter with metadata from a file -- Renamed ``name`` parameter to ``project`` -- Docs hosted on GitHub pages now: - `Documentation `_ -- Adapted remote cloud storages to support projects - (backwards incompatible!) - -Fixed ------ -- Fixed training data persistence. Fixes #510 -- Fixed UTF-8 character handling when training through HTTP interface -- Invalid handling of numbers extracted from duckling - during synonym handling. Fixes #517 -- Only log a warning (instead of throwing an exception) on - misaligned entities during mitie NER - - -[0.9.2] - 2017-08-16 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- removed unnecessary `ClassVar` import - - -[0.9.1] - 2017-07-11 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- removed obsolete ``--output`` parameter of ``train.py``. - use ``--path`` instead. fixes #473 - -.. 
_nluv0-9-0: - -[0.9.0] - 2017-07-07 -^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- increased test coverage to avoid regressions (ongoing) -- added regex featurization to support intent classification - and entity extraction (``intent_entity_featurizer_regex``) - -Changed -------- -- replaced existing CRF library (python-crfsuite) with - sklearn-crfsuite (due to better windows support) -- updated to spacy 1.8.2 -- logging format of logged request now includes model name and timestamp -- use module specific loggers instead of default python root logger -- output format of the duckling extractor changed. the ``value`` - field now includes the complete value from duckling instead of - just text (so this is an property is an object now instead of just text). - includes granularity information now. -- deprecated ``intent_examples`` and ``entity_examples`` sections in - training data. all examples should go into the ``common_examples`` section -- weight training samples based on class distribution during ner_crf - cross validation and sklearn intent classification training -- large refactoring of the internal training data structure and - pipeline architecture -- numpy is now a required dependency - -Removed -------- -- luis data tokenizer configuration value (not used anymore, - luis exports char offsets now) - -Fixed ------ -- properly update coveralls coverage report from travis -- persistence of duckling dimensions -- changed default response of untrained ``intent_classifier_sklearn`` - from ``"intent": None`` to ``"intent": {"name": None, "confidence": 0.0}`` -- ``/status`` endpoint showing all available models instead of only - those whose name starts with *model* -- properly return training process ids #391 - - -[0.8.12] - 2017-06-29 -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- fixed missing argument attribute error - - - -[0.8.11] - 2017-06-07 -^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- updated mitie installation documentation - - -[0.8.10] - 2017-05-31 
-^^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- fixed documentation about training data format - - -[0.8.9] - 2017-05-26 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- properly handle response_log configuration variable being set to ``null`` - - -[0.8.8] - 2017-05-26 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- ``/status`` endpoint showing all available models instead of only - those whose name starts with *model* - - -[0.8.7] - 2017-05-24 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- Fixed range calculation for crf #355 - - -[0.8.6] - 2017-05-15 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- Fixed duckling dimension persistence. fixes #358 - - -[0.8.5] - 2017-05-10 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- Fixed pypi installation dependencies (e.g. flask). fixes #354 - - -[0.8.4] - 2017-05-10 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- Fixed CRF model training without entities. fixes #345 - - -[0.8.3] - 2017-05-10 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- Fixed Luis emulation and added test to catch regression. Fixes #353 - - -[0.8.2] - 2017-05-08 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- deepcopy of context #343 - - -[0.8.1] - 2017-05-08 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- NER training reuses context inbetween requests - -.. 
_nluv0-8-0: - -[0.8.0] - 2017-05-08 -^^^^^^^^^^^^^^^^^^^^ - -Added ------ -- ngram character featurizer (allows better handling of out-of-vocab words) -- replaced pre-wired backends with more flexible pipeline definitions -- return top 10 intents with sklearn classifier - `#199 `_ -- python type annotations for nearly all public functions -- added alternative method of defining entity synonyms -- support for arbitrary spacy language model names -- duckling components to provide normalized output for structured entities -- Conditional random field entity extraction (Markov model for entity - tagging, better named entity recognition with low and medium data and - similarly well at big data level) -- allow naming of trained models instead of generated model names -- dynamic check of requirements for the different components & error - messages on missing dependencies -- support for using multiple entity extractors and combining results downstream - -Changed -------- -- unified tokenizers, classifiers and feature extractors to implement - common component interface -- ``src`` directory renamed to ``rasa_nlu`` -- when loading data in a foreign format (api.ai, luis, wit) the data - gets properly split into intent & entity examples -- Configuration: - - added ``max_number_of_ngrams`` - - removed ``backend`` and added ``pipeline`` as a replacement - - added ``luis_data_tokenizer`` - - added ``duckling_dimensions`` -- parser output format changed - from ``{"intent": "greeting", "confidence": 0.9, "entities": []}`` - - to ``{"intent": {"name": "greeting", "confidence": 0.9}, "entities": []}`` -- entities output format changed - from ``{"start": 15, "end": 28, "value": "New York City", "entity": "GPE"}`` - - to ``{"extractor": "ner_mitie", "processors": ["ner_synonyms"], "start": 15, "end": 28, "value": "New York City", "entity": "GPE"}`` - - where ``extractor`` denotes the entity extractor that originally found an entity, and ``processor`` denotes components that alter 
entities, such as the synonym component. -- camel cased MITIE classes (e.g. ``MITIETokenizer`` → ``MitieTokenizer``) -- model metadata changed, see migration guide -- updated to spacy 1.7 and dropped training and loading capabilities for - the spacy component (breaks existing spacy models!) -- introduced compatibility with both Python 2 and 3 - -Fixed ------ -- properly parse ``str`` additionally to ``unicode`` - `#210 `_ -- support entity only training - `#181 `_ -- resolved conflicts between metadata and configuration values - `#219 `_ -- removed tokenization when reading Luis.ai data (they changed their format) - `#241 `_ - - -[0.7.4] - 2017-03-27 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- fixed failed loading of example data after renaming attributes, - i.e. "KeyError: 'entities'" - - -[0.7.3] - 2017-03-15 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- fixed regression in mitie entity extraction on special characters -- fixed spacy fine tuning and entity recognition on passed language instance - - -[0.7.2] - 2017-03-13 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- python documentation about calling rasa NLU from python - - -[0.7.1] - 2017-03-10 -^^^^^^^^^^^^^^^^^^^^ - -Fixed ------ -- mitie tokenization value generation - `#207 `_, thanks @cristinacaputo -- changed log file extension from ``.json`` to ``.log``, - since the contained text is not proper json - -.. _nluv0-7-0: - -[0.7.0] - 2017-03-10 -^^^^^^^^^^^^^^^^^^^^ -This is a major version update. Please also have a look at the -`Migration Guide `_. - -Added ------ -- Changelog ;) -- option to use multi-threading during classifier training -- entity synonym support -- proper temporary file creation during tests -- mitie_sklearn backend using mitie tokenization and sklearn classification -- option to fine-tune spacy NER models -- multithreading support of build in REST server (e.g. 
using gunicorn) -- multitenancy implementation to allow loading multiple models which - share the same backend - -Fixed ------ -- error propagation on failed vector model loading (spacy) -- escaping of special characters during mitie tokenization - - -[0.6-beta] - 2017-01-31 -^^^^^^^^^^^^^^^^^^^^^^^ - -.. _`master`: https://github.com/RasaHQ/rasa_nlu/ - -.. _`Semantic Versioning`: http://semver.org/ diff --git a/docs/nlu/old-nlu-migration-guide.rst b/docs/nlu/old-nlu-migration-guide.rst deleted file mode 100644 index 4e861257cbb4..000000000000 --- a/docs/nlu/old-nlu-migration-guide.rst +++ /dev/null @@ -1,255 +0,0 @@ -:desc: Read more about changes between major versions of our open source - NLP engine and how to migrate from one version to another. - -.. _old-nlu-migration-guide: - -Migration Guide -=============== -This page contains information about changes between major versions and -how you can migrate from one version to another. - -0.14.x to 0.15.0 ----------------- - -.. warning:: - - This is a release **breaking backwards compatibility**. - Unfortunately, it is not possible to load - previously trained models (as the stored file names have changed as - well as the configuration and metadata). Please make sure to retrain - a model before trying to use it with this improved version. - -model configuration -~~~~~~~~~~~~~~~~~~~ -- The standard pipelines have been renamed. ``spacy_sklearn`` is now - ``pretrained_embeddings_spacy`` and ``tensorflow_embedding`` is now - ``supervised_embeddings``. -- Components names used for nlu config have been changed. - Use component class name in nlu config file. - -custom components -~~~~~~~~~~~~~~~~~ -- The signature of Component's methods have been changed: - - - ``load(...)``, ``create(...)`` and ``cache_key(...)`` methods - additionally take component's meta/config dicts - - ``persist(...)`` method additionally takes file name prefix - Change your custom components accordingly. 
- -function names -~~~~~~~~~~~~~~ -- ``rasa_nlu.evaluate`` was renamed to ``rasa_nlu.test`` -- ``rasa_nlu.test.run_cv_evaluation`` was renamed to - ``rasa_nlu.test.cross_validate`` -- ``rasa_nlu.train.do_train()`` was renamed to to ``rasa_nlu.train.train()`` - -0.13.x to 0.14.0 ----------------- -- ``/config`` endpoint removed, when training a new model, the user should - always post the configuration as part of the request instead of relying - on the servers config. -- ``ner_duckling`` support has been removed. Use ``DucklingHTTPExtractor`` - instead. More info about ``DucklingHTTPExtractor`` can be found at - https://rasa.com/docs/nlu/components/#ner-duckling-http. - -0.13.x to 0.13.3 ----------------- -- ``rasa_nlu.server`` has to be supplied with a ``yml`` file defining the - model endpoint from which to retrieve training data. The file location has - be passed with the ``--endpoints`` argument, e.g. - ``rasa run --endpoints endpoints.yml`` - ``endpoints.yml`` needs to contain the ``model`` key - with a ``url`` and an optional ``token``. Here's an example: - - .. code-block:: yaml - - model: - url: http://my_model_server.com/models/default/nlu/tags/latest - token: my_model_server_token - - .. note:: - - If you configure ``rasa.nlu.server`` to pull models from a remote server, - the default project name will be used. It is defined - ``RasaNLUModelConfig.DEFAULT_PROJECT_NAME``. - - -- ``rasa.nlu.train`` can also be run with the ``--endpoints`` argument - if you want to pull training data from a URL. Alternatively, the - current ``--url`` syntax is still supported. - - .. code-block:: yaml - - data: - url: http://my_data_server.com/projects/default/data - token: my_data_server_token - - .. note:: - - Your endpoint file may contain entries for both ``model`` and ``data``. - ``rasa.nlu.server`` and ``rasa.nlu.train`` will pick the relevant entry. 
- -- If you directly access the ``DataRouter`` class or ``rasa.nlu.train``'s - ``do_train()`` method, you can directly create instances of - ``EndpointConfig`` without creating a ``yml`` file. Example: - - .. code-block:: python - - from rasa.nlu.utils import EndpointConfig - from rasa.nlu.data_router import DataRouter - - model_endpoint = EndpointConfig( - url="http://my_model_server.com/models/default/nlu/tags/latest", - token="my_model_server_token" - ) - - interpreter = DataRouter("projects", model_server=model_endpoint) - - -0.12.x to 0.13.0 ----------------- - -.. warning:: - - This is a release **breaking backwards compatibility**. - Unfortunately, it is not possible to load previously trained models as - the parameters for the tensorflow and CRF models changed. - -CRF model configuration -~~~~~~~~~~~~~~~~~~~~~~~ - -The feature names for the features of the entity CRF have changed: - -+------------------+------------------+ -| old feature name | new feature name | -+==================+==================+ -| pre2 | prefix2 | -+------------------+------------------+ -| pre5 | prefix5 | -+------------------+------------------+ -| word2 | suffix2 | -+------------------+------------------+ -| word3 | suffix3 | -+------------------+------------------+ -| word5 | suffix5 | -+------------------+------------------+ - -Please change these keys in your pipeline configuration of the ``CRFEntityExtractor`` -components ``features`` attribute if you use them. - -0.11.x to 0.12.0 ----------------- - -.. warning:: - - This is a release **breaking backwards compatibility**. - Unfortunately, it is not possible to load - previously trained models (as the stored file formats have changed as - well as the configuration and metadata). Please make sure to retrain - a model before trying to use it with this improved version. 
- -model configuration -~~~~~~~~~~~~~~~~~~~ -We have split the configuration in a model configuration and parameters used -to configure the server, train, and evaluate scripts. The configuration -file now only contains the ``pipeline`` as well as the ``language`` -parameters. Example: - - .. code-block:: yaml - - langauge: "en" - - pipeline: - - name: "SpacyNLP" - model: "en" # parameter of the spacy component - - name: "EntitySynonymMapper" - - -All other parameters have either been moved to the scripts -for training, :ref:`serving models `, or put into the -:ref:`pipeline configuration `. - -persistors: -~~~~~~~~~~~ -- renamed ``AWS_REGION`` to ``AWS_DEFAULT_REGION`` -- always make sure to specify the bucket using env ``BUCKET_NAME`` -- are now configured solely over environment variables - -0.9.x to 0.10.0 ---------------- -- We introduced a new concept called a ``project``. You can have multiple versions - of a model trained for a project. E.g. you can train an initial model and - add more training data and retrain that project. This will result in a new - model version for the same project. This allows you to, allways request - the latest model version from the http server and makes the model handling - more structured. -- If you want to reuse trained models you need to move them in a directory named - after the project. E.g. if you already got a trained model in directory ``my_root/model_20170628-002704`` - you need to move that to ``my_root/my_project/model_20170628-002704``. Your - new projects name will be ``my_project`` and you can query the model using the - http server using ``curl http://localhost:5000/parse?q=hello%20there&project=my_project`` -- Docs moved to https://rasahq.github.io/rasa_nlu/ -- Renamed ``name`` parameter to ``project``. This means for training requests you now need to pass the ``project parameter - instead of ``name``, e.g. 
``POST /train?project=my_project_name`` with the body of the - request containing the training data -- Adapted remote cloud storages to support projects. This is a backwards incompatible change, - and unfortunately you need to retrain uploaded models and reupload them. - -0.8.x to 0.9.x ---------------- -- add ``SpacyTokenizer`` to trained spacy_sklearn models metadata (right after the ``SpacyNLP``). alternative is to retrain the model - -0.7.x to 0.8.x ---------------- - -- The training and loading capability for the spacy entity extraction was dropped in favor of the new CRF extractor. That means models need to be retrained using the crf extractor. - -- The parameter and configuration value name of ``backend`` changed to ``pipeline``. - -- There have been changes to the model metadata format. You can either retrain your models or change the stored - metadata.json: - - - rename ``language_name`` to ``language`` - - rename ``backend`` to ``pipeline`` - - for mitie models you need to replace ``feature_extractor`` with ``mitie_feature_extractor_fingerprint``. - That fingerprint depends on the language you are using, for ``en`` it - is ``"mitie_feature_extractor_fingerprint": 10023965992282753551``. - -0.6.x to 0.7.x --------------- - -- The parameter and configuration value name of ``server_model_dir`` changed to ``server_model_dirs``. - -- The parameter and configuration value name of ``write`` changed to ``response_log``. It now configures the - *directory* where the logs should be written to (not a file!) - -- The model metadata format has changed. All paths are now relative with respect to the ``path`` specified in the - configuration during training and loading. If you want to run models that are trained with a - version prev to 0.7 you need to adapt the paths manually in ``metadata.json`` from - - .. 
code-block:: json - - { - "trained_at": "20170304-191111", - "intent_classifier": "model_XXXX_YYYY_ZZZZ/intent_classifier.pkl", - "training_data": "model_XXXX_YYYY_ZZZZ/training_data.json", - "language_name": "en", - "entity_extractor": "model_XXXX_YYYY_ZZZZ/ner", - "feature_extractor": null, - "backend": "spacy_sklearn" - } - - to something along the lines of this (making all paths relative to the models base dir, which is ``model_XXXX_YYYY_ZZZZ/``): - - .. code-block:: json - - { - "trained_at": "20170304-191111", - "intent_classifier": "intent_classifier.pkl", - "training_data": "training_data.json", - "language_name": "en", - "entity_synonyms": null, - "entity_extractor": "ner", - "feature_extractor": null, - "backend": "spacy_sklearn" - } diff --git a/docs/nlu/training-data-format.rst b/docs/nlu/training-data-format.rst deleted file mode 100644 index b6558e185442..000000000000 --- a/docs/nlu/training-data-format.rst +++ /dev/null @@ -1,248 +0,0 @@ -:desc: Read more about how to format training data with Rasa NLU for open - source natural language processing. - -.. _training-data-format: - -Training Data Format -==================== - -.. edit-link:: - -.. contents:: - :local: - -Data Formats -~~~~~~~~~~~~ - - -You can provide training data as Markdown or as JSON, as a single file or as a directory containing multiple files. -Note that Markdown is usually easier to work with. - - -Markdown Format ---------------- - -Markdown is the easiest Rasa NLU format for humans to read and write. -Examples are listed using the unordered list syntax, e.g. minus ``-``, asterisk ``*``, or plus ``+``. -Examples are grouped by intent, and entities are annotated as Markdown links, -e.g. ``[]()``, or by using the following syntax ``[]{"entity": ""}``. -Using the latter syntax, you can also assign synonyms, roles, or groups to an entity, e.g. -``[]{"entity": "", "role": "", "group": "", "value": ""}``. -The keywords ``role``, ``group``, and ``value`` are optional in this notation. 
-To understand what the labels ``role`` and ``group`` are for, see section :ref:`entities-roles-groups`. - -.. code-block:: md - - ## intent:check_balance - - what is my balance - - how much do I have on my [savings](source_account) - - how much do I have on my [savings account]{"entity": "source_account", "value": "savings"} - - Could I pay in [yen](currency)? - - ## intent:greet - - hey - - hello - - ## synonym:savings - - pink pig - - ## regex:zipcode - - [0-9]{5} - - ## lookup:additional_currencies - path/to/currencies.txt - -The training data for Rasa NLU is structured into different parts: - -- common examples -- synonyms -- regex features and -- lookup tables - -While common examples is the only part that is mandatory, including the others will help the NLU model -learn the domain with fewer examples and also help it be more confident of its predictions. - -Synonyms will map extracted entities to the same name, for example mapping "my savings account" to simply "savings". -However, this only happens *after* the entities have been extracted, so you need to provide examples with the synonyms -present so that Rasa can learn to pick them up. - -Lookup tables may be specified as plain text files containing newline-separated words or -phrases. Upon loading the training data, these files are used to generate -case-insensitive regex patterns that are added to the regex features. - -.. note:: - The common theme here is that common examples, regex features and lookup tables merely act as cues to the final NLU - model by providing additional features to the machine learning algorithm during training. Therefore, it must not be - assumed that having a single example would be enough for the model to robustly identify intents and/or entities - across all variants of that example. - -.. note:: - ``/`` symbol is reserved as a delimiter to separate retrieval intents from response text identifiers. Make sure not - to use it in the name of your intents. - -.. 
warning:: - The synonym format to specify synonyms ``[savings account](source_account:savings)`` is deprecated. Please use the - new format ``[savings account]{"entity": "source_account", "value": "savings"}``. - - To update your training data file execute the following command on the terminal of your choice: - ``sed -i -E 's/\[([^)]+)\]\(([^)]+):([^)]+)\)/[\1]{"entity": "\2", "value": "\3"}/g' `` - Your NLU training data file will contain the new training data format after you executed the above command. - Depending on your OS you might need to update the syntax of the sed command. - -JSON Format ------------ - -The JSON format consists of a top-level object called ``rasa_nlu_data``, with the keys -``common_examples``, ``entity_synonyms`` and ``regex_features``. -The most important one is ``common_examples``. - -.. code-block:: json - - { - "rasa_nlu_data": { - "common_examples": [], - "regex_features" : [], - "lookup_tables" : [], - "entity_synonyms": [] - } - } - -The ``common_examples`` are used to train your model. You should put all of your training -examples in the ``common_examples`` array. -Regex features are a tool to help the classifier detect entities or intents and improve the performance. - - -Improving Intent Classification and Entity Recognition -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Common Examples ---------------- - -Common examples have three components: ``text``, ``intent`` and ``entities``. The first two are strings while the last -one is an array. - - - The *text* is the user message [required] - - The *intent* is the intent that should be associated with the text [optional] - - The *entities* are specific parts of the text which need to be identified [optional] - -Entities are specified with a ``start`` and an ``end`` value, which together make a range -to apply to the string, e.g. in the example below, with ``text="show me chinese restaurants"``, then -``text[8:15] == 'chinese'``. 
Entities can span multiple words, and in -fact the ``value`` field does not have to correspond exactly to the substring in your example. -That way you can map synonyms, or misspellings, to the same ``value``. - -.. code-block:: md - - ## intent:restaurant_search - - show me [chinese](cuisine) restaurants - - -Regular Expression Features ---------------------------- -Regular expressions can be used to support the intent classification and entity extraction. For example, if your entity -has a deterministic structure (like a zipcode or an email address), you can use a regular expression to ease detection -of that entity. For the zipcode example it might look like this: - -.. code-block:: md - - ## regex:zipcode - - [0-9]{5} - - ## regex:greet - - hey[^\\s]* - -The name doesn't define the entity nor the intent, it is just a human readable description for you to remember what -this regex is used for and is the title of the corresponding pattern feature. As you can see in the above example, you -can also use the regex features to improve the intent -classification performance. - -Try to create your regular expressions in a way that they match as few words as possible. E.g. using ``hey[^\s]*`` -instead of ``hey.*``, as the later one might match the whole message whereas the first one only matches a single word. - -Regex features for entity extraction are currently only supported by the ``CRFEntityExtractor`` component! Hence, other -entity extractors, like ``MitieEntityExtractor`` or ``SpacyEntityExtractor`` won't use the generated features and their -presence will not improve entity recognition for these extractors. Currently, all intent classifiers make use of -available regex features. - -.. note:: - Regex features don't define entities nor intents! They simply provide patterns to help the classifier - recognize entities and related intents. Hence, you still need to provide intent & entity examples as part of your - training data! - -.. 
_lookup-tables: - -Lookup Tables -------------- -Lookup tables provide a convenient way to supply a list of entity examples. -The supplied lookup table files must be in a newline-delimited format. -For example, ``data/test/lookup_tables/plates.txt`` may contain: - -.. literalinclude:: ../../data/test/lookup_tables/plates.txt - -And can be loaded and used as shown here: - -.. code-block:: md - - ## lookup:plates - data/test/lookup_tables/plates.txt - - ## intent:food_request - - I'd like beef [tacos](plates) and a [burrito](plates) - - How about some [mapo tofu](plates) - -When lookup tables are supplied in training data, the contents are combined -into a large, case-insensitive regex pattern that looks for exact matches in -the training examples. These regexes match over multiple tokens, so -``lettuce wrap`` would match ``get me a lettuce wrap ASAP`` as ``[0 0 0 1 1 0]``. -These regexes are processed identically to the regular regex patterns -directly specified in the training data. - -.. note:: - For lookup tables to be effective, there must be a few examples of matches in your training data. Otherwise the - model will not learn to use the lookup table match features. - - -.. warning:: - You have to be careful when you add data to the lookup table. - For example if there are false positives or other noise in the table, - this can hurt performance. So make sure your lookup tables contain - clean data. - - -Normalizing Data -~~~~~~~~~~~~~~~~ - -.. _entity_synonyms: - -Entity Synonyms ---------------- -If you define entities as having the same value they will be treated as synonyms. Here is an example of that: - -.. code-block:: md - - ## intent:search - - in the center of [NYC]{"entity": "city", "value": "New York City"} - - in the centre of [New York City](city) - - -As you can see, the entity ``city`` has the value ``New York City`` in both examples, even though the text in the first -example states ``NYC``. 
By defining the value attribute to be different from the value found in the text between start -and end index of the entity, you can define a synonym. Whenever the same text will be found, the value will use the -synonym instead of the actual text in the message. - -To use the synonyms defined in your training data, you need to make sure the pipeline contains the -``EntitySynonymMapper`` component (see :ref:`components`). - -Alternatively, you can add an "entity_synonyms" array to define several synonyms to one entity value. Here is an -example of that: - -.. code-block:: md - - ## synonym:New York City - - NYC - - nyc - - the big apple - -.. note:: - Please note that adding synonyms using the above format does not improve the model's classification of those entities. - **Entities must be properly classified before they can be replaced with the synonym value.** diff --git a/docs/nlu/using-nlu-only.rst b/docs/nlu/using-nlu-only.rst deleted file mode 100644 index 12acd9c88d12..000000000000 --- a/docs/nlu/using-nlu-only.rst +++ /dev/null @@ -1,63 +0,0 @@ -:desc: Find out how to use only Rasa NLU as a standalone NLU service for your chatbot or virtual assistant. - -.. _using-nlu-only: - -Using NLU Only -============== - -.. edit-link:: - - -If you want to use Rasa only as an NLU component, you can! - -Training NLU-only models ------------------------- - -To train an NLU model only, run: - -.. code-block:: bash - - rasa train nlu - -This will look for NLU training data files in the ``data/`` directory -and saves a trained model in the ``models/`` directory. -The name of the model will start with ``nlu-``. - -Testing your NLU model on the command line ------------------------------------------- - -To try out your NLU model on the command line, use the ``rasa shell nlu`` command: - - -.. code-block:: bash - - rasa shell nlu - - -This will start the rasa shell and ask you to type in a message to test. -You can keep typing in as many messages as you like. 
- -Alternatively, you can leave out the ``nlu`` argument and pass in an nlu-only model directly: - -.. code-block:: bash - - rasa shell -m models/nlu-20190515-144445.tar.gz - - - -Running an NLU server ---------------------- - -To start a server with your NLU model, pass in the model name at runtime: - -.. code-block:: bash - - rasa run --enable-api -m models/nlu-20190515-144445.tar.gz - - -You can then request predictions from your model using the ``/model/parse`` endpoint. -To do this, run: - -.. code-block:: bash - - curl localhost:5005/model/parse -d '{"text":"hello"}' diff --git a/docs/user-guide/architecture.rst b/docs/user-guide/architecture.rst deleted file mode 100644 index 73d12e29ade8..000000000000 --- a/docs/user-guide/architecture.rst +++ /dev/null @@ -1,37 +0,0 @@ -:desc: Check the architecture to understand how Rasa uses machine - learning, context and state of the conversation to predict the - next action of the AI Assistant. - -.. _architecture: - -Architecture -============ - -.. edit-link:: - - -Message Handling -^^^^^^^^^^^^^^^^ - -This diagram shows the basic steps of how an assistant built with Rasa -responds to a message: - -.. image:: ../_static/images/rasa-message-processing.png - -The steps are: - -1. The message is received and passed to an ``Interpreter``, which - converts it into a dictionary including the original text, the intent, - and any entities that were found. This part is handled by NLU. -2. The ``Tracker`` is the object which keeps track of conversation state. - It receives the info that a new message has come in. -3. The policy receives the current state of the tracker. -4. The policy chooses which action to take next. -5. The chosen action is logged by the tracker. -6. A response is sent to the user. - - -.. note:: - - Messages can be text typed by a human, or structured input - like a button press. 
diff --git a/docs/user-guide/building-assistants.rst b/docs/user-guide/building-assistants.rst deleted file mode 100644 index ae7725b0a806..000000000000 --- a/docs/user-guide/building-assistants.rst +++ /dev/null @@ -1,1163 +0,0 @@ -:desc: How to build simple FAQ and contextual assistants - -.. _building-assistants: - -Tutorial: Building Assistants -============================= - -.. edit-link:: - -After following the basics of setting up an assistant in the `Rasa Tutorial `_, we'll -now walk through building a basic FAQ chatbot and then build a bot that can handle -contextual conversations. - -.. contents:: - :local: - -.. _build-faq-assistant: - -Building a simple FAQ assistant -------------------------------- - -FAQ assistants are the simplest assistants to build and a good place to get started. -These assistants allow the user to ask a simple question and get a response. We’re going to -build a basic FAQ assistant using features of Rasa designed specifically for this type of assistant. - -In this section we’re going to cover the following topics: - - - `Responding to simple intents `_ with the MemoizationPolicy - - `Handling FAQs `_ using the ResponseSelector - - -We’re going to use content from `Sara `_, the Rasa -assistant that, amongst other things, helps the user get started with the Rasa products. -You should first install Rasa using the `Step-by-step Installation Guide `_ -and then follow the `Rasa Tutorial `_ -to make sure you know the basics. - -To prepare for this tutorial, we're going to create a new directory and start a -new Rasa project. - -.. code-block:: bash - - mkdir rasa-assistant - rasa init - - -Let's remove the default content from this bot, so that the ``data/nlu.md``, ``data/stories.md`` -and ``domain.yml`` files are empty. - -.. _respond-with-memoization-policy: - -Memoization Policy -^^^^^^^^^^^^^^^^^^ - -The MemoizationPolicy remembers examples from training stories for up to a ``max_history`` -of turns. 
The number of "turns" includes messages the user sent, and actions the -assistant performed. For the purpose of a simple, context-less FAQ bot, we only need -to pay attention to the last message the user sent, and therefore we’ll set that to ``1``. - -You can do this by editing your ``config.yml`` file as follows (you can remove ``TEDPolicy`` for now): - -.. code-block:: yaml - - policies: - - name: MemoizationPolicy - max_history: 1 - - name: MappingPolicy - -.. note:: - The MappingPolicy is there because it handles the logic of the ``/restart`` intent, - which allows you to clear the conversation history and start fresh. - -Now that we’ve defined our policies, we can add some stories for the ``goodbye``, ``thank`` and ``greet`` -intents to the ``data/stories.md`` file: - -.. code-block:: md - - ## greet - * greet - - utter_greet - - ## thank - * thank - - utter_noworries - - ## goodbye - * bye - - utter_bye - -We’ll also need to add the intents, actions and responses to our ``domain.yml`` file in the following sections: - -.. code-block:: md - - intents: - - greet - - bye - - thank - - responses: - utter_noworries: - - text: No worries! - utter_greet: - - text: Hi - utter_bye: - - text: Bye! - -Finally, we’ll copy over some NLU data from Sara into our ``data/nlu.md`` file -(more can be found `here `__): - -.. code-block:: md - - ## intent:greet - - Hi - - Hey - - Hi bot - - Hey bot - - Hello - - Good morning - - hi again - - hi folks - - ## intent:bye - - goodbye - - goodnight - - good bye - - good night - - see ya - - toodle-oo - - bye bye - - gotta go - - farewell - - ## intent:thank - - Thanks - - Thank you - - Thank you so much - - Thanks bot - - Thanks for that - - cheers - -You can now train a first model and test the bot, by running the following commands: - -.. code-block:: bash - - rasa train - rasa shell - -This bot should now be able to reply to the intents we defined consistently, and in any order. - -For example: - -.. 
image:: /_static/images/memoization_policy_convo.png - :alt: Memoization Policy Conversation - :align: center - - -While it's good to test the bot interactively, we should also add end to end test cases that -can later be included as part of a :ref:`CI/CD system `. End-to-end :ref:`test conversations ` -include NLU data, so that both components of Rasa can be tested. The file -``tests/conversation_tests.md`` contains example test conversations. Delete all the test conversations and replace -them with some test conversations for your assistant so far: - -.. code-block:: md - - ## greet + goodbye - * greet: Hi! - - utter_greet - * bye: Bye - - utter_bye - - ## greet + thanks - * greet: Hello there - - utter_greet - * thank: thanks a bunch - - utter_noworries - - ## greet + thanks + goodbye - * greet: Hey - - utter_greet - * thank: thank you - - utter_noworries - * bye: bye bye - - utter_bye - -To test our model against the test file, run the command: - -.. code-block:: bash - - rasa test --stories tests/conversation_tests.md - -The test command will produce a directory named ``results``. It should contain a file -called ``failed_stories.md``, where any test cases that failed will be printed. It will -also specify whether it was an NLU or Core prediction that went wrong. As part of a -CI/CD pipeline, the test option ``--fail-on-prediction-errors`` can be used to throw -an exception that stops the pipeline. - -.. _faqs-response-selector: - -Response Selectors -^^^^^^^^^^^^^^^^^^ - -The :ref:`response-selector` NLU component is designed to make it easier to handle dialogue -elements like :ref:`small-talk` and FAQ messages in a simple manner. By using the ResponseSelector, -you only need one story to handle all FAQs, instead of adding new stories every time you -want to increase your bot's scope. - -People often ask Sara different questions surrounding the Rasa products, so let’s -start with three intents: ``ask_channels``, ``ask_languages``, and ``ask_rasax``. 
-We’re going to copy over some NLU data from the `Sara training data `_ -into our ``nlu.md``. It’s important that these intents have an ``faq/`` prefix, so they’re -recognised as the faq intent by the ResponseSelector: - -.. code-block:: md - - ## intent: faq/ask_channels - - What channels of communication does rasa support? - - what channels do you support? - - what chat channels does rasa uses - - channels supported by Rasa - - which messaging channels does rasa support? - - ## intent: faq/ask_languages - - what language does rasa support? - - which language do you support? - - which languages supports rasa - - can I use rasa also for another laguage? - - languages supported - - ## intent: faq/ask_rasax - - I want information about rasa x - - i want to learn more about Rasa X - - what is rasa x? - - Can you tell me about rasa x? - - Tell me about rasa x - - tell me what is rasa x - -Next, we’ll need to define the responses associated with these FAQs in a new file called ``responses.md`` in the ``data/`` directory: - -.. code-block:: md - - ## ask channels - * faq/ask_channels - - We have a comprehensive list of [supported connectors](https://rasa.com/docs/core/connectors/), but if - you don't see the one you're looking for, you can always create a custom connector by following - [this guide](https://rasa.com/docs/rasa/user-guide/connectors/custom-connectors/). - - ## ask languages - * faq/ask_languages - - You can use Rasa to build assistants in any language you want! - - ## ask rasa x - * faq/ask_rasax - - Rasa X is a tool to learn from real conversations and improve your assistant. Read more [here](https://rasa.com/docs/rasa-x/) - -The ResponseSelector should already be at the end of the NLU pipeline in our ``config.yml``: - -.. 
code-block:: yaml - - language: en - pipeline: - - name: WhitespaceTokenizer - - name: RegexFeaturizer - - name: LexicalSyntacticFeaturizer - - name: CountVectorsFeaturizer - - name: CountVectorsFeaturizer - analyzer: "char_wb" - min_ngram: 1 - max_ngram: 4 - - name: DIETClassifier - epochs: 100 - - name: EntitySynonymMapper - - name: ResponseSelector - epochs: 100 - -Now that we’ve defined the NLU side, we need to make Core aware of these changes. Open your ``domain.yml`` file and add the ``faq`` intent: - -.. code-block:: yaml - - intents: - - greet - - bye - - thank - - faq - -We’ll also need to add a `retrieval action `_, -which takes care of sending the response predicted from the ResponseSelector back to the user, -to the list of actions. These actions always have to start with the ``respond_`` prefix: - -.. code-block:: yaml - - actions: - - respond_faq - -Next we’ll write a story so that Core knows which action to predict: - -.. code-block:: md - - ## Some question from FAQ - * faq - - respond_faq - -This prediction is handled by the MemoizationPolicy, as we described earlier. - -After all of the changes are done, train a new model and test the modified FAQs: - -.. code-block:: bash - - rasa train - rasa shell - -At this stage it makes sense to add a few test cases to your ``test_stories.md`` file again: - -.. code-block:: md - - ## ask channels - * faq: What messaging channels does Rasa support? - - respond_faq - - ## ask languages - * faq: Which languages can I build assistants in? - - respond_faq - - ## ask rasa x - * faq: What’s Rasa X? - - respond_faq - -You can read more in this `blog post `_ and the -`Retrieval Actions `_ page. - -Using the features we described in this tutorial, you can easily build a context-less assistant. -When you’re ready to enhance your assistant with context, check out :ref:`tutorial-contextual-assistants`. - - -.. 
note:: - Here's a minimal checklist of files we modified to build a basic FAQ assistant: - - - ``data/nlu.md``: Add NLU training data for ``faq/`` intents - - ``data/responses.md``: Add responses associated with ``faq/`` intents - - ``config.yml``: Add ``ReponseSelector`` in your NLU pipeline - - ``domain.yml``: Add a retrieval action ``respond_faq`` and intent ``faq`` - - ``data/stories.md``: Add a simple story for FAQs - - ``test_stories.md``: Add E2E test stories for your FAQs - - -.. _tutorial-contextual-assistants: - -Building a contextual assistant -------------------------------- - -Whether you’ve just created an FAQ bot or are starting from scratch, the next step is to expand -your bot to handle contextual conversations. - -In this tutorial we’re going to cover a variety of topics: - - - :ref:`handling-business-logic` - - :ref:`handling-unexpected-user-input` - - :ref:`failing-gracefully` - - :ref:`more-complex-contextual-conversations` - -Please make sure you’ve got all the data from the :ref:`build-faq-assistant` section before starting this part. -You will need to make some adjustments to your configuration file, since we now need to pay attention to context: - -.. code-block:: yaml - - policies: - - name: MemoizationPolicy - - name: MappingPolicy - -We removed the ``max_history: 1`` configuration. The default is ``5``, -meaning Core will pay attention to the past 5 turns when making a prediction -(see explanation of `max history `_). - -.. _handling-business-logic: - -Handling business logic -^^^^^^^^^^^^^^^^^^^^^^^ - -A lot of conversational assistants have user goals that involve collecting a bunch of information -from the user before being able to do something for them. This is called slot filling. For -example, in the banking industry you may have a user goal of transferring money, where you -need to collect information about which account to transfer from, whom to transfer to and the -amount to transfer. 
This type of behavior can and should be handled in a rule based way, as it -is clear how this information should be collected. - -For this type of use case, we can use Forms and our FormPolicy. The `FormPolicy `_ -works by predicting the form as the next action until all information is gathered from the user. - -As an example, we will build out the SalesForm from Sara. The user wants to contact -our sales team, and for this we need to gather the following pieces of information: - - - Their job - - Their bot use case - - Their name - - Their email - - Their budget - - Their company - -We will start by defining the ``SalesForm`` as a new class in the file called ``actions.py``. -The first method we need to define is the name, which like in a regular Action -returns the name that will be used in our stories: - -.. code-block:: python - - from rasa_sdk.forms import FormAction - - class SalesForm(FormAction): - """Collects sales information and adds it to the spreadsheet""" - - def name(self): - return "sales_form" - -Next we have to define the ``required_slots`` method which specifies which pieces of information to -ask for, i.e. which slots to fill. - -.. code-block:: python - - @staticmethod - def required_slots(tracker): - return [ - "job_function", - "use_case", - "budget", - "person_name", - "company", - "business_email", - ] - -Note: you can customise the required slots function not to be static. E.g. if the ``job_function`` is a -developer, you could add a ``required_slot`` about the users experience level with Rasa - -Once you’ve done that, you’ll need to specify how the bot should ask for this information. This -is done by specifying ``utter_ask_{slotname}`` responses in your ``domain.yml`` file. For the above -we’ll need to specify the following: - -.. code-block:: yaml - - utter_ask_business_email: - - text: What's your business email? - utter_ask_company: - - text: What company do you work for? 
- utter_ask_budget: - - text: "What's your annual budget for conversational AI? 💸" - utter_ask_job_function: - - text: "What's your job? 🕴" - utter_ask_person_name: - - text: What's your name? - utter_ask_use_case: - - text: What's your use case? - -We’ll also need to define all these slots in our ``domain.yml`` file: - -.. code-block:: yaml - - slots: - company: - type: unfeaturized - job_function: - type: unfeaturized - person_name: - type: unfeaturized - budget: - type: unfeaturized - business_email: - type: unfeaturized - use_case: - type: unfeaturized - -Going back to our Form definition, we need to define the ``submit`` method as well, -which will do something with the information the user has provided once the form is complete: - -.. code-block:: python - - def submit( - self, - dispatcher: CollectingDispatcher, - tracker: Tracker, - domain: Dict[Text, Any], - ) -> List[Dict]: - - dispatcher.utter_message("Thanks for getting in touch, we’ll contact you soon") - return [] - -In this case, we only tell the user that we’ll be in touch with them, however -usually you would send this information to an API or a database. See the `rasa-demo `_ -for an example of how to store this information in a spreadsheet. - -We’ll need to add the form we just created to a new section in our ``domain.yml`` file: - -.. code-block:: yaml - - forms: - - sales_form - -We also need to create an intent to activate the form, as well as an intent for providing all the -information the form asks the user for. For the form activation intent, we can create an -intent called ``contact_sales``. Add the following training data to your nlu file: - -.. code-block:: md - - ## intent:contact_sales - - I wanna talk to your sales people. 
- - I want to talk to your sales people - - I want to speak with sales - - Sales - - Please schedule a sales call - - Please connect me to someone from sales - - I want to get in touch with your sales guys - - I would like to talk to someone from your sales team - - sales please - -You can view the full intent `here `__) - -We will also create an intent called ``inform`` which covers any sort of information the user -provides to the bot. *The reason we put all this under one intent, is because there is no -real intent behind providing information, only the entity is important.* Add the following -data to your NLU file: - -.. code-block:: md - - ## intent:inform - - [100k](budget) - - [100k](budget) - - [240k/year](budget) - - [150,000 USD](budget) - - I work for [Rasa](company) - - The name of the company is [ACME](company) - - company: [Rasa Technologies](company) - - it's a small company from the US, the name is [Hooli](company) - - it's a tech company, [Rasa](company) - - [ACME](company) - - [Rasa Technologies](company) - - [maxmeier@firma.de](business_email) - - [bot-fan@bots.com](business_email) - - [maxmeier@firma.de](business_email) - - [bot-fan@bots.com](business_email) - - [my email is email@rasa.com](business_email) - - [engineer](job_function) - - [brand manager](job_function) - - [marketing](job_function) - - [sales manager](job_function) - - [growth manager](job_function) - - [CTO](job_function) - - [CEO](job_function) - - [COO](job_function) - - [John Doe](person_name) - - [Jane Doe](person_name) - - [Max Mustermann](person_name) - - [Max Meier](person_name) - - We plan to build a [sales bot](use_case) to increase our sales by 500%. - - we plan to build a [sales bot](use_case) to increase our revenue by 100%. - - a [insurance tool](use_case) that consults potential customers on the best life insurance to choose. - - we're building a [conversational assistant](use_case) for our employees to book meeting rooms. - -.. 
note:: - Entities like ``business_email`` and ``budget`` would usually be handled by pretrained entity extractors - (e.g. :ref:`DucklingHTTPExtractor` or :ref:`SpacyEntityExtractor`), but for this tutorial - we want to avoid any additional setup. - -The intents and entities will need to be added to your ``domain.yml`` file as well: - -.. code-block:: yaml - - intents: - - greet - - bye - - thank - - faq - - contact_sales - - inform - - entities: - - company - - job_function - - person_name - - budget - - business_email - - use_case - -A story for a form is very simple, as all the slot collection form happens inside the form, and -therefore doesn’t need to be covered in your stories. You just need to write a single story showing when the form should be activated. For the sales form, add this story -to your ``stories.md`` file: - - -.. code-block:: md - - ## sales form - * contact_sales - - sales_form - - form{"name": "sales_form"} - - form{"name": null} - - - -As a final step, let’s add the FormPolicy to our config file: - -.. code-block:: yaml - - policies: - - name: MemoizationPolicy - - name: TEDPolicy - - name: MappingPolicy - - name: FormPolicy - -At this point, you already have a working form, so let’s try it out. Make sure to uncomment the -``action_endpoint`` in your ``endpoints.yml`` to make Rasa aware of the action server that will run our form: - -.. code-block:: yaml - - action_endpoint: - url: "http://localhost:5055/webhook" - -Then start the action server in a new terminal window: - -.. code-block:: bash - - rasa run actions - -Then you can retrain and talk to your bot: - -.. code-block:: bash - - rasa train - rasa shell - -This simple form will work out of the box, however you will likely want to add a bit -more capability to handle different situations. One example of this is validating -slots, to make sure the user provided information correctly (read more about it -`here `__). 
- -Another example is that you may want to fill slots from things other than entities -of the same name. E.g. for the "use case" situation in our Form, we would expect -the user to type a full sentence and not something that you could necessarily -extract as an entity. In this case we can make use of the ``slot_mappings`` method, -where you can describe what your entities should be extracted from. Here we can -use the ``from_text`` method to extract the users whole message: - -.. code-block:: python - - def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict[Text, Any]]]]: - """A dictionary to map required slots to - - an extracted entity - - intent: value pairs - - a whole message - or a list of them, where a first match will be picked""" - return {"use_case": self.from_text(intent="inform")} - -Now our bot will extract the full user message when asking for the use case slot, -and we don’t need to use the ``use_case`` entity defined before. - -All of the methods within a form can be customized to handle different branches in your -business logic. Read more about this `here `_. -However, you should make sure not to handle any unhappy paths inside the form. These -should be handled by writing regular stories, so your model can learn this behavior. - - -.. 
note:: - Here's a minimal checklist of files we modified to handle business logic using a form action: - - - ``actions.py``: Define the form action, including the ``required_slots``, ``slot_mappings`` and ``submit`` methods - - ``data/nlu.md``: - - - Add examples for an intent to activate the form - - Add examples for an ``inform`` intent to fill the form - - ``domain.yml``: - - - Add all slots required by the form - - Add ``utter_ask_{slot}`` responses for all required slots - - Add your form action to the ``forms`` section - - Add all intents and entities from your NLU training data - - - ``data/stories.md``: Add a story for the form - - ``config.yml``: - - - Add the ``FormPolicy`` to your policies - - Add entity extractors to your pipeline - - - ``endpoints.yml``: Define the ``action_endpoint`` - - -.. _handling-unexpected-user-input: - -Handling unexpected user input -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -All expected user inputs should be handled by the form we defined above, i.e. if the -user provides the information the bot asks for. However, in real situations, the user -will often behave differently. In this section we’ll go through various forms of -"interjections" and how to handle them within Rasa. - -The decision to handle these types of user input should always come from reviewing -real conversations. You should first build part of your assistant, test it with real users -(whether that's your end user, or your colleague) and then add what's missing. You shouldn't -try to implement every possible edge case that you think might happen, because in the end -your users may never actually behave in that way. -`Rasa X `__ -is a tool that can help you review conversations and make these types of decisions. - -Generic interjections -""""""""""""""""""""" - -If you have generic interjections that should always have the same single response no -matter the context, you can use the :ref:`mapping-policy` to handle these. 
It will always -predict the same action for an intent, and when combined with a forgetting mechanism, -you don’t need to write any stories either. - -For example, let's say you see users having conversations like the following one with -your assistant, where they write a greeting in the middle of a conversation - -maybe because they were gone for a few minutes: - -.. image:: /_static/images/greet_interjection.png - :width: 240 - :alt: Greeting Interjection - :align: center - -The greet intent is a good example where we will always give the same response and -yet we don’t want the intent to affect the dialogue history. To do this, the response -must be an action that returns the ``UserUtteranceReverted()`` event to remove the -interaction from the dialogue history. - -First, open the ``domain.yml`` file and modify the greet intent and add a new block ```actions``` in -the file, next, add the ``action_greet`` as shown here: - -.. code-block:: yaml - - intents: - - greet: {triggers: action_greet} - - bye - - thank - - faq - - contact_sales - - inform - - actions: - - action_greet - -Remove any stories using the "greet" intent if you have them. - -Next, we need to define ``action_greet``. Add the following action to your ``actions.py`` file: - -.. code-block:: python - - from rasa_sdk import Action - from rasa_sdk.events import UserUtteranceReverted - - class ActionGreetUser(Action): - """Revertible mapped action for utter_greet""" - - def name(self): - return "action_greet" - - def run(self, dispatcher, tracker, domain): - dispatcher.utter_template("utter_greet", tracker) - return [UserUtteranceReverted()] - -To test the modified intents, we need to re-start our action server: - -.. code-block:: bash - - rasa run actions - -Then we can retrain the model, and try out our additions: - -.. code-block:: bash - - rasa train - rasa shell - -FAQs are another kind of generic interjections that should always get the same response. 
-For example, a user might ask a related FAQ in the middle of filling a form: - -.. image:: /_static/images/generic_interjection.png - :width: 240 - :alt: Generic Interjections - :align: center - -To handle FAQs defined with retrieval actions, you can add a simple story that will be handled by the MemoizationPolicy: - -.. code-block:: md - - ## just sales, continue - * contact_sales - - sales_form - - form{"name": "sales_form"} - * faq - - respond_faq - - sales_form - - form{"name": null} - -This will break out of the form and deal with the users FAQ question, and then return back to the original task. -For example: - -.. image:: /_static/images/generic_interjection_handled.png - :width: 240 - :alt: Generic Interjection Handled - :align: center - -If you find it difficult to write stories in this format, you can always use `Interactive Learning `_ -to help you create them. - -As always, make sure to add an end to end test case to your `test_stories.md` file. - -Contextual questions -"""""""""""""""""""" - -You can also handle `contextual questions `_, -like the user asking the question "Why do you need to know that". The user could ask this based on a certain slot -the bot has requested, and the response should differ for each slot. For example: - -.. image:: /_static/images/contextual_interjection.png - :width: 240 - :alt: Contextual Interjection - :align: center - -To handle this, we need to make the ``requested_slot`` featurized, and assign it the categorical type: - -.. code-block:: yaml - - slots: - requested_slot: - type: categorical - values: - - business_email - - company - - person_name - - use_case - - budget - - job_function - -This means that Core will pay attention to the value of the slot when making a prediction -(read more about other `featurized slots `_), whereas -unfeaturized slots are only used for storing information. The stories for this should look as follows: - -.. 
code-block:: md - - ## explain email - * contact_sales - - sales_form - - form{"name": "sales_form"} - - slot{"requested_slot": "business_email"} - * explain - - utter_explain_why_email - - sales_form - - form{"name": null} - - ## explain budget - * contact_sales - - sales_form - - form{"name": "sales_form"} - - slot{"requested_slot": "budget"} - * explain - - utter_explain_why_budget - - sales_form - - form{"name": null} - -We’ll need to add the intent and utterances we just added to our ``domain.yml`` file: - -.. code-block:: yaml - - intents: - - greet: {triggers: action_greet_user} - - bye - - thank - - faq - - explain - - responses: - utter_explain_why_budget: - - text: We need to know your budget to recommend a subscription - utter_explain_why_email: - - text: We need your email so we can contact you - -Finally, we’ll need to add some NLU data for the explain intent: - -.. code-block:: md - - ## intent:explain - - why - - why is that - - why do you need it - - why do you need to know that? - - could you explain why you need it? - -Then you can retrain your bot and test it again: - -.. code-block:: bash - - rasa train - rasa shell - -.. note:: - You will need to add a story for each of the values of the ``requested_slot`` slot - for the bot to handle every case of "Why do you need to know that" - -Don’t forget to add a few end to end stories to your ``test_stories.md`` for testing as well. - - -.. 
note:: - Here's a minimal checklist of files we modified to handle unexpected user input: - - ``actions.py``: Define ``action_greet`` - - ``data/nlu.md``: Add training data for an ``explain`` intent - - ``domain.yml``: - - - Map intent ``greet`` to ``action_greet_user`` - - Make ``requested_slot`` a categorical slot with all required slots as values - - Add the ``explain`` intent - - Add responses for contextual question interruptions - - - ``data/stories.md``: - - - Remove stories using mapped intents if you have them - - Add stories with FAQ & contextual interruptions in the middle of filling a form - - -.. _failing-gracefully: - -Failing gracefully -^^^^^^^^^^^^^^^^^^ - -Even if you design your bot perfectly, users will inevitably say things to your -assistant that you did not anticipate. In these cases, your assistant will fail, -and it’s important you ensure it does so gracefully. - -Fallback policy -""""""""""""""" - -One of the most common failures is low NLU confidence, which is handled very nicely with -the TwoStageFallbackPolicy. You can enable it by adding the following to your configuration file, - -.. code-block:: yaml - - policies: - - name: TwoStageFallbackPolicy - nlu_threshold: 0.8 - -and adding the ``out_of_scope`` intent to your ``domain.yml`` file: - -.. code-block:: yaml - - intents: - - out_of_scope - -When the nlu confidence falls below the defined threshold, the bot will prompt the user to -rephrase their message. If the bot isn’t able to get their message three times, there -will be a final action where the bot can e.g. hand off to a human. - -To try this out, retrain your model and send a message like "order me a pizza" to your bot: - -.. code-block:: bash - - rasa train - rasa shell - -There are also a bunch of ways in which you can customise this policy. 
In Sara, our demo bot, -we’ve customized it to suggest intents to the user within a certain confidence range to make -it easier for the user to give the bot the information it needs. - -This is done by customizing the action ``ActionDefaultAskAffirmation`` as shown in the -`Sara rasa-demo action server `_ -We define some intent mappings to make it more intuitive to the user what an intent means. - -.. image:: /_static/images/intent_mappings.png - :width: 240 - :alt: Intent Mappings - :align: center - -Out of scope intent -""""""""""""""""""" - -It is good practice to also handle questions you know your users may ask, but for which you haven't necessarily implemented a user goal yet. - -You can define an ``out_of_scope`` intent to handle generic out of scope requests, like "I’m hungry" and have -the bot respond with a default message like "Sorry, I can’t handle that request": - -.. code-block:: md - - * out_of_scope - utter_out_of_scope - -We’ll need to add NLU data for the ``out_of_scope`` intent as well: - -.. code-block:: md - - ## intent:out_of_scope - - I want to order food - - What is 2 + 2? - - Who’s the US President? - - I need a job - -And finally we’ll add a response to our ``domain.yml`` file: - -.. code-block:: yaml - - responses: - utter_out_of_scope: - - text: Sorry, I can’t handle that request. - -We can now re-train, and test this addition - -.. code-block:: bash - - rasa train - rasa shell - -Going one step further, if you observe your users asking for certain things, that you’ll -want to turn into a user goal in future, you can handle these as separate intents, to let -the user know you’ve understood their message, but don’t have a solution quite yet. E.g., -let’s say the user asks "I want to apply for a job at Rasa", we can then reply with -"I understand you’re looking for a job, but I’m afraid I can’t handle that skill yet." - -.. code-block:: md - - * ask_job - utter_job_not_handled - -.. 
note:: - Here's a minimal checklist of files we modified to help our assistant fail gracefully: - - - ``data/nlu.md``: - - - Add training data for the ``out_of_scope`` intent & any specific out of scope intents that you want to handle seperately - - - ``data/stories.md``: - - - Add stories for any specific out of scope intents - - - ``domain.yml``: - - - Add the ``out_of_scope`` intent & any specific out of scope intents - - Add an ``utter_out_of_scope`` response & responses for any specific out of scope intents - - - ``actions.py``: - - - Customise ``ActionDefaultAskAffirmation`` to suggest intents for the user to choose from - - - ``config.yml``: - - - Add the TwoStageFallbackPolicy to the ``policies`` section - - -.. _more-complex-contextual-conversations: - -More complex contextual conversations -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Not every user goal you define will fall under the category of business logic. For the -other cases you will need to use stories and context to help the user achieve their goal. - -If we take the example of the "getting started" skill from Sara, we want to give them -different information based on whether they’ve built an AI assistant before and are -migrating from a different tool etc. This can be done quite simply with stories and -the concept of `max history `_. - -.. 
code-block:: md - :emphasize-lines: 4,5,6,7,8,24,25,26,27,28 - - ## new to rasa + built a bot before - * how_to_get_started - - utter_getstarted - - utter_first_bot_with_rasa - * affirm - - action_set_onboarding - - slot{"onboarding": true} - - utter_built_bot_before - * affirm - - utter_ask_migration - * deny - - utter_explain_rasa_components - - utter_rasa_components_details - - utter_ask_explain_nlucorex - * affirm - - utter_explain_nlu - - utter_explain_core - - utter_explain_x - - utter_direct_to_step2 - - ## not new to rasa + core - * how_to_get_started - - utter_getstarted - - utter_first_bot_with_rasa - * deny - - action_set_onboarding - - slot{"onboarding": false} - - utter_ask_which_product - * how_to_get_started{"product": "core"} - - utter_explain_core - - utter_anything_else - - -The above example mostly leverages intents to guide the flow, however you can also -guide the flow with entities and slots. For example, if the user gives you the -information that they’re new to Rasa at the beginning, you may want to skip this -question by storing this information in a slot. - -.. code-block:: md - - * how_to_get_started{"user_type": "new"} - - slot{"user_type":"new"} - - action_set_onboarding - - slot{"onboarding": true} - - utter_getstarted_new - - utter_built_bot_before - -For this to work, keep in mind that the slot has to be featurized in your ``domain.yml`` -file. This time we can use the ``text`` slot type, as we only care about whether the -`slot was set or not `_. - -AugmentedMemoizationPolicy -"""""""""""""""""""""""""" - -To make your bot more robust to interjections, you can replace the MemoizationPolicy -with the AugmentedMemoizationPolicy. 
It works the same way as the MemoizationPolicy, -but if no exact match is found it additionally has a mechanism that forgets a certain -amount of steps in the conversation history to find a match in your stories (read more -`here `__) - -Using ML to generalise -"""""""""""""""""""""" - -Aside from the more rule-based policies we described above, Core also has some ML -policies you can use. These come in as an additional layer in your policy configuration, -and only jump in if the user follows a path that you have not anticipated. **It is important -to understand that using these policies does not mean letting go of control over your -assistant.** If a rule based policy is able to make a prediction, that prediction will -always have a higher priority (read more `here `__) and predict the next action. The -ML based policies give your assistant the chance not to fail, whereas if they are not -used your assistant will definitely fail, like in state machine based dialogue systems. - -These types of unexpected user behaviors are something our `TEDPolicy `_ deals with -very well. It can learn to bring the user back on track after some -interjections during the main user goal the user is trying to complete. For example, -in the conversation below (extracted from a conversation on `Rasa X `__): - -.. code-block:: md - - ## Story from conversation with a2baab6c83054bfaa8d598459c659d2a on November 28th 2019 - * greet - - action_greet_user - - slot{"shown_privacy":true} - * ask_whoisit - - action_chitchat - * ask_whatspossible - - action_chitchat - * telljoke - - action_chitchat - * how_to_get_started{"product":"x"} - - slot{"product":"x"} - - utter_explain_x - - utter_also_explain_nlucore - * affirm - - utter_explain_nlu - - utter_explain_core - - utter_direct_to_step2 - -Here we can see the user has completed a few chitchat tasks first, and then ultimately -asks how they can get started with Rasa X. 
The TEDPolicy correctly predicts that -Rasa X should be explained to the user, and then also takes them down the getting started -path, without asking all the qualifying questions first. - -Since the ML policy generalized well in this situation, it makes sense to add this story -to your training data to continuously improve your bot and help the ML generalize even -better in future. `Rasa X `_ is a tool that can help -you improve your bot and make it more contextual. diff --git a/docs/user-guide/cloud-storage.rst b/docs/user-guide/cloud-storage.rst deleted file mode 100644 index f726c6ec6947..000000000000 --- a/docs/user-guide/cloud-storage.rst +++ /dev/null @@ -1,67 +0,0 @@ -:desc: Handle Rasa models on premise or in your private cloud for - GDPR-compliant intent recognition and entity extraction. - -.. _cloud-storage: - -Cloud Storage -============= - -.. edit-link:: - -Rasa supports using `S3 `_ , -`GCS `_ and `Azure Storage `_ to save your models. - -* Amazon S3 Storage - - S3 is supported using the ``boto3`` module which you can - install with ``pip install boto3``. - - Start the Rasa server with ``remote-storage`` option set to - ``aws``. Get your S3 credentials and set the following - environment variables: - - - ``AWS_SECRET_ACCESS_KEY`` - - ``AWS_ACCESS_KEY_ID`` - - ``AWS_DEFAULT_REGION`` - - ``BUCKET_NAME`` - - ``AWS_ENDPOINT_URL`` - - If there is no bucket with the name ``BUCKET_NAME``, Rasa will create it. - -* Google Cloud Storage - - GCS is supported using the ``google-cloud-storage`` package, - which you can install with ``pip install google-cloud-storage``. - - Start the Rasa server with ``remote-storage`` option set to ``gcs``. - - When running on google app engine and compute engine, the auth - credentials are already set up. For running locally or elsewhere, - checkout their - `client repo `_ - for details on setting up authentication. 
It involves creating - a service account key file from google cloud console, - and setting the ``GOOGLE_APPLICATION_CREDENTIALS`` environment - variable to the path of that key file. - -* Azure Storage - - Azure is supported using the legacy ``azure-storage-blob`` package (v 2.1.0), - which you can install with ``pip install -I azure-storage-blob==2.1.0``. - - Start the Rasa server with ``remote-storage`` option set to ``azure``. - - The following environment variables must be set: - - - ``AZURE_CONTAINER`` - - ``AZURE_ACCOUNT_NAME`` - - ``AZURE_ACCOUNT_KEY`` - - If there is no container with the name ``AZURE_CONTAINER``, Rasa will create it. - -Models are gzipped before they are saved in the cloud. The gzipped file naming convention -is ``{MODEL_NAME}.tar.gz`` and it is stored in the root folder of the storage service. -Currently, you are not able to manually specify the path on the cloud storage. - -If storing trained models, Rasa will gzip the new model and upload it to the container. If retrieving/loading models -from the cloud storage, Rasa will download the gzipped model locally and extract the contents to a temporary directory. diff --git a/docs/user-guide/command-line-interface.rst b/docs/user-guide/command-line-interface.rst deleted file mode 100644 index 465ffcd0204f..000000000000 --- a/docs/user-guide/command-line-interface.rst +++ /dev/null @@ -1,314 +0,0 @@ -:desc: Command line interface for open source chatbot framework Rasa. Learn how to train, test and run your machine learning-based conversational AI assistants - -.. _command-line-interface: - -Command Line Interface -====================== - -.. edit-link:: - - -.. contents:: - :local: - -Cheat Sheet -~~~~~~~~~~~ - -The command line interface (CLI) gives you easy-to-remember commands for common tasks. 
- -========================= ============================================================================================= -Command Effect -========================= ============================================================================================= -``rasa init`` Creates a new project with example training data, actions, and config files. -``rasa train`` Trains a model using your NLU data and stories, saves trained model in ``./models``. -``rasa interactive`` Starts an interactive learning session to create new training data by chatting. -``rasa shell`` Loads your trained model and lets you talk to your assistant on the command line. -``rasa run`` Starts a Rasa server with your trained model. See the :ref:`configuring-http-api` docs for details. -``rasa run actions`` Starts an action server using the Rasa SDK. -``rasa visualize`` Visualizes stories. -``rasa test`` Tests a trained Rasa model using your test NLU data and stories. -``rasa data split nlu`` Performs a split of your NLU data according to the specified percentages. -``rasa data convert nlu`` Converts NLU training data between different formats. -``rasa export`` Export conversations from a tracker store to an event broker. -``rasa x`` Launch Rasa X locally. -``rasa -h`` Shows all available commands. -========================= ============================================================================================= - - -Create a new project -~~~~~~~~~~~~~~~~~~~~ - -A single command sets up a complete project for you with some example training data. - -.. code:: bash - - rasa init - - -This creates the following files: - -.. code:: bash - - . - ├── __init__.py - ├── actions.py - ├── config.yml - ├── credentials.yml - ├── data - │   ├── nlu.md - │   └── stories.md - ├── domain.yml - ├── endpoints.yml - ├── models - │ └── .tar.gz - └── tests - └── conversation_tests.md - -The ``rasa init`` command will ask you if you want to train an initial model using this data. 
-If you answer no, the ``models`` directory will be empty. - -With this project setup, common commands are very easy to remember. -To train a model, type ``rasa train``, to talk to your model on the command line, ``rasa shell``, -to test your model type ``rasa test``. - - -Train a Model -~~~~~~~~~~~~~ - -The main command is: - -.. code:: bash - - rasa train - - -This command trains a Rasa model that combines a Rasa NLU and a Rasa Core model. -If you only want to train an NLU or a Core model, you can run ``rasa train nlu`` or ``rasa train core``. -However, Rasa will automatically skip training Core or NLU if the training data and config haven't changed. - -``rasa train`` will store the trained model in the directory defined by ``--out``. The name of the model -is per default ``.tar.gz``. If you want to name your model differently, you can specify the name -using ``--fixed-model-name``. - -The following arguments can be used to configure the training process: - -.. program-output:: rasa train --help - - -.. note:: - - Make sure training data for Core and NLU are present when training a model using ``rasa train``. - If training data for only one model type is present, the command automatically falls back to - ``rasa train nlu`` or ``rasa train core`` depending on the provided training files. - - -Interactive Learning -~~~~~~~~~~~~~~~~~~~~ - -To start an interactive learning session with your assistant, run - -.. code:: bash - - rasa interactive - - -If you provide a trained model using the ``--model`` argument, the interactive learning process -is started with the provided model. If no model is specified, ``rasa interactive`` will -train a new Rasa model with the data located in ``data/`` if no other directory was passed to the -``--data`` flag. After training the initial model, the interactive learning session starts. -Training will be skipped if the training data and config haven't changed. 
- -The full list of arguments that can be set for ``rasa interactive`` is: - -.. program-output:: rasa interactive --help - -Talk to your Assistant -~~~~~~~~~~~~~~~~~~~~~~ - -To start a chat session with your assistant on the command line, run: - -.. code:: bash - - rasa shell - -The model that should be used to interact with your bot can be specified by ``--model``. -If you start the shell with an NLU-only model, ``rasa shell`` allows -you to obtain the intent and entities of any text you type on the command line. -If your model includes a trained Core model, you can chat with your bot and see -what the bot predicts as a next action. -If you have trained a combined Rasa model but nevertheless want to see what your model -extracts as intents and entities from text, you can use the command ``rasa shell nlu``. - -To increase the logging level for debugging, run: - -.. code:: bash - - rasa shell --debug - -.. note:: - In order to see the typical greetings and/or session start behavior you might see - in an external channel, you will need to explicitly send ``/session_start`` - as the first message. Otherwise, the session start behavior will begin as described in - :ref:`session_config`. - -The full list of options for ``rasa shell`` is: - -.. program-output:: rasa shell --help - - -Start a Server -~~~~~~~~~~~~~~ - -To start a server running your Rasa model, run: - -.. code:: bash - - rasa run - -The following arguments can be used to configure your Rasa server: - -.. program-output:: rasa run --help - -For more information on the additional parameters, see :ref:`configuring-http-api`. -See the Rasa :ref:`http-api` docs for detailed documentation of all the endpoints. - -.. _run-action-server: - -Start an Action Server -~~~~~~~~~~~~~~~~~~~~~~ - -To run your action server run - -.. code:: bash - - rasa run actions - -The following arguments can be used to adapt the server settings: - -.. 
program-output:: rasa run actions --help - - -Visualize your Stories -~~~~~~~~~~~~~~~~~~~~~~ - -To open a browser tab with a graph showing your stories: - -.. code:: bash - - rasa visualize - -Normally, training stories in the directory ``data`` are visualized. If your stories are located -somewhere else, you can specify their location with ``--stories``. - -Additional arguments are: - -.. program-output:: rasa visualize --help - - -Evaluating a Model on Test Data -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To evaluate your model on test data, run: - -.. code:: bash - - rasa test - - -Specify the model to test using ``--model``. -Check out more details in :ref:`nlu-evaluation` and :ref:`core-evaluation`. - -The following arguments are available for ``rasa test``: - -.. program-output:: rasa test --help - - -.. _train-test-split: - -Create a Train-Test Split -~~~~~~~~~~~~~~~~~~~~~~~~~ - -To create a split of your NLU data, run: - -.. code:: bash - - rasa data split nlu - - -You can specify the training data, the fraction, and the output directory using the following arguments: - -.. program-output:: rasa data split nlu --help - - -This command will attempt to keep the proportions of intents the same in train and test. -If you have NLG data for retrieval actions, this will be saved to separate files: - -.. code-block:: bash - - ls train_test_split - - nlg_test_data.md test_data.json - nlg_training_data.md training_data.json - -Convert Data Between Markdown and JSON -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To convert NLU data from LUIS data format, WIT data format, Dialogflow data format, JSON, or Markdown -to JSON or Markdown, run: - -.. code:: bash - - rasa data convert nlu - -You can specify the input file, output file, and the output format with the following arguments: - -.. program-output:: rasa data convert nlu --help - - -.. 
_section_export: - -Export Conversations to an Event Broker -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To export events from a tracker store using an event broker, run: - -.. code:: bash - - rasa export - -You can specify the location of the environments file, the minimum and maximum -timestamps of events that should be published, as well as the conversation IDs that -should be published. - -.. program-output:: rasa export --help - - -.. _section_evaluation: - -Start Rasa X -~~~~~~~~~~~~ - -.. raw:: html - - Rasa X is a toolset that helps you leverage conversations to improve your assistant. - You can find more information about it here. - -You can start Rasa X locally by executing - -.. code:: bash - - rasa x - -.. raw:: html - - To be able to start Rasa X you need to have Rasa X local mode installed - and you need to be in a Rasa project. - -.. note:: - - By default Rasa X runs on the port 5002. Using the argument ``--rasa-x-port`` allows you to change it to - any other port. - -The following arguments are available for ``rasa x``: - -.. program-output:: rasa x --help diff --git a/docs/user-guide/configuring-http-api.rst b/docs/user-guide/configuring-http-api.rst deleted file mode 100644 index 5be3371b11e4..000000000000 --- a/docs/user-guide/configuring-http-api.rst +++ /dev/null @@ -1,269 +0,0 @@ -:desc: Find out how to use Rasa's HTTP API to integrate Rasa - with your backend components. - -.. _configuring-http-api: - -Configuring the HTTP API -======================== - -.. edit-link:: - -.. contents:: - :local: - -Using Rasa's HTTP API ---------------------- - -.. note:: - - The instructions below are relevant for configuring how a model is run - within a Docker container or for testing the HTTP API locally. If you - want to deploy your assistant to users, see :ref:`deploying-your-rasa-assistant`. - -You can run a simple HTTP server that handles requests using your -trained Rasa model with: - -.. 
code-block:: bash - - rasa run -m models --enable-api --log-file out.log - -All the endpoints this API exposes are documented in :ref:`http-api`. - -The different parameters are: - -- ``-m``: the path to the folder containing your Rasa model, -- ``--enable-api``: enable this additional API, and -- ``--log-file``: the path to the log file. - -Rasa can load your model in three different ways: - -1. Fetch the model from a server (see :ref:`server_fetch_from_server`), or -2. Fetch the model from a remote storage (see :ref:`cloud-storage`). -3. Load the model specified via ``-m`` from your local storage system, - -Rasa tries to load a model in the above mentioned order, i.e. it only tries to load your model from your local -storage system if no model server and no remote storage were configured. - -.. warning:: - - Make sure to secure your server, either by restricting access to the server (e.g. using firewalls), or - by enabling an authentication method: :ref:`server_security`. - - -.. note:: - - If you are using custom actions, make sure your action server is - running (see :ref:`run-action-server`). If your actions are running - on a different machine, or you aren't using the Rasa SDK, make sure - to update your ``endpoints.yml`` file. - - -.. note:: - - If you start the server with an NLU-only model, not all the available endpoints - can be called. Be aware that some endpoints will return a 409 status code, as a trained - Core model is needed to process the request. - - -.. note:: - - By default, the HTTP server runs as a single process. You can change the number - of worker processes using the ``SANIC_WORKERS`` environment variable. It is - recommended that you set the number of workers to the number of available CPU cores - (check out the - `Sanic docs `_ - for more details). This will only work in combination with the - ``RedisLockStore`` (see :ref:`lock-stores`). - - -.. 
_server_fetch_from_server: - -Fetching Models from a Server -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -You can configure the HTTP server to fetch models from another URL: - -.. code-block:: bash - - rasa run --enable-api --log-file out.log --endpoints my_endpoints.yml - -The model server is specified in the endpoint configuration -(``my_endpoints.yml``), where you specify the server URL Rasa -regularly queries for zipped Rasa models: - -.. code-block:: yaml - - models: - url: http://my-server.com/models/default@latest - wait_time_between_pulls: 10 # [optional](default: 100) - -.. note:: - - If you want to pull the model just once from the server, set - ``wait_time_between_pulls`` to ``None``. - -.. note:: - - Your model server must provide zipped Rasa models, and have - ``{"ETag": }`` as one of its headers. Rasa will - only download a new model if this model hash has changed. - -Rasa sends requests to your model server with an ``If-None-Match`` -header that contains the current model hash. If your model server can -provide a model with a different hash from the one you sent, it should send it -in as a zip file with an ``ETag`` header containing the new hash. If not, Rasa -expects an empty response with a ``204`` or ``304`` status code. - -An example request Rasa might make to your model server looks like this: - -.. code-block:: bash - - $ curl --header "If-None-Match: d41d8cd98f00b204e9800998ecf8427e" http://my-server.com/models/default@latest - - -.. _server_fetch_from_remote_storage: - -Fetching Models from a Remote Storage -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -You can also configure the Rasa server to fetch your model from a remote storage: - -.. code-block:: bash - - rasa run -m 20190506-100418.tar.gz --enable-api --log-file out.log --remote-storage aws - -The model will be downloaded and stored in a temporary directory on your local storage system. -For more information see :ref:`cloud-storage`. - -.. 
_server_ssl: - -Configuring SSL / HTTPS ------------------------ - -By default the Rasa server is using HTTP for its communication. To secure the -communication with SSL, you need to provide a valid certificate and the corresponding -private key file. - -You can specify these files as part of the ``rasa run`` command: - -.. code-block:: bash - - rasa run --ssl-certificate myssl.crt --ssl-keyfile myssl.key - -If you encrypted your keyfile with a password during creation, you need to add -this password to the command: - -.. code-block:: bash - - rasa run --ssl-certificate myssl.crt --ssl-keyfile myssl.key --ssl-password mypassword - - -.. _server_security: - -Security Considerations ------------------------ - -We recommend to not expose the Rasa Server to the outside world, but -rather connect to it from your backend over a private connection (e.g. -between docker containers). - -Nevertheless, there are two authentication methods built in: - -**Token Based Auth:** - -Pass in the token using ``--auth-token thisismysecret`` when starting -the server: - -.. code-block:: bash - - rasa run \ - -m models \ - --enable-api \ - --log-file out.log \ - --auth-token thisismysecret - -Your requests should pass the token, in our case ``thisismysecret``, -as a parameter: - -.. code-block:: bash - - $ curl -XGET localhost:5005/conversations/default/tracker?token=thisismysecret - -**JWT Based Auth:** - -Enable JWT based authentication using ``--jwt-secret thisismysecret``. -Requests to the server need to contain a valid JWT token in -the ``Authorization`` header that is signed using this secret -and the ``HS256`` algorithm. - -The token's payload must contain an object under the ``user`` key, -which in turn must contain the ``username`` and ``role`` attributes. -If the ``role`` is ``admin``, all endpoints are accessible. -If the ``role`` is ``user``, endpoints with a ``sender_id`` parameter are only accessible -if the ``sender_id`` matches the payload's ``username`` property. - -.. 
code-block:: bash - - rasa run \ - -m models \ - --enable-api \ - --log-file out.log \ - --jwt-secret thisismysecret - - -Your requests should have set a proper JWT header: - -.. code-block:: text - - "Authorization": "Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ" - "zdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIi" - "wiaWF0IjoxNTE2MjM5MDIyfQ.qdrr2_a7Sd80gmCWjnDomO" - "Gl8eZFVfKXA6jhncgRn-I" - - -The following is an example payload for a JWT token: - -.. code-block:: json - - { - "user": { - "username": "", - "role": "user" - } - } - - -To create and encode the token, you can use tools such as the `JWT Debugger `_, or a Python module such as `PyJWT `_. - - -Endpoint Configuration ----------------------- - -To connect Rasa to other endpoints, you can specify an endpoint -configuration within a YAML file. -Then run Rasa with the flag -``--endpoints ``. - -For example: - -.. code-block:: bash - - rasa run \ - --m \ - --endpoints .yml - -.. note:: - You can use environment variables within configuration files by specifying them with ``${name of environment variable}``. - These placeholders are then replaced by the value of the environment variable. - -Connecting a Tracker Store -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To configure a tracker store within your endpoint configuration, -see :ref:`tracker-stores`. - -Connecting an Event Broker -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To configure an event broker within your endpoint configuration, -see :ref:`event-brokers`. diff --git a/docs/user-guide/connectors/cisco-webex-teams.rst b/docs/user-guide/connectors/cisco-webex-teams.rst deleted file mode 100644 index f4f1eead6840..000000000000 --- a/docs/user-guide/connectors/cisco-webex-teams.rst +++ /dev/null @@ -1,60 +0,0 @@ -:desc: Build a Rasa Chat Bot on Cisco Webex - -.. _cisco-webex-teams: - -Cisco Webex Teams -================= - -.. edit-link:: - -You first have to create a cisco webex app to get credentials. -Once you have them you can add these to your ``credentials.yml``. 
- -Getting Credentials -^^^^^^^^^^^^^^^^^^^ - -**How to get the Cisco Webex Teams credentials:** - -You need to set up a bot. Check out the Cisco Webex for Developers -`documentation `_ for information -about how to create your bot. - -After you have created the bot through Cisco Webex Teams, you need to create a -room in Cisco Webex Teams. Then add the bot in the room the same way you would -add a person in the room. - -You need to note down the room ID for the room you created. This room ID will -be used in ``room`` variable in the ``credentials.yml`` file. - -Please follow this link below to find the room ID -``https://developer.webex.com/endpoint-rooms-get.html`` - -Running on Cisco Webex -^^^^^^^^^^^^^^^^^^^^^^ - -If you want to connect to the ``webexteams`` input channel using the run -script, e.g. using: - -.. code-block:: bash - - rasa run - -you need to supply a ``credentials.yml`` with the following content: - -.. code-block:: yaml - - webexteams: - access_token: "YOUR-BOT-ACCESS-TOKEN" - room: "YOUR-CISCOWEBEXTEAMS-ROOM-ID" - - -The endpoint for receiving Cisco Webex Teams messages is -``http://localhost:5005/webhooks/webexteams/webhook``, replacing -the host and port with the appropriate values. This is the URL -you should use when registering the webhook for your bot. - -.. note:: - - If you do not set the ``room`` keyword - argument, messages will be delivered back to - the user who sent them. diff --git a/docs/user-guide/connectors/custom-connectors.rst b/docs/user-guide/connectors/custom-connectors.rst deleted file mode 100644 index 2c9720795644..000000000000 --- a/docs/user-guide/connectors/custom-connectors.rst +++ /dev/null @@ -1,82 +0,0 @@ -:desc: Deploy and Run a Rasa Chat Bot on a custom chat interface - -.. _custom-connectors: - -Custom Connectors -================= - -.. edit-link:: - -You can also implement your own custom channel. You can -use the ``rasa.core.channels.channel.RestInput`` class as a template. 
-The methods you need to implement are ``blueprint`` and ``name``. The method -needs to create a sanic blueprint that can be attached to a sanic server. - -This allows you to add REST endpoints to the server that the external -messaging service can call to deliver messages. - -Your blueprint should have at least the two routes: ``health`` on ``/``, -and ``receive`` on the HTTP route ``/webhook``. - -The ``name`` method defines the url prefix. E.g. if your component is -named ``myio``, the webhook you can use to attach the external service is: -``http://localhost:5005/webhooks/myio/webhook`` (replacing the hostname -and port with your values). - -To send a message, you would run a command like: - -.. code-block:: bash - - curl -XPOST http://localhost:5005/webhooks/myio/webhook \ - -d '{"sender": "user1", "message": "hello"}' \ - -H "Content-type: application/json" - -where ``myio`` is the name of your component. - -If you need to use extra information from your front end in your custom -actions, you can add this information in the ``metadata`` dict of your user -message. This information will accompany the user message through the rasa -server into the action server when applicable, where you can find it stored in -the ``tracker``. Message metadata will not directly affect NLU classification -or action prediction. If you want to change the way metadata is extracted for an -existing channel, you can overwrite the function ``get_metadata``. The return value -of this method will be passed to the ``UserMessage``. - -Here are all the attributes of ``UserMessage``: - -.. autoclass:: rasa.core.channels.UserMessage - - .. automethod:: __init__ - - -In your implementation of the ``receive`` endpoint, you need to make -sure to call ``on_new_message(UserMessage(text, output, sender_id))``. -This will tell Rasa Core to handle this user message. The ``output`` -is an output channel implementing the ``OutputChannel`` class. 
You can -either implement the methods for your particular chat channel (e.g. there -are methods to send text and images) or you can use the -``CollectingOutputChannel`` to collect the bot responses Core -creates while the bot is processing your messages and return -them as part of your endpoint response. This is the way the ``RestInput`` -channel is implemented. For examples on how to create and use your own output -channel, take a look at the implementations of the other -output channels, e.g. the ``SlackBot`` in ``rasa.core.channels.slack``. - -To use a custom channel, you need to supply a credentials configuration file -``credentials.yml`` with the command line argument ``--credentials``. -This credentials file has to contain the module path of your custom channel and -any required configuration parameters. For example, this could look like: - -.. code-block:: yaml - - mypackage.MyIO: - username: "user_name" - another_parameter: "some value" - -Here is an example implementation for an input channel that receives the messages, -hands them over to Rasa Core, collects the bot utterances, and returns -these bot utterances as the json response to the webhook call that -posted the message to the channel: - -.. literalinclude:: ../../../rasa/core/channels/channel.py - :pyobject: RestInput diff --git a/docs/user-guide/connectors/facebook-messenger.rst b/docs/user-guide/connectors/facebook-messenger.rst deleted file mode 100644 index bf7c27dfc230..000000000000 --- a/docs/user-guide/connectors/facebook-messenger.rst +++ /dev/null @@ -1,140 +0,0 @@ -:desc: Build a Rasa Chat Bot on Facebook Messenger - -.. _facebook-messenger: - -Facebook Messenger -================== - -.. edit-link:: - -Facebook Setup --------------- - -You first need to set up a facebook page and app to get credentials to connect to -Facebook Messenger. Once you have them you can add these to your ``credentials.yml``. 
- - -Getting Credentials -^^^^^^^^^^^^^^^^^^^ - -**How to get the Facebook credentials:** -You need to set up a Facebook app and a page. - - 1. To create the app head over to - `Facebook for Developers `_ - and click on **My Apps** → **Add New App**. - 2. Go onto the dashboard for the app and under **Products**, - find the **Messenger** section and click **Set Up**. Scroll down to - **Token Generation** and click on the link to create a new page for your - app. - 3. Create your page and select it in the dropdown menu for the - **Token Generation**. The shown **Page Access Token** is the - ``page-access-token`` needed later on. - 4. Locate the **App Secret** in the app dashboard under **Settings** → **Basic**. - This will be your ``secret``. - 5. Use the collected ``secret`` and ``page-access-token`` in your - ``credentials.yml``, and add a field called ``verify`` containing - a string of your choice. Start ``rasa run`` with the - ``--credentials credentials.yml`` option. - 6. Set up a **Webhook** and select at least the **messaging** and - **messaging_postback** subscriptions. Insert your callback URL which will - look like ``https:///webhooks/facebook/webhook``. Insert the - **Verify Token** which has to match the ``verify`` - entry in your ``credentials.yml``. - - -For more detailed steps, visit the -`Messenger docs `_. - - -Running On Facebook Messenger -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -If you want to connect to Facebook using the run script, e.g. using: - -.. code-block:: bash - - rasa run - -you need to supply a ``credentials.yml`` with the following content: - -.. code-block:: yaml - - facebook: - verify: "rasa-bot" - secret: "3e34709d01ea89032asdebfe5a74518" - page-access-token: "EAAbHPa7H9rEBAAuFk4Q3gPKbDedQnx4djJJ1JmQ7CAqO4iJKrQcNT0wtD" - -The endpoint for receiving Facebook messenger messages is -``http://localhost:5005/webhooks/facebook/webhook``, replacing -the host and port with the appropriate values. 
This is the URL -you should add in the configuration of the webhook. - -Supported response attachments ------------------------------- - -In addition to typical text, image, and custom responses, the Facebook Messenger -channel supports the following additional response template attachments: - -* `Buttons `_ - are structured the same as other Rasa buttons. Facebook API limits the amount of - buttons you can sent in a message to 3. If more than 3 buttons are provided in a - message, Rasa will ignore all provided buttons. - -* `Quick Replies `_ - provide a way to present a set of up to 13 buttons in-conversation that contain a - title and optional image, and appear prominently above the composer. You can also - use quick replies to request a person's email address or phone number. - - .. code-block:: yaml - - utter_fb_quick_reply_example: - - text: Hello World! - quick_replies: - - title: Text quick reply - payload: /example_intent - - title: Image quick reply - payload: /example_intent - image_url: http://example.com/img/red.png - # below are Facebook provided quick replies - # the title and payload will be filled - # with the user's information from their profile - - content_type: user_email - title: - payload: - - content_type: user_phone_number - title: - payload: - -.. note:: - - Both Quick Reply and Button titles in Facebook Messenger have a character limit of - 20. Titles longer than 20 characters will be truncated. - -* `Elements `_ - provide a way to create a horizontally scrollable list up to 10 content elements that - integrate buttons, images, and more alongside text a single message. - - .. code-block:: yaml - - utter_fb_element_example: - - text: Hello World! 
- elements: - - title: Element Title 1 - subtitle: Subtitles are supported - buttons: # note the button limit still applies here - - title: Example button A - payload: /example_intent - - title: Example button B - payload: /example_intent - - title: Example button C - payload: /example_intent - - title: Element Title 2 - image_url: http://example.com/img/red.png - buttons: - - title: Example button D - payload: /example_intent - - title: Example button E - payload: /example_intent - - title: Example button F - payload: /example_intent diff --git a/docs/user-guide/connectors/hangouts.rst b/docs/user-guide/connectors/hangouts.rst deleted file mode 100644 index 7e2a1a4d1491..000000000000 --- a/docs/user-guide/connectors/hangouts.rst +++ /dev/null @@ -1,75 +0,0 @@ -:desc: Build a Rasa Chat Bot on Google Hangouts Chat - -.. _google-hangouts-chat: - -Google Hangouts Chat -==================== - -.. edit-link:: - -Hangouts Chat Setup -------------------- - -This channel works similar to the standard Rasa REST channel. For each request from the channel, your bot will -send one response. The response will be displayed to the user either as text or a so-called card (for -more information, see the Cards section). - -In order to connect your Rasa bot to Google Hangouts Chat, you first need to create a project in -Google Developer Console that includes the Hangouts API. There you can specify your bot's endpoint -and also obtain your project id, which determines the scope for the OAuth2 authorization in case you -want to use OAuth2. The Hangouts Chat API sends a Bearer token with every request, but it is up to -the bot to actually verify the token, hence the channel also works without this. -For more information see the official Google resources https://developers.google.com/hangouts/chat. 
- -The possibility to implement asynchronous communication between Hangouts Chat and bot exists, but due -to the usually synchronous nature of Rasa bots, this functionality is not included in this channel. - -Running On Hangouts Chat -^^^^^^^^^^^^^^^^^^^^^^^^ - -If you want to connect to Hangouts Chat using the run script, e.g. using: - -.. code-block:: bash - - rasa run - -you don't need to supply a ``credentials.yml``. - -If you want to use OAuth2, simply put the project id obtained from the Google Developer Console into it. - -.. code-block:: yaml - - hangouts: - project_id: "12345678901" - -The endpoint for receiving Hangouts Chat messages is -``http://localhost:5005/webhooks/hangouts/webhook``, replacing -the host and port with the appropriate values. Hangouts Chat only forwards -messages to endpoints via ``https``, so take appropriate measures to add -it to your setup. - - -Cards and Interactive Cards -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -There are two ways in which Hangouts Chat will display bot messages, either as text or card. For each recevied -request, your bot will send all messages in one response. If one of those messages is a card (e.g. an image), -all other messages are converted to card format as well. - -Interactive cards trigger the ``CARD_CLICKED`` event for user interactions, e.g. when a button is clicked. When -creating an interactive card, e.g. via ``dispatcher.utter_button_message()`` in your ``actions.py``, you can -specify a payload for each button that is going to be returned with the ``CARD_CLICKED`` event and extracted -by the ``HangoutsInput`` channel (for example -``buttons=[{"text":"Yes!", "payload":"/affirm"}, {"text":"Nope.", "payload":"/deny"}])``. -Updating cards is not yet supported. - -For more detailed information on cards, visit the -`Hangouts docs `_. 
- - -Other Hangouts Chat Events -^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Except for ``MESSAGE`` and ``CARD_CLICKED``, Hangouts Chat knows two other event types, ``ADDED_TO_SPACE`` and -``REMOVED_FROM_SPACE``, which are triggered when your bot is added or removed from a direct message or chat room -space. The default intent names for these events can be modified in the ``HangoutsInput`` constructor method. diff --git a/docs/user-guide/connectors/mattermost.rst b/docs/user-guide/connectors/mattermost.rst deleted file mode 100644 index 2de0e3a5aea8..000000000000 --- a/docs/user-guide/connectors/mattermost.rst +++ /dev/null @@ -1,65 +0,0 @@ -:desc: Build a Rasa Chat Bot on Mattermost - -.. _mattermost: - -Mattermost ----------- - -.. edit-link:: - -You first have to create a mattermost app to get credentials. -Once you have them you can add these to your ``credentials.yml``. - -Getting Credentials -^^^^^^^^^^^^^^^^^^^ -Mattermost now uses bot accounts for better security. So you can use their guide to create -your bot to get your token required for the `credentials.yml` file. - -For more information on creating a bot account please see -`Bot Creation `_. - -For information on converting existing user account into bot account please see -`User Conversion `_. - -**How to set up the outgoing webhook:** - - 1. To create the Mattermost outgoing webhook, login to your Mattermost - team site and go to **Main Menu > Integrations > Outgoing Webhooks**. - 2. Click **Add outgoing webhook**. - 3. Fill out the details including the channel you want the bot in. - You will need to ensure the **trigger words** section is set up - with ``@yourbotname`` so that the bot doesn't trigger on everything - that is said. - 4. The **Content Type** must be set to ``application/json``. - 5. Make sure **trigger when** is set to value - **first word matches a trigger word exactly**. - 6. 
The callback url needs to be either your localhost address for Rasa, or your ngrok url where you - have your webhook running in Core or your public address, e.g. - ``http://test.example.com/webhooks/mattermost/webhook`` or ``http://localhost:5005/webhooks/mattermost/webhook``. - - -For more detailed steps, visit the -`Mattermost docs `_. - -Running on Mattermost -^^^^^^^^^^^^^^^^^^^^^ - -If you want to connect to the Mattermost input channel using the -run script, e.g. using: - -.. code-block:: bash - - rasa run - -you need to supply a ``credentials.yml`` with the following content: - -.. code-block:: yaml - - mattermost: - url: "https://chat.example.com/api/v4" - token: "xxxxx" # the token for the bot account from creating the bot step. - webhook_url: "https://server.example.com/webhooks/mattermost/webhook" - -The endpoint for receiving Mattermost channel messages -is ``/webhooks/mattermost/webhook``, the same as ``webhook_url`` here. You should -add this url also in the Mattermost outgoing webhook. diff --git a/docs/user-guide/connectors/microsoft-bot-framework.rst b/docs/user-guide/connectors/microsoft-bot-framework.rst deleted file mode 100644 index 7c9dd31713bc..000000000000 --- a/docs/user-guide/connectors/microsoft-bot-framework.rst +++ /dev/null @@ -1,29 +0,0 @@ -:desc: Build a Rasa Chat Bot on Microsoft Bot Framework - -.. _microsoft-bot-framework: - -Microsoft Bot Framework -======================= - -.. edit-link:: - -You first have to create a Microsoft app to get credentials. -Once you have them you can add these to your ``credentials.yml``. - -Running on Microsoft Bot Framework -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -If you want to connect to the botframework input channel using the -run script, e.g. using: - -.. code-block:: bash - - rasa run - -you need to supply a ``credentials.yml`` with the following content: - -.. 
code-block:: yaml - - botframework: - app_id: "MICROSOFT_APP_ID" - app_password: "MICROSOFT_APP_PASSWORD" diff --git a/docs/user-guide/connectors/rocketchat.rst b/docs/user-guide/connectors/rocketchat.rst deleted file mode 100644 index 4a63e7ea88d2..000000000000 --- a/docs/user-guide/connectors/rocketchat.rst +++ /dev/null @@ -1,52 +0,0 @@ -:desc: Build a Rasa Chat Bot on Rocketchat - -.. _rocketchat: - -RocketChat -========== - -.. edit-link:: - -Getting Credentials -^^^^^^^^^^^^^^^^^^^ - -**How to set up Rocket.Chat:** - - 1. Create a user that will be used to post messages, and set its - credentials at credentials file. - 2. Create a Rocket.Chat outgoing webhook by logging in as admin to - Rocket.Chat and going to - **Administration > Integrations > New Integration**. - 3. Select **Outgoing Webhook**. - 4. Set **Event Trigger** section to value **Message Sent**. - 5. Fill out the details, including the channel you want the bot - listen to. Optionally, it is possible to set the - **Trigger Words** section with ``@yourbotname`` so that the bot - doesn't trigger on everything that is said. - 6. Set your **URLs** section to the Rasa URL where you have your - webhook running in Core or your public address with - ``/webhooks/rocketchat/webhook``, e.g. - ``http://test.example.com/webhooks/rocketchat/webhook``. - -For more information on the Rocket.Chat Webhooks, see the -`Rocket.Chat Guide `_. - - -Running on RocketChat -^^^^^^^^^^^^^^^^^^^^^ - -If you want to connect to the Rocket.Chat input channel using the run -script, e.g. using: - -.. code-block:: bash - - rasa run - -you need to supply a ``credentials.yml`` with the following content: - -.. 
code-block:: yaml - - rocketchat: - user: "yourbotname" - password: "YOUR_PASSWORD" - server_url: "https://demo.rocket.chat" diff --git a/docs/user-guide/connectors/slack.rst b/docs/user-guide/connectors/slack.rst deleted file mode 100644 index d88baafc053a..000000000000 --- a/docs/user-guide/connectors/slack.rst +++ /dev/null @@ -1,83 +0,0 @@ -:desc: Build a Rasa Chat Bot on Slack - -.. _slack: - -Slack -===== - -.. edit-link:: - -You first have to create a Slack app to get credentials. -Once you have them you can add these to your ``credentials.yml``. - -Getting Credentials -^^^^^^^^^^^^^^^^^^^ - -**How to get the Slack credentials:** You need to set up a Slack app. - - 1. To create the app go to https://api.slack.com/apps and click - on **Create New App**. - - 2. Activate the following features: - - - Interactivity & Shortcuts > Interactivity (if your bot uses any `interactive components `_ , e.g. buttons) - - Add your Rasa request URL ``http://:/webhooks/slack/webhook``, replacing - the host and port with the appropriate values that point to your Rasa X or Rasa Open Source deployment. - - Event subscriptions > Subscribe to bot events: ``message.channels``, ``message.groups``, ``message.im``, ``message.mpim`` - - App Home > Always Show My Bot as Online - - 3. Get the ``Bot User OAuth Access Token`` from the OAuth & Permissions page. Click ``Install App to Workspace`` - and allow access to your workspace. You will need - to provide this value in your credentials later in these instructions. It should start - with ``xoxb``. - - 4. In the "OAuth & Permissions > Redirect URLs" enter the endpoint for receiving Slack messages. This is - the same URL you entered above for Interactivity & Shortcuts - ``http://:/webhooks/slack/webhook``. - - 5. Go to the "Event Subscriptions" section, turn on the "Enable Events" and add the endpoint here also. - -For more detailed steps, visit the -`Slack API docs `_. 
- -Running on Slack -^^^^^^^^^^^^^^^^ - -If you want to connect to the slack input channel using the run -script, e.g. using: - -.. code-block:: bash - - rasa run - -Do not forget to run the action server if this is required by your bot, -e.g. using: - -.. code-block:: bash - - rasa run actions - -You need to supply a ``credentials.yml`` with the following content: - -- The ``slack_channel`` can be a channel or an individual person that the bot should listen to for communications, in - addition to the default behavior of listening for direct messages and app mentions, i.e. "@app_name". To get the channel - id, right click on the channel choose Copy Link and the id will be the last component in the URL. - -- Use the entry for ``Bot User OAuth Access Token`` in the - "OAuth & Permissions" tab as your ``slack_token``. It should start - with ``xoxb``. - -.. code-block:: yaml - - slack: - slack_token: "xoxb-286425452756-safjasdf7sl38KLls" - slack_channel: "C011GR5D33F" - slack_retry_reason_header: "x-slack-retry-reason" # Slack HTTP header name indicating reason that slack send retry request. This configuration is optional. - slack_retry_number_header: "x-slack-retry-num" # Slack HTTP header name indicating the attempt number. This configuration is optional. - errors_ignore_retry: None # Any error codes given by Slack included in this list will be ignored. Error codes are listed `here `_. - - -The endpoint for receiving slack messages is -``http://localhost:5005/webhooks/slack/webhook``, replacing -the host and port with the appropriate values. This is the URL -you should add in the "OAuth & Permissions" section as well as -the "Event Subscriptions". diff --git a/docs/user-guide/connectors/telegram.rst b/docs/user-guide/connectors/telegram.rst deleted file mode 100644 index b333201342e2..000000000000 --- a/docs/user-guide/connectors/telegram.rst +++ /dev/null @@ -1,46 +0,0 @@ -:desc: Build a Rasa Chat Bot on Telegram - -.. _telegram: - -Telegram -======== - -.. 
edit-link:: - -You first have to create a Telegram bot to get credentials. -Once you have them you can add these to your ``credentials.yml``. - -Getting Credentials -^^^^^^^^^^^^^^^^^^^ - -**How to get the Telegram credentials:** -You need to set up a Telegram bot. - - 1. To create the bot, go to `Bot Father `_, - enter ``/newbot`` and follow the instructions. - 2. At the end you should get your ``access_token`` and the username you - set will be your ``verify``. - 3. If you want to use your bot in a group setting, it's advisable to - turn on group privacy mode by entering ``/setprivacy``. Then the bot - will only listen when a user's message starts with ``/bot``. - -For more information, check out the `Telegram HTTP API -`_. - -Running on Telegram -^^^^^^^^^^^^^^^^^^^ - -If you want to connect to telegram using the run script, e.g. using: - -.. code-block:: bash - - rasa run - -you need to supply a ``credentials.yml`` with the following content: - -.. code-block:: yaml - - telegram: - access_token: "490161424:AAGlRxinBRtKGb21_rlOEMtDFZMXBl6EC0o" - verify: "your_bot" - webhook_url: "https://your_url.com/webhooks/telegram/webhook" diff --git a/docs/user-guide/connectors/twilio.rst b/docs/user-guide/connectors/twilio.rst deleted file mode 100644 index 3b2ddb68de3b..000000000000 --- a/docs/user-guide/connectors/twilio.rst +++ /dev/null @@ -1,68 +0,0 @@ -:desc: Deploy a Rasa Open Source assistant through text message or WhatsApp via the Twilio connector - -.. _twilio: - -Twilio -====== - -.. edit-link:: - -You can use the Twilio connector to deploy an assistant that is available over text message. - -.. contents:: - :local: - -Getting Credentials -^^^^^^^^^^^^^^^^^^^ - -You first have to create a Twilio app to get credentials. -Once you have them you can add these to your ``credentials.yml``. - -**How to get the Twilio credentials:** -You need to set up a Twilio account. - - 1. Once you have created a Twilio account, you need to create a new - project. 
The basic important product to select here - is ``Programmable SMS``. - 2. Once you have created the project, navigate to the Dashboard of - ``Programmable SMS`` and click on ``Get Started``. Follow the - steps to connect a phone number to the project. - 3. Now you can use the ``Account SID``, ``Auth Token``, and the phone - number you purchased in your ``credentials.yml``. - 4. Configure your webhook URL by navigating to - [Phone Numbers](https://www.twilio.com/console/phone-numbers/incoming) in the Twilio - dashboard and selecting your phone number. Find the ``Messaging`` section and add - your webhook URL (e.g. ``https://:/webhooks/twilio/webhook``, - replacing the host and port with your running Rasa X or Rasa Open Source server) - to the ``A MESSAGE COMES IN`` setting. - - -For more information, see the `Twilio REST API `_. - - -Connecting to WhatsApp ----------------------- - -You can deploy a Rasa Open Source assistant to WhatsApp through Twilio. However, to do so, you have -to have a `WhatsApp Business `_ profile. Associate -your Whatsapp Business profile with the phone number you purchased through Twilio to -access the `Twilio API for WhatsApp `_. - -According to the `Twilio API documentation `_, -the phone number you use should be prefixed with `whatsapp:` in the ``credentials.yml`` described below. - - -Applying the Credentials -^^^^^^^^^^^^^^^^^^^^^^^^ - -Add the Twilio credentials to your ``credentials.yml``: - -.. code-block:: yaml - - twilio: - account_sid: "ACbc2dxxxxxxxxxxxx19d54bdcd6e41186" - auth_token: "e231c197493a7122d475b4xxxxxxxxxx" - twilio_number: "+440123456789" # if using WhatsApp: "whatsapp:+440123456789" - -Make sure to restart your Rasa Open Source server or container to make changes to -which connectors are available. 
diff --git a/docs/user-guide/connectors/your-own-website.rst b/docs/user-guide/connectors/your-own-website.rst deleted file mode 100644 index 89ef40502d21..000000000000 --- a/docs/user-guide/connectors/your-own-website.rst +++ /dev/null @@ -1,139 +0,0 @@ -:desc: Deploy and Run a Rasa Chat Bot on a Website - -.. _your-own-website: - -Your Own Website -================ - -.. edit-link:: - -If you just want an easy way for users to test your bot, the best option -is usually the chat interface that ships with Rasa X, where you can `invite users -to test your bot `_. - -If you already have an existing website and want to add a Rasa assistant to it, -you can use `Chatroom `_, a widget which you can incorporate into your existing webpage by adding a HTML snippet. -Alternatively, you can also build your own chat widget. - - -Websocket Channel -~~~~~~~~~~~~~~~~~ - -The SocketIO channel uses websockets and is real-time. You need to supply -a ``credentials.yml`` with the following content: - -.. code-block:: yaml - - socketio: - user_message_evt: user_uttered - bot_message_evt: bot_uttered - session_persistence: true/false - -The first two configuration values define the event names used by Rasa Core -when sending or receiving messages over socket.io. - -By default, the socketio channel uses the socket id as ``sender_id``, which causes -the session to restart at every page reload. ``session_persistence`` can be -set to ``true`` to avoid that. In that case, the frontend is responsible -for generating a session id and sending it to the Rasa Core server by -emitting the event ``session_request`` with ``{session_id: [session_id]}`` -immediately after the ``connect`` event. - -The example `Webchat `_ -implements this session creation mechanism (version >= 0.5.0). - - -.. _rest_channels: - -REST Channels -~~~~~~~~~~~~~ - - -The ``RestInput`` and ``CallbackInput`` channels can be used for custom integrations. 
-They provide a URL where you can post messages and either receive response messages -directly, or asynchronously via a webhook. - - -RestInput -^^^^^^^^^ - -The ``rest`` channel will provide you with a REST endpoint to post messages -to and in response to that request will send back the bots messages. -Here is an example on how to connect the ``rest`` input channel -using the run script: - -.. code-block:: bash - - rasa run - -you need to ensure your ``credentials.yml`` has the following content: - -.. code-block:: yaml - - rest: - # you don't need to provide anything here - this channel doesn't - # require any credentials - -After connecting the ``rest`` input channel, you can post messages to -``POST /webhooks/rest/webhook`` with the following format: - -.. code-block:: json - - { - "sender": "Rasa", - "message": "Hi there!" - } - -The response to this request will include the bot responses, e.g. - -.. code-block:: json - - [ - {"text": "Hey Rasa!"}, {"image": "http://example.com/image.jpg"} - ] - - -.. _callbackInput: - -CallbackInput -^^^^^^^^^^^^^ - -The ``callback`` channel behaves very much like the ``rest`` input, -but instead of directly returning the bot messages to the HTTP -request that sends the message, it will call a URL you can specify -to send bot messages. - -Here is an example on how to connect the -``callback`` input channel using the run script: - -.. code-block:: bash - - rasa run - -you need to supply a ``credentials.yml`` with the following content: - -.. code-block:: yaml - - callback: - # URL to which Core will send the bot responses - url: "http://localhost:5034/bot" - -After connecting the ``callback`` input channel, you can post messages to -``POST /webhooks/callback/webhook`` with the following format: - -.. code-block:: json - - { - "sender": "Rasa", - "message": "Hi there!" - } - -The response will simply be ``success``. 
Once Core wants to send a -message to the user, it will call the URL you specified with a ``POST`` -and the following ``JSON`` body: - -.. code-block:: json - - [ - {"text": "Hey Rasa!"}, {"image": "http://example.com/image.jpg"} - ] diff --git a/docs/user-guide/docker/building-in-docker.rst b/docs/user-guide/docker/building-in-docker.rst deleted file mode 100644 index 1938f5753a93..000000000000 --- a/docs/user-guide/docker/building-in-docker.rst +++ /dev/null @@ -1,278 +0,0 @@ -:desc: Learn how to build a Rasa assistant in Docker. - -.. _building-in-docker: - -Building a Rasa Assistant in Docker -=================================== - -If you don't have a Rasa project yet, you can build one in Docker without having to install Rasa Open Source -on your local machine. If you already have a model you're satisfied with, see -:ref:`deploying-your-rasa-assistant` to learn how to deploy your model. - -.. contents:: - :local: - -Installing Docker -***************** - -If you're not sure if you have Docker installed, you can check by running: - - .. code-block:: bash - - docker -v - # Docker version 18.09.2, build 6247962 - -If Docker is installed on your machine, the output should show you your installed -versions of Docker. If the command doesn't work, you'll have to install Docker. -See `Docker Installation `_ for details. - -Setting up your Rasa Project -**************************** - -Just like in the :ref:`tutorial `, you'll use the ``rasa init`` command to create a project. -The only difference is that you'll be running Rasa inside a Docker container, using -the image ``rasa/rasa``. To initialize your project, run: - - .. parsed-literal:: - - docker run -v $(pwd):/app rasa/rasa:\ |release|-full init --no-prompt - -What does this command mean? - -- ``-v $(pwd):/app`` mounts your current working directory to the working directory - in the Docker container. 
This means that files you create on your computer will be - visible inside the container, and files created in the container will - get synced back to your computer. -- ``rasa/rasa`` is the name of the docker image to run. '|release|-full' is the name of the tag, - which specifies the version and dependencies. -- the Docker image has the ``rasa`` command as its entrypoint, which means you don't - have to type ``rasa init``, just ``init`` is enough. - -Running this command will produce a lot of output. What happens is: - -- a Rasa project is created -- an initial model is trained using the project's training data. - -To check that the command completed correctly, look at the contents of your working directory: - - .. code-block:: bash - - ls -1 - -The initial project files should all be there, as well as a ``models`` directory that contains your trained model. - - -.. note:: - - If you run into permission errors, it may be because the ``rasa/rasa`` images - run as user ``1001`` as a best practice, to avoid giving the container ``root`` permissions. - Hence, all files created by these containers will be owned by user ``1001``. See the `Docker documentation - `_ - if you want to run the containers as a different user. - -Talking to Your Assistant -************************* - -To talk to your newly-trained assistant, run this command: - - .. parsed-literal:: - - docker run -it -v $(pwd):/app rasa/rasa:\ |release|-full shell - -This will start a shell where you can chat to your assistant. -Note that this command includes the flags ``-it``, which means that you are running -Docker interactively, and you are able to give input via the command line. -For commands which require interactive input, like ``rasa shell`` and ``rasa interactive``, -you need to pass the ``-it`` flags. - - -.. _model_training_docker: - -Training a Model -**************** - -If you edit the NLU or Core training data or edit the ``config.yml`` file, you'll need to -retrain your Rasa model. 
You can do so by running: - - .. parsed-literal:: - - docker run -v $(pwd):/app rasa/rasa:\ |release|-full train --domain domain.yml --data data --out models - -Here's what's happening in that command: - - - ``-v $(pwd):/app``: Mounts your project directory into the Docker - container so that Rasa can train a model on your training data - - rasa/rasa:|release|-full: Use the Rasa image with the tag '|release|-full' - - ``train``: Execute the ``rasa train`` command within the container. For more - information see :ref:`command-line-interface`. - -In this case, we've also passed values for the location of the domain file, training -data, and the models output directory to show how these can be customized. -You can also leave these out, since we are passing the default values. - -Customizing your Model -********************** - -.. _choosing-a-tag: - -Choosing a Tag -############## - -All ``rasa/rasa`` image tags start with a version number. The current version is |release|. The tags are: - -- ``{version}`` -- ``{version}-full`` -- ``{version}-spacy-en`` -- ``{version}-spacy-de`` -- ``{version}-mitie-en`` - - -The ``{version}-full`` tag includes all possible pipeline dependencies, allowing you to change your ``config.yml`` -as you like without worrying about missing dependencies. The plain ``{version}`` tag includes all the -dependencies you need to run the default pipeline created by ``rasa init``. - -To keep images as small as possible, we also publish different tags of the ``rasa/rasa`` image -with different dependencies installed. See :ref:`choosing-a-pipeline` for more dependency information -specific to your pipeline. For example, if you are using components with pre-trained word vectors from spaCy or -MITIE, you should choose the corresponding tag. - -If your model has a dependency that is not included in any of the tags (for example, a different spaCy language model), -you can build a docker image that extends the ``rasa/rasa`` image. - -.. 
note:: - - You can see a list of all the versions and tags of the Rasa Open Source - Docker image on `DockerHub `_. - -.. warning:: - - The ``latest`` tags correspond to the current master build. These tags are not recommended for use, - as they are not guaranteed to be stable. - -Adding Custom Components -######################## - -If you are using a custom NLU component or policy in your ``config.yml``, you have to add the module file to your -Docker container. You can do this by either mounting the file or by including it in your -own custom image (e.g. if the custom component or policy has extra dependencies). Make sure -that your module is in the Python module search path by setting the -environment variable ``PYTHONPATH=$PYTHONPATH:``. - - -Adding Custom Actions -##################### - -To create more sophisticated assistants, you will want to use :ref:`custom-actions`. -Continuing the example from above, you might want to add an action which tells -the user a joke to cheer them up. - -Start by creating the custom actions in a directory ``actions`` in your working directory: - - .. code-block:: bash - - mkdir actions - mv actions.py actions/actions.py - # Rasa SDK expects a python module. - # Therefore, make sure that you have this file in the directory. - touch actions/__init__.py - - -Then build a custom action using the Rasa SDK by editing ``actions/actions.py``, for example: - - .. 
code-block:: python - - import requests - import json - from rasa_sdk import Action - - - class ActionJoke(Action): - def name(self): - return "action_joke" - - def run(self, dispatcher, tracker, domain): - request = requests.get('http://api.icndb.com/jokes/random').json() # make an api call - joke = request['value']['joke'] # extract a joke from returned json response - dispatcher.utter_message(text=joke) # send the message back to the user - return [] - -In ``data/stories.md``, replace ``utter_cheer_up`` in with the custom action ``action_joke`` -tell your bot to use this new action. - -In ``domain.yml``, add a section for custom actions, including your new action: - - .. code-block:: yaml - - actions: - - action_joke - -After updating your domain and stories, you have to retrain your model: - - .. parsed-literal:: - - docker run -v $(pwd):/app rasa/rasa:\ |release|-full train - -Your actions will run on a separate server from your Rasa server. First create a network to connect the two containers: - - .. code-block:: bash - - docker network create my-project - -You can then run the actions with the following command: - - .. parsed-literal:: - - docker run -d -v $(pwd)/actions:/app/actions --net my-project --name action-server rasa/rasa-sdk:\ |rasa_sdk_version| - - -Here's what's happening in that command: - - - ``-d``: Runs the container in detached mode so that you can run the rasa container in the same window. 
- - ``-v $(pwd):/app``: Mounts your project directory into the Docker - container so that the action server can run the code in the ``actions`` folder - - ``net my-project``: Run the server on a specific network so that the rasa container can find it - - ``--name action-server``: Gives the server a specific name for the rasa server to reference - - rasa/rasa-sdk:|rasa_sdk_version| : Uses the Rasa SDK image with the tag |rasa_sdk_version| - - -Because the action server is running in detached mode, if you want to stop the container, -do it with ``docker stop action-server``. You can also run ``docker ps`` at any time to see all -of your currently running containers. - -To instruct the Rasa server to use the action server, you have to tell Rasa its location. -Add this endpoint to your ``endpoints.yml``, referencing the ``--name`` you gave the server: - - .. code-block:: yaml - - action_endpoint: - url: "http://action-server:5055/webhook" - -Now you can talk to your bot again via the ``shell`` command: - - .. parsed-literal:: - - docker run -it -v $(pwd):/app -p 5005:5005 --net my-project rasa/rasa:\ |release|-full shell - -.. note:: - - If you stop and restart the ``action-server`` container, you might see an error like this: - - .. code-block:: none - - docker: Error response from daemon: Conflict. The container name "/action-server" is - already in use by container "f7ffc625e81ad4ad54cf8704e6ad85123c71781ca0a8e4b862f41c5796c33530". - You have to remove (or rename) that container to be able to reuse that name. - - If that happens, it means you have a (stopped) container with the name already. You can remove it via: - - .. code-block:: bash - - docker rm action-server - -Deploying your Assistant -************************ - -Work on your bot until you have a minimum viable assistant that can handle your happy paths. After -that, you'll want to deploy your model to get feedback from real test users. 
To do so, you can deploy the -model you created with Rasa X via one of our :ref:`recommended deployment methods`. -Or, you can do a :ref:`Rasa-only deployment in Docker Compose`. diff --git a/docs/user-guide/docker/deploying-in-docker-compose.rst b/docs/user-guide/docker/deploying-in-docker-compose.rst deleted file mode 100644 index e286ed0e389b..000000000000 --- a/docs/user-guide/docker/deploying-in-docker-compose.rst +++ /dev/null @@ -1,149 +0,0 @@ -:desc: Use Docker Compose to deploy a Rasa Open Source assistant - -.. _deploying-rasa-in-docker-compose: - -Deploying a Rasa Open Source Assistant in Docker Compose -======================================================== - -If you would like to deploy your assistant without Rasa X, you can do so by deploying it in Docker Compose. -To deploy Rasa X and your assistant together, see the :ref:`recommended-deployment-methods`. - -.. contents:: - :local: - :depth: 1 - - -Installing Docker -~~~~~~~~~~~~~~~~~ - -If you're not sure if you have Docker installed, you can check by running: - - .. code-block:: bash - - docker -v && docker-compose -v - # Docker version 18.09.2, build 6247962 - # docker-compose version 1.23.2, build 1110ad01 - -If Docker is installed on your machine, the output should show you your installed -versions of Docker and Docker Compose. If the command doesn't work, you'll have to -install Docker. -See `Docker Installation `_ for details. - - -.. _docker-compose-configuring-channels: - -Configuring Channels -~~~~~~~~~~~~~~~~~~~~ - -To run your AI assistant in production, don't forget to configure your required -:ref:`messaging-and-voice-channels` in ``credentials.yml``. For example, to add a -REST channel, uncomment this section in the ``credentials.yml``: - - .. code-block:: yaml - - rest: - # you don't need to provide anything here - this channel doesn't - # require any credentials - -The REST channel will open your bot up to incoming requests at the ``/webhooks/rest/webhook`` endpoint. - - -.. 
_running-multiple-services: - -Using Docker Compose to Run Multiple Services -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Docker Compose provides an easy way to run multiple containers together without -having to run multiple commands or configure networks. This is essential when you -want to deploy an assistant that also has an action server. - -.. contents:: - :local: - :depth: 2 - -Start by creating a file called ``docker-compose.yml``: - - .. code-block:: bash - - touch docker-compose.yml - -Add the following content to the file: - - .. parsed-literal:: - - version: '3.0' - services: - rasa: - image: rasa/rasa:\ |release|-full - ports: - - 5005:5005 - volumes: - - ./:/app - command: - - run - -The file starts with the version of the Docker Compose specification that you -want to use. -Each container is declared as a ``service`` within the ``docker-compose.yml``. -The first service is the ``rasa`` service, which runs your Rasa server. - -To add the action server, add the image of your action server code. To learn how to deploy -an action server image, see :ref:`building-an-action-server-image`. - - .. parsed-literal:: - - version: '3.0' - services: - rasa: - image: rasa/rasa:\ |release|-full - ports: - - 5005:5005 - volumes: - - ./:/app - command: - - run - app: - image: - expose: 5055 - -The ``expose: 5055`` is what allows the ``rasa`` service to reach the ``app`` service on that port. -To instruct the ``rasa`` service to send its action requests to that endpoint, add it to your ``endpoints.yml``: - - .. code-block:: yaml - - action_endpoint: - url: http://app:5055/webhook - -To run the services configured in your ``docker-compose.yml`` execute: - - .. code-block:: bash - - docker-compose up - -You should then be able to interact with your bot via requests to port 5005, on the webhook endpoint that -corresponds to a :ref:`configured channel `: - - .. 
code-block:: bash - - curl -XPOST http://localhost:5005/webhooks/rest/webhook \ - -H "Content-type: application/json" \ - -d '{"sender": "test", "message": "hello"}' - - -Configuring a Tracker Store -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -By default, all conversations are saved in memory. This means that all -conversations are lost as soon as you restart the Rasa server. -If you want to persist your conversations, you can use a different -:ref:`Tracker Store `. - -To add a tracker store to a Docker Compose deployment, you need to add a new -service to your ``docker-compose.yml`` and modify the ``endpoints.yml`` to add -the new tracker store, pointing to your new service. More information about how -to do so can be found in the tracker store documentation: - - - :ref:`sql-tracker-store` - - :ref:`redis-tracker-store` - - :ref:`mongo-tracker-store` - - :ref:`custom-tracker-store` diff --git a/docs/user-guide/how-to-deploy.rst b/docs/user-guide/how-to-deploy.rst deleted file mode 100644 index ccb5e8548c9a..000000000000 --- a/docs/user-guide/how-to-deploy.rst +++ /dev/null @@ -1,193 +0,0 @@ -:desc: How to deploy your Rasa Assistant with Docker Compose or Kubernetes/Openshift - -.. _deploying-your-rasa-assistant: - -Deploying Your Rasa Assistant -============================= - -.. edit-link:: - -This page explains when and how to deploy an assistant built with Rasa. -It will allow you to make your assistant available to users and set you up with a production-ready environment. - -.. contents:: - :local: - :depth: 2 - - -When to Deploy Your Assistant ------------------------------ - -The best time to deploy your assistant and make it available to test users is once it can handle the most -important happy paths or is what we call a `minimum viable assistant `_. - -The recommended deployment methods described below make it easy to share your assistant -with test users via the `share your assistant feature in -Rasa X `_. 
-Then, when you’re ready to make your assistant available via one or more :ref:`messaging-and-voice-channels`, -you can easily add them to your existing deployment set up. - -.. _recommended-deployment-methods: - -Recommended Deployment Methods ------------------------------- - -The recommended way to deploy an assistant is using either the Server Quick-Install or Helm Chart -options we support. Both deploy Rasa X and your assistant. They are the easiest ways to deploy your assistant, -allow you to use Rasa X to view conversations and turn them into training data, and are production-ready. -For more details on deployment methods see the `Rasa X Installation Guide `_. - -Server Quick-Install -~~~~~~~~~~~~~~~~~~~~ - -The Server Quick-Install script is the easiest way to deploy Rasa X and your assistant. It installs a Kubernetes -cluster on your machine with sensible defaults, getting you up and running in one command. - - - Default: Make sure you meet the `OS Requirements `_, - then run: - - .. copyable:: - - curl -s get-rasa-x.rasa.com | sudo bash - - - Custom: See `Customizing the Script `_ - and the `Server Quick-Install docs `_ docs. - -Helm Chart -~~~~~~~~~~ - -For assistants that will receive a lot of user traffic, setting up a Kubernetes or Openshift deployment via -our Helm charts is the best option. This provides a scalable architecture that is also straightforward to deploy. -However, you can also customize the Helm charts if you have specific requirements. - - - Default: Read the `Helm Chart Installation `_ docs. - - Custom: Read the above, as well as the `Advanced Configuration `_ - documentation, and customize the `open source Helm charts `_ to your needs. - -.. _rasa-only-deployment: - -Alternative Deployment Methods ------------------------------- - -Docker Compose -~~~~~~~~~~~~~~ - -You can also run Rasa X in a Docker Compose setup, without the cluster environment. 
We have an install script -for doing so, as well as manual instructions for any custom setups. - - - Default: Read the `Docker Compose Install Script `_ docs or watch the `Masterclass Video `_ on deploying Rasa X. - - Custom: Read the `Docker Compose Manual Install `_ documentation for full customization options. - -Rasa Open Source Only Deployment -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -It is also possible to deploy a Rasa assistant without Rasa X using Docker Compose. To do so, you can build your -Rasa Assistant locally or in Docker. Then you can deploy your model in Docker Compose. - -.. toctree:: - :titlesonly: - :maxdepth: 1 - - Building a Rasa Assistant Locally - docker/building-in-docker - docker/deploying-in-docker-compose - - -Deploying Your Action Server ----------------------------- - -.. _building-an-action-server-image: - -Building an Action Server Image -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -If you build an image that includes your action code and store it in a container registry, you can run it -as part of your deployment, without having to move code between servers. -In addition, you can add any additional dependencies of systems or Python libraries -that are part of your action code but not included in the base ``rasa/rasa-sdk`` image. - -To create your image: - - #. Move your actions code to a folder ``actions`` in your project directory. - Make sure to also add an empty ``actions/__init__.py`` file: - - .. code-block:: bash - - mkdir actions - mv actions.py actions/actions.py - touch actions/__init__.py # the init file indicates actions.py is a python module - - The ``rasa/rasa-sdk`` image will automatically look for the actions in ``actions/actions.py``. - - #. If your actions have any extra dependencies, create a list of them in a file, - ``actions/requirements-actions.txt``. - - #. 
Create a file named ``Dockerfile`` in your project directory, - in which you'll extend the official SDK image, copy over your code, and add any custom dependencies (if necessary). - For example: - - .. parsed-literal:: - - # Extend the official Rasa SDK image - FROM rasa/rasa-sdk:|rasa_sdk_version| - - # Use subdirectory as working directory - WORKDIR /app - - # Copy any additional custom requirements, if necessary (uncomment next line) - # COPY actions/requirements-actions.txt ./ - - # Change back to root user to install dependencies - USER root - - # Install extra requirements for actions code, if necessary (uncomment next line) - # RUN pip install -r requirements-actions.txt - - # Copy actions folder to working directory - COPY ./actions /app/actions - - # By best practices, don't run the code with root user - USER 1001 - -You can then build the image via the following command: - - .. code-block:: bash - - docker build . -t /: - -The ```` should reference how this image will be different from others. For -example, you could version or date your tags, as well as create different tags that have different code for production -and development servers. You should create a new tag any time you update your code and want to re-deploy it. - - -Using your Custom Action Server Image -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -If you're building this image to make it available from another server, -for example a Rasa X or Rasa Enterprise deployment, you should push the image to a cloud repository. - -This documentation assumes you are pushing your images to `DockerHub `_. -DockerHub will let you host multiple public repositories and -one private repository for free. Be sure to first `create an account `_ -and `create a repository `_ to store your images. You could also push images to -a different Docker registry, such as `Google Container Registry `_, -`Amazon Elastic Container Registry `_, or -`Azure Container Registry `_. - -You can push the image to DockerHub via: - - .. 
code-block:: bash - - docker login --username --password - docker push /: - -To authenticate and push images to a different container registry, please refer to the documentation of -your chosen container registry. - -How you reference the custom action image will depend on your deployment. Pick the relevant documentation for -your deployment: - - - `Server Quick-Install `_ - - `Helm Chart `_ - - `Docker Compose `_ - - :ref:`Rasa Open Source Only ` diff --git a/docs/user-guide/installation.rst b/docs/user-guide/installation.rst deleted file mode 100644 index e6fda2c3b73f..000000000000 --- a/docs/user-guide/installation.rst +++ /dev/null @@ -1,265 +0,0 @@ -:desc: Install Rasa Open Source on premises to enable local and customizable - Natural Language Understanding and Dialogue Management. -:meta_image: https://i.imgur.com/nGF1K8f.jpg - -.. _installation: - -============ -Installation -============ - -.. edit-link:: - -Quick Installation -~~~~~~~~~~~~~~~~~~ - -You can install Rasa Open Source using pip (requires Python 3.6 or 3.7). - -.. code-block:: bash - - $ pip3 install rasa - -- Having trouble installing? Read our :ref:`step-by-step installation guide `. -- You can also :ref:`build Rasa Open Source from source `. -- For advanced installation options such as building from source and installation instructions for - custom pipelines, head over :ref:`here `. -- Prefer following video instructions? Watch our installation series on `Youtube `_. - - -When you're done installing, you can head over to the tutorial! - -.. button:: - :text: Next Step: Tutorial - :link: ../rasa-tutorial/ - - - -| - -------------------------------------------- - -.. _installation_guide: - -Step-by-step Installation Guide -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -1. Install the Python development environment ---------------------------------------------- - -Check if your Python environment is already configured: - -.. 
code-block:: bash - - $ python3 --version - $ pip3 --version - -If these packages are already installed, these commands should display version -numbers for each step, and you can skip to the next step. - -Otherwise, proceed with the instructions below to install them. - -.. tabs:: - - .. tab:: Ubuntu - - Fetch the relevant packages using ``apt``, and install virtualenv using ``pip``. - - .. code-block:: bash - - $ sudo apt update - $ sudo apt install python3-dev python3-pip - - .. tab:: macOS - - Install the `Homebrew `_ package manager if you haven't already. - - Once you're done, you can install Python3. - - .. code-block:: bash - - $ brew update - $ brew install python - - .. tab:: Windows - - .. raw:: html - - Make sure the Microsoft VC++ Compiler is installed, so python can compile - any dependencies. You can get the compiler from Visual Studio. Download the installer and select - VC++ Build tools in the list. - - Install `Python 3 `_ (64-bit version) for Windows. - - .. code-block:: bat - - C:\> pip3 install -U pip - -.. note:: - Note that `pip` in this refers to `pip3` as Rasa Open Source requires python3. To see which version - the `pip` command on your machine calls use `pip --version`. - - -2. Create a virtual environment (strongly recommended) ------------------------------------------------------- - -Tools like `virtualenv `_ and `virtualenvwrapper `_ provide isolated Python environments, which are cleaner than installing packages systemwide (as they prevent dependency conflicts). They also let you install packages without root privileges. - -.. tabs:: - - .. tab:: Ubuntu / macOS - - Create a new virtual environment by choosing a Python interpreter and making a ``./venv`` directory to hold it: - - .. code-block:: bash - - $ python3 -m venv ./venv - - Activate the virtual environment: - - .. code-block:: bash - - $ source ./venv/bin/activate - - .. 
tab:: Windows - - Create a new virtual environment by choosing a Python interpreter and making a ``.\venv`` directory to hold it: - - .. code-block:: bat - - C:\> python3 -m venv ./venv - - Activate the virtual environment: - - .. code-block:: bat - - C:\> .\venv\Scripts\activate - - -3. Install Rasa Open Source ---------------------------- - -.. tabs:: - - .. tab:: Ubuntu / macOS / Windows - - First make sure your ``pip`` version is up to date: - - .. code-block:: bash - - $ pip install -U pip - - To install Rasa Open Source: - - .. code-block:: bash - - $ pip install rasa - -**Congratulations! You have successfully installed Rasa Open Source!** - -You can now head over to the tutorial. - -.. button:: - :text: Next Step: Tutorial - :link: ../rasa-tutorial/ - -| - -------------------------------------------- - - -.. _build_from_source: - -Building from Source -~~~~~~~~~~~~~~~~~~~~ - -If you want to use the development version of Rasa Open Source, you can get it from GitHub: - -.. code-block:: bash - - $ curl -sSL https://raw.githubusercontent.com/python-poetry/poetry/master/get-poetry.py | python - $ git clone https://github.com/RasaHQ/rasa.git - $ cd rasa - $ poetry install - --------------------------------- - -.. _pipeline_dependencies: - -NLU Pipeline Dependencies -~~~~~~~~~~~~~~~~~~~~~~~~~ - -Several NLU components have additional dependencies that need to -be installed separately. - -Here, you will find installation instructions for each of them below. - -How do I choose a pipeline? ---------------------------- - -The page on :ref:`choosing-a-pipeline` will help you pick the right pipeline -for your assistant. - -I have decided on a pipeline. How do I install the dependencies for it? ------------------------------------------------------------------------ - -When you install Rasa Open Source, the dependencies for the ``supervised_embeddings`` - TensorFlow -and sklearn_crfsuite get automatically installed. 
However, spaCy and MITIE need to be separately installed if you want to use pipelines containing components from those libraries. - -.. admonition:: Just give me everything! - - If you don't mind the additional dependencies lying around, you can use - this to install everything. - - You'll first need to clone the repository and then run the following - command to install all the packages: - - .. code-block:: bash - - $ poetry install --extras full - -.. _install-spacy: - -Dependencies for spaCy -###################### - - -For more information on spaCy, check out the `spaCy docs `_. - -You can install it with the following commands: - -.. code-block:: bash - - $ pip install rasa[spacy] - $ python -m spacy download en_core_web_md - $ python -m spacy link en_core_web_md en - -This will install Rasa Open Source as well as spaCy and its language model -for the English language. We recommend using at least the -"medium" sized models (``_md``) instead of the spaCy's -default small ``en_core_web_sm`` model. Small models require less -memory to run, but will somewhat reduce intent classification performance. - -.. _install-mitie: - -Dependencies for MITIE -###################### - -First, run - -.. code-block:: bash - - $ pip install git+https://github.com/mit-nlp/MITIE.git - $ pip install rasa[mitie] - -and then download the -`MITIE models `_. -The file you need is ``total_word_feature_extractor.dat``. Save this -anywhere. If you want to use MITIE, you need to -tell it where to find this file (in this example it was saved in the -``data`` folder of the project directory). - -.. warning:: - - Mitie support is likely to be deprecated in a future release. 
diff --git a/docs/user-guide/messaging-and-voice-channels.rst b/docs/user-guide/messaging-and-voice-channels.rst deleted file mode 100644 index 9b03ddb7b0c1..000000000000 --- a/docs/user-guide/messaging-and-voice-channels.rst +++ /dev/null @@ -1,69 +0,0 @@ -:desc: Check out how to make your Rasa assistant available on platforms like - Facebook Messenger, Slack, Telegram or even your very own website. - -.. _messaging-and-voice-channels: - -Messaging and Voice Channels -============================ - -.. edit-link:: - -To make your assistant available on a messaging platform you need to provide credentials -in a ``credentials.yml`` file. -An example file is created when you run ``rasa init``, so it's easiest to edit that file -and add your credentials there. Here is an example with Facebook credentials: - - -.. code-block:: yaml - - facebook: - verify: "rasa-bot" - secret: "3e34709d01ea89032asdebfe5a74518" - page-access-token: "EAAbHPa7H9rEBAAuFk4Q3gPKbDedQnx4djJJ1JmQ7CAqO4iJKrQcNT0wtD" - - -Learn how to make your assistant available on: - -.. toctree:: - :titlesonly: - :maxdepth: 1 - - connectors/your-own-website - connectors/facebook-messenger - connectors/slack - connectors/telegram - connectors/twilio - connectors/microsoft-bot-framework - connectors/cisco-webex-teams - connectors/rocketchat - connectors/mattermost - connectors/hangouts - connectors/custom-connectors - - -.. _using-ngrok: - -Testing Channels on Your Local Machine with Ngrok -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -You can use `ngrok `_ to create a connection to your local -computer that is publicly available on the internet. -You don't need this when running Rasa on a server because, you can set up a domain -name to point to that server's IP address, or use the IP address itself. - -After installing ngrok, run: - -.. code-block:: bash - - ngrok http 5005; rasa run - -Your webhook address will look like the following: - -- ``https://yyyyyy.ngrok.io/webhooks//webhook``, e.g. 
-- ``https://yyyyyy.ngrok.io/webhooks/facebook/webhook`` - -.. warning:: - - With the free-tier of ngrok, you can run into limits on how many connections you can make per minute. - As of writing this, it is set to 40 connections / minute. - diff --git a/docs/user-guide/rasa-tutorial.rst b/docs/user-guide/rasa-tutorial.rst deleted file mode 100644 index 7c97bfa4684a..000000000000 --- a/docs/user-guide/rasa-tutorial.rst +++ /dev/null @@ -1,272 +0,0 @@ -:desc: This tutorial will show you the different parts needed to build a - chatbot or AI assistant using open source Rasa. - -.. _rasa-tutorial: - -Tutorial: Rasa Basics -===================== - -.. edit-link:: - -This page explains the basics of building an assistant with Rasa and -shows the structure of a Rasa project. You can test it out right here without -installing anything. -You can also :ref:`install Rasa ` and follow along in your command line. - -The :ref:`glossary` contains an overview of the most common terms you’ll see in the Rasa documentation. - - - -.. contents:: - :local: - - -In this tutorial, you will build a simple, friendly assistant which will ask how you're doing -and send you a fun picture to cheer you up if you are sad. - -.. image:: /_static/images/mood_bot.png - - -1. Create a New Project -^^^^^^^^^^^^^^^^^^^^^^^ - -The first step is to create a new Rasa project. To do this, run: - -.. runnable:: - - rasa init --no-prompt - - -The ``rasa init`` command creates all the files that a Rasa project needs and -trains a simple bot on some sample data. -If you leave out the ``--no-prompt`` flag you will be asked some questions about -how you want your project to be set up. 
- -This creates the following files: - - -+-------------------------------+--------------------------------------------------------+ -| ``__init__.py`` | an empty file that helps python find your actions | -+-------------------------------+--------------------------------------------------------+ -| ``actions.py`` | code for your custom actions | -+-------------------------------+--------------------------------------------------------+ -| ``config.yml`` '*' | configuration of your NLU and Core models | -+-------------------------------+--------------------------------------------------------+ -| ``credentials.yml`` | details for connecting to other services | -+-------------------------------+--------------------------------------------------------+ -| ``data/nlu.md`` '*' | your NLU training data | -+-------------------------------+--------------------------------------------------------+ -| ``data/stories.md`` '*' | your stories | -+-------------------------------+--------------------------------------------------------+ -| ``domain.yml`` '*' | your assistant's domain | -+-------------------------------+--------------------------------------------------------+ -| ``endpoints.yml`` | details for connecting to channels like fb messenger | -+-------------------------------+--------------------------------------------------------+ -| ``models/.tar.gz`` | your initial model | -+-------------------------------+--------------------------------------------------------+ - - - -The most important files are marked with a '*'. -You will learn about all of these in this tutorial. - - -2. View Your NLU Training Data -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The first piece of a Rasa assistant is an NLU model. -NLU stands for Natural Language Understanding, which means turning -user messages into structured data. To do this with Rasa, -you provide training examples that show how Rasa should understand -user messages, and then train a model by showing it those examples. 
- -Run the code cell below to see the NLU training data created by -the ``rasa init`` command: - - -.. runnable:: - - cat data/nlu.md - - - - -The lines starting with ``##`` define the names of your ``intents``, which -are groups of messages with the same meaning. Rasa's job will be to -predict the correct intent when your users send new, unseen messages to -your assistant. You can find all the details of the data format in :ref:`training-data-format`. - -.. _model-configuration: - -3. Define Your Model Configuration -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The configuration file defines the NLU and Core components that your model -will use. In this example, your NLU model will use the -``supervised_embeddings`` pipeline. You can learn about the different NLU pipelines -:ref:`here `. - -Let's take a look at your model configuration file. - -.. runnable:: - - cat config.yml - - - -The ``language`` and ``pipeline`` keys specify how the NLU model should be built. -The ``policies`` key defines the :ref:`policies ` that the Core model will use. - - - -4. Write Your First Stories -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -At this stage, you will teach your assistant how to respond to your messages. -This is called dialogue management, and is handled by your Core model. - -Core models learn from real conversational data in the form of training "stories". -A story is a real conversation between a user and an assistant. -Lines with intents and entities reflect the user's input and action names show what the -assistant should do in response. - -Below is an example of a simple conversation. -The user says hello, and the assistant says hello back. -This is how it looks as a story: - -.. code-block:: story - - ## story1 - * greet - - utter_greet - - -You can see the full details in :ref:`stories`. - -Lines that start with ``-`` are actions taken by the assistant. 
-In this tutorial, all of our actions are messages sent back to the user, -like ``utter_greet``, but in general, an action can do anything, -including calling an API and interacting with the outside world. - -Run the command below to view the example stories inside the file ``data/stories.md``: - - -.. runnable:: - - cat data/stories.md - - - -5. Define a Domain -^^^^^^^^^^^^^^^^^^ - -The next thing we need to do is define a :ref:`Domain `. -The domain defines the universe your assistant lives in: what user inputs it -should expect to get, what actions it should be able to predict, how to -respond, and what information to store. -The domain for our assistant is saved in a -file called ``domain.yml``: - - - -.. runnable:: - - cat domain.yml - - - -So what do the different parts mean? - - -+---------------+-------------------------------------------------------------+ -| ``intents`` | things you expect users to say | -+---------------+-------------------------------------------------------------+ -| ``actions`` | things your assistant can do and say | -+---------------+-------------------------------------------------------------+ -| ``responses`` | response strings for the things your assistant can say | -+---------------+-------------------------------------------------------------+ - - -**How does this fit together?** -Rasa Core's job is to choose the right action to execute at each step -of the conversation. In this case, our actions simply send a message to the user. -These simple utterance actions are the ``actions`` in the domain that start -with ``utter_``. The assistant will respond with a message based on a response -from the ``responses`` section. See :ref:`custom-actions` -to build actions that do more than just send a message. - - - -6. Train a Model -^^^^^^^^^^^^^^^^ - -Anytime we add new NLU or Core data, or update the domain or configuration, we -need to re-train a neural network on our example stories and NLU data. -To do this, run the command below. 
This command will call the Rasa Core and NLU train -functions and store the trained model -into the ``models/`` directory. The command will automatically only retrain the -different model parts if something has changed in their data or configuration. - - - -.. runnable:: - - rasa train - - echo "Finished training." - - - -The ``rasa train`` command will look for both NLU and Core data and will train a combined model. - -7. Test Your Assistant -^^^^^^^^^^^^^^^^^^^^^^ - -After you train a model, you always want to check that your assistant still behaves as you expect. -In Rasa Open Source, you use end-to-end tests defined in your ``tests/`` directory to run through -test conversations that ensure both NLU and Core make correct predictions. - -.. runnable:: - - rasa test - - echo "Finished running tests." - -See :ref:`testing-your-assistant` to learn more about how to evaluate your model as you improve it. - -8. Talk to Your Assistant -^^^^^^^^^^^^^^^^^^^^^^^^^ - -Congratulations! 🚀 You just built an assistant -powered entirely by machine learning. - -The next step is to try it out! -If you're following this tutorial on your local machine, start talking to your -assistant by running: - -.. code-block:: bash - - rasa shell - - -Next Steps -^^^^^^^^^^ - -Now that you've built your first Rasa bot it's time to learn about -some more advanced Rasa features. - -- Learn how to implement business logic using :ref:`forms ` -- Learn how to integrate other APIs using :ref:`custom actions ` -- Learn how to connect your bot to different :ref:`messaging apps ` -- Learn about customizing the :ref:`components ` in your NLU pipeline -- Read about custom and built-in :ref:`entities ` - -You can also use Rasa X to collect more conversations -and improve your assistant: - -.. button:: - :text: Try Rasa X - :link: ../../../rasa-x/ - -.. 
juniper:: - :language: bash diff --git a/docs/user-guide/setting-up-ci-cd.rst b/docs/user-guide/setting-up-ci-cd.rst deleted file mode 100644 index ae26b5573c97..000000000000 --- a/docs/user-guide/setting-up-ci-cd.rst +++ /dev/null @@ -1,215 +0,0 @@ -:desc: Set up a CI/CD pipeline to ensure that iterative improvements to your assistant are tested - and deployed with minimum manual effort - -.. _setting-up-ci-cd: - -Setting up CI/CD -================ - -Even though developing a contextual assistant is different from developing traditional -software, you should still follow software development best practices. -Setting up a Continuous Integration (CI) and Continuous Deployment (CD) -pipeline ensures that incremental updates to your bot are improving it, not harming it. - -.. contents:: - :local: - :depth: 2 - - -Overview --------- - -Continuous Integration (CI) is the practice of merging in code changes -frequently and automatically testing changes as they are committed. Continuous -Deployment (CD) means automatically deploying integrated changes to a staging -or production environment. Together, they allow you to make more frequent improvements -to your assistant and efficiently test and deploy those changes. - -This guide will cover **what** should go in a CI/CD pipeline, specific to a -Rasa project. **How** you implement that pipeline is up to you. -There are many CI/CD tools out there, such as `GitHub Actions `_, -`GitLab CI/CD `_, `Jenkins `_, and -`CircleCI `_. We recommend choosing a tool that integrates with -whatever Git repository you use. - - -Continuous Integration (CI) ---------------------------- - -The best way to improve an assistant is with frequent `incremental updates -`_. -No matter how small a change is, you want to be sure that it doesn't introduce -new problems or negatively impact the performance of your assistant. - -It is usually best to run CI checks on merge / pull requests or on commit. Most tests are -quick enough to run on every change. 
However, you can choose to run more -resource-intensive tests only when certain files have been changed or when some -other indicator is present. For example, if your code is hosted on Github, -you can make a test run only if the pull request has a certain label (e.g. "NLU testing required"). - -.. contents:: - :local: - -Validate Data and Stories -######################### - -:ref:`Data validation ` verifies that there are no mistakes or -major inconsistencies in your domain file, NLU data, or story data. - -.. code-block:: bash - - rasa data validate --fail-on-warnings --max-history - -If data validation results in errors, training a model will also fail. By -including the ``--fail-on-warnings`` flag, validation will also fail on -warnings about problems that won't prevent training a model, but might indicate -messy data, such as actions listed in the domain that aren't used in any -stories. - -Data validation includes :ref:`story structure validation `. -Story validation checks if you have any -stories where different bot actions follow from the same dialogue history. -Conflicts between stories will prevent a model from learning the correct -pattern for a dialogue. Set the ``--max-history`` parameter to the value of ``max_history`` for the -memoization policy in your ``config.yml``. If you haven't set one, use the default of ``5``. - -Train a Model -############# - -.. code-block:: bash - - rasa train - -Training a model verifies that your NLU pipeline and policy configurations are -valid and trainable, and it provides a model to use for test conversations. -If it passes the CI tests, then you can also :ref:`upload the trained model ` -to your server as part of the continuous deployment process . - -.. _test-the-assistant: - -Test the Assistant -################## - -Testing your trained model on :ref:`test conversations -` is the best way to have confidence in how your assistant -will act in certain situations. 
These stories, written in a modified story -format, allow you to provide entire conversations and test that, given this -user input, your model will behave in the expected manner. This is especially -important as you start introducing more complicated stories from user -conversations. - - -.. code-block:: bash - - rasa test --stories tests/conversation_tests.md --fail-on-prediction-errors - -The ``--fail-on-prediction-errors`` flag ensures the test will fail if any test -conversation fails. - -End-to-end testing is only as thorough and accurate as the test -cases you include, so you should continue to grow your set of test conversations -as you make improvements to your assistant. A good rule of thumb to follow is that you should aim for your test conversations -to be representative of the true distribution of real conversations. -Rasa X makes it easy to `add test conversations based on real conversations `_. - -Note: End-to-end testing does **not** execute your action code. You will need to -:ref:`test your action code ` in a seperate step. - -Compare NLU Performance -####################### - -If you've made significant changes to your NLU training data (e.g. -splitting an intent into two intents or adding a lot of training examples), you should run a -:ref:`full NLU evaluation `. You'll want to compare -the performance of the NLU model without your changes to an NLU model with your -changes. - -You can do this by running NLU testing in cross-validation mode: - -.. code-block:: bash - - rasa test nlu --cross-validation - -You could also train a model on a training set and testing it on a test set. If you use the train-test -set approach, it is best to :ref:`shuffle and split your data ` using ``rasa data split`` as part of this CI step, as -opposed to using a static NLU test set, which can easily become outdated. - -Because this test doesn't result in a pass/fail exit code, it's best to make -the results visible so that you can interpret them. 
-For example, `this workflow `_ -includes commenting on a PR with a results table that shows which intents are confused with others. - -Since NLU comparison can be a fairly resource intensive test, you may choose to run this test -only when certain conditions are met. Conditions might include the presence of a manual label (e.g. "NLU -testing required"), changes to NLU data, or changes to the NLU pipeline. - -.. _testing-action-code: - -Test Action Code -################ - -The approach used to test your action code will depend on how it is -implemented. For example, if you connect to external APIs, it is recommended to write unit tests to ensure -that those APIs respond as expected to common inputs. However you test your action code, you should -include these tests in your CI pipeline so that they run each time you make changes. - -Continuous Deployment (CD) --------------------------- - -To get improvements out to your users frequently, you will want to automate as -much of the deployment process as possible. - -CD steps usually run on push or merge to a certain branch, once CI checks have -succeeded. - -.. contents:: - :local: - -.. _uploading-a-model: - -Deploy your Rasa Model -###################### - -If you ran :ref:`end-to-end tests ` in your CI pipeline, -you'll already have a trained model. You can set up your CD pipeline to upload the trained model to your -Rasa server if the CI results are satisfactory. For example, to upload a model to Rasa X: - -.. code-block:: bash - - curl -k -F "model=@models/my_model.tar.gz" "https://example.rasa.com/api/projects/default/models?api_token={your_api_token}" - -If you are using Rasa X, you can also `tag the uploaded model `_ -as ``active`` (or whichever deployment you want to tag if using multiple `deployment environments `_): - -.. 
code-block:: bash - - curl -X PUT "https://example.rasa.com/api/projects/default/models/my_model/tags/active" - - -However, if your update includes changes to both your model and your action -code, and these changes depend on each other in any way, you should **not** -automatically tag the model as ``production``. You will first need to build and -deploy your updated action server, so that the new model won't e.g. call -actions that don't exist in the pre-update action server. - -Deploy your Action Server -######################### - -You can automate -:ref:`building and uploading a new image for your action server `, -to an image repository for each -update to your action code. As noted above, you should be careful with -automatically deploying a new image tag to production if the action server -would be incompatible with the current production model. - -Example CI/CD pipelines ------------------------ - -As examples, see the CI/CD pipelines for -`Sara `_, -the Rasa assistant that you can talk to in the Rasa Docs, and -`Carbon Bot `_. -Both use `Github Actions `_ as a CI/CD tool. - -These examples are just two of many possibilities. If you have a CI/CD setup you like, please -share it with the Rasa community on the `forum `_. diff --git a/docs/user-guide/testing-your-assistant.rst b/docs/user-guide/testing-your-assistant.rst deleted file mode 100644 index 1276fbd25ea5..000000000000 --- a/docs/user-guide/testing-your-assistant.rst +++ /dev/null @@ -1,320 +0,0 @@ -:desc: Test your Rasa Open Source assistant to validate and improve your - conversations - -.. _testing-your-assistant: - -Testing Your Assistant -====================== - -.. edit-link:: - -.. contents:: - :local: - -.. note:: - If you are looking to tune the hyperparameters of your NLU model, - check out this `tutorial `_. - -.. 
_end-to-end-testing: - -End-to-End Testing ------------------- - -Rasa Open Source lets you test dialogues end-to-end by running through -test conversations and making sure that both NLU and Core make correct predictions. - -To do this, you need some stories in the end-to-end format, -which includes both the NLU output and the original text. -Here are some examples: - -.. tabs:: - - .. group-tab:: Basics - - .. code-block:: story - - ## A basic end-to-end test - * greet: hello - - utter_ask_howcanhelp - * inform: show me [chinese](cuisine) restaurants - - utter_ask_location - * inform: in [Paris](location) - - utter_ask_price - - .. group-tab:: Custom Actions - - .. code-block:: story - - ## End-to-End tests where a custom action appends events - * greet: hi - - my_custom_action - - - slot{"my_slot": "value added by custom action"} - - utter_ask_age - * thankyou: thanks - - utter_noworries - - .. group-tab:: Forms Happy Path - - .. code-block:: story - - ## Testing a conversation with a form - * greet: hi - - utter_greet - * request_restaurant: im looking for a restaurant - - restaurant_form - - form{"name": "restaurant_form"} - * inform: [afghan](cuisine) food - - form: restaurant_form - - form{"name": null} - - utter_slots_values - * thankyou: thanks - - utter_noworries - - .. group-tab:: Forms Unhappy Path - - .. code-block:: story - - ## Testing a conversation with a form and unexpected user input - * greet: hi - - utter_greet - * request_restaurant: im looking for a restaurant - - restaurant_form - - form{"name": "restaurant_form"} - - * chitchat: can you share your boss with me? - - utter_chitchat - - restaurant_form - - form{"name": null} - - utter_slots_values - * thankyou: thanks - - utter_noworries - -By default Rasa Open Source saves conversation tests to ``tests/conversation_tests.md``. -You can test your assistant against them by running: - -.. code-block:: bash - - $ rasa test - -.. 
note:: - - :ref:`custom-actions` are not executed as part of end-to-end tests. If your custom - actions append any events to the tracker, this has to be reflected in your end-to-end - tests (e.g. by adding ``slot`` events to your end-to-end story). - -If you have any questions or problems, please share them with us in the dedicated -`testing section on our forum `_ ! - -.. note:: - - Make sure your model file in ``models`` is a combined ``core`` - and ``nlu`` model. If it does not contain an NLU model, Core will use - the default ``RegexInterpreter``. - -.. _nlu-evaluation: - -Evaluating an NLU Model ------------------------ - -A standard technique in machine learning is to keep some data separate as a *test set*. -You can :ref:`split your NLU training data ` -into train and test sets using: - -.. code-block:: bash - - rasa data split nlu - - -If you've done this, you can see how well your NLU model predicts the test cases using this command: - -.. code-block:: bash - - rasa test nlu -u train_test_split/test_data.md --model models/nlu-20180323-145833.tar.gz - - -If you don't want to create a separate test set, you can -still estimate how well your model generalises using cross-validation. -To do this, add the flag ``--cross-validation``: - -.. code-block:: bash - - rasa test nlu -u data/nlu.md --config config.yml --cross-validation - -The full list of options for the script is: - -.. program-output:: rasa test nlu --help - -.. _comparing-nlu-pipelines: - -Comparing NLU Pipelines -^^^^^^^^^^^^^^^^^^^^^^^ - -By passing multiple pipeline configurations (or a folder containing them) to the CLI, Rasa will run -a comparative examination between the pipelines. - -.. 
code-block:: bash - - $ rasa test nlu --config pretrained_embeddings_spacy.yml supervised_embeddings.yml - --nlu data/nlu.md --runs 3 --percentages 0 25 50 70 90 - - -The command in the example above will create a train/test split from your data, -then train each pipeline multiple times with 0, 25, 50, 70 and 90% of your intent data excluded from the training set. -The models are then evaluated on the test set and the f1-score for each exclusion percentage is recorded. This process -runs three times (i.e. with 3 test sets in total) and then a graph is plotted using the means and standard deviations of -the f1-scores. - -The f1-score graph - along with all train/test sets, the trained models, classification and error reports - will be saved into a folder -called ``nlu_comparison_results``. - - -Intent Classification -^^^^^^^^^^^^^^^^^^^^^ - -The evaluation script will produce a report, confusion matrix, -and confidence histogram for your model. - -The report logs precision, recall and f1 measure for -each intent and entity, as well as providing an overall average. -You can save these reports as JSON files using the ``--report`` argument. - -The confusion matrix shows you which -intents are mistaken for others; any samples which have been -incorrectly predicted are logged and saved to a file -called ``errors.json`` for easier debugging. - -The histogram that the script produces allows you to visualise the -confidence distribution for all predictions, -with the volume of correct and incorrect predictions being displayed by -blue and red bars respectively. -Improving the quality of your training data will move the blue -histogram bars to the right and the red histogram bars -to the left of the plot. - - -.. warning:: - If any of your entities are incorrectly annotated, your evaluation may fail. One common problem - is that an entity cannot stop or start inside a token. 
- For example, if you have an example for a ``name`` entity - like ``[Brian](name)'s house``, this is only valid if your tokenizer splits ``Brian's`` into - multiple tokens. - - -Response Selection -^^^^^^^^^^^^^^^^^^^^^ - -The evaluation script will produce a combined report for all response selector models in your pipeline. - -The report logs precision, recall and f1 measure for -each response, as well as providing an overall average. -You can save these reports as JSON files using the ``--report`` argument. - - -Entity Extraction -^^^^^^^^^^^^^^^^^ - -The ``CRFEntityExtractor`` is the only entity extractor which you train using your own data, -and so is the only one that will be evaluated. If you use the spaCy or duckling -pre-trained entity extractors, Rasa NLU will not include these in the evaluation. - -Rasa NLU will report recall, precision, and f1 measure for each entity type that -``CRFEntityExtractor`` is trained to recognize. - - -Entity Scoring -^^^^^^^^^^^^^^ - -To evaluate entity extraction we apply a simple tag-based approach. We don't consider BILOU tags, but only the -entity type tags on a per token basis. For location entity like "near Alexanderplatz" we -expect the labels ``LOC LOC`` instead of the BILOU-based ``B-LOC L-LOC``. Our approach is more lenient -when it comes to evaluation, as it rewards partial extraction and does not punish the splitting of entities. -For example, given the aforementioned entity "near Alexanderplatz" and a system that extracts -"Alexanderplatz", our approach rewards the extraction of "Alexanderplatz" and punishes the missed out word "near". -The BILOU-based approach, however, would label this as a complete failure since it expects Alexanderplatz -to be labeled as a last token in an entity (``L-LOC``) instead of a single token entity (``U-LOC``). Note also that -a split extraction of "near" and "Alexanderplatz" would get full scores on our approach and zero on the -BILOU-based one. 
- -Here's a comparison between the two scoring mechanisms for the phrase "near Alexanderplatz tonight": - -====================================================== ======================== =========================== -extracted Simple tags (score) BILOU tags (score) -====================================================== ======================== =========================== -``[near Alexanderplatz](loc) [tonight](time)`` loc loc time (3) B-loc L-loc U-time (3) -``[near](loc) [Alexanderplatz](loc) [tonight](time)`` loc loc time (3) U-loc U-loc U-time (1) -``near [Alexanderplatz](loc) [tonight](time)`` O loc time (2) O U-loc U-time (1) -``[near](loc) Alexanderplatz [tonight](time)`` loc O time (2) U-loc O U-time (1) -``[near Alexanderplatz tonight](loc)`` loc loc loc (2) B-loc I-loc L-loc (1) -====================================================== ======================== =========================== - - -.. _core-evaluation: - -Evaluating a Core Model ------------------------ - -You can evaluate your trained model on a set of test stories -by using the evaluate script: - -.. code-block:: bash - - rasa test core --stories test_stories.md --out results - - -This will print the failed stories to ``results/failed_stories.md``. -We count any story as `failed` if at least one of the actions -was predicted incorrectly. - -In addition, this will save a confusion matrix to a file called -``results/story_confmat.pdf``. For each action in your domain, the confusion -matrix shows how often the action was correctly predicted and how often an -incorrect action was predicted instead. - -The full list of options for the script is: - -.. program-output:: rasa test core --help - - -Comparing Core Configurations ------------------------------ - -To choose a configuration for your core model, or to choose hyperparameters for a -specific policy, you want to measure how well Rasa Core will `generalise` -to conversations which it hasn't seen before. 
Especially in the beginning -of a project, you do not have a lot of real conversations to use to train -your bot, so you don't just want to throw some away to use as a test set. - -Rasa Core has some scripts to help you choose and fine-tune your policy configuration. -Once you are happy with it, you can then train your final configuration on your -full data set. To do this, you first have to train models for your different -configurations. Create two (or more) config files including the policies you want to -compare, and then use the ``compare`` mode of the train script to train your models: - -.. code-block:: bash - - $ rasa train core -c config_1.yml config_2.yml \ - -d domain.yml -s stories_folder --out comparison_models --runs 3 \ - --percentages 0 5 25 50 70 95 - -For each policy configuration provided, Rasa Core will be trained multiple times -with 0, 5, 25, 50, 70 and 95% of your training stories excluded from the training -data. This is done for multiple runs to ensure consistent results. - -Once this script has finished, you can use the evaluate script in ``compare`` -mode to evaluate the models you just trained: - -.. code-block:: bash - - $ rasa test core -m comparison_models --stories stories_folder - --out comparison_results --evaluate-model-directory - -This will evaluate each of the models on the provided stories -(can be either training or test set) and plot some graphs -to show you which policy performs best. By evaluating on the full set of stories, you -can measure how well Rasa Core is predicting the held-out stories. -To compare single policies create config files containing only one policy each. - -.. note:: - This training process can take a long time, so we'd suggest letting it run - somewhere in the background where it can't be interrupted. 
diff --git a/docs/user-guide/validate-files.rst b/docs/user-guide/validate-files.rst deleted file mode 100644 index 23eba7a0fca7..000000000000 --- a/docs/user-guide/validate-files.rst +++ /dev/null @@ -1,120 +0,0 @@ -:desc: Check your domain, stories and intent files for possible errors. - -.. _validate-files: - -Validate Data -============= - -.. edit-link:: - - -Test Domain and Data Files for Mistakes ---------------------------------------- - -To verify if there are any mistakes in your domain file, NLU data, or story data, run the validate script. -You can run it with the following command: - -.. code-block:: bash - - rasa data validate - -The script above runs all the validations on your files, except for story structure validation, -which is omitted unless you provide the ``--max-history`` argument. Here is the list of options to -the script: - -.. program-output:: rasa data validate --help - -By default the validator searches only for errors in the data (e.g. the same -example being listed as an example for two intents), but does not report other -minor issues (such as unused intents, utterances that are not listed as -actions). To also report the later use the ``-debug`` flag. - -You can also run these validations through the Python API by importing the `Validator` class, -which has the following methods: - -**from_files():** Creates the instance from string paths to the necessary files. - -**verify_intents():** Checks if intents listed in domain file are consistent with the NLU data. - -**verify_example_repetition_in_intents():** Checks if there is no duplicated data among distinct intents at NLU data. - -**verify_intents_in_stories():** Verification for intents in the stories, to check if they are valid. - -**verify_utterances():** Checks domain file for consistency between responses listed in the `responses` section -and the utterance actions you have defined. 
- -**verify_utterances_in_stories():** Verification for utterances in stories, to check if they are valid. - -**verify_all():** Runs all verifications above. - -**verify_domain_validity():** Check if domain is valid. - -To use these functions it is necessary to create a `Validator` object and initialize the logger. See the following code: - -.. code-block:: python - - import logging - from rasa import utils - from rasa.core.validator import Validator - - logger = logging.getLogger(__name__) - - utils.configure_colored_logging('DEBUG') - - validator = Validator.from_files(domain_file='domain.yml', - nlu_data='data/nlu_data.md', - stories='data/stories.md') - - validator.verify_all() - -.. _test-story-files-for-conflicts: - -Test Story Files for Conflicts ------------------------------- - -In addition to the default tests described above, you can also do a more in-depth structural test of your stories. -In particular, you can test if your stories are inconsistent, i.e. if different bot actions follow from the same dialogue history. -If this is not the case, then Rasa cannot learn the correct behavior. - -Take, for example, the following two stories: - -.. code-block:: md - - ## Story 1 - * greet - - utter_greet - * inform_happy - - utter_happy - - utter_goodbye - - ## Story 2 - * greet - - utter_greet - * inform_happy - - utter_goodbye - -These two stories are inconsistent, because Rasa doesn't know if it should predict ``utter_happy`` or ``utter_goodbye`` -after ``inform_happy``, as there is nothing that would distinguish the dialogue states at ``inform_happy`` in the two -stories and the subsequent actions are different in Story 1 and Story 2. - -This conflict can be automatically identified with our story structure validation tool. -To do this, use ``rasa data validate`` in the command line, as follows: - -.. code-block:: bash - - rasa data validate stories --max-history 3 - > 2019-12-09 09:32:13 INFO rasa.core.validator - Story structure validation... 
- > 2019-12-09 09:32:13 INFO rasa.core.validator - Assuming max_history = 3 - > Processed Story Blocks: 100% 2/2 [00:00<00:00, 3237.59it/s, # trackers=1] - > 2019-12-09 09:32:13 WARNING rasa.core.validator - CONFLICT after intent 'inform_happy': - > utter_goodbye predicted in 'Story 2' - > utter_happy predicted in 'Story 1' - -Here we specify a ``max-history`` value of 3. -This means, that 3 events (user messages / bot actions) are taken into account for action predictions, but the particular setting does not matter for this example, because regardless of how long of a history you take into account, the conflict always exists. - -.. warning:: - - The ``rasa data validate stories`` script assumes that all your **story names are unique**. - If your stories are in the Markdown format, you may find duplicate names with a command like - ``grep -h "##" data/*.md | uniq -c | grep "^[^1]"``. diff --git a/docs/utils/StoryLexer.py b/docs/utils/StoryLexer.py deleted file mode 100644 index 949a68e571ae..000000000000 --- a/docs/utils/StoryLexer.py +++ /dev/null @@ -1,64 +0,0 @@ -from pygments.lexer import RegexLexer, bygroups, using, default, include -from pygments.lexers.data import JsonLexer -from pygments.token import Keyword, Comment, Token, Text, Generic, Name - - -class StoryLexer(RegexLexer): - """Lexer for the Rasa Core story file format. 
- Used for syntax highlighting of story snippets in the docs.""" - - name = "Story" - aliases = ["story"] - filenames = ["*.md"] - - tokens = { - "comment": [ - ( - r"(\s*)", - bygroups(Keyword, Comment.MultiLine, Keyword), - ) - ], - "root": [ - include("comment"), - (r"\s*-\s*(slot)", Token.Operator, ("event", "event_rx")), - (r"\s*-\s*(restart)", Token.Operator, ("event", "event_rx")), - (r"\s*-\s*(rewind)", Token.Operator, ("event", "event_rx")), - (r"\s*-\s*(reset_slots)", Token.Operator, ("event", "event_rx")), - (r"\s*-\s*(reminder)", Token.Operator, ("event", "event_rx")), - (r"\s*-\s*(undo)", Token.Operator, ("event", "event_rx")), - (r"\s*-\s*(export)", Token.Operator, ("event", "event_rx")), - (r"\s*-\s*(pause)", Token.Operator, ("event", "event_rx")), - (r"\s*-\s*(resume)", Token.Operator, ("event", "event_rx")), - (r"\s*-\s*(utter_[^\s]*)", Token.Text, ("event", "event_rx")), - ( - r"(\s*-(?:\s*)(?:.*?))(\s*)(?:(?:())|(\n|$))", - bygroups(Text, Text, Keyword, Comment.MultiLine, Keyword, Text), - ), - (r"\s*\>\s*[^\s]*", Name.Constant), - ( - r"(#+(?:\s*)(?:.*?))(\s*)(?:(?:())|(\n|$))", - bygroups( - Generic.Heading, Text, Keyword, Comment.MultiLine, Keyword, Text - ), - ), - (r"\s*\*\s*", Name.Variable.Magic, ("intent", "intent_rx")), - (r".*\n", Text), - ], - "event": [include("comment"), (r"\s*(\n|$)", Text, "#pop")], - "event_rx": [(r"({.*?})?", bygroups(using(JsonLexer)), "#pop")], - "intent": [ - (r"\s*OR\s*", Keyword, "intent_rx"), - include("comment"), - (r"\s*(?:\n|$)", Text, "#pop"), - default("#pop"), - ], - "intent_rx": [ - (r'["\'].*["\']', Name.Variable.Magic, "#pop"), - ( - r"([^\s\{]*\s*)({.*?})?", - bygroups(Name.Variable.Magic, using(JsonLexer)), - "#pop", - ), - (r"\s*(\n|$)", Text, "#pop:2"), - ], - } diff --git a/docs/utils/__init__.py b/docs/utils/__init__.py deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/poetry.lock b/poetry.lock index 92f9251cfcae..dc99777b6102 100644 --- a/poetry.lock +++ b/poetry.lock @@ 
-54,14 +54,6 @@ version = "0.6.4" [package.dependencies] aiohttp = ">=2.0.0,<4.0.0" -[[package]] -category = "dev" -description = "A configurable sidebar-enabled Sphinx theme" -name = "alabaster" -optional = false -python-versions = "*" -version = "0.7.12" - [[package]] category = "dev" description = "apipkg: namespace control and lazy-import mechanism" @@ -105,14 +97,6 @@ tornado = ["tornado (>=4.3)"] twisted = ["twisted"] zookeeper = ["kazoo"] -[[package]] -category = "dev" -description = "An unobtrusive argparse wrapper with natural syntax" -name = "argh" -optional = false -python-versions = "*" -version = "0.26.2" - [[package]] category = "main" description = "An AST unparser for Python" @@ -219,17 +203,6 @@ azure-core = ">=1.6.0,<2.0.0" cryptography = ">=2.1.4" msrest = ">=0.6.10" -[[package]] -category = "dev" -description = "Internationalization utilities" -name = "babel" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" -version = "2.8.0" - -[package.dependencies] -pytz = ">=2015.7" - [[package]] category = "dev" description = "The uncompromising code formatter." @@ -250,19 +223,6 @@ typed-ast = ">=1.4.0" [package.extras] d = ["aiohttp (>=3.3.2)", "aiohttp-cors"] -[[package]] -category = "dev" -description = "An easy safelist-based HTML-sanitizing tool." -name = "bleach" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" -version = "3.1.5" - -[package.dependencies] -packaging = "*" -six = ">=1.9.0" -webencodings = "*" - [[package]] category = "main" description = "The Blis BLAS-like linear algebra library, as a self-contained C-extension." 
@@ -288,10 +248,10 @@ description = "The AWS SDK for Python" name = "boto3" optional = false python-versions = "*" -version = "1.14.21" +version = "1.14.25" [package.dependencies] -botocore = ">=1.17.21,<1.18.0" +botocore = ">=1.17.25,<1.18.0" jmespath = ">=0.7.1,<1.0.0" s3transfer = ">=0.3.0,<0.4.0" @@ -301,7 +261,7 @@ description = "Low-level, data-driven core of boto 3." name = "botocore" optional = false python-versions = "*" -version = "1.17.21" +version = "1.17.25" [package.dependencies] docutils = ">=0.10,<0.16" @@ -490,17 +450,18 @@ description = "cryptography is a package which provides cryptographic recipes an name = "cryptography" optional = false python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*" -version = "2.9.2" +version = "3.0" [package.dependencies] cffi = ">=1.8,<1.11.3 || >1.11.3" six = ">=1.4.1" [package.extras] -docs = ["sphinx (>=1.6.5,<1.8.0 || >1.8.0)", "sphinx-rtd-theme"] +docs = ["sphinx (>=1.6.5,<1.8.0 || >1.8.0,<3.1.0 || >3.1.0,<3.1.1 || >3.1.1)", "sphinx-rtd-theme"] docstest = ["doc8", "pyenchant (>=1.6.11)", "twine (>=1.12.0)", "sphinxcontrib-spelling (>=4.0.1)"] idna = ["idna (>=2.1)"] -pep8test = ["flake8", "flake8-import-order", "pep8-naming"] +pep8test = ["black", "flake8", "flake8-import-order", "pep8-naming"] +ssh = ["bcrypt (>=3.1.5)"] test = ["pytest (>=3.6.0,<3.9.0 || >3.9.0,<3.9.1 || >3.9.1,<3.9.2 || >3.9.2)", "pretend", "iso8601", "pytz", "hypothesis (>=1.11.4,<3.79.2 || >3.79.2)"] [[package]] @@ -539,14 +500,6 @@ optional = false python-versions = ">=2.6, !=3.0.*, !=3.1.*" version = "4.4.2" -[[package]] -category = "dev" -description = "XML bomb protection for Python stdlib modules" -name = "defusedxml" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" -version = "0.6.0" - [[package]] category = "main" description = "DNS toolkit" @@ -611,14 +564,6 @@ six = ">=1.9.0" gmpy = ["gmpy"] gmpy2 = ["gmpy2"] -[[package]] -category = "dev" -description = "Discover and load entry 
points from installed packages." -name = "entrypoints" -optional = false -python-versions = ">=2.7" -version = "0.3" - [[package]] category = "dev" description = "execnet: rapid multi-Python deployment" @@ -738,10 +683,10 @@ description = "Google API client core library" name = "google-api-core" optional = false python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*" -version = "1.21.0" +version = "1.22.0" [package.dependencies] -google-auth = ">=1.18.0,<2.0dev" +google-auth = ">=1.19.1,<2.0dev" googleapis-common-protos = ">=1.6.0,<2.0dev" protobuf = ">=3.12.0" pytz = "*" @@ -760,7 +705,7 @@ description = "Google Authentication Library" name = "google-auth" optional = false python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*" -version = "1.19.1" +version = "1.19.2" [package.dependencies] cachetools = ">=2.0.0,<5.0" @@ -910,7 +855,7 @@ description = "Chromium HSTS Preload list as a Python package and updated daily" name = "hstspreload" optional = false python-versions = ">=3.6" -version = "2020.7.15" +version = "2020.7.22" [[package]] category = "main" @@ -988,14 +933,6 @@ version = "1.1.0" [package.dependencies] idna = ">=2.0" -[[package]] -category = "dev" -description = "Getting image size from png/jpeg/jpeg2000/gif file" -name = "imagesize" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" -version = "1.2.0" - [[package]] category = "main" description = "Immutable Collections" @@ -1072,14 +1009,6 @@ optional = false python-versions = "*" version = "1.0.23" -[[package]] -category = "dev" -description = "Vestigial utilities from IPython" -name = "ipython-genutils" -optional = false -python-versions = "*" -version = "0.2.0" - [[package]] category = "dev" description = "An ISO 8601 date/time/duration parser and formatter" @@ -1207,18 +1136,6 @@ version = "1.9" [package.dependencies] six = "*" -[[package]] -category = "dev" -description = "Jupyter core package. A base package on which Jupyter projects rely." 
-name = "jupyter-core" -optional = false -python-versions = "!=3.0,!=3.1,!=3.2,!=3.3,!=3.4,>=2.7" -version = "4.6.3" - -[package.dependencies] -pywin32 = ">=1.0" -traitlets = "*" - [[package]] category = "main" description = "Implementation of JOSE Web standards" @@ -1263,21 +1180,6 @@ optional = false python-versions = ">=3.6" version = "1.2.0" -[[package]] -category = "dev" -description = "Python LiveReload is an awesome tool for web developers" -name = "livereload" -optional = false -python-versions = "*" -version = "2.6.2" - -[package.dependencies] -six = "*" - -[package.dependencies.tornado] -python = ">=2.8" -version = "*" - [[package]] category = "main" description = "Python implementation of Markdown." @@ -1336,14 +1238,6 @@ optional = false python-versions = "*" version = "0.6.1" -[[package]] -category = "dev" -description = "The fastest markdown parser in pure Python" -name = "mistune" -optional = false -python-versions = "*" -version = "0.8.4" - [[package]] category = "dev" description = "Rolling backport of unittest.mock for all Pythons" @@ -1444,67 +1338,6 @@ optional = true python-versions = "*" version = "1.0.2" -[[package]] -category = "dev" -description = "Converting Jupyter Notebooks" -name = "nbconvert" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" -version = "5.6.1" - -[package.dependencies] -bleach = "*" -defusedxml = "*" -entrypoints = ">=0.2.2" -jinja2 = ">=2.4" -jupyter-core = "*" -mistune = ">=0.8.1,<2" -nbformat = ">=4.4" -pandocfilters = ">=1.4.1" -pygments = "*" -testpath = "*" -traitlets = ">=4.2" - -[package.extras] -all = ["pytest", "pytest-cov", "ipykernel", "jupyter-client (>=5.3.1)", "ipywidgets (>=7)", "pebble", "tornado (>=4.0)", "sphinx (>=1.5.1)", "sphinx-rtd-theme", "nbsphinx (>=0.2.12)", "sphinxcontrib-github-alt", "ipython", "mock"] -docs = ["sphinx (>=1.5.1)", "sphinx-rtd-theme", "nbsphinx (>=0.2.12)", "sphinxcontrib-github-alt", "ipython", "jupyter-client (>=5.3.1)"] -execute = 
["jupyter-client (>=5.3.1)"] -serve = ["tornado (>=4.0)"] -test = ["pytest", "pytest-cov", "ipykernel", "jupyter-client (>=5.3.1)", "ipywidgets (>=7)", "pebble", "mock"] - -[[package]] -category = "dev" -description = "The Jupyter Notebook format" -name = "nbformat" -optional = false -python-versions = ">=3.5" -version = "5.0.7" - -[package.dependencies] -ipython-genutils = "*" -jsonschema = ">=2.4,<2.5.0 || >2.5.0" -jupyter-core = "*" -traitlets = ">=4.1" - -[package.extras] -test = ["pytest", "pytest-cov", "testpath"] - -[[package]] -category = "dev" -description = "Jupyter Notebook Tools for Sphinx" -name = "nbsphinx" -optional = false -python-versions = ">=3" -version = "0.7.1" - -[package.dependencies] -docutils = "*" -jinja2 = "*" -nbconvert = "!=5.4" -nbformat = "*" -sphinx = ">=1.8" -traitlets = "*" - [[package]] category = "main" description = "Python package for creating and manipulating graphs and networks" @@ -1543,7 +1376,7 @@ description = "NumPy is the fundamental package for array computing with Python. name = "numpy" optional = false python-versions = ">=3.6" -version = "1.19.0" +version = "1.19.1" [[package]] category = "main" @@ -1579,7 +1412,7 @@ description = "Optimizing numpys einsum function" name = "opt-einsum" optional = false python-versions = ">=3.5" -version = "3.2.1" +version = "3.3.0" [package.dependencies] numpy = ">=1.7" @@ -1600,14 +1433,6 @@ version = "20.4" pyparsing = ">=2.0.2" six = "*" -[[package]] -category = "dev" -description = "Utilities for writing pandoc filters in python" -name = "pandocfilters" -optional = false -python-versions = "*" -version = "1.4.2" - [[package]] category = "dev" description = "Utility library for gitignore style pattern matching of file paths." 
@@ -1616,14 +1441,6 @@ optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" version = "0.8.0" -[[package]] -category = "dev" -description = "File system general utilities" -name = "pathtools" -optional = false -python-versions = "*" -version = "0.1.2" - [[package]] category = "dev" description = "Utilities to deal with pep440 versioning" @@ -1671,14 +1488,6 @@ version = ">=0.12" [package.extras] dev = ["pre-commit", "tox"] -[[package]] -category = "dev" -description = "Utility that helps with local TCP ports managment. It can find an unused TCP localhost port and remember the association." -name = "port-for" -optional = false -python-versions = "*" -version = "0.3.1" - [[package]] category = "main" description = "Cython hash table that trusts the keys are pre-hashed" @@ -1785,14 +1594,6 @@ optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" version = "2.2.0" -[[package]] -category = "dev" -description = "Pygments is a syntax highlighting package written in Python." 
-name = "pygments" -optional = false -python-versions = ">=3.5" -version = "2.6.1" - [[package]] category = "main" description = "JSON Web Token implementation in Python" @@ -2087,7 +1888,7 @@ description = "Python type inferencer" name = "pytype" optional = false python-versions = "<3.9,>=3.5" -version = "2020.7.14" +version = "2020.7.20" [package.dependencies] attrs = "*" @@ -2108,7 +1909,7 @@ version = "2020.1" [[package]] category = "dev" description = "Python for Window Extensions" -marker = "sys_platform == \"win32\" and python_version >= \"3.6\" or sys_platform == \"win32\"" +marker = "sys_platform == \"win32\" and python_version >= \"3.6\"" name = "pywin32" optional = false python-versions = "*" @@ -2150,21 +1951,6 @@ requests = ">=2.0,<3.0" sanic = ">=19.12.2,<20.0.0" sanic-cors = ">=0.10.0b1,<0.11.0" -[[package]] -category = "dev" -description = "A configurable sidebar-enabled Sphinx theme" -name = "rasabaster" -optional = false -python-versions = "*" -version = "0.7.27" - -[package.dependencies] -"ruamel.yaml" = ">=0.16.10,<0.17.0" - -[package.source] -reference = "rasa-pypi" -url = "https://pypi.rasa.com/simple" - [[package]] category = "main" description = "Python client for Redis key-value store" @@ -2461,7 +2247,7 @@ description = "Slack API clients for Web API and RTM API" name = "slackclient" optional = false python-versions = ">=3.6.0" -version = "2.7.2" +version = "2.7.3" [package.dependencies] aiohttp = ">3.5.2,<4.0.0" @@ -2482,14 +2268,6 @@ version = "1.1.0" python = "<3.7" version = ">=2.1" -[[package]] -category = "dev" -description = "This package provides 26 stemmers for 25 languages generated from Snowball algorithms." 
-name = "snowballstemmer" -optional = false -python-versions = "*" -version = "2.0.0" - [[package]] category = "dev" description = "Sorted Containers -- Sorted List, Sorted Dict, Sorted Set" @@ -2533,229 +2311,6 @@ ko = ["natto-py (0.9.0)"] lookups = ["spacy-lookups-data (>=0.0.5,<0.2.0)"] th = ["pythainlp (>=2.0)"] -[[package]] -category = "dev" -description = "Python documentation generator" -name = "sphinx" -optional = false -python-versions = ">=3.5" -version = "3.1.2" - -[package.dependencies] -Jinja2 = ">=2.3" -Pygments = ">=2.0" -alabaster = ">=0.7,<0.8" -babel = ">=1.3" -colorama = ">=0.3.5" -docutils = ">=0.12" -imagesize = "*" -packaging = "*" -requests = ">=2.5.0" -setuptools = "*" -snowballstemmer = ">=1.1" -sphinxcontrib-applehelp = "*" -sphinxcontrib-devhelp = "*" -sphinxcontrib-htmlhelp = "*" -sphinxcontrib-jsmath = "*" -sphinxcontrib-qthelp = "*" -sphinxcontrib-serializinghtml = "*" - -[package.extras] -docs = ["sphinxcontrib-websupport"] -lint = ["flake8 (>=3.5.0)", "flake8-import-order", "mypy (>=0.780)", "docutils-stubs"] -test = ["pytest", "pytest-cov", "html5lib", "typed-ast", "cython"] - -[[package]] -category = "dev" -description = "Watch a Sphinx directory and rebuild the documentation when a change is detected. Also includes a livereload enabled web server." 
-name = "sphinx-autobuild" -optional = false -python-versions = "*" -version = "0.7.1" - -[package.dependencies] -PyYAML = ">=3.10" -argh = ">=0.24.1" -livereload = ">=2.3.0" -pathtools = ">=0.1.2" -port-for = "0.3.1" -tornado = ">=3.2" -watchdog = ">=0.7.1" - -[[package]] -category = "dev" -description = "Type hints (PEP 484) support for the Sphinx autodoc extension" -name = "sphinx-autodoc-typehints" -optional = false -python-versions = ">=3.5.2" -version = "1.10.3" - -[package.dependencies] -Sphinx = ">=2.1" - -[package.extras] -test = ["pytest (>=3.1.0)", "typing-extensions (>=3.5)", "sphobjinv (>=2.0)", "dataclasses"] -type_comments = ["typed-ast (>=1.4.0)"] - -[[package]] -category = "dev" -description = "Read the Docs theme for Sphinx" -name = "sphinx-rtd-theme" -optional = false -python-versions = "*" -version = "0.2.5b1" - -[package.source] -reference = "6b89a7c13bba2a7100d7093ed5007b2abf67f277" -type = "git" -url = "https://github.com/RasaHQ/sphinx_rtd_theme.git" - -[[package]] -category = "dev" -description = "Tab views for Sphinx" -name = "sphinx-tabs" -optional = false -python-versions = "*" -version = "1.1.13" - -[package.dependencies] -sphinx = ">=1.4" - -[[package]] -category = "dev" -description = "sphinxcontrib-applehelp is a sphinx extension which outputs Apple help books" -name = "sphinxcontrib-applehelp" -optional = false -python-versions = ">=3.5" -version = "1.0.2" - -[package.extras] -lint = ["flake8", "mypy", "docutils-stubs"] -test = ["pytest"] - -[[package]] -category = "dev" -description = "sphinxcontrib-devhelp is a sphinx extension which outputs Devhelp document." 
-name = "sphinxcontrib-devhelp" -optional = false -python-versions = ">=3.5" -version = "1.0.2" - -[package.extras] -lint = ["flake8", "mypy", "docutils-stubs"] -test = ["pytest"] - -[[package]] -category = "dev" -description = "sphinxcontrib-htmlhelp is a sphinx extension which renders HTML help files" -name = "sphinxcontrib-htmlhelp" -optional = false -python-versions = ">=3.5" -version = "1.0.3" - -[package.extras] -lint = ["flake8", "mypy", "docutils-stubs"] -test = ["pytest", "html5lib"] - -[[package]] -category = "dev" -description = "Sphinx domain for documenting HTTP APIs" -name = "sphinxcontrib-httpdomain" -optional = false -python-versions = "*" -version = "1.7.0" - -[package.dependencies] -Sphinx = ">=1.5" -six = "*" - -[[package]] -category = "dev" -description = "A sphinx extension which renders display math in HTML via JavaScript" -name = "sphinxcontrib-jsmath" -optional = false -python-versions = ">=3.5" -version = "1.0.1" - -[package.extras] -test = ["pytest", "flake8", "mypy"] - -[[package]] -category = "dev" -description = "Sphinx extension to include program output" -name = "sphinxcontrib-programoutput" -optional = false -python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*" -version = "0.16" - -[package.dependencies] -Sphinx = ">=1.7.0" - -[[package]] -category = "dev" -description = "sphinxcontrib-qthelp is a sphinx extension which outputs QtHelp document." -name = "sphinxcontrib-qthelp" -optional = false -python-versions = ">=3.5" -version = "1.0.3" - -[package.extras] -lint = ["flake8", "mypy", "docutils-stubs"] -test = ["pytest"] - -[[package]] -category = "dev" -description = "sphinxcontrib-serializinghtml is a sphinx extension which outputs \"serialized\" HTML files (json and pickle)." 
-name = "sphinxcontrib-serializinghtml" -optional = false -python-versions = ">=3.5" -version = "1.1.4" - -[package.extras] -lint = ["flake8", "mypy", "docutils-stubs"] -test = ["pytest"] - -[[package]] -category = "dev" -description = "Make Sphinx better at documenting Python functions and methods" -name = "sphinxcontrib-trio" -optional = false -python-versions = "*" -version = "1.1.2" - -[package.dependencies] -sphinx = ">=1.7" - -[[package]] -category = "dev" -description = "Sphinx extension that allows building versioned docs for self-hosting." -name = "sphinxcontrib-versioning" -optional = false -python-versions = "*" -version = "2.2.1" - -[package.dependencies] -click = "*" -colorclass = "*" -sphinx = "*" - -[package.source] -reference = "b335b378e38411499f3f22d51314a68b74f1dbfa" -type = "git" -url = "https://github.com/RasaHQ/sphinxcontrib-versioning.git" - -[[package]] -category = "dev" -description = "Sphinx API for Web Apps" -name = "sphinxcontrib-websupport" -optional = false -python-versions = ">=3.5" -version = "1.2.3" - -[package.extras] -lint = ["flake8"] -test = ["pytest", "sqlalchemy", "whoosh", "sphinx"] - [[package]] category = "main" description = "Database Abstraction Library" @@ -2962,17 +2517,6 @@ optional = false python-versions = "*" version = "3.1.0" -[[package]] -category = "dev" -description = "Test utilities for code working with files and commands" -name = "testpath" -optional = false -python-versions = "*" -version = "0.4.4" - -[package.extras] -test = ["pathlib2"] - [[package]] category = "main" description = "Practical Machine Learning for NLP" @@ -3062,22 +2606,6 @@ version = "4.47.0" [package.extras] dev = ["py-make (>=0.1.0)", "twine", "argopt", "pydoc-markdown"] -[[package]] -category = "dev" -description = "Traitlets Python config system" -name = "traitlets" -optional = false -python-versions = "*" -version = "4.3.3" - -[package.dependencies] -decorator = "*" -ipython-genutils = "*" -six = "*" - -[package.extras] -test = 
["pytest", "mock"] - [[package]] category = "main" description = "State-of-the-art Natural Language Processing for TensorFlow 2.0 and PyTorch" @@ -3215,21 +2743,7 @@ description = "A lightweight console printing and formatting toolkit" name = "wasabi" optional = true python-versions = "*" -version = "0.7.0" - -[[package]] -category = "dev" -description = "Filesystem events monitoring" -name = "watchdog" -optional = false -python-versions = "*" -version = "0.10.3" - -[package.dependencies] -pathtools = ">=0.1.1" - -[package.extras] -watchmedo = ["PyYAML (>=3.10)", "argh (>=0.24.1)"] +version = "0.7.1" [[package]] category = "main" @@ -3239,14 +2753,6 @@ optional = false python-versions = "*" version = "0.2.5" -[[package]] -category = "dev" -description = "Character encoding aliases for legacy web content" -name = "webencodings" -optional = false -python-versions = "*" -version = "0.5.1" - [[package]] category = "main" description = "Community-developed Python SDK for the Webex Teams APIs" @@ -3352,7 +2858,7 @@ spacy = ["spacy"] transformers = ["transformers"] [metadata] -content-hash = "a25412f2c5f7834611ede0021052cef984cf975b6d8ae4dd7941e131519319e0" +content-hash = "a08c1168edfaf681dfee21fba618ec81169299b12872cbbe8e932726dcd9a766" python-versions = ">=3.6,<3.9" [metadata.files] @@ -3381,10 +2887,6 @@ aioresponses = [ {file = "aioresponses-0.6.4-py2.py3-none-any.whl", hash = "sha256:8e8b430aeddbacd25f4d94bfe11a46bc88a47be689df12c423e62cb86652ba3b"}, {file = "aioresponses-0.6.4.tar.gz", hash = "sha256:4397ca736238a1ada8c7f47e557dda05e9ecfdd467b9f6b83871efd365af7e9f"}, ] -alabaster = [ - {file = "alabaster-0.7.12-py2.py3-none-any.whl", hash = "sha256:446438bdcca0e05bd45ea2de1668c1d9b032e1a9154c2c259092d77031ddd359"}, - {file = "alabaster-0.7.12.tar.gz", hash = "sha256:a661d72d58e6ea8a57f7a86e37d86716863ee5e92788398526d58b26a4e4dc02"}, -] apipkg = [ {file = "apipkg-1.5-py2.py3-none-any.whl", hash = 
"sha256:58587dd4dc3daefad0487f6d9ae32b4542b185e1c36db6993290e7c41ca2b47c"}, {file = "apipkg-1.5.tar.gz", hash = "sha256:37228cda29411948b422fae072f57e31d3396d2ee1c9783775980ee9c9990af6"}, @@ -3397,10 +2899,6 @@ apscheduler = [ {file = "APScheduler-3.6.3-py2.py3-none-any.whl", hash = "sha256:e8b1ecdb4c7cb2818913f766d5898183c7cb8936680710a4d3a966e02262e526"}, {file = "APScheduler-3.6.3.tar.gz", hash = "sha256:3bb5229eed6fbbdafc13ce962712ae66e175aa214c69bed35a06bffcf0c5e244"}, ] -argh = [ - {file = "argh-0.26.2-py2.py3-none-any.whl", hash = "sha256:a9b3aaa1904eeb78e32394cd46c6f37ac0fb4af6dc488daa58971bdc7d7fcaf3"}, - {file = "argh-0.26.2.tar.gz", hash = "sha256:e9535b8c84dc9571a48999094fda7f33e63c3f1b74f3e5f3ac0105a58405bb65"}, -] astunparse = [ {file = "astunparse-1.6.3-py2.py3-none-any.whl", hash = "sha256:c2652417f2c8b5bb325c885ae329bdf3f86424075c4fd1a128674bc6fba4b8e8"}, {file = "astunparse-1.6.3.tar.gz", hash = "sha256:5ad93a8456f0d084c3456d059fd9a92cce667963232cbf763eac3bc5b7940872"}, @@ -3438,18 +2936,10 @@ azure-storage-blob = [ {file = "azure-storage-blob-12.3.2.zip", hash = "sha256:b99ce18c5063b22a988e6e997a491aab6c7c4dd62d1424b4e2b934e6ef104356"}, {file = "azure_storage_blob-12.3.2-py2.py3-none-any.whl", hash = "sha256:8a02a33cd28a16963274dc928960642e99ec19cad27166fb386ebcc0f1216706"}, ] -babel = [ - {file = "Babel-2.8.0-py2.py3-none-any.whl", hash = "sha256:d670ea0b10f8b723672d3a6abeb87b565b244da220d76b4dba1b66269ec152d4"}, - {file = "Babel-2.8.0.tar.gz", hash = "sha256:1aac2ae2d0d8ea368fa90906567f5c08463d98ade155c0c4bfedd6a0f7160e38"}, -] black = [ {file = "black-19.10b0-py36-none-any.whl", hash = "sha256:1b30e59be925fafc1ee4565e5e08abef6b03fe455102883820fe5ee2e4734e0b"}, {file = "black-19.10b0.tar.gz", hash = "sha256:c2edb73a08e9e0e6f65a0e6af18b059b8b1cdd5bef997d7a0b181df93dc81539"}, ] -bleach = [ - {file = "bleach-3.1.5-py2.py3-none-any.whl", hash = "sha256:2bce3d8fab545a6528c8fa5d9f9ae8ebc85a56da365c7f85180bfe96a35ef22f"}, - {file = 
"bleach-3.1.5.tar.gz", hash = "sha256:3c4c520fdb9db59ef139915a5db79f8b51bc2a7257ea0389f30c846883430a4b"}, -] blis = [ {file = "blis-0.4.1-cp27-cp27m-macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:135450caabc8aea9bb9250329ebdf7189982d9b57d5c92789b2ba2fe52c247a7"}, {file = "blis-0.4.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:26b16d6005bb2671699831b5cc699905215d1abde1ec5c1d04de7dcd9eb29f75"}, @@ -3473,12 +2963,12 @@ boto = [ {file = "boto-2.49.0.tar.gz", hash = "sha256:ea0d3b40a2d852767be77ca343b58a9e3a4b00d9db440efb8da74b4e58025e5a"}, ] boto3 = [ - {file = "boto3-1.14.21-py2.py3-none-any.whl", hash = "sha256:5c2fa8ac4e4a4800d02b11d75b71bb8fcc190d6fbb15d957f99661414e655e10"}, - {file = "boto3-1.14.21.tar.gz", hash = "sha256:b093e00e98ab31f98d444efad1738ab4795226f0b05abc6655d747f286f9baf9"}, + {file = "boto3-1.14.25-py2.py3-none-any.whl", hash = "sha256:45331be8740a8f5ee873a487a7f91d93d9c360bb1408fc887f3a2f5c37377d87"}, + {file = "boto3-1.14.25.tar.gz", hash = "sha256:69038d4a42056ec67060020a64001ac09a6ef668aca81c45af1cbbdb7b56a4f6"}, ] botocore = [ - {file = "botocore-1.17.21-py2.py3-none-any.whl", hash = "sha256:9c4694e413c344ca2fb1175f33a97265dbf7f8a5943fbeafde3161d080b72308"}, - {file = "botocore-1.17.21.tar.gz", hash = "sha256:fcbcda16c815744482be4206ed098f4d08f2dfddfb1fcfa0f822f4cfd94adb85"}, + {file = "botocore-1.17.25-py2.py3-none-any.whl", hash = "sha256:98dc8b99e47d2d0bc14c017c957b4fb23172d2fc7db5e8529576308a6898f0fc"}, + {file = "botocore-1.17.25.tar.gz", hash = "sha256:918c5ccde335545a2c2eb426bc47520a960d11e3e3ab72aa43b73f1d692f07e1"}, ] cachetools = [ {file = "cachetools-4.1.1-py3-none-any.whl", hash = "sha256:513d4ff98dd27f85743a8dc0e92f55ddb1b49e060c2d5961512855cda2c01a98"}, @@ -3598,25 +3088,25 @@ coveralls = [ {file = "coveralls-2.1.1.tar.gz", hash = "sha256:afe359cd5b350e1b3895372bda32af8f0260638c7c4a31a5c0f15aa6a96f40d9"}, ] cryptography = [ - {file = 
"cryptography-2.9.2-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:daf54a4b07d67ad437ff239c8a4080cfd1cc7213df57d33c97de7b4738048d5e"}, - {file = "cryptography-2.9.2-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:3b3eba865ea2754738616f87292b7f29448aec342a7c720956f8083d252bf28b"}, - {file = "cryptography-2.9.2-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:c447cf087cf2dbddc1add6987bbe2f767ed5317adb2d08af940db517dd704365"}, - {file = "cryptography-2.9.2-cp27-cp27m-win32.whl", hash = "sha256:f118a95c7480f5be0df8afeb9a11bd199aa20afab7a96bcf20409b411a3a85f0"}, - {file = "cryptography-2.9.2-cp27-cp27m-win_amd64.whl", hash = "sha256:c4fd17d92e9d55b84707f4fd09992081ba872d1a0c610c109c18e062e06a2e55"}, - {file = "cryptography-2.9.2-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:d0d5aeaedd29be304848f1c5059074a740fa9f6f26b84c5b63e8b29e73dfc270"}, - {file = "cryptography-2.9.2-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:1e4014639d3d73fbc5ceff206049c5a9a849cefd106a49fa7aaaa25cc0ce35cf"}, - {file = "cryptography-2.9.2-cp35-abi3-macosx_10_9_x86_64.whl", hash = "sha256:96c080ae7118c10fcbe6229ab43eb8b090fccd31a09ef55f83f690d1ef619a1d"}, - {file = "cryptography-2.9.2-cp35-abi3-manylinux1_x86_64.whl", hash = "sha256:e993468c859d084d5579e2ebee101de8f5a27ce8e2159959b6673b418fd8c785"}, - {file = "cryptography-2.9.2-cp35-abi3-manylinux2010_x86_64.whl", hash = "sha256:88c881dd5a147e08d1bdcf2315c04972381d026cdb803325c03fe2b4a8ed858b"}, - {file = "cryptography-2.9.2-cp35-cp35m-win32.whl", hash = "sha256:651448cd2e3a6bc2bb76c3663785133c40d5e1a8c1a9c5429e4354201c6024ae"}, - {file = "cryptography-2.9.2-cp35-cp35m-win_amd64.whl", hash = "sha256:726086c17f94747cedbee6efa77e99ae170caebeb1116353c6cf0ab67ea6829b"}, - {file = "cryptography-2.9.2-cp36-cp36m-win32.whl", hash = "sha256:091d31c42f444c6f519485ed528d8b451d1a0c7bf30e8ca583a0cac44b8a0df6"}, - {file = "cryptography-2.9.2-cp36-cp36m-win_amd64.whl", hash = 
"sha256:bb1f0281887d89617b4c68e8db9a2c42b9efebf2702a3c5bf70599421a8623e3"}, - {file = "cryptography-2.9.2-cp37-cp37m-win32.whl", hash = "sha256:18452582a3c85b96014b45686af264563e3e5d99d226589f057ace56196ec78b"}, - {file = "cryptography-2.9.2-cp37-cp37m-win_amd64.whl", hash = "sha256:22e91636a51170df0ae4dcbd250d318fd28c9f491c4e50b625a49964b24fe46e"}, - {file = "cryptography-2.9.2-cp38-cp38-win32.whl", hash = "sha256:844a76bc04472e5135b909da6aed84360f522ff5dfa47f93e3dd2a0b84a89fa0"}, - {file = "cryptography-2.9.2-cp38-cp38-win_amd64.whl", hash = "sha256:1dfa985f62b137909496e7fc182dac687206d8d089dd03eaeb28ae16eec8e7d5"}, - {file = "cryptography-2.9.2.tar.gz", hash = "sha256:a0c30272fb4ddda5f5ffc1089d7405b7a71b0b0f51993cb4e5dbb4590b2fc229"}, + {file = "cryptography-3.0-cp27-cp27m-macosx_10_10_x86_64.whl", hash = "sha256:ab49edd5bea8d8b39a44b3db618e4783ef84c19c8b47286bf05dfdb3efb01c83"}, + {file = "cryptography-3.0-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:124af7255ffc8e964d9ff26971b3a6153e1a8a220b9a685dc407976ecb27a06a"}, + {file = "cryptography-3.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:51e40123083d2f946794f9fe4adeeee2922b581fa3602128ce85ff813d85b81f"}, + {file = "cryptography-3.0-cp27-cp27m-win32.whl", hash = "sha256:dea0ba7fe6f9461d244679efa968d215ea1f989b9c1957d7f10c21e5c7c09ad6"}, + {file = "cryptography-3.0-cp27-cp27m-win_amd64.whl", hash = "sha256:8ecf9400d0893836ff41b6f977a33972145a855b6efeb605b49ee273c5e6469f"}, + {file = "cryptography-3.0-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:0c608ff4d4adad9e39b5057de43657515c7da1ccb1807c3a27d4cf31fc923b4b"}, + {file = "cryptography-3.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:bec7568c6970b865f2bcebbe84d547c52bb2abadf74cefce396ba07571109c67"}, + {file = "cryptography-3.0-cp35-abi3-macosx_10_10_x86_64.whl", hash = "sha256:0cbfed8ea74631fe4de00630f4bb592dad564d57f73150d6f6796a24e76c76cd"}, + {file = "cryptography-3.0-cp35-abi3-manylinux1_x86_64.whl", hash = 
"sha256:a09fd9c1cca9a46b6ad4bea0a1f86ab1de3c0c932364dbcf9a6c2a5eeb44fa77"}, + {file = "cryptography-3.0-cp35-abi3-manylinux2010_x86_64.whl", hash = "sha256:ce82cc06588e5cbc2a7df3c8a9c778f2cb722f56835a23a68b5a7264726bb00c"}, + {file = "cryptography-3.0-cp35-cp35m-win32.whl", hash = "sha256:9367d00e14dee8d02134c6c9524bb4bd39d4c162456343d07191e2a0b5ec8b3b"}, + {file = "cryptography-3.0-cp35-cp35m-win_amd64.whl", hash = "sha256:384d7c681b1ab904fff3400a6909261cae1d0939cc483a68bdedab282fb89a07"}, + {file = "cryptography-3.0-cp36-cp36m-win32.whl", hash = "sha256:4d355f2aee4a29063c10164b032d9fa8a82e2c30768737a2fd56d256146ad559"}, + {file = "cryptography-3.0-cp36-cp36m-win_amd64.whl", hash = "sha256:45741f5499150593178fc98d2c1a9c6722df88b99c821ad6ae298eff0ba1ae71"}, + {file = "cryptography-3.0-cp37-cp37m-win32.whl", hash = "sha256:8ecef21ac982aa78309bb6f092d1677812927e8b5ef204a10c326fc29f1367e2"}, + {file = "cryptography-3.0-cp37-cp37m-win_amd64.whl", hash = "sha256:4b9303507254ccb1181d1803a2080a798910ba89b1a3c9f53639885c90f7a756"}, + {file = "cryptography-3.0-cp38-cp38-win32.whl", hash = "sha256:8713ddb888119b0d2a1462357d5946b8911be01ddbf31451e1d07eaa5077a261"}, + {file = "cryptography-3.0-cp38-cp38-win_amd64.whl", hash = "sha256:bea0b0468f89cdea625bb3f692cd7a4222d80a6bdafd6fb923963f2b9da0e15f"}, + {file = "cryptography-3.0.tar.gz", hash = "sha256:8e924dbc025206e97756e8903039662aa58aa9ba357d8e1d8fc29e3092322053"}, ] cycler = [ {file = "cycler-0.10.0-py2.py3-none-any.whl", hash = "sha256:1d8a5ae1ff6c5cf9b93e8811e581232ad8920aeec647c37316ceac982b08cb2d"}, @@ -3644,10 +3134,6 @@ decorator = [ {file = "decorator-4.4.2-py2.py3-none-any.whl", hash = "sha256:41fa54c2a0cc4ba648be4fd43cff00aedf5b9465c9bf18d64325bc225f08f760"}, {file = "decorator-4.4.2.tar.gz", hash = "sha256:e3a62f0520172440ca0dcc823749319382e377f37f140a0b99ef45fecb84bfe7"}, ] -defusedxml = [ - {file = "defusedxml-0.6.0-py2.py3-none-any.whl", hash = 
"sha256:6687150770438374ab581bb7a1b327a847dd9c5749e396102de3fad4e8a3ef93"}, - {file = "defusedxml-0.6.0.tar.gz", hash = "sha256:f684034d135af4c6cbb949b8a4d2ed61634515257a67299e5f940fbaa34377f5"}, -] dnspython = [ {file = "dnspython-1.16.0-py2.py3-none-any.whl", hash = "sha256:f69c21288a962f4da86e56c4905b49d11aba7938d3d740e80d9e366ee4f1632d"}, {file = "dnspython-1.16.0.zip", hash = "sha256:36c5e8e38d4369a08b6780b7f27d790a292b2b08eea01607865bf0936c558e01"}, @@ -3668,10 +3154,6 @@ ecdsa = [ {file = "ecdsa-0.15-py2.py3-none-any.whl", hash = "sha256:867ec9cf6df0b03addc8ef66b56359643cb5d0c1dc329df76ba7ecfe256c8061"}, {file = "ecdsa-0.15.tar.gz", hash = "sha256:8f12ac317f8a1318efa75757ef0a651abe12e51fc1af8838fb91079445227277"}, ] -entrypoints = [ - {file = "entrypoints-0.3-py2.py3-none-any.whl", hash = "sha256:589f874b313739ad35be6e0cd7efde2a4e9b6fea91edcc34e58ecbb8dbe56d19"}, - {file = "entrypoints-0.3.tar.gz", hash = "sha256:c70dd71abe5a8c85e55e12c19bd91ccfeec11a6e99044204511f9ed547d48451"}, -] execnet = [ {file = "execnet-1.7.1-py2.py3-none-any.whl", hash = "sha256:d4efd397930c46415f62f8a31388d6be4f27a91d7550eb79bc64a756e0056547"}, {file = "execnet-1.7.1.tar.gz", hash = "sha256:cacb9df31c9680ec5f95553976c4da484d407e85e41c83cb812aa014f0eddc50"}, @@ -3708,12 +3190,12 @@ gast = [ {file = "github3.py-1.3.0.tar.gz", hash = "sha256:15a115c18f7bfcf934dfef7ab103844eb9f620c586bad65967708926da47cbda"}, ] google-api-core = [ - {file = "google-api-core-1.21.0.tar.gz", hash = "sha256:fea9a434068406ddabe2704988d24d6c5bde3ecfc40823a34f43892d017b14f6"}, - {file = "google_api_core-1.21.0-py2.py3-none-any.whl", hash = "sha256:7b65e8e5ee59bd7517eab2bf9b3008e7b50fd9fb591d4efd780ead6859cd904b"}, + {file = "google-api-core-1.22.0.tar.gz", hash = "sha256:aaedc40ae977dbc2710f0de0012b673c8c7644f81ca0c93e839d22895f2ff29d"}, + {file = "google_api_core-1.22.0-py2.py3-none-any.whl", hash = "sha256:c4e3b3d914e09d181287abb7101b42f308204fa5e8f89efc4839f607303caa2f"}, ] google-auth = [ - {file = 
"google-auth-1.19.1.tar.gz", hash = "sha256:2b6bb2a6578233bb78f755fc7eb54e0b71f8eb1314d25baeef93b88ad10524fa"}, - {file = "google_auth-1.19.1-py2.py3-none-any.whl", hash = "sha256:167b1bcc51da05d32a1a843815967b4fa81a556e4c9c3377e766f136a2a30f65"}, + {file = "google-auth-1.19.2.tar.gz", hash = "sha256:f404448f3d3c91944b1d907427d4a0c48f465898e9dbacf1bdebf95c5fe03273"}, + {file = "google_auth-1.19.2-py2.py3-none-any.whl", hash = "sha256:15b42d57d6c3d868d318e8273c06b2692fc5aad1bc45989a4f68f1fee05d41b2"}, ] google-auth-oauthlib = [ {file = "google-auth-oauthlib-0.4.1.tar.gz", hash = "sha256:88d2cd115e3391eb85e1243ac6902e76e77c5fe438b7276b297fbe68015458dd"}, @@ -3817,8 +3299,8 @@ hpack = [ {file = "hpack-3.0.0.tar.gz", hash = "sha256:8eec9c1f4bfae3408a3f30500261f7e6a65912dc138526ea054f9ad98892e9d2"}, ] hstspreload = [ - {file = "hstspreload-2020.7.15-py3-none-any.whl", hash = "sha256:ad805d99ea5af9b4342e1c5afd742f0d941f8821ad3711c42f72b2d050ab5a19"}, - {file = "hstspreload-2020.7.15.tar.gz", hash = "sha256:71e536f0c54006f778dcb72f1b1f4027a9e9848997dafa9095949fb30cff24bb"}, + {file = "hstspreload-2020.7.22-py3-none-any.whl", hash = "sha256:79edbdbd09346b4c5cf729384498818943114b0a4f939a5f80abacbc47aa2197"}, + {file = "hstspreload-2020.7.22.tar.gz", hash = "sha256:7bc3d59d3f8c8dd03f0266f7bb309070e6a968edc19d29a812ebd49b280c5965"}, ] httplib2 = [ {file = "httplib2-0.18.1-py3-none-any.whl", hash = "sha256:ca2914b015b6247791c4866782fa6042f495b94401a0f0bd3e1d6e0ba2236782"}, @@ -3857,10 +3339,6 @@ idna = [ idna-ssl = [ {file = "idna-ssl-1.1.0.tar.gz", hash = "sha256:a933e3bb13da54383f9e8f35dc4f9cb9eb9b3b78c6b36f311254d6d0d92c6c7c"}, ] -imagesize = [ - {file = "imagesize-1.2.0-py2.py3-none-any.whl", hash = "sha256:6965f19a6a2039c7d48bca7dba2473069ff854c36ae6f19d2cde309d998228a1"}, - {file = "imagesize-1.2.0.tar.gz", hash = "sha256:b1f6b5a4eab1f73479a50fb79fcf729514a900c341d8503d62a62dbc4127a2b1"}, -] immutables = [ {file = "immutables-0.14-cp35-cp35m-macosx_10_14_x86_64.whl", 
hash = "sha256:860666fab142401a5535bf65cbd607b46bc5ed25b9d1eb053ca8ed9a1a1a80d6"}, {file = "immutables-0.14-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:ce01788878827c3f0331c254a4ad8d9721489a5e65cc43e19c80040b46e0d297"}, @@ -3894,10 +3372,6 @@ ipaddress = [ {file = "ipaddress-1.0.23-py2.py3-none-any.whl", hash = "sha256:6e0f4a39e66cb5bb9a137b00276a2eff74f93b71dcbdad6f10ff7df9d3557fcc"}, {file = "ipaddress-1.0.23.tar.gz", hash = "sha256:b7f8e0369580bb4a24d5ba1d7cc29660a4a6987763faf1d8a8046830e020e7e2"}, ] -ipython-genutils = [ - {file = "ipython_genutils-0.2.0-py2.py3-none-any.whl", hash = "sha256:72dd37233799e619666c9f639a9da83c34013a73e8bbc79a7a6348d93c61fab8"}, - {file = "ipython_genutils-0.2.0.tar.gz", hash = "sha256:eb2e116e75ecef9d4d228fdc66af54269afa26ab4463042e33785b887c628ba8"}, -] isodate = [ {file = "isodate-0.6.0-py2.py3-none-any.whl", hash = "sha256:aa4d33c06640f5352aca96e4b81afd8ab3b47337cc12089822d6f322ac772c81"}, {file = "isodate-0.6.0.tar.gz", hash = "sha256:2e364a3d5759479cdb2d37cce6b9376ea504db2ff90252a2e5b7cc89cc9ff2d8"}, @@ -3939,10 +3413,6 @@ jsonschema = [ junit-xml = [ {file = "junit_xml-1.9-py2.py3-none-any.whl", hash = "sha256:ec5ca1a55aefdd76d28fcc0b135251d156c7106fa979686a4b48d62b761b4732"}, ] -jupyter-core = [ - {file = "jupyter_core-4.6.3-py2.py3-none-any.whl", hash = "sha256:a4ee613c060fe5697d913416fc9d553599c05e4492d58fac1192c9a6844abb21"}, - {file = "jupyter_core-4.6.3.tar.gz", hash = "sha256:394fd5dd787e7c8861741880bdf8a00ce39f95de5d18e579c74b882522219e7e"}, -] jwcrypto = [ {file = "jwcrypto-0.7-py2.py3-none-any.whl", hash = "sha256:618ded1d25d3f806a1ab05cee42633a5a2787af33fca8d8f539b0aa1478b3728"}, {file = "jwcrypto-0.7.tar.gz", hash = "sha256:adbe1f6266cde35d40d5de6d1419612b3bd4c869b9332c88c9d7a9163d305100"}, @@ -3959,23 +3429,23 @@ kiwisolver = [ {file = "kiwisolver-1.2.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:443c2320520eda0a5b930b2725b26f6175ca4453c61f739fef7a5847bd262f74"}, {file = 
"kiwisolver-1.2.0-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:efcf3397ae1e3c3a4a0a0636542bcad5adad3b1dd3e8e629d0b6e201347176c8"}, {file = "kiwisolver-1.2.0-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:fccefc0d36a38c57b7bd233a9b485e2f1eb71903ca7ad7adacad6c28a56d62d2"}, + {file = "kiwisolver-1.2.0-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:be046da49fbc3aa9491cc7296db7e8d27bcf0c3d5d1a40259c10471b014e4e0c"}, {file = "kiwisolver-1.2.0-cp36-none-win32.whl", hash = "sha256:60a78858580761fe611d22127868f3dc9f98871e6fdf0a15cc4203ed9ba6179b"}, {file = "kiwisolver-1.2.0-cp36-none-win_amd64.whl", hash = "sha256:556da0a5f60f6486ec4969abbc1dd83cf9b5c2deadc8288508e55c0f5f87d29c"}, {file = "kiwisolver-1.2.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:7cc095a4661bdd8a5742aaf7c10ea9fac142d76ff1770a0f84394038126d8fc7"}, {file = "kiwisolver-1.2.0-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:c955791d80e464da3b471ab41eb65cf5a40c15ce9b001fdc5bbc241170de58ec"}, {file = "kiwisolver-1.2.0-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:603162139684ee56bcd57acc74035fceed7dd8d732f38c0959c8bd157f913fec"}, + {file = "kiwisolver-1.2.0-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:63f55f490b958b6299e4e5bdac66ac988c3d11b7fafa522800359075d4fa56d1"}, {file = "kiwisolver-1.2.0-cp37-none-win32.whl", hash = "sha256:03662cbd3e6729f341a97dd2690b271e51a67a68322affab12a5b011344b973c"}, {file = "kiwisolver-1.2.0-cp37-none-win_amd64.whl", hash = "sha256:4eadb361baf3069f278b055e3bb53fa189cea2fd02cb2c353b7a99ebb4477ef1"}, {file = "kiwisolver-1.2.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:c31bc3c8e903d60a1ea31a754c72559398d91b5929fcb329b1c3a3d3f6e72113"}, {file = "kiwisolver-1.2.0-cp38-cp38-manylinux1_i686.whl", hash = "sha256:d52b989dc23cdaa92582ceb4af8d5bcc94d74b2c3e64cd6785558ec6a879793e"}, {file = "kiwisolver-1.2.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:e586b28354d7b6584d8973656a7954b1c69c93f708c0c07b77884f91640b7657"}, + {file = 
"kiwisolver-1.2.0-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:38d05c9ecb24eee1246391820ed7137ac42a50209c203c908154782fced90e44"}, {file = "kiwisolver-1.2.0-cp38-none-win32.whl", hash = "sha256:d069ef4b20b1e6b19f790d00097a5d5d2c50871b66d10075dab78938dc2ee2cf"}, {file = "kiwisolver-1.2.0-cp38-none-win_amd64.whl", hash = "sha256:18d749f3e56c0480dccd1714230da0f328e6e4accf188dd4e6884bdd06bf02dd"}, {file = "kiwisolver-1.2.0.tar.gz", hash = "sha256:247800260cd38160c362d211dcaf4ed0f7816afb5efe56544748b21d6ad6d17f"}, ] -livereload = [ - {file = "livereload-2.6.2.tar.gz", hash = "sha256:d1eddcb5c5eb8d2ca1fa1f750e580da624c0f7fcb734aa5780dc81b7dcbd89be"}, -] markdown = [ {file = "Markdown-3.2.2-py3-none-any.whl", hash = "sha256:c467cd6233885534bf0fe96e62e3cf46cfc1605112356c4f9981512b8174de59"}, {file = "Markdown-3.2.2.tar.gz", hash = "sha256:1fafe3f1ecabfb514a5285fca634a53c1b32a81cb0feb154264d55bf2ff22c17"}, @@ -4040,10 +3510,6 @@ mccabe = [ {file = "mccabe-0.6.1-py2.py3-none-any.whl", hash = "sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42"}, {file = "mccabe-0.6.1.tar.gz", hash = "sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f"}, ] -mistune = [ - {file = "mistune-0.8.4-py2.py3-none-any.whl", hash = "sha256:88a1051873018da288eee8538d476dffe1262495144b33ecb586c4ab266bb8d4"}, - {file = "mistune-0.8.4.tar.gz", hash = "sha256:59a3429db53c50b5c6bcc8a07f8848cb00d7dc8bdb431a4ab41920d201d4756e"}, -] mock = [ {file = "mock-4.0.2-py3-none-any.whl", hash = "sha256:3f9b2c0196c60d21838f307f5825a7b86b678cedc58ab9e50a8988187b4d81e0"}, {file = "mock-4.0.2.tar.gz", hash = "sha256:dd33eb70232b6118298d516bbcecd26704689c386594f0f3c4f13867b2c56f72"}, @@ -4101,18 +3567,6 @@ murmurhash = [ {file = "murmurhash-1.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:f468e4868f78c3ac202a66abfe2866414bca4ae7666a21ef0938c423de0f7d50"}, {file = "murmurhash-1.0.2.tar.gz", hash = "sha256:c7a646f6b07b033642b4f52ae2e45efd8b80780b3b90e8092a0cec935fbf81e2"}, ] 
-nbconvert = [ - {file = "nbconvert-5.6.1-py2.py3-none-any.whl", hash = "sha256:f0d6ec03875f96df45aa13e21fd9b8450c42d7e1830418cccc008c0df725fcee"}, - {file = "nbconvert-5.6.1.tar.gz", hash = "sha256:21fb48e700b43e82ba0e3142421a659d7739b65568cc832a13976a77be16b523"}, -] -nbformat = [ - {file = "nbformat-5.0.7-py3-none-any.whl", hash = "sha256:ea55c9b817855e2dfcd3f66d74857342612a60b1f09653440f4a5845e6e3523f"}, - {file = "nbformat-5.0.7.tar.gz", hash = "sha256:54d4d6354835a936bad7e8182dcd003ca3dc0cedfee5a306090e04854343b340"}, -] -nbsphinx = [ - {file = "nbsphinx-0.7.1-py3-none-any.whl", hash = "sha256:560b23ff8468643b49e19293c154c93c6ee7090786922731e1c391bd566aac86"}, - {file = "nbsphinx-0.7.1.tar.gz", hash = "sha256:f50bd750e4ee3a4e4c3cf571155eab413dc87d581c1380021e7623205b5fa648"}, -] networkx = [ {file = "networkx-2.4-py3-none-any.whl", hash = "sha256:cdfbf698749a5014bf2ed9db4a07a5295df1d3a53bf80bf3cbd61edf9df05fa1"}, {file = "networkx-2.4.tar.gz", hash = "sha256:f8f4ff0b6f96e4f9b16af6b84622597b5334bf9cae8cf9b2e42e7985d5c95c64"}, @@ -4131,32 +3585,32 @@ ninja = [ {file = "ninja-1.10.0.post1.tar.gz", hash = "sha256:ddfac074ae408e42c617cd44f90a95bf6db94f0c846c95ef2a3a9a03438027a1"}, ] numpy = [ - {file = "numpy-1.19.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:63d971bb211ad3ca37b2adecdd5365f40f3b741a455beecba70fd0dde8b2a4cb"}, - {file = "numpy-1.19.0-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:b6aaeadf1e4866ca0fdf7bb4eed25e521ae21a7947c59f78154b24fc7abbe1dd"}, - {file = "numpy-1.19.0-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:13af0184177469192d80db9bd02619f6fa8b922f9f327e077d6f2a6acb1ce1c0"}, - {file = "numpy-1.19.0-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:356f96c9fbec59974a592452ab6a036cd6f180822a60b529a975c9467fcd5f23"}, - {file = "numpy-1.19.0-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:fa1fe75b4a9e18b66ae7f0b122543c42debcf800aaafa0212aaff3ad273c2596"}, - {file = "numpy-1.19.0-cp36-cp36m-manylinux2014_aarch64.whl", hash = 
"sha256:cbe326f6d364375a8e5a8ccb7e9cd73f4b2f6dc3b2ed205633a0db8243e2a96a"}, - {file = "numpy-1.19.0-cp36-cp36m-win32.whl", hash = "sha256:a2e3a39f43f0ce95204beb8fe0831199542ccab1e0c6e486a0b4947256215632"}, - {file = "numpy-1.19.0-cp36-cp36m-win_amd64.whl", hash = "sha256:7b852817800eb02e109ae4a9cef2beda8dd50d98b76b6cfb7b5c0099d27b52d4"}, - {file = "numpy-1.19.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:d97a86937cf9970453c3b62abb55a6475f173347b4cde7f8dcdb48c8e1b9952d"}, - {file = "numpy-1.19.0-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:a86c962e211f37edd61d6e11bb4df7eddc4a519a38a856e20a6498c319efa6b0"}, - {file = "numpy-1.19.0-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:d34fbb98ad0d6b563b95de852a284074514331e6b9da0a9fc894fb1cdae7a79e"}, - {file = "numpy-1.19.0-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:658624a11f6e1c252b2cd170d94bf28c8f9410acab9f2fd4369e11e1cd4e1aaf"}, - {file = "numpy-1.19.0-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:4d054f013a1983551254e2379385e359884e5af105e3efe00418977d02f634a7"}, - {file = "numpy-1.19.0-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:26a45798ca2a4e168d00de75d4a524abf5907949231512f372b217ede3429e98"}, - {file = "numpy-1.19.0-cp37-cp37m-win32.whl", hash = "sha256:3c40c827d36c6d1c3cf413694d7dc843d50997ebffbc7c87d888a203ed6403a7"}, - {file = "numpy-1.19.0-cp37-cp37m-win_amd64.whl", hash = "sha256:be62aeff8f2f054eff7725f502f6228298891fd648dc2630e03e44bf63e8cee0"}, - {file = "numpy-1.19.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:dd53d7c4a69e766e4900f29db5872f5824a06827d594427cf1a4aa542818b796"}, - {file = "numpy-1.19.0-cp38-cp38-manylinux1_i686.whl", hash = "sha256:30a59fb41bb6b8c465ab50d60a1b298d1cd7b85274e71f38af5a75d6c475d2d2"}, - {file = "numpy-1.19.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:df1889701e2dfd8ba4dc9b1a010f0a60950077fb5242bb92c8b5c7f1a6f2668a"}, - {file = "numpy-1.19.0-cp38-cp38-manylinux2010_i686.whl", hash = 
"sha256:33c623ef9ca5e19e05991f127c1be5aeb1ab5cdf30cb1c5cf3960752e58b599b"}, - {file = "numpy-1.19.0-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:26f509450db547e4dfa3ec739419b31edad646d21fb8d0ed0734188b35ff6b27"}, - {file = "numpy-1.19.0-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:7b57f26e5e6ee2f14f960db46bd58ffdca25ca06dd997729b1b179fddd35f5a3"}, - {file = "numpy-1.19.0-cp38-cp38-win32.whl", hash = "sha256:a8705c5073fe3fcc297fb8e0b31aa794e05af6a329e81b7ca4ffecab7f2b95ef"}, - {file = "numpy-1.19.0-cp38-cp38-win_amd64.whl", hash = "sha256:c2edbb783c841e36ca0fa159f0ae97a88ce8137fb3a6cd82eae77349ba4b607b"}, - {file = "numpy-1.19.0-pp36-pypy36_pp73-manylinux2010_x86_64.whl", hash = "sha256:8cde829f14bd38f6da7b2954be0f2837043e8b8d7a9110ec5e318ae6bf706610"}, - {file = "numpy-1.19.0.zip", hash = "sha256:76766cc80d6128750075378d3bb7812cf146415bd29b588616f72c943c00d598"}, + {file = "numpy-1.19.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:b1cca51512299841bf69add3b75361779962f9cee7d9ee3bb446d5982e925b69"}, + {file = "numpy-1.19.1-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:c9591886fc9cbe5532d5df85cb8e0cc3b44ba8ce4367bd4cf1b93dc19713da72"}, + {file = "numpy-1.19.1-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:cf1347450c0b7644ea142712619533553f02ef23f92f781312f6a3553d031fc7"}, + {file = "numpy-1.19.1-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:ed8a311493cf5480a2ebc597d1e177231984c818a86875126cfd004241a73c3e"}, + {file = "numpy-1.19.1-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:3673c8b2b29077f1b7b3a848794f8e11f401ba0b71c49fbd26fb40b71788b132"}, + {file = "numpy-1.19.1-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:56ef7f56470c24bb67fb43dae442e946a6ce172f97c69f8d067ff8550cf782ff"}, + {file = "numpy-1.19.1-cp36-cp36m-win32.whl", hash = "sha256:aaf42a04b472d12515debc621c31cf16c215e332242e7a9f56403d814c744624"}, + {file = "numpy-1.19.1-cp36-cp36m-win_amd64.whl", hash = 
"sha256:082f8d4dd69b6b688f64f509b91d482362124986d98dc7dc5f5e9f9b9c3bb983"}, + {file = "numpy-1.19.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:e4f6d3c53911a9d103d8ec9518190e52a8b945bab021745af4939cfc7c0d4a9e"}, + {file = "numpy-1.19.1-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:5b6885c12784a27e957294b60f97e8b5b4174c7504665333c5e94fbf41ae5d6a"}, + {file = "numpy-1.19.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:1bc0145999e8cb8aed9d4e65dd8b139adf1919e521177f198529687dbf613065"}, + {file = "numpy-1.19.1-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:5a936fd51049541d86ccdeef2833cc89a18e4d3808fe58a8abeb802665c5af93"}, + {file = "numpy-1.19.1-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:ef71a1d4fd4858596ae80ad1ec76404ad29701f8ca7cdcebc50300178db14dfc"}, + {file = "numpy-1.19.1-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:b9792b0ac0130b277536ab8944e7b754c69560dac0415dd4b2dbd16b902c8954"}, + {file = "numpy-1.19.1-cp37-cp37m-win32.whl", hash = "sha256:b12e639378c741add21fbffd16ba5ad25c0a1a17cf2b6fe4288feeb65144f35b"}, + {file = "numpy-1.19.1-cp37-cp37m-win_amd64.whl", hash = "sha256:8343bf67c72e09cfabfab55ad4a43ce3f6bf6e6ced7acf70f45ded9ebb425055"}, + {file = "numpy-1.19.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e45f8e981a0ab47103181773cc0a54e650b2aef8c7b6cd07405d0fa8d869444a"}, + {file = "numpy-1.19.1-cp38-cp38-manylinux1_i686.whl", hash = "sha256:667c07063940e934287993366ad5f56766bc009017b4a0fe91dbd07960d0aba7"}, + {file = "numpy-1.19.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:480fdd4dbda4dd6b638d3863da3be82873bba6d32d1fc12ea1b8486ac7b8d129"}, + {file = "numpy-1.19.1-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:935c27ae2760c21cd7354402546f6be21d3d0c806fffe967f745d5f2de5005a7"}, + {file = "numpy-1.19.1-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:309cbcfaa103fc9a33ec16d2d62569d541b79f828c382556ff072442226d1968"}, + {file = "numpy-1.19.1-cp38-cp38-manylinux2014_aarch64.whl", hash = 
"sha256:7ed448ff4eaffeb01094959b19cbaf998ecdee9ef9932381420d514e446601cd"}, + {file = "numpy-1.19.1-cp38-cp38-win32.whl", hash = "sha256:de8b4a9b56255797cbddb93281ed92acbc510fb7b15df3f01bd28f46ebc4edae"}, + {file = "numpy-1.19.1-cp38-cp38-win_amd64.whl", hash = "sha256:92feb989b47f83ebef246adabc7ff3b9a59ac30601c3f6819f8913458610bdcc"}, + {file = "numpy-1.19.1-pp36-pypy36_pp73-manylinux2010_x86_64.whl", hash = "sha256:e1b1dc0372f530f26a03578ac75d5e51b3868b9b76cd2facba4c9ee0eb252ab1"}, + {file = "numpy-1.19.1.zip", hash = "sha256:b8456987b637232602ceb4d663cb34106f7eb780e247d51a260b84760fd8f491"}, ] oauth2client = [ {file = "oauth2client-4.1.3-py2.py3-none-any.whl", hash = "sha256:b8a81cc5d60e2d364f0b1b98f958dbd472887acaf1a5b05e21c28c31a2d6d3ac"}, @@ -4167,23 +3621,17 @@ oauthlib = [ {file = "oauthlib-3.1.0.tar.gz", hash = "sha256:bee41cc35fcca6e988463cacc3bcb8a96224f470ca547e697b604cc697b2f889"}, ] opt-einsum = [ - {file = "opt_einsum-3.2.1-py3-none-any.whl", hash = "sha256:96f819d46da2f937eaf326336a114aaeccbcbdb9de460d42e8b5f480a69adca7"}, - {file = "opt_einsum-3.2.1.tar.gz", hash = "sha256:83b76a98d18ae6a5cc7a0d88955a7f74881f0e567a0f4c949d24c942753eb998"}, + {file = "opt_einsum-3.3.0-py3-none-any.whl", hash = "sha256:2455e59e3947d3c275477df7f5205b30635e266fe6dc300e3d9f9646bfcea147"}, + {file = "opt_einsum-3.3.0.tar.gz", hash = "sha256:59f6475f77bbc37dcf7cd748519c0ec60722e91e63ca114e68821c0c54a46549"}, ] packaging = [ {file = "packaging-20.4-py2.py3-none-any.whl", hash = "sha256:998416ba6962ae7fbd6596850b80e17859a5753ba17c32284f67bfff33784181"}, {file = "packaging-20.4.tar.gz", hash = "sha256:4357f74f47b9c12db93624a82154e9b120fa8293699949152b22065d556079f8"}, ] -pandocfilters = [ - {file = "pandocfilters-1.4.2.tar.gz", hash = "sha256:b3dd70e169bb5449e6bc6ff96aea89c5eea8c5f6ab5e207fc2f521a2cf4a0da9"}, -] pathspec = [ {file = "pathspec-0.8.0-py2.py3-none-any.whl", hash = "sha256:7d91249d21749788d07a2d0f94147accd8f845507400749ea19c1ec9054a12b0"}, {file = 
"pathspec-0.8.0.tar.gz", hash = "sha256:da45173eb3a6f2a5a487efba21f050af2b41948be6ab52b6a1e3ff22bb8b7061"}, ] -pathtools = [ - {file = "pathtools-0.1.2.tar.gz", hash = "sha256:7c35c5421a39bb82e58018febd90e3b6e5db34c5443aaaf742b3f33d4655f1c0"}, -] pep440-version-utils = [ {file = "pep440-version-utils-0.3.0.tar.gz", hash = "sha256:ceb8c8da63b54cc555946d91829f72fe323f8d635b22fa54ef0a9800c37f50df"}, {file = "pep440_version_utils-0.3.0-py3-none-any.whl", hash = "sha256:73780b2c31adad5ca35c89eb008f51c2a47aee0318debe31391b673b90577e1b"}, @@ -4200,9 +3648,6 @@ pluggy = [ {file = "pluggy-0.13.1-py2.py3-none-any.whl", hash = "sha256:966c145cd83c96502c3c3868f50408687b38434af77734af1e9ca461a4081d2d"}, {file = "pluggy-0.13.1.tar.gz", hash = "sha256:15b2acde666561e1298d71b523007ed7364de07029219b604cf808bfa1c765b0"}, ] -port-for = [ - {file = "port-for-0.3.1.tar.gz", hash = "sha256:b16a84bb29c2954db44c29be38b17c659c9c27e33918dec16b90d375cc596f1c"}, -] preshed = [ {file = "preshed-3.0.2-cp27-cp27m-macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl", hash = "sha256:448d9df12e63fe4a3024f6153ee6703bb95d2be0ce887b5eda7ddc41acfba825"}, {file = "preshed-3.0.2-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:633358f1fb0ec5dd6dbe4971c328d08809e5a8dbefdf13a802ae0a7cb45306c7"}, @@ -4328,10 +3773,6 @@ pyflakes = [ {file = "pyflakes-2.2.0-py2.py3-none-any.whl", hash = "sha256:0d94e0e05a19e57a99444b6ddcf9a6eb2e5c68d3ca1e98e90707af8152c90a92"}, {file = "pyflakes-2.2.0.tar.gz", hash = "sha256:35b2d75ee967ea93b55750aa9edbbf72813e06a66ba54438df2cfac9e3c27fc8"}, ] -pygments = [ - {file = "Pygments-2.6.1-py3-none-any.whl", hash = "sha256:ff7a40b4860b727ab48fad6360eb351cc1b33cbf9b15a0f689ca5353e9463324"}, - {file = "Pygments-2.6.1.tar.gz", hash = "sha256:647344a061c249a3b74e230c739f434d7ea4d8b1d5f3721bc0f3558049b38f44"}, -] pyjwt = [ {file = "PyJWT-1.7.1-py2.py3-none-any.whl", hash = 
"sha256:5c6eca3c2940464d106b99ba83b00c6add741c9becaec087fb7ccdefea71350e"}, {file = "PyJWT-1.7.1.tar.gz", hash = "sha256:8d59a976fb773f3e6a39c85636357c4f0e242707394cadadd9814f5cbaa20e96"}, @@ -4495,13 +3936,13 @@ python-telegram-bot = [ {file = "python_telegram_bot-12.8-py2.py3-none-any.whl", hash = "sha256:7eebed539ccacf77896cff9e41d1f68746b8ff3ca4da1e2e59285e9c749cb050"}, ] pytype = [ - {file = "pytype-2020.7.14-cp35-cp35m-macosx_10_14_x86_64.whl", hash = "sha256:9d8e383ab83945df787a69c433b0bede3f69acebb5d70eec7a83a08c5637995f"}, - {file = "pytype-2020.7.14-cp35-cp35m-manylinux2014_x86_64.whl", hash = "sha256:911ae7339fee729708136e999e9919b72c0e67f297cdbcb78ac373f5ffaea40c"}, - {file = "pytype-2020.7.14-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:60eaabb395995c9c1b7430e54e65fbaca348cd7f975d2588e8c23773ffc96f2a"}, - {file = "pytype-2020.7.14-cp36-cp36m-manylinux2014_x86_64.whl", hash = "sha256:abca0ea63898b09d444d9d42a59bf1eedc2331f89b79437663d528ab92c3c6f5"}, - {file = "pytype-2020.7.14-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:b14a37bd0f376990404b57eff7415e175ff403e13dd429e89e15e7e12da4df12"}, - {file = "pytype-2020.7.14-cp37-cp37m-manylinux2014_x86_64.whl", hash = "sha256:5d39e4a287ad6488475e1e2b1f8aae9c4a1c70c67b7ee3d1185a2ceefe811775"}, - {file = "pytype-2020.7.14.tar.gz", hash = "sha256:31b8cc522d844971543845b2683a34ddefa83e3b6895dc970e9a8d9254219d97"}, + {file = "pytype-2020.7.20-cp35-cp35m-macosx_10_14_x86_64.whl", hash = "sha256:783cc42579f034c62c662c4bcefe37a9e6df989dca12f87f64e40e5688561238"}, + {file = "pytype-2020.7.20-cp35-cp35m-manylinux2014_x86_64.whl", hash = "sha256:8bbd58aab7ca50f4eb6a03d69939b81415bc81479638d2ebabf7f0e63cb607d0"}, + {file = "pytype-2020.7.20-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:8da8eb89e6ec0b0f4e546c6e463efd31b3df9c6a2a1ce160f5f457c4e9de28ee"}, + {file = "pytype-2020.7.20-cp36-cp36m-manylinux2014_x86_64.whl", hash = "sha256:e93614bb3e38c9e2ed3b4646bd2a726c2b4d2b8a92bf74a1a76d435d67ef3ced"}, + 
{file = "pytype-2020.7.20-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:70ae9014de5609b2f00e19480671b1eacc048d46e458e53bc965fe324d57c87f"}, + {file = "pytype-2020.7.20-cp37-cp37m-manylinux2014_x86_64.whl", hash = "sha256:0cdc97996759e9bf1389a3b0644c503fc38f581572a213c384511bf33d910d93"}, + {file = "pytype-2020.7.20.tar.gz", hash = "sha256:2382e535b371338ea18f1fd5ab06e01bec4078ca62d5118f84d61d76a9413eda"}, ] pytz = [ {file = "pytz-2020.1-py2.py3-none-any.whl", hash = "sha256:a494d53b6d39c3c6e44c3bec237336e14305e4f29bbf800b599253057fbb79ed"}, @@ -4542,7 +3983,6 @@ rasa-sdk = [ {file = "rasa-sdk-2.0.0a1.tar.gz", hash = "sha256:9108e131117df183c858dc1dcf5ca955661281e65e1b821e2635ca6b9ebe803f"}, {file = "rasa_sdk-2.0.0a1-py3-none-any.whl", hash = "sha256:fa413e549d38f14ce24bfc0eb6971ce5c1be37204c43c2b36d4d75a3bd1ab672"}, ] -rasabaster = [] redis = [ {file = "redis-3.5.3-py2.py3-none-any.whl", hash = "sha256:432b788c4530cfe16d8d943a09d40ca6c16149727e4afe8c2c9d5580c59d9f24"}, {file = "redis-3.5.3.tar.gz", hash = "sha256:0e7e0cfca8660dea8b7d5cd8c4f6c5e29e11f31158c0b0ae91a397f00e5a05a2"}, @@ -4596,7 +4036,7 @@ rocketchat-api = [ {file = "rocketchat_API-1.4-py3-none-any.whl", hash = "sha256:8a5826a972547a6ffb09db171e1ad9990eebf32ed396a3e1bec1f13c18577311"}, ] rsa = [ - {file = "rsa-4.6-py2.py3-none-any.whl", hash = "sha256:23778f5523461cf86ae075f9482a99317f362bca752ae57cb118044066f4026f"}, + {file = "rsa-4.6-py3-none-any.whl", hash = "sha256:6166864e23d6b5195a5cfed6cd9fed0fe774e226d8f854fcb23b7bbef0350233"}, {file = "rsa-4.6.tar.gz", hash = "sha256:109ea5a66744dd859bf16fe904b8d8b627adafb9408753161e766a92e7d681fa"}, ] "ruamel.yaml" = [ @@ -4725,17 +4165,13 @@ sklearn-crfsuite = [ {file = "sklearn_crfsuite-0.3.6-py2.py3-none-any.whl", hash = "sha256:6e9a42bc3de96941d5f7262335130955b8c380b1356147622368f385075705d9"}, ] slackclient = [ - {file = "slackclient-2.7.2-py2.py3-none-any.whl", hash = "sha256:475d97788a909a809737d962566165a00c40b355dfb56a1c39ce073faefbca8e"}, - 
{file = "slackclient-2.7.2.tar.gz", hash = "sha256:a2d0710a1c2c99989e944a6c2d0ca1d567cbf255c7c0e25a17633cd530f3ae73"}, + {file = "slackclient-2.7.3-py2.py3-none-any.whl", hash = "sha256:766e10d1b78b9660d644a4e70b4cd386c372ae69711b0f53c756171007fa3ae6"}, + {file = "slackclient-2.7.3.tar.gz", hash = "sha256:cb29c0b3cddad67f8369df82aadb5e031e7457ec3c45c0c86cec10c31209f4b6"}, ] sniffio = [ {file = "sniffio-1.1.0-py3-none-any.whl", hash = "sha256:20ed6d5b46f8ae136d00b9dcb807615d83ed82ceea6b2058cecb696765246da5"}, {file = "sniffio-1.1.0.tar.gz", hash = "sha256:8e3810100f69fe0edd463d02ad407112542a11ffdc29f67db2bf3771afb87a21"}, ] -snowballstemmer = [ - {file = "snowballstemmer-2.0.0-py2.py3-none-any.whl", hash = "sha256:209f257d7533fdb3cb73bdbd24f436239ca3b2fa67d56f6ff88e86be08cc5ef0"}, - {file = "snowballstemmer-2.0.0.tar.gz", hash = "sha256:df3bac3df4c2c01363f3dd2cfa78cce2840a79b9f1c2d2de9ce8d31683992f52"}, -] sortedcontainers = [ {file = "sortedcontainers-2.2.2-py2.py3-none-any.whl", hash = "sha256:c633ebde8580f241f274c1f8994a665c0e54a17724fecd0cae2f079e09c36d3f"}, {file = "sortedcontainers-2.2.2.tar.gz", hash = "sha256:4e73a757831fc3ca4de2859c422564239a31d8213d09a2a666e375807034d2ba"}, @@ -4752,63 +4188,6 @@ spacy = [ {file = "spacy-2.2.4-cp38-cp38-win_amd64.whl", hash = "sha256:877d8e157a708c8b77c0dea61e526632f6d57f27be64087dac22a4581facea68"}, {file = "spacy-2.2.4.tar.gz", hash = "sha256:f0f3a67c5841e6e35d62c98f40ebb3d132587d3aba4f4dccac5056c4e90ff5b9"}, ] -sphinx = [ - {file = "Sphinx-3.1.2-py3-none-any.whl", hash = "sha256:97dbf2e31fc5684bb805104b8ad34434ed70e6c588f6896991b2fdfd2bef8c00"}, - {file = "Sphinx-3.1.2.tar.gz", hash = "sha256:b9daeb9b39aa1ffefc2809b43604109825300300b987a24f45976c001ba1a8fd"}, -] -sphinx-autobuild = [ - {file = "sphinx-autobuild-0.7.1.tar.gz", hash = "sha256:66388f81884666e3821edbe05dd53a0cfb68093873d17320d0610de8db28c74e"}, - {file = "sphinx_autobuild-0.7.1-py2-none-any.whl", hash = 
"sha256:e60aea0789cab02fa32ee63c7acae5ef41c06f1434d9fd0a74250a61f5994692"}, -] -sphinx-autodoc-typehints = [ - {file = "sphinx-autodoc-typehints-1.10.3.tar.gz", hash = "sha256:a6b3180167479aca2c4d1ed3b5cb044a70a76cccd6b38662d39288ebd9f0dff0"}, - {file = "sphinx_autodoc_typehints-1.10.3-py3-none-any.whl", hash = "sha256:27c9e6ef4f4451766ab8d08b2d8520933b97beb21c913f3df9ab2e59b56e6c6c"}, -] -sphinx-rtd-theme = [] -sphinx-tabs = [ - {file = "sphinx-tabs-1.1.13.tar.gz", hash = "sha256:7ad881daa4d18799b254db4aa7feeb9d30256cbccf7d4f3de746d9fcc14e0196"}, -] -sphinxcontrib-applehelp = [ - {file = "sphinxcontrib-applehelp-1.0.2.tar.gz", hash = "sha256:a072735ec80e7675e3f432fcae8610ecf509c5f1869d17e2eecff44389cdbc58"}, - {file = "sphinxcontrib_applehelp-1.0.2-py2.py3-none-any.whl", hash = "sha256:806111e5e962be97c29ec4c1e7fe277bfd19e9652fb1a4392105b43e01af885a"}, -] -sphinxcontrib-devhelp = [ - {file = "sphinxcontrib-devhelp-1.0.2.tar.gz", hash = "sha256:ff7f1afa7b9642e7060379360a67e9c41e8f3121f2ce9164266f61b9f4b338e4"}, - {file = "sphinxcontrib_devhelp-1.0.2-py2.py3-none-any.whl", hash = "sha256:8165223f9a335cc1af7ffe1ed31d2871f325254c0423bc0c4c7cd1c1e4734a2e"}, -] -sphinxcontrib-htmlhelp = [ - {file = "sphinxcontrib-htmlhelp-1.0.3.tar.gz", hash = "sha256:e8f5bb7e31b2dbb25b9cc435c8ab7a79787ebf7f906155729338f3156d93659b"}, - {file = "sphinxcontrib_htmlhelp-1.0.3-py2.py3-none-any.whl", hash = "sha256:3c0bc24a2c41e340ac37c85ced6dafc879ab485c095b1d65d2461ac2f7cca86f"}, -] -sphinxcontrib-httpdomain = [ - {file = "sphinxcontrib-httpdomain-1.7.0.tar.gz", hash = "sha256:ac40b4fba58c76b073b03931c7b8ead611066a6aebccafb34dc19694f4eb6335"}, - {file = "sphinxcontrib_httpdomain-1.7.0-py2.py3-none-any.whl", hash = "sha256:1fb5375007d70bf180cdd1c79e741082be7aa2d37ba99efe561e1c2e3f38191e"}, -] -sphinxcontrib-jsmath = [ - {file = "sphinxcontrib-jsmath-1.0.1.tar.gz", hash = "sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8"}, - {file = 
"sphinxcontrib_jsmath-1.0.1-py2.py3-none-any.whl", hash = "sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178"}, -] -sphinxcontrib-programoutput = [ - {file = "sphinxcontrib-programoutput-0.16.tar.gz", hash = "sha256:0caaa216d0ad8d2cfa90a9a9dba76820e376da6e3152be28d10aedc09f82a3b0"}, - {file = "sphinxcontrib_programoutput-0.16-py2.py3-none-any.whl", hash = "sha256:8009d1326b89cd029ee477ce32b45c58d92b8504d48811461c3117014a8f4b1e"}, -] -sphinxcontrib-qthelp = [ - {file = "sphinxcontrib-qthelp-1.0.3.tar.gz", hash = "sha256:4c33767ee058b70dba89a6fc5c1892c0d57a54be67ddd3e7875a18d14cba5a72"}, - {file = "sphinxcontrib_qthelp-1.0.3-py2.py3-none-any.whl", hash = "sha256:bd9fc24bcb748a8d51fd4ecaade681350aa63009a347a8c14e637895444dfab6"}, -] -sphinxcontrib-serializinghtml = [ - {file = "sphinxcontrib-serializinghtml-1.1.4.tar.gz", hash = "sha256:eaa0eccc86e982a9b939b2b82d12cc5d013385ba5eadcc7e4fed23f4405f77bc"}, - {file = "sphinxcontrib_serializinghtml-1.1.4-py2.py3-none-any.whl", hash = "sha256:f242a81d423f59617a8e5cf16f5d4d74e28ee9a66f9e5b637a18082991db5a9a"}, -] -sphinxcontrib-trio = [ - {file = "sphinxcontrib-trio-1.1.2.tar.gz", hash = "sha256:9f1ba9c1d5965b534e85258d8b677dd94e9b1a9a2e918b85ccd42590596b47c0"}, - {file = "sphinxcontrib_trio-1.1.2-py3-none-any.whl", hash = "sha256:1b849be08a147ef4113e35c191a51c5792613a9a54697b497cd91656d906a232"}, -] -sphinxcontrib-versioning = [] -sphinxcontrib-websupport = [ - {file = "sphinxcontrib-websupport-1.2.3.tar.gz", hash = "sha256:ee1d43e6e0332558a66fcb4005b9ba7313ad9764d0df0e6703ae869a028e451f"}, - {file = "sphinxcontrib_websupport-1.2.3-py2.py3-none-any.whl", hash = "sha256:a2100b79096bbaea5a41e03261cee279d19c803218b9401a1d84ef6aeae17338"}, -] sqlalchemy = [ {file = "SQLAlchemy-1.3.18-cp27-cp27m-macosx_10_14_x86_64.whl", hash = "sha256:f11c2437fb5f812d020932119ba02d9e2bc29a6eca01a055233a8b449e3e1e7d"}, {file = "SQLAlchemy-1.3.18-cp27-cp27m-manylinux1_x86_64.whl", hash = 
"sha256:0ec575db1b54909750332c2e335c2bb11257883914a03bc5a3306a4488ecc772"}, @@ -4920,10 +4299,6 @@ termcolor = [ terminaltables = [ {file = "terminaltables-3.1.0.tar.gz", hash = "sha256:f3eb0eb92e3833972ac36796293ca0906e998dc3be91fbe1f8615b331b853b81"}, ] -testpath = [ - {file = "testpath-0.4.4-py2.py3-none-any.whl", hash = "sha256:bfcf9411ef4bf3db7579063e0546938b1edda3d69f4e1fb8756991f5951f85d4"}, - {file = "testpath-0.4.4.tar.gz", hash = "sha256:60e0a3261c149755f4399a1fff7d37523179a70fdc3abdf78de9fc2604aeec7e"}, -] thinc = [ {file = "thinc-7.4.0-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:9c40101f3148405cb291be2033758d011d348a5dea5d151811def8d1e466f25a"}, {file = "thinc-7.4.0-cp35-cp35m-win_amd64.whl", hash = "sha256:ebb81b7ff8f852aae1b9c26dfb629344ab962e221ec87c83b2a7c4aec337477d"}, @@ -4984,10 +4359,6 @@ tqdm = [ {file = "tqdm-4.47.0-py2.py3-none-any.whl", hash = "sha256:7810e627bcf9d983a99d9ff8a0c09674400fd2927eddabeadf153c14a2ec8656"}, {file = "tqdm-4.47.0.tar.gz", hash = "sha256:63ef7a6d3eb39f80d6b36e4867566b3d8e5f1fe3d6cb50c5e9ede2b3198ba7b7"}, ] -traitlets = [ - {file = "traitlets-4.3.3-py2.py3-none-any.whl", hash = "sha256:70b4c6a1d9019d7b4f6846832288f86998aa3b9207c6821f3578a6a6a467fe44"}, - {file = "traitlets-4.3.3.tar.gz", hash = "sha256:d023ee369ddd2763310e4c3eae1ff649689440d4ae59d7485eb4cfbbe3e359f7"}, -] transformers = [ {file = "transformers-2.11.0-py3-none-any.whl", hash = "sha256:b3e5198266f2a4b14841c70427cad46b89f473e6b0d0d3ab7461bf775f31631d"}, {file = "transformers-2.11.0.tar.gz", hash = "sha256:8de20f03a94ebf16d98610a7df0acc6ba68c80bd44605cf5ad4300c642a7b57a"}, @@ -5070,19 +4441,12 @@ uvloop = [ {file = "uvloop-0.14.0.tar.gz", hash = "sha256:123ac9c0c7dd71464f58f1b4ee0bbd81285d96cdda8bc3519281b8973e3a461e"}, ] wasabi = [ - {file = "wasabi-0.7.0.tar.gz", hash = "sha256:e875f11d7126a2796999ff7f092195f24005edbd90b32b2df16dde5d392ecc8c"}, -] -watchdog = [ - {file = "watchdog-0.10.3.tar.gz", hash = 
"sha256:4214e1379d128b0588021880ccaf40317ee156d4603ac388b9adcf29165e0c04"}, + {file = "wasabi-0.7.1.tar.gz", hash = "sha256:ee3809f4ce00e1e7f424b1572c753cff0dcaca2ca684e67e31f985033a9f070b"}, ] wcwidth = [ {file = "wcwidth-0.2.5-py2.py3-none-any.whl", hash = "sha256:beb4802a9cebb9144e99086eff703a642a13d6a0052920003a230f3294bbe784"}, {file = "wcwidth-0.2.5.tar.gz", hash = "sha256:c4d647b99872929fdb7bdcaa4fbe7f01413ed3d98077df798530e5b04f116c83"}, ] -webencodings = [ - {file = "webencodings-0.5.1-py2.py3-none-any.whl", hash = "sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78"}, - {file = "webencodings-0.5.1.tar.gz", hash = "sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923"}, -] webexteamssdk = [ {file = "webexteamssdk-1.3.tar.gz", hash = "sha256:161e0bbc9b7b044f9b765b9b9767642740e3421a428ec7bfc34b1b8e25437127"}, ] diff --git a/pyproject.toml b/pyproject.toml index e63e1150759a..372caa51aba6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -136,7 +136,6 @@ pytest-xdist = "^1.32.0" pytest = "^5.3.4" freezegun = "^0.3.14" responses = "^0.10.15" -nbsphinx = "~0.7" aioresponses = "^0.6.2" moto = "==1.3.14" fakeredis = "^1.4.0" @@ -149,16 +148,6 @@ azure-storage-blob = "<12.4.0" coveralls = "^2.0.0" towncrier = "^19.2.0" toml = "^0.10.0" -sphinx = "^3.1.1" -sphinx-autobuild = "~0.7.1" -sphinxcontrib-programoutput = "==0.16" -pygments = "^2.6.1" -sphinxcontrib-httpdomain = "==1.7.0" -sphinxcontrib-websupport = "^1.1.0" -sphinxcontrib-trio = "==1.1.2" -sphinx-tabs = "~1.1.13" -sphinx-autodoc-typehints = "==1.10.3" -rasabaster = "^0.7.23" pep440-version-utils = "^0.3.0" [tool.poetry.extras] @@ -194,9 +183,3 @@ optional = true [tool.poetry.dependencies.pymongo] version = ">=3.8,<3.11" extras = [ "tls", "srv",] - -[tool.poetry.dev-dependencies.sphinxcontrib-versioning] -git = "https://github.com/RasaHQ/sphinxcontrib-versioning.git" - -[tool.poetry.dev-dependencies.sphinx_rtd_theme] -git = 
"https://github.com/RasaHQ/sphinx_rtd_theme.git" diff --git a/setup.cfg b/setup.cfg index 8abcc1eed4eb..455ee150056f 100644 --- a/setup.cfg +++ b/setup.cfg @@ -7,8 +7,6 @@ python_functions=test_ codestyle_max_line_length = 88 codestyle_ignore = E302 W503 E203 E501 E265 E402 E251 E211 codestyle_exclude = - docs/core/conf.py - docs/nlu/conf.py rasa/core/policies/tf_utils.py rasa/core/policies/__init__.py filterwarnings =