diff --git a/pyextra/Flask-1.0.2-py2.7.egg-info/PKG-INFO b/pyextra/Flask-1.0.2-py2.7.egg-info/PKG-INFO deleted file mode 100644 index 59a627f61..000000000 --- a/pyextra/Flask-1.0.2-py2.7.egg-info/PKG-INFO +++ /dev/null @@ -1,103 +0,0 @@ -Metadata-Version: 1.1 -Name: Flask -Version: 1.0.2 -Summary: A simple framework for building complex web applications. -Home-page: https://www.palletsprojects.com/p/flask/ -Author: Pallets team -Author-email: contact@palletsprojects.com -License: BSD -Description: Flask - ===== - - Flask is a lightweight `WSGI`_ web application framework. It is designed - to make getting started quick and easy, with the ability to scale up to - complex applications. It began as a simple wrapper around `Werkzeug`_ - and `Jinja`_ and has become one of the most popular Python web - application frameworks. - - Flask offers suggestions, but doesn't enforce any dependencies or - project layout. It is up to the developer to choose the tools and - libraries they want to use. There are many extensions provided by the - community that make adding new functionality easy. - - - Installing - ---------- - - Install and update using `pip`_: - - .. code-block:: text - - pip install -U Flask - - - A Simple Example - ---------------- - - .. code-block:: python - - from flask import Flask - - app = Flask(__name__) - - @app.route('/') - def hello(): - return 'Hello, World!' - - .. code-block:: text - - $ FLASK_APP=hello.py flask run - * Serving Flask app "hello" - * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit) - - - Donate - ------ - - The Pallets organization develops and supports Flask and the libraries - it uses. In order to grow the community of contributors and users, and - allow the maintainers to devote more time to the projects, `please - donate today`_. - - .. _please donate today: https://psfmember.org/civicrm/contribute/transact?reset=1&id=20 - - - Links - ----- - - * Website: https://www.palletsprojects.com/p/flask/ - * Documentation: http://flask.pocoo.org/docs/ - * License: `BSD `_ - * Releases: https://pypi.org/project/Flask/ - * Code: https://github.com/pallets/flask - * Issue tracker: https://github.com/pallets/flask/issues - * Test status: - - * Linux, Mac: https://travis-ci.org/pallets/flask - * Windows: https://ci.appveyor.com/project/pallets/flask - - * Test coverage: https://codecov.io/gh/pallets/flask - - .. _WSGI: https://wsgi.readthedocs.io - .. _Werkzeug: https://www.palletsprojects.com/p/werkzeug/ - .. _Jinja: https://www.palletsprojects.com/p/jinja/ - .. 
_pip: https://pip.pypa.io/en/stable/quickstart/ - -Platform: any -Classifier: Development Status :: 5 - Production/Stable -Classifier: Environment :: Web Environment -Classifier: Framework :: Flask -Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved :: BSD License -Classifier: Operating System :: OS Independent -Classifier: Programming Language :: Python -Classifier: Programming Language :: Python :: 2 -Classifier: Programming Language :: Python :: 2.7 -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.4 -Classifier: Programming Language :: Python :: 3.5 -Classifier: Programming Language :: Python :: 3.6 -Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content -Classifier: Topic :: Internet :: WWW/HTTP :: WSGI :: Application -Classifier: Topic :: Software Development :: Libraries :: Application Frameworks -Classifier: Topic :: Software Development :: Libraries :: Python Modules diff --git a/pyextra/Flask-1.0.2-py2.7.egg-info/SOURCES.txt b/pyextra/Flask-1.0.2-py2.7.egg-info/SOURCES.txt deleted file mode 100644 index 52f6db191..000000000 --- a/pyextra/Flask-1.0.2-py2.7.egg-info/SOURCES.txt +++ /dev/null @@ -1,223 +0,0 @@ -AUTHORS -CHANGES.rst -LICENSE -MANIFEST.in -Makefile -README.rst -setup.cfg -setup.py -tox.ini -Flask.egg-info/PKG-INFO -Flask.egg-info/SOURCES.txt -Flask.egg-info/dependency_links.txt -Flask.egg-info/entry_points.txt -Flask.egg-info/not-zip-safe -Flask.egg-info/requires.txt -Flask.egg-info/top_level.txt -artwork/LICENSE -artwork/logo-full.svg -artwork/logo-lineart.svg -docs/Makefile -docs/advanced_foreword.rst -docs/api.rst -docs/appcontext.rst -docs/becomingbig.rst -docs/blueprints.rst -docs/changelog.rst -docs/cli.rst -docs/conf.py -docs/config.rst -docs/contents.rst.inc -docs/contributing.rst -docs/design.rst -docs/errorhandling.rst -docs/extensiondev.rst -docs/extensions.rst -docs/flaskstyle.sty -docs/foreword.rst -docs/htmlfaq.rst -docs/index.rst -docs/installation.rst -docs/latexindex.rst -docs/license.rst -docs/logging.rst -docs/logo.pdf -docs/make.bat -docs/quickstart.rst -docs/reqcontext.rst -docs/security.rst -docs/server.rst -docs/shell.rst -docs/signals.rst -docs/styleguide.rst -docs/templating.rst -docs/testing.rst -docs/unicode.rst -docs/upgrading.rst -docs/views.rst -docs/_static/debugger.png -docs/_static/flask-favicon.ico -docs/_static/flask.png -docs/_static/logo-full.png -docs/_static/no.png -docs/_static/pycharm-runconfig.png -docs/_static/touch-icon.png -docs/_static/yes.png -docs/deploying/cgi.rst -docs/deploying/fastcgi.rst -docs/deploying/index.rst -docs/deploying/mod_wsgi.rst -docs/deploying/uwsgi.rst -docs/deploying/wsgi-standalone.rst -docs/patterns/apierrors.rst -docs/patterns/appdispatch.rst -docs/patterns/appfactories.rst -docs/patterns/caching.rst -docs/patterns/celery.rst -docs/patterns/deferredcallbacks.rst -docs/patterns/distribute.rst -docs/patterns/errorpages.rst -docs/patterns/fabric.rst -docs/patterns/favicon.rst -docs/patterns/fileuploads.rst -docs/patterns/flashing.rst -docs/patterns/index.rst -docs/patterns/jquery.rst -docs/patterns/lazyloading.rst -docs/patterns/methodoverrides.rst -docs/patterns/mongokit.rst -docs/patterns/packages.rst -docs/patterns/requestchecksum.rst -docs/patterns/sqlalchemy.rst -docs/patterns/sqlite3.rst -docs/patterns/streaming.rst -docs/patterns/subclassing.rst -docs/patterns/templateinheritance.rst -docs/patterns/urlprocessors.rst -docs/patterns/viewdecorators.rst -docs/patterns/wtforms.rst -docs/tutorial/blog.rst 
-docs/tutorial/database.rst -docs/tutorial/deploy.rst -docs/tutorial/factory.rst -docs/tutorial/flaskr_edit.png -docs/tutorial/flaskr_index.png -docs/tutorial/flaskr_login.png -docs/tutorial/index.rst -docs/tutorial/install.rst -docs/tutorial/layout.rst -docs/tutorial/next.rst -docs/tutorial/static.rst -docs/tutorial/templates.rst -docs/tutorial/tests.rst -docs/tutorial/views.rst -examples/javascript/.gitignore -examples/javascript/LICENSE -examples/javascript/MANIFEST.in -examples/javascript/README.rst -examples/javascript/setup.cfg -examples/javascript/setup.py -examples/javascript/js_example/__init__.py -examples/javascript/js_example/views.py -examples/javascript/js_example/templates/base.html -examples/javascript/js_example/templates/fetch.html -examples/javascript/js_example/templates/jquery.html -examples/javascript/js_example/templates/plain.html -examples/javascript/tests/conftest.py -examples/javascript/tests/test_js_example.py -examples/tutorial/.gitignore -examples/tutorial/LICENSE -examples/tutorial/MANIFEST.in -examples/tutorial/README.rst -examples/tutorial/setup.cfg -examples/tutorial/setup.py -examples/tutorial/flaskr/__init__.py -examples/tutorial/flaskr/auth.py -examples/tutorial/flaskr/blog.py -examples/tutorial/flaskr/db.py -examples/tutorial/flaskr/schema.sql -examples/tutorial/flaskr/static/style.css -examples/tutorial/flaskr/templates/base.html -examples/tutorial/flaskr/templates/auth/login.html -examples/tutorial/flaskr/templates/auth/register.html -examples/tutorial/flaskr/templates/blog/create.html -examples/tutorial/flaskr/templates/blog/index.html -examples/tutorial/flaskr/templates/blog/update.html -examples/tutorial/tests/conftest.py -examples/tutorial/tests/data.sql -examples/tutorial/tests/test_auth.py -examples/tutorial/tests/test_blog.py -examples/tutorial/tests/test_db.py -examples/tutorial/tests/test_factory.py -flask/__init__.py -flask/__main__.py -flask/_compat.py -flask/app.py -flask/blueprints.py -flask/cli.py -flask/config.py -flask/ctx.py -flask/debughelpers.py -flask/globals.py -flask/helpers.py -flask/logging.py -flask/sessions.py -flask/signals.py -flask/templating.py -flask/testing.py -flask/views.py -flask/wrappers.py -flask/json/__init__.py -flask/json/tag.py -tests/conftest.py -tests/test_appctx.py -tests/test_basic.py -tests/test_blueprints.py -tests/test_cli.py -tests/test_config.py -tests/test_helpers.py -tests/test_instance_config.py -tests/test_json_tag.py -tests/test_logging.py -tests/test_regression.py -tests/test_reqctx.py -tests/test_signals.py -tests/test_subclassing.py -tests/test_templating.py -tests/test_testing.py -tests/test_user_error_handler.py -tests/test_views.py -tests/static/config.json -tests/static/index.html -tests/templates/_macro.html -tests/templates/context_template.html -tests/templates/escaping_template.html -tests/templates/mail.txt -tests/templates/non_escaping_template.txt -tests/templates/simple_template.html -tests/templates/template_filter.html -tests/templates/template_test.html -tests/templates/nested/nested.txt -tests/test_apps/.env -tests/test_apps/.flaskenv -tests/test_apps/blueprintapp/__init__.py -tests/test_apps/blueprintapp/apps/__init__.py -tests/test_apps/blueprintapp/apps/admin/__init__.py -tests/test_apps/blueprintapp/apps/admin/static/test.txt -tests/test_apps/blueprintapp/apps/admin/static/css/test.css -tests/test_apps/blueprintapp/apps/admin/templates/admin/index.html -tests/test_apps/blueprintapp/apps/frontend/__init__.py 
-tests/test_apps/blueprintapp/apps/frontend/templates/frontend/index.html -tests/test_apps/cliapp/__init__.py -tests/test_apps/cliapp/app.py -tests/test_apps/cliapp/factory.py -tests/test_apps/cliapp/importerrorapp.py -tests/test_apps/cliapp/message.txt -tests/test_apps/cliapp/multiapp.py -tests/test_apps/cliapp/inner1/__init__.py -tests/test_apps/cliapp/inner1/inner2/__init__.py -tests/test_apps/cliapp/inner1/inner2/flask.py -tests/test_apps/helloworld/hello.py -tests/test_apps/helloworld/wsgi.py -tests/test_apps/subdomaintestmodule/__init__.py -tests/test_apps/subdomaintestmodule/static/hello.txt \ No newline at end of file diff --git a/pyextra/Flask-1.0.2-py2.7.egg-info/dependency_links.txt b/pyextra/Flask-1.0.2-py2.7.egg-info/dependency_links.txt deleted file mode 100644 index 8b1378917..000000000 --- a/pyextra/Flask-1.0.2-py2.7.egg-info/dependency_links.txt +++ /dev/null @@ -1 +0,0 @@ - diff --git a/pyextra/Flask-1.0.2-py2.7.egg-info/entry_points.txt b/pyextra/Flask-1.0.2-py2.7.egg-info/entry_points.txt deleted file mode 100644 index 1eb025200..000000000 --- a/pyextra/Flask-1.0.2-py2.7.egg-info/entry_points.txt +++ /dev/null @@ -1,3 +0,0 @@ -[console_scripts] -flask = flask.cli:main - diff --git a/pyextra/Flask-1.0.2-py2.7.egg-info/installed-files.txt b/pyextra/Flask-1.0.2-py2.7.egg-info/installed-files.txt deleted file mode 100644 index 150041120..000000000 --- a/pyextra/Flask-1.0.2-py2.7.egg-info/installed-files.txt +++ /dev/null @@ -1,48 +0,0 @@ -../flask/testing.py -../flask/templating.py -../flask/__main__.py -../flask/sessions.py -../flask/signals.py -../flask/helpers.py -../flask/debughelpers.py -../flask/wrappers.py -../flask/app.py -../flask/ctx.py -../flask/config.py -../flask/logging.py -../flask/blueprints.py -../flask/views.py -../flask/cli.py -../flask/_compat.py -../flask/globals.py -../flask/__init__.py -../flask/json/tag.py -../flask/json/__init__.py -../flask/testing.pyc -../flask/templating.pyc -../flask/__main__.pyc -../flask/sessions.pyc -../flask/signals.pyc -../flask/helpers.pyc -../flask/debughelpers.pyc -../flask/wrappers.pyc -../flask/app.pyc -../flask/ctx.pyc -../flask/config.pyc -../flask/logging.pyc -../flask/blueprints.pyc -../flask/views.pyc -../flask/cli.pyc -../flask/_compat.pyc -../flask/globals.pyc -../flask/__init__.pyc -../flask/json/tag.pyc -../flask/json/__init__.pyc -not-zip-safe -entry_points.txt -dependency_links.txt -PKG-INFO -top_level.txt -requires.txt -SOURCES.txt -../../../../bin/flask diff --git a/pyextra/Flask-1.0.2-py2.7.egg-info/not-zip-safe b/pyextra/Flask-1.0.2-py2.7.egg-info/not-zip-safe deleted file mode 100644 index 8b1378917..000000000 --- a/pyextra/Flask-1.0.2-py2.7.egg-info/not-zip-safe +++ /dev/null @@ -1 +0,0 @@ - diff --git a/pyextra/Flask-1.0.2-py2.7.egg-info/requires.txt b/pyextra/Flask-1.0.2-py2.7.egg-info/requires.txt deleted file mode 100644 index f51579ea9..000000000 --- a/pyextra/Flask-1.0.2-py2.7.egg-info/requires.txt +++ /dev/null @@ -1,20 +0,0 @@ -Werkzeug>=0.14 -Jinja2>=2.10 -itsdangerous>=0.24 -click>=5.1 - -[dev] -pytest>=3 -coverage -tox -sphinx -pallets-sphinx-themes -sphinxcontrib-log-cabinet - -[docs] -sphinx -pallets-sphinx-themes -sphinxcontrib-log-cabinet - -[dotenv] -python-dotenv diff --git a/pyextra/Flask-1.0.2-py2.7.egg-info/top_level.txt b/pyextra/Flask-1.0.2-py2.7.egg-info/top_level.txt deleted file mode 100644 index 7e1060246..000000000 --- a/pyextra/Flask-1.0.2-py2.7.egg-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -flask diff --git a/pyextra/Jinja2-2.10-py2.7.egg-info/PKG-INFO 
b/pyextra/Jinja2-2.10-py2.7.egg-info/PKG-INFO deleted file mode 100644 index b4398d649..000000000 --- a/pyextra/Jinja2-2.10-py2.7.egg-info/PKG-INFO +++ /dev/null @@ -1,62 +0,0 @@ -Metadata-Version: 1.1 -Name: Jinja2 -Version: 2.10 -Summary: A small but fast and easy to use stand-alone template engine written in pure python. -Home-page: http://jinja.pocoo.org/ -Author: Armin Ronacher -Author-email: armin.ronacher@active-4.com -License: BSD -Description: - Jinja2 - ~~~~~~ - - Jinja2 is a template engine written in pure Python. It provides a - `Django`_ inspired non-XML syntax but supports inline expressions and - an optional `sandboxed`_ environment. - - Nutshell - -------- - - Here a small example of a Jinja template:: - - {% extends 'base.html' %} - {% block title %}Memberlist{% endblock %} - {% block content %} - - {% endblock %} - - Philosophy - ---------- - - Application logic is for the controller but don't try to make the life - for the template designer too hard by giving him too few functionality. - - For more informations visit the new `Jinja2 webpage`_ and `documentation`_. - - .. _sandboxed: https://en.wikipedia.org/wiki/Sandbox_(computer_security) - .. _Django: https://www.djangoproject.com/ - .. _Jinja2 webpage: http://jinja.pocoo.org/ - .. _documentation: http://jinja.pocoo.org/2/documentation/ - -Platform: UNKNOWN -Classifier: Development Status :: 5 - Production/Stable -Classifier: Environment :: Web Environment -Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved :: BSD License -Classifier: Operating System :: OS Independent -Classifier: Programming Language :: Python -Classifier: Programming Language :: Python :: 2 -Classifier: Programming Language :: Python :: 2.6 -Classifier: Programming Language :: Python :: 2.7 -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.3 -Classifier: Programming Language :: Python :: 3.4 -Classifier: Programming Language :: Python :: 3.5 -Classifier: Programming Language :: Python :: 3.6 -Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content -Classifier: Topic :: Software Development :: Libraries :: Python Modules -Classifier: Topic :: Text Processing :: Markup :: HTML diff --git a/pyextra/Jinja2-2.10-py2.7.egg-info/SOURCES.txt b/pyextra/Jinja2-2.10-py2.7.egg-info/SOURCES.txt deleted file mode 100644 index 93158e1f6..000000000 --- a/pyextra/Jinja2-2.10-py2.7.egg-info/SOURCES.txt +++ /dev/null @@ -1,133 +0,0 @@ -AUTHORS -CHANGES.rst -LICENSE -MANIFEST.in -README.rst -setup.cfg -setup.py -Jinja2.egg-info/PKG-INFO -Jinja2.egg-info/SOURCES.txt -Jinja2.egg-info/dependency_links.txt -Jinja2.egg-info/entry_points.txt -Jinja2.egg-info/not-zip-safe -Jinja2.egg-info/requires.txt -Jinja2.egg-info/top_level.txt -artwork/jinjalogo.svg -docs/Makefile -docs/api.rst -docs/cache_extension.py -docs/changelog.rst -docs/conf.py -docs/contents.rst.inc -docs/extensions.rst -docs/faq.rst -docs/index.rst -docs/integration.rst -docs/intro.rst -docs/jinjaext.py -docs/jinjastyle.sty -docs/latexindex.rst -docs/logo.pdf -docs/nativetypes.rst -docs/sandbox.rst -docs/switching.rst -docs/templates.rst -docs/tricks.rst -docs/_static/.ignore -docs/_static/jinja-small.png -docs/_templates/sidebarintro.html -docs/_templates/sidebarlogo.html -docs/_themes/LICENSE -docs/_themes/README -docs/_themes/jinja/layout.html -docs/_themes/jinja/relations.html -docs/_themes/jinja/theme.conf -docs/_themes/jinja/static/jinja.css_t -examples/bench.py -examples/profile.py -examples/basic/cycle.py 
-examples/basic/debugger.py -examples/basic/inheritance.py -examples/basic/test.py -examples/basic/test_filter_and_linestatements.py -examples/basic/test_loop_filter.py -examples/basic/translate.py -examples/basic/templates/broken.html -examples/basic/templates/subbroken.html -examples/rwbench/djangoext.py -examples/rwbench/rwbench.py -examples/rwbench/django/_form.html -examples/rwbench/django/_input_field.html -examples/rwbench/django/_textarea.html -examples/rwbench/django/index.html -examples/rwbench/django/layout.html -examples/rwbench/genshi/helpers.html -examples/rwbench/genshi/index.html -examples/rwbench/genshi/layout.html -examples/rwbench/jinja/helpers.html -examples/rwbench/jinja/index.html -examples/rwbench/jinja/layout.html -examples/rwbench/mako/helpers.html -examples/rwbench/mako/index.html -examples/rwbench/mako/layout.html -ext/djangojinja2.py -ext/inlinegettext.py -ext/jinja.el -ext/Vim/jinja.vim -ext/django2jinja/django2jinja.py -ext/django2jinja/example.py -ext/django2jinja/templates/index.html -ext/django2jinja/templates/layout.html -ext/django2jinja/templates/subtemplate.html -jinja2/__init__.py -jinja2/_compat.py -jinja2/_identifier.py -jinja2/asyncfilters.py -jinja2/asyncsupport.py -jinja2/bccache.py -jinja2/compiler.py -jinja2/constants.py -jinja2/debug.py -jinja2/defaults.py -jinja2/environment.py -jinja2/exceptions.py -jinja2/ext.py -jinja2/filters.py -jinja2/idtracking.py -jinja2/lexer.py -jinja2/loaders.py -jinja2/meta.py -jinja2/nativetypes.py -jinja2/nodes.py -jinja2/optimizer.py -jinja2/parser.py -jinja2/runtime.py -jinja2/sandbox.py -jinja2/tests.py -jinja2/utils.py -jinja2/visitor.py -tests/conftest.py -tests/test_api.py -tests/test_async.py -tests/test_asyncfilters.py -tests/test_bytecode_cache.py -tests/test_core_tags.py -tests/test_debug.py -tests/test_ext.py -tests/test_features.py -tests/test_filters.py -tests/test_idtracking.py -tests/test_imports.py -tests/test_inheritance.py -tests/test_lexnparse.py -tests/test_loader.py -tests/test_nativetypes.py -tests/test_regression.py -tests/test_security.py -tests/test_tests.py -tests/test_utils.py -tests/res/__init__.py -tests/res/templates/broken.html -tests/res/templates/syntaxerror.html -tests/res/templates/test.html -tests/res/templates/foo/test.html \ No newline at end of file diff --git a/pyextra/Jinja2-2.10-py2.7.egg-info/dependency_links.txt b/pyextra/Jinja2-2.10-py2.7.egg-info/dependency_links.txt deleted file mode 100644 index 8b1378917..000000000 --- a/pyextra/Jinja2-2.10-py2.7.egg-info/dependency_links.txt +++ /dev/null @@ -1 +0,0 @@ - diff --git a/pyextra/Jinja2-2.10-py2.7.egg-info/entry_points.txt b/pyextra/Jinja2-2.10-py2.7.egg-info/entry_points.txt deleted file mode 100644 index 32e6b7530..000000000 --- a/pyextra/Jinja2-2.10-py2.7.egg-info/entry_points.txt +++ /dev/null @@ -1,4 +0,0 @@ - - [babel.extractors] - jinja2 = jinja2.ext:babel_extract[i18n] - \ No newline at end of file diff --git a/pyextra/Jinja2-2.10-py2.7.egg-info/installed-files.txt b/pyextra/Jinja2-2.10-py2.7.egg-info/installed-files.txt deleted file mode 100644 index c2af213ac..000000000 --- a/pyextra/Jinja2-2.10-py2.7.egg-info/installed-files.txt +++ /dev/null @@ -1,61 +0,0 @@ -../jinja2/lexer.py -../jinja2/idtracking.py -../jinja2/_identifier.py -../jinja2/nodes.py -../jinja2/asyncfilters.py -../jinja2/loaders.py -../jinja2/defaults.py -../jinja2/meta.py -../jinja2/compiler.py -../jinja2/environment.py -../jinja2/tests.py -../jinja2/sandbox.py -../jinja2/filters.py -../jinja2/exceptions.py -../jinja2/asyncsupport.py 
-../jinja2/visitor.py -../jinja2/constants.py -../jinja2/utils.py -../jinja2/ext.py -../jinja2/optimizer.py -../jinja2/nativetypes.py -../jinja2/parser.py -../jinja2/runtime.py -../jinja2/debug.py -../jinja2/_compat.py -../jinja2/bccache.py -../jinja2/__init__.py -../jinja2/lexer.pyc -../jinja2/idtracking.pyc -../jinja2/_identifier.pyc -../jinja2/nodes.pyc -../jinja2/asyncfilters.pyc -../jinja2/loaders.pyc -../jinja2/defaults.pyc -../jinja2/meta.pyc -../jinja2/compiler.pyc -../jinja2/environment.pyc -../jinja2/tests.pyc -../jinja2/sandbox.pyc -../jinja2/filters.pyc -../jinja2/exceptions.pyc -../jinja2/asyncsupport.pyc -../jinja2/visitor.pyc -../jinja2/constants.pyc -../jinja2/utils.pyc -../jinja2/ext.pyc -../jinja2/optimizer.pyc -../jinja2/nativetypes.pyc -../jinja2/parser.pyc -../jinja2/runtime.pyc -../jinja2/debug.pyc -../jinja2/_compat.pyc -../jinja2/bccache.pyc -../jinja2/__init__.pyc -not-zip-safe -entry_points.txt -dependency_links.txt -PKG-INFO -top_level.txt -requires.txt -SOURCES.txt diff --git a/pyextra/Jinja2-2.10-py2.7.egg-info/not-zip-safe b/pyextra/Jinja2-2.10-py2.7.egg-info/not-zip-safe deleted file mode 100644 index 8b1378917..000000000 --- a/pyextra/Jinja2-2.10-py2.7.egg-info/not-zip-safe +++ /dev/null @@ -1 +0,0 @@ - diff --git a/pyextra/Jinja2-2.10-py2.7.egg-info/requires.txt b/pyextra/Jinja2-2.10-py2.7.egg-info/requires.txt deleted file mode 100644 index 1d74a32c6..000000000 --- a/pyextra/Jinja2-2.10-py2.7.egg-info/requires.txt +++ /dev/null @@ -1,4 +0,0 @@ -MarkupSafe>=0.23 - -[i18n] -Babel>=0.8 diff --git a/pyextra/Jinja2-2.10-py2.7.egg-info/top_level.txt b/pyextra/Jinja2-2.10-py2.7.egg-info/top_level.txt deleted file mode 100644 index 7f7afbf3b..000000000 --- a/pyextra/Jinja2-2.10-py2.7.egg-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -jinja2 diff --git a/pyextra/Jinja2-2.9.6.dist-info/DESCRIPTION.rst b/pyextra/Jinja2-2.9.6.dist-info/DESCRIPTION.rst deleted file mode 100644 index 4421f046e..000000000 --- a/pyextra/Jinja2-2.9.6.dist-info/DESCRIPTION.rst +++ /dev/null @@ -1,36 +0,0 @@ -Jinja2 -~~~~~~ - -Jinja2 is a template engine written in pure Python. It provides a -`Django`_ inspired non-XML syntax but supports inline expressions and -an optional `sandboxed`_ environment. - -Nutshell --------- - -Here a small example of a Jinja template:: - - {% extends 'base.html' %} - {% block title %}Memberlist{% endblock %} - {% block content %} - - {% endblock %} - -Philosophy ----------- - -Application logic is for the controller but don't try to make the life -for the template designer too hard by giving him too few functionality. - -For more informations visit the new `Jinja2 webpage`_ and `documentation`_. - -.. _sandboxed: http://en.wikipedia.org/wiki/Sandbox_(computer_security) -.. _Django: http://www.djangoproject.com/ -.. _Jinja2 webpage: http://jinja.pocoo.org/ -.. _documentation: http://jinja.pocoo.org/2/documentation/ - - diff --git a/pyextra/Jinja2-2.9.6.dist-info/INSTALLER b/pyextra/Jinja2-2.9.6.dist-info/INSTALLER deleted file mode 100644 index a1b589e38..000000000 --- a/pyextra/Jinja2-2.9.6.dist-info/INSTALLER +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/pyextra/Jinja2-2.9.6.dist-info/LICENSE.txt b/pyextra/Jinja2-2.9.6.dist-info/LICENSE.txt deleted file mode 100644 index 10145a264..000000000 --- a/pyextra/Jinja2-2.9.6.dist-info/LICENSE.txt +++ /dev/null @@ -1,31 +0,0 @@ -Copyright (c) 2009 by the Jinja Team, see AUTHORS for more details. - -Some rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials provided - with the distribution. - - * The names of the contributors may not be used to endorse or - promote products derived from this software without specific - prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/pyextra/Jinja2-2.9.6.dist-info/METADATA b/pyextra/Jinja2-2.9.6.dist-info/METADATA deleted file mode 100644 index ea69de172..000000000 --- a/pyextra/Jinja2-2.9.6.dist-info/METADATA +++ /dev/null @@ -1,65 +0,0 @@ -Metadata-Version: 2.0 -Name: Jinja2 -Version: 2.9.6 -Summary: A small but fast and easy to use stand-alone template engine written in pure python. -Home-page: http://jinja.pocoo.org/ -Author: Armin Ronacher -Author-email: armin.ronacher@active-4.com -License: BSD -Platform: UNKNOWN -Classifier: Development Status :: 5 - Production/Stable -Classifier: Environment :: Web Environment -Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved :: BSD License -Classifier: Operating System :: OS Independent -Classifier: Programming Language :: Python -Classifier: Programming Language :: Python :: 2 -Classifier: Programming Language :: Python :: 2.6 -Classifier: Programming Language :: Python :: 2.7 -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.3 -Classifier: Programming Language :: Python :: 3.4 -Classifier: Programming Language :: Python :: 3.5 -Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content -Classifier: Topic :: Software Development :: Libraries :: Python Modules -Classifier: Topic :: Text Processing :: Markup :: HTML -Requires-Dist: MarkupSafe (>=0.23) -Provides-Extra: i18n -Requires-Dist: Babel (>=0.8); extra == 'i18n' - -Jinja2 -~~~~~~ - -Jinja2 is a template engine written in pure Python. It provides a -`Django`_ inspired non-XML syntax but supports inline expressions and -an optional `sandboxed`_ environment. - -Nutshell --------- - -Here a small example of a Jinja template:: - - {% extends 'base.html' %} - {% block title %}Memberlist{% endblock %} - {% block content %} - - {% endblock %} - -Philosophy ----------- - -Application logic is for the controller but don't try to make the life -for the template designer too hard by giving him too few functionality. - -For more informations visit the new `Jinja2 webpage`_ and `documentation`_. - -.. 
_sandboxed: http://en.wikipedia.org/wiki/Sandbox_(computer_security) -.. _Django: http://www.djangoproject.com/ -.. _Jinja2 webpage: http://jinja.pocoo.org/ -.. _documentation: http://jinja.pocoo.org/2/documentation/ - - diff --git a/pyextra/Jinja2-2.9.6.dist-info/RECORD b/pyextra/Jinja2-2.9.6.dist-info/RECORD deleted file mode 100644 index 024e38991..000000000 --- a/pyextra/Jinja2-2.9.6.dist-info/RECORD +++ /dev/null @@ -1,59 +0,0 @@ -jinja2/__init__.py,sha256=Cx_UnJO4i_GqvKQsOu__mvGE_eMJSsBqITa26irtg5A,2565 -jinja2/_compat.py,sha256=xP60CE5Qr8FTYcDE1f54tbZLKGvMwYml4-8T7Q4KG9k,2596 -jinja2/_stringdefs.py,sha256=PYtqTmmWIhjXlFBoH-eE6fJkQvlu7nxUyQ2YlFB97VA,589381 -jinja2/asyncfilters.py,sha256=cTDPvrS8Hp_IkwsZ1m9af_lr5nHysw7uTa5gV0NmZVE,4144 -jinja2/asyncsupport.py,sha256=ZJO1Fdd9R93sDLrk6TZNuMQGgtuDmpTlENNRkLwZF7c,7765 -jinja2/bccache.py,sha256=0xoVw0R9nj3vtzPl9g-zB5BKTLFJ7FFMq2ABbn1IkCI,12793 -jinja2/compiler.py,sha256=lE5owyPwT1cGGZxWyzQtZLW7Uj1g3Vw1oVtBU8Uc_uM,62929 -jinja2/constants.py,sha256=uwwV8ZUhHhacAuz5PTwckfsbqBaqM7aKfyJL7kGX5YQ,1626 -jinja2/debug.py,sha256=UqEbKb4zofBABwvyA77Kr0-5IAQawKqC9t8ZeTIzpGU,12038 -jinja2/defaults.py,sha256=GvVEQqIRvRMCbQF2NZSr0mlEN8lxvGixU5wIIAeRe1A,1323 -jinja2/environment.py,sha256=z91L_efdYs-KNs6DBxQWDyYncOwOqn_0J4M5CfFj0Q8,50848 -jinja2/exceptions.py,sha256=_Rj-NVi98Q6AiEjYQOsP8dEIdu5AlmRHzcSNOPdWix4,4428 -jinja2/ext.py,sha256=9xq8fd_QPBIe4Z7hE1XawB7f1EDHrVZjpb2JiRTiG94,23867 -jinja2/filters.py,sha256=1OYGhyN84yVmFUIOwJNRV_StqTCfPhnRLfJTmWbEe_8,33424 -jinja2/idtracking.py,sha256=HHcCOMsQhCrrjwYAmikKqq_XetXLovCjXAThh9WbRAc,8760 -jinja2/lexer.py,sha256=W4A830e-fj12zRT6rL7H91F4D6xwED5LjR8iMxjWuVQ,28238 -jinja2/loaders.py,sha256=xiTuURKAEObyym0nU8PCIXu_Qp8fn0AJ5oIADUUm-5Q,17382 -jinja2/meta.py,sha256=fmKHxkmZYAOm9QyWWy8EMd6eefAIh234rkBMW2X4ZR8,4340 -jinja2/nodes.py,sha256=4_Ucxbkohtj4BAlpV0w_MpVmIxJNaVXDTBb4EHBA2JI,29392 -jinja2/optimizer.py,sha256=MsdlFACJ0FRdPtjmCAdt7JQ9SGrXFaDNUaslsWQaG3M,1722 -jinja2/parser.py,sha256=3tc82qO1Ovs9och_PjirbAmnWNT77n4wWjIQ8pEVKvU,35465 -jinja2/runtime.py,sha256=axkTQXg2-oc_Cm35NEMDDas3Jbq3ATxNrDOEa5v3wIw,26835 -jinja2/sandbox.py,sha256=Jx4MTxly8KvdkSWyui_kHY1_ZZ0RAQL4ojAy1KDRyK0,16707 -jinja2/tests.py,sha256=iFuUTbUYv7TFffq2aTswCRdIhQ6wyrby1YevChVPqkE,4428 -jinja2/utils.py,sha256=BIFqeXXsCUSjWx6MEwYhY6V4tXzVNs9WRXfB60MA9HY,19941 -jinja2/visitor.py,sha256=JD1H1cANA29JcntFfN5fPyqQxB4bI4wC00BzZa-XHks,3316 -Jinja2-2.9.6.dist-info/DESCRIPTION.rst,sha256=CXIS1UnPSk5_lZBS6Lb8ko-3lqGfjsiUwNBLXCTj2lc,975 -Jinja2-2.9.6.dist-info/entry_points.txt,sha256=NdzVcOrqyNyKDxD09aERj__3bFx2paZhizFDsKmVhiA,72 -Jinja2-2.9.6.dist-info/LICENSE.txt,sha256=JvzUNv3Io51EiWrAPm8d_SXjhJnEjyDYvB3Tvwqqils,1554 -Jinja2-2.9.6.dist-info/METADATA,sha256=53LSXlqC86JTyLSPsDyAOmyV4pXIzzmmZoUXz7ogytA,2172 -Jinja2-2.9.6.dist-info/metadata.json,sha256=vzvX25T4hwMOe1EIOBo9rpfiZerOB_KVLcplGG_qYtE,1394 -Jinja2-2.9.6.dist-info/RECORD,, -Jinja2-2.9.6.dist-info/top_level.txt,sha256=PkeVWtLb3-CqjWi1fO29OCbj55EhX_chhKrCdrVe_zs,7 -Jinja2-2.9.6.dist-info/WHEEL,sha256=AvR0WeTpDaxT645bl5FQxUK6NPsTls2ttpcGJg3j1Xg,110 -Jinja2-2.9.6.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -jinja2/_compat.pyc,, -jinja2/sandbox.pyc,, -jinja2/_stringdefs.pyc,, -jinja2/bccache.pyc,, -jinja2/runtime.pyc,, -jinja2/utils.pyc,, -jinja2/parser.pyc,, -jinja2/debug.pyc,, -jinja2/lexer.pyc,, -jinja2/defaults.pyc,, -jinja2/visitor.pyc,, -jinja2/nodes.pyc,, -jinja2/environment.pyc,, -jinja2/compiler.pyc,, -jinja2/exceptions.pyc,, -jinja2/filters.pyc,, 
-jinja2/__init__.pyc,, -jinja2/meta.pyc,, -jinja2/loaders.pyc,, -jinja2/ext.pyc,, -jinja2/optimizer.pyc,, -jinja2/constants.pyc,, -jinja2/tests.pyc,, -jinja2/idtracking.pyc,, diff --git a/pyextra/Jinja2-2.9.6.dist-info/WHEEL b/pyextra/Jinja2-2.9.6.dist-info/WHEEL deleted file mode 100644 index 9dff69d86..000000000 --- a/pyextra/Jinja2-2.9.6.dist-info/WHEEL +++ /dev/null @@ -1,6 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.24.0) -Root-Is-Purelib: true -Tag: py2-none-any -Tag: py3-none-any - diff --git a/pyextra/Jinja2-2.9.6.dist-info/entry_points.txt b/pyextra/Jinja2-2.9.6.dist-info/entry_points.txt deleted file mode 100644 index 32e6b7530..000000000 --- a/pyextra/Jinja2-2.9.6.dist-info/entry_points.txt +++ /dev/null @@ -1,4 +0,0 @@ - - [babel.extractors] - jinja2 = jinja2.ext:babel_extract[i18n] - \ No newline at end of file diff --git a/pyextra/Jinja2-2.9.6.dist-info/metadata.json b/pyextra/Jinja2-2.9.6.dist-info/metadata.json deleted file mode 100644 index 9bbf942f1..000000000 --- a/pyextra/Jinja2-2.9.6.dist-info/metadata.json +++ /dev/null @@ -1 +0,0 @@ -{"license": "BSD", "name": "Jinja2", "metadata_version": "2.0", "generator": "bdist_wheel (0.24.0)", "summary": "A small but fast and easy to use stand-alone template engine written in pure python.", "run_requires": [{"requires": ["Babel (>=0.8)"], "extra": "i18n"}, {"requires": ["MarkupSafe (>=0.23)"]}], "version": "2.9.6", "extensions": {"python.details": {"project_urls": {"Home": "http://jinja.pocoo.org/"}, "document_names": {"description": "DESCRIPTION.rst", "license": "LICENSE.txt"}, "contacts": [{"role": "author", "email": "armin.ronacher@active-4.com", "name": "Armin Ronacher"}]}, "python.exports": {"babel.extractors": {"jinja2": "jinja2.ext:babel_extract [i18n]"}}}, "classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Web Environment", "Intended Audience :: Developers", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Topic :: Internet :: WWW/HTTP :: Dynamic Content", "Topic :: Software Development :: Libraries :: Python Modules", "Topic :: Text Processing :: Markup :: HTML"], "extras": ["i18n"]} \ No newline at end of file diff --git a/pyextra/Jinja2-2.9.6.dist-info/top_level.txt b/pyextra/Jinja2-2.9.6.dist-info/top_level.txt deleted file mode 100644 index 7f7afbf3b..000000000 --- a/pyextra/Jinja2-2.9.6.dist-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -jinja2 diff --git a/pyextra/MarkupSafe-1.0-py2.7.egg-info/PKG-INFO b/pyextra/MarkupSafe-1.0-py2.7.egg-info/PKG-INFO deleted file mode 100644 index 6f2568f66..000000000 --- a/pyextra/MarkupSafe-1.0-py2.7.egg-info/PKG-INFO +++ /dev/null @@ -1,133 +0,0 @@ -Metadata-Version: 1.1 -Name: MarkupSafe -Version: 1.0 -Summary: Implements a XML/HTML/XHTML Markup safe string for Python -Home-page: http://github.com/pallets/markupsafe -Author: Armin Ronacher -Author-email: armin.ronacher@active-4.com -License: BSD -Description: MarkupSafe - ========== - - Implements a unicode subclass that supports HTML strings: - - .. 
code-block:: python - - >>> from markupsafe import Markup, escape - >>> escape("") - Markup(u'<script>alert(document.cookie);</script>') - >>> tmpl = Markup("%s") - >>> tmpl % "Peter > Lustig" - Markup(u'Peter > Lustig') - - If you want to make an object unicode that is not yet unicode - but don't want to lose the taint information, you can use the - ``soft_unicode`` function. (On Python 3 you can also use ``soft_str`` which - is a different name for the same function). - - .. code-block:: python - - >>> from markupsafe import soft_unicode - >>> soft_unicode(42) - u'42' - >>> soft_unicode(Markup('foo')) - Markup(u'foo') - - HTML Representations - -------------------- - - Objects can customize their HTML markup equivalent by overriding - the ``__html__`` function: - - .. code-block:: python - - >>> class Foo(object): - ... def __html__(self): - ... return 'Nice' - ... - >>> escape(Foo()) - Markup(u'Nice') - >>> Markup(Foo()) - Markup(u'Nice') - - Silent Escapes - -------------- - - Since MarkupSafe 0.10 there is now also a separate escape function - called ``escape_silent`` that returns an empty string for ``None`` for - consistency with other systems that return empty strings for ``None`` - when escaping (for instance Pylons' webhelpers). - - If you also want to use this for the escape method of the Markup - object, you can create your own subclass that does that: - - .. code-block:: python - - from markupsafe import Markup, escape_silent as escape - - class SilentMarkup(Markup): - __slots__ = () - - @classmethod - def escape(cls, s): - return cls(escape(s)) - - New-Style String Formatting - --------------------------- - - Starting with MarkupSafe 0.21 new style string formats from Python 2.6 and - 3.x are now fully supported. Previously the escape behavior of those - functions was spotty at best. The new implementations operates under the - following algorithm: - - 1. if an object has an ``__html_format__`` method it is called as - replacement for ``__format__`` with the format specifier. It either - has to return a string or markup object. - 2. if an object has an ``__html__`` method it is called. - 3. otherwise the default format system of Python kicks in and the result - is HTML escaped. - - Here is how you can implement your own formatting: - - .. code-block:: python - - class User(object): - - def __init__(self, id, username): - self.id = id - self.username = username - - def __html_format__(self, format_spec): - if format_spec == 'link': - return Markup('{1}').format( - self.id, - self.__html__(), - ) - elif format_spec: - raise ValueError('Invalid format spec') - return self.__html__() - - def __html__(self): - return Markup('{0}').format(self.username) - - And to format that user: - - .. code-block:: python - - >>> user = User(1, 'foo') - >>> Markup('
<p>User: {0:link}</p>').format(user) - Markup(u'
User: foo') - - Markupsafe supports Python 2.6, 2.7 and Python 3.3 and higher. - -Platform: UNKNOWN -Classifier: Development Status :: 5 - Production/Stable -Classifier: Environment :: Web Environment -Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved :: BSD License -Classifier: Operating System :: OS Independent -Classifier: Programming Language :: Python -Classifier: Programming Language :: Python :: 3 -Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content -Classifier: Topic :: Software Development :: Libraries :: Python Modules -Classifier: Topic :: Text Processing :: Markup :: HTML diff --git a/pyextra/MarkupSafe-1.0-py2.7.egg-info/SOURCES.txt b/pyextra/MarkupSafe-1.0-py2.7.egg-info/SOURCES.txt deleted file mode 100644 index 210b339ce..000000000 --- a/pyextra/MarkupSafe-1.0-py2.7.egg-info/SOURCES.txt +++ /dev/null @@ -1,18 +0,0 @@ -AUTHORS -CHANGES -LICENSE -MANIFEST.in -README.rst -setup.cfg -setup.py -tests.py -MarkupSafe.egg-info/PKG-INFO -MarkupSafe.egg-info/SOURCES.txt -MarkupSafe.egg-info/dependency_links.txt -MarkupSafe.egg-info/not-zip-safe -MarkupSafe.egg-info/top_level.txt -markupsafe/__init__.py -markupsafe/_compat.py -markupsafe/_constants.py -markupsafe/_native.py -markupsafe/_speedups.c \ No newline at end of file diff --git a/pyextra/MarkupSafe-1.0-py2.7.egg-info/dependency_links.txt b/pyextra/MarkupSafe-1.0-py2.7.egg-info/dependency_links.txt deleted file mode 100644 index 8b1378917..000000000 --- a/pyextra/MarkupSafe-1.0-py2.7.egg-info/dependency_links.txt +++ /dev/null @@ -1 +0,0 @@ - diff --git a/pyextra/MarkupSafe-1.0-py2.7.egg-info/installed-files.txt b/pyextra/MarkupSafe-1.0-py2.7.egg-info/installed-files.txt deleted file mode 100644 index 0fcc7c540..000000000 --- a/pyextra/MarkupSafe-1.0-py2.7.egg-info/installed-files.txt +++ /dev/null @@ -1,15 +0,0 @@ -../markupsafe/__init__.py -../markupsafe/_compat.py -../markupsafe/_constants.py -../markupsafe/_native.py -../markupsafe/_speedups.c -../markupsafe/__init__.pyc -../markupsafe/_compat.pyc -../markupsafe/_constants.pyc -../markupsafe/_native.pyc -../markupsafe/_speedups.so -dependency_links.txt -not-zip-safe -PKG-INFO -SOURCES.txt -top_level.txt diff --git a/pyextra/MarkupSafe-1.0-py2.7.egg-info/not-zip-safe b/pyextra/MarkupSafe-1.0-py2.7.egg-info/not-zip-safe deleted file mode 100644 index 8b1378917..000000000 --- a/pyextra/MarkupSafe-1.0-py2.7.egg-info/not-zip-safe +++ /dev/null @@ -1 +0,0 @@ - diff --git a/pyextra/MarkupSafe-1.0-py2.7.egg-info/top_level.txt b/pyextra/MarkupSafe-1.0-py2.7.egg-info/top_level.txt deleted file mode 100644 index 75bf72925..000000000 --- a/pyextra/MarkupSafe-1.0-py2.7.egg-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -markupsafe diff --git a/pyextra/PyJWT-1.4.1-py2.7.egg-info/PKG-INFO b/pyextra/PyJWT-1.4.1-py2.7.egg-info/PKG-INFO deleted file mode 100644 index 91af37c9f..000000000 --- a/pyextra/PyJWT-1.4.1-py2.7.egg-info/PKG-INFO +++ /dev/null @@ -1,70 +0,0 @@ -Metadata-Version: 1.1 -Name: PyJWT -Version: 1.4.1 -Summary: JSON Web Token implementation in Python -Home-page: http://github.com/jpadilla/pyjwt -Author: José Padilla -Author-email: hello@jpadilla.com -License: MIT -Description: # PyJWT - - [![travis-status-image]][travis] - [![appveyor-status-image]][appveyor] - [![pypi-version-image]][pypi] - [![coveralls-status-image]][coveralls] - [![docs-status-image]][docs] - - A Python implementation of [RFC 7519][jwt-spec]. - Original implementation was written by [@progrium][progrium]. 
- - ## Installing - - ``` - $ pip install PyJWT - ``` - - ## Usage - - ```python - >>> import jwt - >>> encoded = jwt.encode({'some': 'payload'}, 'secret', algorithm='HS256') - 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzb21lIjoicGF5bG9hZCJ9.4twFt5NiznN84AWoo1d7KO1T_yoc0Z6XOpOVswacPZg' - - >>> jwt.decode(encoded, 'secret', algorithms=['HS256']) - {'some': 'payload'} - ``` - - ## Tests - - You can run tests from the project root after cloning with: - - ``` - $ python setup.py test - ``` - - [travis-status-image]: https://secure.travis-ci.org/jpadilla/pyjwt.svg?branch=master - [travis]: http://travis-ci.org/jpadilla/pyjwt?branch=master - [appveyor-status-image]: https://ci.appveyor.com/api/projects/status/h8nt70aqtwhht39t?svg=true - [appveyor]: https://ci.appveyor.com/project/jpadilla/pyjwt - [pypi-version-image]: https://img.shields.io/pypi/v/pyjwt.svg - [pypi]: https://pypi.python.org/pypi/pyjwt - [coveralls-status-image]: https://coveralls.io/repos/jpadilla/pyjwt/badge.svg?branch=master - [coveralls]: https://coveralls.io/r/jpadilla/pyjwt?branch=master - [docs-status-image]: https://readthedocs.org/projects/pyjwt/badge/?version=latest - [docs]: http://pyjwt.readthedocs.org - [jwt-spec]: https://tools.ietf.org/html/rfc7519 - [progrium]: https://github.com/progrium - -Keywords: jwt json web token security signing -Platform: UNKNOWN -Classifier: Development Status :: 5 - Production/Stable -Classifier: Intended Audience :: Developers -Classifier: Natural Language :: English -Classifier: License :: OSI Approved :: MIT License -Classifier: Programming Language :: Python -Classifier: Programming Language :: Python :: 2.6 -Classifier: Programming Language :: Python :: 2.7 -Classifier: Programming Language :: Python :: 3.3 -Classifier: Programming Language :: Python :: 3.4 -Classifier: Programming Language :: Python :: 3.5 -Classifier: Topic :: Utilities diff --git a/pyextra/PyJWT-1.4.1-py2.7.egg-info/SOURCES.txt b/pyextra/PyJWT-1.4.1-py2.7.egg-info/SOURCES.txt deleted file mode 100644 index 972fb0f8a..000000000 --- a/pyextra/PyJWT-1.4.1-py2.7.egg-info/SOURCES.txt +++ /dev/null @@ -1,49 +0,0 @@ -AUTHORS -CHANGELOG.md -LICENSE -MANIFEST.in -README.md -setup.cfg -setup.py -tox.ini -PyJWT.egg-info/PKG-INFO -PyJWT.egg-info/SOURCES.txt -PyJWT.egg-info/dependency_links.txt -PyJWT.egg-info/entry_points.txt -PyJWT.egg-info/requires.txt -PyJWT.egg-info/top_level.txt -jwt/__init__.py -jwt/__main__.py -jwt/algorithms.py -jwt/api_jws.py -jwt/api_jwt.py -jwt/compat.py -jwt/exceptions.py -jwt/utils.py -jwt/contrib/__init__.py -jwt/contrib/algorithms/__init__.py -jwt/contrib/algorithms/py_ecdsa.py -jwt/contrib/algorithms/pycrypto.py -tests/__init__.py -tests/compat.py -tests/test_algorithms.py -tests/test_api_jws.py -tests/test_api_jwt.py -tests/test_compat.py -tests/test_exceptions.py -tests/test_jwt.py -tests/utils.py -tests/contrib/__init__.py -tests/contrib/test_algorithms.py -tests/keys/__init__.py -tests/keys/jwk_ec_key.json -tests/keys/jwk_ec_pub.json -tests/keys/jwk_hmac.json -tests/keys/jwk_rsa_key.json -tests/keys/jwk_rsa_pub.json -tests/keys/testkey2_rsa.pub.pem -tests/keys/testkey_ec -tests/keys/testkey_ec.pub -tests/keys/testkey_rsa -tests/keys/testkey_rsa.cer -tests/keys/testkey_rsa.pub \ No newline at end of file diff --git a/pyextra/PyJWT-1.4.1-py2.7.egg-info/dependency_links.txt b/pyextra/PyJWT-1.4.1-py2.7.egg-info/dependency_links.txt deleted file mode 100644 index 8b1378917..000000000 --- a/pyextra/PyJWT-1.4.1-py2.7.egg-info/dependency_links.txt +++ /dev/null @@ -1 +0,0 @@ - diff --git 
a/pyextra/PyJWT-1.4.1-py2.7.egg-info/entry_points.txt b/pyextra/PyJWT-1.4.1-py2.7.egg-info/entry_points.txt deleted file mode 100644 index cbdf40f14..000000000 --- a/pyextra/PyJWT-1.4.1-py2.7.egg-info/entry_points.txt +++ /dev/null @@ -1,3 +0,0 @@ -[console_scripts] -jwt = jwt.__main__:main - diff --git a/pyextra/PyJWT-1.4.1-py2.7.egg-info/installed-files.txt b/pyextra/PyJWT-1.4.1-py2.7.egg-info/installed-files.txt deleted file mode 100644 index d3e93ab1d..000000000 --- a/pyextra/PyJWT-1.4.1-py2.7.egg-info/installed-files.txt +++ /dev/null @@ -1,31 +0,0 @@ -../../../../bin/jwt -../jwt/__init__.py -../jwt/__init__.pyc -../jwt/__main__.py -../jwt/__main__.pyc -../jwt/algorithms.py -../jwt/algorithms.pyc -../jwt/api_jws.py -../jwt/api_jws.pyc -../jwt/api_jwt.py -../jwt/api_jwt.pyc -../jwt/compat.py -../jwt/compat.pyc -../jwt/contrib/__init__.py -../jwt/contrib/__init__.pyc -../jwt/contrib/algorithms/__init__.py -../jwt/contrib/algorithms/__init__.pyc -../jwt/contrib/algorithms/py_ecdsa.py -../jwt/contrib/algorithms/py_ecdsa.pyc -../jwt/contrib/algorithms/pycrypto.py -../jwt/contrib/algorithms/pycrypto.pyc -../jwt/exceptions.py -../jwt/exceptions.pyc -../jwt/utils.py -../jwt/utils.pyc -PKG-INFO -SOURCES.txt -dependency_links.txt -entry_points.txt -requires.txt -top_level.txt diff --git a/pyextra/PyJWT-1.4.1-py2.7.egg-info/requires.txt b/pyextra/PyJWT-1.4.1-py2.7.egg-info/requires.txt deleted file mode 100644 index 67cccf541..000000000 --- a/pyextra/PyJWT-1.4.1-py2.7.egg-info/requires.txt +++ /dev/null @@ -1,13 +0,0 @@ - -[crypto] -cryptography - -[flake8] -flake8 -flake8-import-order -pep8-naming - -[test] -pytest==2.7.3 -pytest-cov -pytest-runner diff --git a/pyextra/PyJWT-1.4.1-py2.7.egg-info/top_level.txt b/pyextra/PyJWT-1.4.1-py2.7.egg-info/top_level.txt deleted file mode 100644 index 27ccc9bc3..000000000 --- a/pyextra/PyJWT-1.4.1-py2.7.egg-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -jwt diff --git a/pyextra/Werkzeug-0.14.1-py2.7.egg-info/PKG-INFO b/pyextra/Werkzeug-0.14.1-py2.7.egg-info/PKG-INFO deleted file mode 100644 index a0a627684..000000000 --- a/pyextra/Werkzeug-0.14.1-py2.7.egg-info/PKG-INFO +++ /dev/null @@ -1,104 +0,0 @@ -Metadata-Version: 1.1 -Name: Werkzeug -Version: 0.14.1 -Summary: The comprehensive WSGI web application library. -Home-page: https://www.palletsprojects.org/p/werkzeug/ -Author: Armin Ronacher -Author-email: armin.ronacher@active-4.com -License: BSD -Description: Werkzeug - ======== - - Werkzeug is a comprehensive `WSGI`_ web application library. It began as - a simple collection of various utilities for WSGI applications and has - become one of the most advanced WSGI utility libraries. - - It includes: - - * An interactive debugger that allows inspecting stack traces and source - code in the browser with an interactive interpreter for any frame in - the stack. - * A full-featured request object with objects to interact with headers, - query args, form data, files, and cookies. - * A response object that can wrap other WSGI applications and handle - streaming data. - * A routing system for matching URLs to endpoints and generating URLs - for endpoints, with an extensible system for capturing variables from - URLs. - * HTTP utilities to handle entity tags, cache control, dates, user - agents, cookies, files, and more. - * A threaded WSGI server for use while developing applications locally. - * A test client for simulating HTTP requests during testing without - requiring running a server. - - Werkzeug is Unicode aware and doesn't enforce any dependencies. 
It is up - to the developer to choose a template engine, database adapter, and even - how to handle requests. It can be used to build all sorts of end user - applications such as blogs, wikis, or bulletin boards. - - `Flask`_ wraps Werkzeug, using it to handle the details of WSGI while - providing more structure and patterns for defining powerful - applications. - - - Installing - ---------- - - Install and update using `pip`_: - - .. code-block:: text - - pip install -U Werkzeug - - - A Simple Example - ---------------- - - .. code-block:: python - - from werkzeug.wrappers import Request, Response - - @Request.application - def application(request): - return Response('Hello, World!') - - if __name__ == '__main__': - from werkzeug.serving import run_simple - run_simple('localhost', 4000, application) - - - Links - ----- - - * Website: https://www.palletsprojects.com/p/werkzeug/ - * Releases: https://pypi.org/project/Werkzeug/ - * Code: https://github.com/pallets/werkzeug - * Issue tracker: https://github.com/pallets/werkzeug/issues - * Test status: - - * Linux, Mac: https://travis-ci.org/pallets/werkzeug - * Windows: https://ci.appveyor.com/project/davidism/werkzeug - - * Test coverage: https://codecov.io/gh/pallets/werkzeug - - .. _WSGI: https://wsgi.readthedocs.io/en/latest/ - .. _Flask: https://www.palletsprojects.com/p/flask/ - .. _pip: https://pip.pypa.io/en/stable/quickstart/ - -Platform: any -Classifier: Development Status :: 5 - Production/Stable -Classifier: Environment :: Web Environment -Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved :: BSD License -Classifier: Operating System :: OS Independent -Classifier: Programming Language :: Python -Classifier: Programming Language :: Python :: 2 -Classifier: Programming Language :: Python :: 2.6 -Classifier: Programming Language :: Python :: 2.7 -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.3 -Classifier: Programming Language :: Python :: 3.4 -Classifier: Programming Language :: Python :: 3.5 -Classifier: Programming Language :: Python :: 3.6 -Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content -Classifier: Topic :: Software Development :: Libraries :: Python Modules diff --git a/pyextra/Werkzeug-0.14.1-py2.7.egg-info/SOURCES.txt b/pyextra/Werkzeug-0.14.1-py2.7.egg-info/SOURCES.txt deleted file mode 100644 index 9d3b76908..000000000 --- a/pyextra/Werkzeug-0.14.1-py2.7.egg-info/SOURCES.txt +++ /dev/null @@ -1,299 +0,0 @@ -AUTHORS -CHANGES.rst -LICENSE -MANIFEST.in -Makefile -README.rst -setup.cfg -setup.py -tox.ini -Werkzeug.egg-info/PKG-INFO -Werkzeug.egg-info/SOURCES.txt -Werkzeug.egg-info/dependency_links.txt -Werkzeug.egg-info/not-zip-safe -Werkzeug.egg-info/requires.txt -Werkzeug.egg-info/top_level.txt -artwork/.DS_Store -artwork/logo.png -artwork/logo.svg -docs/.DS_Store -docs/Makefile -docs/changes.rst -docs/conf.py -docs/contents.rst.inc -docs/datastructures.rst -docs/debug.rst -docs/exceptions.rst -docs/filesystem.rst -docs/http.rst -docs/index.rst -docs/installation.rst -docs/latexindex.rst -docs/levels.rst -docs/local.rst -docs/logo.pdf -docs/make.bat -docs/makearchive.py -docs/middlewares.rst -docs/python3.rst -docs/quickstart.rst -docs/request_data.rst -docs/routing.rst -docs/serving.rst -docs/terms.rst -docs/test.rst -docs/transition.rst -docs/tutorial.rst -docs/unicode.rst -docs/urls.rst -docs/utils.rst -docs/werkzeugext.py -docs/werkzeugstyle.sty -docs/wrappers.rst -docs/wsgi.rst -docs/_static/background.png 
-docs/_static/codebackground.png -docs/_static/contents.png -docs/_static/debug-screenshot.png -docs/_static/favicon.ico -docs/_static/header.png -docs/_static/navigation.png -docs/_static/navigation_active.png -docs/_static/shortly.png -docs/_static/shorty-screenshot.png -docs/_static/style.css -docs/_static/werkzeug.js -docs/_static/werkzeug.png -docs/_templates/sidebarintro.html -docs/_templates/sidebarlogo.html -docs/contrib/atom.rst -docs/contrib/cache.rst -docs/contrib/fixers.rst -docs/contrib/index.rst -docs/contrib/iterio.rst -docs/contrib/lint.rst -docs/contrib/profiler.rst -docs/contrib/securecookie.rst -docs/contrib/sessions.rst -docs/contrib/wrappers.rst -docs/deployment/cgi.rst -docs/deployment/fastcgi.rst -docs/deployment/index.rst -docs/deployment/mod_wsgi.rst -docs/deployment/proxying.rst -examples/README -examples/cookieauth.py -examples/httpbasicauth.py -examples/manage-coolmagic.py -examples/manage-couchy.py -examples/manage-cupoftee.py -examples/manage-i18nurls.py -examples/manage-plnt.py -examples/manage-shorty.py -examples/manage-simplewiki.py -examples/manage-webpylike.py -examples/upload.py -examples/contrib/README -examples/contrib/securecookie.py -examples/contrib/sessions.py -examples/coolmagic/__init__.py -examples/coolmagic/application.py -examples/coolmagic/helpers.py -examples/coolmagic/utils.py -examples/coolmagic/public/style.css -examples/coolmagic/templates/layout.html -examples/coolmagic/templates/static/about.html -examples/coolmagic/templates/static/index.html -examples/coolmagic/templates/static/not_found.html -examples/coolmagic/views/__init__.py -examples/coolmagic/views/static.py -examples/couchy/README -examples/couchy/__init__.py -examples/couchy/application.py -examples/couchy/models.py -examples/couchy/utils.py -examples/couchy/views.py -examples/couchy/static/style.css -examples/couchy/templates/display.html -examples/couchy/templates/layout.html -examples/couchy/templates/list.html -examples/couchy/templates/new.html -examples/couchy/templates/not_found.html -examples/cupoftee/__init__.py -examples/cupoftee/application.py -examples/cupoftee/db.py -examples/cupoftee/network.py -examples/cupoftee/pages.py -examples/cupoftee/utils.py -examples/cupoftee/shared/content.png -examples/cupoftee/shared/down.png -examples/cupoftee/shared/favicon.ico -examples/cupoftee/shared/header.png -examples/cupoftee/shared/logo.png -examples/cupoftee/shared/style.css -examples/cupoftee/shared/up.png -examples/cupoftee/templates/layout.html -examples/cupoftee/templates/missingpage.html -examples/cupoftee/templates/search.html -examples/cupoftee/templates/server.html -examples/cupoftee/templates/serverlist.html -examples/i18nurls/__init__.py -examples/i18nurls/application.py -examples/i18nurls/urls.py -examples/i18nurls/views.py -examples/i18nurls/templates/about.html -examples/i18nurls/templates/blog.html -examples/i18nurls/templates/index.html -examples/i18nurls/templates/layout.html -examples/partial/README -examples/partial/complex_routing.py -examples/plnt/__init__.py -examples/plnt/database.py -examples/plnt/sync.py -examples/plnt/utils.py -examples/plnt/views.py -examples/plnt/webapp.py -examples/plnt/shared/style.css -examples/plnt/templates/about.html -examples/plnt/templates/index.html -examples/plnt/templates/layout.html -examples/shortly/shortly.py -examples/shortly/static/style.css -examples/shortly/templates/404.html -examples/shortly/templates/layout.html -examples/shortly/templates/new_url.html -examples/shortly/templates/short_link_details.html 
-examples/shorty/__init__.py -examples/shorty/application.py -examples/shorty/models.py -examples/shorty/utils.py -examples/shorty/views.py -examples/shorty/static/style.css -examples/shorty/templates/display.html -examples/shorty/templates/layout.html -examples/shorty/templates/list.html -examples/shorty/templates/new.html -examples/shorty/templates/not_found.html -examples/simplewiki/__init__.py -examples/simplewiki/actions.py -examples/simplewiki/application.py -examples/simplewiki/database.py -examples/simplewiki/specialpages.py -examples/simplewiki/utils.py -examples/simplewiki/shared/style.css -examples/simplewiki/templates/action_diff.html -examples/simplewiki/templates/action_edit.html -examples/simplewiki/templates/action_log.html -examples/simplewiki/templates/action_revert.html -examples/simplewiki/templates/action_show.html -examples/simplewiki/templates/layout.html -examples/simplewiki/templates/macros.xml -examples/simplewiki/templates/missing_action.html -examples/simplewiki/templates/page_index.html -examples/simplewiki/templates/page_missing.html -examples/simplewiki/templates/recent_changes.html -examples/webpylike/example.py -examples/webpylike/webpylike.py -tests/__init__.py -tests/conftest.py -tests/test_compat.py -tests/test_datastructures.py -tests/test_debug.py -tests/test_exceptions.py -tests/test_formparser.py -tests/test_http.py -tests/test_internal.py -tests/test_local.py -tests/test_routing.py -tests/test_security.py -tests/test_serving.py -tests/test_test.py -tests/test_urls.py -tests/test_utils.py -tests/test_wrappers.py -tests/test_wsgi.py -tests/contrib/__init__.py -tests/contrib/test_atom.py -tests/contrib/test_cache.py -tests/contrib/test_fixers.py -tests/contrib/test_iterio.py -tests/contrib/test_securecookie.py -tests/contrib/test_sessions.py -tests/contrib/test_wrappers.py -tests/contrib/cache/conftest.py -tests/contrib/cache/test_cache.py -tests/hypothesis/__init__.py -tests/hypothesis/test_urls.py -tests/multipart/__init__.py -tests/multipart/ie7_full_path_request.txt -tests/multipart/test_collect.py -tests/multipart/firefox3-2png1txt/file1.png -tests/multipart/firefox3-2png1txt/file2.png -tests/multipart/firefox3-2png1txt/request.txt -tests/multipart/firefox3-2png1txt/text.txt -tests/multipart/firefox3-2pnglongtext/file1.png -tests/multipart/firefox3-2pnglongtext/file2.png -tests/multipart/firefox3-2pnglongtext/request.txt -tests/multipart/firefox3-2pnglongtext/text.txt -tests/multipart/ie6-2png1txt/file1.png -tests/multipart/ie6-2png1txt/file2.png -tests/multipart/ie6-2png1txt/request.txt -tests/multipart/ie6-2png1txt/text.txt -tests/multipart/opera8-2png1txt/file1.png -tests/multipart/opera8-2png1txt/file2.png -tests/multipart/opera8-2png1txt/request.txt -tests/multipart/opera8-2png1txt/text.txt -tests/multipart/webkit3-2png1txt/file1.png -tests/multipart/webkit3-2png1txt/file2.png -tests/multipart/webkit3-2png1txt/request.txt -tests/multipart/webkit3-2png1txt/text.txt -tests/res/chunked.txt -tests/res/test.txt -werkzeug/__init__.py -werkzeug/_compat.py -werkzeug/_internal.py -werkzeug/_reloader.py -werkzeug/datastructures.py -werkzeug/exceptions.py -werkzeug/filesystem.py -werkzeug/formparser.py -werkzeug/http.py -werkzeug/local.py -werkzeug/posixemulation.py -werkzeug/routing.py -werkzeug/security.py -werkzeug/serving.py -werkzeug/test.py -werkzeug/testapp.py -werkzeug/urls.py -werkzeug/useragents.py -werkzeug/utils.py -werkzeug/websocket.py -werkzeug/wrappers.py -werkzeug/wsgi.py -werkzeug/contrib/__init__.py -werkzeug/contrib/atom.py 
-werkzeug/contrib/cache.py -werkzeug/contrib/fixers.py -werkzeug/contrib/iterio.py -werkzeug/contrib/jsrouting.py -werkzeug/contrib/limiter.py -werkzeug/contrib/lint.py -werkzeug/contrib/profiler.py -werkzeug/contrib/securecookie.py -werkzeug/contrib/sessions.py -werkzeug/contrib/testtools.py -werkzeug/contrib/wrappers.py -werkzeug/debug/__init__.py -werkzeug/debug/console.py -werkzeug/debug/repr.py -werkzeug/debug/tbtools.py -werkzeug/debug/shared/FONT_LICENSE -werkzeug/debug/shared/console.png -werkzeug/debug/shared/debugger.js -werkzeug/debug/shared/jquery.js -werkzeug/debug/shared/less.png -werkzeug/debug/shared/more.png -werkzeug/debug/shared/source.png -werkzeug/debug/shared/style.css -werkzeug/debug/shared/ubuntu.ttf \ No newline at end of file diff --git a/pyextra/Werkzeug-0.14.1-py2.7.egg-info/dependency_links.txt b/pyextra/Werkzeug-0.14.1-py2.7.egg-info/dependency_links.txt deleted file mode 100644 index 8b1378917..000000000 --- a/pyextra/Werkzeug-0.14.1-py2.7.egg-info/dependency_links.txt +++ /dev/null @@ -1 +0,0 @@ - diff --git a/pyextra/Werkzeug-0.14.1-py2.7.egg-info/installed-files.txt b/pyextra/Werkzeug-0.14.1-py2.7.egg-info/installed-files.txt deleted file mode 100644 index 105fb3c0b..000000000 --- a/pyextra/Werkzeug-0.14.1-py2.7.egg-info/installed-files.txt +++ /dev/null @@ -1,93 +0,0 @@ -../werkzeug/_reloader.py -../werkzeug/_internal.py -../werkzeug/serving.py -../werkzeug/local.py -../werkzeug/filesystem.py -../werkzeug/security.py -../werkzeug/__init__.py -../werkzeug/test.py -../werkzeug/formparser.py -../werkzeug/posixemulation.py -../werkzeug/utils.py -../werkzeug/wrappers.py -../werkzeug/routing.py -../werkzeug/http.py -../werkzeug/useragents.py -../werkzeug/exceptions.py -../werkzeug/_compat.py -../werkzeug/datastructures.py -../werkzeug/urls.py -../werkzeug/websocket.py -../werkzeug/wsgi.py -../werkzeug/testapp.py -../werkzeug/contrib/sessions.py -../werkzeug/contrib/cache.py -../werkzeug/contrib/__init__.py -../werkzeug/contrib/testtools.py -../werkzeug/contrib/wrappers.py -../werkzeug/contrib/jsrouting.py -../werkzeug/contrib/fixers.py -../werkzeug/contrib/profiler.py -../werkzeug/contrib/iterio.py -../werkzeug/contrib/atom.py -../werkzeug/contrib/securecookie.py -../werkzeug/contrib/limiter.py -../werkzeug/contrib/lint.py -../werkzeug/debug/console.py -../werkzeug/debug/tbtools.py -../werkzeug/debug/__init__.py -../werkzeug/debug/repr.py -../werkzeug/debug/shared/FONT_LICENSE -../werkzeug/debug/shared/console.png -../werkzeug/debug/shared/debugger.js -../werkzeug/debug/shared/jquery.js -../werkzeug/debug/shared/less.png -../werkzeug/debug/shared/more.png -../werkzeug/debug/shared/source.png -../werkzeug/debug/shared/style.css -../werkzeug/debug/shared/ubuntu.ttf -../werkzeug/_reloader.pyc -../werkzeug/_internal.pyc -../werkzeug/serving.pyc -../werkzeug/local.pyc -../werkzeug/filesystem.pyc -../werkzeug/security.pyc -../werkzeug/__init__.pyc -../werkzeug/test.pyc -../werkzeug/formparser.pyc -../werkzeug/posixemulation.pyc -../werkzeug/utils.pyc -../werkzeug/wrappers.pyc -../werkzeug/routing.pyc -../werkzeug/http.pyc -../werkzeug/useragents.pyc -../werkzeug/exceptions.pyc -../werkzeug/_compat.pyc -../werkzeug/datastructures.pyc -../werkzeug/urls.pyc -../werkzeug/websocket.pyc -../werkzeug/wsgi.pyc -../werkzeug/testapp.pyc -../werkzeug/contrib/sessions.pyc -../werkzeug/contrib/cache.pyc -../werkzeug/contrib/__init__.pyc -../werkzeug/contrib/testtools.pyc -../werkzeug/contrib/wrappers.pyc -../werkzeug/contrib/jsrouting.pyc -../werkzeug/contrib/fixers.pyc 
-../werkzeug/contrib/profiler.pyc -../werkzeug/contrib/iterio.pyc -../werkzeug/contrib/atom.pyc -../werkzeug/contrib/securecookie.pyc -../werkzeug/contrib/limiter.pyc -../werkzeug/contrib/lint.pyc -../werkzeug/debug/console.pyc -../werkzeug/debug/tbtools.pyc -../werkzeug/debug/__init__.pyc -../werkzeug/debug/repr.pyc -PKG-INFO -not-zip-safe -SOURCES.txt -requires.txt -top_level.txt -dependency_links.txt diff --git a/pyextra/Werkzeug-0.14.1-py2.7.egg-info/not-zip-safe b/pyextra/Werkzeug-0.14.1-py2.7.egg-info/not-zip-safe deleted file mode 100644 index 8b1378917..000000000 --- a/pyextra/Werkzeug-0.14.1-py2.7.egg-info/not-zip-safe +++ /dev/null @@ -1 +0,0 @@ - diff --git a/pyextra/Werkzeug-0.14.1-py2.7.egg-info/requires.txt b/pyextra/Werkzeug-0.14.1-py2.7.egg-info/requires.txt deleted file mode 100644 index dd8c7c51f..000000000 --- a/pyextra/Werkzeug-0.14.1-py2.7.egg-info/requires.txt +++ /dev/null @@ -1,12 +0,0 @@ - -[dev] -pytest -coverage -tox -sphinx - -[termcolor] -termcolor - -[watchdog] -watchdog diff --git a/pyextra/Werkzeug-0.14.1-py2.7.egg-info/top_level.txt b/pyextra/Werkzeug-0.14.1-py2.7.egg-info/top_level.txt deleted file mode 100644 index 6fe8da849..000000000 --- a/pyextra/Werkzeug-0.14.1-py2.7.egg-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -werkzeug diff --git a/pyextra/backports.ssl_match_hostname-3.7.0.1-py2.7.egg-info/PKG-INFO b/pyextra/backports.ssl_match_hostname-3.7.0.1-py2.7.egg-info/PKG-INFO deleted file mode 100644 index 035a98f8c..000000000 --- a/pyextra/backports.ssl_match_hostname-3.7.0.1-py2.7.egg-info/PKG-INFO +++ /dev/null @@ -1,89 +0,0 @@ -Metadata-Version: 1.1 -Name: backports.ssl-match-hostname -Version: 3.7.0.1 -Summary: The ssl.match_hostname() function from Python 3.5 -Home-page: http://bitbucket.org/brandon/backports.ssl_match_hostname -Author: Toshio Kuratomi -Author-email: toshio@fedoraproject.org -License: Python Software Foundation License -Description: - The ssl.match_hostname() function from Python 3.7 - ================================================= - - The Secure Sockets Layer is only actually *secure* - if you check the hostname in the certificate returned - by the server to which you are connecting, - and verify that it matches to hostname - that you are trying to reach. - - But the matching logic, defined in `RFC2818`_, - can be a bit tricky to implement on your own. - So the ``ssl`` package in the Standard Library of Python 3.2 - and greater now includes a ``match_hostname()`` function - for performing this check instead of requiring every application - to implement the check separately. - - This backport brings ``match_hostname()`` to users - of earlier versions of Python. - Simply make this distribution a dependency of your package, - and then use it like this:: - - from backports.ssl_match_hostname import match_hostname, CertificateError - [...] - sslsock = ssl.wrap_socket(sock, ssl_version=ssl.PROTOCOL_SSLv23, - cert_reqs=ssl.CERT_REQUIRED, ca_certs=...) - try: - match_hostname(sslsock.getpeercert(), hostname) - except CertificateError, ce: - ... - - Brandon Craig Rhodes is merely the packager of this distribution; - the actual code inside comes from Python 3.7 with small changes for - portability. - - - Requirements - ------------ - - * If you need to use this on Python versions earlier than 2.6 you will need to - install the `ssl module`_. From Python 2.6 upwards ``ssl`` is included in - the Python Standard Library so you do not need to install it separately. - - .. 
_`ssl module`:: https://pypi.python.org/pypi/ssl - - History - ------- - - * This function was introduced in python-3.2 - * It was updated for python-3.4a1 for a CVE - (backports-ssl_match_hostname-3.4.0.1) - * It was updated from RFC2818 to RFC 6125 compliance in order to fix another - security flaw for python-3.3.3 and python-3.4a5 - (backports-ssl_match_hostname-3.4.0.2) - * It was updated in python-3.5 to handle IPAddresses in ServerAltName fields - (something that backports.ssl_match_hostname will do if you also install the - ipaddress library from pypi). - * It was updated in python-3.7 to handle IPAddresses without the ipaddress library and dropped - support for partial wildcards - - .. _`ipaddress module`:: https://pypi.python.org/pypi/ipaddress - - .. _RFC2818: http://tools.ietf.org/html/rfc2818.html - -Platform: UNKNOWN -Classifier: Development Status :: 5 - Production/Stable -Classifier: License :: OSI Approved :: Python Software Foundation License -Classifier: Programming Language :: Python :: 2.4 -Classifier: Programming Language :: Python :: 2.5 -Classifier: Programming Language :: Python :: 2.6 -Classifier: Programming Language :: Python :: 2.7 -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.0 -Classifier: Programming Language :: Python :: 3.1 -Classifier: Programming Language :: Python :: 3.2 -Classifier: Programming Language :: Python :: 3.3 -Classifier: Programming Language :: Python :: 3.4 -Classifier: Programming Language :: Python :: 3.5 -Classifier: Programming Language :: Python :: 3.6 -Classifier: Programming Language :: Python :: 3.7 -Classifier: Topic :: Security :: Cryptography diff --git a/pyextra/backports.ssl_match_hostname-3.7.0.1-py2.7.egg-info/SOURCES.txt b/pyextra/backports.ssl_match_hostname-3.7.0.1-py2.7.egg-info/SOURCES.txt deleted file mode 100644 index 95b961609..000000000 --- a/pyextra/backports.ssl_match_hostname-3.7.0.1-py2.7.egg-info/SOURCES.txt +++ /dev/null @@ -1,8 +0,0 @@ -README.txt -setup.cfg -backports/__init__.py -backports.ssl_match_hostname.egg-info/PKG-INFO -backports.ssl_match_hostname.egg-info/SOURCES.txt -backports.ssl_match_hostname.egg-info/dependency_links.txt -backports.ssl_match_hostname.egg-info/top_level.txt -backports/ssl_match_hostname/__init__.py \ No newline at end of file diff --git a/pyextra/backports.ssl_match_hostname-3.7.0.1-py2.7.egg-info/dependency_links.txt b/pyextra/backports.ssl_match_hostname-3.7.0.1-py2.7.egg-info/dependency_links.txt deleted file mode 100644 index 8b1378917..000000000 --- a/pyextra/backports.ssl_match_hostname-3.7.0.1-py2.7.egg-info/dependency_links.txt +++ /dev/null @@ -1 +0,0 @@ - diff --git a/pyextra/backports.ssl_match_hostname-3.7.0.1-py2.7.egg-info/installed-files.txt b/pyextra/backports.ssl_match_hostname-3.7.0.1-py2.7.egg-info/installed-files.txt deleted file mode 100644 index fd1aae367..000000000 --- a/pyextra/backports.ssl_match_hostname-3.7.0.1-py2.7.egg-info/installed-files.txt +++ /dev/null @@ -1,8 +0,0 @@ -../backports/__init__.py -../backports/__init__.pyc -../backports/ssl_match_hostname/__init__.py -../backports/ssl_match_hostname/__init__.pyc -PKG-INFO -SOURCES.txt -dependency_links.txt -top_level.txt diff --git a/pyextra/backports.ssl_match_hostname-3.7.0.1-py2.7.egg-info/top_level.txt b/pyextra/backports.ssl_match_hostname-3.7.0.1-py2.7.egg-info/top_level.txt deleted file mode 100644 index 99d2be5b6..000000000 --- a/pyextra/backports.ssl_match_hostname-3.7.0.1-py2.7.egg-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ 
-backports diff --git a/pyextra/backports/__init__.py b/pyextra/backports/__init__.py deleted file mode 100644 index 612d32836..000000000 --- a/pyextra/backports/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# This is a Python "namespace package" http://www.python.org/dev/peps/pep-0382/ -from pkgutil import extend_path -__path__ = extend_path(__path__, __name__) diff --git a/pyextra/backports/ssl_match_hostname/__init__.py b/pyextra/backports/ssl_match_hostname/__init__.py deleted file mode 100644 index cdfef013d..000000000 --- a/pyextra/backports/ssl_match_hostname/__init__.py +++ /dev/null @@ -1,204 +0,0 @@ -"""The match_hostname() function from Python 3.7.0, essential when using SSL.""" - -import sys -import socket as _socket - -try: - # Divergence: Python-3.7+'s _ssl has this exception type but older Pythons do not - from _ssl import SSLCertVerificationError - CertificateError = SSLCertVerificationError -except: - class CertificateError(ValueError): - pass - - -__version__ = '3.7.0.1' - - -# Divergence: Added to deal with ipaddess as bytes on python2 -def _to_text(obj): - if isinstance(obj, str) and sys.version_info < (3,): - obj = unicode(obj, encoding='ascii', errors='strict') - elif sys.version_info >= (3,) and isinstance(obj, bytes): - obj = str(obj, encoding='ascii', errors='strict') - return obj - - -def _to_bytes(obj): - if isinstance(obj, str) and sys.version_info >= (3,): - obj = bytes(obj, encoding='ascii', errors='strict') - elif sys.version_info < (3,) and isinstance(obj, unicode): - obj = obj.encode('ascii', 'strict') - return obj - - -def _dnsname_match(dn, hostname): - """Matching according to RFC 6125, section 6.4.3 - - - Hostnames are compared lower case. - - For IDNA, both dn and hostname must be encoded as IDN A-label (ACE). - - Partial wildcards like 'www*.example.org', multiple wildcards, sole - wildcard or wildcards in labels other then the left-most label are not - supported and a CertificateError is raised. - - A wildcard must match at least one character. - """ - if not dn: - return False - - wildcards = dn.count('*') - # speed up common case w/o wildcards - if not wildcards: - return dn.lower() == hostname.lower() - - if wildcards > 1: - # Divergence .format() to percent formatting for Python < 2.6 - raise CertificateError( - "too many wildcards in certificate DNS name: %s" % repr(dn)) - - dn_leftmost, sep, dn_remainder = dn.partition('.') - - if '*' in dn_remainder: - # Only match wildcard in leftmost segment. - # Divergence .format() to percent formatting for Python < 2.6 - raise CertificateError( - "wildcard can only be present in the leftmost label: " - "%s." % repr(dn)) - - if not sep: - # no right side - # Divergence .format() to percent formatting for Python < 2.6 - raise CertificateError( - "sole wildcard without additional labels are not support: " - "%s." % repr(dn)) - - if dn_leftmost != '*': - # no partial wildcard matching - # Divergence .format() to percent formatting for Python < 2.6 - raise CertificateError( - "partial wildcards in leftmost label are not supported: " - "%s." % repr(dn)) - - hostname_leftmost, sep, hostname_remainder = hostname.partition('.') - if not hostname_leftmost or not sep: - # wildcard must match at least one char - return False - return dn_remainder.lower() == hostname_remainder.lower() - - -def _inet_paton(ipname): - """Try to convert an IP address to packed binary form - - Supports IPv4 addresses on all platforms and IPv6 on platforms with IPv6 - support. 
- """ - # inet_aton() also accepts strings like '1' - # Divergence: We make sure we have native string type for all python versions - try: - b_ipname = _to_bytes(ipname) - except UnicodeError: - raise ValueError("%s must be an all-ascii string." % repr(ipname)) - - # Set ipname in native string format - if sys.version_info < (3,): - n_ipname = b_ipname - else: - n_ipname = ipname - - if n_ipname.count('.') == 3: - try: - return _socket.inet_aton(n_ipname) - # Divergence: OSError on late python3. socket.error earlier. - # Null bytes generate ValueError on python3(we want to raise - # ValueError anyway), TypeError # earlier - except (OSError, _socket.error, TypeError): - pass - - try: - return _socket.inet_pton(_socket.AF_INET6, n_ipname) - # Divergence: OSError on late python3. socket.error earlier. - # Null bytes generate ValueError on python3(we want to raise - # ValueError anyway), TypeError # earlier - except (OSError, _socket.error, TypeError): - # Divergence .format() to percent formatting for Python < 2.6 - raise ValueError("%s is neither an IPv4 nor an IP6 " - "address." % repr(ipname)) - except AttributeError: - # AF_INET6 not available - pass - - # Divergence .format() to percent formatting for Python < 2.6 - raise ValueError("%s is not an IPv4 address." % repr(ipname)) - - -def _ipaddress_match(ipname, host_ip): - """Exact matching of IP addresses. - - RFC 6125 explicitly doesn't define an algorithm for this - (section 1.7.2 - "Out of Scope"). - """ - # OpenSSL may add a trailing newline to a subjectAltName's IP address - ip = _inet_paton(ipname.rstrip()) - return ip == host_ip - - -def match_hostname(cert, hostname): - """Verify that *cert* (in decoded format as returned by - SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125 - rules are followed. - - The function matches IP addresses rather than dNSNames if hostname is a - valid ipaddress string. IPv4 addresses are supported on all platforms. - IPv6 addresses are supported on platforms with IPv6 support (AF_INET6 - and inet_pton). - - CertificateError is raised on failure. On success, the function - returns nothing. - """ - if not cert: - raise ValueError("empty or no certificate, match_hostname needs a " - "SSL socket or SSL context with either " - "CERT_OPTIONAL or CERT_REQUIRED") - try: - # Divergence: Deal with hostname as bytes - host_ip = _inet_paton(_to_text(hostname)) - except ValueError: - # Not an IP address (common case) - host_ip = None - except UnicodeError: - # Divergence: Deal with hostname as byte strings. - # IP addresses should be all ascii, so we consider it not - # an IP address if this fails - host_ip = None - dnsnames = [] - san = cert.get('subjectAltName', ()) - for key, value in san: - if key == 'DNS': - if host_ip is None and _dnsname_match(value, hostname): - return - dnsnames.append(value) - elif key == 'IP Address': - if host_ip is not None and _ipaddress_match(value, host_ip): - return - dnsnames.append(value) - if not dnsnames: - # The subject is only checked when there is no dNSName entry - # in subjectAltName - for sub in cert.get('subject', ()): - for key, value in sub: - # XXX according to RFC 2818, the most specific Common Name - # must be used. 
- if key == 'commonName': - if _dnsname_match(value, hostname): - return - dnsnames.append(value) - if len(dnsnames) > 1: - raise CertificateError("hostname %r " - "doesn't match either of %s" - % (hostname, ', '.join(map(repr, dnsnames)))) - elif len(dnsnames) == 1: - raise CertificateError("hostname %r " - "doesn't match %r" - % (hostname, dnsnames[0])) - else: - raise CertificateError("no appropriate commonName or " - "subjectAltName fields were found") diff --git a/pyextra/bin/flask b/pyextra/bin/flask deleted file mode 100755 index 2adb78c4a..000000000 --- a/pyextra/bin/flask +++ /dev/null @@ -1,10 +0,0 @@ -#!/usr/local/bin/python -# EASY-INSTALL-ENTRY-SCRIPT: 'Flask==1.0.2','console_scripts','flask' -__requires__ = 'Flask==1.0.2' -import sys -from pkg_resources import load_entry_point - -if __name__ == '__main__': - sys.exit( - load_entry_point('Flask==1.0.2', 'console_scripts', 'flask')() - ) diff --git a/pyextra/bin/wsdump.py b/pyextra/bin/wsdump.py deleted file mode 100755 index 246d15050..000000000 --- a/pyextra/bin/wsdump.py +++ /dev/null @@ -1,201 +0,0 @@ -#!/usr/local/bin/python - -import argparse -import code -import sys -import threading -import time -import ssl - -import six -from six.moves.urllib.parse import urlparse - -import websocket - -try: - import readline -except ImportError: - pass - - -def get_encoding(): - encoding = getattr(sys.stdin, "encoding", "") - if not encoding: - return "utf-8" - else: - return encoding.lower() - - -OPCODE_DATA = (websocket.ABNF.OPCODE_TEXT, websocket.ABNF.OPCODE_BINARY) -ENCODING = get_encoding() - - -class VAction(argparse.Action): - - def __call__(self, parser, args, values, option_string=None): - if values is None: - values = "1" - try: - values = int(values) - except ValueError: - values = values.count("v") + 1 - setattr(args, self.dest, values) - - -def parse_args(): - parser = argparse.ArgumentParser(description="WebSocket Simple Dump Tool") - parser.add_argument("url", metavar="ws_url", - help="websocket url. ex. ws://echo.websocket.org/") - parser.add_argument("-p", "--proxy", - help="proxy url. ex. http://127.0.0.1:8080") - parser.add_argument("-v", "--verbose", default=0, nargs='?', action=VAction, - dest="verbose", - help="set verbose mode. If set to 1, show opcode. " - "If set to 2, enable to trace websocket module") - parser.add_argument("-n", "--nocert", action='store_true', - help="Ignore invalid SSL cert") - parser.add_argument("-r", "--raw", action="store_true", - help="raw output") - parser.add_argument("-s", "--subprotocols", nargs='*', - help="Set subprotocols") - parser.add_argument("-o", "--origin", - help="Set origin") - parser.add_argument("--eof-wait", default=0, type=int, - help="wait time(second) after 'EOF' received.") - parser.add_argument("-t", "--text", - help="Send initial text") - parser.add_argument("--timings", action="store_true", - help="Print timings in seconds") - parser.add_argument("--headers", - help="Set custom headers. 
Use ',' as separator") - - return parser.parse_args() - - -class RawInput: - - def raw_input(self, prompt): - if six.PY3: - line = input(prompt) - else: - line = raw_input(prompt) - - if ENCODING and ENCODING != "utf-8" and not isinstance(line, six.text_type): - line = line.decode(ENCODING).encode("utf-8") - elif isinstance(line, six.text_type): - line = line.encode("utf-8") - - return line - - -class InteractiveConsole(RawInput, code.InteractiveConsole): - - def write(self, data): - sys.stdout.write("\033[2K\033[E") - # sys.stdout.write("\n") - sys.stdout.write("\033[34m< " + data + "\033[39m") - sys.stdout.write("\n> ") - sys.stdout.flush() - - def read(self): - return self.raw_input("> ") - - -class NonInteractive(RawInput): - - def write(self, data): - sys.stdout.write(data) - sys.stdout.write("\n") - sys.stdout.flush() - - def read(self): - return self.raw_input("") - - -def main(): - start_time = time.time() - args = parse_args() - if args.verbose > 1: - websocket.enableTrace(True) - options = {} - if args.proxy: - p = urlparse(args.proxy) - options["http_proxy_host"] = p.hostname - options["http_proxy_port"] = p.port - if args.origin: - options["origin"] = args.origin - if args.subprotocols: - options["subprotocols"] = args.subprotocols - opts = {} - if args.nocert: - opts = {"cert_reqs": ssl.CERT_NONE, "check_hostname": False} - if args.headers: - options['header'] = map(str.strip, args.headers.split(',')) - ws = websocket.create_connection(args.url, sslopt=opts, **options) - if args.raw: - console = NonInteractive() - else: - console = InteractiveConsole() - print("Press Ctrl+C to quit") - - def recv(): - try: - frame = ws.recv_frame() - except websocket.WebSocketException: - return websocket.ABNF.OPCODE_CLOSE, None - if not frame: - raise websocket.WebSocketException("Not a valid frame %s" % frame) - elif frame.opcode in OPCODE_DATA: - return frame.opcode, frame.data - elif frame.opcode == websocket.ABNF.OPCODE_CLOSE: - ws.send_close() - return frame.opcode, None - elif frame.opcode == websocket.ABNF.OPCODE_PING: - ws.pong(frame.data) - return frame.opcode, frame.data - - return frame.opcode, frame.data - - def recv_ws(): - while True: - opcode, data = recv() - msg = None - if six.PY3 and opcode == websocket.ABNF.OPCODE_TEXT and isinstance(data, bytes): - data = str(data, "utf-8") - if not args.verbose and opcode in OPCODE_DATA: - msg = data - elif args.verbose: - msg = "%s: %s" % (websocket.ABNF.OPCODE_MAP.get(opcode), data) - - if msg is not None: - if args.timings: - console.write(str(time.time() - start_time) + ": " + msg) - else: - console.write(msg) - - if opcode == websocket.ABNF.OPCODE_CLOSE: - break - - thread = threading.Thread(target=recv_ws) - thread.daemon = True - thread.start() - - if args.text: - ws.send(args.text) - - while True: - try: - message = console.read() - ws.send(message) - except KeyboardInterrupt: - return - except EOFError: - time.sleep(args.eof_wait) - return - - -if __name__ == "__main__": - try: - main() - except Exception as e: - print(e) diff --git a/pyextra/click-6.7-py2.7.egg-info/PKG-INFO b/pyextra/click-6.7-py2.7.egg-info/PKG-INFO deleted file mode 100644 index bbb17b383..000000000 --- a/pyextra/click-6.7-py2.7.egg-info/PKG-INFO +++ /dev/null @@ -1,13 +0,0 @@ -Metadata-Version: 1.1 -Name: click -Version: 6.7 -Summary: A simple wrapper around optparse for powerful command line utilities. 
-Home-page: http://github.com/mitsuhiko/click -Author: Armin Ronacher -Author-email: armin.ronacher@active-4.com -License: UNKNOWN -Description: UNKNOWN -Platform: UNKNOWN -Classifier: License :: OSI Approved :: BSD License -Classifier: Programming Language :: Python -Classifier: Programming Language :: Python :: 3 diff --git a/pyextra/click-6.7-py2.7.egg-info/SOURCES.txt b/pyextra/click-6.7-py2.7.egg-info/SOURCES.txt deleted file mode 100644 index 5eed4349e..000000000 --- a/pyextra/click-6.7-py2.7.egg-info/SOURCES.txt +++ /dev/null @@ -1,114 +0,0 @@ -CHANGES -LICENSE -MANIFEST.in -Makefile -README -setup.cfg -setup.py -artwork/logo.svg -click/__init__.py -click/_bashcomplete.py -click/_compat.py -click/_termui_impl.py -click/_textwrap.py -click/_unicodefun.py -click/_winconsole.py -click/core.py -click/decorators.py -click/exceptions.py -click/formatting.py -click/globals.py -click/parser.py -click/termui.py -click/testing.py -click/types.py -click/utils.py -click.egg-info/PKG-INFO -click.egg-info/SOURCES.txt -click.egg-info/dependency_links.txt -click.egg-info/top_level.txt -docs/Makefile -docs/advanced.rst -docs/api.rst -docs/arguments.rst -docs/bashcomplete.rst -docs/changelog.rst -docs/clickdoctools.py -docs/commands.rst -docs/complex.rst -docs/conf.py -docs/contrib.rst -docs/documentation.rst -docs/exceptions.rst -docs/index.rst -docs/license.rst -docs/make.bat -docs/options.rst -docs/parameters.rst -docs/prompts.rst -docs/python3.rst -docs/quickstart.rst -docs/setuptools.rst -docs/testing.rst -docs/upgrading.rst -docs/utils.rst -docs/why.rst -docs/wincmd.rst -docs/_static/click-small.png -docs/_static/click-small@2x.png -docs/_static/click.png -docs/_static/click@2x.png -docs/_templates/sidebarintro.html -docs/_templates/sidebarlogo.html -examples/README -examples/aliases/README -examples/aliases/aliases.ini -examples/aliases/aliases.py -examples/aliases/setup.py -examples/colors/README -examples/colors/colors.py -examples/colors/setup.py -examples/complex/README -examples/complex/setup.py -examples/complex/complex/__init__.py -examples/complex/complex/cli.py -examples/complex/complex/commands/__init__.py -examples/complex/complex/commands/cmd_init.py -examples/complex/complex/commands/cmd_status.py -examples/imagepipe/.gitignore -examples/imagepipe/README -examples/imagepipe/example01.jpg -examples/imagepipe/example02.jpg -examples/imagepipe/imagepipe.py -examples/imagepipe/setup.py -examples/inout/README -examples/inout/inout.py -examples/inout/setup.py -examples/naval/README -examples/naval/naval.py -examples/naval/setup.py -examples/repo/README -examples/repo/repo.py -examples/repo/setup.py -examples/termui/README -examples/termui/setup.py -examples/termui/termui.py -examples/validation/README -examples/validation/setup.py -examples/validation/validation.py -tests/conftest.py -tests/test_arguments.py -tests/test_bashcomplete.py -tests/test_basic.py -tests/test_chain.py -tests/test_commands.py -tests/test_compat.py -tests/test_context.py -tests/test_defaults.py -tests/test_formatting.py -tests/test_imports.py -tests/test_normalization.py -tests/test_options.py -tests/test_termui.py -tests/test_testing.py -tests/test_utils.py \ No newline at end of file diff --git a/pyextra/click-6.7-py2.7.egg-info/dependency_links.txt b/pyextra/click-6.7-py2.7.egg-info/dependency_links.txt deleted file mode 100644 index 8b1378917..000000000 --- a/pyextra/click-6.7-py2.7.egg-info/dependency_links.txt +++ /dev/null @@ -1 +0,0 @@ - diff --git a/pyextra/click-6.7-py2.7.egg-info/installed-files.txt 
b/pyextra/click-6.7-py2.7.egg-info/installed-files.txt deleted file mode 100644 index 63d1b52c1..000000000 --- a/pyextra/click-6.7-py2.7.egg-info/installed-files.txt +++ /dev/null @@ -1,38 +0,0 @@ -../click/exceptions.py -../click/testing.py -../click/decorators.py -../click/parser.py -../click/formatting.py -../click/globals.py -../click/_termui_impl.py -../click/__init__.py -../click/_compat.py -../click/_winconsole.py -../click/_unicodefun.py -../click/_textwrap.py -../click/_bashcomplete.py -../click/core.py -../click/types.py -../click/termui.py -../click/utils.py -../click/exceptions.pyc -../click/testing.pyc -../click/decorators.pyc -../click/parser.pyc -../click/formatting.pyc -../click/globals.pyc -../click/_termui_impl.pyc -../click/__init__.pyc -../click/_compat.pyc -../click/_winconsole.pyc -../click/_unicodefun.pyc -../click/_textwrap.pyc -../click/_bashcomplete.pyc -../click/core.pyc -../click/types.pyc -../click/termui.pyc -../click/utils.pyc -SOURCES.txt -top_level.txt -PKG-INFO -dependency_links.txt diff --git a/pyextra/click-6.7-py2.7.egg-info/top_level.txt b/pyextra/click-6.7-py2.7.egg-info/top_level.txt deleted file mode 100644 index dca9a9096..000000000 --- a/pyextra/click-6.7-py2.7.egg-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -click diff --git a/pyextra/click/__init__.py b/pyextra/click/__init__.py deleted file mode 100644 index 971e55d0a..000000000 --- a/pyextra/click/__init__.py +++ /dev/null @@ -1,98 +0,0 @@ -# -*- coding: utf-8 -*- -""" - click - ~~~~~ - - Click is a simple Python module that wraps the stdlib's optparse to make - writing command line scripts fun. Unlike other modules, it's based around - a simple API that does not come with too much magic and is composable. - - In case optparse ever gets removed from the stdlib, it will be shipped by - this module. - - :copyright: (c) 2014 by Armin Ronacher. - :license: BSD, see LICENSE for more details. 
-""" - -# Core classes -from .core import Context, BaseCommand, Command, MultiCommand, Group, \ - CommandCollection, Parameter, Option, Argument - -# Globals -from .globals import get_current_context - -# Decorators -from .decorators import pass_context, pass_obj, make_pass_decorator, \ - command, group, argument, option, confirmation_option, \ - password_option, version_option, help_option - -# Types -from .types import ParamType, File, Path, Choice, IntRange, Tuple, \ - STRING, INT, FLOAT, BOOL, UUID, UNPROCESSED - -# Utilities -from .utils import echo, get_binary_stream, get_text_stream, open_file, \ - format_filename, get_app_dir, get_os_args - -# Terminal functions -from .termui import prompt, confirm, get_terminal_size, echo_via_pager, \ - progressbar, clear, style, unstyle, secho, edit, launch, getchar, \ - pause - -# Exceptions -from .exceptions import ClickException, UsageError, BadParameter, \ - FileError, Abort, NoSuchOption, BadOptionUsage, BadArgumentUsage, \ - MissingParameter - -# Formatting -from .formatting import HelpFormatter, wrap_text - -# Parsing -from .parser import OptionParser - - -__all__ = [ - # Core classes - 'Context', 'BaseCommand', 'Command', 'MultiCommand', 'Group', - 'CommandCollection', 'Parameter', 'Option', 'Argument', - - # Globals - 'get_current_context', - - # Decorators - 'pass_context', 'pass_obj', 'make_pass_decorator', 'command', 'group', - 'argument', 'option', 'confirmation_option', 'password_option', - 'version_option', 'help_option', - - # Types - 'ParamType', 'File', 'Path', 'Choice', 'IntRange', 'Tuple', 'STRING', - 'INT', 'FLOAT', 'BOOL', 'UUID', 'UNPROCESSED', - - # Utilities - 'echo', 'get_binary_stream', 'get_text_stream', 'open_file', - 'format_filename', 'get_app_dir', 'get_os_args', - - # Terminal functions - 'prompt', 'confirm', 'get_terminal_size', 'echo_via_pager', - 'progressbar', 'clear', 'style', 'unstyle', 'secho', 'edit', 'launch', - 'getchar', 'pause', - - # Exceptions - 'ClickException', 'UsageError', 'BadParameter', 'FileError', - 'Abort', 'NoSuchOption', 'BadOptionUsage', 'BadArgumentUsage', - 'MissingParameter', - - # Formatting - 'HelpFormatter', 'wrap_text', - - # Parsing - 'OptionParser', -] - - -# Controls if click should emit the warning about the use of unicode -# literals. 
-disable_unicode_literals_warning = False - - -__version__ = '6.7' diff --git a/pyextra/click/_bashcomplete.py b/pyextra/click/_bashcomplete.py deleted file mode 100644 index d9d26d28b..000000000 --- a/pyextra/click/_bashcomplete.py +++ /dev/null @@ -1,83 +0,0 @@ -import os -import re -from .utils import echo -from .parser import split_arg_string -from .core import MultiCommand, Option - - -COMPLETION_SCRIPT = ''' -%(complete_func)s() { - COMPREPLY=( $( env COMP_WORDS="${COMP_WORDS[*]}" \\ - COMP_CWORD=$COMP_CWORD \\ - %(autocomplete_var)s=complete $1 ) ) - return 0 -} - -complete -F %(complete_func)s -o default %(script_names)s -''' - -_invalid_ident_char_re = re.compile(r'[^a-zA-Z0-9_]') - - -def get_completion_script(prog_name, complete_var): - cf_name = _invalid_ident_char_re.sub('', prog_name.replace('-', '_')) - return (COMPLETION_SCRIPT % { - 'complete_func': '_%s_completion' % cf_name, - 'script_names': prog_name, - 'autocomplete_var': complete_var, - }).strip() + ';' - - -def resolve_ctx(cli, prog_name, args): - ctx = cli.make_context(prog_name, args, resilient_parsing=True) - while ctx.protected_args + ctx.args and isinstance(ctx.command, MultiCommand): - a = ctx.protected_args + ctx.args - cmd = ctx.command.get_command(ctx, a[0]) - if cmd is None: - return None - ctx = cmd.make_context(a[0], a[1:], parent=ctx, resilient_parsing=True) - return ctx - - -def get_choices(cli, prog_name, args, incomplete): - ctx = resolve_ctx(cli, prog_name, args) - if ctx is None: - return - - choices = [] - if incomplete and not incomplete[:1].isalnum(): - for param in ctx.command.params: - if not isinstance(param, Option): - continue - choices.extend(param.opts) - choices.extend(param.secondary_opts) - elif isinstance(ctx.command, MultiCommand): - choices.extend(ctx.command.list_commands(ctx)) - - for item in choices: - if item.startswith(incomplete): - yield item - - -def do_complete(cli, prog_name): - cwords = split_arg_string(os.environ['COMP_WORDS']) - cword = int(os.environ['COMP_CWORD']) - args = cwords[1:cword] - try: - incomplete = cwords[cword] - except IndexError: - incomplete = '' - - for item in get_choices(cli, prog_name, args, incomplete): - echo(item) - - return True - - -def bashcomplete(cli, prog_name, complete_var, complete_instr): - if complete_instr == 'source': - echo(get_completion_script(prog_name, complete_var)) - return True - elif complete_instr == 'complete': - return do_complete(cli, prog_name) - return False diff --git a/pyextra/click/_compat.py b/pyextra/click/_compat.py deleted file mode 100644 index 2b43412c4..000000000 --- a/pyextra/click/_compat.py +++ /dev/null @@ -1,648 +0,0 @@ -import re -import io -import os -import sys -import codecs -from weakref import WeakKeyDictionary - - -PY2 = sys.version_info[0] == 2 -WIN = sys.platform.startswith('win') -DEFAULT_COLUMNS = 80 - - -_ansi_re = re.compile('\033\[((?:\d|;)*)([a-zA-Z])') - - -def get_filesystem_encoding(): - return sys.getfilesystemencoding() or sys.getdefaultencoding() - - -def _make_text_stream(stream, encoding, errors): - if encoding is None: - encoding = get_best_encoding(stream) - if errors is None: - errors = 'replace' - return _NonClosingTextIOWrapper(stream, encoding, errors, - line_buffering=True) - - -def is_ascii_encoding(encoding): - """Checks if a given encoding is ascii.""" - try: - return codecs.lookup(encoding).name == 'ascii' - except LookupError: - return False - - -def get_best_encoding(stream): - """Returns the default stream encoding if not found.""" - rv = getattr(stream, 'encoding', 
None) or sys.getdefaultencoding() - if is_ascii_encoding(rv): - return 'utf-8' - return rv - - -class _NonClosingTextIOWrapper(io.TextIOWrapper): - - def __init__(self, stream, encoding, errors, **extra): - self._stream = stream = _FixupStream(stream) - io.TextIOWrapper.__init__(self, stream, encoding, errors, **extra) - - # The io module is a place where the Python 3 text behavior - # was forced upon Python 2, so we need to unbreak - # it to look like Python 2. - if PY2: - def write(self, x): - if isinstance(x, str) or is_bytes(x): - try: - self.flush() - except Exception: - pass - return self.buffer.write(str(x)) - return io.TextIOWrapper.write(self, x) - - def writelines(self, lines): - for line in lines: - self.write(line) - - def __del__(self): - try: - self.detach() - except Exception: - pass - - def isatty(self): - # https://bitbucket.org/pypy/pypy/issue/1803 - return self._stream.isatty() - - -class _FixupStream(object): - """The new io interface needs more from streams than streams - traditionally implement. As such, this fix-up code is necessary in - some circumstances. - """ - - def __init__(self, stream): - self._stream = stream - - def __getattr__(self, name): - return getattr(self._stream, name) - - def read1(self, size): - f = getattr(self._stream, 'read1', None) - if f is not None: - return f(size) - # We only dispatch to readline instead of read in Python 2 as we - # do not want cause problems with the different implementation - # of line buffering. - if PY2: - return self._stream.readline(size) - return self._stream.read(size) - - def readable(self): - x = getattr(self._stream, 'readable', None) - if x is not None: - return x() - try: - self._stream.read(0) - except Exception: - return False - return True - - def writable(self): - x = getattr(self._stream, 'writable', None) - if x is not None: - return x() - try: - self._stream.write('') - except Exception: - try: - self._stream.write(b'') - except Exception: - return False - return True - - def seekable(self): - x = getattr(self._stream, 'seekable', None) - if x is not None: - return x() - try: - self._stream.seek(self._stream.tell()) - except Exception: - return False - return True - - -if PY2: - text_type = unicode - bytes = str - raw_input = raw_input - string_types = (str, unicode) - iteritems = lambda x: x.iteritems() - range_type = xrange - - def is_bytes(x): - return isinstance(x, (buffer, bytearray)) - - _identifier_re = re.compile(r'^[a-zA-Z_][a-zA-Z0-9_]*$') - - # For Windows, we need to force stdout/stdin/stderr to binary if it's - # fetched for that. This obviously is not the most correct way to do - # it as it changes global state. Unfortunately, there does not seem to - # be a clear better way to do it as just reopening the file in binary - # mode does not change anything. - # - # An option would be to do what Python 3 does and to open the file as - # binary only, patch it back to the system, and then use a wrapper - # stream that converts newlines. It's not quite clear what's the - # correct option here. - # - # This code also lives in _winconsole for the fallback to the console - # emulation stream. - # - # There are also Windows environments where the `msvcrt` module is not - # available (which is why we use try-catch instead of the WIN variable - # here), such as the Google App Engine development server on Windows. In - # those cases there is just nothing we can do. 
- try: - import msvcrt - except ImportError: - set_binary_mode = lambda x: x - else: - def set_binary_mode(f): - try: - fileno = f.fileno() - except Exception: - pass - else: - msvcrt.setmode(fileno, os.O_BINARY) - return f - - def isidentifier(x): - return _identifier_re.search(x) is not None - - def get_binary_stdin(): - return set_binary_mode(sys.stdin) - - def get_binary_stdout(): - return set_binary_mode(sys.stdout) - - def get_binary_stderr(): - return set_binary_mode(sys.stderr) - - def get_text_stdin(encoding=None, errors=None): - rv = _get_windows_console_stream(sys.stdin, encoding, errors) - if rv is not None: - return rv - return _make_text_stream(sys.stdin, encoding, errors) - - def get_text_stdout(encoding=None, errors=None): - rv = _get_windows_console_stream(sys.stdout, encoding, errors) - if rv is not None: - return rv - return _make_text_stream(sys.stdout, encoding, errors) - - def get_text_stderr(encoding=None, errors=None): - rv = _get_windows_console_stream(sys.stderr, encoding, errors) - if rv is not None: - return rv - return _make_text_stream(sys.stderr, encoding, errors) - - def filename_to_ui(value): - if isinstance(value, bytes): - value = value.decode(get_filesystem_encoding(), 'replace') - return value -else: - import io - text_type = str - raw_input = input - string_types = (str,) - range_type = range - isidentifier = lambda x: x.isidentifier() - iteritems = lambda x: iter(x.items()) - - def is_bytes(x): - return isinstance(x, (bytes, memoryview, bytearray)) - - def _is_binary_reader(stream, default=False): - try: - return isinstance(stream.read(0), bytes) - except Exception: - return default - # This happens in some cases where the stream was already - # closed. In this case, we assume the default. - - def _is_binary_writer(stream, default=False): - try: - stream.write(b'') - except Exception: - try: - stream.write('') - return False - except Exception: - pass - return default - return True - - def _find_binary_reader(stream): - # We need to figure out if the given stream is already binary. - # This can happen because the official docs recommend detaching - # the streams to get binary streams. Some code might do this, so - # we need to deal with this case explicitly. - if _is_binary_reader(stream, False): - return stream - - buf = getattr(stream, 'buffer', None) - - # Same situation here; this time we assume that the buffer is - # actually binary in case it's closed. - if buf is not None and _is_binary_reader(buf, True): - return buf - - def _find_binary_writer(stream): - # We need to figure out if the given stream is already binary. - # This can happen because the official docs recommend detatching - # the streams to get binary streams. Some code might do this, so - # we need to deal with this case explicitly. - if _is_binary_writer(stream, False): - return stream - - buf = getattr(stream, 'buffer', None) - - # Same situation here; this time we assume that the buffer is - # actually binary in case it's closed. - if buf is not None and _is_binary_writer(buf, True): - return buf - - def _stream_is_misconfigured(stream): - """A stream is misconfigured if its encoding is ASCII.""" - # If the stream does not have an encoding set, we assume it's set - # to ASCII. This appears to happen in certain unittest - # environments. It's not quite clear what the correct behavior is - # but this at least will force Click to recover somehow. 
- return is_ascii_encoding(getattr(stream, 'encoding', None) or 'ascii') - - def _is_compatible_text_stream(stream, encoding, errors): - stream_encoding = getattr(stream, 'encoding', None) - stream_errors = getattr(stream, 'errors', None) - - # Perfect match. - if stream_encoding == encoding and stream_errors == errors: - return True - - # Otherwise, it's only a compatible stream if we did not ask for - # an encoding. - if encoding is None: - return stream_encoding is not None - - return False - - def _force_correct_text_reader(text_reader, encoding, errors): - if _is_binary_reader(text_reader, False): - binary_reader = text_reader - else: - # If there is no target encoding set, we need to verify that the - # reader is not actually misconfigured. - if encoding is None and not _stream_is_misconfigured(text_reader): - return text_reader - - if _is_compatible_text_stream(text_reader, encoding, errors): - return text_reader - - # If the reader has no encoding, we try to find the underlying - # binary reader for it. If that fails because the environment is - # misconfigured, we silently go with the same reader because this - # is too common to happen. In that case, mojibake is better than - # exceptions. - binary_reader = _find_binary_reader(text_reader) - if binary_reader is None: - return text_reader - - # At this point, we default the errors to replace instead of strict - # because nobody handles those errors anyways and at this point - # we're so fundamentally fucked that nothing can repair it. - if errors is None: - errors = 'replace' - return _make_text_stream(binary_reader, encoding, errors) - - def _force_correct_text_writer(text_writer, encoding, errors): - if _is_binary_writer(text_writer, False): - binary_writer = text_writer - else: - # If there is no target encoding set, we need to verify that the - # writer is not actually misconfigured. - if encoding is None and not _stream_is_misconfigured(text_writer): - return text_writer - - if _is_compatible_text_stream(text_writer, encoding, errors): - return text_writer - - # If the writer has no encoding, we try to find the underlying - # binary writer for it. If that fails because the environment is - # misconfigured, we silently go with the same writer because this - # is too common to happen. In that case, mojibake is better than - # exceptions. - binary_writer = _find_binary_writer(text_writer) - if binary_writer is None: - return text_writer - - # At this point, we default the errors to replace instead of strict - # because nobody handles those errors anyways and at this point - # we're so fundamentally fucked that nothing can repair it. 
- if errors is None: - errors = 'replace' - return _make_text_stream(binary_writer, encoding, errors) - - def get_binary_stdin(): - reader = _find_binary_reader(sys.stdin) - if reader is None: - raise RuntimeError('Was not able to determine binary ' - 'stream for sys.stdin.') - return reader - - def get_binary_stdout(): - writer = _find_binary_writer(sys.stdout) - if writer is None: - raise RuntimeError('Was not able to determine binary ' - 'stream for sys.stdout.') - return writer - - def get_binary_stderr(): - writer = _find_binary_writer(sys.stderr) - if writer is None: - raise RuntimeError('Was not able to determine binary ' - 'stream for sys.stderr.') - return writer - - def get_text_stdin(encoding=None, errors=None): - rv = _get_windows_console_stream(sys.stdin, encoding, errors) - if rv is not None: - return rv - return _force_correct_text_reader(sys.stdin, encoding, errors) - - def get_text_stdout(encoding=None, errors=None): - rv = _get_windows_console_stream(sys.stdout, encoding, errors) - if rv is not None: - return rv - return _force_correct_text_writer(sys.stdout, encoding, errors) - - def get_text_stderr(encoding=None, errors=None): - rv = _get_windows_console_stream(sys.stderr, encoding, errors) - if rv is not None: - return rv - return _force_correct_text_writer(sys.stderr, encoding, errors) - - def filename_to_ui(value): - if isinstance(value, bytes): - value = value.decode(get_filesystem_encoding(), 'replace') - else: - value = value.encode('utf-8', 'surrogateescape') \ - .decode('utf-8', 'replace') - return value - - -def get_streerror(e, default=None): - if hasattr(e, 'strerror'): - msg = e.strerror - else: - if default is not None: - msg = default - else: - msg = str(e) - if isinstance(msg, bytes): - msg = msg.decode('utf-8', 'replace') - return msg - - -def open_stream(filename, mode='r', encoding=None, errors='strict', - atomic=False): - # Standard streams first. These are simple because they don't need - # special handling for the atomic flag. It's entirely ignored. - if filename == '-': - if 'w' in mode: - if 'b' in mode: - return get_binary_stdout(), False - return get_text_stdout(encoding=encoding, errors=errors), False - if 'b' in mode: - return get_binary_stdin(), False - return get_text_stdin(encoding=encoding, errors=errors), False - - # Non-atomic writes directly go out through the regular open functions. - if not atomic: - if encoding is None: - return open(filename, mode), True - return io.open(filename, mode, encoding=encoding, errors=errors), True - - # Some usability stuff for atomic writes - if 'a' in mode: - raise ValueError( - 'Appending to an existing file is not supported, because that ' - 'would involve an expensive `copy`-operation to a temporary ' - 'file. Open the file in normal `w`-mode and copy explicitly ' - 'if that\'s what you\'re after.' - ) - if 'x' in mode: - raise ValueError('Use the `overwrite`-parameter instead.') - if 'w' not in mode: - raise ValueError('Atomic writes only make sense with `w`-mode.') - - # Atomic writes are more complicated. They work by opening a file - # as a proxy in the same folder and then using the fdopen - # functionality to wrap it in a Python file. Then we wrap it in an - # atomic file that moves the file over on close. 
- import tempfile - fd, tmp_filename = tempfile.mkstemp(dir=os.path.dirname(filename), - prefix='.__atomic-write') - - if encoding is not None: - f = io.open(fd, mode, encoding=encoding, errors=errors) - else: - f = os.fdopen(fd, mode) - - return _AtomicFile(f, tmp_filename, filename), True - - -# Used in a destructor call, needs extra protection from interpreter cleanup. -if hasattr(os, 'replace'): - _replace = os.replace - _can_replace = True -else: - _replace = os.rename - _can_replace = not WIN - - -class _AtomicFile(object): - - def __init__(self, f, tmp_filename, real_filename): - self._f = f - self._tmp_filename = tmp_filename - self._real_filename = real_filename - self.closed = False - - @property - def name(self): - return self._real_filename - - def close(self, delete=False): - if self.closed: - return - self._f.close() - if not _can_replace: - try: - os.remove(self._real_filename) - except OSError: - pass - _replace(self._tmp_filename, self._real_filename) - self.closed = True - - def __getattr__(self, name): - return getattr(self._f, name) - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, tb): - self.close(delete=exc_type is not None) - - def __repr__(self): - return repr(self._f) - - -auto_wrap_for_ansi = None -colorama = None -get_winterm_size = None - - -def strip_ansi(value): - return _ansi_re.sub('', value) - - -def should_strip_ansi(stream=None, color=None): - if color is None: - if stream is None: - stream = sys.stdin - return not isatty(stream) - return not color - - -# If we're on Windows, we provide transparent integration through -# colorama. This will make ANSI colors through the echo function -# work automatically. -if WIN: - # Windows has a smaller terminal - DEFAULT_COLUMNS = 79 - - from ._winconsole import _get_windows_console_stream - - def _get_argv_encoding(): - import locale - return locale.getpreferredencoding() - - if PY2: - def raw_input(prompt=''): - sys.stderr.flush() - if prompt: - stdout = _default_text_stdout() - stdout.write(prompt) - stdin = _default_text_stdin() - return stdin.readline().rstrip('\r\n') - - try: - import colorama - except ImportError: - pass - else: - _ansi_stream_wrappers = WeakKeyDictionary() - - def auto_wrap_for_ansi(stream, color=None): - """This function wraps a stream so that calls through colorama - are issued to the win32 console API to recolor on demand. It - also ensures to reset the colors if a write call is interrupted - to not destroy the console afterwards. 
- """ - try: - cached = _ansi_stream_wrappers.get(stream) - except Exception: - cached = None - if cached is not None: - return cached - strip = should_strip_ansi(stream, color) - ansi_wrapper = colorama.AnsiToWin32(stream, strip=strip) - rv = ansi_wrapper.stream - _write = rv.write - - def _safe_write(s): - try: - return _write(s) - except: - ansi_wrapper.reset_all() - raise - - rv.write = _safe_write - try: - _ansi_stream_wrappers[stream] = rv - except Exception: - pass - return rv - - def get_winterm_size(): - win = colorama.win32.GetConsoleScreenBufferInfo( - colorama.win32.STDOUT).srWindow - return win.Right - win.Left, win.Bottom - win.Top -else: - def _get_argv_encoding(): - return getattr(sys.stdin, 'encoding', None) or get_filesystem_encoding() - - _get_windows_console_stream = lambda *x: None - - -def term_len(x): - return len(strip_ansi(x)) - - -def isatty(stream): - try: - return stream.isatty() - except Exception: - return False - - -def _make_cached_stream_func(src_func, wrapper_func): - cache = WeakKeyDictionary() - def func(): - stream = src_func() - try: - rv = cache.get(stream) - except Exception: - rv = None - if rv is not None: - return rv - rv = wrapper_func() - try: - cache[stream] = rv - except Exception: - pass - return rv - return func - - -_default_text_stdin = _make_cached_stream_func( - lambda: sys.stdin, get_text_stdin) -_default_text_stdout = _make_cached_stream_func( - lambda: sys.stdout, get_text_stdout) -_default_text_stderr = _make_cached_stream_func( - lambda: sys.stderr, get_text_stderr) - - -binary_streams = { - 'stdin': get_binary_stdin, - 'stdout': get_binary_stdout, - 'stderr': get_binary_stderr, -} - -text_streams = { - 'stdin': get_text_stdin, - 'stdout': get_text_stdout, - 'stderr': get_text_stderr, -} diff --git a/pyextra/click/_termui_impl.py b/pyextra/click/_termui_impl.py deleted file mode 100644 index 7cfd3d5c4..000000000 --- a/pyextra/click/_termui_impl.py +++ /dev/null @@ -1,547 +0,0 @@ -""" - click._termui_impl - ~~~~~~~~~~~~~~~~~~ - - This module contains implementations for the termui module. To keep the - import time of Click down, some infrequently used functionality is placed - in this module and only imported as needed. - - :copyright: (c) 2014 by Armin Ronacher. - :license: BSD, see LICENSE for more details. 
-""" -import os -import sys -import time -import math -from ._compat import _default_text_stdout, range_type, PY2, isatty, \ - open_stream, strip_ansi, term_len, get_best_encoding, WIN -from .utils import echo -from .exceptions import ClickException - - -if os.name == 'nt': - BEFORE_BAR = '\r' - AFTER_BAR = '\n' -else: - BEFORE_BAR = '\r\033[?25l' - AFTER_BAR = '\033[?25h\n' - - -def _length_hint(obj): - """Returns the length hint of an object.""" - try: - return len(obj) - except (AttributeError, TypeError): - try: - get_hint = type(obj).__length_hint__ - except AttributeError: - return None - try: - hint = get_hint(obj) - except TypeError: - return None - if hint is NotImplemented or \ - not isinstance(hint, (int, long)) or \ - hint < 0: - return None - return hint - - -class ProgressBar(object): - - def __init__(self, iterable, length=None, fill_char='#', empty_char=' ', - bar_template='%(bar)s', info_sep=' ', show_eta=True, - show_percent=None, show_pos=False, item_show_func=None, - label=None, file=None, color=None, width=30): - self.fill_char = fill_char - self.empty_char = empty_char - self.bar_template = bar_template - self.info_sep = info_sep - self.show_eta = show_eta - self.show_percent = show_percent - self.show_pos = show_pos - self.item_show_func = item_show_func - self.label = label or '' - if file is None: - file = _default_text_stdout() - self.file = file - self.color = color - self.width = width - self.autowidth = width == 0 - - if length is None: - length = _length_hint(iterable) - if iterable is None: - if length is None: - raise TypeError('iterable or length is required') - iterable = range_type(length) - self.iter = iter(iterable) - self.length = length - self.length_known = length is not None - self.pos = 0 - self.avg = [] - self.start = self.last_eta = time.time() - self.eta_known = False - self.finished = False - self.max_width = None - self.entered = False - self.current_item = None - self.is_hidden = not isatty(self.file) - self._last_line = None - - def __enter__(self): - self.entered = True - self.render_progress() - return self - - def __exit__(self, exc_type, exc_value, tb): - self.render_finish() - - def __iter__(self): - if not self.entered: - raise RuntimeError('You need to use progress bars in a with block.') - self.render_progress() - return self - - def render_finish(self): - if self.is_hidden: - return - self.file.write(AFTER_BAR) - self.file.flush() - - @property - def pct(self): - if self.finished: - return 1.0 - return min(self.pos / (float(self.length) or 1), 1.0) - - @property - def time_per_iteration(self): - if not self.avg: - return 0.0 - return sum(self.avg) / float(len(self.avg)) - - @property - def eta(self): - if self.length_known and not self.finished: - return self.time_per_iteration * (self.length - self.pos) - return 0.0 - - def format_eta(self): - if self.eta_known: - t = self.eta + 1 - seconds = t % 60 - t /= 60 - minutes = t % 60 - t /= 60 - hours = t % 24 - t /= 24 - if t > 0: - days = t - return '%dd %02d:%02d:%02d' % (days, hours, minutes, seconds) - else: - return '%02d:%02d:%02d' % (hours, minutes, seconds) - return '' - - def format_pos(self): - pos = str(self.pos) - if self.length_known: - pos += '/%s' % self.length - return pos - - def format_pct(self): - return ('% 4d%%' % int(self.pct * 100))[1:] - - def format_progress_line(self): - show_percent = self.show_percent - - info_bits = [] - if self.length_known: - bar_length = int(self.pct * self.width) - bar = self.fill_char * bar_length - bar += self.empty_char * (self.width 
- bar_length) - if show_percent is None: - show_percent = not self.show_pos - else: - if self.finished: - bar = self.fill_char * self.width - else: - bar = list(self.empty_char * (self.width or 1)) - if self.time_per_iteration != 0: - bar[int((math.cos(self.pos * self.time_per_iteration) - / 2.0 + 0.5) * self.width)] = self.fill_char - bar = ''.join(bar) - - if self.show_pos: - info_bits.append(self.format_pos()) - if show_percent: - info_bits.append(self.format_pct()) - if self.show_eta and self.eta_known and not self.finished: - info_bits.append(self.format_eta()) - if self.item_show_func is not None: - item_info = self.item_show_func(self.current_item) - if item_info is not None: - info_bits.append(item_info) - - return (self.bar_template % { - 'label': self.label, - 'bar': bar, - 'info': self.info_sep.join(info_bits) - }).rstrip() - - def render_progress(self): - from .termui import get_terminal_size - nl = False - - if self.is_hidden: - buf = [self.label] - nl = True - else: - buf = [] - # Update width in case the terminal has been resized - if self.autowidth: - old_width = self.width - self.width = 0 - clutter_length = term_len(self.format_progress_line()) - new_width = max(0, get_terminal_size()[0] - clutter_length) - if new_width < old_width: - buf.append(BEFORE_BAR) - buf.append(' ' * self.max_width) - self.max_width = new_width - self.width = new_width - - clear_width = self.width - if self.max_width is not None: - clear_width = self.max_width - - buf.append(BEFORE_BAR) - line = self.format_progress_line() - line_len = term_len(line) - if self.max_width is None or self.max_width < line_len: - self.max_width = line_len - buf.append(line) - - buf.append(' ' * (clear_width - line_len)) - line = ''.join(buf) - - # Render the line only if it changed. 
- if line != self._last_line: - self._last_line = line - echo(line, file=self.file, color=self.color, nl=nl) - self.file.flush() - - def make_step(self, n_steps): - self.pos += n_steps - if self.length_known and self.pos >= self.length: - self.finished = True - - if (time.time() - self.last_eta) < 1.0: - return - - self.last_eta = time.time() - self.avg = self.avg[-6:] + [-(self.start - time.time()) / (self.pos)] - - self.eta_known = self.length_known - - def update(self, n_steps): - self.make_step(n_steps) - self.render_progress() - - def finish(self): - self.eta_known = 0 - self.current_item = None - self.finished = True - - def next(self): - if self.is_hidden: - return next(self.iter) - try: - rv = next(self.iter) - self.current_item = rv - except StopIteration: - self.finish() - self.render_progress() - raise StopIteration() - else: - self.update(1) - return rv - - if not PY2: - __next__ = next - del next - - -def pager(text, color=None): - """Decide what method to use for paging through text.""" - stdout = _default_text_stdout() - if not isatty(sys.stdin) or not isatty(stdout): - return _nullpager(stdout, text, color) - pager_cmd = (os.environ.get('PAGER', None) or '').strip() - if pager_cmd: - if WIN: - return _tempfilepager(text, pager_cmd, color) - return _pipepager(text, pager_cmd, color) - if os.environ.get('TERM') in ('dumb', 'emacs'): - return _nullpager(stdout, text, color) - if WIN or sys.platform.startswith('os2'): - return _tempfilepager(text, 'more <', color) - if hasattr(os, 'system') and os.system('(less) 2>/dev/null') == 0: - return _pipepager(text, 'less', color) - - import tempfile - fd, filename = tempfile.mkstemp() - os.close(fd) - try: - if hasattr(os, 'system') and os.system('more "%s"' % filename) == 0: - return _pipepager(text, 'more', color) - return _nullpager(stdout, text, color) - finally: - os.unlink(filename) - - -def _pipepager(text, cmd, color): - """Page through text by feeding it to another program. Invoking a - pager through this might support colors. - """ - import subprocess - env = dict(os.environ) - - # If we're piping to less we might support colors under the - # condition that - cmd_detail = cmd.rsplit('/', 1)[-1].split() - if color is None and cmd_detail[0] == 'less': - less_flags = os.environ.get('LESS', '') + ' '.join(cmd_detail[1:]) - if not less_flags: - env['LESS'] = '-R' - color = True - elif 'r' in less_flags or 'R' in less_flags: - color = True - - if not color: - text = strip_ansi(text) - - c = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, - env=env) - encoding = get_best_encoding(c.stdin) - try: - c.stdin.write(text.encode(encoding, 'replace')) - c.stdin.close() - except (IOError, KeyboardInterrupt): - pass - - # Less doesn't respect ^C, but catches it for its own UI purposes (aborting - # search or other commands inside less). - # - # That means when the user hits ^C, the parent process (click) terminates, - # but less is still alive, paging the output and messing up the terminal. - # - # If the user wants to make the pager exit on ^C, they should set - # `LESS='-K'`. It's not our decision to make. 
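# Editorial sketch (not part of the deleted file): the pager selection above is
# normally reached through the public click.echo_via_pager() helper, and the
# PAGER/LESS environment handling shown in _pipepager applies transparently.
# The report text here is hypothetical.
import click

def show_report():
    text = '\n'.join('row %d' % i for i in range(500))
    # Uses $PAGER (typically `less -R`) when attached to a terminal,
    # otherwise falls back to plain output.
    click.echo_via_pager(text)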
- while True: - try: - c.wait() - except KeyboardInterrupt: - pass - else: - break - - -def _tempfilepager(text, cmd, color): - """Page through text by invoking a program on a temporary file.""" - import tempfile - filename = tempfile.mktemp() - if not color: - text = strip_ansi(text) - encoding = get_best_encoding(sys.stdout) - with open_stream(filename, 'wb')[0] as f: - f.write(text.encode(encoding)) - try: - os.system(cmd + ' "' + filename + '"') - finally: - os.unlink(filename) - - -def _nullpager(stream, text, color): - """Simply print unformatted text. This is the ultimate fallback.""" - if not color: - text = strip_ansi(text) - stream.write(text) - - -class Editor(object): - - def __init__(self, editor=None, env=None, require_save=True, - extension='.txt'): - self.editor = editor - self.env = env - self.require_save = require_save - self.extension = extension - - def get_editor(self): - if self.editor is not None: - return self.editor - for key in 'VISUAL', 'EDITOR': - rv = os.environ.get(key) - if rv: - return rv - if WIN: - return 'notepad' - for editor in 'vim', 'nano': - if os.system('which %s >/dev/null 2>&1' % editor) == 0: - return editor - return 'vi' - - def edit_file(self, filename): - import subprocess - editor = self.get_editor() - if self.env: - environ = os.environ.copy() - environ.update(self.env) - else: - environ = None - try: - c = subprocess.Popen('%s "%s"' % (editor, filename), - env=environ, shell=True) - exit_code = c.wait() - if exit_code != 0: - raise ClickException('%s: Editing failed!' % editor) - except OSError as e: - raise ClickException('%s: Editing failed: %s' % (editor, e)) - - def edit(self, text): - import tempfile - - text = text or '' - if text and not text.endswith('\n'): - text += '\n' - - fd, name = tempfile.mkstemp(prefix='editor-', suffix=self.extension) - try: - if WIN: - encoding = 'utf-8-sig' - text = text.replace('\n', '\r\n') - else: - encoding = 'utf-8' - text = text.encode(encoding) - - f = os.fdopen(fd, 'wb') - f.write(text) - f.close() - timestamp = os.path.getmtime(name) - - self.edit_file(name) - - if self.require_save \ - and os.path.getmtime(name) == timestamp: - return None - - f = open(name, 'rb') - try: - rv = f.read() - finally: - f.close() - return rv.decode('utf-8-sig').replace('\r\n', '\n') - finally: - os.unlink(name) - - -def open_url(url, wait=False, locate=False): - import subprocess - - def _unquote_file(url): - try: - import urllib - except ImportError: - import urllib - if url.startswith('file://'): - url = urllib.unquote(url[7:]) - return url - - if sys.platform == 'darwin': - args = ['open'] - if wait: - args.append('-W') - if locate: - args.append('-R') - args.append(_unquote_file(url)) - null = open('/dev/null', 'w') - try: - return subprocess.Popen(args, stderr=null).wait() - finally: - null.close() - elif WIN: - if locate: - url = _unquote_file(url) - args = 'explorer /select,"%s"' % _unquote_file( - url.replace('"', '')) - else: - args = 'start %s "" "%s"' % ( - wait and '/WAIT' or '', url.replace('"', '')) - return os.system(args) - - try: - if locate: - url = os.path.dirname(_unquote_file(url)) or '.' 
- else: - url = _unquote_file(url) - c = subprocess.Popen(['xdg-open', url]) - if wait: - return c.wait() - return 0 - except OSError: - if url.startswith(('http://', 'https://')) and not locate and not wait: - import webbrowser - webbrowser.open(url) - return 0 - return 1 - - -def _translate_ch_to_exc(ch): - if ch == '\x03': - raise KeyboardInterrupt() - if ch == '\x04': - raise EOFError() - - -if WIN: - import msvcrt - - def getchar(echo): - rv = msvcrt.getch() - if echo: - msvcrt.putchar(rv) - _translate_ch_to_exc(rv) - if PY2: - enc = getattr(sys.stdin, 'encoding', None) - if enc is not None: - rv = rv.decode(enc, 'replace') - else: - rv = rv.decode('cp1252', 'replace') - return rv -else: - import tty - import termios - - def getchar(echo): - if not isatty(sys.stdin): - f = open('/dev/tty') - fd = f.fileno() - else: - fd = sys.stdin.fileno() - f = None - try: - old_settings = termios.tcgetattr(fd) - try: - tty.setraw(fd) - ch = os.read(fd, 32) - if echo and isatty(sys.stdout): - sys.stdout.write(ch) - finally: - termios.tcsetattr(fd, termios.TCSADRAIN, old_settings) - sys.stdout.flush() - if f is not None: - f.close() - except termios.error: - pass - _translate_ch_to_exc(ch) - return ch.decode(get_best_encoding(sys.stdin), 'replace') diff --git a/pyextra/click/_textwrap.py b/pyextra/click/_textwrap.py deleted file mode 100644 index 7e776031e..000000000 --- a/pyextra/click/_textwrap.py +++ /dev/null @@ -1,38 +0,0 @@ -import textwrap -from contextlib import contextmanager - - -class TextWrapper(textwrap.TextWrapper): - - def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width): - space_left = max(width - cur_len, 1) - - if self.break_long_words: - last = reversed_chunks[-1] - cut = last[:space_left] - res = last[space_left:] - cur_line.append(cut) - reversed_chunks[-1] = res - elif not cur_line: - cur_line.append(reversed_chunks.pop()) - - @contextmanager - def extra_indent(self, indent): - old_initial_indent = self.initial_indent - old_subsequent_indent = self.subsequent_indent - self.initial_indent += indent - self.subsequent_indent += indent - try: - yield - finally: - self.initial_indent = old_initial_indent - self.subsequent_indent = old_subsequent_indent - - def indent_only(self, text): - rv = [] - for idx, line in enumerate(text.splitlines()): - indent = self.initial_indent - if idx > 0: - indent = self.subsequent_indent - rv.append(indent + line) - return '\n'.join(rv) diff --git a/pyextra/click/_unicodefun.py b/pyextra/click/_unicodefun.py deleted file mode 100644 index 9e17a384e..000000000 --- a/pyextra/click/_unicodefun.py +++ /dev/null @@ -1,118 +0,0 @@ -import os -import sys -import codecs - -from ._compat import PY2 - - -# If someone wants to vendor click, we want to ensure the -# correct package is discovered. Ideally we could use a -# relative import here but unfortunately Python does not -# support that. 
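# Editorial sketch (not part of the deleted sources): the Editor, open_url and
# getchar implementations above are normally used through click's public
# wrappers click.edit(), click.launch() and click.getchar(). The prompts and
# text below are hypothetical.
import click

def interactive_demo():
    note = click.edit('# enter release notes\n')  # opens $VISUAL / $EDITOR
    if note is not None:
        click.echo(note)
    click.launch('https://palletsprojects.com/')  # open in the default browser
    click.echo('press any key to continue...')
    click.getchar()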
-click = sys.modules[__name__.rsplit('.', 1)[0]] - - -def _find_unicode_literals_frame(): - import __future__ - frm = sys._getframe(1) - idx = 1 - while frm is not None: - if frm.f_globals.get('__name__', '').startswith('click.'): - frm = frm.f_back - idx += 1 - elif frm.f_code.co_flags & __future__.unicode_literals.compiler_flag: - return idx - else: - break - return 0 - - -def _check_for_unicode_literals(): - if not __debug__: - return - if not PY2 or click.disable_unicode_literals_warning: - return - bad_frame = _find_unicode_literals_frame() - if bad_frame <= 0: - return - from warnings import warn - warn(Warning('Click detected the use of the unicode_literals ' - '__future__ import. This is heavily discouraged ' - 'because it can introduce subtle bugs in your ' - 'code. You should instead use explicit u"" literals ' - 'for your unicode strings. For more information see ' - 'http://click.pocoo.org/python3/'), - stacklevel=bad_frame) - - -def _verify_python3_env(): - """Ensures that the environment is good for unicode on Python 3.""" - if PY2: - return - try: - import locale - fs_enc = codecs.lookup(locale.getpreferredencoding()).name - except Exception: - fs_enc = 'ascii' - if fs_enc != 'ascii': - return - - extra = '' - if os.name == 'posix': - import subprocess - rv = subprocess.Popen(['locale', '-a'], stdout=subprocess.PIPE, - stderr=subprocess.PIPE).communicate()[0] - good_locales = set() - has_c_utf8 = False - - # Make sure we're operating on text here. - if isinstance(rv, bytes): - rv = rv.decode('ascii', 'replace') - - for line in rv.splitlines(): - locale = line.strip() - if locale.lower().endswith(('.utf-8', '.utf8')): - good_locales.add(locale) - if locale.lower() in ('c.utf8', 'c.utf-8'): - has_c_utf8 = True - - extra += '\n\n' - if not good_locales: - extra += ( - 'Additional information: on this system no suitable UTF-8\n' - 'locales were discovered. This most likely requires resolving\n' - 'by reconfiguring the locale system.' - ) - elif has_c_utf8: - extra += ( - 'This system supports the C.UTF-8 locale which is recommended.\n' - 'You might be able to resolve your issue by exporting the\n' - 'following environment variables:\n\n' - ' export LC_ALL=C.UTF-8\n' - ' export LANG=C.UTF-8' - ) - else: - extra += ( - 'This system lists a couple of UTF-8 supporting locales that\n' - 'you can pick from. The following suitable locales where\n' - 'discovered: %s' - ) % ', '.join(sorted(good_locales)) - - bad_locale = None - for locale in os.environ.get('LC_ALL'), os.environ.get('LANG'): - if locale and locale.lower().endswith(('.utf-8', '.utf8')): - bad_locale = locale - if locale is not None: - break - if bad_locale is not None: - extra += ( - '\n\nClick discovered that you exported a UTF-8 locale\n' - 'but the locale system could not pick up from it because\n' - 'it does not exist. The exported locale is "%s" but it\n' - 'is not supported' - ) % bad_locale - - raise RuntimeError('Click will abort further execution because Python 3 ' - 'was configured to use ASCII as encoding for the ' - 'environment. Consult http://click.pocoo.org/python3/' - 'for mitigation steps.' + extra) diff --git a/pyextra/click/_winconsole.py b/pyextra/click/_winconsole.py deleted file mode 100644 index 9aed94216..000000000 --- a/pyextra/click/_winconsole.py +++ /dev/null @@ -1,273 +0,0 @@ -# -*- coding: utf-8 -*- -# This module is based on the excellent work by Adam Bartoš who -# provided a lot of what went into the implementation here in -# the discussion to issue1602 in the Python bug tracker. 
-# -# There are some general differences in regards to how this works -# compared to the original patches as we do not need to patch -# the entire interpreter but just work in our little world of -# echo and prmopt. - -import io -import os -import sys -import zlib -import time -import ctypes -import msvcrt -from click._compat import _NonClosingTextIOWrapper, text_type, PY2 -from ctypes import byref, POINTER, c_int, c_char, c_char_p, \ - c_void_p, py_object, c_ssize_t, c_ulong, windll, WINFUNCTYPE -try: - from ctypes import pythonapi - PyObject_GetBuffer = pythonapi.PyObject_GetBuffer - PyBuffer_Release = pythonapi.PyBuffer_Release -except ImportError: - pythonapi = None -from ctypes.wintypes import LPWSTR, LPCWSTR - - -c_ssize_p = POINTER(c_ssize_t) - -kernel32 = windll.kernel32 -GetStdHandle = kernel32.GetStdHandle -ReadConsoleW = kernel32.ReadConsoleW -WriteConsoleW = kernel32.WriteConsoleW -GetLastError = kernel32.GetLastError -GetCommandLineW = WINFUNCTYPE(LPWSTR)( - ('GetCommandLineW', windll.kernel32)) -CommandLineToArgvW = WINFUNCTYPE( - POINTER(LPWSTR), LPCWSTR, POINTER(c_int))( - ('CommandLineToArgvW', windll.shell32)) - - -STDIN_HANDLE = GetStdHandle(-10) -STDOUT_HANDLE = GetStdHandle(-11) -STDERR_HANDLE = GetStdHandle(-12) - - -PyBUF_SIMPLE = 0 -PyBUF_WRITABLE = 1 - -ERROR_SUCCESS = 0 -ERROR_NOT_ENOUGH_MEMORY = 8 -ERROR_OPERATION_ABORTED = 995 - -STDIN_FILENO = 0 -STDOUT_FILENO = 1 -STDERR_FILENO = 2 - -EOF = b'\x1a' -MAX_BYTES_WRITTEN = 32767 - - -class Py_buffer(ctypes.Structure): - _fields_ = [ - ('buf', c_void_p), - ('obj', py_object), - ('len', c_ssize_t), - ('itemsize', c_ssize_t), - ('readonly', c_int), - ('ndim', c_int), - ('format', c_char_p), - ('shape', c_ssize_p), - ('strides', c_ssize_p), - ('suboffsets', c_ssize_p), - ('internal', c_void_p) - ] - - if PY2: - _fields_.insert(-1, ('smalltable', c_ssize_t * 2)) - - -# On PyPy we cannot get buffers so our ability to operate here is -# serverly limited. 
-if pythonapi is None: - get_buffer = None -else: - def get_buffer(obj, writable=False): - buf = Py_buffer() - flags = PyBUF_WRITABLE if writable else PyBUF_SIMPLE - PyObject_GetBuffer(py_object(obj), byref(buf), flags) - try: - buffer_type = c_char * buf.len - return buffer_type.from_address(buf.buf) - finally: - PyBuffer_Release(byref(buf)) - - -class _WindowsConsoleRawIOBase(io.RawIOBase): - - def __init__(self, handle): - self.handle = handle - - def isatty(self): - io.RawIOBase.isatty(self) - return True - - -class _WindowsConsoleReader(_WindowsConsoleRawIOBase): - - def readable(self): - return True - - def readinto(self, b): - bytes_to_be_read = len(b) - if not bytes_to_be_read: - return 0 - elif bytes_to_be_read % 2: - raise ValueError('cannot read odd number of bytes from ' - 'UTF-16-LE encoded console') - - buffer = get_buffer(b, writable=True) - code_units_to_be_read = bytes_to_be_read // 2 - code_units_read = c_ulong() - - rv = ReadConsoleW(self.handle, buffer, code_units_to_be_read, - byref(code_units_read), None) - if GetLastError() == ERROR_OPERATION_ABORTED: - # wait for KeyboardInterrupt - time.sleep(0.1) - if not rv: - raise OSError('Windows error: %s' % GetLastError()) - - if buffer[0] == EOF: - return 0 - return 2 * code_units_read.value - - -class _WindowsConsoleWriter(_WindowsConsoleRawIOBase): - - def writable(self): - return True - - @staticmethod - def _get_error_message(errno): - if errno == ERROR_SUCCESS: - return 'ERROR_SUCCESS' - elif errno == ERROR_NOT_ENOUGH_MEMORY: - return 'ERROR_NOT_ENOUGH_MEMORY' - return 'Windows error %s' % errno - - def write(self, b): - bytes_to_be_written = len(b) - buf = get_buffer(b) - code_units_to_be_written = min(bytes_to_be_written, - MAX_BYTES_WRITTEN) // 2 - code_units_written = c_ulong() - - WriteConsoleW(self.handle, buf, code_units_to_be_written, - byref(code_units_written), None) - bytes_written = 2 * code_units_written.value - - if bytes_written == 0 and bytes_to_be_written > 0: - raise OSError(self._get_error_message(GetLastError())) - return bytes_written - - -class ConsoleStream(object): - - def __init__(self, text_stream, byte_stream): - self._text_stream = text_stream - self.buffer = byte_stream - - @property - def name(self): - return self.buffer.name - - def write(self, x): - if isinstance(x, text_type): - return self._text_stream.write(x) - try: - self.flush() - except Exception: - pass - return self.buffer.write(x) - - def writelines(self, lines): - for line in lines: - self.write(line) - - def __getattr__(self, name): - return getattr(self._text_stream, name) - - def isatty(self): - return self.buffer.isatty() - - def __repr__(self): - return '' % ( - self.name, - self.encoding, - ) - - -def _get_text_stdin(buffer_stream): - text_stream = _NonClosingTextIOWrapper( - io.BufferedReader(_WindowsConsoleReader(STDIN_HANDLE)), - 'utf-16-le', 'strict', line_buffering=True) - return ConsoleStream(text_stream, buffer_stream) - - -def _get_text_stdout(buffer_stream): - text_stream = _NonClosingTextIOWrapper( - _WindowsConsoleWriter(STDOUT_HANDLE), - 'utf-16-le', 'strict', line_buffering=True) - return ConsoleStream(text_stream, buffer_stream) - - -def _get_text_stderr(buffer_stream): - text_stream = _NonClosingTextIOWrapper( - _WindowsConsoleWriter(STDERR_HANDLE), - 'utf-16-le', 'strict', line_buffering=True) - return ConsoleStream(text_stream, buffer_stream) - - -if PY2: - def _hash_py_argv(): - return zlib.crc32('\x00'.join(sys.argv[1:])) - - _initial_argv_hash = _hash_py_argv() - - def _get_windows_argv(): - argc = 
c_int(0) - argv_unicode = CommandLineToArgvW(GetCommandLineW(), byref(argc)) - argv = [argv_unicode[i] for i in range(0, argc.value)] - - if not hasattr(sys, 'frozen'): - argv = argv[1:] - while len(argv) > 0: - arg = argv[0] - if not arg.startswith('-') or arg == '-': - break - argv = argv[1:] - if arg.startswith(('-c', '-m')): - break - - return argv[1:] - - -_stream_factories = { - 0: _get_text_stdin, - 1: _get_text_stdout, - 2: _get_text_stderr, -} - - -def _get_windows_console_stream(f, encoding, errors): - if get_buffer is not None and \ - encoding in ('utf-16-le', None) \ - and errors in ('strict', None) and \ - hasattr(f, 'isatty') and f.isatty(): - func = _stream_factories.get(f.fileno()) - if func is not None: - if not PY2: - f = getattr(f, 'buffer') - if f is None: - return None - else: - # If we are on Python 2 we need to set the stream that we - # deal with to binary mode as otherwise the exercise if a - # bit moot. The same problems apply as for - # get_binary_stdin and friends from _compat. - msvcrt.setmode(f.fileno(), os.O_BINARY) - return func(f) diff --git a/pyextra/click/core.py b/pyextra/click/core.py deleted file mode 100644 index 745645147..000000000 --- a/pyextra/click/core.py +++ /dev/null @@ -1,1744 +0,0 @@ -import errno -import os -import sys -from contextlib import contextmanager -from itertools import repeat -from functools import update_wrapper - -from .types import convert_type, IntRange, BOOL -from .utils import make_str, make_default_short_help, echo, get_os_args -from .exceptions import ClickException, UsageError, BadParameter, Abort, \ - MissingParameter -from .termui import prompt, confirm -from .formatting import HelpFormatter, join_options -from .parser import OptionParser, split_opt -from .globals import push_context, pop_context - -from ._compat import PY2, isidentifier, iteritems -from ._unicodefun import _check_for_unicode_literals, _verify_python3_env - - -_missing = object() - - -SUBCOMMAND_METAVAR = 'COMMAND [ARGS]...' -SUBCOMMANDS_METAVAR = 'COMMAND1 [ARGS]... [COMMAND2 [ARGS]...]...' - - -def _bashcomplete(cmd, prog_name, complete_var=None): - """Internal handler for the bash completion support.""" - if complete_var is None: - complete_var = '_%s_COMPLETE' % (prog_name.replace('-', '_')).upper() - complete_instr = os.environ.get(complete_var) - if not complete_instr: - return - - from ._bashcomplete import bashcomplete - if bashcomplete(cmd, prog_name, complete_var, complete_instr): - sys.exit(1) - - -def _check_multicommand(base_command, cmd_name, cmd, register=False): - if not base_command.chain or not isinstance(cmd, MultiCommand): - return - if register: - hint = 'It is not possible to add multi commands as children to ' \ - 'another multi command that is in chain mode' - else: - hint = 'Found a multi command as subcommand to a multi command ' \ - 'that is in chain mode. This is not supported' - raise RuntimeError('%s. Command "%s" is set to chain and "%s" was ' - 'added as subcommand but it in itself is a ' - 'multi command. ("%s" is a %s within a chained ' - '%s named "%s"). This restriction was supposed to ' - 'be lifted in 6.0 but the fix was flawed. 
This ' - 'will be fixed in Click 7.0' % ( - hint, base_command.name, cmd_name, - cmd_name, cmd.__class__.__name__, - base_command.__class__.__name__, - base_command.name)) - - -def batch(iterable, batch_size): - return list(zip(*repeat(iter(iterable), batch_size))) - - -def invoke_param_callback(callback, ctx, param, value): - code = getattr(callback, '__code__', None) - args = getattr(code, 'co_argcount', 3) - - if args < 3: - # This will become a warning in Click 3.0: - from warnings import warn - warn(Warning('Invoked legacy parameter callback "%s". The new ' - 'signature for such callbacks starting with ' - 'click 2.0 is (ctx, param, value).' - % callback), stacklevel=3) - return callback(ctx, value) - return callback(ctx, param, value) - - -@contextmanager -def augment_usage_errors(ctx, param=None): - """Context manager that attaches extra information to exceptions that - fly. - """ - try: - yield - except BadParameter as e: - if e.ctx is None: - e.ctx = ctx - if param is not None and e.param is None: - e.param = param - raise - except UsageError as e: - if e.ctx is None: - e.ctx = ctx - raise - - -def iter_params_for_processing(invocation_order, declaration_order): - """Given a sequence of parameters in the order as should be considered - for processing and an iterable of parameters that exist, this returns - a list in the correct order as they should be processed. - """ - def sort_key(item): - try: - idx = invocation_order.index(item) - except ValueError: - idx = float('inf') - return (not item.is_eager, idx) - - return sorted(declaration_order, key=sort_key) - - -class Context(object): - """The context is a special internal object that holds state relevant - for the script execution at every single level. It's normally invisible - to commands unless they opt-in to getting access to it. - - The context is useful as it can pass internal objects around and can - control special execution features such as reading data from - environment variables. - - A context can be used as context manager in which case it will call - :meth:`close` on teardown. - - .. versionadded:: 2.0 - Added the `resilient_parsing`, `help_option_names`, - `token_normalize_func` parameters. - - .. versionadded:: 3.0 - Added the `allow_extra_args` and `allow_interspersed_args` - parameters. - - .. versionadded:: 4.0 - Added the `color`, `ignore_unknown_options`, and - `max_content_width` parameters. - - :param command: the command class for this context. - :param parent: the parent context. - :param info_name: the info name for this invocation. Generally this - is the most descriptive name for the script or - command. For the toplevel script it is usually - the name of the script, for commands below it it's - the name of the script. - :param obj: an arbitrary object of user data. - :param auto_envvar_prefix: the prefix to use for automatic environment - variables. If this is `None` then reading - from environment variables is disabled. This - does not affect manually set environment - variables which are always read. - :param default_map: a dictionary (like object) with default values - for parameters. - :param terminal_width: the width of the terminal. The default is - inherit from parent context. If no context - defines the terminal width then auto - detection will be applied. - :param max_content_width: the maximum width for content rendered by - Click (this currently only affects help - pages). This defaults to 80 characters if - not overridden. 
In other words: even if the - terminal is larger than that, Click will not - format things wider than 80 characters by - default. In addition to that, formatters might - add some safety mapping on the right. - :param resilient_parsing: if this flag is enabled then Click will - parse without any interactivity or callback - invocation. This is useful for implementing - things such as completion support. - :param allow_extra_args: if this is set to `True` then extra arguments - at the end will not raise an error and will be - kept on the context. The default is to inherit - from the command. - :param allow_interspersed_args: if this is set to `False` then options - and arguments cannot be mixed. The - default is to inherit from the command. - :param ignore_unknown_options: instructs click to ignore options it does - not know and keeps them for later - processing. - :param help_option_names: optionally a list of strings that define how - the default help parameter is named. The - default is ``['--help']``. - :param token_normalize_func: an optional function that is used to - normalize tokens (options, choices, - etc.). This for instance can be used to - implement case insensitive behavior. - :param color: controls if the terminal supports ANSI colors or not. The - default is autodetection. This is only needed if ANSI - codes are used in texts that Click prints which is by - default not the case. This for instance would affect - help output. - """ - - def __init__(self, command, parent=None, info_name=None, obj=None, - auto_envvar_prefix=None, default_map=None, - terminal_width=None, max_content_width=None, - resilient_parsing=False, allow_extra_args=None, - allow_interspersed_args=None, - ignore_unknown_options=None, help_option_names=None, - token_normalize_func=None, color=None): - #: the parent context or `None` if none exists. - self.parent = parent - #: the :class:`Command` for this context. - self.command = command - #: the descriptive information name - self.info_name = info_name - #: the parsed parameters except if the value is hidden in which - #: case it's not remembered. - self.params = {} - #: the leftover arguments. - self.args = [] - #: protected arguments. These are arguments that are prepended - #: to `args` when certain parsing scenarios are encountered but - #: must be never propagated to another arguments. This is used - #: to implement nested parsing. - self.protected_args = [] - if obj is None and parent is not None: - obj = parent.obj - #: the user object stored. - self.obj = obj - self._meta = getattr(parent, 'meta', {}) - - #: A dictionary (-like object) with defaults for parameters. - if default_map is None \ - and parent is not None \ - and parent.default_map is not None: - default_map = parent.default_map.get(info_name) - self.default_map = default_map - - #: This flag indicates if a subcommand is going to be executed. A - #: group callback can use this information to figure out if it's - #: being executed directly or because the execution flow passes - #: onwards to a subcommand. By default it's None, but it can be - #: the name of the subcommand to execute. - #: - #: If chaining is enabled this will be set to ``'*'`` in case - #: any commands are executed. It is however not possible to - #: figure out which ones. If you require this knowledge you - #: should use a :func:`resultcallback`. 
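# Editorial sketch of the invoked_subcommand attribute documented just above
# (hypothetical group and command names, not part of the deleted file):
import click

@click.group(invoke_without_command=True)
@click.pass_context
def cli(ctx):
    if ctx.invoked_subcommand is None:
        click.echo('no subcommand given, running default behaviour')

@cli.command()
def sync():
    click.echo('syncing')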
- self.invoked_subcommand = None - - if terminal_width is None and parent is not None: - terminal_width = parent.terminal_width - #: The width of the terminal (None is autodetection). - self.terminal_width = terminal_width - - if max_content_width is None and parent is not None: - max_content_width = parent.max_content_width - #: The maximum width of formatted content (None implies a sensible - #: default which is 80 for most things). - self.max_content_width = max_content_width - - if allow_extra_args is None: - allow_extra_args = command.allow_extra_args - #: Indicates if the context allows extra args or if it should - #: fail on parsing. - #: - #: .. versionadded:: 3.0 - self.allow_extra_args = allow_extra_args - - if allow_interspersed_args is None: - allow_interspersed_args = command.allow_interspersed_args - #: Indicates if the context allows mixing of arguments and - #: options or not. - #: - #: .. versionadded:: 3.0 - self.allow_interspersed_args = allow_interspersed_args - - if ignore_unknown_options is None: - ignore_unknown_options = command.ignore_unknown_options - #: Instructs click to ignore options that a command does not - #: understand and will store it on the context for later - #: processing. This is primarily useful for situations where you - #: want to call into external programs. Generally this pattern is - #: strongly discouraged because it's not possibly to losslessly - #: forward all arguments. - #: - #: .. versionadded:: 4.0 - self.ignore_unknown_options = ignore_unknown_options - - if help_option_names is None: - if parent is not None: - help_option_names = parent.help_option_names - else: - help_option_names = ['--help'] - - #: The names for the help options. - self.help_option_names = help_option_names - - if token_normalize_func is None and parent is not None: - token_normalize_func = parent.token_normalize_func - - #: An optional normalization function for tokens. This is - #: options, choices, commands etc. - self.token_normalize_func = token_normalize_func - - #: Indicates if resilient parsing is enabled. In that case Click - #: will do its best to not cause any failures. - self.resilient_parsing = resilient_parsing - - # If there is no envvar prefix yet, but the parent has one and - # the command on this level has a name, we can expand the envvar - # prefix automatically. - if auto_envvar_prefix is None: - if parent is not None \ - and parent.auto_envvar_prefix is not None and \ - self.info_name is not None: - auto_envvar_prefix = '%s_%s' % (parent.auto_envvar_prefix, - self.info_name.upper()) - else: - self.auto_envvar_prefix = auto_envvar_prefix.upper() - self.auto_envvar_prefix = auto_envvar_prefix - - if color is None and parent is not None: - color = parent.color - - #: Controls if styling output is wanted or not. - self.color = color - - self._close_callbacks = [] - self._depth = 0 - - def __enter__(self): - self._depth += 1 - push_context(self) - return self - - def __exit__(self, exc_type, exc_value, tb): - self._depth -= 1 - if self._depth == 0: - self.close() - pop_context() - - @contextmanager - def scope(self, cleanup=True): - """This helper method can be used with the context object to promote - it to the current thread local (see :func:`get_current_context`). - The default behavior of this is to invoke the cleanup functions which - can be disabled by setting `cleanup` to `False`. The cleanup - functions are typically used for things such as closing file handles. 
- - If the cleanup is intended the context object can also be directly - used as a context manager. - - Example usage:: - - with ctx.scope(): - assert get_current_context() is ctx - - This is equivalent:: - - with ctx: - assert get_current_context() is ctx - - .. versionadded:: 5.0 - - :param cleanup: controls if the cleanup functions should be run or - not. The default is to run these functions. In - some situations the context only wants to be - temporarily pushed in which case this can be disabled. - Nested pushes automatically defer the cleanup. - """ - if not cleanup: - self._depth += 1 - try: - with self as rv: - yield rv - finally: - if not cleanup: - self._depth -= 1 - - @property - def meta(self): - """This is a dictionary which is shared with all the contexts - that are nested. It exists so that click utiltiies can store some - state here if they need to. It is however the responsibility of - that code to manage this dictionary well. - - The keys are supposed to be unique dotted strings. For instance - module paths are a good choice for it. What is stored in there is - irrelevant for the operation of click. However what is important is - that code that places data here adheres to the general semantics of - the system. - - Example usage:: - - LANG_KEY = __name__ + '.lang' - - def set_language(value): - ctx = get_current_context() - ctx.meta[LANG_KEY] = value - - def get_language(): - return get_current_context().meta.get(LANG_KEY, 'en_US') - - .. versionadded:: 5.0 - """ - return self._meta - - def make_formatter(self): - """Creates the formatter for the help and usage output.""" - return HelpFormatter(width=self.terminal_width, - max_width=self.max_content_width) - - def call_on_close(self, f): - """This decorator remembers a function as callback that should be - executed when the context tears down. This is most useful to bind - resource handling to the script execution. For instance, file objects - opened by the :class:`File` type will register their close callbacks - here. - - :param f: the function to execute on teardown. - """ - self._close_callbacks.append(f) - return f - - def close(self): - """Invokes all close callbacks.""" - for cb in self._close_callbacks: - cb() - self._close_callbacks = [] - - @property - def command_path(self): - """The computed command path. This is used for the ``usage`` - information on the help page. It's automatically created by - combining the info names of the chain of contexts to the root. - """ - rv = '' - if self.info_name is not None: - rv = self.info_name - if self.parent is not None: - rv = self.parent.command_path + ' ' + rv - return rv.lstrip() - - def find_root(self): - """Finds the outermost context.""" - node = self - while node.parent is not None: - node = node.parent - return node - - def find_object(self, object_type): - """Finds the closest object of a given type.""" - node = self - while node is not None: - if isinstance(node.obj, object_type): - return node.obj - node = node.parent - - def ensure_object(self, object_type): - """Like :meth:`find_object` but sets the innermost object to a - new instance of `object_type` if it does not exist. - """ - rv = self.find_object(object_type) - if rv is None: - self.obj = rv = object_type() - return rv - - def lookup_default(self, name): - """Looks up the default for a parameter name. This by default - looks into the :attr:`default_map` if available. 
- """ - if self.default_map is not None: - rv = self.default_map.get(name) - if callable(rv): - rv = rv() - return rv - - def fail(self, message): - """Aborts the execution of the program with a specific error - message. - - :param message: the error message to fail with. - """ - raise UsageError(message, self) - - def abort(self): - """Aborts the script.""" - raise Abort() - - def exit(self, code=0): - """Exits the application with a given exit code.""" - sys.exit(code) - - def get_usage(self): - """Helper method to get formatted usage string for the current - context and command. - """ - return self.command.get_usage(self) - - def get_help(self): - """Helper method to get formatted help page for the current - context and command. - """ - return self.command.get_help(self) - - def invoke(*args, **kwargs): - """Invokes a command callback in exactly the way it expects. There - are two ways to invoke this method: - - 1. the first argument can be a callback and all other arguments and - keyword arguments are forwarded directly to the function. - 2. the first argument is a click command object. In that case all - arguments are forwarded as well but proper click parameters - (options and click arguments) must be keyword arguments and Click - will fill in defaults. - - Note that before Click 3.2 keyword arguments were not properly filled - in against the intention of this code and no context was created. For - more information about this change and why it was done in a bugfix - release see :ref:`upgrade-to-3.2`. - """ - self, callback = args[:2] - ctx = self - - # It's also possible to invoke another command which might or - # might not have a callback. In that case we also fill - # in defaults and make a new context for this command. - if isinstance(callback, Command): - other_cmd = callback - callback = other_cmd.callback - ctx = Context(other_cmd, info_name=other_cmd.name, parent=self) - if callback is None: - raise TypeError('The given command does not have a ' - 'callback that can be invoked.') - - for param in other_cmd.params: - if param.name not in kwargs and param.expose_value: - kwargs[param.name] = param.get_default(ctx) - - args = args[2:] - with augment_usage_errors(self): - with ctx: - return callback(*args, **kwargs) - - def forward(*args, **kwargs): - """Similar to :meth:`invoke` but fills in default keyword - arguments from the current context if the other command expects - it. This cannot invoke callbacks directly, only other commands. - """ - self, cmd = args[:2] - - # It's also possible to invoke another command which might or - # might not have a callback. - if not isinstance(cmd, Command): - raise TypeError('Callback is not a command.') - - for param in self.params: - if param not in kwargs: - kwargs[param] = self.params[param] - - return self.invoke(cmd, **kwargs) - - -class BaseCommand(object): - """The base command implements the minimal API contract of commands. - Most code will never use this as it does not implement a lot of useful - functionality but it can act as the direct subclass of alternative - parsing methods that do not depend on the Click parser. - - For instance, this can be used to bridge Click and other systems like - argparse or docopt. - - Because base commands do not implement a lot of the API that other - parts of Click take for granted, they are not supported for all - operations. For instance, they cannot be used with the decorators - usually and they have no built-in callback system. - - .. 
versionchanged:: 2.0 - Added the `context_settings` parameter. - - :param name: the name of the command to use unless a group overrides it. - :param context_settings: an optional dictionary with defaults that are - passed to the context object. - """ - #: the default for the :attr:`Context.allow_extra_args` flag. - allow_extra_args = False - #: the default for the :attr:`Context.allow_interspersed_args` flag. - allow_interspersed_args = True - #: the default for the :attr:`Context.ignore_unknown_options` flag. - ignore_unknown_options = False - - def __init__(self, name, context_settings=None): - #: the name the command thinks it has. Upon registering a command - #: on a :class:`Group` the group will default the command name - #: with this information. You should instead use the - #: :class:`Context`\'s :attr:`~Context.info_name` attribute. - self.name = name - if context_settings is None: - context_settings = {} - #: an optional dictionary with defaults passed to the context. - self.context_settings = context_settings - - def get_usage(self, ctx): - raise NotImplementedError('Base commands cannot get usage') - - def get_help(self, ctx): - raise NotImplementedError('Base commands cannot get help') - - def make_context(self, info_name, args, parent=None, **extra): - """This function when given an info name and arguments will kick - off the parsing and create a new :class:`Context`. It does not - invoke the actual command callback though. - - :param info_name: the info name for this invokation. Generally this - is the most descriptive name for the script or - command. For the toplevel script it's usually - the name of the script, for commands below it it's - the name of the script. - :param args: the arguments to parse as list of strings. - :param parent: the parent context if available. - :param extra: extra keyword arguments forwarded to the context - constructor. - """ - for key, value in iteritems(self.context_settings): - if key not in extra: - extra[key] = value - ctx = Context(self, info_name=info_name, parent=parent, **extra) - with ctx.scope(cleanup=False): - self.parse_args(ctx, args) - return ctx - - def parse_args(self, ctx, args): - """Given a context and a list of arguments this creates the parser - and parses the arguments, then modifies the context as necessary. - This is automatically invoked by :meth:`make_context`. - """ - raise NotImplementedError('Base commands do not know how to parse ' - 'arguments.') - - def invoke(self, ctx): - """Given a context, this invokes the command. The default - implementation is raising a not implemented error. - """ - raise NotImplementedError('Base commands are not invokable by default') - - def main(self, args=None, prog_name=None, complete_var=None, - standalone_mode=True, **extra): - """This is the way to invoke a script with all the bells and - whistles as a command line application. This will always terminate - the application after a call. If this is not wanted, ``SystemExit`` - needs to be caught. - - This method is also available by directly calling the instance of - a :class:`Command`. - - .. versionadded:: 3.0 - Added the `standalone_mode` flag to control the standalone mode. - - :param args: the arguments that should be used for parsing. If not - provided, ``sys.argv[1:]`` is used. - :param prog_name: the program name that should be used. By default - the program name is constructed by taking the file - name from ``sys.argv[0]``. - :param complete_var: the environment variable that controls the - bash completion support. 
The default is - ``"__COMPLETE"`` with prog name in - uppercase. - :param standalone_mode: the default behavior is to invoke the script - in standalone mode. Click will then - handle exceptions and convert them into - error messages and the function will never - return but shut down the interpreter. If - this is set to `False` they will be - propagated to the caller and the return - value of this function is the return value - of :meth:`invoke`. - :param extra: extra keyword arguments are forwarded to the context - constructor. See :class:`Context` for more information. - """ - # If we are in Python 3, we will verify that the environment is - # sane at this point of reject further execution to avoid a - # broken script. - if not PY2: - _verify_python3_env() - else: - _check_for_unicode_literals() - - if args is None: - args = get_os_args() - else: - args = list(args) - - if prog_name is None: - prog_name = make_str(os.path.basename( - sys.argv and sys.argv[0] or __file__)) - - # Hook for the Bash completion. This only activates if the Bash - # completion is actually enabled, otherwise this is quite a fast - # noop. - _bashcomplete(self, prog_name, complete_var) - - try: - try: - with self.make_context(prog_name, args, **extra) as ctx: - rv = self.invoke(ctx) - if not standalone_mode: - return rv - ctx.exit() - except (EOFError, KeyboardInterrupt): - echo(file=sys.stderr) - raise Abort() - except ClickException as e: - if not standalone_mode: - raise - e.show() - sys.exit(e.exit_code) - except IOError as e: - if e.errno == errno.EPIPE: - sys.exit(1) - else: - raise - except Abort: - if not standalone_mode: - raise - echo('Aborted!', file=sys.stderr) - sys.exit(1) - - def __call__(self, *args, **kwargs): - """Alias for :meth:`main`.""" - return self.main(*args, **kwargs) - - -class Command(BaseCommand): - """Commands are the basic building block of command line interfaces in - Click. A basic command handles command line parsing and might dispatch - more parsing to commands nested below it. - - .. versionchanged:: 2.0 - Added the `context_settings` parameter. - - :param name: the name of the command to use unless a group overrides it. - :param context_settings: an optional dictionary with defaults that are - passed to the context object. - :param callback: the callback to invoke. This is optional. - :param params: the parameters to register with this command. This can - be either :class:`Option` or :class:`Argument` objects. - :param help: the help string to use for this command. - :param epilog: like the help string but it's printed at the end of the - help page after everything else. - :param short_help: the short help to use for this command. This is - shown on the command listing of the parent command. - :param add_help_option: by default each command registers a ``--help`` - option. This can be disabled by this parameter. - """ - - def __init__(self, name, context_settings=None, callback=None, - params=None, help=None, epilog=None, short_help=None, - options_metavar='[OPTIONS]', add_help_option=True): - BaseCommand.__init__(self, name, context_settings) - #: the callback to execute when the command fires. This might be - #: `None` in which case nothing happens. - self.callback = callback - #: the list of parameters for this command in the order they - #: should show up in the help page and execute. Eager parameters - #: will automatically be handled before non eager ones. 
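# Editorial sketch of the standalone_mode flag described in main() above
# (hypothetical command, not part of the deleted file):
import click

@click.command()
@click.option('--count', default=1, help='Number of greetings.')
def hello(count):
    for _ in range(count):
        click.echo('Hello!')
    return count

if __name__ == '__main__':
    # With standalone_mode=False, exceptions propagate to the caller and
    # main() returns the callback's return value instead of calling sys.exit().
    result = hello.main(['--count', '2'], standalone_mode=False)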
- self.params = params or [] - self.help = help - self.epilog = epilog - self.options_metavar = options_metavar - if short_help is None and help: - short_help = make_default_short_help(help) - self.short_help = short_help - self.add_help_option = add_help_option - - def get_usage(self, ctx): - formatter = ctx.make_formatter() - self.format_usage(ctx, formatter) - return formatter.getvalue().rstrip('\n') - - def get_params(self, ctx): - rv = self.params - help_option = self.get_help_option(ctx) - if help_option is not None: - rv = rv + [help_option] - return rv - - def format_usage(self, ctx, formatter): - """Writes the usage line into the formatter.""" - pieces = self.collect_usage_pieces(ctx) - formatter.write_usage(ctx.command_path, ' '.join(pieces)) - - def collect_usage_pieces(self, ctx): - """Returns all the pieces that go into the usage line and returns - it as a list of strings. - """ - rv = [self.options_metavar] - for param in self.get_params(ctx): - rv.extend(param.get_usage_pieces(ctx)) - return rv - - def get_help_option_names(self, ctx): - """Returns the names for the help option.""" - all_names = set(ctx.help_option_names) - for param in self.params: - all_names.difference_update(param.opts) - all_names.difference_update(param.secondary_opts) - return all_names - - def get_help_option(self, ctx): - """Returns the help option object.""" - help_options = self.get_help_option_names(ctx) - if not help_options or not self.add_help_option: - return - - def show_help(ctx, param, value): - if value and not ctx.resilient_parsing: - echo(ctx.get_help(), color=ctx.color) - ctx.exit() - return Option(help_options, is_flag=True, - is_eager=True, expose_value=False, - callback=show_help, - help='Show this message and exit.') - - def make_parser(self, ctx): - """Creates the underlying option parser for this command.""" - parser = OptionParser(ctx) - parser.allow_interspersed_args = ctx.allow_interspersed_args - parser.ignore_unknown_options = ctx.ignore_unknown_options - for param in self.get_params(ctx): - param.add_to_parser(parser, ctx) - return parser - - def get_help(self, ctx): - """Formats the help into a string and returns it. This creates a - formatter and will call into the following formatting methods: - """ - formatter = ctx.make_formatter() - self.format_help(ctx, formatter) - return formatter.getvalue().rstrip('\n') - - def format_help(self, ctx, formatter): - """Writes the help into the formatter if it exists. 
- - This calls into the following methods: - - - :meth:`format_usage` - - :meth:`format_help_text` - - :meth:`format_options` - - :meth:`format_epilog` - """ - self.format_usage(ctx, formatter) - self.format_help_text(ctx, formatter) - self.format_options(ctx, formatter) - self.format_epilog(ctx, formatter) - - def format_help_text(self, ctx, formatter): - """Writes the help text to the formatter if it exists.""" - if self.help: - formatter.write_paragraph() - with formatter.indentation(): - formatter.write_text(self.help) - - def format_options(self, ctx, formatter): - """Writes all the options into the formatter if they exist.""" - opts = [] - for param in self.get_params(ctx): - rv = param.get_help_record(ctx) - if rv is not None: - opts.append(rv) - - if opts: - with formatter.section('Options'): - formatter.write_dl(opts) - - def format_epilog(self, ctx, formatter): - """Writes the epilog into the formatter if it exists.""" - if self.epilog: - formatter.write_paragraph() - with formatter.indentation(): - formatter.write_text(self.epilog) - - def parse_args(self, ctx, args): - parser = self.make_parser(ctx) - opts, args, param_order = parser.parse_args(args=args) - - for param in iter_params_for_processing( - param_order, self.get_params(ctx)): - value, args = param.handle_parse_result(ctx, opts, args) - - if args and not ctx.allow_extra_args and not ctx.resilient_parsing: - ctx.fail('Got unexpected extra argument%s (%s)' - % (len(args) != 1 and 's' or '', - ' '.join(map(make_str, args)))) - - ctx.args = args - return args - - def invoke(self, ctx): - """Given a context, this invokes the attached callback (if it exists) - in the right way. - """ - if self.callback is not None: - return ctx.invoke(self.callback, **ctx.params) - - -class MultiCommand(Command): - """A multi command is the basic implementation of a command that - dispatches to subcommands. The most common version is the - :class:`Group`. - - :param invoke_without_command: this controls how the multi command itself - is invoked. By default it's only invoked - if a subcommand is provided. - :param no_args_is_help: this controls what happens if no arguments are - provided. This option is enabled by default if - `invoke_without_command` is disabled or disabled - if it's enabled. If enabled this will add - ``--help`` as argument if no arguments are - passed. - :param subcommand_metavar: the string that is used in the documentation - to indicate the subcommand place. - :param chain: if this is set to `True` chaining of multiple subcommands - is enabled. This restricts the form of commands in that - they cannot have optional arguments but it allows - multiple commands to be chained together. - :param result_callback: the result callback to attach to this multi - command. - """ - allow_extra_args = True - allow_interspersed_args = False - - def __init__(self, name=None, invoke_without_command=False, - no_args_is_help=None, subcommand_metavar=None, - chain=False, result_callback=None, **attrs): - Command.__init__(self, name, **attrs) - if no_args_is_help is None: - no_args_is_help = not invoke_without_command - self.no_args_is_help = no_args_is_help - self.invoke_without_command = invoke_without_command - if subcommand_metavar is None: - if chain: - subcommand_metavar = SUBCOMMANDS_METAVAR - else: - subcommand_metavar = SUBCOMMAND_METAVAR - self.subcommand_metavar = subcommand_metavar - self.chain = chain - #: The result callback that is stored. This can be set or - #: overridden with the :func:`resultcallback` decorator. 
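# Editorial sketch of chain mode as described in the MultiCommand docstring
# above (hypothetical subcommands, not part of the deleted file):
# `python tool.py lint test` runs both subcommands in sequence.
import click

@click.group(chain=True)
def tool():
    pass

@tool.command()
def lint():
    click.echo('linting')

@tool.command()
def test():
    click.echo('testing')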
- self.result_callback = result_callback - - if self.chain: - for param in self.params: - if isinstance(param, Argument) and not param.required: - raise RuntimeError('Multi commands in chain mode cannot ' - 'have optional arguments.') - - def collect_usage_pieces(self, ctx): - rv = Command.collect_usage_pieces(self, ctx) - rv.append(self.subcommand_metavar) - return rv - - def format_options(self, ctx, formatter): - Command.format_options(self, ctx, formatter) - self.format_commands(ctx, formatter) - - def resultcallback(self, replace=False): - """Adds a result callback to the chain command. By default if a - result callback is already registered this will chain them but - this can be disabled with the `replace` parameter. The result - callback is invoked with the return value of the subcommand - (or the list of return values from all subcommands if chaining - is enabled) as well as the parameters as they would be passed - to the main callback. - - Example:: - - @click.group() - @click.option('-i', '--input', default=23) - def cli(input): - return 42 - - @cli.resultcallback() - def process_result(result, input): - return result + input - - .. versionadded:: 3.0 - - :param replace: if set to `True` an already existing result - callback will be removed. - """ - def decorator(f): - old_callback = self.result_callback - if old_callback is None or replace: - self.result_callback = f - return f - def function(__value, *args, **kwargs): - return f(old_callback(__value, *args, **kwargs), - *args, **kwargs) - self.result_callback = rv = update_wrapper(function, f) - return rv - return decorator - - def format_commands(self, ctx, formatter): - """Extra format methods for multi methods that adds all the commands - after the options. - """ - rows = [] - for subcommand in self.list_commands(ctx): - cmd = self.get_command(ctx, subcommand) - # What is this, the tool lied about a command. Ignore it - if cmd is None: - continue - - help = cmd.short_help or '' - rows.append((subcommand, help)) - - if rows: - with formatter.section('Commands'): - formatter.write_dl(rows) - - def parse_args(self, ctx, args): - if not args and self.no_args_is_help and not ctx.resilient_parsing: - echo(ctx.get_help(), color=ctx.color) - ctx.exit() - - rest = Command.parse_args(self, ctx, args) - if self.chain: - ctx.protected_args = rest - ctx.args = [] - elif rest: - ctx.protected_args, ctx.args = rest[:1], rest[1:] - - return ctx.args - - def invoke(self, ctx): - def _process_result(value): - if self.result_callback is not None: - value = ctx.invoke(self.result_callback, value, - **ctx.params) - return value - - if not ctx.protected_args: - # If we are invoked without command the chain flag controls - # how this happens. If we are not in chain mode, the return - # value here is the return value of the command. - # If however we are in chain mode, the return value is the - # return value of the result processor invoked with an empty - # list (which means that no subcommand actually was executed). - if self.invoke_without_command: - if not self.chain: - return Command.invoke(self, ctx) - with ctx: - Command.invoke(self, ctx) - return _process_result([]) - ctx.fail('Missing command.') - - # Fetch args back out - args = ctx.protected_args + ctx.args - ctx.args = [] - ctx.protected_args = [] - - # If we're not in chain mode, we only allow the invocation of a - # single command but we also inform the current context about the - # name of the command to invoke. 
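# Editorial sketch of Context.invoke() and Context.forward(), documented
# earlier in the Context class, which underlie the dispatch below
# (hypothetical commands, not part of the deleted file):
import click

@click.command()
@click.option('--count', default=1)
def build(count):
    click.echo('building %d time(s)' % count)

@click.command()
@click.option('--count', default=1)
@click.pass_context
def rebuild(ctx, count):
    ctx.forward(build)           # reuse the current parameter values
    ctx.invoke(build, count=5)   # or supply explicit keyword arguments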
- if not self.chain: - # Make sure the context is entered so we do not clean up - # resources until the result processor has worked. - with ctx: - cmd_name, cmd, args = self.resolve_command(ctx, args) - ctx.invoked_subcommand = cmd_name - Command.invoke(self, ctx) - sub_ctx = cmd.make_context(cmd_name, args, parent=ctx) - with sub_ctx: - return _process_result(sub_ctx.command.invoke(sub_ctx)) - - # In chain mode we create the contexts step by step, but after the - # base command has been invoked. Because at that point we do not - # know the subcommands yet, the invoked subcommand attribute is - # set to ``*`` to inform the command that subcommands are executed - # but nothing else. - with ctx: - ctx.invoked_subcommand = args and '*' or None - Command.invoke(self, ctx) - - # Otherwise we make every single context and invoke them in a - # chain. In that case the return value to the result processor - # is the list of all invoked subcommand's results. - contexts = [] - while args: - cmd_name, cmd, args = self.resolve_command(ctx, args) - sub_ctx = cmd.make_context(cmd_name, args, parent=ctx, - allow_extra_args=True, - allow_interspersed_args=False) - contexts.append(sub_ctx) - args, sub_ctx.args = sub_ctx.args, [] - - rv = [] - for sub_ctx in contexts: - with sub_ctx: - rv.append(sub_ctx.command.invoke(sub_ctx)) - return _process_result(rv) - - def resolve_command(self, ctx, args): - cmd_name = make_str(args[0]) - original_cmd_name = cmd_name - - # Get the command - cmd = self.get_command(ctx, cmd_name) - - # If we can't find the command but there is a normalization - # function available, we try with that one. - if cmd is None and ctx.token_normalize_func is not None: - cmd_name = ctx.token_normalize_func(cmd_name) - cmd = self.get_command(ctx, cmd_name) - - # If we don't find the command we want to show an error message - # to the user that it was not provided. However, there is - # something else we should do: if the first argument looks like - # an option we want to kick off parsing again for arguments to - # resolve things like --help which now should go to the main - # place. - if cmd is None: - if split_opt(cmd_name)[0]: - self.parse_args(ctx, ctx.args) - ctx.fail('No such command "%s".' % original_cmd_name) - - return cmd_name, cmd, args[1:] - - def get_command(self, ctx, cmd_name): - """Given a context and a command name, this returns a - :class:`Command` object if it exists or returns `None`. - """ - raise NotImplementedError() - - def list_commands(self, ctx): - """Returns a list of subcommand names in the order they should - appear. - """ - return [] - - -class Group(MultiCommand): - """A group allows a command to have subcommands attached. This is the - most common way to implement nesting in Click. - - :param commands: a dictionary of commands. - """ - - def __init__(self, name=None, commands=None, **attrs): - MultiCommand.__init__(self, name, **attrs) - #: the registered subcommands by their exported names. - self.commands = commands or {} - - def add_command(self, cmd, name=None): - """Registers another :class:`Command` with this group. If the name - is not provided, the name of the command is used. - """ - name = name or cmd.name - if name is None: - raise TypeError('Command has no name.') - _check_multicommand(self, name, cmd, register=True) - self.commands[name] = cmd - - def command(self, *args, **kwargs): - """A shortcut decorator for declaring and attaching a command to - the group. 
This takes the same arguments as :func:`command` but - immediately registers the created command with this instance by - calling into :meth:`add_command`. - """ - def decorator(f): - cmd = command(*args, **kwargs)(f) - self.add_command(cmd) - return cmd - return decorator - - def group(self, *args, **kwargs): - """A shortcut decorator for declaring and attaching a group to - the group. This takes the same arguments as :func:`group` but - immediately registers the created command with this instance by - calling into :meth:`add_command`. - """ - def decorator(f): - cmd = group(*args, **kwargs)(f) - self.add_command(cmd) - return cmd - return decorator - - def get_command(self, ctx, cmd_name): - return self.commands.get(cmd_name) - - def list_commands(self, ctx): - return sorted(self.commands) - - -class CommandCollection(MultiCommand): - """A command collection is a multi command that merges multiple multi - commands together into one. This is a straightforward implementation - that accepts a list of different multi commands as sources and - provides all the commands for each of them. - """ - - def __init__(self, name=None, sources=None, **attrs): - MultiCommand.__init__(self, name, **attrs) - #: The list of registered multi commands. - self.sources = sources or [] - - def add_source(self, multi_cmd): - """Adds a new multi command to the chain dispatcher.""" - self.sources.append(multi_cmd) - - def get_command(self, ctx, cmd_name): - for source in self.sources: - rv = source.get_command(ctx, cmd_name) - if rv is not None: - if self.chain: - _check_multicommand(self, cmd_name, rv) - return rv - - def list_commands(self, ctx): - rv = set() - for source in self.sources: - rv.update(source.list_commands(ctx)) - return sorted(rv) - - -class Parameter(object): - """A parameter to a command comes in two versions: they are either - :class:`Option`\s or :class:`Argument`\s. Other subclasses are currently - not supported by design as some of the internals for parsing are - intentionally not finalized. - - Some settings are supported by both options and arguments. - - .. versionchanged:: 2.0 - Changed signature for parameter callback to also be passed the - parameter. In Click 2.0, the old callback format will still work, - but it will raise a warning to give you change to migrate the - code easier. - - :param param_decls: the parameter declarations for this option or - argument. This is a list of flags or argument - names. - :param type: the type that should be used. Either a :class:`ParamType` - or a Python type. The later is converted into the former - automatically if supported. - :param required: controls if this is optional or not. - :param default: the default value if omitted. This can also be a callable, - in which case it's invoked when the default is needed - without any arguments. - :param callback: a callback that should be executed after the parameter - was matched. This is called as ``fn(ctx, param, - value)`` and needs to return the value. Before Click - 2.0, the signature was ``(ctx, value)``. - :param nargs: the number of arguments to match. If not ``1`` the return - value is a tuple instead of single value. The default for - nargs is ``1`` (except if the type is a tuple, then it's - the arity of the tuple). - :param metavar: how the value is represented in the help page. - :param expose_value: if this is `True` then the value is passed onwards - to the command callback and stored on the context, - otherwise it's skipped. 
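``CommandCollection`` above merges the subcommands of several multi commands into one interface; this is a rough sketch of how it is typically wired up, assuming ``click`` is installed and with purely illustrative group and command names::

    import click

    @click.group()
    def tools():
        pass

    @tools.command()
    def lint():
        click.echo('linting')

    @click.group()
    def admin():
        pass

    @admin.command()
    def migrate():
        click.echo('migrating')

    # ``cli`` now exposes the union of both groups' subcommands.
    cli = click.CommandCollection(sources=[tools, admin])

    if __name__ == '__main__':
        cli()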
- :param is_eager: eager values are processed before non eager ones. This - should not be set for arguments or it will inverse the - order of processing. - :param envvar: a string or list of strings that are environment variables - that should be checked. - """ - param_type_name = 'parameter' - - def __init__(self, param_decls=None, type=None, required=False, - default=None, callback=None, nargs=None, metavar=None, - expose_value=True, is_eager=False, envvar=None): - self.name, self.opts, self.secondary_opts = \ - self._parse_decls(param_decls or (), expose_value) - - self.type = convert_type(type, default) - - # Default nargs to what the type tells us if we have that - # information available. - if nargs is None: - if self.type.is_composite: - nargs = self.type.arity - else: - nargs = 1 - - self.required = required - self.callback = callback - self.nargs = nargs - self.multiple = False - self.expose_value = expose_value - self.default = default - self.is_eager = is_eager - self.metavar = metavar - self.envvar = envvar - - @property - def human_readable_name(self): - """Returns the human readable name of this parameter. This is the - same as the name for options, but the metavar for arguments. - """ - return self.name - - def make_metavar(self): - if self.metavar is not None: - return self.metavar - metavar = self.type.get_metavar(self) - if metavar is None: - metavar = self.type.name.upper() - if self.nargs != 1: - metavar += '...' - return metavar - - def get_default(self, ctx): - """Given a context variable this calculates the default value.""" - # Otherwise go with the regular default. - if callable(self.default): - rv = self.default() - else: - rv = self.default - return self.type_cast_value(ctx, rv) - - def add_to_parser(self, parser, ctx): - pass - - def consume_value(self, ctx, opts): - value = opts.get(self.name) - if value is None: - value = ctx.lookup_default(self.name) - if value is None: - value = self.value_from_envvar(ctx) - return value - - def type_cast_value(self, ctx, value): - """Given a value this runs it properly through the type system. - This automatically handles things like `nargs` and `multiple` as - well as composite types. - """ - if self.type.is_composite: - if self.nargs <= 1: - raise TypeError('Attempted to invoke composite type ' - 'but nargs has been set to %s. This is ' - 'not supported; nargs needs to be set to ' - 'a fixed value > 1.' % self.nargs) - if self.multiple: - return tuple(self.type(x or (), self, ctx) for x in value or ()) - return self.type(value or (), self, ctx) - - def _convert(value, level): - if level == 0: - return self.type(value, self, ctx) - return tuple(_convert(x, level - 1) for x in value or ()) - return _convert(value, (self.nargs != 1) + bool(self.multiple)) - - def process_value(self, ctx, value): - """Given a value and context this runs the logic to convert the - value as necessary. - """ - # If the value we were given is None we do nothing. This way - # code that calls this can easily figure out if something was - # not provided. Otherwise it would be converted into an empty - # tuple for multiple invocations which is inconvenient. 
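To make the default and ``nargs`` handling above concrete, here is a small sketch (assuming ``click`` is installed; the option names are invented). A callable default is only invoked when the value is actually needed, and ``nargs=2`` turns the converted value into a two-item tuple::

    import time

    import click

    @click.command()
    @click.option('--stamp', default=lambda: int(time.time()),
                  help='Callable default, evaluated lazily.')
    @click.option('--point', nargs=2, type=float, default=(0.0, 0.0),
                  help='nargs != 1 yields a tuple of converted values.')
    def place(stamp, point):
        click.echo('%s -> %r' % (stamp, point))

    if __name__ == '__main__':
        place()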
- if value is not None: - return self.type_cast_value(ctx, value) - - def value_is_missing(self, value): - if value is None: - return True - if (self.nargs != 1 or self.multiple) and value == (): - return True - return False - - def full_process_value(self, ctx, value): - value = self.process_value(ctx, value) - - if value is None: - value = self.get_default(ctx) - - if self.required and self.value_is_missing(value): - raise MissingParameter(ctx=ctx, param=self) - - return value - - def resolve_envvar_value(self, ctx): - if self.envvar is None: - return - if isinstance(self.envvar, (tuple, list)): - for envvar in self.envvar: - rv = os.environ.get(envvar) - if rv is not None: - return rv - else: - return os.environ.get(self.envvar) - - def value_from_envvar(self, ctx): - rv = self.resolve_envvar_value(ctx) - if rv is not None and self.nargs != 1: - rv = self.type.split_envvar_value(rv) - return rv - - def handle_parse_result(self, ctx, opts, args): - with augment_usage_errors(ctx, param=self): - value = self.consume_value(ctx, opts) - try: - value = self.full_process_value(ctx, value) - except Exception: - if not ctx.resilient_parsing: - raise - value = None - if self.callback is not None: - try: - value = invoke_param_callback( - self.callback, ctx, self, value) - except Exception: - if not ctx.resilient_parsing: - raise - - if self.expose_value: - ctx.params[self.name] = value - return value, args - - def get_help_record(self, ctx): - pass - - def get_usage_pieces(self, ctx): - return [] - - -class Option(Parameter): - """Options are usually optional values on the command line and - have some extra features that arguments don't have. - - All other parameters are passed onwards to the parameter constructor. - - :param show_default: controls if the default value should be shown on the - help page. Normally, defaults are not shown. - :param prompt: if set to `True` or a non empty string then the user will - be prompted for input if not set. If set to `True` the - prompt will be the option name capitalized. - :param confirmation_prompt: if set then the value will need to be confirmed - if it was prompted for. - :param hide_input: if this is `True` then the input on the prompt will be - hidden from the user. This is useful for password - input. - :param is_flag: forces this option to act as a flag. The default is - auto detection. - :param flag_value: which value should be used for this flag if it's - enabled. This is set to a boolean automatically if - the option string contains a slash to mark two options. - :param multiple: if this is set to `True` then the argument is accepted - multiple times and recorded. This is similar to ``nargs`` - in how it works but supports arbitrary number of - arguments. - :param count: this flag makes an option increment an integer. - :param allow_from_autoenv: if this is enabled then the value of this - parameter will be pulled from an environment - variable in case a prefix is defined on the - context. - :param help: the help string. 
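The ``Option`` features listed above (environment variable fallback, prompting, counting) combine roughly as in the sketch below, assuming ``click`` is installed and with an invented ``APP_USERNAME`` variable name. Values are resolved from the command line first, then from the environment variable, and only then via an interactive prompt::

    import click

    @click.command()
    @click.option('--username', envvar='APP_USERNAME', prompt=True)
    @click.option('--password', prompt=True, hide_input=True,
                  confirmation_prompt=True)
    @click.option('-v', '--verbose', count=True)
    def login(username, password, verbose):
        click.echo('user=%s verbosity=%d' % (username, verbose))

    if __name__ == '__main__':
        login()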
- """ - param_type_name = 'option' - - def __init__(self, param_decls=None, show_default=False, - prompt=False, confirmation_prompt=False, - hide_input=False, is_flag=None, flag_value=None, - multiple=False, count=False, allow_from_autoenv=True, - type=None, help=None, **attrs): - default_is_missing = attrs.get('default', _missing) is _missing - Parameter.__init__(self, param_decls, type=type, **attrs) - - if prompt is True: - prompt_text = self.name.replace('_', ' ').capitalize() - elif prompt is False: - prompt_text = None - else: - prompt_text = prompt - self.prompt = prompt_text - self.confirmation_prompt = confirmation_prompt - self.hide_input = hide_input - - # Flags - if is_flag is None: - if flag_value is not None: - is_flag = True - else: - is_flag = bool(self.secondary_opts) - if is_flag and default_is_missing: - self.default = False - if flag_value is None: - flag_value = not self.default - self.is_flag = is_flag - self.flag_value = flag_value - if self.is_flag and isinstance(self.flag_value, bool) \ - and type is None: - self.type = BOOL - self.is_bool_flag = True - else: - self.is_bool_flag = False - - # Counting - self.count = count - if count: - if type is None: - self.type = IntRange(min=0) - if default_is_missing: - self.default = 0 - - self.multiple = multiple - self.allow_from_autoenv = allow_from_autoenv - self.help = help - self.show_default = show_default - - # Sanity check for stuff we don't support - if __debug__: - if self.nargs < 0: - raise TypeError('Options cannot have nargs < 0') - if self.prompt and self.is_flag and not self.is_bool_flag: - raise TypeError('Cannot prompt for flags that are not bools.') - if not self.is_bool_flag and self.secondary_opts: - raise TypeError('Got secondary option for non boolean flag.') - if self.is_bool_flag and self.hide_input \ - and self.prompt is not None: - raise TypeError('Hidden input does not work with boolean ' - 'flag prompts.') - if self.count: - if self.multiple: - raise TypeError('Options cannot be multiple and count ' - 'at the same time.') - elif self.is_flag: - raise TypeError('Options cannot be count and flags at ' - 'the same time.') - - def _parse_decls(self, decls, expose_value): - opts = [] - secondary_opts = [] - name = None - possible_names = [] - - for decl in decls: - if isidentifier(decl): - if name is not None: - raise TypeError('Name defined twice') - name = decl - else: - split_char = decl[:1] == '/' and ';' or '/' - if split_char in decl: - first, second = decl.split(split_char, 1) - first = first.rstrip() - if first: - possible_names.append(split_opt(first)) - opts.append(first) - second = second.lstrip() - if second: - secondary_opts.append(second.lstrip()) - else: - possible_names.append(split_opt(decl)) - opts.append(decl) - - if name is None and possible_names: - possible_names.sort(key=lambda x: len(x[0])) - name = possible_names[-1][1].replace('-', '_').lower() - if not isidentifier(name): - name = None - - if name is None: - if not expose_value: - return None, opts, secondary_opts - raise TypeError('Could not determine name for option') - - if not opts and not secondary_opts: - raise TypeError('No options defined but a name was passed (%s). ' - 'Did you mean to declare an argument instead ' - 'of an option?' 
% name) - - return name, opts, secondary_opts - - def add_to_parser(self, parser, ctx): - kwargs = { - 'dest': self.name, - 'nargs': self.nargs, - 'obj': self, - } - - if self.multiple: - action = 'append' - elif self.count: - action = 'count' - else: - action = 'store' - - if self.is_flag: - kwargs.pop('nargs', None) - if self.is_bool_flag and self.secondary_opts: - parser.add_option(self.opts, action=action + '_const', - const=True, **kwargs) - parser.add_option(self.secondary_opts, action=action + - '_const', const=False, **kwargs) - else: - parser.add_option(self.opts, action=action + '_const', - const=self.flag_value, - **kwargs) - else: - kwargs['action'] = action - parser.add_option(self.opts, **kwargs) - - def get_help_record(self, ctx): - any_prefix_is_slash = [] - - def _write_opts(opts): - rv, any_slashes = join_options(opts) - if any_slashes: - any_prefix_is_slash[:] = [True] - if not self.is_flag and not self.count: - rv += ' ' + self.make_metavar() - return rv - - rv = [_write_opts(self.opts)] - if self.secondary_opts: - rv.append(_write_opts(self.secondary_opts)) - - help = self.help or '' - extra = [] - if self.default is not None and self.show_default: - extra.append('default: %s' % ( - ', '.join('%s' % d for d in self.default) - if isinstance(self.default, (list, tuple)) - else self.default, )) - if self.required: - extra.append('required') - if extra: - help = '%s[%s]' % (help and help + ' ' or '', '; '.join(extra)) - - return ((any_prefix_is_slash and '; ' or ' / ').join(rv), help) - - def get_default(self, ctx): - # If we're a non boolean flag out default is more complex because - # we need to look at all flags in the same group to figure out - # if we're the the default one in which case we return the flag - # value as default. - if self.is_flag and not self.is_bool_flag: - for param in ctx.command.params: - if param.name == self.name and param.default: - return param.flag_value - return None - return Parameter.get_default(self, ctx) - - def prompt_for_value(self, ctx): - """This is an alternative flow that can be activated in the full - value processing if a value does not exist. It will prompt the - user until a valid value exists and then returns the processed - value as result. - """ - # Calculate the default before prompting anything to be stable. - default = self.get_default(ctx) - - # If this is a prompt for a flag we need to handle this - # differently. 
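The ``get_default`` special case above (non-boolean flags sharing one destination) corresponds to the feature-switch pattern below; a sketch with invented names, assuming ``click`` is installed::

    import click

    @click.command()
    # Both flags write to the same ``transform`` parameter; the one
    # declared with ``default=True`` supplies the flag value when
    # neither is passed on the command line.
    @click.option('--upper', 'transform', flag_value='upper', default=True)
    @click.option('--lower', 'transform', flag_value='lower')
    @click.option('--repeat', type=int, default=1, show_default=True)
    def shout(transform, repeat):
        click.echo(getattr('hello ' * repeat, transform)().strip())

    if __name__ == '__main__':
        shout()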
- if self.is_bool_flag: - return confirm(self.prompt, default) - - return prompt(self.prompt, default=default, - hide_input=self.hide_input, - confirmation_prompt=self.confirmation_prompt, - value_proc=lambda x: self.process_value(ctx, x)) - - def resolve_envvar_value(self, ctx): - rv = Parameter.resolve_envvar_value(self, ctx) - if rv is not None: - return rv - if self.allow_from_autoenv and \ - ctx.auto_envvar_prefix is not None: - envvar = '%s_%s' % (ctx.auto_envvar_prefix, self.name.upper()) - return os.environ.get(envvar) - - def value_from_envvar(self, ctx): - rv = self.resolve_envvar_value(ctx) - if rv is None: - return None - value_depth = (self.nargs != 1) + bool(self.multiple) - if value_depth > 0 and rv is not None: - rv = self.type.split_envvar_value(rv) - if self.multiple and self.nargs != 1: - rv = batch(rv, self.nargs) - return rv - - def full_process_value(self, ctx, value): - if value is None and self.prompt is not None \ - and not ctx.resilient_parsing: - return self.prompt_for_value(ctx) - return Parameter.full_process_value(self, ctx, value) - - -class Argument(Parameter): - """Arguments are positional parameters to a command. They generally - provide fewer features than options but can have infinite ``nargs`` - and are required by default. - - All parameters are passed onwards to the parameter constructor. - """ - param_type_name = 'argument' - - def __init__(self, param_decls, required=None, **attrs): - if required is None: - if attrs.get('default') is not None: - required = False - else: - required = attrs.get('nargs', 1) > 0 - Parameter.__init__(self, param_decls, required=required, **attrs) - if self.default is not None and self.nargs < 0: - raise TypeError('nargs=-1 in combination with a default value ' - 'is not supported.') - - @property - def human_readable_name(self): - if self.metavar is not None: - return self.metavar - return self.name.upper() - - def make_metavar(self): - if self.metavar is not None: - return self.metavar - var = self.name.upper() - if not self.required: - var = '[%s]' % var - if self.nargs != 1: - var += '...' - return var - - def _parse_decls(self, decls, expose_value): - if not decls: - if not expose_value: - return None, [], [] - raise TypeError('Could not determine name for argument') - if len(decls) == 1: - name = arg = decls[0] - name = name.replace('-', '_').lower() - elif len(decls) == 2: - name, arg = decls - else: - raise TypeError('Arguments take exactly one or two ' - 'parameter declarations, got %d' % len(decls)) - return name, [arg], [] - - def get_usage_pieces(self, ctx): - return [self.make_metavar()] - - def add_to_parser(self, parser, ctx): - parser.add_argument(dest=self.name, nargs=self.nargs, - obj=self) - - -# Circular dependency between decorators and core -from .decorators import command, group diff --git a/pyextra/click/decorators.py b/pyextra/click/decorators.py deleted file mode 100644 index 989345265..000000000 --- a/pyextra/click/decorators.py +++ /dev/null @@ -1,304 +0,0 @@ -import sys -import inspect - -from functools import update_wrapper - -from ._compat import iteritems -from ._unicodefun import _check_for_unicode_literals -from .utils import echo -from .globals import get_current_context - - -def pass_context(f): - """Marks a callback as wanting to receive the current context - object as first argument. 
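The automatic environment variable lookup above, together with variadic arguments, looks like this in practice (a sketch; the ``COPY`` prefix and file names are illustrative and ``click`` is assumed to be installed)::

    import click

    @click.command()
    @click.argument('src', nargs=-1)
    @click.argument('dst')
    @click.option('--workers', default=1)
    def copy(src, dst, workers):
        for path in src:
            click.echo('copy %s -> %s (%d workers)' % (path, dst, workers))

    if __name__ == '__main__':
        # With this prefix, setting COPY_WORKERS=4 in the environment
        # also populates --workers via the fallback implemented above.
        copy(auto_envvar_prefix='COPY')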
- """ - def new_func(*args, **kwargs): - return f(get_current_context(), *args, **kwargs) - return update_wrapper(new_func, f) - - -def pass_obj(f): - """Similar to :func:`pass_context`, but only pass the object on the - context onwards (:attr:`Context.obj`). This is useful if that object - represents the state of a nested system. - """ - def new_func(*args, **kwargs): - return f(get_current_context().obj, *args, **kwargs) - return update_wrapper(new_func, f) - - -def make_pass_decorator(object_type, ensure=False): - """Given an object type this creates a decorator that will work - similar to :func:`pass_obj` but instead of passing the object of the - current context, it will find the innermost context of type - :func:`object_type`. - - This generates a decorator that works roughly like this:: - - from functools import update_wrapper - - def decorator(f): - @pass_context - def new_func(ctx, *args, **kwargs): - obj = ctx.find_object(object_type) - return ctx.invoke(f, obj, *args, **kwargs) - return update_wrapper(new_func, f) - return decorator - - :param object_type: the type of the object to pass. - :param ensure: if set to `True`, a new object will be created and - remembered on the context if it's not there yet. - """ - def decorator(f): - def new_func(*args, **kwargs): - ctx = get_current_context() - if ensure: - obj = ctx.ensure_object(object_type) - else: - obj = ctx.find_object(object_type) - if obj is None: - raise RuntimeError('Managed to invoke callback without a ' - 'context object of type %r existing' - % object_type.__name__) - return ctx.invoke(f, obj, *args[1:], **kwargs) - return update_wrapper(new_func, f) - return decorator - - -def _make_command(f, name, attrs, cls): - if isinstance(f, Command): - raise TypeError('Attempted to convert a callback into a ' - 'command twice.') - try: - params = f.__click_params__ - params.reverse() - del f.__click_params__ - except AttributeError: - params = [] - help = attrs.get('help') - if help is None: - help = inspect.getdoc(f) - if isinstance(help, bytes): - help = help.decode('utf-8') - else: - help = inspect.cleandoc(help) - attrs['help'] = help - _check_for_unicode_literals() - return cls(name=name or f.__name__.lower(), - callback=f, params=params, **attrs) - - -def command(name=None, cls=None, **attrs): - """Creates a new :class:`Command` and uses the decorated function as - callback. This will also automatically attach all decorated - :func:`option`\s and :func:`argument`\s as parameters to the command. - - The name of the command defaults to the name of the function. If you - want to change that, you can pass the intended name as the first - argument. - - All keyword arguments are forwarded to the underlying command class. - - Once decorated the function turns into a :class:`Command` instance - that can be invoked as a command line utility or be attached to a - command :class:`Group`. - - :param name: the name of the command. This defaults to the function - name. - :param cls: the command class to instantiate. This defaults to - :class:`Command`. - """ - if cls is None: - cls = Command - def decorator(f): - cmd = _make_command(f, name, attrs, cls) - cmd.__doc__ = f.__doc__ - return cmd - return decorator - - -def group(name=None, **attrs): - """Creates a new :class:`Group` with a function as callback. This - works otherwise the same as :func:`command` just that the `cls` - parameter is set to :class:`Group`. 
- """ - attrs.setdefault('cls', Group) - return command(name, **attrs) - - -def _param_memo(f, param): - if isinstance(f, Command): - f.params.append(param) - else: - if not hasattr(f, '__click_params__'): - f.__click_params__ = [] - f.__click_params__.append(param) - - -def argument(*param_decls, **attrs): - """Attaches an argument to the command. All positional arguments are - passed as parameter declarations to :class:`Argument`; all keyword - arguments are forwarded unchanged (except ``cls``). - This is equivalent to creating an :class:`Argument` instance manually - and attaching it to the :attr:`Command.params` list. - - :param cls: the argument class to instantiate. This defaults to - :class:`Argument`. - """ - def decorator(f): - ArgumentClass = attrs.pop('cls', Argument) - _param_memo(f, ArgumentClass(param_decls, **attrs)) - return f - return decorator - - -def option(*param_decls, **attrs): - """Attaches an option to the command. All positional arguments are - passed as parameter declarations to :class:`Option`; all keyword - arguments are forwarded unchanged (except ``cls``). - This is equivalent to creating an :class:`Option` instance manually - and attaching it to the :attr:`Command.params` list. - - :param cls: the option class to instantiate. This defaults to - :class:`Option`. - """ - def decorator(f): - if 'help' in attrs: - attrs['help'] = inspect.cleandoc(attrs['help']) - OptionClass = attrs.pop('cls', Option) - _param_memo(f, OptionClass(param_decls, **attrs)) - return f - return decorator - - -def confirmation_option(*param_decls, **attrs): - """Shortcut for confirmation prompts that can be ignored by passing - ``--yes`` as parameter. - - This is equivalent to decorating a function with :func:`option` with - the following parameters:: - - def callback(ctx, param, value): - if not value: - ctx.abort() - - @click.command() - @click.option('--yes', is_flag=True, callback=callback, - expose_value=False, prompt='Do you want to continue?') - def dropdb(): - pass - """ - def decorator(f): - def callback(ctx, param, value): - if not value: - ctx.abort() - attrs.setdefault('is_flag', True) - attrs.setdefault('callback', callback) - attrs.setdefault('expose_value', False) - attrs.setdefault('prompt', 'Do you want to continue?') - attrs.setdefault('help', 'Confirm the action without prompting.') - return option(*(param_decls or ('--yes',)), **attrs)(f) - return decorator - - -def password_option(*param_decls, **attrs): - """Shortcut for password prompts. - - This is equivalent to decorating a function with :func:`option` with - the following parameters:: - - @click.command() - @click.option('--password', prompt=True, confirmation_prompt=True, - hide_input=True) - def changeadmin(password): - pass - """ - def decorator(f): - attrs.setdefault('prompt', True) - attrs.setdefault('confirmation_prompt', True) - attrs.setdefault('hide_input', True) - return option(*(param_decls or ('--password',)), **attrs)(f) - return decorator - - -def version_option(version=None, *param_decls, **attrs): - """Adds a ``--version`` option which immediately ends the program - printing out the version number. This is implemented as an eager - option that prints the version and exits the program in the callback. - - :param version: the version number to show. If not provided Click - attempts an auto discovery via setuptools. 
- :param prog_name: the name of the program (defaults to autodetection) - :param message: custom message to show instead of the default - (``'%(prog)s, version %(version)s'``) - :param others: everything else is forwarded to :func:`option`. - """ - if version is None: - module = sys._getframe(1).f_globals.get('__name__') - def decorator(f): - prog_name = attrs.pop('prog_name', None) - message = attrs.pop('message', '%(prog)s, version %(version)s') - - def callback(ctx, param, value): - if not value or ctx.resilient_parsing: - return - prog = prog_name - if prog is None: - prog = ctx.find_root().info_name - ver = version - if ver is None: - try: - import pkg_resources - except ImportError: - pass - else: - for dist in pkg_resources.working_set: - scripts = dist.get_entry_map().get('console_scripts') or {} - for script_name, entry_point in iteritems(scripts): - if entry_point.module_name == module: - ver = dist.version - break - if ver is None: - raise RuntimeError('Could not determine version') - echo(message % { - 'prog': prog, - 'version': ver, - }, color=ctx.color) - ctx.exit() - - attrs.setdefault('is_flag', True) - attrs.setdefault('expose_value', False) - attrs.setdefault('is_eager', True) - attrs.setdefault('help', 'Show the version and exit.') - attrs['callback'] = callback - return option(*(param_decls or ('--version',)), **attrs)(f) - return decorator - - -def help_option(*param_decls, **attrs): - """Adds a ``--help`` option which immediately ends the program - printing out the help page. This is usually unnecessary to add as - this is added by default to all commands unless suppressed. - - Like :func:`version_option`, this is implemented as eager option that - prints in the callback and exits. - - All arguments are forwarded to :func:`option`. - """ - def decorator(f): - def callback(ctx, param, value): - if value and not ctx.resilient_parsing: - echo(ctx.get_help(), color=ctx.color) - ctx.exit() - attrs.setdefault('is_flag', True) - attrs.setdefault('expose_value', False) - attrs.setdefault('help', 'Show this message and exit.') - attrs.setdefault('is_eager', True) - attrs['callback'] = callback - return option(*(param_decls or ('--help',)), **attrs)(f) - return decorator - - -# Circular dependencies between core and decorators -from .core import Command, Group, Argument, Option diff --git a/pyextra/click/exceptions.py b/pyextra/click/exceptions.py deleted file mode 100644 index 74a4542bb..000000000 --- a/pyextra/click/exceptions.py +++ /dev/null @@ -1,201 +0,0 @@ -from ._compat import PY2, filename_to_ui, get_text_stderr -from .utils import echo - - -class ClickException(Exception): - """An exception that Click can handle and show to the user.""" - - #: The exit code for this exception - exit_code = 1 - - def __init__(self, message): - if PY2: - if message is not None: - message = message.encode('utf-8') - Exception.__init__(self, message) - self.message = message - - def format_message(self): - return self.message - - def show(self, file=None): - if file is None: - file = get_text_stderr() - echo('Error: %s' % self.format_message(), file=file) - - -class UsageError(ClickException): - """An internal exception that signals a usage error. This typically - aborts any further handling. - - :param message: the error message to display. - :param ctx: optionally the context that caused this error. Click will - fill in the context automatically in some situations. 
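Since auto discovery of the version via setuptools only works for installed distributions, ad-hoc scripts typically pass the version explicitly; a sketch assuming ``click`` is installed, with an invented program name and version::

    import click

    @click.command()
    @click.version_option('1.0.0', '--version', prog_name='demo',
                          message='%(prog)s %(version)s')
    def demo():
        click.echo('hello')

    if __name__ == '__main__':
        demo()  # ``demo --version`` prints "demo 1.0.0" and exits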
- """ - exit_code = 2 - - def __init__(self, message, ctx=None): - ClickException.__init__(self, message) - self.ctx = ctx - - def show(self, file=None): - if file is None: - file = get_text_stderr() - color = None - if self.ctx is not None: - color = self.ctx.color - echo(self.ctx.get_usage() + '\n', file=file, color=color) - echo('Error: %s' % self.format_message(), file=file, color=color) - - -class BadParameter(UsageError): - """An exception that formats out a standardized error message for a - bad parameter. This is useful when thrown from a callback or type as - Click will attach contextual information to it (for instance, which - parameter it is). - - .. versionadded:: 2.0 - - :param param: the parameter object that caused this error. This can - be left out, and Click will attach this info itself - if possible. - :param param_hint: a string that shows up as parameter name. This - can be used as alternative to `param` in cases - where custom validation should happen. If it is - a string it's used as such, if it's a list then - each item is quoted and separated. - """ - - def __init__(self, message, ctx=None, param=None, - param_hint=None): - UsageError.__init__(self, message, ctx) - self.param = param - self.param_hint = param_hint - - def format_message(self): - if self.param_hint is not None: - param_hint = self.param_hint - elif self.param is not None: - param_hint = self.param.opts or [self.param.human_readable_name] - else: - return 'Invalid value: %s' % self.message - if isinstance(param_hint, (tuple, list)): - param_hint = ' / '.join('"%s"' % x for x in param_hint) - return 'Invalid value for %s: %s' % (param_hint, self.message) - - -class MissingParameter(BadParameter): - """Raised if click required an option or argument but it was not - provided when invoking the script. - - .. versionadded:: 4.0 - - :param param_type: a string that indicates the type of the parameter. - The default is to inherit the parameter type from - the given `param`. Valid values are ``'parameter'``, - ``'option'`` or ``'argument'``. - """ - - def __init__(self, message=None, ctx=None, param=None, - param_hint=None, param_type=None): - BadParameter.__init__(self, message, ctx, param, param_hint) - self.param_type = param_type - - def format_message(self): - if self.param_hint is not None: - param_hint = self.param_hint - elif self.param is not None: - param_hint = self.param.opts or [self.param.human_readable_name] - else: - param_hint = None - if isinstance(param_hint, (tuple, list)): - param_hint = ' / '.join('"%s"' % x for x in param_hint) - - param_type = self.param_type - if param_type is None and self.param is not None: - param_type = self.param.param_type_name - - msg = self.message - if self.param is not None: - msg_extra = self.param.type.get_missing_message(self.param) - if msg_extra: - if msg: - msg += '. ' + msg_extra - else: - msg = msg_extra - - return 'Missing %s%s%s%s' % ( - param_type, - param_hint and ' %s' % param_hint or '', - msg and '. ' or '.', - msg or '', - ) - - -class NoSuchOption(UsageError): - """Raised if click attempted to handle an option that does not - exist. - - .. 
versionadded:: 4.0 - """ - - def __init__(self, option_name, message=None, possibilities=None, - ctx=None): - if message is None: - message = 'no such option: %s' % option_name - UsageError.__init__(self, message, ctx) - self.option_name = option_name - self.possibilities = possibilities - - def format_message(self): - bits = [self.message] - if self.possibilities: - if len(self.possibilities) == 1: - bits.append('Did you mean %s?' % self.possibilities[0]) - else: - possibilities = sorted(self.possibilities) - bits.append('(Possible options: %s)' % ', '.join(possibilities)) - return ' '.join(bits) - - -class BadOptionUsage(UsageError): - """Raised if an option is generally supplied but the use of the option - was incorrect. This is for instance raised if the number of arguments - for an option is not correct. - - .. versionadded:: 4.0 - """ - - def __init__(self, message, ctx=None): - UsageError.__init__(self, message, ctx) - - -class BadArgumentUsage(UsageError): - """Raised if an argument is generally supplied but the use of the argument - was incorrect. This is for instance raised if the number of values - for an argument is not correct. - - .. versionadded:: 6.0 - """ - - def __init__(self, message, ctx=None): - UsageError.__init__(self, message, ctx) - - -class FileError(ClickException): - """Raised if a file cannot be opened.""" - - def __init__(self, filename, hint=None): - ui_filename = filename_to_ui(filename) - if hint is None: - hint = 'unknown error' - ClickException.__init__(self, hint) - self.ui_filename = ui_filename - self.filename = filename - - def format_message(self): - return 'Could not open file %s: %s' % (self.ui_filename, self.message) - - -class Abort(RuntimeError): - """An internal signalling exception that signals Click to abort.""" diff --git a/pyextra/click/formatting.py b/pyextra/click/formatting.py deleted file mode 100644 index a3d6a4d38..000000000 --- a/pyextra/click/formatting.py +++ /dev/null @@ -1,256 +0,0 @@ -from contextlib import contextmanager -from .termui import get_terminal_size -from .parser import split_opt -from ._compat import term_len - - -# Can force a width. This is used by the test system -FORCED_WIDTH = None - - -def measure_table(rows): - widths = {} - for row in rows: - for idx, col in enumerate(row): - widths[idx] = max(widths.get(idx, 0), term_len(col)) - return tuple(y for x, y in sorted(widths.items())) - - -def iter_rows(rows, col_count): - for row in rows: - row = tuple(row) - yield row + ('',) * (col_count - len(row)) - - -def wrap_text(text, width=78, initial_indent='', subsequent_indent='', - preserve_paragraphs=False): - """A helper function that intelligently wraps text. By default, it - assumes that it operates on a single paragraph of text but if the - `preserve_paragraphs` parameter is provided it will intelligently - handle paragraphs (defined by two empty lines). - - If paragraphs are handled, a paragraph can be prefixed with an empty - line containing the ``\\b`` character (``\\x08``) to indicate that - no rewrapping should happen in that block. - - :param text: the text that should be rewrapped. - :param width: the maximum width for the text. - :param initial_indent: the initial indent that should be placed on the - first line as a string. - :param subsequent_indent: the indent string that should be placed on - each consecutive line. - :param preserve_paragraphs: if this flag is set then the wrapping will - intelligently handle paragraphs. 
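The ``\b`` escape described above marks a block that must not be rewrapped; a small sketch calling ``wrap_text`` directly, assuming ``click`` is installed::

    from click.formatting import wrap_text

    text = (
        'This paragraph is rewrapped to the requested width because it '
        'is longer than forty columns.\n'
        '\n'
        '\b\n'
        'this block\n'
        '  keeps its exact line breaks\n'
    )
    print(wrap_text(text, width=40, preserve_paragraphs=True))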
- """ - from ._textwrap import TextWrapper - text = text.expandtabs() - wrapper = TextWrapper(width, initial_indent=initial_indent, - subsequent_indent=subsequent_indent, - replace_whitespace=False) - if not preserve_paragraphs: - return wrapper.fill(text) - - p = [] - buf = [] - indent = None - - def _flush_par(): - if not buf: - return - if buf[0].strip() == '\b': - p.append((indent or 0, True, '\n'.join(buf[1:]))) - else: - p.append((indent or 0, False, ' '.join(buf))) - del buf[:] - - for line in text.splitlines(): - if not line: - _flush_par() - indent = None - else: - if indent is None: - orig_len = term_len(line) - line = line.lstrip() - indent = orig_len - term_len(line) - buf.append(line) - _flush_par() - - rv = [] - for indent, raw, text in p: - with wrapper.extra_indent(' ' * indent): - if raw: - rv.append(wrapper.indent_only(text)) - else: - rv.append(wrapper.fill(text)) - - return '\n\n'.join(rv) - - -class HelpFormatter(object): - """This class helps with formatting text-based help pages. It's - usually just needed for very special internal cases, but it's also - exposed so that developers can write their own fancy outputs. - - At present, it always writes into memory. - - :param indent_increment: the additional increment for each level. - :param width: the width for the text. This defaults to the terminal - width clamped to a maximum of 78. - """ - - def __init__(self, indent_increment=2, width=None, max_width=None): - self.indent_increment = indent_increment - if max_width is None: - max_width = 80 - if width is None: - width = FORCED_WIDTH - if width is None: - width = max(min(get_terminal_size()[0], max_width) - 2, 50) - self.width = width - self.current_indent = 0 - self.buffer = [] - - def write(self, string): - """Writes a unicode string into the internal buffer.""" - self.buffer.append(string) - - def indent(self): - """Increases the indentation.""" - self.current_indent += self.indent_increment - - def dedent(self): - """Decreases the indentation.""" - self.current_indent -= self.indent_increment - - def write_usage(self, prog, args='', prefix='Usage: '): - """Writes a usage line into the buffer. - - :param prog: the program name. - :param args: whitespace separated list of arguments. - :param prefix: the prefix for the first line. - """ - usage_prefix = '%*s%s ' % (self.current_indent, prefix, prog) - text_width = self.width - self.current_indent - - if text_width >= (term_len(usage_prefix) + 20): - # The arguments will fit to the right of the prefix. - indent = ' ' * term_len(usage_prefix) - self.write(wrap_text(args, text_width, - initial_indent=usage_prefix, - subsequent_indent=indent)) - else: - # The prefix is too long, put the arguments on the next line. - self.write(usage_prefix) - self.write('\n') - indent = ' ' * (max(self.current_indent, term_len(prefix)) + 4) - self.write(wrap_text(args, text_width, - initial_indent=indent, - subsequent_indent=indent)) - - self.write('\n') - - def write_heading(self, heading): - """Writes a heading into the buffer.""" - self.write('%*s%s:\n' % (self.current_indent, '', heading)) - - def write_paragraph(self): - """Writes a paragraph into the buffer.""" - if self.buffer: - self.write('\n') - - def write_text(self, text): - """Writes re-indented text into the buffer. This rewraps and - preserves paragraphs. 
- """ - text_width = max(self.width - self.current_indent, 11) - indent = ' ' * self.current_indent - self.write(wrap_text(text, text_width, - initial_indent=indent, - subsequent_indent=indent, - preserve_paragraphs=True)) - self.write('\n') - - def write_dl(self, rows, col_max=30, col_spacing=2): - """Writes a definition list into the buffer. This is how options - and commands are usually formatted. - - :param rows: a list of two item tuples for the terms and values. - :param col_max: the maximum width of the first column. - :param col_spacing: the number of spaces between the first and - second column. - """ - rows = list(rows) - widths = measure_table(rows) - if len(widths) != 2: - raise TypeError('Expected two columns for definition list') - - first_col = min(widths[0], col_max) + col_spacing - - for first, second in iter_rows(rows, len(widths)): - self.write('%*s%s' % (self.current_indent, '', first)) - if not second: - self.write('\n') - continue - if term_len(first) <= first_col - col_spacing: - self.write(' ' * (first_col - term_len(first))) - else: - self.write('\n') - self.write(' ' * (first_col + self.current_indent)) - - text_width = max(self.width - first_col - 2, 10) - lines = iter(wrap_text(second, text_width).splitlines()) - if lines: - self.write(next(lines) + '\n') - for line in lines: - self.write('%*s%s\n' % ( - first_col + self.current_indent, '', line)) - else: - self.write('\n') - - @contextmanager - def section(self, name): - """Helpful context manager that writes a paragraph, a heading, - and the indents. - - :param name: the section name that is written as heading. - """ - self.write_paragraph() - self.write_heading(name) - self.indent() - try: - yield - finally: - self.dedent() - - @contextmanager - def indentation(self): - """A context manager that increases the indentation.""" - self.indent() - try: - yield - finally: - self.dedent() - - def getvalue(self): - """Returns the buffer contents.""" - return ''.join(self.buffer) - - -def join_options(options): - """Given a list of option strings this joins them in the most appropriate - way and returns them in the form ``(formatted_string, - any_prefix_is_slash)`` where the second item in the tuple is a flag that - indicates if any of the option prefixes was a slash. - """ - rv = [] - any_prefix_is_slash = False - for opt in options: - prefix = split_opt(opt)[0] - if prefix == '/': - any_prefix_is_slash = True - rv.append((len(prefix), opt)) - - rv.sort(key=lambda x: x[0]) - - rv = ', '.join(x[1] for x in rv) - return rv, any_prefix_is_slash diff --git a/pyextra/click/globals.py b/pyextra/click/globals.py deleted file mode 100644 index 14338e6bb..000000000 --- a/pyextra/click/globals.py +++ /dev/null @@ -1,48 +0,0 @@ -from threading import local - - -_local = local() - - -def get_current_context(silent=False): - """Returns the current click context. This can be used as a way to - access the current context object from anywhere. This is a more implicit - alternative to the :func:`pass_context` decorator. This function is - primarily useful for helpers such as :func:`echo` which might be - interested in changing it's behavior based on the current context. - - To push the current context, :meth:`Context.scope` can be used. - - .. versionadded:: 5.0 - - :param silent: is set to `True` the return value is `None` if no context - is available. The default behavior is to raise a - :exc:`RuntimeError`. 
- """ - try: - return getattr(_local, 'stack')[-1] - except (AttributeError, IndexError): - if not silent: - raise RuntimeError('There is no active click context.') - - -def push_context(ctx): - """Pushes a new context to the current stack.""" - _local.__dict__.setdefault('stack', []).append(ctx) - - -def pop_context(): - """Removes the top level from the stack.""" - _local.stack.pop() - - -def resolve_color_default(color=None): - """"Internal helper to get the default value of the color flag. If a - value is passed it's returned unchanged, otherwise it's looked up from - the current context. - """ - if color is not None: - return color - ctx = get_current_context(silent=True) - if ctx is not None: - return ctx.color diff --git a/pyextra/click/parser.py b/pyextra/click/parser.py deleted file mode 100644 index 9775c9ff9..000000000 --- a/pyextra/click/parser.py +++ /dev/null @@ -1,426 +0,0 @@ -# -*- coding: utf-8 -*- -""" - click.parser - ~~~~~~~~~~~~ - - This module started out as largely a copy paste from the stdlib's - optparse module with the features removed that we do not need from - optparse because we implement them in Click on a higher level (for - instance type handling, help formatting and a lot more). - - The plan is to remove more and more from here over time. - - The reason this is a different module and not optparse from the stdlib - is that there are differences in 2.x and 3.x about the error messages - generated and optparse in the stdlib uses gettext for no good reason - and might cause us issues. -""" -import re -from collections import deque -from .exceptions import UsageError, NoSuchOption, BadOptionUsage, \ - BadArgumentUsage - - -def _unpack_args(args, nargs_spec): - """Given an iterable of arguments and an iterable of nargs specifications, - it returns a tuple with all the unpacked arguments at the first index - and all remaining arguments as the second. - - The nargs specification is the number of arguments that should be consumed - or `-1` to indicate that this position should eat up all the remainders. - - Missing items are filled with `None`. - """ - args = deque(args) - nargs_spec = deque(nargs_spec) - rv = [] - spos = None - - def _fetch(c): - try: - if spos is None: - return c.popleft() - else: - return c.pop() - except IndexError: - return None - - while nargs_spec: - nargs = _fetch(nargs_spec) - if nargs == 1: - rv.append(_fetch(args)) - elif nargs > 1: - x = [_fetch(args) for _ in range(nargs)] - # If we're reversed, we're pulling in the arguments in reverse, - # so we need to turn them around. - if spos is not None: - x.reverse() - rv.append(tuple(x)) - elif nargs < 0: - if spos is not None: - raise TypeError('Cannot have two nargs < 0') - spos = len(rv) - rv.append(None) - - # spos is the position of the wildcard (star). If it's not `None`, - # we fill it with the remainder. 
- if spos is not None: - rv[spos] = tuple(args) - args = [] - rv[spos + 1:] = reversed(rv[spos + 1:]) - - return tuple(rv), list(args) - - -def _error_opt_args(nargs, opt): - if nargs == 1: - raise BadOptionUsage('%s option requires an argument' % opt) - raise BadOptionUsage('%s option requires %d arguments' % (opt, nargs)) - - -def split_opt(opt): - first = opt[:1] - if first.isalnum(): - return '', opt - if opt[1:2] == first: - return opt[:2], opt[2:] - return first, opt[1:] - - -def normalize_opt(opt, ctx): - if ctx is None or ctx.token_normalize_func is None: - return opt - prefix, opt = split_opt(opt) - return prefix + ctx.token_normalize_func(opt) - - -def split_arg_string(string): - """Given an argument string this attempts to split it into small parts.""" - rv = [] - for match in re.finditer(r"('([^'\\]*(?:\\.[^'\\]*)*)'" - r'|"([^"\\]*(?:\\.[^"\\]*)*)"' - r'|\S+)\s*', string, re.S): - arg = match.group().strip() - if arg[:1] == arg[-1:] and arg[:1] in '"\'': - arg = arg[1:-1].encode('ascii', 'backslashreplace') \ - .decode('unicode-escape') - try: - arg = type(string)(arg) - except UnicodeError: - pass - rv.append(arg) - return rv - - -class Option(object): - - def __init__(self, opts, dest, action=None, nargs=1, const=None, obj=None): - self._short_opts = [] - self._long_opts = [] - self.prefixes = set() - - for opt in opts: - prefix, value = split_opt(opt) - if not prefix: - raise ValueError('Invalid start character for option (%s)' - % opt) - self.prefixes.add(prefix[0]) - if len(prefix) == 1 and len(value) == 1: - self._short_opts.append(opt) - else: - self._long_opts.append(opt) - self.prefixes.add(prefix) - - if action is None: - action = 'store' - - self.dest = dest - self.action = action - self.nargs = nargs - self.const = const - self.obj = obj - - @property - def takes_value(self): - return self.action in ('store', 'append') - - def process(self, value, state): - if self.action == 'store': - state.opts[self.dest] = value - elif self.action == 'store_const': - state.opts[self.dest] = self.const - elif self.action == 'append': - state.opts.setdefault(self.dest, []).append(value) - elif self.action == 'append_const': - state.opts.setdefault(self.dest, []).append(self.const) - elif self.action == 'count': - state.opts[self.dest] = state.opts.get(self.dest, 0) + 1 - else: - raise ValueError('unknown action %r' % self.action) - state.order.append(self.obj) - - -class Argument(object): - - def __init__(self, dest, nargs=1, obj=None): - self.dest = dest - self.nargs = nargs - self.obj = obj - - def process(self, value, state): - if self.nargs > 1: - holes = sum(1 for x in value if x is None) - if holes == len(value): - value = None - elif holes != 0: - raise BadArgumentUsage('argument %s takes %d values' - % (self.dest, self.nargs)) - state.opts[self.dest] = value - state.order.append(self.obj) - - -class ParsingState(object): - - def __init__(self, rargs): - self.opts = {} - self.largs = [] - self.rargs = rargs - self.order = [] - - -class OptionParser(object): - """The option parser is an internal class that is ultimately used to - parse options and arguments. It's modelled after optparse and brings - a similar but vastly simplified API. It should generally not be used - directly as the high level Click classes wrap it for you. - - It's not nearly as extensible as optparse or argparse as it does not - implement features that are implemented on a higher level (such as - types or defaults). 
- - :param ctx: optionally the :class:`~click.Context` where this parser - should go with. - """ - - def __init__(self, ctx=None): - #: The :class:`~click.Context` for this parser. This might be - #: `None` for some advanced use cases. - self.ctx = ctx - #: This controls how the parser deals with interspersed arguments. - #: If this is set to `False`, the parser will stop on the first - #: non-option. Click uses this to implement nested subcommands - #: safely. - self.allow_interspersed_args = True - #: This tells the parser how to deal with unknown options. By - #: default it will error out (which is sensible), but there is a - #: second mode where it will ignore it and continue processing - #: after shifting all the unknown options into the resulting args. - self.ignore_unknown_options = False - if ctx is not None: - self.allow_interspersed_args = ctx.allow_interspersed_args - self.ignore_unknown_options = ctx.ignore_unknown_options - self._short_opt = {} - self._long_opt = {} - self._opt_prefixes = set(['-', '--']) - self._args = [] - - def add_option(self, opts, dest, action=None, nargs=1, const=None, - obj=None): - """Adds a new option named `dest` to the parser. The destination - is not inferred (unlike with optparse) and needs to be explicitly - provided. Action can be any of ``store``, ``store_const``, - ``append``, ``appnd_const`` or ``count``. - - The `obj` can be used to identify the option in the order list - that is returned from the parser. - """ - if obj is None: - obj = dest - opts = [normalize_opt(opt, self.ctx) for opt in opts] - option = Option(opts, dest, action=action, nargs=nargs, - const=const, obj=obj) - self._opt_prefixes.update(option.prefixes) - for opt in option._short_opts: - self._short_opt[opt] = option - for opt in option._long_opts: - self._long_opt[opt] = option - - def add_argument(self, dest, nargs=1, obj=None): - """Adds a positional argument named `dest` to the parser. - - The `obj` can be used to identify the option in the order list - that is returned from the parser. - """ - if obj is None: - obj = dest - self._args.append(Argument(dest=dest, nargs=nargs, obj=obj)) - - def parse_args(self, args): - """Parses positional arguments and returns ``(values, args, order)`` - for the parsed options and arguments as well as the leftover - arguments if there are any. The order is a list of objects as they - appear on the command line. If arguments appear multiple times they - will be memorized multiple times as well. - """ - state = ParsingState(args) - try: - self._process_args_for_options(state) - self._process_args_for_args(state) - except UsageError: - if self.ctx is None or not self.ctx.resilient_parsing: - raise - return state.opts, state.largs, state.order - - def _process_args_for_args(self, state): - pargs, args = _unpack_args(state.largs + state.rargs, - [x.nargs for x in self._args]) - - for idx, arg in enumerate(self._args): - arg.process(pargs[idx], state) - - state.largs = args - state.rargs = [] - - def _process_args_for_options(self, state): - while state.rargs: - arg = state.rargs.pop(0) - arglen = len(arg) - # Double dashes always handled explicitly regardless of what - # prefixes are valid. 
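Although the high-level classes normally wrap it, the parser can also be driven directly, which makes the ``(values, args, order)`` return value described above easy to inspect; a sketch assuming ``click`` is installed, with invented option names::

    from click.parser import OptionParser

    parser = OptionParser()
    parser.add_option(['-f', '--force'], dest='force',
                      action='store_const', const=True)
    parser.add_option(['-o', '--output'], dest='output')
    parser.add_argument(dest='paths', nargs=-1)

    opts, largs, order = parser.parse_args(
        ['-f', '-o', 'out.txt', 'a.txt', 'b.txt'])
    # opts == {'force': True, 'output': 'out.txt',
    #          'paths': ('a.txt', 'b.txt')}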
- if arg == '--': - return - elif arg[:1] in self._opt_prefixes and arglen > 1: - self._process_opts(arg, state) - elif self.allow_interspersed_args: - state.largs.append(arg) - else: - state.rargs.insert(0, arg) - return - - # Say this is the original argument list: - # [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)] - # ^ - # (we are about to process arg(i)). - # - # Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of - # [arg0, ..., arg(i-1)] (any options and their arguments will have - # been removed from largs). - # - # The while loop will usually consume 1 or more arguments per pass. - # If it consumes 1 (eg. arg is an option that takes no arguments), - # then after _process_arg() is done the situation is: - # - # largs = subset of [arg0, ..., arg(i)] - # rargs = [arg(i+1), ..., arg(N-1)] - # - # If allow_interspersed_args is false, largs will always be - # *empty* -- still a subset of [arg0, ..., arg(i-1)], but - # not a very interesting subset! - - def _match_long_opt(self, opt, explicit_value, state): - if opt not in self._long_opt: - possibilities = [word for word in self._long_opt - if word.startswith(opt)] - raise NoSuchOption(opt, possibilities=possibilities) - - option = self._long_opt[opt] - if option.takes_value: - # At this point it's safe to modify rargs by injecting the - # explicit value, because no exception is raised in this - # branch. This means that the inserted value will be fully - # consumed. - if explicit_value is not None: - state.rargs.insert(0, explicit_value) - - nargs = option.nargs - if len(state.rargs) < nargs: - _error_opt_args(nargs, opt) - elif nargs == 1: - value = state.rargs.pop(0) - else: - value = tuple(state.rargs[:nargs]) - del state.rargs[:nargs] - - elif explicit_value is not None: - raise BadOptionUsage('%s option does not take a value' % opt) - - else: - value = None - - option.process(value, state) - - def _match_short_opt(self, arg, state): - stop = False - i = 1 - prefix = arg[0] - unknown_options = [] - - for ch in arg[1:]: - opt = normalize_opt(prefix + ch, self.ctx) - option = self._short_opt.get(opt) - i += 1 - - if not option: - if self.ignore_unknown_options: - unknown_options.append(ch) - continue - raise NoSuchOption(opt) - if option.takes_value: - # Any characters left in arg? Pretend they're the - # next arg, and stop consuming characters of arg. - if i < len(arg): - state.rargs.insert(0, arg[i:]) - stop = True - - nargs = option.nargs - if len(state.rargs) < nargs: - _error_opt_args(nargs, opt) - elif nargs == 1: - value = state.rargs.pop(0) - else: - value = tuple(state.rargs[:nargs]) - del state.rargs[:nargs] - - else: - value = None - - option.process(value, state) - - if stop: - break - - # If we got any unknown options we re-combinate the string of the - # remaining options and re-attach the prefix, then report that - # to the state as new larg. This way there is basic combinatorics - # that can be achieved while still ignoring unknown arguments. - if self.ignore_unknown_options and unknown_options: - state.largs.append(prefix + ''.join(unknown_options)) - - def _process_opts(self, arg, state): - explicit_value = None - # Long option handling happens in two parts. The first part is - # supporting explicitly attached values. In any case, we will try - # to long match the option first. 
- if '=' in arg: - long_opt, explicit_value = arg.split('=', 1) - else: - long_opt = arg - norm_long_opt = normalize_opt(long_opt, self.ctx) - - # At this point we will match the (assumed) long option through - # the long option matching code. Note that this allows options - # like "-foo" to be matched as long options. - try: - self._match_long_opt(norm_long_opt, explicit_value, state) - except NoSuchOption: - # At this point the long option matching failed, and we need - # to try with short options. However there is a special rule - # which says, that if we have a two character options prefix - # (applies to "--foo" for instance), we do not dispatch to the - # short option code and will instead raise the no option - # error. - if arg[:2] not in self._opt_prefixes: - return self._match_short_opt(arg, state) - if not self.ignore_unknown_options: - raise - state.largs.append(arg) diff --git a/pyextra/click/termui.py b/pyextra/click/termui.py deleted file mode 100644 index d9fba5232..000000000 --- a/pyextra/click/termui.py +++ /dev/null @@ -1,539 +0,0 @@ -import os -import sys -import struct - -from ._compat import raw_input, text_type, string_types, \ - isatty, strip_ansi, get_winterm_size, DEFAULT_COLUMNS, WIN -from .utils import echo -from .exceptions import Abort, UsageError -from .types import convert_type -from .globals import resolve_color_default - - -# The prompt functions to use. The doc tools currently override these -# functions to customize how they work. -visible_prompt_func = raw_input - -_ansi_colors = ('black', 'red', 'green', 'yellow', 'blue', 'magenta', - 'cyan', 'white', 'reset') -_ansi_reset_all = '\033[0m' - - -def hidden_prompt_func(prompt): - import getpass - return getpass.getpass(prompt) - - -def _build_prompt(text, suffix, show_default=False, default=None): - prompt = text - if default is not None and show_default: - prompt = '%s [%s]' % (prompt, default) - return prompt + suffix - - -def prompt(text, default=None, hide_input=False, - confirmation_prompt=False, type=None, - value_proc=None, prompt_suffix=': ', - show_default=True, err=False): - """Prompts a user for input. This is a convenience function that can - be used to prompt a user for input later. - - If the user aborts the input by sending a interrupt signal, this - function will catch it and raise a :exc:`Abort` exception. - - .. versionadded:: 6.0 - Added unicode support for cmd.exe on Windows. - - .. versionadded:: 4.0 - Added the `err` parameter. - - :param text: the text to show for the prompt. - :param default: the default value to use if no input happens. If this - is not given it will prompt until it's aborted. - :param hide_input: if this is set to true then the input value will - be hidden. - :param confirmation_prompt: asks for confirmation for the value. - :param type: the type to use to check the value against. - :param value_proc: if this parameter is provided it's a function that - is invoked instead of the type conversion to - convert a value. - :param prompt_suffix: a suffix that should be added to the prompt. - :param show_default: shows or hides the default value in the prompt. - :param err: if set to true the file defaults to ``stderr`` instead of - ``stdout``, the same as with echo. 
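In user code the function above is usually reached as ``click.prompt``; a sketch assuming ``click`` is installed, with invented prompt texts. A ``type`` (or ``value_proc``) validates and converts the input, and an invalid entry simply re-prompts::

    import click

    port = click.prompt('Port', default=8080,
                        type=click.IntRange(1, 65535))
    token = click.prompt('API token', hide_input=True,
                         confirmation_prompt=True)
    click.echo('listening on %d' % port)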
- """ - result = None - - def prompt_func(text): - f = hide_input and hidden_prompt_func or visible_prompt_func - try: - # Write the prompt separately so that we get nice - # coloring through colorama on Windows - echo(text, nl=False, err=err) - return f('') - except (KeyboardInterrupt, EOFError): - # getpass doesn't print a newline if the user aborts input with ^C. - # Allegedly this behavior is inherited from getpass(3). - # A doc bug has been filed at https://bugs.python.org/issue24711 - if hide_input: - echo(None, err=err) - raise Abort() - - if value_proc is None: - value_proc = convert_type(type, default) - - prompt = _build_prompt(text, prompt_suffix, show_default, default) - - while 1: - while 1: - value = prompt_func(prompt) - if value: - break - # If a default is set and used, then the confirmation - # prompt is always skipped because that's the only thing - # that really makes sense. - elif default is not None: - return default - try: - result = value_proc(value) - except UsageError as e: - echo('Error: %s' % e.message, err=err) - continue - if not confirmation_prompt: - return result - while 1: - value2 = prompt_func('Repeat for confirmation: ') - if value2: - break - if value == value2: - return result - echo('Error: the two entered values do not match', err=err) - - -def confirm(text, default=False, abort=False, prompt_suffix=': ', - show_default=True, err=False): - """Prompts for confirmation (yes/no question). - - If the user aborts the input by sending a interrupt signal this - function will catch it and raise a :exc:`Abort` exception. - - .. versionadded:: 4.0 - Added the `err` parameter. - - :param text: the question to ask. - :param default: the default for the prompt. - :param abort: if this is set to `True` a negative answer aborts the - exception by raising :exc:`Abort`. - :param prompt_suffix: a suffix that should be added to the prompt. - :param show_default: shows or hides the default value in the prompt. - :param err: if set to true the file defaults to ``stderr`` instead of - ``stdout``, the same as with echo. - """ - prompt = _build_prompt(text, prompt_suffix, show_default, - default and 'Y/n' or 'y/N') - while 1: - try: - # Write the prompt separately so that we get nice - # coloring through colorama on Windows - echo(prompt, nl=False, err=err) - value = visible_prompt_func('').lower().strip() - except (KeyboardInterrupt, EOFError): - raise Abort() - if value in ('y', 'yes'): - rv = True - elif value in ('n', 'no'): - rv = False - elif value == '': - rv = default - else: - echo('Error: invalid input', err=err) - continue - break - if abort and not rv: - raise Abort() - return rv - - -def get_terminal_size(): - """Returns the current size of the terminal as tuple in the form - ``(width, height)`` in columns and rows. 
- """ - # If shutil has get_terminal_size() (Python 3.3 and later) use that - if sys.version_info >= (3, 3): - import shutil - shutil_get_terminal_size = getattr(shutil, 'get_terminal_size', None) - if shutil_get_terminal_size: - sz = shutil_get_terminal_size() - return sz.columns, sz.lines - - if get_winterm_size is not None: - return get_winterm_size() - - def ioctl_gwinsz(fd): - try: - import fcntl - import termios - cr = struct.unpack( - 'hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234')) - except Exception: - return - return cr - - cr = ioctl_gwinsz(0) or ioctl_gwinsz(1) or ioctl_gwinsz(2) - if not cr: - try: - fd = os.open(os.ctermid(), os.O_RDONLY) - try: - cr = ioctl_gwinsz(fd) - finally: - os.close(fd) - except Exception: - pass - if not cr or not cr[0] or not cr[1]: - cr = (os.environ.get('LINES', 25), - os.environ.get('COLUMNS', DEFAULT_COLUMNS)) - return int(cr[1]), int(cr[0]) - - -def echo_via_pager(text, color=None): - """This function takes a text and shows it via an environment specific - pager on stdout. - - .. versionchanged:: 3.0 - Added the `color` flag. - - :param text: the text to page. - :param color: controls if the pager supports ANSI colors or not. The - default is autodetection. - """ - color = resolve_color_default(color) - if not isinstance(text, string_types): - text = text_type(text) - from ._termui_impl import pager - return pager(text + '\n', color) - - -def progressbar(iterable=None, length=None, label=None, show_eta=True, - show_percent=None, show_pos=False, - item_show_func=None, fill_char='#', empty_char='-', - bar_template='%(label)s [%(bar)s] %(info)s', - info_sep=' ', width=36, file=None, color=None): - """This function creates an iterable context manager that can be used - to iterate over something while showing a progress bar. It will - either iterate over the `iterable` or `length` items (that are counted - up). While iteration happens, this function will print a rendered - progress bar to the given `file` (defaults to stdout) and will attempt - to calculate remaining time and more. By default, this progress bar - will not be rendered if the file is not a terminal. - - The context manager creates the progress bar. When the context - manager is entered the progress bar is already displayed. With every - iteration over the progress bar, the iterable passed to the bar is - advanced and the bar is updated. When the context manager exits, - a newline is printed and the progress bar is finalized on screen. - - No printing must happen or the progress bar will be unintentionally - destroyed. - - Example usage:: - - with progressbar(items) as bar: - for item in bar: - do_something_with(item) - - Alternatively, if no iterable is specified, one can manually update the - progress bar through the `update()` method instead of directly - iterating over the progress bar. The update method accepts the number - of steps to increment the bar with:: - - with progressbar(length=chunks.total_bytes) as bar: - for chunk in chunks: - process_chunk(chunk) - bar.update(chunks.bytes) - - .. versionadded:: 2.0 - - .. versionadded:: 4.0 - Added the `color` parameter. Added a `update` method to the - progressbar object. - - :param iterable: an iterable to iterate over. If not provided the length - is required. - :param length: the number of items to iterate over. By default the - progressbar will attempt to ask the iterator about its - length, which might or might not work. If an iterable is - also provided this parameter can be used to override the - length. 
If an iterable is not provided the progress bar - will iterate over a range of that length. - :param label: the label to show next to the progress bar. - :param show_eta: enables or disables the estimated time display. This is - automatically disabled if the length cannot be - determined. - :param show_percent: enables or disables the percentage display. The - default is `True` if the iterable has a length or - `False` if not. - :param show_pos: enables or disables the absolute position display. The - default is `False`. - :param item_show_func: a function called with the current item which - can return a string to show the current item - next to the progress bar. Note that the current - item can be `None`! - :param fill_char: the character to use to show the filled part of the - progress bar. - :param empty_char: the character to use to show the non-filled part of - the progress bar. - :param bar_template: the format string to use as template for the bar. - The parameters in it are ``label`` for the label, - ``bar`` for the progress bar and ``info`` for the - info section. - :param info_sep: the separator between multiple info items (eta etc.) - :param width: the width of the progress bar in characters, 0 means full - terminal width - :param file: the file to write to. If this is not a terminal then - only the label is printed. - :param color: controls if the terminal supports ANSI colors or not. The - default is autodetection. This is only needed if ANSI - codes are included anywhere in the progress bar output - which is not the case by default. - """ - from ._termui_impl import ProgressBar - color = resolve_color_default(color) - return ProgressBar(iterable=iterable, length=length, show_eta=show_eta, - show_percent=show_percent, show_pos=show_pos, - item_show_func=item_show_func, fill_char=fill_char, - empty_char=empty_char, bar_template=bar_template, - info_sep=info_sep, file=file, label=label, - width=width, color=color) - - -def clear(): - """Clears the terminal screen. This will have the effect of clearing - the whole visible space of the terminal and moving the cursor to the - top left. This does not do anything if not connected to a terminal. - - .. versionadded:: 2.0 - """ - if not isatty(sys.stdout): - return - # If we're on Windows and we don't have colorama available, then we - # clear the screen by shelling out. Otherwise we can use an escape - # sequence. - if WIN: - os.system('cls') - else: - sys.stdout.write('\033[2J\033[1;1H') - - -def style(text, fg=None, bg=None, bold=None, dim=None, underline=None, - blink=None, reverse=None, reset=True): - """Styles a text with ANSI styles and returns the new string. By - default the styling is self contained which means that at the end - of the string a reset code is issued. This can be prevented by - passing ``reset=False``. - - Examples:: - - click.echo(click.style('Hello World!', fg='green')) - click.echo(click.style('ATTENTION!', blink=True)) - click.echo(click.style('Some things', reverse=True, fg='cyan')) - - Supported color names: - - * ``black`` (might be a gray) - * ``red`` - * ``green`` - * ``yellow`` (might be an orange) - * ``blue`` - * ``magenta`` - * ``cyan`` - * ``white`` (might be light gray) - * ``reset`` (reset the color code only) - - .. versionadded:: 2.0 - - :param text: the string to style with ansi codes. - :param fg: if provided this will become the foreground color. - :param bg: if provided this will become the background color. - :param bold: if provided this will enable or disable bold mode. 
- :param dim: if provided this will enable or disable dim mode. This is - badly supported. - :param underline: if provided this will enable or disable underline. - :param blink: if provided this will enable or disable blinking. - :param reverse: if provided this will enable or disable inverse - rendering (foreground becomes background and the - other way round). - :param reset: by default a reset-all code is added at the end of the - string which means that styles do not carry over. This - can be disabled to compose styles. - """ - bits = [] - if fg: - try: - bits.append('\033[%dm' % (_ansi_colors.index(fg) + 30)) - except ValueError: - raise TypeError('Unknown color %r' % fg) - if bg: - try: - bits.append('\033[%dm' % (_ansi_colors.index(bg) + 40)) - except ValueError: - raise TypeError('Unknown color %r' % bg) - if bold is not None: - bits.append('\033[%dm' % (1 if bold else 22)) - if dim is not None: - bits.append('\033[%dm' % (2 if dim else 22)) - if underline is not None: - bits.append('\033[%dm' % (4 if underline else 24)) - if blink is not None: - bits.append('\033[%dm' % (5 if blink else 25)) - if reverse is not None: - bits.append('\033[%dm' % (7 if reverse else 27)) - bits.append(text) - if reset: - bits.append(_ansi_reset_all) - return ''.join(bits) - - -def unstyle(text): - """Removes ANSI styling information from a string. Usually it's not - necessary to use this function as Click's echo function will - automatically remove styling if necessary. - - .. versionadded:: 2.0 - - :param text: the text to remove style information from. - """ - return strip_ansi(text) - - -def secho(text, file=None, nl=True, err=False, color=None, **styles): - """This function combines :func:`echo` and :func:`style` into one - call. As such the following two calls are the same:: - - click.secho('Hello World!', fg='green') - click.echo(click.style('Hello World!', fg='green')) - - All keyword arguments are forwarded to the underlying functions - depending on which one they go with. - - .. versionadded:: 2.0 - """ - return echo(style(text, **styles), file=file, nl=nl, err=err, color=color) - - -def edit(text=None, editor=None, env=None, require_save=True, - extension='.txt', filename=None): - r"""Edits the given text in the defined editor. If an editor is given - (should be the full path to the executable but the regular operating - system search path is used for finding the executable) it overrides - the detected editor. Optionally, some environment variables can be - used. If the editor is closed without changes, `None` is returned. In - case a file is edited directly the return value is always `None` and - `require_save` and `extension` are ignored. - - If the editor cannot be opened a :exc:`UsageError` is raised. - - Note for Windows: to simplify cross-platform usage, the newlines are - automatically converted from POSIX to Windows and vice versa. As such, - the message here will have ``\n`` as newline markers. - - :param text: the text to edit. - :param editor: optionally the editor to use. Defaults to automatic - detection. - :param env: environment variables to forward to the editor. - :param require_save: if this is true, then not saving in the editor - will make the return value become `None`. - :param extension: the extension to tell the editor about. This defaults - to `.txt` but changing this might change syntax - highlighting. - :param filename: if provided it will edit this file instead of the - provided text contents. It will not use a temporary - file as an indirection in that case. 
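A small illustration of ``echo_via_pager()`` and the ``edit()`` helper whose docstring ends here (its body follows below); which pager and editor actually run depends on the environment::

    import click

    # Page a long block of text through the system pager.
    click.echo_via_pager('\n'.join('line %d' % n for n in range(200)))

    # Open the user's editor; returns None if nothing was saved.
    note = click.edit('Write your note here\n')
    if note is not None:
        click.echo('Saved %d characters' % len(note))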
- """ - from ._termui_impl import Editor - editor = Editor(editor=editor, env=env, require_save=require_save, - extension=extension) - if filename is None: - return editor.edit(text) - editor.edit_file(filename) - - -def launch(url, wait=False, locate=False): - """This function launches the given URL (or filename) in the default - viewer application for this file type. If this is an executable, it - might launch the executable in a new session. The return value is - the exit code of the launched application. Usually, ``0`` indicates - success. - - Examples:: - - click.launch('http://click.pocoo.org/') - click.launch('/my/downloaded/file', locate=True) - - .. versionadded:: 2.0 - - :param url: URL or filename of the thing to launch. - :param wait: waits for the program to stop. - :param locate: if this is set to `True` then instead of launching the - application associated with the URL it will attempt to - launch a file manager with the file located. This - might have weird effects if the URL does not point to - the filesystem. - """ - from ._termui_impl import open_url - return open_url(url, wait=wait, locate=locate) - - -# If this is provided, getchar() calls into this instead. This is used -# for unittesting purposes. -_getchar = None - - -def getchar(echo=False): - """Fetches a single character from the terminal and returns it. This - will always return a unicode character and under certain rare - circumstances this might return more than one character. The - situations which more than one character is returned is when for - whatever reason multiple characters end up in the terminal buffer or - standard input was not actually a terminal. - - Note that this will always read from the terminal, even if something - is piped into the standard input. - - .. versionadded:: 2.0 - - :param echo: if set to `True`, the character read will also show up on - the terminal. The default is to not show it. - """ - f = _getchar - if f is None: - from ._termui_impl import getchar as f - return f(echo) - - -def pause(info='Press any key to continue ...', err=False): - """This command stops execution and waits for the user to press any - key to continue. This is similar to the Windows batch "pause" - command. If the program is not run through a terminal, this command - will instead do nothing. - - .. versionadded:: 2.0 - - .. versionadded:: 4.0 - Added the `err` parameter. - - :param info: the info string to print before pausing. - :param err: if set to message goes to ``stderr`` instead of - ``stdout``, the same as with echo. - """ - if not isatty(sys.stdin) or not isatty(sys.stdout): - return - try: - if info: - echo(info, nl=False, err=err) - try: - getchar() - except (KeyboardInterrupt, EOFError): - pass - finally: - if info: - echo(err=err) diff --git a/pyextra/click/testing.py b/pyextra/click/testing.py deleted file mode 100644 index 4416c7741..000000000 --- a/pyextra/click/testing.py +++ /dev/null @@ -1,322 +0,0 @@ -import os -import sys -import shutil -import tempfile -import contextlib - -from ._compat import iteritems, PY2 - - -# If someone wants to vendor click, we want to ensure the -# correct package is discovered. Ideally we could use a -# relative import here but unfortunately Python does not -# support that. 
-clickpkg = sys.modules[__name__.rsplit('.', 1)[0]] - - -if PY2: - from cStringIO import StringIO -else: - import io - from ._compat import _find_binary_reader - - -class EchoingStdin(object): - - def __init__(self, input, output): - self._input = input - self._output = output - - def __getattr__(self, x): - return getattr(self._input, x) - - def _echo(self, rv): - self._output.write(rv) - return rv - - def read(self, n=-1): - return self._echo(self._input.read(n)) - - def readline(self, n=-1): - return self._echo(self._input.readline(n)) - - def readlines(self): - return [self._echo(x) for x in self._input.readlines()] - - def __iter__(self): - return iter(self._echo(x) for x in self._input) - - def __repr__(self): - return repr(self._input) - - -def make_input_stream(input, charset): - # Is already an input stream. - if hasattr(input, 'read'): - if PY2: - return input - rv = _find_binary_reader(input) - if rv is not None: - return rv - raise TypeError('Could not find binary reader for input stream.') - - if input is None: - input = b'' - elif not isinstance(input, bytes): - input = input.encode(charset) - if PY2: - return StringIO(input) - return io.BytesIO(input) - - -class Result(object): - """Holds the captured result of an invoked CLI script.""" - - def __init__(self, runner, output_bytes, exit_code, exception, - exc_info=None): - #: The runner that created the result - self.runner = runner - #: The output as bytes. - self.output_bytes = output_bytes - #: The exit code as integer. - self.exit_code = exit_code - #: The exception that happend if one did. - self.exception = exception - #: The traceback - self.exc_info = exc_info - - @property - def output(self): - """The output as unicode string.""" - return self.output_bytes.decode(self.runner.charset, 'replace') \ - .replace('\r\n', '\n') - - def __repr__(self): - return '' % ( - self.exception and repr(self.exception) or 'okay', - ) - - -class CliRunner(object): - """The CLI runner provides functionality to invoke a Click command line - script for unittesting purposes in a isolated environment. This only - works in single-threaded systems without any concurrency as it changes the - global interpreter state. - - :param charset: the character set for the input and output data. This is - UTF-8 by default and should not be changed currently as - the reporting to Click only works in Python 2 properly. - :param env: a dictionary with environment variables for overriding. - :param echo_stdin: if this is set to `True`, then reading from stdin writes - to stdout. This is useful for showing examples in - some circumstances. Note that regular prompts - will automatically echo the input. - """ - - def __init__(self, charset=None, env=None, echo_stdin=False): - if charset is None: - charset = 'utf-8' - self.charset = charset - self.env = env or {} - self.echo_stdin = echo_stdin - - def get_default_prog_name(self, cli): - """Given a command object it will return the default program name - for it. The default is the `name` attribute or ``"root"`` if not - set. - """ - return cli.name or 'root' - - def make_env(self, overrides=None): - """Returns the environment overrides for invoking a script.""" - rv = dict(self.env) - if overrides: - rv.update(overrides) - return rv - - @contextlib.contextmanager - def isolation(self, input=None, env=None, color=False): - """A context manager that sets up the isolation for invoking of a - command line tool. 
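A sketch of how the ``CliRunner`` deleted here was used in tests (its ``isolation()`` and ``invoke()`` methods are documented below); the command under test is hypothetical::

    import click
    from click.testing import CliRunner

    @click.command()
    @click.argument('name')
    def hello(name):
        click.echo('Hello %s!' % name)

    runner = CliRunner()
    result = runner.invoke(hello, ['Ada'])
    assert result.exit_code == 0
    assert result.output == 'Hello Ada!\n'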
This sets up stdin with the given input data - and `os.environ` with the overrides from the given dictionary. - This also rebinds some internals in Click to be mocked (like the - prompt functionality). - - This is automatically done in the :meth:`invoke` method. - - .. versionadded:: 4.0 - The ``color`` parameter was added. - - :param input: the input stream to put into sys.stdin. - :param env: the environment overrides as dictionary. - :param color: whether the output should contain color codes. The - application can still override this explicitly. - """ - input = make_input_stream(input, self.charset) - - old_stdin = sys.stdin - old_stdout = sys.stdout - old_stderr = sys.stderr - old_forced_width = clickpkg.formatting.FORCED_WIDTH - clickpkg.formatting.FORCED_WIDTH = 80 - - env = self.make_env(env) - - if PY2: - sys.stdout = sys.stderr = bytes_output = StringIO() - if self.echo_stdin: - input = EchoingStdin(input, bytes_output) - else: - bytes_output = io.BytesIO() - if self.echo_stdin: - input = EchoingStdin(input, bytes_output) - input = io.TextIOWrapper(input, encoding=self.charset) - sys.stdout = sys.stderr = io.TextIOWrapper( - bytes_output, encoding=self.charset) - - sys.stdin = input - - def visible_input(prompt=None): - sys.stdout.write(prompt or '') - val = input.readline().rstrip('\r\n') - sys.stdout.write(val + '\n') - sys.stdout.flush() - return val - - def hidden_input(prompt=None): - sys.stdout.write((prompt or '') + '\n') - sys.stdout.flush() - return input.readline().rstrip('\r\n') - - def _getchar(echo): - char = sys.stdin.read(1) - if echo: - sys.stdout.write(char) - sys.stdout.flush() - return char - - default_color = color - def should_strip_ansi(stream=None, color=None): - if color is None: - return not default_color - return not color - - old_visible_prompt_func = clickpkg.termui.visible_prompt_func - old_hidden_prompt_func = clickpkg.termui.hidden_prompt_func - old__getchar_func = clickpkg.termui._getchar - old_should_strip_ansi = clickpkg.utils.should_strip_ansi - clickpkg.termui.visible_prompt_func = visible_input - clickpkg.termui.hidden_prompt_func = hidden_input - clickpkg.termui._getchar = _getchar - clickpkg.utils.should_strip_ansi = should_strip_ansi - - old_env = {} - try: - for key, value in iteritems(env): - old_env[key] = os.environ.get(key) - if value is None: - try: - del os.environ[key] - except Exception: - pass - else: - os.environ[key] = value - yield bytes_output - finally: - for key, value in iteritems(old_env): - if value is None: - try: - del os.environ[key] - except Exception: - pass - else: - os.environ[key] = value - sys.stdout = old_stdout - sys.stderr = old_stderr - sys.stdin = old_stdin - clickpkg.termui.visible_prompt_func = old_visible_prompt_func - clickpkg.termui.hidden_prompt_func = old_hidden_prompt_func - clickpkg.termui._getchar = old__getchar_func - clickpkg.utils.should_strip_ansi = old_should_strip_ansi - clickpkg.formatting.FORCED_WIDTH = old_forced_width - - def invoke(self, cli, args=None, input=None, env=None, - catch_exceptions=True, color=False, **extra): - """Invokes a command in an isolated environment. The arguments are - forwarded directly to the command line script, the `extra` keyword - arguments are passed to the :meth:`~clickpkg.Command.main` function of - the command. - - This returns a :class:`Result` object. - - .. versionadded:: 3.0 - The ``catch_exceptions`` parameter was added. - - .. versionchanged:: 3.0 - The result object now has an `exc_info` attribute with the - traceback if available. - - .. 
versionadded:: 4.0 - The ``color`` parameter was added. - - :param cli: the command to invoke - :param args: the arguments to invoke - :param input: the input data for `sys.stdin`. - :param env: the environment overrides. - :param catch_exceptions: Whether to catch any other exceptions than - ``SystemExit``. - :param extra: the keyword arguments to pass to :meth:`main`. - :param color: whether the output should contain color codes. The - application can still override this explicitly. - """ - exc_info = None - with self.isolation(input=input, env=env, color=color) as out: - exception = None - exit_code = 0 - - try: - cli.main(args=args or (), - prog_name=self.get_default_prog_name(cli), **extra) - except SystemExit as e: - if e.code != 0: - exception = e - - exc_info = sys.exc_info() - - exit_code = e.code - if not isinstance(exit_code, int): - sys.stdout.write(str(exit_code)) - sys.stdout.write('\n') - exit_code = 1 - except Exception as e: - if not catch_exceptions: - raise - exception = e - exit_code = -1 - exc_info = sys.exc_info() - finally: - sys.stdout.flush() - output = out.getvalue() - - return Result(runner=self, - output_bytes=output, - exit_code=exit_code, - exception=exception, - exc_info=exc_info) - - @contextlib.contextmanager - def isolated_filesystem(self): - """A context manager that creates a temporary folder and changes - the current working directory to it for isolated filesystem tests. - """ - cwd = os.getcwd() - t = tempfile.mkdtemp() - os.chdir(t) - try: - yield t - finally: - os.chdir(cwd) - try: - shutil.rmtree(t) - except (OSError, IOError): - pass diff --git a/pyextra/click/types.py b/pyextra/click/types.py deleted file mode 100644 index 36390026d..000000000 --- a/pyextra/click/types.py +++ /dev/null @@ -1,550 +0,0 @@ -import os -import stat - -from ._compat import open_stream, text_type, filename_to_ui, \ - get_filesystem_encoding, get_streerror, _get_argv_encoding, PY2 -from .exceptions import BadParameter -from .utils import safecall, LazyFile - - -class ParamType(object): - """Helper for converting values through types. The following is - necessary for a valid type: - - * it needs a name - * it needs to pass through None unchanged - * it needs to convert from a string - * it needs to convert its result type through unchanged - (eg: needs to be idempotent) - * it needs to be able to deal with param and context being `None`. - This can be the case when the object is used with prompt - inputs. - """ - is_composite = False - - #: the descriptive name of this type - name = None - - #: if a list of this type is expected and the value is pulled from a - #: string environment variable, this is what splits it up. `None` - #: means any whitespace. For all parameters the general rule is that - #: whitespace splits them up. The exception are paths and files which - #: are split by ``os.path.pathsep`` by default (":" on Unix and ";" on - #: Windows). - envvar_list_splitter = None - - def __call__(self, value, param=None, ctx=None): - if value is not None: - return self.convert(value, param, ctx) - - def get_metavar(self, param): - """Returns the metavar default for this param if it provides one.""" - - def get_missing_message(self, param): - """Optionally might return extra information about a missing - parameter. - - .. versionadded:: 2.0 - """ - - def convert(self, value, param, ctx): - """Converts the value. This is not invoked for values that are - `None` (the missing value). 
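The ``ParamType`` contract described above can be illustrated with a small custom type; this is a hedged sketch only, and the ``fail()`` helper it relies on is defined further below::

    import click

    class CommaSeparated(click.ParamType):
        """Hypothetical type that splits 'a,b,c' into a list."""
        name = 'commalist'

        def convert(self, value, param, ctx):
            # Pass through values that are already converted.
            if value is None or isinstance(value, list):
                return value
            try:
                return [item.strip() for item in value.split(',')]
            except AttributeError:
                self.fail('%r is not a comma separated string' % value,
                          param, ctx)

    @click.command()
    @click.option('--tags', type=CommaSeparated())
    def tag(tags):
        click.echo(repr(tags))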
- """ - return value - - def split_envvar_value(self, rv): - """Given a value from an environment variable this splits it up - into small chunks depending on the defined envvar list splitter. - - If the splitter is set to `None`, which means that whitespace splits, - then leading and trailing whitespace is ignored. Otherwise, leading - and trailing splitters usually lead to empty items being included. - """ - return (rv or '').split(self.envvar_list_splitter) - - def fail(self, message, param=None, ctx=None): - """Helper method to fail with an invalid value message.""" - raise BadParameter(message, ctx=ctx, param=param) - - -class CompositeParamType(ParamType): - is_composite = True - - @property - def arity(self): - raise NotImplementedError() - - -class FuncParamType(ParamType): - - def __init__(self, func): - self.name = func.__name__ - self.func = func - - def convert(self, value, param, ctx): - try: - return self.func(value) - except ValueError: - try: - value = text_type(value) - except UnicodeError: - value = str(value).decode('utf-8', 'replace') - self.fail(value, param, ctx) - - -class UnprocessedParamType(ParamType): - name = 'text' - - def convert(self, value, param, ctx): - return value - - def __repr__(self): - return 'UNPROCESSED' - - -class StringParamType(ParamType): - name = 'text' - - def convert(self, value, param, ctx): - if isinstance(value, bytes): - enc = _get_argv_encoding() - try: - value = value.decode(enc) - except UnicodeError: - fs_enc = get_filesystem_encoding() - if fs_enc != enc: - try: - value = value.decode(fs_enc) - except UnicodeError: - value = value.decode('utf-8', 'replace') - return value - return value - - def __repr__(self): - return 'STRING' - - -class Choice(ParamType): - """The choice type allows a value to be checked against a fixed set of - supported values. All of these values have to be strings. - - See :ref:`choice-opts` for an example. - """ - name = 'choice' - - def __init__(self, choices): - self.choices = choices - - def get_metavar(self, param): - return '[%s]' % '|'.join(self.choices) - - def get_missing_message(self, param): - return 'Choose from %s.' % ', '.join(self.choices) - - def convert(self, value, param, ctx): - # Exact match - if value in self.choices: - return value - - # Match through normalization - if ctx is not None and \ - ctx.token_normalize_func is not None: - value = ctx.token_normalize_func(value) - for choice in self.choices: - if ctx.token_normalize_func(choice) == value: - return choice - - self.fail('invalid choice: %s. (choose from %s)' % - (value, ', '.join(self.choices)), param, ctx) - - def __repr__(self): - return 'Choice(%r)' % list(self.choices) - - -class IntParamType(ParamType): - name = 'integer' - - def convert(self, value, param, ctx): - try: - return int(value) - except (ValueError, UnicodeError): - self.fail('%s is not a valid integer' % value, param, ctx) - - def __repr__(self): - return 'INT' - - -class IntRange(IntParamType): - """A parameter that works similar to :data:`click.INT` but restricts - the value to fit into a range. The default behavior is to fail if the - value falls outside the range, but it can also be silently clamped - between the two edges. - - See :ref:`ranges` for an example. 
- """ - name = 'integer range' - - def __init__(self, min=None, max=None, clamp=False): - self.min = min - self.max = max - self.clamp = clamp - - def convert(self, value, param, ctx): - rv = IntParamType.convert(self, value, param, ctx) - if self.clamp: - if self.min is not None and rv < self.min: - return self.min - if self.max is not None and rv > self.max: - return self.max - if self.min is not None and rv < self.min or \ - self.max is not None and rv > self.max: - if self.min is None: - self.fail('%s is bigger than the maximum valid value ' - '%s.' % (rv, self.max), param, ctx) - elif self.max is None: - self.fail('%s is smaller than the minimum valid value ' - '%s.' % (rv, self.min), param, ctx) - else: - self.fail('%s is not in the valid range of %s to %s.' - % (rv, self.min, self.max), param, ctx) - return rv - - def __repr__(self): - return 'IntRange(%r, %r)' % (self.min, self.max) - - -class BoolParamType(ParamType): - name = 'boolean' - - def convert(self, value, param, ctx): - if isinstance(value, bool): - return bool(value) - value = value.lower() - if value in ('true', '1', 'yes', 'y'): - return True - elif value in ('false', '0', 'no', 'n'): - return False - self.fail('%s is not a valid boolean' % value, param, ctx) - - def __repr__(self): - return 'BOOL' - - -class FloatParamType(ParamType): - name = 'float' - - def convert(self, value, param, ctx): - try: - return float(value) - except (UnicodeError, ValueError): - self.fail('%s is not a valid floating point value' % - value, param, ctx) - - def __repr__(self): - return 'FLOAT' - - -class UUIDParameterType(ParamType): - name = 'uuid' - - def convert(self, value, param, ctx): - import uuid - try: - if PY2 and isinstance(value, text_type): - value = value.encode('ascii') - return uuid.UUID(value) - except (UnicodeError, ValueError): - self.fail('%s is not a valid UUID value' % value, param, ctx) - - def __repr__(self): - return 'UUID' - - -class File(ParamType): - """Declares a parameter to be a file for reading or writing. The file - is automatically closed once the context tears down (after the command - finished working). - - Files can be opened for reading or writing. The special value ``-`` - indicates stdin or stdout depending on the mode. - - By default, the file is opened for reading text data, but it can also be - opened in binary mode or for writing. The encoding parameter can be used - to force a specific encoding. - - The `lazy` flag controls if the file should be opened immediately or - upon first IO. The default is to be non lazy for standard input and - output streams as well as files opened for reading, lazy otherwise. - - Starting with Click 2.0, files can also be opened atomically in which - case all writes go into a separate file in the same folder and upon - completion the file will be moved over to the original location. This - is useful if a file regularly read by other users is modified. - - See :ref:`file-args` for more information. 
- """ - name = 'filename' - envvar_list_splitter = os.path.pathsep - - def __init__(self, mode='r', encoding=None, errors='strict', lazy=None, - atomic=False): - self.mode = mode - self.encoding = encoding - self.errors = errors - self.lazy = lazy - self.atomic = atomic - - def resolve_lazy_flag(self, value): - if self.lazy is not None: - return self.lazy - if value == '-': - return False - elif 'w' in self.mode: - return True - return False - - def convert(self, value, param, ctx): - try: - if hasattr(value, 'read') or hasattr(value, 'write'): - return value - - lazy = self.resolve_lazy_flag(value) - - if lazy: - f = LazyFile(value, self.mode, self.encoding, self.errors, - atomic=self.atomic) - if ctx is not None: - ctx.call_on_close(f.close_intelligently) - return f - - f, should_close = open_stream(value, self.mode, - self.encoding, self.errors, - atomic=self.atomic) - # If a context is provided, we automatically close the file - # at the end of the context execution (or flush out). If a - # context does not exist, it's the caller's responsibility to - # properly close the file. This for instance happens when the - # type is used with prompts. - if ctx is not None: - if should_close: - ctx.call_on_close(safecall(f.close)) - else: - ctx.call_on_close(safecall(f.flush)) - return f - except (IOError, OSError) as e: - self.fail('Could not open file: %s: %s' % ( - filename_to_ui(value), - get_streerror(e), - ), param, ctx) - - -class Path(ParamType): - """The path type is similar to the :class:`File` type but it performs - different checks. First of all, instead of returning an open file - handle it returns just the filename. Secondly, it can perform various - basic checks about what the file or directory should be. - - .. versionchanged:: 6.0 - `allow_dash` was added. - - :param exists: if set to true, the file or directory needs to exist for - this value to be valid. If this is not required and a - file does indeed not exist, then all further checks are - silently skipped. - :param file_okay: controls if a file is a possible value. - :param dir_okay: controls if a directory is a possible value. - :param writable: if true, a writable check is performed. - :param readable: if true, a readable check is performed. - :param resolve_path: if this is true, then the path is fully resolved - before the value is passed onwards. This means - that it's absolute and symlinks are resolved. - :param allow_dash: If this is set to `True`, a single dash to indicate - standard streams is permitted. - :param type: optionally a string type that should be used to - represent the path. The default is `None` which - means the return value will be either bytes or - unicode depending on what makes most sense given the - input data Click deals with. 
- """ - envvar_list_splitter = os.path.pathsep - - def __init__(self, exists=False, file_okay=True, dir_okay=True, - writable=False, readable=True, resolve_path=False, - allow_dash=False, path_type=None): - self.exists = exists - self.file_okay = file_okay - self.dir_okay = dir_okay - self.writable = writable - self.readable = readable - self.resolve_path = resolve_path - self.allow_dash = allow_dash - self.type = path_type - - if self.file_okay and not self.dir_okay: - self.name = 'file' - self.path_type = 'File' - if self.dir_okay and not self.file_okay: - self.name = 'directory' - self.path_type = 'Directory' - else: - self.name = 'path' - self.path_type = 'Path' - - def coerce_path_result(self, rv): - if self.type is not None and not isinstance(rv, self.type): - if self.type is text_type: - rv = rv.decode(get_filesystem_encoding()) - else: - rv = rv.encode(get_filesystem_encoding()) - return rv - - def convert(self, value, param, ctx): - rv = value - - is_dash = self.file_okay and self.allow_dash and rv in (b'-', '-') - - if not is_dash: - if self.resolve_path: - rv = os.path.realpath(rv) - - try: - st = os.stat(rv) - except OSError: - if not self.exists: - return self.coerce_path_result(rv) - self.fail('%s "%s" does not exist.' % ( - self.path_type, - filename_to_ui(value) - ), param, ctx) - - if not self.file_okay and stat.S_ISREG(st.st_mode): - self.fail('%s "%s" is a file.' % ( - self.path_type, - filename_to_ui(value) - ), param, ctx) - if not self.dir_okay and stat.S_ISDIR(st.st_mode): - self.fail('%s "%s" is a directory.' % ( - self.path_type, - filename_to_ui(value) - ), param, ctx) - if self.writable and not os.access(value, os.W_OK): - self.fail('%s "%s" is not writable.' % ( - self.path_type, - filename_to_ui(value) - ), param, ctx) - if self.readable and not os.access(value, os.R_OK): - self.fail('%s "%s" is not readable.' % ( - self.path_type, - filename_to_ui(value) - ), param, ctx) - - return self.coerce_path_result(rv) - - -class Tuple(CompositeParamType): - """The default behavior of Click is to apply a type on a value directly. - This works well in most cases, except for when `nargs` is set to a fixed - count and different types should be used for different items. In this - case the :class:`Tuple` type can be used. This type can only be used - if `nargs` is set to a fixed number. - - For more information see :ref:`tuple-type`. - - This can be selected by using a Python tuple literal as a type. - - :param types: a list of types that should be used for the tuple items. - """ - - def __init__(self, types): - self.types = [convert_type(ty) for ty in types] - - @property - def name(self): - return "<" + " ".join(ty.name for ty in self.types) + ">" - - @property - def arity(self): - return len(self.types) - - def convert(self, value, param, ctx): - if len(value) != len(self.types): - raise TypeError('It would appear that nargs is set to conflict ' - 'with the composite type arity.') - return tuple(ty(x, param, ctx) for ty, x in zip(self.types, value)) - - -def convert_type(ty, default=None): - """Converts a callable or python ty into the most appropriate param - ty. 
- """ - guessed_type = False - if ty is None and default is not None: - if isinstance(default, tuple): - ty = tuple(map(type, default)) - else: - ty = type(default) - guessed_type = True - - if isinstance(ty, tuple): - return Tuple(ty) - if isinstance(ty, ParamType): - return ty - if ty is text_type or ty is str or ty is None: - return STRING - if ty is int: - return INT - # Booleans are only okay if not guessed. This is done because for - # flags the default value is actually a bit of a lie in that it - # indicates which of the flags is the one we want. See get_default() - # for more information. - if ty is bool and not guessed_type: - return BOOL - if ty is float: - return FLOAT - if guessed_type: - return STRING - - # Catch a common mistake - if __debug__: - try: - if issubclass(ty, ParamType): - raise AssertionError('Attempted to use an uninstantiated ' - 'parameter type (%s).' % ty) - except TypeError: - pass - return FuncParamType(ty) - - -#: A dummy parameter type that just does nothing. From a user's -#: perspective this appears to just be the same as `STRING` but internally -#: no string conversion takes place. This is necessary to achieve the -#: same bytes/unicode behavior on Python 2/3 in situations where you want -#: to not convert argument types. This is usually useful when working -#: with file paths as they can appear in bytes and unicode. -#: -#: For path related uses the :class:`Path` type is a better choice but -#: there are situations where an unprocessed type is useful which is why -#: it is is provided. -#: -#: .. versionadded:: 4.0 -UNPROCESSED = UnprocessedParamType() - -#: A unicode string parameter type which is the implicit default. This -#: can also be selected by using ``str`` as type. -STRING = StringParamType() - -#: An integer parameter. This can also be selected by using ``int`` as -#: type. -INT = IntParamType() - -#: A floating point value parameter. This can also be selected by using -#: ``float`` as type. -FLOAT = FloatParamType() - -#: A boolean parameter. This is the default for boolean flags. This can -#: also be selected by using ``bool`` as a type. -BOOL = BoolParamType() - -#: A UUID parameter. 
-UUID = UUIDParameterType() diff --git a/pyextra/click/utils.py b/pyextra/click/utils.py deleted file mode 100644 index eee626d3f..000000000 --- a/pyextra/click/utils.py +++ /dev/null @@ -1,415 +0,0 @@ -import os -import sys - -from .globals import resolve_color_default - -from ._compat import text_type, open_stream, get_filesystem_encoding, \ - get_streerror, string_types, PY2, binary_streams, text_streams, \ - filename_to_ui, auto_wrap_for_ansi, strip_ansi, should_strip_ansi, \ - _default_text_stdout, _default_text_stderr, is_bytes, WIN - -if not PY2: - from ._compat import _find_binary_writer -elif WIN: - from ._winconsole import _get_windows_argv, \ - _hash_py_argv, _initial_argv_hash - - -echo_native_types = string_types + (bytes, bytearray) - - -def _posixify(name): - return '-'.join(name.split()).lower() - - -def safecall(func): - """Wraps a function so that it swallows exceptions.""" - def wrapper(*args, **kwargs): - try: - return func(*args, **kwargs) - except Exception: - pass - return wrapper - - -def make_str(value): - """Converts a value into a valid string.""" - if isinstance(value, bytes): - try: - return value.decode(get_filesystem_encoding()) - except UnicodeError: - return value.decode('utf-8', 'replace') - return text_type(value) - - -def make_default_short_help(help, max_length=45): - words = help.split() - total_length = 0 - result = [] - done = False - - for word in words: - if word[-1:] == '.': - done = True - new_length = result and 1 + len(word) or len(word) - if total_length + new_length > max_length: - result.append('...') - done = True - else: - if result: - result.append(' ') - result.append(word) - if done: - break - total_length += new_length - - return ''.join(result) - - -class LazyFile(object): - """A lazy file works like a regular file but it does not fully open - the file but it does perform some basic checks early to see if the - filename parameter does make sense. This is useful for safely opening - files for writing. - """ - - def __init__(self, filename, mode='r', encoding=None, errors='strict', - atomic=False): - self.name = filename - self.mode = mode - self.encoding = encoding - self.errors = errors - self.atomic = atomic - - if filename == '-': - self._f, self.should_close = open_stream(filename, mode, - encoding, errors) - else: - if 'r' in mode: - # Open and close the file in case we're opening it for - # reading so that we can catch at least some errors in - # some cases early. - open(filename, mode).close() - self._f = None - self.should_close = True - - def __getattr__(self, name): - return getattr(self.open(), name) - - def __repr__(self): - if self._f is not None: - return repr(self._f) - return '' % (self.name, self.mode) - - def open(self): - """Opens the file if it's not yet open. This call might fail with - a :exc:`FileError`. Not handling this error will produce an error - that Click shows. - """ - if self._f is not None: - return self._f - try: - rv, self.should_close = open_stream(self.name, self.mode, - self.encoding, - self.errors, - atomic=self.atomic) - except (IOError, OSError) as e: - from .exceptions import FileError - raise FileError(self.name, hint=get_streerror(e)) - self._f = rv - return rv - - def close(self): - """Closes the underlying file, no matter what.""" - if self._f is not None: - self._f.close() - - def close_intelligently(self): - """This function only closes the file if it was opened by the lazy - file wrapper. For instance this will never close stdin. 
- """ - if self.should_close: - self.close() - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, tb): - self.close_intelligently() - - def __iter__(self): - self.open() - return iter(self._f) - - -class KeepOpenFile(object): - - def __init__(self, file): - self._file = file - - def __getattr__(self, name): - return getattr(self._file, name) - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, tb): - pass - - def __repr__(self): - return repr(self._file) - - def __iter__(self): - return iter(self._file) - - -def echo(message=None, file=None, nl=True, err=False, color=None): - """Prints a message plus a newline to the given file or stdout. On - first sight, this looks like the print function, but it has improved - support for handling Unicode and binary data that does not fail no - matter how badly configured the system is. - - Primarily it means that you can print binary data as well as Unicode - data on both 2.x and 3.x to the given file in the most appropriate way - possible. This is a very carefree function as in that it will try its - best to not fail. As of Click 6.0 this includes support for unicode - output on the Windows console. - - In addition to that, if `colorama`_ is installed, the echo function will - also support clever handling of ANSI codes. Essentially it will then - do the following: - - - add transparent handling of ANSI color codes on Windows. - - hide ANSI codes automatically if the destination file is not a - terminal. - - .. _colorama: http://pypi.python.org/pypi/colorama - - .. versionchanged:: 6.0 - As of Click 6.0 the echo function will properly support unicode - output on the windows console. Not that click does not modify - the interpreter in any way which means that `sys.stdout` or the - print statement or function will still not provide unicode support. - - .. versionchanged:: 2.0 - Starting with version 2.0 of Click, the echo function will work - with colorama if it's installed. - - .. versionadded:: 3.0 - The `err` parameter was added. - - .. versionchanged:: 4.0 - Added the `color` flag. - - :param message: the message to print - :param file: the file to write to (defaults to ``stdout``) - :param err: if set to true the file defaults to ``stderr`` instead of - ``stdout``. This is faster and easier than calling - :func:`get_text_stderr` yourself. - :param nl: if set to `True` (the default) a newline is printed afterwards. - :param color: controls if the terminal supports ANSI colors or not. The - default is autodetection. - """ - if file is None: - if err: - file = _default_text_stderr() - else: - file = _default_text_stdout() - - # Convert non bytes/text into the native string type. - if message is not None and not isinstance(message, echo_native_types): - message = text_type(message) - - if nl: - message = message or u'' - if isinstance(message, text_type): - message += u'\n' - else: - message += b'\n' - - # If there is a message, and we're in Python 3, and the value looks - # like bytes, we manually need to find the binary stream and write the - # message in there. This is done separately so that most stream - # types will work as you would expect. Eg: you can write to StringIO - # for other cases. - if message and not PY2 and is_bytes(message): - binary_file = _find_binary_writer(file) - if binary_file is not None: - file.flush() - binary_file.write(message) - binary_file.flush() - return - - # ANSI-style support. If there is no message or we are dealing with - # bytes nothing is happening. 
If we are connected to a file we want - # to strip colors. If we are on windows we either wrap the stream - # to strip the color or we use the colorama support to translate the - # ansi codes to API calls. - if message and not is_bytes(message): - color = resolve_color_default(color) - if should_strip_ansi(file, color): - message = strip_ansi(message) - elif WIN: - if auto_wrap_for_ansi is not None: - file = auto_wrap_for_ansi(file) - elif not color: - message = strip_ansi(message) - - if message: - file.write(message) - file.flush() - - -def get_binary_stream(name): - """Returns a system stream for byte processing. This essentially - returns the stream from the sys module with the given name but it - solves some compatibility issues between different Python versions. - Primarily this function is necessary for getting binary streams on - Python 3. - - :param name: the name of the stream to open. Valid names are ``'stdin'``, - ``'stdout'`` and ``'stderr'`` - """ - opener = binary_streams.get(name) - if opener is None: - raise TypeError('Unknown standard stream %r' % name) - return opener() - - -def get_text_stream(name, encoding=None, errors='strict'): - """Returns a system stream for text processing. This usually returns - a wrapped stream around a binary stream returned from - :func:`get_binary_stream` but it also can take shortcuts on Python 3 - for already correctly configured streams. - - :param name: the name of the stream to open. Valid names are ``'stdin'``, - ``'stdout'`` and ``'stderr'`` - :param encoding: overrides the detected default encoding. - :param errors: overrides the default error mode. - """ - opener = text_streams.get(name) - if opener is None: - raise TypeError('Unknown standard stream %r' % name) - return opener(encoding, errors) - - -def open_file(filename, mode='r', encoding=None, errors='strict', - lazy=False, atomic=False): - """This is similar to how the :class:`File` works but for manual - usage. Files are opened non lazy by default. This can open regular - files as well as stdin/stdout if ``'-'`` is passed. - - If stdin/stdout is returned the stream is wrapped so that the context - manager will not close the stream accidentally. This makes it possible - to always use the function like this without having to worry to - accidentally close a standard stream:: - - with open_file(filename) as f: - ... - - .. versionadded:: 3.0 - - :param filename: the name of the file to open (or ``'-'`` for stdin/stdout). - :param mode: the mode in which to open the file. - :param encoding: the encoding to use. - :param errors: the error handling for this file. - :param lazy: can be flipped to true to open the file lazily. - :param atomic: in atomic mode writes go into a temporary file and it's - moved on close. - """ - if lazy: - return LazyFile(filename, mode, encoding, errors, atomic=atomic) - f, should_close = open_stream(filename, mode, encoding, errors, - atomic=atomic) - if not should_close: - f = KeepOpenFile(f) - return f - - -def get_os_args(): - """This returns the argument part of sys.argv in the most appropriate - form for processing. What this means is that this return value is in - a format that works for Click to process but does not necessarily - correspond well to what's actually standard for the interpreter. - - On most environments the return value is ``sys.argv[:1]`` unchanged. 
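A small, hedged example of the stream helpers above (the ``get_os_args()`` docstring continues below)::

    import click

    # Binary stdin/stdout regardless of Python version.
    stdin = click.get_binary_stream('stdin')
    stdout = click.get_binary_stream('stdout')
    stdout.write(stdin.read())

    # open_file() treats '-' as stdin/stdout and will not close it
    # when used as a context manager.
    with click.open_file('-', 'w') as f:
        f.write('done\n')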
- However if you are on Windows and running Python 2 the return value - will actually be a list of unicode strings instead because the - default behavior on that platform otherwise will not be able to - carry all possible values that sys.argv can have. - - .. versionadded:: 6.0 - """ - # We can only extract the unicode argv if sys.argv has not been - # changed since the startup of the application. - if PY2 and WIN and _initial_argv_hash == _hash_py_argv(): - return _get_windows_argv() - return sys.argv[1:] - - -def format_filename(filename, shorten=False): - """Formats a filename for user display. The main purpose of this - function is to ensure that the filename can be displayed at all. This - will decode the filename to unicode if necessary in a way that it will - not fail. Optionally, it can shorten the filename to not include the - full path to the filename. - - :param filename: formats a filename for UI display. This will also convert - the filename into unicode without failing. - :param shorten: this optionally shortens the filename to strip of the - path that leads up to it. - """ - if shorten: - filename = os.path.basename(filename) - return filename_to_ui(filename) - - -def get_app_dir(app_name, roaming=True, force_posix=False): - r"""Returns the config folder for the application. The default behavior - is to return whatever is most appropriate for the operating system. - - To give you an idea, for an app called ``"Foo Bar"``, something like - the following folders could be returned: - - Mac OS X: - ``~/Library/Application Support/Foo Bar`` - Mac OS X (POSIX): - ``~/.foo-bar`` - Unix: - ``~/.config/foo-bar`` - Unix (POSIX): - ``~/.foo-bar`` - Win XP (roaming): - ``C:\Documents and Settings\\Local Settings\Application Data\Foo Bar`` - Win XP (not roaming): - ``C:\Documents and Settings\\Application Data\Foo Bar`` - Win 7 (roaming): - ``C:\Users\\AppData\Roaming\Foo Bar`` - Win 7 (not roaming): - ``C:\Users\\AppData\Local\Foo Bar`` - - .. versionadded:: 2.0 - - :param app_name: the application name. This should be properly capitalized - and can contain whitespace. - :param roaming: controls if the folder should be roaming or not on Windows. - Has no affect otherwise. - :param force_posix: if this is set to `True` then on any POSIX system the - folder will be stored in the home folder with a leading - dot instead of the XDG config home or darwin's - application support folder. - """ - if WIN: - key = roaming and 'APPDATA' or 'LOCALAPPDATA' - folder = os.environ.get(key) - if folder is None: - folder = os.path.expanduser('~') - return os.path.join(folder, app_name) - if force_posix: - return os.path.join(os.path.expanduser('~/.' + _posixify(app_name))) - if sys.platform == 'darwin': - return os.path.join(os.path.expanduser( - '~/Library/Application Support'), app_name) - return os.path.join( - os.environ.get('XDG_CONFIG_HOME', os.path.expanduser('~/.config')), - _posixify(app_name)) diff --git a/pyextra/flask/__init__.py b/pyextra/flask/__init__.py deleted file mode 100644 index ded198224..000000000 --- a/pyextra/flask/__init__.py +++ /dev/null @@ -1,49 +0,0 @@ -# -*- coding: utf-8 -*- -""" - flask - ~~~~~ - - A microframework based on Werkzeug. It's extensively documented - and follows best practice patterns. - - :copyright: © 2010 by the Pallets team. - :license: BSD, see LICENSE for more details. -""" - -__version__ = '1.0.2' - -# utilities we import from Werkzeug and Jinja2 that are unused -# in the module but are exported as public interface. 
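A minimal sketch of ``get_app_dir()`` and ``format_filename()`` from the deleted utils module; the application name and file name are hypothetical::

    import os
    import click

    config_dir = click.get_app_dir('Foo Bar')
    config_path = os.path.join(config_dir, 'settings.ini')
    click.echo('Config would be read from %s'
               % click.format_filename(config_path))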
-from werkzeug.exceptions import abort -from werkzeug.utils import redirect -from jinja2 import Markup, escape - -from .app import Flask, Request, Response -from .config import Config -from .helpers import url_for, flash, send_file, send_from_directory, \ - get_flashed_messages, get_template_attribute, make_response, safe_join, \ - stream_with_context -from .globals import current_app, g, request, session, _request_ctx_stack, \ - _app_ctx_stack -from .ctx import has_request_context, has_app_context, \ - after_this_request, copy_current_request_context -from .blueprints import Blueprint -from .templating import render_template, render_template_string - -# the signals -from .signals import signals_available, template_rendered, request_started, \ - request_finished, got_request_exception, request_tearing_down, \ - appcontext_tearing_down, appcontext_pushed, \ - appcontext_popped, message_flashed, before_render_template - -# We're not exposing the actual json module but a convenient wrapper around -# it. -from . import json - -# This was the only thing that Flask used to export at one point and it had -# a more generic name. -jsonify = json.jsonify - -# backwards compat, goes away in 1.0 -from .sessions import SecureCookieSession as Session -json_available = True diff --git a/pyextra/flask/__main__.py b/pyextra/flask/__main__.py deleted file mode 100644 index 4aee65437..000000000 --- a/pyextra/flask/__main__.py +++ /dev/null @@ -1,14 +0,0 @@ -# -*- coding: utf-8 -*- -""" - flask.__main__ - ~~~~~~~~~~~~~~ - - Alias for flask.run for the command line. - - :copyright: © 2010 by the Pallets team. - :license: BSD, see LICENSE for more details. -""" - -if __name__ == '__main__': - from .cli import main - main(as_module=True) diff --git a/pyextra/flask/_compat.py b/pyextra/flask/_compat.py deleted file mode 100644 index a3b5b9c1c..000000000 --- a/pyextra/flask/_compat.py +++ /dev/null @@ -1,99 +0,0 @@ -# -*- coding: utf-8 -*- -""" - flask._compat - ~~~~~~~~~~~~~ - - Some py2/py3 compatibility support based on a stripped down - version of six so we don't have to depend on a specific version - of it. - - :copyright: © 2010 by the Pallets team. - :license: BSD, see LICENSE for more details. -""" - -import sys - -PY2 = sys.version_info[0] == 2 -_identity = lambda x: x - - -if not PY2: - text_type = str - string_types = (str,) - integer_types = (int,) - - iterkeys = lambda d: iter(d.keys()) - itervalues = lambda d: iter(d.values()) - iteritems = lambda d: iter(d.items()) - - from inspect import getfullargspec as getargspec - from io import StringIO - - def reraise(tp, value, tb=None): - if value.__traceback__ is not tb: - raise value.with_traceback(tb) - raise value - - implements_to_string = _identity - -else: - text_type = unicode - string_types = (str, unicode) - integer_types = (int, long) - - iterkeys = lambda d: d.iterkeys() - itervalues = lambda d: d.itervalues() - iteritems = lambda d: d.iteritems() - - from inspect import getargspec - from cStringIO import StringIO - - exec('def reraise(tp, value, tb=None):\n raise tp, value, tb') - - def implements_to_string(cls): - cls.__unicode__ = cls.__str__ - cls.__str__ = lambda x: x.__unicode__().encode('utf-8') - return cls - - -def with_metaclass(meta, *bases): - """Create a base class with a metaclass.""" - # This requires a bit of explanation: the basic idea is to make a - # dummy metaclass for one level of class instantiation that replaces - # itself with the actual metaclass. 
- class metaclass(type): - def __new__(cls, name, this_bases, d): - return meta(name, bases, d) - return type.__new__(metaclass, 'temporary_class', (), {}) - - -# Certain versions of pypy have a bug where clearing the exception stack -# breaks the __exit__ function in a very peculiar way. The second level of -# exception blocks is necessary because pypy seems to forget to check if an -# exception happened until the next bytecode instruction? -# -# Relevant PyPy bugfix commit: -# https://bitbucket.org/pypy/pypy/commits/77ecf91c635a287e88e60d8ddb0f4e9df4003301 -# According to ronan on #pypy IRC, it is released in PyPy2 2.3 and later -# versions. -# -# Ubuntu 14.04 has PyPy 2.2.1, which does exhibit this bug. -BROKEN_PYPY_CTXMGR_EXIT = False -if hasattr(sys, 'pypy_version_info'): - class _Mgr(object): - def __enter__(self): - return self - def __exit__(self, *args): - if hasattr(sys, 'exc_clear'): - # Python 3 (PyPy3) doesn't have exc_clear - sys.exc_clear() - try: - try: - with _Mgr(): - raise AssertionError() - except: - raise - except TypeError: - BROKEN_PYPY_CTXMGR_EXIT = True - except AssertionError: - pass diff --git a/pyextra/flask/app.py b/pyextra/flask/app.py deleted file mode 100644 index 87c590034..000000000 --- a/pyextra/flask/app.py +++ /dev/null @@ -1,2315 +0,0 @@ -# -*- coding: utf-8 -*- -""" - flask.app - ~~~~~~~~~ - - This module implements the central WSGI application object. - - :copyright: © 2010 by the Pallets team. - :license: BSD, see LICENSE for more details. -""" - -import os -import sys -import warnings -from datetime import timedelta -from functools import update_wrapper -from itertools import chain -from threading import Lock - -from werkzeug.datastructures import Headers, ImmutableDict -from werkzeug.exceptions import BadRequest, BadRequestKeyError, HTTPException, \ - InternalServerError, MethodNotAllowed, default_exceptions -from werkzeug.routing import BuildError, Map, RequestRedirect, Rule - -from . import cli, json -from ._compat import integer_types, reraise, string_types, text_type -from .config import Config, ConfigAttribute -from .ctx import AppContext, RequestContext, _AppCtxGlobals -from .globals import _request_ctx_stack, g, request, session -from .helpers import ( - _PackageBoundObject, - _endpoint_from_view_func, find_package, get_env, get_debug_flag, - get_flashed_messages, locked_cached_property, url_for, get_load_dotenv -) -from .logging import create_logger -from .sessions import SecureCookieSessionInterface -from .signals import appcontext_tearing_down, got_request_exception, \ - request_finished, request_started, request_tearing_down -from .templating import DispatchingJinjaLoader, Environment, \ - _default_template_ctx_processor -from .wrappers import Request, Response - -# a singleton sentinel value for parameter defaults -_sentinel = object() - - -def _make_timedelta(value): - if not isinstance(value, timedelta): - return timedelta(seconds=value) - return value - - -def setupmethod(f): - """Wraps a method so that it performs a check in debug mode if the - first request was already handled. - """ - def wrapper_func(self, *args, **kwargs): - if self.debug and self._got_first_request: - raise AssertionError('A setup function was called after the ' - 'first request was handled. 
This usually indicates a bug ' - 'in the application where a module was not imported ' - 'and decorators or other functionality was called too late.\n' - 'To fix this make sure to import all your view modules, ' - 'database models and everything related at a central place ' - 'before the application starts serving requests.') - return f(self, *args, **kwargs) - return update_wrapper(wrapper_func, f) - - -class Flask(_PackageBoundObject): - """The flask object implements a WSGI application and acts as the central - object. It is passed the name of the module or package of the - application. Once it is created it will act as a central registry for - the view functions, the URL rules, template configuration and much more. - - The name of the package is used to resolve resources from inside the - package or the folder the module is contained in depending on if the - package parameter resolves to an actual python package (a folder with - an :file:`__init__.py` file inside) or a standard module (just a ``.py`` file). - - For more information about resource loading, see :func:`open_resource`. - - Usually you create a :class:`Flask` instance in your main module or - in the :file:`__init__.py` file of your package like this:: - - from flask import Flask - app = Flask(__name__) - - .. admonition:: About the First Parameter - - The idea of the first parameter is to give Flask an idea of what - belongs to your application. This name is used to find resources - on the filesystem, can be used by extensions to improve debugging - information and a lot more. - - So it's important what you provide there. If you are using a single - module, `__name__` is always the correct value. If you however are - using a package, it's usually recommended to hardcode the name of - your package there. - - For example if your application is defined in :file:`yourapplication/app.py` - you should create it with one of the two versions below:: - - app = Flask('yourapplication') - app = Flask(__name__.split('.')[0]) - - Why is that? The application will work even with `__name__`, thanks - to how resources are looked up. However it will make debugging more - painful. Certain extensions can make assumptions based on the - import name of your application. For example the Flask-SQLAlchemy - extension will look for the code in your application that triggered - an SQL query in debug mode. If the import name is not properly set - up, that debugging information is lost. (For example it would only - pick up SQL queries in `yourapplication.app` and not - `yourapplication.views.frontend`) - - .. versionadded:: 0.7 - The `static_url_path`, `static_folder`, and `template_folder` - parameters were added. - - .. versionadded:: 0.8 - The `instance_path` and `instance_relative_config` parameters were - added. - - .. versionadded:: 0.11 - The `root_path` parameter was added. - - .. versionadded:: 1.0 - The ``host_matching`` and ``static_host`` parameters were added. - - .. versionadded:: 1.0 - The ``subdomain_matching`` parameter was added. Subdomain - matching needs to be enabled manually now. Setting - :data:`SERVER_NAME` does not implicitly enable it. - - :param import_name: the name of the application package - :param static_url_path: can be used to specify a different path for the - static files on the web. Defaults to the name - of the `static_folder` folder. - :param static_folder: the folder with static files that should be served - at `static_url_path`. Defaults to the ``'static'`` - folder in the root path of the application. 
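A short sketch of the constructor choices discussed above (the parameter list continues below); it assumes a single-module app, so ``__name__`` is passed, with the hardcoded-package alternative noted in a comment. The ``/assets`` path is illustrative::

    from flask import Flask

    # For a package, the docstring recommends hardcoding the import name,
    # e.g. Flask('yourapplication') or Flask(__name__.split('.')[0]).
    app = Flask(
        __name__,
        static_folder='static',      # served files live here...
        static_url_path='/assets',   # ...but are exposed under /assets
        template_folder='templates',
    )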
- :param static_host: the host to use when adding the static route. - Defaults to None. Required when using ``host_matching=True`` - with a ``static_folder`` configured. - :param host_matching: set ``url_map.host_matching`` attribute. - Defaults to False. - :param subdomain_matching: consider the subdomain relative to - :data:`SERVER_NAME` when matching routes. Defaults to False. - :param template_folder: the folder that contains the templates that should - be used by the application. Defaults to - ``'templates'`` folder in the root path of the - application. - :param instance_path: An alternative instance path for the application. - By default the folder ``'instance'`` next to the - package or module is assumed to be the instance - path. - :param instance_relative_config: if set to ``True`` relative filenames - for loading the config are assumed to - be relative to the instance path instead - of the application root. - :param root_path: Flask by default will automatically calculate the path - to the root of the application. In certain situations - this cannot be achieved (for instance if the package - is a Python 3 namespace package) and needs to be - manually defined. - """ - - #: The class that is used for request objects. See :class:`~flask.Request` - #: for more information. - request_class = Request - - #: The class that is used for response objects. See - #: :class:`~flask.Response` for more information. - response_class = Response - - #: The class that is used for the Jinja environment. - #: - #: .. versionadded:: 0.11 - jinja_environment = Environment - - #: The class that is used for the :data:`~flask.g` instance. - #: - #: Example use cases for a custom class: - #: - #: 1. Store arbitrary attributes on flask.g. - #: 2. Add a property for lazy per-request database connectors. - #: 3. Return None instead of AttributeError on unexpected attributes. - #: 4. Raise exception if an unexpected attr is set, a "controlled" flask.g. - #: - #: In Flask 0.9 this property was called `request_globals_class` but it - #: was changed in 0.10 to :attr:`app_ctx_globals_class` because the - #: flask.g object is now application context scoped. - #: - #: .. versionadded:: 0.10 - app_ctx_globals_class = _AppCtxGlobals - - #: The class that is used for the ``config`` attribute of this app. - #: Defaults to :class:`~flask.Config`. - #: - #: Example use cases for a custom class: - #: - #: 1. Default values for certain config options. - #: 2. Access to config values through attributes in addition to keys. - #: - #: .. versionadded:: 0.11 - config_class = Config - - #: The testing flag. Set this to ``True`` to enable the test mode of - #: Flask extensions (and in the future probably also Flask itself). - #: For example this might activate test helpers that have an - #: additional runtime cost which should not be enabled by default. - #: - #: If this is enabled and PROPAGATE_EXCEPTIONS is not changed from the - #: default it's implicitly enabled. - #: - #: This attribute can also be configured from the config with the - #: ``TESTING`` configuration key. Defaults to ``False``. - testing = ConfigAttribute('TESTING') - - #: If a secret key is set, cryptographic components can use this to - #: sign cookies and other things. Set this to a complex random value - #: when you want to use the secure cookie for instance. - #: - #: This attribute can also be configured from the config with the - #: :data:`SECRET_KEY` configuration key. Defaults to ``None``. 
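The ``ConfigAttribute`` descriptors documented here (``TESTING``, ``SECRET_KEY`` and, further on, the session settings) simply proxy ``app.config``. A minimal sketch, assuming an instance-relative configuration; the secret value is a placeholder::

    from flask import Flask

    app = Flask(__name__, instance_relative_config=True)

    # Attribute access and config access are two views on the same value.
    app.config['SECRET_KEY'] = 'dev-only-placeholder'
    app.testing = True

    assert app.secret_key == 'dev-only-placeholder'
    assert app.config['TESTING'] is True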
- secret_key = ConfigAttribute('SECRET_KEY') - - #: The secure cookie uses this for the name of the session cookie. - #: - #: This attribute can also be configured from the config with the - #: ``SESSION_COOKIE_NAME`` configuration key. Defaults to ``'session'`` - session_cookie_name = ConfigAttribute('SESSION_COOKIE_NAME') - - #: A :class:`~datetime.timedelta` which is used to set the expiration - #: date of a permanent session. The default is 31 days which makes a - #: permanent session survive for roughly one month. - #: - #: This attribute can also be configured from the config with the - #: ``PERMANENT_SESSION_LIFETIME`` configuration key. Defaults to - #: ``timedelta(days=31)`` - permanent_session_lifetime = ConfigAttribute('PERMANENT_SESSION_LIFETIME', - get_converter=_make_timedelta) - - #: A :class:`~datetime.timedelta` which is used as default cache_timeout - #: for the :func:`send_file` functions. The default is 12 hours. - #: - #: This attribute can also be configured from the config with the - #: ``SEND_FILE_MAX_AGE_DEFAULT`` configuration key. This configuration - #: variable can also be set with an integer value used as seconds. - #: Defaults to ``timedelta(hours=12)`` - send_file_max_age_default = ConfigAttribute('SEND_FILE_MAX_AGE_DEFAULT', - get_converter=_make_timedelta) - - #: Enable this if you want to use the X-Sendfile feature. Keep in - #: mind that the server has to support this. This only affects files - #: sent with the :func:`send_file` method. - #: - #: .. versionadded:: 0.2 - #: - #: This attribute can also be configured from the config with the - #: ``USE_X_SENDFILE`` configuration key. Defaults to ``False``. - use_x_sendfile = ConfigAttribute('USE_X_SENDFILE') - - #: The JSON encoder class to use. Defaults to :class:`~flask.json.JSONEncoder`. - #: - #: .. versionadded:: 0.10 - json_encoder = json.JSONEncoder - - #: The JSON decoder class to use. Defaults to :class:`~flask.json.JSONDecoder`. - #: - #: .. versionadded:: 0.10 - json_decoder = json.JSONDecoder - - #: Options that are passed directly to the Jinja2 environment. - jinja_options = ImmutableDict( - extensions=['jinja2.ext.autoescape', 'jinja2.ext.with_'] - ) - - #: Default configuration parameters. - default_config = ImmutableDict({ - 'ENV': None, - 'DEBUG': None, - 'TESTING': False, - 'PROPAGATE_EXCEPTIONS': None, - 'PRESERVE_CONTEXT_ON_EXCEPTION': None, - 'SECRET_KEY': None, - 'PERMANENT_SESSION_LIFETIME': timedelta(days=31), - 'USE_X_SENDFILE': False, - 'SERVER_NAME': None, - 'APPLICATION_ROOT': '/', - 'SESSION_COOKIE_NAME': 'session', - 'SESSION_COOKIE_DOMAIN': None, - 'SESSION_COOKIE_PATH': None, - 'SESSION_COOKIE_HTTPONLY': True, - 'SESSION_COOKIE_SECURE': False, - 'SESSION_COOKIE_SAMESITE': None, - 'SESSION_REFRESH_EACH_REQUEST': True, - 'MAX_CONTENT_LENGTH': None, - 'SEND_FILE_MAX_AGE_DEFAULT': timedelta(hours=12), - 'TRAP_BAD_REQUEST_ERRORS': None, - 'TRAP_HTTP_EXCEPTIONS': False, - 'EXPLAIN_TEMPLATE_LOADING': False, - 'PREFERRED_URL_SCHEME': 'http', - 'JSON_AS_ASCII': True, - 'JSON_SORT_KEYS': True, - 'JSONIFY_PRETTYPRINT_REGULAR': False, - 'JSONIFY_MIMETYPE': 'application/json', - 'TEMPLATES_AUTO_RELOAD': None, - 'MAX_COOKIE_SIZE': 4093, - }) - - #: The rule object to use for URL rules created. This is used by - #: :meth:`add_url_rule`. Defaults to :class:`werkzeug.routing.Rule`. - #: - #: .. versionadded:: 0.7 - url_rule_class = Rule - - #: the test client that is used with when `test_client` is used. - #: - #: .. 
versionadded:: 0.7 - test_client_class = None - - #: The :class:`~click.testing.CliRunner` subclass, by default - #: :class:`~flask.testing.FlaskCliRunner` that is used by - #: :meth:`test_cli_runner`. Its ``__init__`` method should take a - #: Flask app object as the first argument. - #: - #: .. versionadded:: 1.0 - test_cli_runner_class = None - - #: the session interface to use. By default an instance of - #: :class:`~flask.sessions.SecureCookieSessionInterface` is used here. - #: - #: .. versionadded:: 0.8 - session_interface = SecureCookieSessionInterface() - - # TODO remove the next three attrs when Sphinx :inherited-members: works - # https://github.com/sphinx-doc/sphinx/issues/741 - - #: The name of the package or module that this app belongs to. Do not - #: change this once it is set by the constructor. - import_name = None - - #: Location of the template files to be added to the template lookup. - #: ``None`` if templates should not be added. - template_folder = None - - #: Absolute path to the package on the filesystem. Used to look up - #: resources contained in the package. - root_path = None - - def __init__( - self, - import_name, - static_url_path=None, - static_folder='static', - static_host=None, - host_matching=False, - subdomain_matching=False, - template_folder='templates', - instance_path=None, - instance_relative_config=False, - root_path=None - ): - _PackageBoundObject.__init__( - self, - import_name, - template_folder=template_folder, - root_path=root_path - ) - - if static_url_path is not None: - self.static_url_path = static_url_path - - if static_folder is not None: - self.static_folder = static_folder - - if instance_path is None: - instance_path = self.auto_find_instance_path() - elif not os.path.isabs(instance_path): - raise ValueError( - 'If an instance path is provided it must be absolute.' - ' A relative path was given instead.' - ) - - #: Holds the path to the instance folder. - #: - #: .. versionadded:: 0.8 - self.instance_path = instance_path - - #: The configuration dictionary as :class:`Config`. This behaves - #: exactly like a regular dictionary but supports additional methods - #: to load a config from files. - self.config = self.make_config(instance_relative_config) - - #: A dictionary of all view functions registered. The keys will - #: be function names which are also used to generate URLs and - #: the values are the function objects themselves. - #: To register a view function, use the :meth:`route` decorator. - self.view_functions = {} - - #: A dictionary of all registered error handlers. The key is ``None`` - #: for error handlers active on the application, otherwise the key is - #: the name of the blueprint. Each key points to another dictionary - #: where the key is the status code of the http exception. The - #: special key ``None`` points to a list of tuples where the first item - #: is the class for the instance check and the second the error handler - #: function. - #: - #: To register an error handler, use the :meth:`errorhandler` - #: decorator. - self.error_handler_spec = {} - - #: A list of functions that are called when :meth:`url_for` raises a - #: :exc:`~werkzeug.routing.BuildError`. Each function registered here - #: is called with `error`, `endpoint` and `values`. If a function - #: returns ``None`` or raises a :exc:`BuildError` the next function is - #: tried. - #: - #: .. 
versionadded:: 0.9 - self.url_build_error_handlers = [] - - #: A dictionary with lists of functions that will be called at the - #: beginning of each request. The key of the dictionary is the name of - #: the blueprint this function is active for, or ``None`` for all - #: requests. To register a function, use the :meth:`before_request` - #: decorator. - self.before_request_funcs = {} - - #: A list of functions that will be called at the beginning of the - #: first request to this instance. To register a function, use the - #: :meth:`before_first_request` decorator. - #: - #: .. versionadded:: 0.8 - self.before_first_request_funcs = [] - - #: A dictionary with lists of functions that should be called after - #: each request. The key of the dictionary is the name of the blueprint - #: this function is active for, ``None`` for all requests. This can for - #: example be used to close database connections. To register a function - #: here, use the :meth:`after_request` decorator. - self.after_request_funcs = {} - - #: A dictionary with lists of functions that are called after - #: each request, even if an exception has occurred. The key of the - #: dictionary is the name of the blueprint this function is active for, - #: ``None`` for all requests. These functions are not allowed to modify - #: the request, and their return values are ignored. If an exception - #: occurred while processing the request, it gets passed to each - #: teardown_request function. To register a function here, use the - #: :meth:`teardown_request` decorator. - #: - #: .. versionadded:: 0.7 - self.teardown_request_funcs = {} - - #: A list of functions that are called when the application context - #: is destroyed. Since the application context is also torn down - #: if the request ends this is the place to store code that disconnects - #: from databases. - #: - #: .. versionadded:: 0.9 - self.teardown_appcontext_funcs = [] - - #: A dictionary with lists of functions that are called before the - #: :attr:`before_request_funcs` functions. The key of the dictionary is - #: the name of the blueprint this function is active for, or ``None`` - #: for all requests. To register a function, use - #: :meth:`url_value_preprocessor`. - #: - #: .. versionadded:: 0.7 - self.url_value_preprocessors = {} - - #: A dictionary with lists of functions that can be used as URL value - #: preprocessors. The key ``None`` here is used for application wide - #: callbacks, otherwise the key is the name of the blueprint. - #: Each of these functions has the chance to modify the dictionary - #: of URL values before they are used as the keyword arguments of the - #: view function. For each function registered this one should also - #: provide a :meth:`url_defaults` function that adds the parameters - #: automatically again that were removed that way. - #: - #: .. versionadded:: 0.7 - self.url_default_functions = {} - - #: A dictionary with list of functions that are called without argument - #: to populate the template context. The key of the dictionary is the - #: name of the blueprint this function is active for, ``None`` for all - #: requests. Each returns a dictionary that the template context is - #: updated with. To register a function here, use the - #: :meth:`context_processor` decorator. - self.template_context_processors = { - None: [_default_template_ctx_processor] - } - - #: A list of shell context processor functions that should be run - #: when a shell context is created. - #: - #: .. 
versionadded:: 0.11 - self.shell_context_processors = [] - - #: all the attached blueprints in a dictionary by name. Blueprints - #: can be attached multiple times so this dictionary does not tell - #: you how often they got attached. - #: - #: .. versionadded:: 0.7 - self.blueprints = {} - self._blueprint_order = [] - - #: a place where extensions can store application specific state. For - #: example this is where an extension could store database engines and - #: similar things. For backwards compatibility extensions should register - #: themselves like this:: - #: - #: if not hasattr(app, 'extensions'): - #: app.extensions = {} - #: app.extensions['extensionname'] = SomeObject() - #: - #: The key must match the name of the extension module. For example in - #: case of a "Flask-Foo" extension in `flask_foo`, the key would be - #: ``'foo'``. - #: - #: .. versionadded:: 0.7 - self.extensions = {} - - #: The :class:`~werkzeug.routing.Map` for this instance. You can use - #: this to change the routing converters after the class was created - #: but before any routes are connected. Example:: - #: - #: from werkzeug.routing import BaseConverter - #: - #: class ListConverter(BaseConverter): - #: def to_python(self, value): - #: return value.split(',') - #: def to_url(self, values): - #: return ','.join(super(ListConverter, self).to_url(value) - #: for value in values) - #: - #: app = Flask(__name__) - #: app.url_map.converters['list'] = ListConverter - self.url_map = Map() - - self.url_map.host_matching = host_matching - self.subdomain_matching = subdomain_matching - - # tracks internally if the application already handled at least one - # request. - self._got_first_request = False - self._before_request_lock = Lock() - - # Add a static route using the provided static_url_path, static_host, - # and static_folder if there is a configured static_folder. - # Note we do this without checking if static_folder exists. - # For one, it might be created while the server is running (e.g. during - # development). Also, Google App Engine stores static files somewhere - if self.has_static_folder: - assert bool(static_host) == host_matching, 'Invalid static_host/host_matching combination' - self.add_url_rule( - self.static_url_path + '/', - endpoint='static', - host=static_host, - view_func=self.send_static_file - ) - - #: The click command line context for this application. Commands - #: registered here show up in the :command:`flask` command once the - #: application has been discovered. The default commands are - #: provided by Flask itself and can be overridden. - #: - #: This is an instance of a :class:`click.Group` object. - self.cli = cli.AppGroup(self.name) - - @locked_cached_property - def name(self): - """The name of the application. This is usually the import name - with the difference that it's guessed from the run file if the - import name is main. This name is used as a display name when - Flask needs the name of the application. It can be set and overridden - to change the value. - - .. versionadded:: 0.8 - """ - if self.import_name == '__main__': - fn = getattr(sys.modules['__main__'], '__file__', None) - if fn is None: - return '__main__' - return os.path.splitext(os.path.basename(fn))[0] - return self.import_name - - @property - def propagate_exceptions(self): - """Returns the value of the ``PROPAGATE_EXCEPTIONS`` configuration - value in case it's set, otherwise a sensible default is returned. - - .. 
versionadded:: 0.7 - """ - rv = self.config['PROPAGATE_EXCEPTIONS'] - if rv is not None: - return rv - return self.testing or self.debug - - @property - def preserve_context_on_exception(self): - """Returns the value of the ``PRESERVE_CONTEXT_ON_EXCEPTION`` - configuration value in case it's set, otherwise a sensible default - is returned. - - .. versionadded:: 0.7 - """ - rv = self.config['PRESERVE_CONTEXT_ON_EXCEPTION'] - if rv is not None: - return rv - return self.debug - - @locked_cached_property - def logger(self): - """The ``'flask.app'`` logger, a standard Python - :class:`~logging.Logger`. - - In debug mode, the logger's :attr:`~logging.Logger.level` will be set - to :data:`~logging.DEBUG`. - - If there are no handlers configured, a default handler will be added. - See :ref:`logging` for more information. - - .. versionchanged:: 1.0 - Behavior was simplified. The logger is always named - ``flask.app``. The level is only set during configuration, it - doesn't check ``app.debug`` each time. Only one format is used, - not different ones depending on ``app.debug``. No handlers are - removed, and a handler is only added if no handlers are already - configured. - - .. versionadded:: 0.3 - """ - return create_logger(self) - - @locked_cached_property - def jinja_env(self): - """The Jinja2 environment used to load templates.""" - return self.create_jinja_environment() - - @property - def got_first_request(self): - """This attribute is set to ``True`` if the application started - handling the first request. - - .. versionadded:: 0.8 - """ - return self._got_first_request - - def make_config(self, instance_relative=False): - """Used to create the config attribute by the Flask constructor. - The `instance_relative` parameter is passed in from the constructor - of Flask (there named `instance_relative_config`) and indicates if - the config should be relative to the instance path or the root path - of the application. - - .. versionadded:: 0.8 - """ - root_path = self.root_path - if instance_relative: - root_path = self.instance_path - defaults = dict(self.default_config) - defaults['ENV'] = get_env() - defaults['DEBUG'] = get_debug_flag() - return self.config_class(root_path, defaults) - - def auto_find_instance_path(self): - """Tries to locate the instance path if it was not provided to the - constructor of the application class. It will basically calculate - the path to a folder named ``instance`` next to your main file or - the package. - - .. versionadded:: 0.8 - """ - prefix, package_path = find_package(self.import_name) - if prefix is None: - return os.path.join(package_path, 'instance') - return os.path.join(prefix, 'var', self.name + '-instance') - - def open_instance_resource(self, resource, mode='rb'): - """Opens a resource from the application's instance folder - (:attr:`instance_path`). Otherwise works like - :meth:`open_resource`. Instance resources can also be opened for - writing. - - :param resource: the name of the resource. To access resources within - subfolders use forward slashes as separator. - :param mode: resource file opening mode, default is 'rb'. - """ - return open(os.path.join(self.instance_path, resource), mode) - - def _get_templates_auto_reload(self): - """Reload templates when they are changed. Used by - :meth:`create_jinja_environment`. - - This attribute can be configured with :data:`TEMPLATES_AUTO_RELOAD`. If - not set, it will be enabled in debug mode. - - .. 
versionadded:: 1.0 - This property was added but the underlying config and behavior - already existed. - """ - rv = self.config['TEMPLATES_AUTO_RELOAD'] - return rv if rv is not None else self.debug - - def _set_templates_auto_reload(self, value): - self.config['TEMPLATES_AUTO_RELOAD'] = value - - templates_auto_reload = property( - _get_templates_auto_reload, _set_templates_auto_reload - ) - del _get_templates_auto_reload, _set_templates_auto_reload - - def create_jinja_environment(self): - """Creates the Jinja2 environment based on :attr:`jinja_options` - and :meth:`select_jinja_autoescape`. Since 0.7 this also adds - the Jinja2 globals and filters after initialization. Override - this function to customize the behavior. - - .. versionadded:: 0.5 - .. versionchanged:: 0.11 - ``Environment.auto_reload`` set in accordance with - ``TEMPLATES_AUTO_RELOAD`` configuration option. - """ - options = dict(self.jinja_options) - - if 'autoescape' not in options: - options['autoescape'] = self.select_jinja_autoescape - - if 'auto_reload' not in options: - options['auto_reload'] = self.templates_auto_reload - - rv = self.jinja_environment(self, **options) - rv.globals.update( - url_for=url_for, - get_flashed_messages=get_flashed_messages, - config=self.config, - # request, session and g are normally added with the - # context processor for efficiency reasons but for imported - # templates we also want the proxies in there. - request=request, - session=session, - g=g - ) - rv.filters['tojson'] = json.tojson_filter - return rv - - def create_global_jinja_loader(self): - """Creates the loader for the Jinja2 environment. Can be used to - override just the loader and keeping the rest unchanged. It's - discouraged to override this function. Instead one should override - the :meth:`jinja_loader` function instead. - - The global loader dispatches between the loaders of the application - and the individual blueprints. - - .. versionadded:: 0.7 - """ - return DispatchingJinjaLoader(self) - - def select_jinja_autoescape(self, filename): - """Returns ``True`` if autoescaping should be active for the given - template name. If no template name is given, returns `True`. - - .. versionadded:: 0.5 - """ - if filename is None: - return True - return filename.endswith(('.html', '.htm', '.xml', '.xhtml')) - - def update_template_context(self, context): - """Update the template context with some commonly used variables. - This injects request, session, config and g into the template - context as well as everything template context processors want - to inject. Note that the as of Flask 0.6, the original values - in the context will not be overridden if a context processor - decides to return a value with the same key. - - :param context: the context as a dictionary that is updated in place - to add extra variables. - """ - funcs = self.template_context_processors[None] - reqctx = _request_ctx_stack.top - if reqctx is not None: - bp = reqctx.request.blueprint - if bp is not None and bp in self.template_context_processors: - funcs = chain(funcs, self.template_context_processors[bp]) - orig_ctx = context.copy() - for func in funcs: - context.update(func()) - # make sure the original values win. This makes it possible to - # easier add new variables in context processors without breaking - # existing views. - context.update(orig_ctx) - - def make_shell_context(self): - """Returns the shell context for an interactive shell for this - application. This runs all the registered shell context - processors. - - .. 
versionadded:: 0.11 - """ - rv = {'app': self, 'g': g} - for processor in self.shell_context_processors: - rv.update(processor()) - return rv - - #: What environment the app is running in. Flask and extensions may - #: enable behaviors based on the environment, such as enabling debug - #: mode. This maps to the :data:`ENV` config key. This is set by the - #: :envvar:`FLASK_ENV` environment variable and may not behave as - #: expected if set in code. - #: - #: **Do not enable development when deploying in production.** - #: - #: Default: ``'production'`` - env = ConfigAttribute('ENV') - - def _get_debug(self): - return self.config['DEBUG'] - - def _set_debug(self, value): - self.config['DEBUG'] = value - self.jinja_env.auto_reload = self.templates_auto_reload - - #: Whether debug mode is enabled. When using ``flask run`` to start - #: the development server, an interactive debugger will be shown for - #: unhandled exceptions, and the server will be reloaded when code - #: changes. This maps to the :data:`DEBUG` config key. This is - #: enabled when :attr:`env` is ``'development'`` and is overridden - #: by the ``FLASK_DEBUG`` environment variable. It may not behave as - #: expected if set in code. - #: - #: **Do not enable debug mode when deploying in production.** - #: - #: Default: ``True`` if :attr:`env` is ``'development'``, or - #: ``False`` otherwise. - debug = property(_get_debug, _set_debug) - del _get_debug, _set_debug - - def run(self, host=None, port=None, debug=None, - load_dotenv=True, **options): - """Runs the application on a local development server. - - Do not use ``run()`` in a production setting. It is not intended to - meet security and performance requirements for a production server. - Instead, see :ref:`deployment` for WSGI server recommendations. - - If the :attr:`debug` flag is set the server will automatically reload - for code changes and show a debugger in case an exception happened. - - If you want to run the application in debug mode, but disable the - code execution on the interactive debugger, you can pass - ``use_evalex=False`` as parameter. This will keep the debugger's - traceback screen active, but disable code execution. - - It is not recommended to use this function for development with - automatic reloading as this is badly supported. Instead you should - be using the :command:`flask` command line script's ``run`` support. - - .. admonition:: Keep in Mind - - Flask will suppress any server error with a generic error page - unless it is in debug mode. As such to enable just the - interactive debugger without the code reloading, you have to - invoke :meth:`run` with ``debug=True`` and ``use_reloader=False``. - Setting ``use_debugger`` to ``True`` without being in debug mode - won't catch any exceptions because there won't be any to - catch. - - :param host: the hostname to listen on. Set this to ``'0.0.0.0'`` to - have the server available externally as well. Defaults to - ``'127.0.0.1'`` or the host in the ``SERVER_NAME`` config variable - if present. - :param port: the port of the webserver. Defaults to ``5000`` or the - port defined in the ``SERVER_NAME`` config variable if present. - :param debug: if given, enable or disable debug mode. See - :attr:`debug`. - :param load_dotenv: Load the nearest :file:`.env` and :file:`.flaskenv` - files to set environment variables. Will also change the working - directory to the directory containing the first file found. - :param options: the options to be forwarded to the underlying Werkzeug - server. 
See :func:`werkzeug.serving.run_simple` for more - information. - - .. versionchanged:: 1.0 - If installed, python-dotenv will be used to load environment - variables from :file:`.env` and :file:`.flaskenv` files. - - If set, the :envvar:`FLASK_ENV` and :envvar:`FLASK_DEBUG` - environment variables will override :attr:`env` and - :attr:`debug`. - - Threaded mode is enabled by default. - - .. versionchanged:: 0.10 - The default port is now picked from the ``SERVER_NAME`` - variable. - """ - # Change this into a no-op if the server is invoked from the - # command line. Have a look at cli.py for more information. - if os.environ.get('FLASK_RUN_FROM_CLI') == 'true': - from .debughelpers import explain_ignored_app_run - explain_ignored_app_run() - return - - if get_load_dotenv(load_dotenv): - cli.load_dotenv() - - # if set, let env vars override previous values - if 'FLASK_ENV' in os.environ: - self.env = get_env() - self.debug = get_debug_flag() - elif 'FLASK_DEBUG' in os.environ: - self.debug = get_debug_flag() - - # debug passed to method overrides all other sources - if debug is not None: - self.debug = bool(debug) - - _host = '127.0.0.1' - _port = 5000 - server_name = self.config.get('SERVER_NAME') - sn_host, sn_port = None, None - - if server_name: - sn_host, _, sn_port = server_name.partition(':') - - host = host or sn_host or _host - port = int(port or sn_port or _port) - - options.setdefault('use_reloader', self.debug) - options.setdefault('use_debugger', self.debug) - options.setdefault('threaded', True) - - cli.show_server_banner(self.env, self.debug, self.name, False) - - from werkzeug.serving import run_simple - - try: - run_simple(host, port, self, **options) - finally: - # reset the first request information if the development server - # reset normally. This makes it possible to restart the server - # without reloader and that stuff from an interactive shell. - self._got_first_request = False - - def test_client(self, use_cookies=True, **kwargs): - """Creates a test client for this application. For information - about unit testing head over to :ref:`testing`. - - Note that if you are testing for assertions or exceptions in your - application code, you must set ``app.testing = True`` in order for the - exceptions to propagate to the test client. Otherwise, the exception - will be handled by the application (not visible to the test client) and - the only indication of an AssertionError or other exception will be a - 500 status code response to the test client. See the :attr:`testing` - attribute. For example:: - - app.testing = True - client = app.test_client() - - The test client can be used in a ``with`` block to defer the closing down - of the context until the end of the ``with`` block. This is useful if - you want to access the context locals for testing:: - - with app.test_client() as c: - rv = c.get('/?vodka=42') - assert request.args['vodka'] == '42' - - Additionally, you may pass optional keyword arguments that will then - be passed to the application's :attr:`test_client_class` constructor. - For example:: - - from flask.testing import FlaskClient - - class CustomClient(FlaskClient): - def __init__(self, *args, **kwargs): - self._authentication = kwargs.pop("authentication") - super(CustomClient,self).__init__( *args, **kwargs) - - app.test_client_class = CustomClient - client = app.test_client(authentication='Basic ....') - - See :class:`~flask.testing.FlaskClient` for more information. - - .. versionchanged:: 0.4 - added support for ``with`` block usage for the client. 
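Putting the test-client notes above together, a runnable sketch; the route and query string mirror the docstring's own example and are otherwise illustrative::

    from flask import Flask, request

    app = Flask(__name__)

    @app.route('/')
    def index():
        return request.args.get('vodka', '')

    app.testing = True  # let exceptions propagate to the test client

    with app.test_client() as c:
        rv = c.get('/?vodka=42')
        # Inside the ``with`` block the request context is still available.
        assert request.args['vodka'] == '42'
        assert rv.data == b'42'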
- - .. versionadded:: 0.7 - The `use_cookies` parameter was added as well as the ability - to override the client to be used by setting the - :attr:`test_client_class` attribute. - - .. versionchanged:: 0.11 - Added `**kwargs` to support passing additional keyword arguments to - the constructor of :attr:`test_client_class`. - """ - cls = self.test_client_class - if cls is None: - from flask.testing import FlaskClient as cls - return cls(self, self.response_class, use_cookies=use_cookies, **kwargs) - - def test_cli_runner(self, **kwargs): - """Create a CLI runner for testing CLI commands. - See :ref:`testing-cli`. - - Returns an instance of :attr:`test_cli_runner_class`, by default - :class:`~flask.testing.FlaskCliRunner`. The Flask app object is - passed as the first argument. - - .. versionadded:: 1.0 - """ - cls = self.test_cli_runner_class - - if cls is None: - from flask.testing import FlaskCliRunner as cls - - return cls(self, **kwargs) - - def open_session(self, request): - """Creates or opens a new session. Default implementation stores all - session data in a signed cookie. This requires that the - :attr:`secret_key` is set. Instead of overriding this method - we recommend replacing the :class:`session_interface`. - - .. deprecated: 1.0 - Will be removed in 1.1. Use ``session_interface.open_session`` - instead. - - :param request: an instance of :attr:`request_class`. - """ - - warnings.warn(DeprecationWarning( - '"open_session" is deprecated and will be removed in 1.1. Use' - ' "session_interface.open_session" instead.' - )) - return self.session_interface.open_session(self, request) - - def save_session(self, session, response): - """Saves the session if it needs updates. For the default - implementation, check :meth:`open_session`. Instead of overriding this - method we recommend replacing the :class:`session_interface`. - - .. deprecated: 1.0 - Will be removed in 1.1. Use ``session_interface.save_session`` - instead. - - :param session: the session to be saved (a - :class:`~werkzeug.contrib.securecookie.SecureCookie` - object) - :param response: an instance of :attr:`response_class` - """ - - warnings.warn(DeprecationWarning( - '"save_session" is deprecated and will be removed in 1.1. Use' - ' "session_interface.save_session" instead.' - )) - return self.session_interface.save_session(self, session, response) - - def make_null_session(self): - """Creates a new instance of a missing session. Instead of overriding - this method we recommend replacing the :class:`session_interface`. - - .. deprecated: 1.0 - Will be removed in 1.1. Use ``session_interface.make_null_session`` - instead. - - .. versionadded:: 0.7 - """ - - warnings.warn(DeprecationWarning( - '"make_null_session" is deprecated and will be removed in 1.1. Use' - ' "session_interface.make_null_session" instead.' - )) - return self.session_interface.make_null_session(self) - - @setupmethod - def register_blueprint(self, blueprint, **options): - """Register a :class:`~flask.Blueprint` on the application. Keyword - arguments passed to this method will override the defaults set on the - blueprint. - - Calls the blueprint's :meth:`~flask.Blueprint.register` method after - recording the blueprint in the application's :attr:`blueprints`. - - :param blueprint: The blueprint to register. - :param url_prefix: Blueprint routes will be prefixed with this. - :param subdomain: Blueprint routes will match on this subdomain. - :param url_defaults: Blueprint routes will use these default values for - view arguments. 
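A sketch of the blueprint registration described above (the remaining parameters continue below); the blueprint name and URL prefix are illustrative::

    from flask import Blueprint, Flask

    bp = Blueprint('admin', __name__)

    @bp.route('/dashboard')
    def dashboard():
        return 'admin dashboard'

    app = Flask(__name__)
    # Keyword arguments override defaults set on the blueprint;
    # here every blueprint route is mounted under /admin.
    app.register_blueprint(bp, url_prefix='/admin')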
- :param options: Additional keyword arguments are passed to - :class:`~flask.blueprints.BlueprintSetupState`. They can be - accessed in :meth:`~flask.Blueprint.record` callbacks. - - .. versionadded:: 0.7 - """ - first_registration = False - - if blueprint.name in self.blueprints: - assert self.blueprints[blueprint.name] is blueprint, ( - 'A name collision occurred between blueprints %r and %r. Both' - ' share the same name "%s". Blueprints that are created on the' - ' fly need unique names.' % ( - blueprint, self.blueprints[blueprint.name], blueprint.name - ) - ) - else: - self.blueprints[blueprint.name] = blueprint - self._blueprint_order.append(blueprint) - first_registration = True - - blueprint.register(self, options, first_registration) - - def iter_blueprints(self): - """Iterates over all blueprints by the order they were registered. - - .. versionadded:: 0.11 - """ - return iter(self._blueprint_order) - - @setupmethod - def add_url_rule(self, rule, endpoint=None, view_func=None, - provide_automatic_options=None, **options): - """Connects a URL rule. Works exactly like the :meth:`route` - decorator. If a view_func is provided it will be registered with the - endpoint. - - Basically this example:: - - @app.route('/') - def index(): - pass - - Is equivalent to the following:: - - def index(): - pass - app.add_url_rule('/', 'index', index) - - If the view_func is not provided you will need to connect the endpoint - to a view function like so:: - - app.view_functions['index'] = index - - Internally :meth:`route` invokes :meth:`add_url_rule` so if you want - to customize the behavior via subclassing you only need to change - this method. - - For more information refer to :ref:`url-route-registrations`. - - .. versionchanged:: 0.2 - `view_func` parameter added. - - .. versionchanged:: 0.6 - ``OPTIONS`` is added automatically as method. - - :param rule: the URL rule as string - :param endpoint: the endpoint for the registered URL rule. Flask - itself assumes the name of the view function as - endpoint - :param view_func: the function to call when serving a request to the - provided endpoint - :param provide_automatic_options: controls whether the ``OPTIONS`` - method should be added automatically. This can also be controlled - by setting the ``view_func.provide_automatic_options = False`` - before adding the rule. - :param options: the options to be forwarded to the underlying - :class:`~werkzeug.routing.Rule` object. A change - to Werkzeug is handling of method options. methods - is a list of methods this rule should be limited - to (``GET``, ``POST`` etc.). By default a rule - just listens for ``GET`` (and implicitly ``HEAD``). - Starting with Flask 0.6, ``OPTIONS`` is implicitly - added and handled by the standard request handling. - """ - if endpoint is None: - endpoint = _endpoint_from_view_func(view_func) - options['endpoint'] = endpoint - methods = options.pop('methods', None) - - # if the methods are not given and the view_func object knows its - # methods we can use that instead. If neither exists, we go with - # a tuple of only ``GET`` as default. 
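The equivalence between :meth:`route` and :meth:`add_url_rule`, and the method handling just described, in a short sketch; the endpoint and rule names are illustrative::

    from flask import Flask

    app = Flask(__name__)

    def index():
        return 'Hello World'

    # Same effect as decorating ``index`` with @app.route('/', methods=[...]);
    # OPTIONS (and HEAD for GET) are still added implicitly.
    app.add_url_rule('/', 'index', index, methods=['GET', 'POST'])

    # A rule can also point at an already registered endpoint.
    app.add_url_rule('/alias', endpoint='index')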
- if methods is None: - methods = getattr(view_func, 'methods', None) or ('GET',) - if isinstance(methods, string_types): - raise TypeError('Allowed methods have to be iterables of strings, ' - 'for example: @app.route(..., methods=["POST"])') - methods = set(item.upper() for item in methods) - - # Methods that should always be added - required_methods = set(getattr(view_func, 'required_methods', ())) - - # starting with Flask 0.8 the view_func object can disable and - # force-enable the automatic options handling. - if provide_automatic_options is None: - provide_automatic_options = getattr(view_func, - 'provide_automatic_options', None) - - if provide_automatic_options is None: - if 'OPTIONS' not in methods: - provide_automatic_options = True - required_methods.add('OPTIONS') - else: - provide_automatic_options = False - - # Add the required methods now. - methods |= required_methods - - rule = self.url_rule_class(rule, methods=methods, **options) - rule.provide_automatic_options = provide_automatic_options - - self.url_map.add(rule) - if view_func is not None: - old_func = self.view_functions.get(endpoint) - if old_func is not None and old_func != view_func: - raise AssertionError('View function mapping is overwriting an ' - 'existing endpoint function: %s' % endpoint) - self.view_functions[endpoint] = view_func - - def route(self, rule, **options): - """A decorator that is used to register a view function for a - given URL rule. This does the same thing as :meth:`add_url_rule` - but is intended for decorator usage:: - - @app.route('/') - def index(): - return 'Hello World' - - For more information refer to :ref:`url-route-registrations`. - - :param rule: the URL rule as string - :param endpoint: the endpoint for the registered URL rule. Flask - itself assumes the name of the view function as - endpoint - :param options: the options to be forwarded to the underlying - :class:`~werkzeug.routing.Rule` object. A change - to Werkzeug is handling of method options. methods - is a list of methods this rule should be limited - to (``GET``, ``POST`` etc.). By default a rule - just listens for ``GET`` (and implicitly ``HEAD``). - Starting with Flask 0.6, ``OPTIONS`` is implicitly - added and handled by the standard request handling. - """ - def decorator(f): - endpoint = options.pop('endpoint', None) - self.add_url_rule(rule, endpoint, f, **options) - return f - return decorator - - @setupmethod - def endpoint(self, endpoint): - """A decorator to register a function as an endpoint. - Example:: - - @app.endpoint('example.endpoint') - def example(): - return "example" - - :param endpoint: the name of the endpoint - """ - def decorator(f): - self.view_functions[endpoint] = f - return f - return decorator - - @staticmethod - def _get_exc_class_and_code(exc_class_or_code): - """Ensure that we register only exceptions as handler keys""" - if isinstance(exc_class_or_code, integer_types): - exc_class = default_exceptions[exc_class_or_code] - else: - exc_class = exc_class_or_code - - assert issubclass(exc_class, Exception) - - if issubclass(exc_class, HTTPException): - return exc_class, exc_class.code - else: - return exc_class, None - - @setupmethod - def errorhandler(self, code_or_exception): - """Register a function to handle errors by code or exception class. - - A decorator that is used to register a function given an - error code. 
Example:: - - @app.errorhandler(404) - def page_not_found(error): - return 'This page does not exist', 404 - - You can also register handlers for arbitrary exceptions:: - - @app.errorhandler(DatabaseError) - def special_exception_handler(error): - return 'Database connection failed', 500 - - .. versionadded:: 0.7 - Use :meth:`register_error_handler` instead of modifying - :attr:`error_handler_spec` directly, for application wide error - handlers. - - .. versionadded:: 0.7 - One can now additionally also register custom exception types - that do not necessarily have to be a subclass of the - :class:`~werkzeug.exceptions.HTTPException` class. - - :param code_or_exception: the code as integer for the handler, or - an arbitrary exception - """ - def decorator(f): - self._register_error_handler(None, code_or_exception, f) - return f - return decorator - - @setupmethod - def register_error_handler(self, code_or_exception, f): - """Alternative error attach function to the :meth:`errorhandler` - decorator that is more straightforward to use for non decorator - usage. - - .. versionadded:: 0.7 - """ - self._register_error_handler(None, code_or_exception, f) - - @setupmethod - def _register_error_handler(self, key, code_or_exception, f): - """ - :type key: None|str - :type code_or_exception: int|T<=Exception - :type f: callable - """ - if isinstance(code_or_exception, HTTPException): # old broken behavior - raise ValueError( - 'Tried to register a handler for an exception instance {0!r}.' - ' Handlers can only be registered for exception classes or' - ' HTTP error codes.'.format(code_or_exception) - ) - - try: - exc_class, code = self._get_exc_class_and_code(code_or_exception) - except KeyError: - raise KeyError( - "'{0}' is not a recognized HTTP error code. Use a subclass of" - " HTTPException with that code instead.".format(code_or_exception) - ) - - handlers = self.error_handler_spec.setdefault(key, {}).setdefault(code, {}) - handlers[exc_class] = f - - @setupmethod - def template_filter(self, name=None): - """A decorator that is used to register custom template filter. - You can specify a name for the filter, otherwise the function - name will be used. Example:: - - @app.template_filter() - def reverse(s): - return s[::-1] - - :param name: the optional name of the filter, otherwise the - function name will be used. - """ - def decorator(f): - self.add_template_filter(f, name=name) - return f - return decorator - - @setupmethod - def add_template_filter(self, f, name=None): - """Register a custom template filter. Works exactly like the - :meth:`template_filter` decorator. - - :param name: the optional name of the filter, otherwise the - function name will be used. - """ - self.jinja_env.filters[name or f.__name__] = f - - @setupmethod - def template_test(self, name=None): - """A decorator that is used to register custom template test. - You can specify a name for the test, otherwise the function - name will be used. Example:: - - @app.template_test() - def is_prime(n): - if n == 2: - return True - for i in range(2, int(math.ceil(math.sqrt(n))) + 1): - if n % i == 0: - return False - return True - - .. versionadded:: 0.10 - - :param name: the optional name of the test, otherwise the - function name will be used. - """ - def decorator(f): - self.add_template_test(f, name=name) - return f - return decorator - - @setupmethod - def add_template_test(self, f, name=None): - """Register a custom template test. Works exactly like the - :meth:`template_test` decorator. - - .. 
versionadded:: 0.10 - - :param name: the optional name of the test, otherwise the - function name will be used. - """ - self.jinja_env.tests[name or f.__name__] = f - - @setupmethod - def template_global(self, name=None): - """A decorator that is used to register a custom template global function. - You can specify a name for the global function, otherwise the function - name will be used. Example:: - - @app.template_global() - def double(n): - return 2 * n - - .. versionadded:: 0.10 - - :param name: the optional name of the global function, otherwise the - function name will be used. - """ - def decorator(f): - self.add_template_global(f, name=name) - return f - return decorator - - @setupmethod - def add_template_global(self, f, name=None): - """Register a custom template global function. Works exactly like the - :meth:`template_global` decorator. - - .. versionadded:: 0.10 - - :param name: the optional name of the global function, otherwise the - function name will be used. - """ - self.jinja_env.globals[name or f.__name__] = f - - @setupmethod - def before_request(self, f): - """Registers a function to run before each request. - - For example, this can be used to open a database connection, or to load - the logged in user from the session. - - The function will be called without any arguments. If it returns a - non-None value, the value is handled as if it was the return value from - the view, and further request handling is stopped. - """ - self.before_request_funcs.setdefault(None, []).append(f) - return f - - @setupmethod - def before_first_request(self, f): - """Registers a function to be run before the first request to this - instance of the application. - - The function will be called without any arguments and its return - value is ignored. - - .. versionadded:: 0.8 - """ - self.before_first_request_funcs.append(f) - return f - - @setupmethod - def after_request(self, f): - """Register a function to be run after each request. - - Your function must take one parameter, an instance of - :attr:`response_class` and return a new response object or the - same (see :meth:`process_response`). - - As of Flask 0.7 this function might not be executed at the end of the - request in case an unhandled exception occurred. - """ - self.after_request_funcs.setdefault(None, []).append(f) - return f - - @setupmethod - def teardown_request(self, f): - """Register a function to be run at the end of each request, - regardless of whether there was an exception or not. These functions - are executed when the request context is popped, even if not an - actual request was performed. - - Example:: - - ctx = app.test_request_context() - ctx.push() - ... - ctx.pop() - - When ``ctx.pop()`` is executed in the above example, the teardown - functions are called just before the request context moves from the - stack of active contexts. This becomes relevant if you are using - such constructs in tests. - - Generally teardown functions must take every necessary step to avoid - that they will fail. If they do execute code that might fail they - will have to surround the execution of these code by try/except - statements and log occurring errors. - - When a teardown function was called because of an exception it will - be passed an error object. - - The return values of teardown functions are ignored. - - .. admonition:: Debug Note - - In debug mode Flask will not tear down a request on an exception - immediately. Instead it will keep it alive so that the interactive - debugger can still access it. 
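The request-lifecycle hooks documented in this stretch (``before_request``, ``after_request``, ``teardown_request``) compose as in the following sketch; the header and key names are illustrative::

    from flask import Flask, g, request

    app = Flask(__name__)

    @app.before_request
    def load_user():
        # Returning a non-None value here would stop normal dispatching.
        g.user = request.headers.get('X-User', 'anonymous')

    @app.after_request
    def add_header(response):
        # Must take the response object and return one.
        response.headers['X-Handled-By'] = 'example'
        return response

    @app.teardown_request
    def cleanup(error=None):
        # Runs when the request context is popped, even after exceptions;
        # the return value is ignored.
        g.pop('user', None)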
This behavior can be controlled - by the ``PRESERVE_CONTEXT_ON_EXCEPTION`` configuration variable. - """ - self.teardown_request_funcs.setdefault(None, []).append(f) - return f - - @setupmethod - def teardown_appcontext(self, f): - """Registers a function to be called when the application context - ends. These functions are typically also called when the request - context is popped. - - Example:: - - ctx = app.app_context() - ctx.push() - ... - ctx.pop() - - When ``ctx.pop()`` is executed in the above example, the teardown - functions are called just before the app context moves from the - stack of active contexts. This becomes relevant if you are using - such constructs in tests. - - Since a request context typically also manages an application - context it would also be called when you pop a request context. - - When a teardown function was called because of an unhandled exception - it will be passed an error object. If an :meth:`errorhandler` is - registered, it will handle the exception and the teardown will not - receive it. - - The return values of teardown functions are ignored. - - .. versionadded:: 0.9 - """ - self.teardown_appcontext_funcs.append(f) - return f - - @setupmethod - def context_processor(self, f): - """Registers a template context processor function.""" - self.template_context_processors[None].append(f) - return f - - @setupmethod - def shell_context_processor(self, f): - """Registers a shell context processor function. - - .. versionadded:: 0.11 - """ - self.shell_context_processors.append(f) - return f - - @setupmethod - def url_value_preprocessor(self, f): - """Register a URL value preprocessor function for all view - functions in the application. These functions will be called before the - :meth:`before_request` functions. - - The function can modify the values captured from the matched url before - they are passed to the view. For example, this can be used to pop a - common language code value and place it in ``g`` rather than pass it to - every view. - - The function is passed the endpoint name and values dict. The return - value is ignored. - """ - self.url_value_preprocessors.setdefault(None, []).append(f) - return f - - @setupmethod - def url_defaults(self, f): - """Callback function for URL defaults for all view functions of the - application. It's called with the endpoint and values and should - update the values passed in place. - """ - self.url_default_functions.setdefault(None, []).append(f) - return f - - def _find_error_handler(self, e): - """Return a registered error handler for an exception in this order: - blueprint handler for a specific code, app handler for a specific code, - blueprint handler for an exception class, app handler for an exception - class, or ``None`` if a suitable handler is not found. - """ - exc_class, code = self._get_exc_class_and_code(type(e)) - - for name, c in ( - (request.blueprint, code), (None, code), - (request.blueprint, None), (None, None) - ): - handler_map = self.error_handler_spec.setdefault(name, {}).get(c) - - if not handler_map: - continue - - for cls in exc_class.__mro__: - handler = handler_map.get(cls) - - if handler is not None: - return handler - - def handle_http_exception(self, e): - """Handles an HTTP exception. By default this will invoke the - registered error handlers and fall back to returning the - exception as response. - - .. versionadded:: 0.3 - """ - # Proxy exceptions don't have error codes. 
We want to always return - # those unchanged as errors - if e.code is None: - return e - - handler = self._find_error_handler(e) - if handler is None: - return e - return handler(e) - - def trap_http_exception(self, e): - """Checks if an HTTP exception should be trapped or not. By default - this will return ``False`` for all exceptions except for a bad request - key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to ``True``. It - also returns ``True`` if ``TRAP_HTTP_EXCEPTIONS`` is set to ``True``. - - This is called for all HTTP exceptions raised by a view function. - If it returns ``True`` for any exception the error handler for this - exception is not called and it shows up as regular exception in the - traceback. This is helpful for debugging implicitly raised HTTP - exceptions. - - .. versionchanged:: 1.0 - Bad request errors are not trapped by default in debug mode. - - .. versionadded:: 0.8 - """ - if self.config['TRAP_HTTP_EXCEPTIONS']: - return True - - trap_bad_request = self.config['TRAP_BAD_REQUEST_ERRORS'] - - # if unset, trap key errors in debug mode - if ( - trap_bad_request is None and self.debug - and isinstance(e, BadRequestKeyError) - ): - return True - - if trap_bad_request: - return isinstance(e, BadRequest) - - return False - - def handle_user_exception(self, e): - """This method is called whenever an exception occurs that should be - handled. A special case are - :class:`~werkzeug.exception.HTTPException`\s which are forwarded by - this function to the :meth:`handle_http_exception` method. This - function will either return a response value or reraise the - exception with the same traceback. - - .. versionchanged:: 1.0 - Key errors raised from request data like ``form`` show the the bad - key in debug mode rather than a generic bad request message. - - .. versionadded:: 0.7 - """ - exc_type, exc_value, tb = sys.exc_info() - assert exc_value is e - # ensure not to trash sys.exc_info() at that point in case someone - # wants the traceback preserved in handle_http_exception. Of course - # we cannot prevent users from trashing it themselves in a custom - # trap_http_exception method so that's their fault then. - - # MultiDict passes the key to the exception, but that's ignored - # when generating the response message. Set an informative - # description for key errors in debug mode or when trapping errors. - if ( - (self.debug or self.config['TRAP_BAD_REQUEST_ERRORS']) - and isinstance(e, BadRequestKeyError) - # only set it if it's still the default description - and e.description is BadRequestKeyError.description - ): - e.description = "KeyError: '{0}'".format(*e.args) - - if isinstance(e, HTTPException) and not self.trap_http_exception(e): - return self.handle_http_exception(e) - - handler = self._find_error_handler(e) - - if handler is None: - reraise(exc_type, exc_value, tb) - return handler(e) - - def handle_exception(self, e): - """Default exception handling that kicks in when an exception - occurs that is not caught. In debug mode the exception will - be re-raised immediately, otherwise it is logged and the handler - for a 500 internal server error is used. If no such handler - exists, a default 500 internal server error message is displayed. - - .. 
versionadded:: 0.3 - """ - exc_type, exc_value, tb = sys.exc_info() - - got_request_exception.send(self, exception=e) - handler = self._find_error_handler(InternalServerError()) - - if self.propagate_exceptions: - # if we want to repropagate the exception, we can attempt to - # raise it with the whole traceback in case we can do that - # (the function was actually called from the except part) - # otherwise, we just raise the error again - if exc_value is e: - reraise(exc_type, exc_value, tb) - else: - raise e - - self.log_exception((exc_type, exc_value, tb)) - if handler is None: - return InternalServerError() - return self.finalize_request(handler(e), from_error_handler=True) - - def log_exception(self, exc_info): - """Logs an exception. This is called by :meth:`handle_exception` - if debugging is disabled and right before the handler is called. - The default implementation logs the exception as error on the - :attr:`logger`. - - .. versionadded:: 0.8 - """ - self.logger.error('Exception on %s [%s]' % ( - request.path, - request.method - ), exc_info=exc_info) - - def raise_routing_exception(self, request): - """Exceptions that are recording during routing are reraised with - this method. During debug we are not reraising redirect requests - for non ``GET``, ``HEAD``, or ``OPTIONS`` requests and we're raising - a different error instead to help debug situations. - - :internal: - """ - if not self.debug \ - or not isinstance(request.routing_exception, RequestRedirect) \ - or request.method in ('GET', 'HEAD', 'OPTIONS'): - raise request.routing_exception - - from .debughelpers import FormDataRoutingRedirect - raise FormDataRoutingRedirect(request) - - def dispatch_request(self): - """Does the request dispatching. Matches the URL and returns the - return value of the view or error handler. This does not have to - be a response object. In order to convert the return value to a - proper response object, call :func:`make_response`. - - .. versionchanged:: 0.7 - This no longer does the exception handling, this code was - moved to the new :meth:`full_dispatch_request`. - """ - req = _request_ctx_stack.top.request - if req.routing_exception is not None: - self.raise_routing_exception(req) - rule = req.url_rule - # if we provide automatic options for this URL and the - # request came with the OPTIONS method, reply automatically - if getattr(rule, 'provide_automatic_options', False) \ - and req.method == 'OPTIONS': - return self.make_default_options_response() - # otherwise dispatch to the handler for that endpoint - return self.view_functions[rule.endpoint](**req.view_args) - - def full_dispatch_request(self): - """Dispatches the request and on top of that performs request - pre and postprocessing as well as HTTP exception catching and - error handling. - - .. versionadded:: 0.7 - """ - self.try_trigger_before_first_request_functions() - try: - request_started.send(self) - rv = self.preprocess_request() - if rv is None: - rv = self.dispatch_request() - except Exception as e: - rv = self.handle_user_exception(e) - return self.finalize_request(rv) - - def finalize_request(self, rv, from_error_handler=False): - """Given the return value from a view function this finalizes - the request by converting it into a response and invoking the - postprocessing functions. This is invoked for both normal - request dispatching as well as error handlers. 
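A minimal sketch of how the handlers resolved by ``_find_error_handler`` are usually registered; the JSON bodies are arbitrary examples, not part of this module::

    from flask import Flask, jsonify

    app = Flask(__name__)

    @app.errorhandler(404)
    def not_found(e):
        # looked up by status code for HTTPExceptions
        return jsonify(error='not found'), 404

    @app.errorhandler(KeyError)
    def handle_key_error(e):
        # looked up by walking the exception class's __mro__
        return jsonify(error='missing key'), 400
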
- - Because this means that it might be called as a result of a - failure a special safe mode is available which can be enabled - with the `from_error_handler` flag. If enabled, failures in - response processing will be logged and otherwise ignored. - - :internal: - """ - response = self.make_response(rv) - try: - response = self.process_response(response) - request_finished.send(self, response=response) - except Exception: - if not from_error_handler: - raise - self.logger.exception('Request finalizing failed with an ' - 'error while handling an error') - return response - - def try_trigger_before_first_request_functions(self): - """Called before each request and will ensure that it triggers - the :attr:`before_first_request_funcs` and only exactly once per - application instance (which means process usually). - - :internal: - """ - if self._got_first_request: - return - with self._before_request_lock: - if self._got_first_request: - return - for func in self.before_first_request_funcs: - func() - self._got_first_request = True - - def make_default_options_response(self): - """This method is called to create the default ``OPTIONS`` response. - This can be changed through subclassing to change the default - behavior of ``OPTIONS`` responses. - - .. versionadded:: 0.7 - """ - adapter = _request_ctx_stack.top.url_adapter - if hasattr(adapter, 'allowed_methods'): - methods = adapter.allowed_methods() - else: - # fallback for Werkzeug < 0.7 - methods = [] - try: - adapter.match(method='--') - except MethodNotAllowed as e: - methods = e.valid_methods - except HTTPException as e: - pass - rv = self.response_class() - rv.allow.update(methods) - return rv - - def should_ignore_error(self, error): - """This is called to figure out if an error should be ignored - or not as far as the teardown system is concerned. If this - function returns ``True`` then the teardown handlers will not be - passed the error. - - .. versionadded:: 0.10 - """ - return False - - def make_response(self, rv): - """Convert the return value from a view function to an instance of - :attr:`response_class`. - - :param rv: the return value from the view function. The view function - must return a response. Returning ``None``, or the view ending - without returning, is not allowed. The following types are allowed - for ``view_rv``: - - ``str`` (``unicode`` in Python 2) - A response object is created with the string encoded to UTF-8 - as the body. - - ``bytes`` (``str`` in Python 2) - A response object is created with the bytes as the body. - - ``tuple`` - Either ``(body, status, headers)``, ``(body, status)``, or - ``(body, headers)``, where ``body`` is any of the other types - allowed here, ``status`` is a string or an integer, and - ``headers`` is a dictionary or a list of ``(key, value)`` - tuples. If ``body`` is a :attr:`response_class` instance, - ``status`` overwrites the exiting value and ``headers`` are - extended. - - :attr:`response_class` - The object is returned unchanged. - - other :class:`~werkzeug.wrappers.Response` class - The object is coerced to :attr:`response_class`. - - :func:`callable` - The function is called as a WSGI application. The result is - used to create a response object. - - .. versionchanged:: 0.9 - Previously a tuple was interpreted as the arguments for the - response object. 
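The return-value handling described for :meth:`make_response` allows views such as the following sketch; the routes and header name are made up for illustration::

    from flask import Flask

    app = Flask(__name__)

    @app.route('/text')
    def text():
        # a plain string becomes the body of a 200 response
        return 'hello'

    @app.route('/created')
    def created():
        # (body, status) tuple
        return 'created', 201

    @app.route('/custom')
    def custom():
        # (body, status, headers); headers may be a dict or a list of pairs
        return 'ok', 200, {'X-Example': 'yes'}
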
- """ - - status = headers = None - - # unpack tuple returns - if isinstance(rv, tuple): - len_rv = len(rv) - - # a 3-tuple is unpacked directly - if len_rv == 3: - rv, status, headers = rv - # decide if a 2-tuple has status or headers - elif len_rv == 2: - if isinstance(rv[1], (Headers, dict, tuple, list)): - rv, headers = rv - else: - rv, status = rv - # other sized tuples are not allowed - else: - raise TypeError( - 'The view function did not return a valid response tuple.' - ' The tuple must have the form (body, status, headers),' - ' (body, status), or (body, headers).' - ) - - # the body must not be None - if rv is None: - raise TypeError( - 'The view function did not return a valid response. The' - ' function either returned None or ended without a return' - ' statement.' - ) - - # make sure the body is an instance of the response class - if not isinstance(rv, self.response_class): - if isinstance(rv, (text_type, bytes, bytearray)): - # let the response class set the status and headers instead of - # waiting to do it manually, so that the class can handle any - # special logic - rv = self.response_class(rv, status=status, headers=headers) - status = headers = None - else: - # evaluate a WSGI callable, or coerce a different response - # class to the correct type - try: - rv = self.response_class.force_type(rv, request.environ) - except TypeError as e: - new_error = TypeError( - '{e}\nThe view function did not return a valid' - ' response. The return type must be a string, tuple,' - ' Response instance, or WSGI callable, but it was a' - ' {rv.__class__.__name__}.'.format(e=e, rv=rv) - ) - reraise(TypeError, new_error, sys.exc_info()[2]) - - # prefer the status if it was provided - if status is not None: - if isinstance(status, (text_type, bytes, bytearray)): - rv.status = status - else: - rv.status_code = status - - # extend existing headers with provided headers - if headers: - rv.headers.extend(headers) - - return rv - - def create_url_adapter(self, request): - """Creates a URL adapter for the given request. The URL adapter - is created at a point where the request context is not yet set - up so the request is passed explicitly. - - .. versionadded:: 0.6 - - .. versionchanged:: 0.9 - This can now also be called without a request object when the - URL adapter is created for the application context. - - .. versionchanged:: 1.0 - :data:`SERVER_NAME` no longer implicitly enables subdomain - matching. Use :attr:`subdomain_matching` instead. - """ - if request is not None: - # If subdomain matching is disabled (the default), use the - # default subdomain in all cases. This should be the default - # in Werkzeug but it currently does not have that feature. - subdomain = ((self.url_map.default_subdomain or None) - if not self.subdomain_matching else None) - return self.url_map.bind_to_environ( - request.environ, - server_name=self.config['SERVER_NAME'], - subdomain=subdomain) - # We need at the very least the server name to be set for this - # to work. - if self.config['SERVER_NAME'] is not None: - return self.url_map.bind( - self.config['SERVER_NAME'], - script_name=self.config['APPLICATION_ROOT'], - url_scheme=self.config['PREFERRED_URL_SCHEME']) - - def inject_url_defaults(self, endpoint, values): - """Injects the URL defaults for the given endpoint directly into - the values dictionary passed. This is used internally and - automatically called on URL building. - - .. versionadded:: 0.7 - """ - funcs = self.url_default_functions.get(None, ()) - if '.' 
in endpoint: - bp = endpoint.rsplit('.', 1)[0] - funcs = chain(funcs, self.url_default_functions.get(bp, ())) - for func in funcs: - func(endpoint, values) - - def handle_url_build_error(self, error, endpoint, values): - """Handle :class:`~werkzeug.routing.BuildError` on :meth:`url_for`. - """ - exc_type, exc_value, tb = sys.exc_info() - for handler in self.url_build_error_handlers: - try: - rv = handler(error, endpoint, values) - if rv is not None: - return rv - except BuildError as e: - # make error available outside except block (py3) - error = e - - # At this point we want to reraise the exception. If the error is - # still the same one we can reraise it with the original traceback, - # otherwise we raise it from here. - if error is exc_value: - reraise(exc_type, exc_value, tb) - raise error - - def preprocess_request(self): - """Called before the request is dispatched. Calls - :attr:`url_value_preprocessors` registered with the app and the - current blueprint (if any). Then calls :attr:`before_request_funcs` - registered with the app and the blueprint. - - If any :meth:`before_request` handler returns a non-None value, the - value is handled as if it was the return value from the view, and - further request handling is stopped. - """ - - bp = _request_ctx_stack.top.request.blueprint - - funcs = self.url_value_preprocessors.get(None, ()) - if bp is not None and bp in self.url_value_preprocessors: - funcs = chain(funcs, self.url_value_preprocessors[bp]) - for func in funcs: - func(request.endpoint, request.view_args) - - funcs = self.before_request_funcs.get(None, ()) - if bp is not None and bp in self.before_request_funcs: - funcs = chain(funcs, self.before_request_funcs[bp]) - for func in funcs: - rv = func() - if rv is not None: - return rv - - def process_response(self, response): - """Can be overridden in order to modify the response object - before it's sent to the WSGI server. By default this will - call all the :meth:`after_request` decorated functions. - - .. versionchanged:: 0.5 - As of Flask 0.5 the functions registered for after request - execution are called in reverse order of registration. - - :param response: a :attr:`response_class` object. - :return: a new response object or the same, has to be an - instance of :attr:`response_class`. - """ - ctx = _request_ctx_stack.top - bp = ctx.request.blueprint - funcs = ctx._after_request_functions - if bp is not None and bp in self.after_request_funcs: - funcs = chain(funcs, reversed(self.after_request_funcs[bp])) - if None in self.after_request_funcs: - funcs = chain(funcs, reversed(self.after_request_funcs[None])) - for handler in funcs: - response = handler(response) - if not self.session_interface.is_null_session(ctx.session): - self.session_interface.save_session(self, ctx.session, response) - return response - - def do_teardown_request(self, exc=_sentinel): - """Called after the request is dispatched and the response is - returned, right before the request context is popped. - - This calls all functions decorated with - :meth:`teardown_request`, and :meth:`Blueprint.teardown_request` - if a blueprint handled the request. Finally, the - :data:`request_tearing_down` signal is sent. - - This is called by - :meth:`RequestContext.pop() `, - which may be delayed during testing to maintain access to - resources. - - :param exc: An unhandled exception raised while dispatching the - request. Detected from the current exception information if - not passed. Passed to each teardown function. - - .. 
versionchanged:: 0.9 - Added the ``exc`` argument. - """ - if exc is _sentinel: - exc = sys.exc_info()[1] - funcs = reversed(self.teardown_request_funcs.get(None, ())) - bp = _request_ctx_stack.top.request.blueprint - if bp is not None and bp in self.teardown_request_funcs: - funcs = chain(funcs, reversed(self.teardown_request_funcs[bp])) - for func in funcs: - func(exc) - request_tearing_down.send(self, exc=exc) - - def do_teardown_appcontext(self, exc=_sentinel): - """Called right before the application context is popped. - - When handling a request, the application context is popped - after the request context. See :meth:`do_teardown_request`. - - This calls all functions decorated with - :meth:`teardown_appcontext`. Then the - :data:`appcontext_tearing_down` signal is sent. - - This is called by - :meth:`AppContext.pop() `. - - .. versionadded:: 0.9 - """ - if exc is _sentinel: - exc = sys.exc_info()[1] - for func in reversed(self.teardown_appcontext_funcs): - func(exc) - appcontext_tearing_down.send(self, exc=exc) - - def app_context(self): - """Create an :class:`~flask.ctx.AppContext`. Use as a ``with`` - block to push the context, which will make :data:`current_app` - point at this application. - - An application context is automatically pushed by - :meth:`RequestContext.push() ` - when handling a request, and when running a CLI command. Use - this to manually create a context outside of these situations. - - :: - - with app.app_context(): - init_db() - - See :doc:`/appcontext`. - - .. versionadded:: 0.9 - """ - return AppContext(self) - - def request_context(self, environ): - """Create a :class:`~flask.ctx.RequestContext` representing a - WSGI environment. Use a ``with`` block to push the context, - which will make :data:`request` point at this request. - - See :doc:`/reqcontext`. - - Typically you should not call this from your own code. A request - context is automatically pushed by the :meth:`wsgi_app` when - handling a request. Use :meth:`test_request_context` to create - an environment and context instead of this method. - - :param environ: a WSGI environment - """ - return RequestContext(self, environ) - - def test_request_context(self, *args, **kwargs): - """Create a :class:`~flask.ctx.RequestContext` for a WSGI - environment created from the given values. This is mostly useful - during testing, where you may want to run a function that uses - request data without dispatching a full request. - - See :doc:`/reqcontext`. - - Use a ``with`` block to push the context, which will make - :data:`request` point at the request for the created - environment. :: - - with test_request_context(...): - generate_report() - - When using the shell, it may be easier to push and pop the - context manually to avoid indentation. :: - - ctx = app.test_request_context(...) - ctx.push() - ... - ctx.pop() - - Takes the same arguments as Werkzeug's - :class:`~werkzeug.test.EnvironBuilder`, with some defaults from - the application. See the linked Werkzeug docs for most of the - available arguments. Flask-specific behavior is listed here. - - :param path: URL path being requested. - :param base_url: Base URL where the app is being served, which - ``path`` is relative to. If not given, built from - :data:`PREFERRED_URL_SCHEME`, ``subdomain``, - :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`. - :param subdomain: Subdomain name to append to - :data:`SERVER_NAME`. - :param url_scheme: Scheme to use instead of - :data:`PREFERRED_URL_SCHEME`. 
- :param data: The request body, either as a string or a dict of - form keys and values. - :param json: If given, this is serialized as JSON and passed as - ``data``. Also defaults ``content_type`` to - ``application/json``. - :param args: other positional arguments passed to - :class:`~werkzeug.test.EnvironBuilder`. - :param kwargs: other keyword arguments passed to - :class:`~werkzeug.test.EnvironBuilder`. - """ - from flask.testing import make_test_environ_builder - - builder = make_test_environ_builder(self, *args, **kwargs) - - try: - return self.request_context(builder.get_environ()) - finally: - builder.close() - - def wsgi_app(self, environ, start_response): - """The actual WSGI application. This is not implemented in - :meth:`__call__` so that middlewares can be applied without - losing a reference to the app object. Instead of doing this:: - - app = MyMiddleware(app) - - It's a better idea to do this instead:: - - app.wsgi_app = MyMiddleware(app.wsgi_app) - - Then you still have the original application object around and - can continue to call methods on it. - - .. versionchanged:: 0.7 - Teardown events for the request and app contexts are called - even if an unhandled error occurs. Other events may not be - called depending on when an error occurs during dispatch. - See :ref:`callbacks-and-errors`. - - :param environ: A WSGI environment. - :param start_response: A callable accepting a status code, - a list of headers, and an optional exception context to - start the response. - """ - ctx = self.request_context(environ) - error = None - try: - try: - ctx.push() - response = self.full_dispatch_request() - except Exception as e: - error = e - response = self.handle_exception(e) - except: - error = sys.exc_info()[1] - raise - return response(environ, start_response) - finally: - if self.should_ignore_error(error): - error = None - ctx.auto_pop(error) - - def __call__(self, environ, start_response): - """The WSGI server calls the Flask application object as the - WSGI application. This calls :meth:`wsgi_app` which can be - wrapped to applying middleware.""" - return self.wsgi_app(environ, start_response) - - def __repr__(self): - return '<%s %r>' % ( - self.__class__.__name__, - self.name, - ) diff --git a/pyextra/flask/blueprints.py b/pyextra/flask/blueprints.py deleted file mode 100644 index 5ce5561e4..000000000 --- a/pyextra/flask/blueprints.py +++ /dev/null @@ -1,448 +0,0 @@ -# -*- coding: utf-8 -*- -""" - flask.blueprints - ~~~~~~~~~~~~~~~~ - - Blueprints are the recommended way to implement larger or more - pluggable applications in Flask 0.7 and later. - - :copyright: © 2010 by the Pallets team. - :license: BSD, see LICENSE for more details. -""" -from functools import update_wrapper -from werkzeug.urls import url_join - -from .helpers import _PackageBoundObject, _endpoint_from_view_func - - -class BlueprintSetupState(object): - """Temporary holder object for registering a blueprint with the - application. An instance of this class is created by the - :meth:`~flask.Blueprint.make_setup_state` method and later passed - to all register callback functions. - """ - - def __init__(self, blueprint, app, options, first_registration): - #: a reference to the current application - self.app = app - - #: a reference to the blueprint that created this setup state. - self.blueprint = blueprint - - #: a dictionary with all options that were passed to the - #: :meth:`~flask.Flask.register_blueprint` method. 
- self.options = options - - #: as blueprints can be registered multiple times with the - #: application and not everything wants to be registered - #: multiple times on it, this attribute can be used to figure - #: out if the blueprint was registered in the past already. - self.first_registration = first_registration - - subdomain = self.options.get('subdomain') - if subdomain is None: - subdomain = self.blueprint.subdomain - - #: The subdomain that the blueprint should be active for, ``None`` - #: otherwise. - self.subdomain = subdomain - - url_prefix = self.options.get('url_prefix') - if url_prefix is None: - url_prefix = self.blueprint.url_prefix - #: The prefix that should be used for all URLs defined on the - #: blueprint. - self.url_prefix = url_prefix - - #: A dictionary with URL defaults that is added to each and every - #: URL that was defined with the blueprint. - self.url_defaults = dict(self.blueprint.url_values_defaults) - self.url_defaults.update(self.options.get('url_defaults', ())) - - def add_url_rule(self, rule, endpoint=None, view_func=None, **options): - """A helper method to register a rule (and optionally a view function) - to the application. The endpoint is automatically prefixed with the - blueprint's name. - """ - if self.url_prefix is not None: - if rule: - rule = '/'.join(( - self.url_prefix.rstrip('/'), rule.lstrip('/'))) - else: - rule = self.url_prefix - options.setdefault('subdomain', self.subdomain) - if endpoint is None: - endpoint = _endpoint_from_view_func(view_func) - defaults = self.url_defaults - if 'defaults' in options: - defaults = dict(defaults, **options.pop('defaults')) - self.app.add_url_rule(rule, '%s.%s' % (self.blueprint.name, endpoint), - view_func, defaults=defaults, **options) - - -class Blueprint(_PackageBoundObject): - """Represents a blueprint. A blueprint is an object that records - functions that will be called with the - :class:`~flask.blueprints.BlueprintSetupState` later to register functions - or other things on the main application. See :ref:`blueprints` for more - information. - - .. versionadded:: 0.7 - """ - - warn_on_modifications = False - _got_registered_once = False - - #: Blueprint local JSON decoder class to use. - #: Set to ``None`` to use the app's :class:`~flask.app.Flask.json_encoder`. - json_encoder = None - #: Blueprint local JSON decoder class to use. - #: Set to ``None`` to use the app's :class:`~flask.app.Flask.json_decoder`. - json_decoder = None - - # TODO remove the next three attrs when Sphinx :inherited-members: works - # https://github.com/sphinx-doc/sphinx/issues/741 - - #: The name of the package or module that this app belongs to. Do not - #: change this once it is set by the constructor. - import_name = None - - #: Location of the template files to be added to the template lookup. - #: ``None`` if templates should not be added. - template_folder = None - - #: Absolute path to the package on the filesystem. Used to look up - #: resources contained in the package. 
- root_path = None - - def __init__(self, name, import_name, static_folder=None, - static_url_path=None, template_folder=None, - url_prefix=None, subdomain=None, url_defaults=None, - root_path=None): - _PackageBoundObject.__init__(self, import_name, template_folder, - root_path=root_path) - self.name = name - self.url_prefix = url_prefix - self.subdomain = subdomain - self.static_folder = static_folder - self.static_url_path = static_url_path - self.deferred_functions = [] - if url_defaults is None: - url_defaults = {} - self.url_values_defaults = url_defaults - - def record(self, func): - """Registers a function that is called when the blueprint is - registered on the application. This function is called with the - state as argument as returned by the :meth:`make_setup_state` - method. - """ - if self._got_registered_once and self.warn_on_modifications: - from warnings import warn - warn(Warning('The blueprint was already registered once ' - 'but is getting modified now. These changes ' - 'will not show up.')) - self.deferred_functions.append(func) - - def record_once(self, func): - """Works like :meth:`record` but wraps the function in another - function that will ensure the function is only called once. If the - blueprint is registered a second time on the application, the - function passed is not called. - """ - def wrapper(state): - if state.first_registration: - func(state) - return self.record(update_wrapper(wrapper, func)) - - def make_setup_state(self, app, options, first_registration=False): - """Creates an instance of :meth:`~flask.blueprints.BlueprintSetupState` - object that is later passed to the register callback functions. - Subclasses can override this to return a subclass of the setup state. - """ - return BlueprintSetupState(self, app, options, first_registration) - - def register(self, app, options, first_registration=False): - """Called by :meth:`Flask.register_blueprint` to register all views - and callbacks registered on the blueprint with the application. Creates - a :class:`.BlueprintSetupState` and calls each :meth:`record` callback - with it. - - :param app: The application this blueprint is being registered with. - :param options: Keyword arguments forwarded from - :meth:`~Flask.register_blueprint`. - :param first_registration: Whether this is the first time this - blueprint has been registered on the application. - """ - self._got_registered_once = True - state = self.make_setup_state(app, options, first_registration) - - if self.has_static_folder: - state.add_url_rule( - self.static_url_path + '/', - view_func=self.send_static_file, endpoint='static' - ) - - for deferred in self.deferred_functions: - deferred(state) - - def route(self, rule, **options): - """Like :meth:`Flask.route` but for a blueprint. The endpoint for the - :func:`url_for` function is prefixed with the name of the blueprint. - """ - def decorator(f): - endpoint = options.pop("endpoint", f.__name__) - self.add_url_rule(rule, endpoint, f, **options) - return f - return decorator - - def add_url_rule(self, rule, endpoint=None, view_func=None, **options): - """Like :meth:`Flask.add_url_rule` but for a blueprint. The endpoint for - the :func:`url_for` function is prefixed with the name of the blueprint. - """ - if endpoint: - assert '.' not in endpoint, "Blueprint endpoints should not contain dots" - if view_func and hasattr(view_func, '__name__'): - assert '.' 
not in view_func.__name__, "Blueprint view function name should not contain dots" - self.record(lambda s: - s.add_url_rule(rule, endpoint, view_func, **options)) - - def endpoint(self, endpoint): - """Like :meth:`Flask.endpoint` but for a blueprint. This does not - prefix the endpoint with the blueprint name, this has to be done - explicitly by the user of this method. If the endpoint is prefixed - with a `.` it will be registered to the current blueprint, otherwise - it's an application independent endpoint. - """ - def decorator(f): - def register_endpoint(state): - state.app.view_functions[endpoint] = f - self.record_once(register_endpoint) - return f - return decorator - - def app_template_filter(self, name=None): - """Register a custom template filter, available application wide. Like - :meth:`Flask.template_filter` but for a blueprint. - - :param name: the optional name of the filter, otherwise the - function name will be used. - """ - def decorator(f): - self.add_app_template_filter(f, name=name) - return f - return decorator - - def add_app_template_filter(self, f, name=None): - """Register a custom template filter, available application wide. Like - :meth:`Flask.add_template_filter` but for a blueprint. Works exactly - like the :meth:`app_template_filter` decorator. - - :param name: the optional name of the filter, otherwise the - function name will be used. - """ - def register_template(state): - state.app.jinja_env.filters[name or f.__name__] = f - self.record_once(register_template) - - def app_template_test(self, name=None): - """Register a custom template test, available application wide. Like - :meth:`Flask.template_test` but for a blueprint. - - .. versionadded:: 0.10 - - :param name: the optional name of the test, otherwise the - function name will be used. - """ - def decorator(f): - self.add_app_template_test(f, name=name) - return f - return decorator - - def add_app_template_test(self, f, name=None): - """Register a custom template test, available application wide. Like - :meth:`Flask.add_template_test` but for a blueprint. Works exactly - like the :meth:`app_template_test` decorator. - - .. versionadded:: 0.10 - - :param name: the optional name of the test, otherwise the - function name will be used. - """ - def register_template(state): - state.app.jinja_env.tests[name or f.__name__] = f - self.record_once(register_template) - - def app_template_global(self, name=None): - """Register a custom template global, available application wide. Like - :meth:`Flask.template_global` but for a blueprint. - - .. versionadded:: 0.10 - - :param name: the optional name of the global, otherwise the - function name will be used. - """ - def decorator(f): - self.add_app_template_global(f, name=name) - return f - return decorator - - def add_app_template_global(self, f, name=None): - """Register a custom template global, available application wide. Like - :meth:`Flask.add_template_global` but for a blueprint. Works exactly - like the :meth:`app_template_global` decorator. - - .. versionadded:: 0.10 - - :param name: the optional name of the global, otherwise the - function name will be used. - """ - def register_template(state): - state.app.jinja_env.globals[name or f.__name__] = f - self.record_once(register_template) - - def before_request(self, f): - """Like :meth:`Flask.before_request` but for a blueprint. This function - is only executed before each request that is handled by a function of - that blueprint. 
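For illustration only, a small blueprint using the decorators documented above; the ``admin`` name and URLs are invented::

    from flask import Blueprint, Flask

    bp = Blueprint('admin', __name__, url_prefix='/admin')

    @bp.route('/')
    def index():
        # registered on the application as the 'admin.index' endpoint
        return 'admin index'

    @bp.before_request
    def restrict_access():
        # runs only for requests handled by this blueprint
        pass

    app = Flask(__name__)
    app.register_blueprint(bp)
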
- """ - self.record_once(lambda s: s.app.before_request_funcs - .setdefault(self.name, []).append(f)) - return f - - def before_app_request(self, f): - """Like :meth:`Flask.before_request`. Such a function is executed - before each request, even if outside of a blueprint. - """ - self.record_once(lambda s: s.app.before_request_funcs - .setdefault(None, []).append(f)) - return f - - def before_app_first_request(self, f): - """Like :meth:`Flask.before_first_request`. Such a function is - executed before the first request to the application. - """ - self.record_once(lambda s: s.app.before_first_request_funcs.append(f)) - return f - - def after_request(self, f): - """Like :meth:`Flask.after_request` but for a blueprint. This function - is only executed after each request that is handled by a function of - that blueprint. - """ - self.record_once(lambda s: s.app.after_request_funcs - .setdefault(self.name, []).append(f)) - return f - - def after_app_request(self, f): - """Like :meth:`Flask.after_request` but for a blueprint. Such a function - is executed after each request, even if outside of the blueprint. - """ - self.record_once(lambda s: s.app.after_request_funcs - .setdefault(None, []).append(f)) - return f - - def teardown_request(self, f): - """Like :meth:`Flask.teardown_request` but for a blueprint. This - function is only executed when tearing down requests handled by a - function of that blueprint. Teardown request functions are executed - when the request context is popped, even when no actual request was - performed. - """ - self.record_once(lambda s: s.app.teardown_request_funcs - .setdefault(self.name, []).append(f)) - return f - - def teardown_app_request(self, f): - """Like :meth:`Flask.teardown_request` but for a blueprint. Such a - function is executed when tearing down each request, even if outside of - the blueprint. - """ - self.record_once(lambda s: s.app.teardown_request_funcs - .setdefault(None, []).append(f)) - return f - - def context_processor(self, f): - """Like :meth:`Flask.context_processor` but for a blueprint. This - function is only executed for requests handled by a blueprint. - """ - self.record_once(lambda s: s.app.template_context_processors - .setdefault(self.name, []).append(f)) - return f - - def app_context_processor(self, f): - """Like :meth:`Flask.context_processor` but for a blueprint. Such a - function is executed each request, even if outside of the blueprint. - """ - self.record_once(lambda s: s.app.template_context_processors - .setdefault(None, []).append(f)) - return f - - def app_errorhandler(self, code): - """Like :meth:`Flask.errorhandler` but for a blueprint. This - handler is used for all requests, even if outside of the blueprint. - """ - def decorator(f): - self.record_once(lambda s: s.app.errorhandler(code)(f)) - return f - return decorator - - def url_value_preprocessor(self, f): - """Registers a function as URL value preprocessor for this - blueprint. It's called before the view functions are called and - can modify the url values provided. - """ - self.record_once(lambda s: s.app.url_value_preprocessors - .setdefault(self.name, []).append(f)) - return f - - def url_defaults(self, f): - """Callback function for URL defaults for this blueprint. It's called - with the endpoint and values and should update the values passed - in place. 
- """ - self.record_once(lambda s: s.app.url_default_functions - .setdefault(self.name, []).append(f)) - return f - - def app_url_value_preprocessor(self, f): - """Same as :meth:`url_value_preprocessor` but application wide. - """ - self.record_once(lambda s: s.app.url_value_preprocessors - .setdefault(None, []).append(f)) - return f - - def app_url_defaults(self, f): - """Same as :meth:`url_defaults` but application wide. - """ - self.record_once(lambda s: s.app.url_default_functions - .setdefault(None, []).append(f)) - return f - - def errorhandler(self, code_or_exception): - """Registers an error handler that becomes active for this blueprint - only. Please be aware that routing does not happen local to a - blueprint so an error handler for 404 usually is not handled by - a blueprint unless it is caused inside a view function. Another - special case is the 500 internal server error which is always looked - up from the application. - - Otherwise works as the :meth:`~flask.Flask.errorhandler` decorator - of the :class:`~flask.Flask` object. - """ - def decorator(f): - self.record_once(lambda s: s.app._register_error_handler( - self.name, code_or_exception, f)) - return f - return decorator - - def register_error_handler(self, code_or_exception, f): - """Non-decorator version of the :meth:`errorhandler` error attach - function, akin to the :meth:`~flask.Flask.register_error_handler` - application-wide function of the :class:`~flask.Flask` object but - for error handlers limited to this blueprint. - - .. versionadded:: 0.11 - """ - self.record_once(lambda s: s.app._register_error_handler( - self.name, code_or_exception, f)) diff --git a/pyextra/flask/cli.py b/pyextra/flask/cli.py deleted file mode 100644 index efc1733e2..000000000 --- a/pyextra/flask/cli.py +++ /dev/null @@ -1,898 +0,0 @@ -# -*- coding: utf-8 -*- -""" - flask.cli - ~~~~~~~~~ - - A simple command line application to run flask apps. - - :copyright: © 2010 by the Pallets team. - :license: BSD, see LICENSE for more details. -""" - -from __future__ import print_function - -import ast -import inspect -import os -import re -import ssl -import sys -import traceback -from functools import update_wrapper -from operator import attrgetter -from threading import Lock, Thread - -import click -from werkzeug.utils import import_string - -from . import __version__ -from ._compat import getargspec, iteritems, reraise, text_type -from .globals import current_app -from .helpers import get_debug_flag, get_env, get_load_dotenv - -try: - import dotenv -except ImportError: - dotenv = None - - -class NoAppException(click.UsageError): - """Raised if an application cannot be found or loaded.""" - - -def find_best_app(script_info, module): - """Given a module instance this tries to find the best possible - application in the module or raises an exception. - """ - from . import Flask - - # Search for the most common names first. - for attr_name in ('app', 'application'): - app = getattr(module, attr_name, None) - - if isinstance(app, Flask): - return app - - # Otherwise find the only object that is a Flask instance. - matches = [ - v for k, v in iteritems(module.__dict__) if isinstance(v, Flask) - ] - - if len(matches) == 1: - return matches[0] - elif len(matches) > 1: - raise NoAppException( - 'Detected multiple Flask applications in module "{module}". Use ' - '"FLASK_APP={module}:name" to specify the correct ' - 'one.'.format(module=module.__name__) - ) - - # Search for app factory functions. 
- for attr_name in ('create_app', 'make_app'): - app_factory = getattr(module, attr_name, None) - - if inspect.isfunction(app_factory): - try: - app = call_factory(script_info, app_factory) - - if isinstance(app, Flask): - return app - except TypeError: - if not _called_with_wrong_args(app_factory): - raise - raise NoAppException( - 'Detected factory "{factory}" in module "{module}", but ' - 'could not call it without arguments. Use ' - '"FLASK_APP=\'{module}:{factory}(args)\'" to specify ' - 'arguments.'.format( - factory=attr_name, module=module.__name__ - ) - ) - - raise NoAppException( - 'Failed to find Flask application or factory in module "{module}". ' - 'Use "FLASK_APP={module}:name to specify one.'.format( - module=module.__name__ - ) - ) - - -def call_factory(script_info, app_factory, arguments=()): - """Takes an app factory, a ``script_info` object and optionally a tuple - of arguments. Checks for the existence of a script_info argument and calls - the app_factory depending on that and the arguments provided. - """ - args_spec = getargspec(app_factory) - arg_names = args_spec.args - arg_defaults = args_spec.defaults - - if 'script_info' in arg_names: - return app_factory(*arguments, script_info=script_info) - elif arguments: - return app_factory(*arguments) - elif not arguments and len(arg_names) == 1 and arg_defaults is None: - return app_factory(script_info) - - return app_factory() - - -def _called_with_wrong_args(factory): - """Check whether calling a function raised a ``TypeError`` because - the call failed or because something in the factory raised the - error. - - :param factory: the factory function that was called - :return: true if the call failed - """ - tb = sys.exc_info()[2] - - try: - while tb is not None: - if tb.tb_frame.f_code is factory.__code__: - # in the factory, it was called successfully - return False - - tb = tb.tb_next - - # didn't reach the factory - return True - finally: - del tb - - -def find_app_by_string(script_info, module, app_name): - """Checks if the given string is a variable name or a function. If it is a - function, it checks for specified arguments and whether it takes a - ``script_info`` argument and calls the function with the appropriate - arguments. - """ - from flask import Flask - match = re.match(r'^ *([^ ()]+) *(?:\((.*?) *,? *\))? 
*$', app_name) - - if not match: - raise NoAppException( - '"{name}" is not a valid variable name or function ' - 'expression.'.format(name=app_name) - ) - - name, args = match.groups() - - try: - attr = getattr(module, name) - except AttributeError as e: - raise NoAppException(e.args[0]) - - if inspect.isfunction(attr): - if args: - try: - args = ast.literal_eval('({args},)'.format(args=args)) - except (ValueError, SyntaxError)as e: - raise NoAppException( - 'Could not parse the arguments in ' - '"{app_name}".'.format(e=e, app_name=app_name) - ) - else: - args = () - - try: - app = call_factory(script_info, attr, args) - except TypeError as e: - if not _called_with_wrong_args(attr): - raise - - raise NoAppException( - '{e}\nThe factory "{app_name}" in module "{module}" could not ' - 'be called with the specified arguments.'.format( - e=e, app_name=app_name, module=module.__name__ - ) - ) - else: - app = attr - - if isinstance(app, Flask): - return app - - raise NoAppException( - 'A valid Flask application was not obtained from ' - '"{module}:{app_name}".'.format( - module=module.__name__, app_name=app_name - ) - ) - - -def prepare_import(path): - """Given a filename this will try to calculate the python path, add it - to the search path and return the actual module name that is expected. - """ - path = os.path.realpath(path) - - if os.path.splitext(path)[1] == '.py': - path = os.path.splitext(path)[0] - - if os.path.basename(path) == '__init__': - path = os.path.dirname(path) - - module_name = [] - - # move up until outside package structure (no __init__.py) - while True: - path, name = os.path.split(path) - module_name.append(name) - - if not os.path.exists(os.path.join(path, '__init__.py')): - break - - if sys.path[0] != path: - sys.path.insert(0, path) - - return '.'.join(module_name[::-1]) - - -def locate_app(script_info, module_name, app_name, raise_if_not_found=True): - __traceback_hide__ = True - - try: - __import__(module_name) - except ImportError: - # Reraise the ImportError if it occurred within the imported module. - # Determine this by checking whether the trace has a depth > 1. - if sys.exc_info()[-1].tb_next: - raise NoAppException( - 'While importing "{name}", an ImportError was raised:' - '\n\n{tb}'.format(name=module_name, tb=traceback.format_exc()) - ) - elif raise_if_not_found: - raise NoAppException( - 'Could not import "{name}".'.format(name=module_name) - ) - else: - return - - module = sys.modules[module_name] - - if app_name is None: - return find_best_app(script_info, module) - else: - return find_app_by_string(script_info, module, app_name) - - -def get_version(ctx, param, value): - if not value or ctx.resilient_parsing: - return - message = 'Flask %(version)s\nPython %(python_version)s' - click.echo(message % { - 'version': __version__, - 'python_version': sys.version, - }, color=ctx.color) - ctx.exit() - - -version_option = click.Option( - ['--version'], - help='Show the flask version', - expose_value=False, - callback=get_version, - is_flag=True, - is_eager=True -) - - -class DispatchingApp(object): - """Special application that dispatches to a Flask application which - is imported by name in a background thread. If an error happens - it is recorded and shown as part of the WSGI handling which in case - of the Werkzeug debugger means that it shows up in the browser. 
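Sketch of an application factory that the lookup logic above can load; the module name ``hello`` and the ``config`` argument are hypothetical::

    # hello.py
    from flask import Flask

    def create_app(config=None):
        # discovered automatically by find_best_app(), or selected
        # explicitly with FLASK_APP=hello:create_app; arguments can be
        # passed as FLASK_APP="hello:create_app({'DEBUG': True})"
        app = Flask(__name__)
        if config is not None:
            app.config.update(config)
        return app
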
- """ - - def __init__(self, loader, use_eager_loading=False): - self.loader = loader - self._app = None - self._lock = Lock() - self._bg_loading_exc_info = None - if use_eager_loading: - self._load_unlocked() - else: - self._load_in_background() - - def _load_in_background(self): - def _load_app(): - __traceback_hide__ = True - with self._lock: - try: - self._load_unlocked() - except Exception: - self._bg_loading_exc_info = sys.exc_info() - t = Thread(target=_load_app, args=()) - t.start() - - def _flush_bg_loading_exception(self): - __traceback_hide__ = True - exc_info = self._bg_loading_exc_info - if exc_info is not None: - self._bg_loading_exc_info = None - reraise(*exc_info) - - def _load_unlocked(self): - __traceback_hide__ = True - self._app = rv = self.loader() - self._bg_loading_exc_info = None - return rv - - def __call__(self, environ, start_response): - __traceback_hide__ = True - if self._app is not None: - return self._app(environ, start_response) - self._flush_bg_loading_exception() - with self._lock: - if self._app is not None: - rv = self._app - else: - rv = self._load_unlocked() - return rv(environ, start_response) - - -class ScriptInfo(object): - """Help object to deal with Flask applications. This is usually not - necessary to interface with as it's used internally in the dispatching - to click. In future versions of Flask this object will most likely play - a bigger role. Typically it's created automatically by the - :class:`FlaskGroup` but you can also manually create it and pass it - onwards as click object. - """ - - def __init__(self, app_import_path=None, create_app=None): - #: Optionally the import path for the Flask application. - self.app_import_path = app_import_path or os.environ.get('FLASK_APP') - #: Optionally a function that is passed the script info to create - #: the instance of the application. - self.create_app = create_app - #: A dictionary with arbitrary data that can be associated with - #: this script info. - self.data = {} - self._loaded_app = None - - def load_app(self): - """Loads the Flask app (if not yet loaded) and returns it. Calling - this multiple times will just result in the already loaded app to - be returned. - """ - __traceback_hide__ = True - - if self._loaded_app is not None: - return self._loaded_app - - app = None - - if self.create_app is not None: - app = call_factory(self, self.create_app) - else: - if self.app_import_path: - path, name = (self.app_import_path.split(':', 1) + [None])[:2] - import_name = prepare_import(path) - app = locate_app(self, import_name, name) - else: - for path in ('wsgi.py', 'app.py'): - import_name = prepare_import(path) - app = locate_app(self, import_name, None, - raise_if_not_found=False) - - if app: - break - - if not app: - raise NoAppException( - 'Could not locate a Flask application. You did not provide ' - 'the "FLASK_APP" environment variable, and a "wsgi.py" or ' - '"app.py" module was not found in the current directory.' - ) - - debug = get_debug_flag() - - # Update the app's debug flag through the descriptor so that other - # values repopulate as well. - if debug is not None: - app.debug = debug - - self._loaded_app = app - return app - - -pass_script_info = click.make_pass_decorator(ScriptInfo, ensure=True) - - -def with_appcontext(f): - """Wraps a callback so that it's guaranteed to be executed with the - script's application context. If callbacks are registered directly - to the ``app.cli`` object then they are wrapped with this function - by default unless it's disabled. 
- """ - @click.pass_context - def decorator(__ctx, *args, **kwargs): - with __ctx.ensure_object(ScriptInfo).load_app().app_context(): - return __ctx.invoke(f, *args, **kwargs) - return update_wrapper(decorator, f) - - -class AppGroup(click.Group): - """This works similar to a regular click :class:`~click.Group` but it - changes the behavior of the :meth:`command` decorator so that it - automatically wraps the functions in :func:`with_appcontext`. - - Not to be confused with :class:`FlaskGroup`. - """ - - def command(self, *args, **kwargs): - """This works exactly like the method of the same name on a regular - :class:`click.Group` but it wraps callbacks in :func:`with_appcontext` - unless it's disabled by passing ``with_appcontext=False``. - """ - wrap_for_ctx = kwargs.pop('with_appcontext', True) - def decorator(f): - if wrap_for_ctx: - f = with_appcontext(f) - return click.Group.command(self, *args, **kwargs)(f) - return decorator - - def group(self, *args, **kwargs): - """This works exactly like the method of the same name on a regular - :class:`click.Group` but it defaults the group class to - :class:`AppGroup`. - """ - kwargs.setdefault('cls', AppGroup) - return click.Group.group(self, *args, **kwargs) - - -class FlaskGroup(AppGroup): - """Special subclass of the :class:`AppGroup` group that supports - loading more commands from the configured Flask app. Normally a - developer does not have to interface with this class but there are - some very advanced use cases for which it makes sense to create an - instance of this. - - For information as of why this is useful see :ref:`custom-scripts`. - - :param add_default_commands: if this is True then the default run and - shell commands wil be added. - :param add_version_option: adds the ``--version`` option. - :param create_app: an optional callback that is passed the script info and - returns the loaded app. - :param load_dotenv: Load the nearest :file:`.env` and :file:`.flaskenv` - files to set environment variables. Will also change the working - directory to the directory containing the first file found. - - .. versionchanged:: 1.0 - If installed, python-dotenv will be used to load environment variables - from :file:`.env` and :file:`.flaskenv` files. - """ - - def __init__(self, add_default_commands=True, create_app=None, - add_version_option=True, load_dotenv=True, **extra): - params = list(extra.pop('params', None) or ()) - - if add_version_option: - params.append(version_option) - - AppGroup.__init__(self, params=params, **extra) - self.create_app = create_app - self.load_dotenv = load_dotenv - - if add_default_commands: - self.add_command(run_command) - self.add_command(shell_command) - self.add_command(routes_command) - - self._loaded_plugin_commands = False - - def _load_plugin_commands(self): - if self._loaded_plugin_commands: - return - try: - import pkg_resources - except ImportError: - self._loaded_plugin_commands = True - return - - for ep in pkg_resources.iter_entry_points('flask.commands'): - self.add_command(ep.load(), ep.name) - self._loaded_plugin_commands = True - - def get_command(self, ctx, name): - self._load_plugin_commands() - - # We load built-in commands first as these should always be the - # same no matter what the app does. If the app does want to - # override this it needs to make a custom instance of this group - # and not attach the default commands. - # - # This also means that the script stays functional in case the - # application completely fails. 
- rv = AppGroup.get_command(self, ctx, name) - if rv is not None: - return rv - - info = ctx.ensure_object(ScriptInfo) - try: - rv = info.load_app().cli.get_command(ctx, name) - if rv is not None: - return rv - except NoAppException: - pass - - def list_commands(self, ctx): - self._load_plugin_commands() - - # The commands available is the list of both the application (if - # available) plus the builtin commands. - rv = set(click.Group.list_commands(self, ctx)) - info = ctx.ensure_object(ScriptInfo) - try: - rv.update(info.load_app().cli.list_commands(ctx)) - except Exception: - # Here we intentionally swallow all exceptions as we don't - # want the help page to break if the app does not exist. - # If someone attempts to use the command we try to create - # the app again and this will give us the error. - # However, we will not do so silently because that would confuse - # users. - traceback.print_exc() - return sorted(rv) - - def main(self, *args, **kwargs): - # Set a global flag that indicates that we were invoked from the - # command line interface. This is detected by Flask.run to make the - # call into a no-op. This is necessary to avoid ugly errors when the - # script that is loaded here also attempts to start a server. - os.environ['FLASK_RUN_FROM_CLI'] = 'true' - - if get_load_dotenv(self.load_dotenv): - load_dotenv() - - obj = kwargs.get('obj') - - if obj is None: - obj = ScriptInfo(create_app=self.create_app) - - kwargs['obj'] = obj - kwargs.setdefault('auto_envvar_prefix', 'FLASK') - return super(FlaskGroup, self).main(*args, **kwargs) - - -def _path_is_ancestor(path, other): - """Take ``other`` and remove the length of ``path`` from it. Then join it - to ``path``. If it is the original value, ``path`` is an ancestor of - ``other``.""" - return os.path.join(path, other[len(path):].lstrip(os.sep)) == other - - -def load_dotenv(path=None): - """Load "dotenv" files in order of precedence to set environment variables. - - If an env var is already set it is not overwritten, so earlier files in the - list are preferred over later files. - - Changes the current working directory to the location of the first file - found, with the assumption that it is in the top level project directory - and will be where the Python path should import local packages from. - - This is a no-op if `python-dotenv`_ is not installed. - - .. _python-dotenv: https://github.com/theskumar/python-dotenv#readme - - :param path: Load the file at this location instead of searching. - :return: ``True`` if a file was loaded. - - .. versionadded:: 1.0 - """ - if dotenv is None: - if path or os.path.exists('.env') or os.path.exists('.flaskenv'): - click.secho( - ' * Tip: There are .env files present.' - ' Do "pip install python-dotenv" to use them.', - fg='yellow') - return - - if path is not None: - return dotenv.load_dotenv(path) - - new_dir = None - - for name in ('.env', '.flaskenv'): - path = dotenv.find_dotenv(name, usecwd=True) - - if not path: - continue - - if new_dir is None: - new_dir = os.path.dirname(path) - - dotenv.load_dotenv(path) - - if new_dir and os.getcwd() != new_dir: - os.chdir(new_dir) - - return new_dir is not None # at least one file was located and loaded - - -def show_server_banner(env, debug, app_import_path, eager_loading): - """Show extra startup messages the first time the server is run, - ignoring the reloader. 
- """ - if os.environ.get('WERKZEUG_RUN_MAIN') == 'true': - return - - if app_import_path is not None: - message = ' * Serving Flask app "{0}"'.format(app_import_path) - - if not eager_loading: - message += ' (lazy loading)' - - click.echo(message) - - click.echo(' * Environment: {0}'.format(env)) - - if env == 'production': - click.secho( - ' WARNING: Do not use the development server in a production' - ' environment.', fg='red') - click.secho(' Use a production WSGI server instead.', dim=True) - - if debug is not None: - click.echo(' * Debug mode: {0}'.format('on' if debug else 'off')) - - -class CertParamType(click.ParamType): - """Click option type for the ``--cert`` option. Allows either an - existing file, the string ``'adhoc'``, or an import for a - :class:`~ssl.SSLContext` object. - """ - - name = 'path' - - def __init__(self): - self.path_type = click.Path( - exists=True, dir_okay=False, resolve_path=True) - - def convert(self, value, param, ctx): - try: - return self.path_type(value, param, ctx) - except click.BadParameter: - value = click.STRING(value, param, ctx).lower() - - if value == 'adhoc': - try: - import OpenSSL - except ImportError: - raise click.BadParameter( - 'Using ad-hoc certificates requires pyOpenSSL.', - ctx, param) - - return value - - obj = import_string(value, silent=True) - - if sys.version_info < (2, 7): - if obj: - return obj - else: - if isinstance(obj, ssl.SSLContext): - return obj - - raise - - -def _validate_key(ctx, param, value): - """The ``--key`` option must be specified when ``--cert`` is a file. - Modifies the ``cert`` param to be a ``(cert, key)`` pair if needed. - """ - cert = ctx.params.get('cert') - is_adhoc = cert == 'adhoc' - - if sys.version_info < (2, 7): - is_context = cert and not isinstance(cert, (text_type, bytes)) - else: - is_context = isinstance(cert, ssl.SSLContext) - - if value is not None: - if is_adhoc: - raise click.BadParameter( - 'When "--cert" is "adhoc", "--key" is not used.', - ctx, param) - - if is_context: - raise click.BadParameter( - 'When "--cert" is an SSLContext object, "--key is not used.', - ctx, param) - - if not cert: - raise click.BadParameter( - '"--cert" must also be specified.', - ctx, param) - - ctx.params['cert'] = cert, value - - else: - if cert and not (is_adhoc or is_context): - raise click.BadParameter( - 'Required when using "--cert".', - ctx, param) - - return value - - -@click.command('run', short_help='Runs a development server.') -@click.option('--host', '-h', default='127.0.0.1', - help='The interface to bind to.') -@click.option('--port', '-p', default=5000, - help='The port to bind to.') -@click.option('--cert', type=CertParamType(), - help='Specify a certificate file to use HTTPS.') -@click.option('--key', - type=click.Path(exists=True, dir_okay=False, resolve_path=True), - callback=_validate_key, expose_value=False, - help='The key file to use when specifying a certificate.') -@click.option('--reload/--no-reload', default=None, - help='Enable or disable the reloader. By default the reloader ' - 'is active if debug is enabled.') -@click.option('--debugger/--no-debugger', default=None, - help='Enable or disable the debugger. By default the debugger ' - 'is active if debug is enabled.') -@click.option('--eager-loading/--lazy-loader', default=None, - help='Enable or disable eager loading. 
By default eager ' - 'loading is enabled if the reloader is disabled.') -@click.option('--with-threads/--without-threads', default=True, - help='Enable or disable multithreading.') -@pass_script_info -def run_command(info, host, port, reload, debugger, eager_loading, - with_threads, cert): - """Run a local development server. - - This server is for development purposes only. It does not provide - the stability, security, or performance of production WSGI servers. - - The reloader and debugger are enabled by default if - FLASK_ENV=development or FLASK_DEBUG=1. - """ - debug = get_debug_flag() - - if reload is None: - reload = debug - - if debugger is None: - debugger = debug - - if eager_loading is None: - eager_loading = not reload - - show_server_banner(get_env(), debug, info.app_import_path, eager_loading) - app = DispatchingApp(info.load_app, use_eager_loading=eager_loading) - - from werkzeug.serving import run_simple - run_simple(host, port, app, use_reloader=reload, use_debugger=debugger, - threaded=with_threads, ssl_context=cert) - - -@click.command('shell', short_help='Runs a shell in the app context.') -@with_appcontext -def shell_command(): - """Runs an interactive Python shell in the context of a given - Flask application. The application will populate the default - namespace of this shell according to it's configuration. - - This is useful for executing small snippets of management code - without having to manually configure the application. - """ - import code - from flask.globals import _app_ctx_stack - app = _app_ctx_stack.top.app - banner = 'Python %s on %s\nApp: %s [%s]\nInstance: %s' % ( - sys.version, - sys.platform, - app.import_name, - app.env, - app.instance_path, - ) - ctx = {} - - # Support the regular Python interpreter startup script if someone - # is using it. - startup = os.environ.get('PYTHONSTARTUP') - if startup and os.path.isfile(startup): - with open(startup, 'r') as f: - eval(compile(f.read(), startup, 'exec'), ctx) - - ctx.update(app.make_shell_context()) - - code.interact(banner=banner, local=ctx) - - -@click.command('routes', short_help='Show the routes for the app.') -@click.option( - '--sort', '-s', - type=click.Choice(('endpoint', 'methods', 'rule', 'match')), - default='endpoint', - help=( - 'Method to sort routes by. "match" is the order that Flask will match ' - 'routes when dispatching a request.' - ) -) -@click.option( - '--all-methods', - is_flag=True, - help="Show HEAD and OPTIONS methods." 
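# --- Editorial sketch (not part of the deleted file): the --cert/--key
# --- combinations accepted by CertParamType and _validate_key above, expressed
# --- through Werkzeug's run_simple. The certificate/key file names are
# --- hypothetical placeholders.
import ssl

from flask import Flask
from werkzeug.serving import run_simple

app = Flask(__name__)                      # stand-in for the real application

# flask run --cert cert.pem --key key.pem   ->  ssl_context as a (cert, key) pair
# run_simple('127.0.0.1', 5000, app, ssl_context=('cert.pem', 'key.pem'))

# flask run --cert adhoc                    ->  needs pyOpenSSL; --key is rejected
# run_simple('127.0.0.1', 5000, app, ssl_context='adhoc')

# flask run --cert mymodule.ctx             ->  an imported SSLContext; --key is rejected
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
ctx.load_cert_chain('cert.pem', 'key.pem')  # hypothetical files
run_simple('127.0.0.1', 5000, app, ssl_context=ctx)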
-) -@with_appcontext -def routes_command(sort, all_methods): - """Show all registered routes with endpoints and methods.""" - - rules = list(current_app.url_map.iter_rules()) - if not rules: - click.echo('No routes were registered.') - return - - ignored_methods = set(() if all_methods else ('HEAD', 'OPTIONS')) - - if sort in ('endpoint', 'rule'): - rules = sorted(rules, key=attrgetter(sort)) - elif sort == 'methods': - rules = sorted(rules, key=lambda rule: sorted(rule.methods)) - - rule_methods = [ - ', '.join(sorted(rule.methods - ignored_methods)) for rule in rules - ] - - headers = ('Endpoint', 'Methods', 'Rule') - widths = ( - max(len(rule.endpoint) for rule in rules), - max(len(methods) for methods in rule_methods), - max(len(rule.rule) for rule in rules), - ) - widths = [max(len(h), w) for h, w in zip(headers, widths)] - row = '{{0:<{0}}} {{1:<{1}}} {{2:<{2}}}'.format(*widths) - - click.echo(row.format(*headers).strip()) - click.echo(row.format(*('-' * width for width in widths))) - - for rule, methods in zip(rules, rule_methods): - click.echo(row.format(rule.endpoint, methods, rule.rule).rstrip()) - - -cli = FlaskGroup(help="""\ -A general utility script for Flask applications. - -Provides commands from Flask, extensions, and the application. Loads the -application defined in the FLASK_APP environment variable, or from a wsgi.py -file. Setting the FLASK_ENV environment variable to 'development' will enable -debug mode. - -\b - {prefix}{cmd} FLASK_APP=hello.py - {prefix}{cmd} FLASK_ENV=development - {prefix}flask run -""".format( - cmd='export' if os.name == 'posix' else 'set', - prefix='$ ' if os.name == 'posix' else '> ' -)) - - -def main(as_module=False): - args = sys.argv[1:] - - if as_module: - this_module = 'flask' - - if sys.version_info < (2, 7): - this_module += '.cli' - - name = 'python -m ' + this_module - - # Python rewrites "python -m flask" to the path to the file in argv. - # Restore the original command so that the reloader works. - sys.argv = ['-m', this_module] + args - else: - name = None - - cli.main(args=args, prog_name=name) - - -if __name__ == '__main__': - main(as_module=True) diff --git a/pyextra/flask/config.py b/pyextra/flask/config.py deleted file mode 100644 index d6074baa8..000000000 --- a/pyextra/flask/config.py +++ /dev/null @@ -1,265 +0,0 @@ -# -*- coding: utf-8 -*- -""" - flask.config - ~~~~~~~~~~~~ - - Implements the configuration related objects. - - :copyright: © 2010 by the Pallets team. - :license: BSD, see LICENSE for more details. -""" - -import os -import types -import errno - -from werkzeug.utils import import_string -from ._compat import string_types, iteritems -from . import json - - -class ConfigAttribute(object): - """Makes an attribute forward to the config""" - - def __init__(self, name, get_converter=None): - self.__name__ = name - self.get_converter = get_converter - - def __get__(self, obj, type=None): - if obj is None: - return self - rv = obj.config[self.__name__] - if self.get_converter is not None: - rv = self.get_converter(rv) - return rv - - def __set__(self, obj, value): - obj.config[self.__name__] = value - - -class Config(dict): - """Works exactly like a dict but provides ways to fill it from files - or special dictionaries. There are two common patterns to populate the - config. 
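# --- Editorial sketch (not part of the deleted file): what the ConfigAttribute
# --- descriptor defined above does, demonstrated on a hypothetical stand-in
# --- class rather than on Flask itself.
from flask.config import ConfigAttribute


class FakeApp(object):
    debug = ConfigAttribute('DEBUG', get_converter=bool)

    def __init__(self):
        self.config = {'DEBUG': 0}


app = FakeApp()
assert app.debug is False          # read: config['DEBUG'] passed through bool()
app.debug = True                   # write: stored back into the config mapping
assert app.config['DEBUG'] is True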
- - Either you can fill the config from a config file:: - - app.config.from_pyfile('yourconfig.cfg') - - Or alternatively you can define the configuration options in the - module that calls :meth:`from_object` or provide an import path to - a module that should be loaded. It is also possible to tell it to - use the same module and with that provide the configuration values - just before the call:: - - DEBUG = True - SECRET_KEY = 'development key' - app.config.from_object(__name__) - - In both cases (loading from any Python file or loading from modules), - only uppercase keys are added to the config. This makes it possible to use - lowercase values in the config file for temporary values that are not added - to the config or to define the config keys in the same file that implements - the application. - - Probably the most interesting way to load configurations is from an - environment variable pointing to a file:: - - app.config.from_envvar('YOURAPPLICATION_SETTINGS') - - In this case before launching the application you have to set this - environment variable to the file you want to use. On Linux and OS X - use the export statement:: - - export YOURAPPLICATION_SETTINGS='/path/to/config/file' - - On windows use `set` instead. - - :param root_path: path to which files are read relative from. When the - config object is created by the application, this is - the application's :attr:`~flask.Flask.root_path`. - :param defaults: an optional dictionary of default values - """ - - def __init__(self, root_path, defaults=None): - dict.__init__(self, defaults or {}) - self.root_path = root_path - - def from_envvar(self, variable_name, silent=False): - """Loads a configuration from an environment variable pointing to - a configuration file. This is basically just a shortcut with nicer - error messages for this line of code:: - - app.config.from_pyfile(os.environ['YOURAPPLICATION_SETTINGS']) - - :param variable_name: name of the environment variable - :param silent: set to ``True`` if you want silent failure for missing - files. - :return: bool. ``True`` if able to load config, ``False`` otherwise. - """ - rv = os.environ.get(variable_name) - if not rv: - if silent: - return False - raise RuntimeError('The environment variable %r is not set ' - 'and as such configuration could not be ' - 'loaded. Set this variable and make it ' - 'point to a configuration file' % - variable_name) - return self.from_pyfile(rv, silent=silent) - - def from_pyfile(self, filename, silent=False): - """Updates the values in the config from a Python file. This function - behaves as if the file was imported as module with the - :meth:`from_object` function. - - :param filename: the filename of the config. This can either be an - absolute filename or a filename relative to the - root path. - :param silent: set to ``True`` if you want silent failure for missing - files. - - .. versionadded:: 0.7 - `silent` parameter. - """ - filename = os.path.join(self.root_path, filename) - d = types.ModuleType('config') - d.__file__ = filename - try: - with open(filename, mode='rb') as config_file: - exec(compile(config_file.read(), filename, 'exec'), d.__dict__) - except IOError as e: - if silent and e.errno in ( - errno.ENOENT, errno.EISDIR, errno.ENOTDIR - ): - return False - e.strerror = 'Unable to load configuration file (%s)' % e.strerror - raise - self.from_object(d) - return True - - def from_object(self, obj): - """Updates the values from the given object. 
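# --- Editorial sketch (not part of the deleted file): the from_envvar() ->
# --- from_pyfile() chain implemented above. The settings file and the
# --- YOURAPPLICATION_SETTINGS variable are hypothetical; only UPPERCASE names
# --- from the loaded file end up in the config.
import os

from flask import Flask

app = Flask(__name__)
app.config.from_pyfile('default_settings.cfg', silent=True)   # optional defaults
if 'YOURAPPLICATION_SETTINGS' in os.environ:
    # deployment overrides; the env var holds the path to another config file
    app.config.from_envvar('YOURAPPLICATION_SETTINGS')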
An object can be of one - of the following two types: - - - a string: in this case the object with that name will be imported - - an actual object reference: that object is used directly - - Objects are usually either modules or classes. :meth:`from_object` - loads only the uppercase attributes of the module/class. A ``dict`` - object will not work with :meth:`from_object` because the keys of a - ``dict`` are not attributes of the ``dict`` class. - - Example of module-based configuration:: - - app.config.from_object('yourapplication.default_config') - from yourapplication import default_config - app.config.from_object(default_config) - - You should not use this function to load the actual configuration but - rather configuration defaults. The actual config should be loaded - with :meth:`from_pyfile` and ideally from a location not within the - package because the package might be installed system wide. - - See :ref:`config-dev-prod` for an example of class-based configuration - using :meth:`from_object`. - - :param obj: an import name or object - """ - if isinstance(obj, string_types): - obj = import_string(obj) - for key in dir(obj): - if key.isupper(): - self[key] = getattr(obj, key) - - def from_json(self, filename, silent=False): - """Updates the values in the config from a JSON file. This function - behaves as if the JSON object was a dictionary and passed to the - :meth:`from_mapping` function. - - :param filename: the filename of the JSON file. This can either be an - absolute filename or a filename relative to the - root path. - :param silent: set to ``True`` if you want silent failure for missing - files. - - .. versionadded:: 0.11 - """ - filename = os.path.join(self.root_path, filename) - - try: - with open(filename) as json_file: - obj = json.loads(json_file.read()) - except IOError as e: - if silent and e.errno in (errno.ENOENT, errno.EISDIR): - return False - e.strerror = 'Unable to load configuration file (%s)' % e.strerror - raise - return self.from_mapping(obj) - - def from_mapping(self, *mapping, **kwargs): - """Updates the config like :meth:`update` ignoring items with non-upper - keys. - - .. versionadded:: 0.11 - """ - mappings = [] - if len(mapping) == 1: - if hasattr(mapping[0], 'items'): - mappings.append(mapping[0].items()) - else: - mappings.append(mapping[0]) - elif len(mapping) > 1: - raise TypeError( - 'expected at most 1 positional argument, got %d' % len(mapping) - ) - mappings.append(kwargs.items()) - for mapping in mappings: - for (key, value) in mapping: - if key.isupper(): - self[key] = value - return True - - def get_namespace(self, namespace, lowercase=True, trim_namespace=True): - """Returns a dictionary containing a subset of configuration options - that match the specified namespace/prefix. Example usage:: - - app.config['IMAGE_STORE_TYPE'] = 'fs' - app.config['IMAGE_STORE_PATH'] = '/var/app/images' - app.config['IMAGE_STORE_BASE_URL'] = 'http://img.website.com' - image_store_config = app.config.get_namespace('IMAGE_STORE_') - - The resulting dictionary `image_store_config` would look like:: - - { - 'type': 'fs', - 'path': '/var/app/images', - 'base_url': 'http://img.website.com' - } - - This is often useful when configuration options map directly to - keyword arguments in functions or class constructors. 
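# --- Editorial sketch (not part of the deleted file): class-based defaults fed
# --- to from_object(), then get_namespace() pulling a keyword-argument dict
# --- out of the result, mirroring the docstring example above. All names are
# --- hypothetical.
from flask import Flask


class DefaultConfig(object):
    DEBUG = False
    IMAGE_STORE_TYPE = 'fs'
    IMAGE_STORE_PATH = '/var/app/images'
    ignored = 'lowercase attributes are not copied'


app = Flask(__name__)
app.config.from_object(DefaultConfig)
image_store_config = app.config.get_namespace('IMAGE_STORE_')
assert image_store_config == {'type': 'fs', 'path': '/var/app/images'}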
- - :param namespace: a configuration namespace - :param lowercase: a flag indicating if the keys of the resulting - dictionary should be lowercase - :param trim_namespace: a flag indicating if the keys of the resulting - dictionary should not include the namespace - - .. versionadded:: 0.11 - """ - rv = {} - for k, v in iteritems(self): - if not k.startswith(namespace): - continue - if trim_namespace: - key = k[len(namespace):] - else: - key = k - if lowercase: - key = key.lower() - rv[key] = v - return rv - - def __repr__(self): - return '<%s %s>' % (self.__class__.__name__, dict.__repr__(self)) diff --git a/pyextra/flask/ctx.py b/pyextra/flask/ctx.py deleted file mode 100644 index 8472c920c..000000000 --- a/pyextra/flask/ctx.py +++ /dev/null @@ -1,457 +0,0 @@ -# -*- coding: utf-8 -*- -""" - flask.ctx - ~~~~~~~~~ - - Implements the objects required to keep the context. - - :copyright: © 2010 by the Pallets team. - :license: BSD, see LICENSE for more details. -""" - -import sys -from functools import update_wrapper - -from werkzeug.exceptions import HTTPException - -from .globals import _request_ctx_stack, _app_ctx_stack -from .signals import appcontext_pushed, appcontext_popped -from ._compat import BROKEN_PYPY_CTXMGR_EXIT, reraise - - -# a singleton sentinel value for parameter defaults -_sentinel = object() - - -class _AppCtxGlobals(object): - """A plain object. Used as a namespace for storing data during an - application context. - - Creating an app context automatically creates this object, which is - made available as the :data:`g` proxy. - - .. describe:: 'key' in g - - Check whether an attribute is present. - - .. versionadded:: 0.10 - - .. describe:: iter(g) - - Return an iterator over the attribute names. - - .. versionadded:: 0.10 - """ - - def get(self, name, default=None): - """Get an attribute by name, or a default value. Like - :meth:`dict.get`. - - :param name: Name of attribute to get. - :param default: Value to return if the attribute is not present. - - .. versionadded:: 0.10 - """ - return self.__dict__.get(name, default) - - def pop(self, name, default=_sentinel): - """Get and remove an attribute by name. Like :meth:`dict.pop`. - - :param name: Name of attribute to pop. - :param default: Value to return if the attribute is not present, - instead of raise a ``KeyError``. - - .. versionadded:: 0.11 - """ - if default is _sentinel: - return self.__dict__.pop(name) - else: - return self.__dict__.pop(name, default) - - def setdefault(self, name, default=None): - """Get the value of an attribute if it is present, otherwise - set and return a default value. Like :meth:`dict.setdefault`. - - :param name: Name of attribute to get. - :param: default: Value to set and return if the attribute is not - present. - - .. versionadded:: 0.11 - """ - return self.__dict__.setdefault(name, default) - - def __contains__(self, item): - return item in self.__dict__ - - def __iter__(self): - return iter(self.__dict__) - - def __repr__(self): - top = _app_ctx_stack.top - if top is not None: - return '' % top.app.name - return object.__repr__(self) - - -def after_this_request(f): - """Executes a function after this request. This is useful to modify - response objects. The function is passed the response object and has - to return the same or a new one. - - Example:: - - @app.route('/') - def index(): - @after_this_request - def add_header(response): - response.headers['X-Foo'] = 'Parachute' - return response - return 'Hello World!' 
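# --- Editorial sketch (not part of the deleted file): the dict-like helpers
# --- that _AppCtxGlobals exposes through flask.g, used inside an application
# --- context. The app itself is hypothetical.
from flask import Flask, g

app = Flask(__name__)

with app.app_context():
    g.user = 'alice'
    g.setdefault('db', None)            # like dict.setdefault
    assert 'user' in g                  # __contains__
    assert g.get('missing', 42) == 42   # like dict.get
    assert sorted(g) == ['db', 'user']  # __iter__ over attribute names
    g.pop('db')                         # like dict.pop; KeyError if absent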
- - This is more useful if a function other than the view function wants to - modify a response. For instance think of a decorator that wants to add - some headers without converting the return value into a response object. - - .. versionadded:: 0.9 - """ - _request_ctx_stack.top._after_request_functions.append(f) - return f - - -def copy_current_request_context(f): - """A helper function that decorates a function to retain the current - request context. This is useful when working with greenlets. The moment - the function is decorated a copy of the request context is created and - then pushed when the function is called. - - Example:: - - import gevent - from flask import copy_current_request_context - - @app.route('/') - def index(): - @copy_current_request_context - def do_some_work(): - # do some work here, it can access flask.request like you - # would otherwise in the view function. - ... - gevent.spawn(do_some_work) - return 'Regular response' - - .. versionadded:: 0.10 - """ - top = _request_ctx_stack.top - if top is None: - raise RuntimeError('This decorator can only be used at local scopes ' - 'when a request context is on the stack. For instance within ' - 'view functions.') - reqctx = top.copy() - def wrapper(*args, **kwargs): - with reqctx: - return f(*args, **kwargs) - return update_wrapper(wrapper, f) - - -def has_request_context(): - """If you have code that wants to test if a request context is there or - not this function can be used. For instance, you may want to take advantage - of request information if the request object is available, but fail - silently if it is unavailable. - - :: - - class User(db.Model): - - def __init__(self, username, remote_addr=None): - self.username = username - if remote_addr is None and has_request_context(): - remote_addr = request.remote_addr - self.remote_addr = remote_addr - - Alternatively you can also just test any of the context bound objects - (such as :class:`request` or :class:`g` for truthness):: - - class User(db.Model): - - def __init__(self, username, remote_addr=None): - self.username = username - if remote_addr is None and request: - remote_addr = request.remote_addr - self.remote_addr = remote_addr - - .. versionadded:: 0.7 - """ - return _request_ctx_stack.top is not None - - -def has_app_context(): - """Works like :func:`has_request_context` but for the application - context. You can also just do a boolean check on the - :data:`current_app` object instead. - - .. versionadded:: 0.9 - """ - return _app_ctx_stack.top is not None - - -class AppContext(object): - """The application context binds an application object implicitly - to the current thread or greenlet, similar to how the - :class:`RequestContext` binds request information. The application - context is also implicitly created if a request context is created - but the application is not on top of the individual application - context. - """ - - def __init__(self, app): - self.app = app - self.url_adapter = app.create_url_adapter(None) - self.g = app.app_ctx_globals_class() - - # Like request context, app contexts can be pushed multiple times - # but there a basic "refcount" is enough to track them. 
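# --- Editorial sketch (not part of the deleted file): has_app_context() and
# --- has_request_context() used as guards, matching the helpers defined above.
# --- The app is hypothetical.
from flask import Flask, has_app_context, has_request_context

app = Flask(__name__)

assert not has_app_context() and not has_request_context()

with app.app_context():
    assert has_app_context()
    assert not has_request_context()    # an app context alone is not a request

with app.test_request_context('/'):
    # pushing a request context implicitly pushes an app context too
    assert has_app_context() and has_request_context()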
- self._refcnt = 0 - - def push(self): - """Binds the app context to the current context.""" - self._refcnt += 1 - if hasattr(sys, 'exc_clear'): - sys.exc_clear() - _app_ctx_stack.push(self) - appcontext_pushed.send(self.app) - - def pop(self, exc=_sentinel): - """Pops the app context.""" - try: - self._refcnt -= 1 - if self._refcnt <= 0: - if exc is _sentinel: - exc = sys.exc_info()[1] - self.app.do_teardown_appcontext(exc) - finally: - rv = _app_ctx_stack.pop() - assert rv is self, 'Popped wrong app context. (%r instead of %r)' \ - % (rv, self) - appcontext_popped.send(self.app) - - def __enter__(self): - self.push() - return self - - def __exit__(self, exc_type, exc_value, tb): - self.pop(exc_value) - - if BROKEN_PYPY_CTXMGR_EXIT and exc_type is not None: - reraise(exc_type, exc_value, tb) - - -class RequestContext(object): - """The request context contains all request relevant information. It is - created at the beginning of the request and pushed to the - `_request_ctx_stack` and removed at the end of it. It will create the - URL adapter and request object for the WSGI environment provided. - - Do not attempt to use this class directly, instead use - :meth:`~flask.Flask.test_request_context` and - :meth:`~flask.Flask.request_context` to create this object. - - When the request context is popped, it will evaluate all the - functions registered on the application for teardown execution - (:meth:`~flask.Flask.teardown_request`). - - The request context is automatically popped at the end of the request - for you. In debug mode the request context is kept around if - exceptions happen so that interactive debuggers have a chance to - introspect the data. With 0.4 this can also be forced for requests - that did not fail and outside of ``DEBUG`` mode. By setting - ``'flask._preserve_context'`` to ``True`` on the WSGI environment the - context will not pop itself at the end of the request. This is used by - the :meth:`~flask.Flask.test_client` for example to implement the - deferred cleanup functionality. - - You might find this helpful for unittests where you need the - information from the context local around for a little longer. Make - sure to properly :meth:`~werkzeug.LocalStack.pop` the stack yourself in - that situation, otherwise your unittests will leak memory. - """ - - def __init__(self, app, environ, request=None): - self.app = app - if request is None: - request = app.request_class(environ) - self.request = request - self.url_adapter = app.create_url_adapter(self.request) - self.flashes = None - self.session = None - - # Request contexts can be pushed multiple times and interleaved with - # other request contexts. Now only if the last level is popped we - # get rid of them. Additionally if an application context is missing - # one is created implicitly so for each level we add this information - self._implicit_app_ctx_stack = [] - - # indicator if the context was preserved. Next time another context - # is pushed the preserved context is popped. - self.preserved = False - - # remembers the exception for pop if there is one in case the context - # preservation kicks in. - self._preserved_exc = None - - # Functions that should be executed after the request on the response - # object. These will be called before the regular "after_request" - # functions. 
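# --- Editorial sketch (not part of the deleted file): the two ways of driving
# --- AppContext.push() / pop() shown above -- explicitly, and through the
# --- context-manager protocol. The app is hypothetical.
from flask import Flask, current_app

app = Flask(__name__)

ctx = app.app_context()
ctx.push()                              # binds the app, fires appcontext_pushed
assert current_app.name == app.name
ctx.pop()                               # runs teardown_appcontext callbacks

with app.app_context():                 # __enter__/__exit__ do the same
    assert current_app.name == app.name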
- self._after_request_functions = [] - - self.match_request() - - def _get_g(self): - return _app_ctx_stack.top.g - def _set_g(self, value): - _app_ctx_stack.top.g = value - g = property(_get_g, _set_g) - del _get_g, _set_g - - def copy(self): - """Creates a copy of this request context with the same request object. - This can be used to move a request context to a different greenlet. - Because the actual request object is the same this cannot be used to - move a request context to a different thread unless access to the - request object is locked. - - .. versionadded:: 0.10 - """ - return self.__class__(self.app, - environ=self.request.environ, - request=self.request - ) - - def match_request(self): - """Can be overridden by a subclass to hook into the matching - of the request. - """ - try: - url_rule, self.request.view_args = \ - self.url_adapter.match(return_rule=True) - self.request.url_rule = url_rule - except HTTPException as e: - self.request.routing_exception = e - - def push(self): - """Binds the request context to the current context.""" - # If an exception occurs in debug mode or if context preservation is - # activated under exception situations exactly one context stays - # on the stack. The rationale is that you want to access that - # information under debug situations. However if someone forgets to - # pop that context again we want to make sure that on the next push - # it's invalidated, otherwise we run at risk that something leaks - # memory. This is usually only a problem in test suite since this - # functionality is not active in production environments. - top = _request_ctx_stack.top - if top is not None and top.preserved: - top.pop(top._preserved_exc) - - # Before we push the request context we have to ensure that there - # is an application context. - app_ctx = _app_ctx_stack.top - if app_ctx is None or app_ctx.app != self.app: - app_ctx = self.app.app_context() - app_ctx.push() - self._implicit_app_ctx_stack.append(app_ctx) - else: - self._implicit_app_ctx_stack.append(None) - - if hasattr(sys, 'exc_clear'): - sys.exc_clear() - - _request_ctx_stack.push(self) - - # Open the session at the moment that the request context is available. - # This allows a custom open_session method to use the request context. - # Only open a new session if this is the first time the request was - # pushed, otherwise stream_with_context loses the session. - if self.session is None: - session_interface = self.app.session_interface - self.session = session_interface.open_session( - self.app, self.request - ) - - if self.session is None: - self.session = session_interface.make_null_session(self.app) - - def pop(self, exc=_sentinel): - """Pops the request context and unbinds it by doing that. This will - also trigger the execution of functions registered by the - :meth:`~flask.Flask.teardown_request` decorator. - - .. versionchanged:: 0.9 - Added the `exc` argument. - """ - app_ctx = self._implicit_app_ctx_stack.pop() - - try: - clear_request = False - if not self._implicit_app_ctx_stack: - self.preserved = False - self._preserved_exc = None - if exc is _sentinel: - exc = sys.exc_info()[1] - self.app.do_teardown_request(exc) - - # If this interpreter supports clearing the exception information - # we do that now. This will only go into effect on Python 2.x, - # on 3.x it disappears automatically at the end of the exception - # stack. 
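# --- Editorial sketch (not part of the deleted file): a RequestContext created
# --- through the public test helper rather than directly, as the class
# --- docstring above advises. The URL and query string are hypothetical.
from flask import Flask, request

app = Flask(__name__)

with app.test_request_context('/hello?name=world'):
    # push() also created an implicit app context because none was active
    assert request.path == '/hello'
    assert request.args['name'] == 'world'
# pop() has now run the teardown_request / teardown_appcontext callbacks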
- if hasattr(sys, 'exc_clear'): - sys.exc_clear() - - request_close = getattr(self.request, 'close', None) - if request_close is not None: - request_close() - clear_request = True - finally: - rv = _request_ctx_stack.pop() - - # get rid of circular dependencies at the end of the request - # so that we don't require the GC to be active. - if clear_request: - rv.request.environ['werkzeug.request'] = None - - # Get rid of the app as well if necessary. - if app_ctx is not None: - app_ctx.pop(exc) - - assert rv is self, 'Popped wrong request context. ' \ - '(%r instead of %r)' % (rv, self) - - def auto_pop(self, exc): - if self.request.environ.get('flask._preserve_context') or \ - (exc is not None and self.app.preserve_context_on_exception): - self.preserved = True - self._preserved_exc = exc - else: - self.pop(exc) - - def __enter__(self): - self.push() - return self - - def __exit__(self, exc_type, exc_value, tb): - # do not pop the request stack if we are in debug mode and an - # exception happened. This will allow the debugger to still - # access the request object in the interactive shell. Furthermore - # the context can be force kept alive for the test client. - # See flask.testing for how this works. - self.auto_pop(exc_value) - - if BROKEN_PYPY_CTXMGR_EXIT and exc_type is not None: - reraise(exc_type, exc_value, tb) - - def __repr__(self): - return '<%s \'%s\' [%s] of %s>' % ( - self.__class__.__name__, - self.request.url, - self.request.method, - self.app.name, - ) diff --git a/pyextra/flask/debughelpers.py b/pyextra/flask/debughelpers.py deleted file mode 100644 index e9765f20d..000000000 --- a/pyextra/flask/debughelpers.py +++ /dev/null @@ -1,168 +0,0 @@ -# -*- coding: utf-8 -*- -""" - flask.debughelpers - ~~~~~~~~~~~~~~~~~~ - - Various helpers to make the development experience better. - - :copyright: © 2010 by the Pallets team. - :license: BSD, see LICENSE for more details. -""" - -import os -from warnings import warn - -from ._compat import implements_to_string, text_type -from .app import Flask -from .blueprints import Blueprint -from .globals import _request_ctx_stack - - -class UnexpectedUnicodeError(AssertionError, UnicodeError): - """Raised in places where we want some better error reporting for - unexpected unicode or binary data. - """ - - -@implements_to_string -class DebugFilesKeyError(KeyError, AssertionError): - """Raised from request.files during debugging. The idea is that it can - provide a better error message than just a generic KeyError/BadRequest. - """ - - def __init__(self, request, key): - form_matches = request.form.getlist(key) - buf = ['You tried to access the file "%s" in the request.files ' - 'dictionary but it does not exist. The mimetype for the request ' - 'is "%s" instead of "multipart/form-data" which means that no ' - 'file contents were transmitted. To fix this error you should ' - 'provide enctype="multipart/form-data" in your form.' % - (key, request.mimetype)] - if form_matches: - buf.append('\n\nThe browser instead transmitted some file names. ' - 'This was submitted: %s' % ', '.join('"%s"' % x - for x in form_matches)) - self.msg = ''.join(buf) - - def __str__(self): - return self.msg - - -class FormDataRoutingRedirect(AssertionError): - """This exception is raised by Flask in debug mode if it detects a - redirect caused by the routing system when the request method is not - GET, HEAD or OPTIONS. Reasoning: form data will be dropped. 
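# --- Editorial sketch (not part of the deleted file): the situation described
# --- in the FormDataRoutingRedirect docstring above -- a POST sent to a URL
# --- that the router redirects because of a missing trailing slash. The route
# --- is hypothetical.
from flask import Flask

app = Flask(__name__)

@app.route('/items/', methods=['POST'])
def create_item():
    return 'created'

client = app.test_client()
rv = client.post('/items')              # router answers with a redirect to '/items/'
assert rv.status_code in (301, 308)     # exact code depends on the Werkzeug version
# With debug mode on, Flask raises FormDataRoutingRedirect here instead so the
# silently dropped form data does not go unnoticed.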
- """ - - def __init__(self, request): - exc = request.routing_exception - buf = ['A request was sent to this URL (%s) but a redirect was ' - 'issued automatically by the routing system to "%s".' - % (request.url, exc.new_url)] - - # In case just a slash was appended we can be extra helpful - if request.base_url + '/' == exc.new_url.split('?')[0]: - buf.append(' The URL was defined with a trailing slash so ' - 'Flask will automatically redirect to the URL ' - 'with the trailing slash if it was accessed ' - 'without one.') - - buf.append(' Make sure to directly send your %s-request to this URL ' - 'since we can\'t make browsers or HTTP clients redirect ' - 'with form data reliably or without user interaction.' % - request.method) - buf.append('\n\nNote: this exception is only raised in debug mode') - AssertionError.__init__(self, ''.join(buf).encode('utf-8')) - - -def attach_enctype_error_multidict(request): - """Since Flask 0.8 we're monkeypatching the files object in case a - request is detected that does not use multipart form data but the files - object is accessed. - """ - oldcls = request.files.__class__ - class newcls(oldcls): - def __getitem__(self, key): - try: - return oldcls.__getitem__(self, key) - except KeyError: - if key not in request.form: - raise - raise DebugFilesKeyError(request, key) - newcls.__name__ = oldcls.__name__ - newcls.__module__ = oldcls.__module__ - request.files.__class__ = newcls - - -def _dump_loader_info(loader): - yield 'class: %s.%s' % (type(loader).__module__, type(loader).__name__) - for key, value in sorted(loader.__dict__.items()): - if key.startswith('_'): - continue - if isinstance(value, (tuple, list)): - if not all(isinstance(x, (str, text_type)) for x in value): - continue - yield '%s:' % key - for item in value: - yield ' - %s' % item - continue - elif not isinstance(value, (str, text_type, int, float, bool)): - continue - yield '%s: %r' % (key, value) - - -def explain_template_loading_attempts(app, template, attempts): - """This should help developers understand what failed""" - info = ['Locating template "%s":' % template] - total_found = 0 - blueprint = None - reqctx = _request_ctx_stack.top - if reqctx is not None and reqctx.request.blueprint is not None: - blueprint = reqctx.request.blueprint - - for idx, (loader, srcobj, triple) in enumerate(attempts): - if isinstance(srcobj, Flask): - src_info = 'application "%s"' % srcobj.import_name - elif isinstance(srcobj, Blueprint): - src_info = 'blueprint "%s" (%s)' % (srcobj.name, - srcobj.import_name) - else: - src_info = repr(srcobj) - - info.append('% 5d: trying loader of %s' % ( - idx + 1, src_info)) - - for line in _dump_loader_info(loader): - info.append(' %s' % line) - - if triple is None: - detail = 'no match' - else: - detail = 'found (%r)' % (triple[1] or '') - total_found += 1 - info.append(' -> %s' % detail) - - seems_fishy = False - if total_found == 0: - info.append('Error: the template could not be found.') - seems_fishy = True - elif total_found > 1: - info.append('Warning: multiple loaders returned a match for the template.') - seems_fishy = True - - if blueprint is not None and seems_fishy: - info.append(' The template was looked up from an endpoint that ' - 'belongs to the blueprint "%s".' 
% blueprint) - info.append(' Maybe you did not place a template in the right folder?') - info.append(' See http://flask.pocoo.org/docs/blueprints/#templates') - - app.logger.info('\n'.join(info)) - - -def explain_ignored_app_run(): - if os.environ.get('WERKZEUG_RUN_MAIN') != 'true': - warn(Warning('Silently ignoring app.run() because the ' - 'application is run from the flask command line ' - 'executable. Consider putting app.run() behind an ' - 'if __name__ == "__main__" guard to silence this ' - 'warning.'), stacklevel=3) diff --git a/pyextra/flask/globals.py b/pyextra/flask/globals.py deleted file mode 100644 index 7d50a6f6d..000000000 --- a/pyextra/flask/globals.py +++ /dev/null @@ -1,61 +0,0 @@ -# -*- coding: utf-8 -*- -""" - flask.globals - ~~~~~~~~~~~~~ - - Defines all the global objects that are proxies to the current - active context. - - :copyright: © 2010 by the Pallets team. - :license: BSD, see LICENSE for more details. -""" - -from functools import partial -from werkzeug.local import LocalStack, LocalProxy - - -_request_ctx_err_msg = '''\ -Working outside of request context. - -This typically means that you attempted to use functionality that needed -an active HTTP request. Consult the documentation on testing for -information about how to avoid this problem.\ -''' -_app_ctx_err_msg = '''\ -Working outside of application context. - -This typically means that you attempted to use functionality that needed -to interface with the current application object in some way. To solve -this, set up an application context with app.app_context(). See the -documentation for more information.\ -''' - - -def _lookup_req_object(name): - top = _request_ctx_stack.top - if top is None: - raise RuntimeError(_request_ctx_err_msg) - return getattr(top, name) - - -def _lookup_app_object(name): - top = _app_ctx_stack.top - if top is None: - raise RuntimeError(_app_ctx_err_msg) - return getattr(top, name) - - -def _find_app(): - top = _app_ctx_stack.top - if top is None: - raise RuntimeError(_app_ctx_err_msg) - return top.app - - -# context locals -_request_ctx_stack = LocalStack() -_app_ctx_stack = LocalStack() -current_app = LocalProxy(_find_app) -request = LocalProxy(partial(_lookup_req_object, 'request')) -session = LocalProxy(partial(_lookup_req_object, 'session')) -g = LocalProxy(partial(_lookup_app_object, 'g')) diff --git a/pyextra/flask/helpers.py b/pyextra/flask/helpers.py deleted file mode 100644 index df0b91fc5..000000000 --- a/pyextra/flask/helpers.py +++ /dev/null @@ -1,1044 +0,0 @@ -# -*- coding: utf-8 -*- -""" - flask.helpers - ~~~~~~~~~~~~~ - - Implements various helpers. - - :copyright: © 2010 by the Pallets team. - :license: BSD, see LICENSE for more details. -""" - -import os -import socket -import sys -import pkgutil -import posixpath -import mimetypes -from time import time -from zlib import adler32 -from threading import RLock -import unicodedata -from werkzeug.routing import BuildError -from functools import update_wrapper - -from werkzeug.urls import url_quote -from werkzeug.datastructures import Headers, Range -from werkzeug.exceptions import BadRequest, NotFound, \ - RequestedRangeNotSatisfiable - -from werkzeug.wsgi import wrap_file -from jinja2 import FileSystemLoader - -from .signals import message_flashed -from .globals import session, _request_ctx_stack, _app_ctx_stack, \ - current_app, request -from ._compat import string_types, text_type, PY2 - -# sentinel -_missing = object() - - -# what separators does this operating system provide that are not a slash? 
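# --- Editorial sketch (not part of the deleted file): what the LocalProxy
# --- lookups defined in flask.globals above do with no context on the stack,
# --- and after one is pushed. The app is hypothetical.
from flask import Flask, current_app

app = Flask(__name__)

try:
    current_app.name                    # _find_app() with an empty stack
except RuntimeError as exc:
    assert 'Working outside of application context' in str(exc)

with app.app_context():
    assert current_app.name == app.name  # the proxy now resolves to the app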
-# this is used by the send_from_directory function to ensure that nobody is -# able to access files from outside the filesystem. -_os_alt_seps = list(sep for sep in [os.path.sep, os.path.altsep] - if sep not in (None, '/')) - - -def get_env(): - """Get the environment the app is running in, indicated by the - :envvar:`FLASK_ENV` environment variable. The default is - ``'production'``. - """ - return os.environ.get('FLASK_ENV') or 'production' - - -def get_debug_flag(): - """Get whether debug mode should be enabled for the app, indicated - by the :envvar:`FLASK_DEBUG` environment variable. The default is - ``True`` if :func:`.get_env` returns ``'development'``, or ``False`` - otherwise. - """ - val = os.environ.get('FLASK_DEBUG') - - if not val: - return get_env() == 'development' - - return val.lower() not in ('0', 'false', 'no') - - -def get_load_dotenv(default=True): - """Get whether the user has disabled loading dotenv files by setting - :envvar:`FLASK_SKIP_DOTENV`. The default is ``True``, load the - files. - - :param default: What to return if the env var isn't set. - """ - val = os.environ.get('FLASK_SKIP_DOTENV') - - if not val: - return default - - return val.lower() in ('0', 'false', 'no') - - -def _endpoint_from_view_func(view_func): - """Internal helper that returns the default endpoint for a given - function. This always is the function name. - """ - assert view_func is not None, 'expected view func if endpoint ' \ - 'is not provided.' - return view_func.__name__ - - -def stream_with_context(generator_or_function): - """Request contexts disappear when the response is started on the server. - This is done for efficiency reasons and to make it less likely to encounter - memory leaks with badly written WSGI middlewares. The downside is that if - you are using streamed responses, the generator cannot access request bound - information any more. - - This function however can help you keep the context around for longer:: - - from flask import stream_with_context, request, Response - - @app.route('/stream') - def streamed_response(): - @stream_with_context - def generate(): - yield 'Hello ' - yield request.args['name'] - yield '!' - return Response(generate()) - - Alternatively it can also be used around a specific generator:: - - from flask import stream_with_context, request, Response - - @app.route('/stream') - def streamed_response(): - def generate(): - yield 'Hello ' - yield request.args['name'] - yield '!' - return Response(stream_with_context(generate())) - - .. versionadded:: 0.9 - """ - try: - gen = iter(generator_or_function) - except TypeError: - def decorator(*args, **kwargs): - gen = generator_or_function(*args, **kwargs) - return stream_with_context(gen) - return update_wrapper(decorator, generator_or_function) - - def generator(): - ctx = _request_ctx_stack.top - if ctx is None: - raise RuntimeError('Attempted to stream with context but ' - 'there was no context in the first place to keep around.') - with ctx: - # Dummy sentinel. Has to be inside the context block or we're - # not actually keeping the context around. - yield None - - # The try/finally is here so that if someone passes a WSGI level - # iterator in we're still running the cleanup logic. Generators - # don't need that because they are closed on their destruction - # automatically. - try: - for item in gen: - yield item - finally: - if hasattr(gen, 'close'): - gen.close() - - # The trick is to start the generator. 
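# --- Editorial sketch (not part of the deleted file): the priming trick
# --- described in the surrounding comments, reduced to a plain generator so
# --- the control flow is visible outside Flask.
def _demo():
    # setup that must happen before the caller starts consuming
    print('context pushed')
    yield None                  # dummy sentinel, discarded by the primer
    for item in ('a', 'b'):
        yield item

gen = _demo()
next(gen)                       # prints 'context pushed', discards the sentinel
assert list(gen) == ['a', 'b']  # the caller only ever sees the real items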
Then the code execution runs until - # the first dummy None is yielded at which point the context was already - # pushed. This item is discarded. Then when the iteration continues the - # real generator is executed. - wrapped_g = generator() - next(wrapped_g) - return wrapped_g - - -def make_response(*args): - """Sometimes it is necessary to set additional headers in a view. Because - views do not have to return response objects but can return a value that - is converted into a response object by Flask itself, it becomes tricky to - add headers to it. This function can be called instead of using a return - and you will get a response object which you can use to attach headers. - - If view looked like this and you want to add a new header:: - - def index(): - return render_template('index.html', foo=42) - - You can now do something like this:: - - def index(): - response = make_response(render_template('index.html', foo=42)) - response.headers['X-Parachutes'] = 'parachutes are cool' - return response - - This function accepts the very same arguments you can return from a - view function. This for example creates a response with a 404 error - code:: - - response = make_response(render_template('not_found.html'), 404) - - The other use case of this function is to force the return value of a - view function into a response which is helpful with view - decorators:: - - response = make_response(view_function()) - response.headers['X-Parachutes'] = 'parachutes are cool' - - Internally this function does the following things: - - - if no arguments are passed, it creates a new response argument - - if one argument is passed, :meth:`flask.Flask.make_response` - is invoked with it. - - if more than one argument is passed, the arguments are passed - to the :meth:`flask.Flask.make_response` function as tuple. - - .. versionadded:: 0.6 - """ - if not args: - return current_app.response_class() - if len(args) == 1: - args = args[0] - return current_app.make_response(args) - - -def url_for(endpoint, **values): - """Generates a URL to the given endpoint with the method provided. - - Variable arguments that are unknown to the target endpoint are appended - to the generated URL as query arguments. If the value of a query argument - is ``None``, the whole pair is skipped. In case blueprints are active - you can shortcut references to the same blueprint by prefixing the - local endpoint with a dot (``.``). - - This will reference the index function local to the current blueprint:: - - url_for('.index') - - For more information, head over to the :ref:`Quickstart `. - - To integrate applications, :class:`Flask` has a hook to intercept URL build - errors through :attr:`Flask.url_build_error_handlers`. The `url_for` - function results in a :exc:`~werkzeug.routing.BuildError` when the current - app does not have a URL for the given endpoint and values. When it does, the - :data:`~flask.current_app` calls its :attr:`~Flask.url_build_error_handlers` if - it is not ``None``, which can return a string to use as the result of - `url_for` (instead of `url_for`'s default to raise the - :exc:`~werkzeug.routing.BuildError` exception) or re-raise the exception. - An example:: - - def external_url_handler(error, endpoint, values): - "Looks up an external URL when `url_for` cannot build a URL." - # This is an example of hooking the build_error_handler. - # Here, lookup_url is some utility function you've built - # which looks up the endpoint in some external URL registry. 
- url = lookup_url(endpoint, **values) - if url is None: - # External lookup did not have a URL. - # Re-raise the BuildError, in context of original traceback. - exc_type, exc_value, tb = sys.exc_info() - if exc_value is error: - raise exc_type, exc_value, tb - else: - raise error - # url_for will use this result, instead of raising BuildError. - return url - - app.url_build_error_handlers.append(external_url_handler) - - Here, `error` is the instance of :exc:`~werkzeug.routing.BuildError`, and - `endpoint` and `values` are the arguments passed into `url_for`. Note - that this is for building URLs outside the current application, and not for - handling 404 NotFound errors. - - .. versionadded:: 0.10 - The `_scheme` parameter was added. - - .. versionadded:: 0.9 - The `_anchor` and `_method` parameters were added. - - .. versionadded:: 0.9 - Calls :meth:`Flask.handle_build_error` on - :exc:`~werkzeug.routing.BuildError`. - - :param endpoint: the endpoint of the URL (name of the function) - :param values: the variable arguments of the URL rule - :param _external: if set to ``True``, an absolute URL is generated. Server - address can be changed via ``SERVER_NAME`` configuration variable which - defaults to `localhost`. - :param _scheme: a string specifying the desired URL scheme. The `_external` - parameter must be set to ``True`` or a :exc:`ValueError` is raised. The default - behavior uses the same scheme as the current request, or - ``PREFERRED_URL_SCHEME`` from the :ref:`app configuration ` if no - request context is available. As of Werkzeug 0.10, this also can be set - to an empty string to build protocol-relative URLs. - :param _anchor: if provided this is added as anchor to the URL. - :param _method: if provided this explicitly specifies an HTTP method. - """ - appctx = _app_ctx_stack.top - reqctx = _request_ctx_stack.top - - if appctx is None: - raise RuntimeError( - 'Attempted to generate a URL without the application context being' - ' pushed. This has to be executed when application context is' - ' available.' - ) - - # If request specific information is available we have some extra - # features that support "relative" URLs. - if reqctx is not None: - url_adapter = reqctx.url_adapter - blueprint_name = request.blueprint - - if endpoint[:1] == '.': - if blueprint_name is not None: - endpoint = blueprint_name + endpoint - else: - endpoint = endpoint[1:] - - external = values.pop('_external', False) - - # Otherwise go with the url adapter from the appctx and make - # the URLs external by default. - else: - url_adapter = appctx.url_adapter - - if url_adapter is None: - raise RuntimeError( - 'Application was not able to create a URL adapter for request' - ' independent URL generation. You might be able to fix this by' - ' setting the SERVER_NAME config variable.' - ) - - external = values.pop('_external', True) - - anchor = values.pop('_anchor', None) - method = values.pop('_method', None) - scheme = values.pop('_scheme', None) - appctx.app.inject_url_defaults(endpoint, values) - - # This is not the best way to deal with this but currently the - # underlying Werkzeug router does not support overriding the scheme on - # a per build call basis. 
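# --- Editorial sketch (not part of the deleted file): the url_for() parameters
# --- handled in the code above, exercised without a request context. The
# --- endpoint and SERVER_NAME are hypothetical.
from flask import Flask, url_for

app = Flask(__name__)
app.config['SERVER_NAME'] = 'example.com'

@app.route('/user/<name>')
def profile(name):
    return name

with app.app_context():
    # request-independent generation defaults to external URLs
    assert url_for('profile', name='ada') == 'http://example.com/user/ada'
    assert url_for('profile', name='ada', _scheme='https', _external=True) \
        == 'https://example.com/user/ada'
    assert url_for('profile', name='ada', _anchor='top').endswith('#top')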
- old_scheme = None - if scheme is not None: - if not external: - raise ValueError('When specifying _scheme, _external must be True') - old_scheme = url_adapter.url_scheme - url_adapter.url_scheme = scheme - - try: - try: - rv = url_adapter.build(endpoint, values, method=method, - force_external=external) - finally: - if old_scheme is not None: - url_adapter.url_scheme = old_scheme - except BuildError as error: - # We need to inject the values again so that the app callback can - # deal with that sort of stuff. - values['_external'] = external - values['_anchor'] = anchor - values['_method'] = method - values['_scheme'] = scheme - return appctx.app.handle_url_build_error(error, endpoint, values) - - if anchor is not None: - rv += '#' + url_quote(anchor) - return rv - - -def get_template_attribute(template_name, attribute): - """Loads a macro (or variable) a template exports. This can be used to - invoke a macro from within Python code. If you for example have a - template named :file:`_cider.html` with the following contents: - - .. sourcecode:: html+jinja - - {% macro hello(name) %}Hello {{ name }}!{% endmacro %} - - You can access this from Python code like this:: - - hello = get_template_attribute('_cider.html', 'hello') - return hello('World') - - .. versionadded:: 0.2 - - :param template_name: the name of the template - :param attribute: the name of the variable of macro to access - """ - return getattr(current_app.jinja_env.get_template(template_name).module, - attribute) - - -def flash(message, category='message'): - """Flashes a message to the next request. In order to remove the - flashed message from the session and to display it to the user, - the template has to call :func:`get_flashed_messages`. - - .. versionchanged:: 0.3 - `category` parameter added. - - :param message: the message to be flashed. - :param category: the category for the message. The following values - are recommended: ``'message'`` for any kind of message, - ``'error'`` for errors, ``'info'`` for information - messages and ``'warning'`` for warnings. However any - kind of string can be used as category. - """ - # Original implementation: - # - # session.setdefault('_flashes', []).append((category, message)) - # - # This assumed that changes made to mutable structures in the session are - # always in sync with the session object, which is not true for session - # implementations that use external storage for keeping their keys/values. - flashes = session.get('_flashes', []) - flashes.append((category, message)) - session['_flashes'] = flashes - message_flashed.send(current_app._get_current_object(), - message=message, category=category) - - -def get_flashed_messages(with_categories=False, category_filter=[]): - """Pulls all flashed messages from the session and returns them. - Further calls in the same request to the function will return - the same messages. By default just the messages are returned, - but when `with_categories` is set to ``True``, the return value will - be a list of tuples in the form ``(category, message)`` instead. - - Filter the flashed messages to one or more categories by providing those - categories in `category_filter`. This allows rendering categories in - separate html blocks. The `with_categories` and `category_filter` - arguments are distinct: - - * `with_categories` controls whether categories are returned with message - text (``True`` gives a tuple, where ``False`` gives just the message text). 
- * `category_filter` filters the messages down to only those matching the - provided categories. - - See :ref:`message-flashing-pattern` for examples. - - .. versionchanged:: 0.3 - `with_categories` parameter added. - - .. versionchanged:: 0.9 - `category_filter` parameter added. - - :param with_categories: set to ``True`` to also receive categories. - :param category_filter: whitelist of categories to limit return values - """ - flashes = _request_ctx_stack.top.flashes - if flashes is None: - _request_ctx_stack.top.flashes = flashes = session.pop('_flashes') \ - if '_flashes' in session else [] - if category_filter: - flashes = list(filter(lambda f: f[0] in category_filter, flashes)) - if not with_categories: - return [x[1] for x in flashes] - return flashes - - -def send_file(filename_or_fp, mimetype=None, as_attachment=False, - attachment_filename=None, add_etags=True, - cache_timeout=None, conditional=False, last_modified=None): - """Sends the contents of a file to the client. This will use the - most efficient method available and configured. By default it will - try to use the WSGI server's file_wrapper support. Alternatively - you can set the application's :attr:`~Flask.use_x_sendfile` attribute - to ``True`` to directly emit an ``X-Sendfile`` header. This however - requires support of the underlying webserver for ``X-Sendfile``. - - By default it will try to guess the mimetype for you, but you can - also explicitly provide one. For extra security you probably want - to send certain files as attachment (HTML for instance). The mimetype - guessing requires a `filename` or an `attachment_filename` to be - provided. - - ETags will also be attached automatically if a `filename` is provided. You - can turn this off by setting `add_etags=False`. - - If `conditional=True` and `filename` is provided, this method will try to - upgrade the response stream to support range requests. This will allow - the request to be answered with partial content response. - - Please never pass filenames to this function from user sources; - you should use :func:`send_from_directory` instead. - - .. versionadded:: 0.2 - - .. versionadded:: 0.5 - The `add_etags`, `cache_timeout` and `conditional` parameters were - added. The default behavior is now to attach etags. - - .. versionchanged:: 0.7 - mimetype guessing and etag support for file objects was - deprecated because it was unreliable. Pass a filename if you are - able to, otherwise attach an etag yourself. This functionality - will be removed in Flask 1.0 - - .. versionchanged:: 0.9 - cache_timeout pulls its default from application config, when None. - - .. versionchanged:: 0.12 - The filename is no longer automatically inferred from file objects. If - you want to use automatic mimetype and etag support, pass a filepath via - `filename_or_fp` or `attachment_filename`. - - .. versionchanged:: 0.12 - The `attachment_filename` is preferred over `filename` for MIME-type - detection. - - .. versionchanged:: 1.0 - UTF-8 filenames, as specified in `RFC 2231`_, are supported. - - .. _RFC 2231: https://tools.ietf.org/html/rfc2231#section-4 - - :param filename_or_fp: the filename of the file to send. - This is relative to the :attr:`~Flask.root_path` - if a relative path is specified. - Alternatively a file object might be provided in - which case ``X-Sendfile`` might not work and fall - back to the traditional method. Make sure that the - file pointer is positioned at the start of data to - send before calling :func:`send_file`. 
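# --- Editorial sketch (not part of the deleted file): a typical send_file()
# --- call matching the parameters documented above. The file path is a
# --- hypothetical placeholder relative to the application root.
from flask import Flask, send_file

app = Flask(__name__)

@app.route('/report')
def report():
    # mimetype is guessed from the attachment name; conditional=True enables
    # Range requests and 304 handling
    return send_file('static/report.pdf',
                     as_attachment=True,
                     attachment_filename='report.pdf',
                     conditional=True)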
- :param mimetype: the mimetype of the file if provided. If a file path is - given, auto detection happens as fallback, otherwise an - error will be raised. - :param as_attachment: set to ``True`` if you want to send this file with - a ``Content-Disposition: attachment`` header. - :param attachment_filename: the filename for the attachment if it - differs from the file's filename. - :param add_etags: set to ``False`` to disable attaching of etags. - :param conditional: set to ``True`` to enable conditional responses. - - :param cache_timeout: the timeout in seconds for the headers. When ``None`` - (default), this value is set by - :meth:`~Flask.get_send_file_max_age` of - :data:`~flask.current_app`. - :param last_modified: set the ``Last-Modified`` header to this value, - a :class:`~datetime.datetime` or timestamp. - If a file was passed, this overrides its mtime. - """ - mtime = None - fsize = None - if isinstance(filename_or_fp, string_types): - filename = filename_or_fp - if not os.path.isabs(filename): - filename = os.path.join(current_app.root_path, filename) - file = None - if attachment_filename is None: - attachment_filename = os.path.basename(filename) - else: - file = filename_or_fp - filename = None - - if mimetype is None: - if attachment_filename is not None: - mimetype = mimetypes.guess_type(attachment_filename)[0] \ - or 'application/octet-stream' - - if mimetype is None: - raise ValueError( - 'Unable to infer MIME-type because no filename is available. ' - 'Please set either `attachment_filename`, pass a filepath to ' - '`filename_or_fp` or set your own MIME-type via `mimetype`.' - ) - - headers = Headers() - if as_attachment: - if attachment_filename is None: - raise TypeError('filename unavailable, required for ' - 'sending as attachment') - - try: - attachment_filename = attachment_filename.encode('latin-1') - except UnicodeEncodeError: - filenames = { - 'filename': unicodedata.normalize( - 'NFKD', attachment_filename).encode('latin-1', 'ignore'), - 'filename*': "UTF-8''%s" % url_quote(attachment_filename), - } - else: - filenames = {'filename': attachment_filename} - - headers.add('Content-Disposition', 'attachment', **filenames) - - if current_app.use_x_sendfile and filename: - if file is not None: - file.close() - headers['X-Sendfile'] = filename - fsize = os.path.getsize(filename) - headers['Content-Length'] = fsize - data = None - else: - if file is None: - file = open(filename, 'rb') - mtime = os.path.getmtime(filename) - fsize = os.path.getsize(filename) - headers['Content-Length'] = fsize - data = wrap_file(request.environ, file) - - rv = current_app.response_class(data, mimetype=mimetype, headers=headers, - direct_passthrough=True) - - if last_modified is not None: - rv.last_modified = last_modified - elif mtime is not None: - rv.last_modified = mtime - - rv.cache_control.public = True - if cache_timeout is None: - cache_timeout = current_app.get_send_file_max_age(filename) - if cache_timeout is not None: - rv.cache_control.max_age = cache_timeout - rv.expires = int(time() + cache_timeout) - - if add_etags and filename is not None: - from warnings import warn - - try: - rv.set_etag('%s-%s-%s' % ( - os.path.getmtime(filename), - os.path.getsize(filename), - adler32( - filename.encode('utf-8') if isinstance(filename, text_type) - else filename - ) & 0xffffffff - )) - except OSError: - warn('Access %s failed, maybe it does not exist, so ignore etags in ' - 'headers' % filename, stacklevel=2) - - if conditional: - try: - rv = rv.make_conditional(request, 
accept_ranges=True, - complete_length=fsize) - except RequestedRangeNotSatisfiable: - if file is not None: - file.close() - raise - # make sure we don't send x-sendfile for servers that - # ignore the 304 status code for x-sendfile. - if rv.status_code == 304: - rv.headers.pop('x-sendfile', None) - return rv - - -def safe_join(directory, *pathnames): - """Safely join `directory` and zero or more untrusted `pathnames` - components. - - Example usage:: - - @app.route('/wiki/') - def wiki_page(filename): - filename = safe_join(app.config['WIKI_FOLDER'], filename) - with open(filename, 'rb') as fd: - content = fd.read() # Read and process the file content... - - :param directory: the trusted base directory. - :param pathnames: the untrusted pathnames relative to that directory. - :raises: :class:`~werkzeug.exceptions.NotFound` if one or more passed - paths fall out of its boundaries. - """ - - parts = [directory] - - for filename in pathnames: - if filename != '': - filename = posixpath.normpath(filename) - - if ( - any(sep in filename for sep in _os_alt_seps) - or os.path.isabs(filename) - or filename == '..' - or filename.startswith('../') - ): - raise NotFound() - - parts.append(filename) - - return posixpath.join(*parts) - - -def send_from_directory(directory, filename, **options): - """Send a file from a given directory with :func:`send_file`. This - is a secure way to quickly expose static files from an upload folder - or something similar. - - Example usage:: - - @app.route('/uploads/') - def download_file(filename): - return send_from_directory(app.config['UPLOAD_FOLDER'], - filename, as_attachment=True) - - .. admonition:: Sending files and Performance - - It is strongly recommended to activate either ``X-Sendfile`` support in - your webserver or (if no authentication happens) to tell the webserver - to serve files for the given path on its own without calling into the - web application for improved performance. - - .. versionadded:: 0.5 - - :param directory: the directory where all the files are stored. - :param filename: the filename relative to that directory to - download. - :param options: optional keyword arguments that are directly - forwarded to :func:`send_file`. - """ - filename = safe_join(directory, filename) - if not os.path.isabs(filename): - filename = os.path.join(current_app.root_path, filename) - try: - if not os.path.isfile(filename): - raise NotFound() - except (TypeError, ValueError): - raise BadRequest() - options.setdefault('conditional', True) - return send_file(filename, **options) - - -def get_root_path(import_name): - """Returns the path to a package or cwd if that cannot be found. This - returns the path of a package or the folder that contains a module. - - Not to be confused with the package path returned by :func:`find_package`. - """ - # Module already imported and has a file attribute. Use that first. - mod = sys.modules.get(import_name) - if mod is not None and hasattr(mod, '__file__'): - return os.path.dirname(os.path.abspath(mod.__file__)) - - # Next attempt: check the loader. - loader = pkgutil.get_loader(import_name) - - # Loader does not exist or we're referring to an unloaded main module - # or a main module without path (interactive sessions), go with the - # current working directory. - if loader is None or import_name == '__main__': - return os.getcwd() - - # For .egg, zipimporter does not have get_filename until Python 2.7. - # Some other loaders might exhibit the same behavior. 
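# Illustrative sketch (hedged, not part of the deleted module): typical use of
# send_file() and send_from_directory() as documented above. The app name,
# route paths and 'UPLOAD_FOLDER' value are assumptions for the example only.
from flask import Flask, send_file, send_from_directory

app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = '/srv/uploads'          # assumed location

@app.route('/report')
def report():
    # The path is resolved relative to app.root_path; conditional=True lets
    # the response answer HTTP range requests with partial content.
    return send_file('static/report.pdf', conditional=True)

@app.route('/uploads/<path:name>')
def uploaded_file(name):
    # Never feed user input to send_file() directly; send_from_directory()
    # runs the untrusted name through safe_join() first.
    return send_from_directory(app.config['UPLOAD_FOLDER'], name,
                               as_attachment=True)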
- if hasattr(loader, 'get_filename'): - filepath = loader.get_filename(import_name) - else: - # Fall back to imports. - __import__(import_name) - mod = sys.modules[import_name] - filepath = getattr(mod, '__file__', None) - - # If we don't have a filepath it might be because we are a - # namespace package. In this case we pick the root path from the - # first module that is contained in our package. - if filepath is None: - raise RuntimeError('No root path can be found for the provided ' - 'module "%s". This can happen because the ' - 'module came from an import hook that does ' - 'not provide file name information or because ' - 'it\'s a namespace package. In this case ' - 'the root path needs to be explicitly ' - 'provided.' % import_name) - - # filepath is import_name.py for a module, or __init__.py for a package. - return os.path.dirname(os.path.abspath(filepath)) - - -def _matching_loader_thinks_module_is_package(loader, mod_name): - """Given the loader that loaded a module and the module this function - attempts to figure out if the given module is actually a package. - """ - # If the loader can tell us if something is a package, we can - # directly ask the loader. - if hasattr(loader, 'is_package'): - return loader.is_package(mod_name) - # importlib's namespace loaders do not have this functionality but - # all the modules it loads are packages, so we can take advantage of - # this information. - elif (loader.__class__.__module__ == '_frozen_importlib' and - loader.__class__.__name__ == 'NamespaceLoader'): - return True - # Otherwise we need to fail with an error that explains what went - # wrong. - raise AttributeError( - ('%s.is_package() method is missing but is required by Flask of ' - 'PEP 302 import hooks. If you do not use import hooks and ' - 'you encounter this error please file a bug against Flask.') % - loader.__class__.__name__) - - -def find_package(import_name): - """Finds a package and returns the prefix (or None if the package is - not installed) as well as the folder that contains the package or - module as a tuple. The package path returned is the module that would - have to be added to the pythonpath in order to make it possible to - import the module. The prefix is the path below which a UNIX like - folder structure exists (lib, share etc.). - """ - root_mod_name = import_name.split('.')[0] - loader = pkgutil.get_loader(root_mod_name) - if loader is None or import_name == '__main__': - # import name is not found, or interactive/main module - package_path = os.getcwd() - else: - # For .egg, zipimporter does not have get_filename until Python 2.7. - if hasattr(loader, 'get_filename'): - filename = loader.get_filename(root_mod_name) - elif hasattr(loader, 'archive'): - # zipimporter's loader.archive points to the .egg or .zip - # archive filename is dropped in call to dirname below. - filename = loader.archive - else: - # At least one loader is missing both get_filename and archive: - # Google App Engine's HardenedModulesHook - # - # Fall back to imports. - __import__(import_name) - filename = sys.modules[import_name].__file__ - package_path = os.path.abspath(os.path.dirname(filename)) - - # In case the root module is a package we need to chop of the - # rightmost part. This needs to go through a helper function - # because of python 3.3 namespace packages. 
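# Illustrative sketch (hedged): what the two helpers above report for a
# package. The paths in the comments are examples only -- actual values
# depend on how the package was installed.
#
#   from flask.helpers import get_root_path, find_package
#
#   get_root_path('flask')
#   # -> directory containing the flask package, e.g.
#   #    '/usr/lib/python2.7/site-packages/flask'
#
#   prefix, package_path = find_package('flask')
#   # package_path -> the directory that would need to be on sys.path for
#   #                 'import flask' to work
#   # prefix       -> the installation prefix below which lib/, share/ etc.
#   #                 live, or None if the package is not installed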
- if _matching_loader_thinks_module_is_package( - loader, root_mod_name): - package_path = os.path.dirname(package_path) - - site_parent, site_folder = os.path.split(package_path) - py_prefix = os.path.abspath(sys.prefix) - if package_path.startswith(py_prefix): - return py_prefix, package_path - elif site_folder.lower() == 'site-packages': - parent, folder = os.path.split(site_parent) - # Windows like installations - if folder.lower() == 'lib': - base_dir = parent - # UNIX like installations - elif os.path.basename(parent).lower() == 'lib': - base_dir = os.path.dirname(parent) - else: - base_dir = site_parent - return base_dir, package_path - return None, package_path - - -class locked_cached_property(object): - """A decorator that converts a function into a lazy property. The - function wrapped is called the first time to retrieve the result - and then that calculated result is used the next time you access - the value. Works like the one in Werkzeug but has a lock for - thread safety. - """ - - def __init__(self, func, name=None, doc=None): - self.__name__ = name or func.__name__ - self.__module__ = func.__module__ - self.__doc__ = doc or func.__doc__ - self.func = func - self.lock = RLock() - - def __get__(self, obj, type=None): - if obj is None: - return self - with self.lock: - value = obj.__dict__.get(self.__name__, _missing) - if value is _missing: - value = self.func(obj) - obj.__dict__[self.__name__] = value - return value - - -class _PackageBoundObject(object): - #: The name of the package or module that this app belongs to. Do not - #: change this once it is set by the constructor. - import_name = None - - #: Location of the template files to be added to the template lookup. - #: ``None`` if templates should not be added. - template_folder = None - - #: Absolute path to the package on the filesystem. Used to look up - #: resources contained in the package. - root_path = None - - def __init__(self, import_name, template_folder=None, root_path=None): - self.import_name = import_name - self.template_folder = template_folder - - if root_path is None: - root_path = get_root_path(self.import_name) - - self.root_path = root_path - self._static_folder = None - self._static_url_path = None - - def _get_static_folder(self): - if self._static_folder is not None: - return os.path.join(self.root_path, self._static_folder) - - def _set_static_folder(self, value): - self._static_folder = value - - static_folder = property( - _get_static_folder, _set_static_folder, - doc='The absolute path to the configured static folder.' - ) - del _get_static_folder, _set_static_folder - - def _get_static_url_path(self): - if self._static_url_path is not None: - return self._static_url_path - - if self.static_folder is not None: - return '/' + os.path.basename(self.static_folder) - - def _set_static_url_path(self, value): - self._static_url_path = value - - static_url_path = property( - _get_static_url_path, _set_static_url_path, - doc='The URL prefix that the static route will be registered for.' - ) - del _get_static_url_path, _set_static_url_path - - @property - def has_static_folder(self): - """This is ``True`` if the package bound object's container has a - folder for static files. - - .. versionadded:: 0.5 - """ - return self.static_folder is not None - - @locked_cached_property - def jinja_loader(self): - """The Jinja loader for this package bound object. - - .. 
versionadded:: 0.5 - """ - if self.template_folder is not None: - return FileSystemLoader(os.path.join(self.root_path, - self.template_folder)) - - def get_send_file_max_age(self, filename): - """Provides default cache_timeout for the :func:`send_file` functions. - - By default, this function returns ``SEND_FILE_MAX_AGE_DEFAULT`` from - the configuration of :data:`~flask.current_app`. - - Static file functions such as :func:`send_from_directory` use this - function, and :func:`send_file` calls this function on - :data:`~flask.current_app` when the given cache_timeout is ``None``. If a - cache_timeout is given in :func:`send_file`, that timeout is used; - otherwise, this method is called. - - This allows subclasses to change the behavior when sending files based - on the filename. For example, to set the cache timeout for .js files - to 60 seconds:: - - class MyFlask(flask.Flask): - def get_send_file_max_age(self, name): - if name.lower().endswith('.js'): - return 60 - return flask.Flask.get_send_file_max_age(self, name) - - .. versionadded:: 0.9 - """ - return total_seconds(current_app.send_file_max_age_default) - - def send_static_file(self, filename): - """Function used internally to send static files from the static - folder to the browser. - - .. versionadded:: 0.5 - """ - if not self.has_static_folder: - raise RuntimeError('No static folder for this object') - # Ensure get_send_file_max_age is called in all cases. - # Here, we ensure get_send_file_max_age is called for Blueprints. - cache_timeout = self.get_send_file_max_age(filename) - return send_from_directory(self.static_folder, filename, - cache_timeout=cache_timeout) - - def open_resource(self, resource, mode='rb'): - """Opens a resource from the application's resource folder. To see - how this works, consider the following folder structure:: - - /myapplication.py - /schema.sql - /static - /style.css - /templates - /layout.html - /index.html - - If you want to open the :file:`schema.sql` file you would do the - following:: - - with app.open_resource('schema.sql') as f: - contents = f.read() - do_something_with(contents) - - :param resource: the name of the resource. To access resources within - subfolders use forward slashes as separator. - :param mode: resource file opening mode, default is 'rb'. - """ - if mode not in ('r', 'rb'): - raise ValueError('Resources can only be opened for reading') - return open(os.path.join(self.root_path, resource), mode) - - -def total_seconds(td): - """Returns the total seconds from a timedelta object. - - :param timedelta td: the timedelta to be converted in seconds - - :returns: number of seconds - :rtype: int - """ - return td.days * 60 * 60 * 24 + td.seconds - - -def is_ip(value): - """Determine if the given string is an IP address. - - Python 2 on Windows doesn't provide ``inet_pton``, so this only - checks IPv4 addresses in that environment. 
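# Illustrative sketch (hedged): expected results from the is_ip() helper
# described here, on a platform where inet_pton is available.
#
#   is_ip(u'127.0.0.1')    # -> True   (IPv4)
#   is_ip(u'2001:db8::1')  # -> True   (IPv6)
#   is_ip(u'example.com')  # -> False  (not an address literal)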
- - :param value: value to check - :type value: str - - :return: True if string is an IP address - :rtype: bool - """ - if PY2 and os.name == 'nt': - try: - socket.inet_aton(value) - return True - except socket.error: - return False - - for family in (socket.AF_INET, socket.AF_INET6): - try: - socket.inet_pton(family, value) - except socket.error: - pass - else: - return True - - return False diff --git a/pyextra/flask/json/__init__.py b/pyextra/flask/json/__init__.py deleted file mode 100644 index fbe6b92f0..000000000 --- a/pyextra/flask/json/__init__.py +++ /dev/null @@ -1,327 +0,0 @@ -# -*- coding: utf-8 -*- -""" -flask.json -~~~~~~~~~~ - -:copyright: © 2010 by the Pallets team. -:license: BSD, see LICENSE for more details. -""" -import codecs -import io -import uuid -from datetime import date, datetime -from flask.globals import current_app, request -from flask._compat import text_type, PY2 - -from werkzeug.http import http_date -from jinja2 import Markup - -# Use the same json implementation as itsdangerous on which we -# depend anyways. -from itsdangerous import json as _json - - -# Figure out if simplejson escapes slashes. This behavior was changed -# from one version to another without reason. -_slash_escape = '\\/' not in _json.dumps('/') - - -__all__ = ['dump', 'dumps', 'load', 'loads', 'htmlsafe_dump', - 'htmlsafe_dumps', 'JSONDecoder', 'JSONEncoder', - 'jsonify'] - - -def _wrap_reader_for_text(fp, encoding): - if isinstance(fp.read(0), bytes): - fp = io.TextIOWrapper(io.BufferedReader(fp), encoding) - return fp - - -def _wrap_writer_for_text(fp, encoding): - try: - fp.write('') - except TypeError: - fp = io.TextIOWrapper(fp, encoding) - return fp - - -class JSONEncoder(_json.JSONEncoder): - """The default Flask JSON encoder. This one extends the default simplejson - encoder by also supporting ``datetime`` objects, ``UUID`` as well as - ``Markup`` objects which are serialized as RFC 822 datetime strings (same - as the HTTP date format). In order to support more data types override the - :meth:`default` method. - """ - - def default(self, o): - """Implement this method in a subclass such that it returns a - serializable object for ``o``, or calls the base implementation (to - raise a :exc:`TypeError`). - - For example, to support arbitrary iterators, you could implement - default like this:: - - def default(self, o): - try: - iterable = iter(o) - except TypeError: - pass - else: - return list(iterable) - return JSONEncoder.default(self, o) - """ - if isinstance(o, datetime): - return http_date(o.utctimetuple()) - if isinstance(o, date): - return http_date(o.timetuple()) - if isinstance(o, uuid.UUID): - return str(o) - if hasattr(o, '__html__'): - return text_type(o.__html__()) - return _json.JSONEncoder.default(self, o) - - -class JSONDecoder(_json.JSONDecoder): - """The default JSON decoder. This one does not change the behavior from - the default simplejson decoder. Consult the :mod:`json` documentation - for more information. This decoder is not only used for the load - functions of this module but also :attr:`~flask.Request`. 
- """ - - -def _dump_arg_defaults(kwargs): - """Inject default arguments for dump functions.""" - if current_app: - bp = current_app.blueprints.get(request.blueprint) if request else None - kwargs.setdefault( - 'cls', - bp.json_encoder if bp and bp.json_encoder - else current_app.json_encoder - ) - - if not current_app.config['JSON_AS_ASCII']: - kwargs.setdefault('ensure_ascii', False) - - kwargs.setdefault('sort_keys', current_app.config['JSON_SORT_KEYS']) - else: - kwargs.setdefault('sort_keys', True) - kwargs.setdefault('cls', JSONEncoder) - - -def _load_arg_defaults(kwargs): - """Inject default arguments for load functions.""" - if current_app: - bp = current_app.blueprints.get(request.blueprint) if request else None - kwargs.setdefault( - 'cls', - bp.json_decoder if bp and bp.json_decoder - else current_app.json_decoder - ) - else: - kwargs.setdefault('cls', JSONDecoder) - - -def detect_encoding(data): - """Detect which UTF codec was used to encode the given bytes. - - The latest JSON standard (:rfc:`8259`) suggests that only UTF-8 is - accepted. Older documents allowed 8, 16, or 32. 16 and 32 can be big - or little endian. Some editors or libraries may prepend a BOM. - - :param data: Bytes in unknown UTF encoding. - :return: UTF encoding name - """ - head = data[:4] - - if head[:3] == codecs.BOM_UTF8: - return 'utf-8-sig' - - if b'\x00' not in head: - return 'utf-8' - - if head in (codecs.BOM_UTF32_BE, codecs.BOM_UTF32_LE): - return 'utf-32' - - if head[:2] in (codecs.BOM_UTF16_BE, codecs.BOM_UTF16_LE): - return 'utf-16' - - if len(head) == 4: - if head[:3] == b'\x00\x00\x00': - return 'utf-32-be' - - if head[::2] == b'\x00\x00': - return 'utf-16-be' - - if head[1:] == b'\x00\x00\x00': - return 'utf-32-le' - - if head[1::2] == b'\x00\x00': - return 'utf-16-le' - - if len(head) == 2: - return 'utf-16-be' if head.startswith(b'\x00') else 'utf-16-le' - - return 'utf-8' - - -def dumps(obj, **kwargs): - """Serialize ``obj`` to a JSON formatted ``str`` by using the application's - configured encoder (:attr:`~flask.Flask.json_encoder`) if there is an - application on the stack. - - This function can return ``unicode`` strings or ascii-only bytestrings by - default which coerce into unicode strings automatically. That behavior by - default is controlled by the ``JSON_AS_ASCII`` configuration variable - and can be overridden by the simplejson ``ensure_ascii`` parameter. - """ - _dump_arg_defaults(kwargs) - encoding = kwargs.pop('encoding', None) - rv = _json.dumps(obj, **kwargs) - if encoding is not None and isinstance(rv, text_type): - rv = rv.encode(encoding) - return rv - - -def dump(obj, fp, **kwargs): - """Like :func:`dumps` but writes into a file object.""" - _dump_arg_defaults(kwargs) - encoding = kwargs.pop('encoding', None) - if encoding is not None: - fp = _wrap_writer_for_text(fp, encoding) - _json.dump(obj, fp, **kwargs) - - -def loads(s, **kwargs): - """Unserialize a JSON object from a string ``s`` by using the application's - configured decoder (:attr:`~flask.Flask.json_decoder`) if there is an - application on the stack. - """ - _load_arg_defaults(kwargs) - if isinstance(s, bytes): - encoding = kwargs.pop('encoding', None) - if encoding is None: - encoding = detect_encoding(s) - s = s.decode(encoding) - return _json.loads(s, **kwargs) - - -def load(fp, **kwargs): - """Like :func:`loads` but reads from a file object. 
- """ - _load_arg_defaults(kwargs) - if not PY2: - fp = _wrap_reader_for_text(fp, kwargs.pop('encoding', None) or 'utf-8') - return _json.load(fp, **kwargs) - - -def htmlsafe_dumps(obj, **kwargs): - """Works exactly like :func:`dumps` but is safe for use in `` - - - - -

-'''
-
-FOOTER = u'''\
- [footer / PIN-prompt template. Visible text: "Console Locked" -- "The
-  console is locked and needs to be unlocked by entering the PIN. You can
-  find the PIN printed out on the standard output of your shell that runs
-  the server." -- "PIN:" with an input field and confirm button]
-'''
-
-PAGE_HTML = HEADER + u'''\
- [traceback page template. It renders %(exception_type)s, %(exception)s,
-  the heading "Traceback (most recent call last)", %(summary)s, a plaintext
-  form ("This is the Copy/Paste friendly version of the traceback. You can
-  also paste this traceback into a gist:") and the note "The debugger caught
-  an exception in your WSGI application. You can now look at the traceback
-  which led to the error. If you enable JavaScript you can also use
-  additional features such as code execution (if the evalex feature is
-  enabled), automatic pasting of the exceptions and much more."]
-''' + FOOTER + '''
-'''
-
-CONSOLE_HTML = HEADER + u'''\
- [interactive console template. It renders the heading "Interactive
-  Console", the note "In this console you can execute Python expressions in
-  the context of the application. The initial namespace was created by the
-  debugger automatically." and the fallback "The Console requires
-  JavaScript."]
-''' + FOOTER
-
-SUMMARY_HTML = u'''\
- [traceback summary template. It renders %(title)s, %(frames)s and
-  %(description)s inside a %(classes)s container]
-'''
-
-FRAME_HTML = u'''\
- [single-frame template. It renders: File "%(filename)s", line %(lineno)s,
-  in %(function_name)s, followed by %(lines)s]
-''' - -SOURCE_LINE_HTML = u'''\ - - %(lineno)s - %(code)s - -''' - - -def render_console_html(secret, evalex_trusted=True): - return CONSOLE_HTML % { - 'evalex': 'true', - 'evalex_trusted': evalex_trusted and 'true' or 'false', - 'console': 'true', - 'title': 'Console', - 'secret': secret, - 'traceback_id': -1 - } - - -def get_current_traceback(ignore_system_exceptions=False, - show_hidden_frames=False, skip=0): - """Get the current exception info as `Traceback` object. Per default - calling this method will reraise system exceptions such as generator exit, - system exit or others. This behavior can be disabled by passing `False` - to the function as first parameter. - """ - exc_type, exc_value, tb = sys.exc_info() - if ignore_system_exceptions and exc_type in system_exceptions: - raise - for x in range_type(skip): - if tb.tb_next is None: - break - tb = tb.tb_next - tb = Traceback(exc_type, exc_value, tb) - if not show_hidden_frames: - tb.filter_hidden_frames() - return tb - - -class Line(object): - """Helper for the source renderer.""" - __slots__ = ('lineno', 'code', 'in_frame', 'current') - - def __init__(self, lineno, code): - self.lineno = lineno - self.code = code - self.in_frame = False - self.current = False - - def classes(self): - rv = ['line'] - if self.in_frame: - rv.append('in-frame') - if self.current: - rv.append('current') - return rv - classes = property(classes) - - def render(self): - return SOURCE_LINE_HTML % { - 'classes': u' '.join(self.classes), - 'lineno': self.lineno, - 'code': escape(self.code) - } - - -class Traceback(object): - """Wraps a traceback.""" - - def __init__(self, exc_type, exc_value, tb): - self.exc_type = exc_type - self.exc_value = exc_value - if not isinstance(exc_type, str): - exception_type = exc_type.__name__ - if exc_type.__module__ not in ('__builtin__', 'exceptions'): - exception_type = exc_type.__module__ + '.' + exception_type - else: - exception_type = exc_type - self.exception_type = exception_type - - # we only add frames to the list that are not hidden. This follows - # the the magic variables as defined by paste.exceptions.collector - self.frames = [] - while tb: - self.frames.append(Frame(exc_type, exc_value, tb)) - tb = tb.tb_next - - def filter_hidden_frames(self): - """Remove the frames according to the paste spec.""" - if not self.frames: - return - - new_frames = [] - hidden = False - for frame in self.frames: - hide = frame.hide - if hide in ('before', 'before_and_this'): - new_frames = [] - hidden = False - if hide == 'before_and_this': - continue - elif hide in ('reset', 'reset_and_this'): - hidden = False - if hide == 'reset_and_this': - continue - elif hide in ('after', 'after_and_this'): - hidden = True - if hide == 'after_and_this': - continue - elif hide or hidden: - continue - new_frames.append(frame) - - # if we only have one frame and that frame is from the codeop - # module, remove it. 
- if len(new_frames) == 1 and self.frames[0].module == 'codeop': - del self.frames[:] - - # if the last frame is missing something went terrible wrong :( - elif self.frames[-1] in new_frames: - self.frames[:] = new_frames - - def is_syntax_error(self): - """Is it a syntax error?""" - return isinstance(self.exc_value, SyntaxError) - is_syntax_error = property(is_syntax_error) - - def exception(self): - """String representation of the exception.""" - buf = traceback.format_exception_only(self.exc_type, self.exc_value) - rv = ''.join(buf).strip() - return rv.decode('utf-8', 'replace') if PY2 else rv - exception = property(exception) - - def log(self, logfile=None): - """Log the ASCII traceback into a file object.""" - if logfile is None: - logfile = sys.stderr - tb = self.plaintext.rstrip() + u'\n' - if PY2: - tb = tb.encode('utf-8', 'replace') - logfile.write(tb) - - def paste(self): - """Create a paste and return the paste id.""" - data = json.dumps({ - 'description': 'Werkzeug Internal Server Error', - 'public': False, - 'files': { - 'traceback.txt': { - 'content': self.plaintext - } - } - }).encode('utf-8') - try: - from urllib2 import urlopen - except ImportError: - from urllib.request import urlopen - rv = urlopen('https://api.github.com/gists', data=data) - resp = json.loads(rv.read().decode('utf-8')) - rv.close() - return { - 'url': resp['html_url'], - 'id': resp['id'] - } - - def render_summary(self, include_title=True): - """Render the traceback for the interactive console.""" - title = '' - frames = [] - classes = ['traceback'] - if not self.frames: - classes.append('noframe-traceback') - - if include_title: - if self.is_syntax_error: - title = u'Syntax Error' - else: - title = u'Traceback (most recent call last):' - - for frame in self.frames: - frames.append(u'%s' % ( - frame.info and u' title="%s"' % escape(frame.info) or u'', - frame.render() - )) - - if self.is_syntax_error: - description_wrapper = u'
<pre class=syntaxerror>%s</pre>'
- else:
- description_wrapper = u'<blockquote>%s</blockquote>'
-
- return SUMMARY_HTML % {
- 'classes': u' '.join(classes),
- 'title': title and u'<h3>%s</h3>
' % title or u'', - 'frames': u'\n'.join(frames), - 'description': description_wrapper % escape(self.exception) - } - - def render_full(self, evalex=False, secret=None, - evalex_trusted=True): - """Render the Full HTML page with the traceback info.""" - exc = escape(self.exception) - return PAGE_HTML % { - 'evalex': evalex and 'true' or 'false', - 'evalex_trusted': evalex_trusted and 'true' or 'false', - 'console': 'false', - 'title': exc, - 'exception': exc, - 'exception_type': escape(self.exception_type), - 'summary': self.render_summary(include_title=False), - 'plaintext': escape(self.plaintext), - 'plaintext_cs': re.sub('-{2,}', '-', self.plaintext), - 'traceback_id': self.id, - 'secret': secret - } - - def generate_plaintext_traceback(self): - """Like the plaintext attribute but returns a generator""" - yield u'Traceback (most recent call last):' - for frame in self.frames: - yield u' File "%s", line %s, in %s' % ( - frame.filename, - frame.lineno, - frame.function_name - ) - yield u' ' + frame.current_line.strip() - yield self.exception - - def plaintext(self): - return u'\n'.join(self.generate_plaintext_traceback()) - plaintext = cached_property(plaintext) - - id = property(lambda x: id(x)) - - -class Frame(object): - - """A single frame in a traceback.""" - - def __init__(self, exc_type, exc_value, tb): - self.lineno = tb.tb_lineno - self.function_name = tb.tb_frame.f_code.co_name - self.locals = tb.tb_frame.f_locals - self.globals = tb.tb_frame.f_globals - - fn = inspect.getsourcefile(tb) or inspect.getfile(tb) - if fn[-4:] in ('.pyo', '.pyc'): - fn = fn[:-1] - # if it's a file on the file system resolve the real filename. - if os.path.isfile(fn): - fn = os.path.realpath(fn) - self.filename = to_unicode(fn, get_filesystem_encoding()) - self.module = self.globals.get('__name__') - self.loader = self.globals.get('__loader__') - self.code = tb.tb_frame.f_code - - # support for paste's traceback extensions - self.hide = self.locals.get('__traceback_hide__', False) - info = self.locals.get('__traceback_info__') - if info is not None: - try: - info = text_type(info) - except UnicodeError: - info = str(info).decode('utf-8', 'replace') - self.info = info - - def render(self): - """Render a single frame in a traceback.""" - return FRAME_HTML % { - 'id': self.id, - 'filename': escape(self.filename), - 'lineno': self.lineno, - 'function_name': escape(self.function_name), - 'lines': self.render_line_context(), - } - - def render_line_context(self): - before, current, after = self.get_context_lines() - rv = [] - - def render_line(line, cls): - line = line.expandtabs().rstrip() - stripped_line = line.strip() - prefix = len(line) - len(stripped_line) - rv.append( - '
<pre class="line %s"><span class="ws">%s</span>%s</pre>
' % ( - cls, ' ' * prefix, escape(stripped_line) or ' ')) - - for line in before: - render_line(line, 'before') - render_line(current, 'current') - for line in after: - render_line(line, 'after') - - return '\n'.join(rv) - - def get_annotated_lines(self): - """Helper function that returns lines with extra information.""" - lines = [Line(idx + 1, x) for idx, x in enumerate(self.sourcelines)] - - # find function definition and mark lines - if hasattr(self.code, 'co_firstlineno'): - lineno = self.code.co_firstlineno - 1 - while lineno > 0: - if _funcdef_re.match(lines[lineno].code): - break - lineno -= 1 - try: - offset = len(inspect.getblock([x.code + '\n' for x - in lines[lineno:]])) - except TokenError: - offset = 0 - for line in lines[lineno:lineno + offset]: - line.in_frame = True - - # mark current line - try: - lines[self.lineno - 1].current = True - except IndexError: - pass - - return lines - - def eval(self, code, mode='single'): - """Evaluate code in the context of the frame.""" - if isinstance(code, string_types): - if PY2 and isinstance(code, unicode): # noqa - code = UTF8_COOKIE + code.encode('utf-8') - code = compile(code, '', mode) - return eval(code, self.globals, self.locals) - - @cached_property - def sourcelines(self): - """The sourcecode of the file as list of unicode strings.""" - # get sourcecode from loader or file - source = None - if self.loader is not None: - try: - if hasattr(self.loader, 'get_source'): - source = self.loader.get_source(self.module) - elif hasattr(self.loader, 'get_source_by_code'): - source = self.loader.get_source_by_code(self.code) - except Exception: - # we munch the exception so that we don't cause troubles - # if the loader is broken. - pass - - if source is None: - try: - f = open(to_native(self.filename, get_filesystem_encoding()), - mode='rb') - except IOError: - return [] - try: - source = f.read() - finally: - f.close() - - # already unicode? return right away - if isinstance(source, text_type): - return source.splitlines() - - # yes. it should be ascii, but we don't want to reject too many - # characters in the debugger if something breaks - charset = 'utf-8' - if source.startswith(UTF8_COOKIE): - source = source[3:] - else: - for idx, match in enumerate(_line_re.finditer(source)): - match = _coding_re.search(match.group()) - if match is not None: - charset = match.group(1) - break - if idx > 1: - break - - # on broken cookies we fall back to utf-8 too - charset = to_native(charset) - try: - codecs.lookup(charset) - except LookupError: - charset = 'utf-8' - - return source.decode(charset, 'replace').splitlines() - - def get_context_lines(self, context=5): - before = self.sourcelines[self.lineno - context - 1:self.lineno - 1] - past = self.sourcelines[self.lineno:self.lineno + context] - return ( - before, - self.current_line, - past, - ) - - @property - def current_line(self): - try: - return self.sourcelines[self.lineno - 1] - except IndexError: - return u'' - - @cached_property - def console(self): - return Console(self.globals, self.locals) - - id = property(lambda x: id(x)) diff --git a/pyextra/werkzeug/exceptions.py b/pyextra/werkzeug/exceptions.py deleted file mode 100644 index 1d68185a9..000000000 --- a/pyextra/werkzeug/exceptions.py +++ /dev/null @@ -1,719 +0,0 @@ -# -*- coding: utf-8 -*- -""" - werkzeug.exceptions - ~~~~~~~~~~~~~~~~~~~ - - This module implements a number of Python exceptions you can raise from - within your views to trigger a standard non-200 response. 
- - - Usage Example - ------------- - - :: - - from werkzeug.wrappers import BaseRequest - from werkzeug.wsgi import responder - from werkzeug.exceptions import HTTPException, NotFound - - def view(request): - raise NotFound() - - @responder - def application(environ, start_response): - request = BaseRequest(environ) - try: - return view(request) - except HTTPException as e: - return e - - - As you can see from this example those exceptions are callable WSGI - applications. Because of Python 2.4 compatibility those do not extend - from the response objects but only from the python exception class. - - As a matter of fact they are not Werkzeug response objects. However you - can get a response object by calling ``get_response()`` on a HTTP - exception. - - Keep in mind that you have to pass an environment to ``get_response()`` - because some errors fetch additional information from the WSGI - environment. - - If you want to hook in a different exception page to say, a 404 status - code, you can add a second except for a specific subclass of an error:: - - @responder - def application(environ, start_response): - request = BaseRequest(environ) - try: - return view(request) - except NotFound, e: - return not_found(request) - except HTTPException, e: - return e - - - :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details. - :license: BSD, see LICENSE for more details. -""" -import sys - -# Because of bootstrapping reasons we need to manually patch ourselves -# onto our parent module. -import werkzeug -werkzeug.exceptions = sys.modules[__name__] - -from werkzeug._internal import _get_environ -from werkzeug._compat import iteritems, integer_types, text_type, \ - implements_to_string - -from werkzeug.wrappers import Response - - -@implements_to_string -class HTTPException(Exception): - - """ - Baseclass for all HTTP exceptions. This exception can be called as WSGI - application to render a default error page or you can catch the subclasses - of it independently and render nicer error messages. - """ - - code = None - description = None - - def __init__(self, description=None, response=None): - Exception.__init__(self) - if description is not None: - self.description = description - self.response = response - - @classmethod - def wrap(cls, exception, name=None): - """This method returns a new subclass of the exception provided that - also is a subclass of `BadRequest`. - """ - class newcls(cls, exception): - - def __init__(self, arg=None, *args, **kwargs): - cls.__init__(self, *args, **kwargs) - exception.__init__(self, arg) - newcls.__module__ = sys._getframe(1).f_globals.get('__name__') - newcls.__name__ = name or cls.__name__ + exception.__name__ - return newcls - - @property - def name(self): - """The status name.""" - return HTTP_STATUS_CODES.get(self.code, 'Unknown Error') - - def get_description(self, environ=None): - """Get the description.""" - return u'

<p>%s</p>' % escape(self.description)
-
- def get_body(self, environ=None):
- """Get the HTML body."""
- return text_type((
- u'<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n'
- u'<title>%(code)s %(name)s</title>\n'
- u'<h1>%(name)s</h1>
\n' - u'%(description)s\n' - ) % { - 'code': self.code, - 'name': escape(self.name), - 'description': self.get_description(environ) - }) - - def get_headers(self, environ=None): - """Get a list of headers.""" - return [('Content-Type', 'text/html')] - - def get_response(self, environ=None): - """Get a response object. If one was passed to the exception - it's returned directly. - - :param environ: the optional environ for the request. This - can be used to modify the response depending - on how the request looked like. - :return: a :class:`Response` object or a subclass thereof. - """ - if self.response is not None: - return self.response - if environ is not None: - environ = _get_environ(environ) - headers = self.get_headers(environ) - return Response(self.get_body(environ), self.code, headers) - - def __call__(self, environ, start_response): - """Call the exception as WSGI application. - - :param environ: the WSGI environment. - :param start_response: the response callable provided by the WSGI - server. - """ - response = self.get_response(environ) - return response(environ, start_response) - - def __str__(self): - code = self.code if self.code is not None else '???' - return '%s %s: %s' % (code, self.name, self.description) - - def __repr__(self): - code = self.code if self.code is not None else '???' - return "<%s '%s: %s'>" % (self.__class__.__name__, code, self.name) - - -class BadRequest(HTTPException): - - """*400* `Bad Request` - - Raise if the browser sends something to the application the application - or server cannot handle. - """ - code = 400 - description = ( - 'The browser (or proxy) sent a request that this server could ' - 'not understand.' - ) - - -class ClientDisconnected(BadRequest): - - """Internal exception that is raised if Werkzeug detects a disconnected - client. Since the client is already gone at that point attempting to - send the error message to the client might not work and might ultimately - result in another exception in the server. Mainly this is here so that - it is silenced by default as far as Werkzeug is concerned. - - Since disconnections cannot be reliably detected and are unspecified - by WSGI to a large extent this might or might not be raised if a client - is gone. - - .. versionadded:: 0.8 - """ - - -class SecurityError(BadRequest): - - """Raised if something triggers a security error. This is otherwise - exactly like a bad request error. - - .. versionadded:: 0.9 - """ - - -class BadHost(BadRequest): - - """Raised if the submitted host is badly formatted. - - .. versionadded:: 0.11.2 - """ - - -class Unauthorized(HTTPException): - - """*401* `Unauthorized` - - Raise if the user is not authorized. Also used if you want to use HTTP - basic auth. - """ - code = 401 - description = ( - 'The server could not verify that you are authorized to access ' - 'the URL requested. You either supplied the wrong credentials (e.g. ' - 'a bad password), or your browser doesn\'t understand how to supply ' - 'the credentials required.' - ) - - -class Forbidden(HTTPException): - - """*403* `Forbidden` - - Raise if the user doesn't have the permission for the requested resource - but was authenticated. - """ - code = 403 - description = ( - 'You don\'t have the permission to access the requested resource. ' - 'It is either read-protected or not readable by the server.' - ) - - -class NotFound(HTTPException): - - """*404* `Not Found` - - Raise if a resource does not exist and never existed. 
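# Illustrative sketch (hedged): any of the exception classes in this module
# can be converted into a plain werkzeug Response, which is what
# get_response() and __call__() above are for.
from werkzeug.exceptions import NotFound

err = NotFound()
print(err.code, err.name)                # 404 Not Found
response = err.get_response()            # a regular werkzeug Response object
print(response.status)                   # 404 NOT FOUND
print(response.headers['Content-Type'])  # 'text/html'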
- """ - code = 404 - description = ( - 'The requested URL was not found on the server. ' - 'If you entered the URL manually please check your spelling and ' - 'try again.' - ) - - -class MethodNotAllowed(HTTPException): - - """*405* `Method Not Allowed` - - Raise if the server used a method the resource does not handle. For - example `POST` if the resource is view only. Especially useful for REST. - - The first argument for this exception should be a list of allowed methods. - Strictly speaking the response would be invalid if you don't provide valid - methods in the header which you can do with that list. - """ - code = 405 - description = 'The method is not allowed for the requested URL.' - - def __init__(self, valid_methods=None, description=None): - """Takes an optional list of valid http methods - starting with werkzeug 0.3 the list will be mandatory.""" - HTTPException.__init__(self, description) - self.valid_methods = valid_methods - - def get_headers(self, environ): - headers = HTTPException.get_headers(self, environ) - if self.valid_methods: - headers.append(('Allow', ', '.join(self.valid_methods))) - return headers - - -class NotAcceptable(HTTPException): - - """*406* `Not Acceptable` - - Raise if the server can't return any content conforming to the - `Accept` headers of the client. - """ - code = 406 - - description = ( - 'The resource identified by the request is only capable of ' - 'generating response entities which have content characteristics ' - 'not acceptable according to the accept headers sent in the ' - 'request.' - ) - - -class RequestTimeout(HTTPException): - - """*408* `Request Timeout` - - Raise to signalize a timeout. - """ - code = 408 - description = ( - 'The server closed the network connection because the browser ' - 'didn\'t finish the request within the specified time.' - ) - - -class Conflict(HTTPException): - - """*409* `Conflict` - - Raise to signal that a request cannot be completed because it conflicts - with the current state on the server. - - .. versionadded:: 0.7 - """ - code = 409 - description = ( - 'A conflict happened while processing the request. The resource ' - 'might have been modified while the request was being processed.' - ) - - -class Gone(HTTPException): - - """*410* `Gone` - - Raise if a resource existed previously and went away without new location. - """ - code = 410 - description = ( - 'The requested URL is no longer available on this server and there ' - 'is no forwarding address. If you followed a link from a foreign ' - 'page, please contact the author of this page.' - ) - - -class LengthRequired(HTTPException): - - """*411* `Length Required` - - Raise if the browser submitted data but no ``Content-Length`` header which - is required for the kind of processing the server does. - """ - code = 411 - description = ( - 'A request with this method requires a valid Content-' - 'Length header.' - ) - - -class PreconditionFailed(HTTPException): - - """*412* `Precondition Failed` - - Status code used in combination with ``If-Match``, ``If-None-Match``, or - ``If-Unmodified-Since``. - """ - code = 412 - description = ( - 'The precondition on the request for the URL failed positive ' - 'evaluation.' - ) - - -class RequestEntityTooLarge(HTTPException): - - """*413* `Request Entity Too Large` - - The status code one should return if the data submitted exceeded a given - limit. - """ - code = 413 - description = ( - 'The data value transmitted exceeds the capacity limit.' 
- ) - - -class RequestURITooLarge(HTTPException): - - """*414* `Request URI Too Large` - - Like *413* but for too long URLs. - """ - code = 414 - description = ( - 'The length of the requested URL exceeds the capacity limit ' - 'for this server. The request cannot be processed.' - ) - - -class UnsupportedMediaType(HTTPException): - - """*415* `Unsupported Media Type` - - The status code returned if the server is unable to handle the media type - the client transmitted. - """ - code = 415 - description = ( - 'The server does not support the media type transmitted in ' - 'the request.' - ) - - -class RequestedRangeNotSatisfiable(HTTPException): - - """*416* `Requested Range Not Satisfiable` - - The client asked for an invalid part of the file. - - .. versionadded:: 0.7 - """ - code = 416 - description = ( - 'The server cannot provide the requested range.' - ) - - def __init__(self, length=None, units="bytes", description=None): - """Takes an optional `Content-Range` header value based on ``length`` - parameter. - """ - HTTPException.__init__(self, description) - self.length = length - self.units = units - - def get_headers(self, environ): - headers = HTTPException.get_headers(self, environ) - if self.length is not None: - headers.append( - ('Content-Range', '%s */%d' % (self.units, self.length))) - return headers - - -class ExpectationFailed(HTTPException): - - """*417* `Expectation Failed` - - The server cannot meet the requirements of the Expect request-header. - - .. versionadded:: 0.7 - """ - code = 417 - description = ( - 'The server could not meet the requirements of the Expect header' - ) - - -class ImATeapot(HTTPException): - - """*418* `I'm a teapot` - - The server should return this if it is a teapot and someone attempted - to brew coffee with it. - - .. versionadded:: 0.7 - """ - code = 418 - description = ( - 'This server is a teapot, not a coffee machine' - ) - - -class UnprocessableEntity(HTTPException): - - """*422* `Unprocessable Entity` - - Used if the request is well formed, but the instructions are otherwise - incorrect. - """ - code = 422 - description = ( - 'The request was well-formed but was unable to be followed ' - 'due to semantic errors.' - ) - - -class Locked(HTTPException): - - """*423* `Locked` - - Used if the resource that is being accessed is locked. - """ - code = 423 - description = ( - 'The resource that is being accessed is locked.' - ) - - -class PreconditionRequired(HTTPException): - - """*428* `Precondition Required` - - The server requires this request to be conditional, typically to prevent - the lost update problem, which is a race condition between two or more - clients attempting to update a resource through PUT or DELETE. By requiring - each client to include a conditional header ("If-Match" or "If-Unmodified- - Since") with the proper value retained from a recent GET request, the - server ensures that each client has at least seen the previous revision of - the resource. - """ - code = 428 - description = ( - 'This request is required to be conditional; try using "If-Match" ' - 'or "If-Unmodified-Since".' - ) - - -class TooManyRequests(HTTPException): - - """*429* `Too Many Requests` - - The server is limiting the rate at which this user receives responses, and - this request exceeds that rate. (The server may use any convenient method - to identify users and their request rates). The server may include a - "Retry-After" header to indicate how long the user should wait before - retrying. 
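# Illustrative sketch (hedged): some of the exceptions above contribute extra
# response headers through get_headers(), e.g. Allow and Content-Range.
from werkzeug.exceptions import MethodNotAllowed, RequestedRangeNotSatisfiable

print(MethodNotAllowed(valid_methods=['GET', 'HEAD']).get_headers({}))
# [('Content-Type', 'text/html'), ('Allow', 'GET, HEAD')]

print(RequestedRangeNotSatisfiable(length=1024).get_headers({}))
# [('Content-Type', 'text/html'), ('Content-Range', 'bytes */1024')]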
- """ - code = 429 - description = ( - 'This user has exceeded an allotted request count. Try again later.' - ) - - -class RequestHeaderFieldsTooLarge(HTTPException): - - """*431* `Request Header Fields Too Large` - - The server refuses to process the request because the header fields are too - large. One or more individual fields may be too large, or the set of all - headers is too large. - """ - code = 431 - description = ( - 'One or more header fields exceeds the maximum size.' - ) - - -class UnavailableForLegalReasons(HTTPException): - - """*451* `Unavailable For Legal Reasons` - - This status code indicates that the server is denying access to the - resource as a consequence of a legal demand. - """ - code = 451 - description = ( - 'Unavailable for legal reasons.' - ) - - -class InternalServerError(HTTPException): - - """*500* `Internal Server Error` - - Raise if an internal server error occurred. This is a good fallback if an - unknown error occurred in the dispatcher. - """ - code = 500 - description = ( - 'The server encountered an internal error and was unable to ' - 'complete your request. Either the server is overloaded or there ' - 'is an error in the application.' - ) - - -class NotImplemented(HTTPException): - - """*501* `Not Implemented` - - Raise if the application does not support the action requested by the - browser. - """ - code = 501 - description = ( - 'The server does not support the action requested by the ' - 'browser.' - ) - - -class BadGateway(HTTPException): - - """*502* `Bad Gateway` - - If you do proxying in your application you should return this status code - if you received an invalid response from the upstream server it accessed - in attempting to fulfill the request. - """ - code = 502 - description = ( - 'The proxy server received an invalid response from an upstream ' - 'server.' - ) - - -class ServiceUnavailable(HTTPException): - - """*503* `Service Unavailable` - - Status code you should return if a service is temporarily unavailable. - """ - code = 503 - description = ( - 'The server is temporarily unable to service your request due to ' - 'maintenance downtime or capacity problems. Please try again ' - 'later.' - ) - - -class GatewayTimeout(HTTPException): - - """*504* `Gateway Timeout` - - Status code you should return if a connection to an upstream server - times out. - """ - code = 504 - description = ( - 'The connection to an upstream server timed out.' - ) - - -class HTTPVersionNotSupported(HTTPException): - - """*505* `HTTP Version Not Supported` - - The server does not support the HTTP protocol version used in the request. - """ - code = 505 - description = ( - 'The server does not support the HTTP protocol version used in the ' - 'request.' - ) - - -default_exceptions = {} -__all__ = ['HTTPException'] - - -def _find_exceptions(): - for name, obj in iteritems(globals()): - try: - is_http_exception = issubclass(obj, HTTPException) - except TypeError: - is_http_exception = False - if not is_http_exception or obj.code is None: - continue - __all__.append(obj.__name__) - old_obj = default_exceptions.get(obj.code, None) - if old_obj is not None and issubclass(obj, old_obj): - continue - default_exceptions[obj.code] = obj -_find_exceptions() -del _find_exceptions - - -class Aborter(object): - - """ - When passed a dict of code -> exception items it can be used as - callable that raises exceptions. 
If the first argument to the - callable is an integer it will be looked up in the mapping, if it's - a WSGI application it will be raised in a proxy exception. - - The rest of the arguments are forwarded to the exception constructor. - """ - - def __init__(self, mapping=None, extra=None): - if mapping is None: - mapping = default_exceptions - self.mapping = dict(mapping) - if extra is not None: - self.mapping.update(extra) - - def __call__(self, code, *args, **kwargs): - if not args and not kwargs and not isinstance(code, integer_types): - raise HTTPException(response=code) - if code not in self.mapping: - raise LookupError('no exception for %r' % code) - raise self.mapping[code](*args, **kwargs) - - -def abort(status, *args, **kwargs): - ''' - Raises an :py:exc:`HTTPException` for the given status code or WSGI - application:: - - abort(404) # 404 Not Found - abort(Response('Hello World')) - - Can be passed a WSGI application or a status code. If a status code is - given it's looked up in the list of exceptions and will raise that - exception, if passed a WSGI application it will wrap it in a proxy WSGI - exception and raise that:: - - abort(404) - abort(Response('Hello World')) - - ''' - return _aborter(status, *args, **kwargs) - -_aborter = Aborter() - - -#: an exception that is used internally to signal both a key error and a -#: bad request. Used by a lot of the datastructures. -BadRequestKeyError = BadRequest.wrap(KeyError) - - -# imported here because of circular dependencies of werkzeug.utils -from werkzeug.utils import escape -from werkzeug.http import HTTP_STATUS_CODES diff --git a/pyextra/werkzeug/filesystem.py b/pyextra/werkzeug/filesystem.py deleted file mode 100644 index 624674655..000000000 --- a/pyextra/werkzeug/filesystem.py +++ /dev/null @@ -1,66 +0,0 @@ -# -*- coding: utf-8 -*- -""" - werkzeug.filesystem - ~~~~~~~~~~~~~~~~~~~ - - Various utilities for the local filesystem. - - :copyright: (c) 2015 by the Werkzeug Team, see AUTHORS for more details. - :license: BSD, see LICENSE for more details. -""" - -import codecs -import sys -import warnings - -# We do not trust traditional unixes. -has_likely_buggy_unicode_filesystem = \ - sys.platform.startswith('linux') or 'bsd' in sys.platform - - -def _is_ascii_encoding(encoding): - """ - Given an encoding this figures out if the encoding is actually ASCII (which - is something we don't actually want in most cases). This is necessary - because ASCII comes under many names such as ANSI_X3.4-1968. - """ - if encoding is None: - return False - try: - return codecs.lookup(encoding).name == 'ascii' - except LookupError: - return False - - -class BrokenFilesystemWarning(RuntimeWarning, UnicodeWarning): - '''The warning used by Werkzeug to signal a broken filesystem. Will only be - used once per runtime.''' - - -_warned_about_filesystem_encoding = False - - -def get_filesystem_encoding(): - """ - Returns the filesystem encoding that should be used. Note that this is - different from the Python understanding of the filesystem encoding which - might be deeply flawed. Do not use this value against Python's unicode APIs - because it might be different. See :ref:`filesystem-encoding` for the exact - behavior. - - The concept of a filesystem encoding in generally is not something you - should rely on. As such if you ever need to use this function except for - writing wrapper code reconsider. 
- """ - global _warned_about_filesystem_encoding - rv = sys.getfilesystemencoding() - if has_likely_buggy_unicode_filesystem and not rv \ - or _is_ascii_encoding(rv): - if not _warned_about_filesystem_encoding: - warnings.warn( - 'Detected a misconfigured UNIX filesystem: Will use UTF-8 as ' - 'filesystem encoding instead of {0!r}'.format(rv), - BrokenFilesystemWarning) - _warned_about_filesystem_encoding = True - return 'utf-8' - return rv diff --git a/pyextra/werkzeug/formparser.py b/pyextra/werkzeug/formparser.py deleted file mode 100644 index c56859e90..000000000 --- a/pyextra/werkzeug/formparser.py +++ /dev/null @@ -1,534 +0,0 @@ -# -*- coding: utf-8 -*- -""" - werkzeug.formparser - ~~~~~~~~~~~~~~~~~~~ - - This module implements the form parsing. It supports url-encoded forms - as well as non-nested multipart uploads. - - :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details. - :license: BSD, see LICENSE for more details. -""" -import re -import codecs - -# there are some platforms where SpooledTemporaryFile is not available. -# In that case we need to provide a fallback. -try: - from tempfile import SpooledTemporaryFile -except ImportError: - from tempfile import TemporaryFile - SpooledTemporaryFile = None - -from itertools import chain, repeat, tee -from functools import update_wrapper - -from werkzeug._compat import to_native, text_type, BytesIO -from werkzeug.urls import url_decode_stream -from werkzeug.wsgi import make_line_iter, \ - get_input_stream, get_content_length -from werkzeug.datastructures import Headers, FileStorage, MultiDict -from werkzeug.http import parse_options_header - - -#: an iterator that yields empty strings -_empty_string_iter = repeat('') - -#: a regular expression for multipart boundaries -_multipart_boundary_re = re.compile('^[ -~]{0,200}[!-~]$') - -#: supported http encodings that are also available in python we support -#: for multipart messages. -_supported_multipart_encodings = frozenset(['base64', 'quoted-printable']) - - -def default_stream_factory(total_content_length, filename, content_type, - content_length=None): - """The stream factory that is used per default.""" - max_size = 1024 * 500 - if SpooledTemporaryFile is not None: - return SpooledTemporaryFile(max_size=max_size, mode='wb+') - if total_content_length is None or total_content_length > max_size: - return TemporaryFile('wb+') - return BytesIO() - - -def parse_form_data(environ, stream_factory=None, charset='utf-8', - errors='replace', max_form_memory_size=None, - max_content_length=None, cls=None, - silent=True): - """Parse the form data in the environ and return it as tuple in the form - ``(stream, form, files)``. You should only call this method if the - transport method is `POST`, `PUT`, or `PATCH`. - - If the mimetype of the data transmitted is `multipart/form-data` the - files multidict will be filled with `FileStorage` objects. If the - mimetype is unknown the input stream is wrapped and returned as first - argument, else the stream is empty. - - This is a shortcut for the common usage of :class:`FormDataParser`. - - Have a look at :ref:`dealing-with-request-data` for more details. - - .. versionadded:: 0.5 - The `max_form_memory_size`, `max_content_length` and - `cls` parameters were added. - - .. versionadded:: 0.5.1 - The optional `silent` flag was added. - - :param environ: the WSGI environment to be used for parsing. - :param stream_factory: An optional callable that returns a new read and - writeable file descriptor. 
This callable works - the same as :meth:`~BaseResponse._get_file_stream`. - :param charset: The character set for URL and url encoded form data. - :param errors: The encoding error behavior. - :param max_form_memory_size: the maximum number of bytes to be accepted for - in-memory stored form data. If the data - exceeds the value specified an - :exc:`~exceptions.RequestEntityTooLarge` - exception is raised. - :param max_content_length: If this is provided and the transmitted data - is longer than this value an - :exc:`~exceptions.RequestEntityTooLarge` - exception is raised. - :param cls: an optional dict class to use. If this is not specified - or `None` the default :class:`MultiDict` is used. - :param silent: If set to False parsing errors will not be caught. - :return: A tuple in the form ``(stream, form, files)``. - """ - return FormDataParser(stream_factory, charset, errors, - max_form_memory_size, max_content_length, - cls, silent).parse_from_environ(environ) - - -def exhaust_stream(f): - """Helper decorator for methods that exhausts the stream on return.""" - - def wrapper(self, stream, *args, **kwargs): - try: - return f(self, stream, *args, **kwargs) - finally: - exhaust = getattr(stream, 'exhaust', None) - if exhaust is not None: - exhaust() - else: - while 1: - chunk = stream.read(1024 * 64) - if not chunk: - break - return update_wrapper(wrapper, f) - - -class FormDataParser(object): - - """This class implements parsing of form data for Werkzeug. By itself - it can parse multipart and url encoded form data. It can be subclassed - and extended but for most mimetypes it is a better idea to use the - untouched stream and expose it as separate attributes on a request - object. - - .. versionadded:: 0.8 - - :param stream_factory: An optional callable that returns a new read and - writeable file descriptor. This callable works - the same as :meth:`~BaseResponse._get_file_stream`. - :param charset: The character set for URL and url encoded form data. - :param errors: The encoding error behavior. - :param max_form_memory_size: the maximum number of bytes to be accepted for - in-memory stored form data. If the data - exceeds the value specified an - :exc:`~exceptions.RequestEntityTooLarge` - exception is raised. - :param max_content_length: If this is provided and the transmitted data - is longer than this value an - :exc:`~exceptions.RequestEntityTooLarge` - exception is raised. - :param cls: an optional dict class to use. If this is not specified - or `None` the default :class:`MultiDict` is used. - :param silent: If set to False parsing errors will not be caught. - """ - - def __init__(self, stream_factory=None, charset='utf-8', - errors='replace', max_form_memory_size=None, - max_content_length=None, cls=None, - silent=True): - if stream_factory is None: - stream_factory = default_stream_factory - self.stream_factory = stream_factory - self.charset = charset - self.errors = errors - self.max_form_memory_size = max_form_memory_size - self.max_content_length = max_content_length - if cls is None: - cls = MultiDict - self.cls = cls - self.silent = silent - - def get_parse_func(self, mimetype, options): - return self.parse_functions.get(mimetype) - - def parse_from_environ(self, environ): - """Parses the information from the environment as form data. - - :param environ: the WSGI environment to be used for parsing. - :return: A tuple in the form ``(stream, form, files)``. 
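# Illustrative sketch (hedged): parsing a urlencoded POST body straight from a
# WSGI environ with parse_form_data(), as described above. The environ is
# built with werkzeug's own test helper.
from werkzeug.test import EnvironBuilder
from werkzeug.formparser import parse_form_data

environ = EnvironBuilder(method='POST', data={'name': 'example'}).get_environ()
stream, form, files = parse_form_data(environ)
print(form['name'])   # 'example'
print(len(files))     # 0 -- no multipart upload, so the files multidict is empty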
- """ - content_type = environ.get('CONTENT_TYPE', '') - content_length = get_content_length(environ) - mimetype, options = parse_options_header(content_type) - return self.parse(get_input_stream(environ), mimetype, - content_length, options) - - def parse(self, stream, mimetype, content_length, options=None): - """Parses the information from the given stream, mimetype, - content length and mimetype parameters. - - :param stream: an input stream - :param mimetype: the mimetype of the data - :param content_length: the content length of the incoming data - :param options: optional mimetype parameters (used for - the multipart boundary for instance) - :return: A tuple in the form ``(stream, form, files)``. - """ - if self.max_content_length is not None and \ - content_length is not None and \ - content_length > self.max_content_length: - raise exceptions.RequestEntityTooLarge() - if options is None: - options = {} - - parse_func = self.get_parse_func(mimetype, options) - if parse_func is not None: - try: - return parse_func(self, stream, mimetype, - content_length, options) - except ValueError: - if not self.silent: - raise - - return stream, self.cls(), self.cls() - - @exhaust_stream - def _parse_multipart(self, stream, mimetype, content_length, options): - parser = MultiPartParser(self.stream_factory, self.charset, self.errors, - max_form_memory_size=self.max_form_memory_size, - cls=self.cls) - boundary = options.get('boundary') - if boundary is None: - raise ValueError('Missing boundary') - if isinstance(boundary, text_type): - boundary = boundary.encode('ascii') - form, files = parser.parse(stream, boundary, content_length) - return stream, form, files - - @exhaust_stream - def _parse_urlencoded(self, stream, mimetype, content_length, options): - if self.max_form_memory_size is not None and \ - content_length is not None and \ - content_length > self.max_form_memory_size: - raise exceptions.RequestEntityTooLarge() - form = url_decode_stream(stream, self.charset, - errors=self.errors, cls=self.cls) - return stream, form, self.cls() - - #: mapping of mimetypes to parsing functions - parse_functions = { - 'multipart/form-data': _parse_multipart, - 'application/x-www-form-urlencoded': _parse_urlencoded, - 'application/x-url-encoded': _parse_urlencoded - } - - -def is_valid_multipart_boundary(boundary): - """Checks if the string given is a valid multipart boundary.""" - return _multipart_boundary_re.match(boundary) is not None - - -def _line_parse(line): - """Removes line ending characters and returns a tuple (`stripped_line`, - `is_terminated`). - """ - if line[-2:] in ['\r\n', b'\r\n']: - return line[:-2], True - elif line[-1:] in ['\r', '\n', b'\r', b'\n']: - return line[:-1], True - return line, False - - -def parse_multipart_headers(iterable): - """Parses multipart headers from an iterable that yields lines (including - the trailing newline symbol). The iterable has to be newline terminated. - - The iterable will stop at the line where the headers ended so it can be - further consumed. 
- - :param iterable: iterable of strings that are newline terminated - """ - result = [] - for line in iterable: - line = to_native(line) - line, line_terminated = _line_parse(line) - if not line_terminated: - raise ValueError('unexpected end of line in multipart header') - if not line: - break - elif line[0] in ' \t' and result: - key, value = result[-1] - result[-1] = (key, value + '\n ' + line[1:]) - else: - parts = line.split(':', 1) - if len(parts) == 2: - result.append((parts[0].strip(), parts[1].strip())) - - # we link the list to the headers, no need to create a copy, the - # list was not shared anyways. - return Headers(result) - - -_begin_form = 'begin_form' -_begin_file = 'begin_file' -_cont = 'cont' -_end = 'end' - - -class MultiPartParser(object): - - def __init__(self, stream_factory=None, charset='utf-8', errors='replace', - max_form_memory_size=None, cls=None, buffer_size=64 * 1024): - self.charset = charset - self.errors = errors - self.max_form_memory_size = max_form_memory_size - self.stream_factory = default_stream_factory if stream_factory is None else stream_factory - self.cls = MultiDict if cls is None else cls - - # make sure the buffer size is divisible by four so that we can base64 - # decode chunk by chunk - assert buffer_size % 4 == 0, 'buffer size has to be divisible by 4' - # also the buffer size has to be at least 1024 bytes long or long headers - # will freak out the system - assert buffer_size >= 1024, 'buffer size has to be at least 1KB' - - self.buffer_size = buffer_size - - def _fix_ie_filename(self, filename): - """Internet Explorer 6 transmits the full file name if a file is - uploaded. This function strips the full path if it thinks the - filename is Windows-like absolute. - """ - if filename[1:3] == ':\\' or filename[:2] == '\\\\': - return filename.split('\\')[-1] - return filename - - def _find_terminator(self, iterator): - """The terminator might have some additional newlines before it. - There is at least one application that sends additional newlines - before headers (the python setuptools package). 
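A short sketch of the line-oriented header parsing used here: ``parse_multipart_headers`` consumes newline-terminated lines up to the first blank line and leaves the remainder of the iterator for the caller.

.. code-block:: python

    # Sketch only: the blank '\r\n' line terminates the header block and the
    # following body line stays in the iterator.
    from werkzeug.formparser import parse_multipart_headers

    lines = iter([
        'Content-Disposition: form-data; name="upload"; filename="notes.txt"\r\n',
        'Content-Type: text/plain\r\n',
        '\r\n',
        'body data\r\n',
    ])
    headers = parse_multipart_headers(lines)
    print(headers['Content-Type'])   # -> 'text/plain'
    print(next(lines))               # -> 'body data\r\n' (still available)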
- """ - for line in iterator: - if not line: - break - line = line.strip() - if line: - return line - return b'' - - def fail(self, message): - raise ValueError(message) - - def get_part_encoding(self, headers): - transfer_encoding = headers.get('content-transfer-encoding') - if transfer_encoding is not None and \ - transfer_encoding in _supported_multipart_encodings: - return transfer_encoding - - def get_part_charset(self, headers): - # Figure out input charset for current part - content_type = headers.get('content-type') - if content_type: - mimetype, ct_params = parse_options_header(content_type) - return ct_params.get('charset', self.charset) - return self.charset - - def start_file_streaming(self, filename, headers, total_content_length): - if isinstance(filename, bytes): - filename = filename.decode(self.charset, self.errors) - filename = self._fix_ie_filename(filename) - content_type = headers.get('content-type') - try: - content_length = int(headers['content-length']) - except (KeyError, ValueError): - content_length = 0 - container = self.stream_factory(total_content_length, content_type, - filename, content_length) - return filename, container - - def in_memory_threshold_reached(self, bytes): - raise exceptions.RequestEntityTooLarge() - - def validate_boundary(self, boundary): - if not boundary: - self.fail('Missing boundary') - if not is_valid_multipart_boundary(boundary): - self.fail('Invalid boundary: %s' % boundary) - if len(boundary) > self.buffer_size: # pragma: no cover - # this should never happen because we check for a minimum size - # of 1024 and boundaries may not be longer than 200. The only - # situation when this happens is for non debug builds where - # the assert is skipped. - self.fail('Boundary longer than buffer size') - - def parse_lines(self, file, boundary, content_length, cap_at_buffer=True): - """Generate parts of - ``('begin_form', (headers, name))`` - ``('begin_file', (headers, name, filename))`` - ``('cont', bytestring)`` - ``('end', None)`` - - Always obeys the grammar - parts = ( begin_form cont* end | - begin_file cont* end )* - """ - next_part = b'--' + boundary - last_part = next_part + b'--' - - iterator = chain(make_line_iter(file, limit=content_length, - buffer_size=self.buffer_size, - cap_at_buffer=cap_at_buffer), - _empty_string_iter) - - terminator = self._find_terminator(iterator) - - if terminator == last_part: - return - elif terminator != next_part: - self.fail('Expected boundary at start of multipart data') - - while terminator != last_part: - headers = parse_multipart_headers(iterator) - - disposition = headers.get('content-disposition') - if disposition is None: - self.fail('Missing Content-Disposition header') - disposition, extra = parse_options_header(disposition) - transfer_encoding = self.get_part_encoding(headers) - name = extra.get('name') - - # Accept filename* to support non-ascii filenames as per rfc2231 - filename = extra.get('filename') or extra.get('filename*') - - # if no content type is given we stream into memory. A list is - # used as a temporary container. - if filename is None: - yield _begin_form, (headers, name) - - # otherwise we parse the rest of the headers and ask the stream - # factory for something we can write in. 
- else: - yield _begin_file, (headers, name, filename) - - buf = b'' - for line in iterator: - if not line: - self.fail('unexpected end of stream') - - if line[:2] == b'--': - terminator = line.rstrip() - if terminator in (next_part, last_part): - break - - if transfer_encoding is not None: - if transfer_encoding == 'base64': - transfer_encoding = 'base64_codec' - try: - line = codecs.decode(line, transfer_encoding) - except Exception: - self.fail('could not decode transfer encoded chunk') - - # we have something in the buffer from the last iteration. - # this is usually a newline delimiter. - if buf: - yield _cont, buf - buf = b'' - - # If the line ends with windows CRLF we write everything except - # the last two bytes. In all other cases however we write - # everything except the last byte. If it was a newline, that's - # fine, otherwise it does not matter because we will write it - # the next iteration. this ensures we do not write the - # final newline into the stream. That way we do not have to - # truncate the stream. However we do have to make sure that - # if something else than a newline is in there we write it - # out. - if line[-2:] == b'\r\n': - buf = b'\r\n' - cutoff = -2 - else: - buf = line[-1:] - cutoff = -1 - yield _cont, line[:cutoff] - - else: # pragma: no cover - raise ValueError('unexpected end of part') - - # if we have a leftover in the buffer that is not a newline - # character we have to flush it, otherwise we will chop of - # certain values. - if buf not in (b'', b'\r', b'\n', b'\r\n'): - yield _cont, buf - - yield _end, None - - def parse_parts(self, file, boundary, content_length): - """Generate ``('file', (name, val))`` and - ``('form', (name, val))`` parts. - """ - in_memory = 0 - - for ellt, ell in self.parse_lines(file, boundary, content_length): - if ellt == _begin_file: - headers, name, filename = ell - is_file = True - guard_memory = False - filename, container = self.start_file_streaming( - filename, headers, content_length) - _write = container.write - - elif ellt == _begin_form: - headers, name = ell - is_file = False - container = [] - _write = container.append - guard_memory = self.max_form_memory_size is not None - - elif ellt == _cont: - _write(ell) - # if we write into memory and there is a memory size limit we - # count the number of bytes in memory and raise an exception if - # there is too much data in memory. - if guard_memory: - in_memory += len(ell) - if in_memory > self.max_form_memory_size: - self.in_memory_threshold_reached(in_memory) - - elif ellt == _end: - if is_file: - container.seek(0) - yield ('file', - (name, FileStorage(container, filename, name, - headers=headers))) - else: - part_charset = self.get_part_charset(headers) - yield ('form', - (name, b''.join(container).decode( - part_charset, self.errors))) - - def parse(self, file, boundary, content_length): - formstream, filestream = tee( - self.parse_parts(file, boundary, content_length), 2) - form = (p[1] for p in formstream if p[0] == 'form') - files = (p[1] for p in filestream if p[0] == 'file') - return self.cls(form), self.cls(files) - - -from werkzeug import exceptions diff --git a/pyextra/werkzeug/http.py b/pyextra/werkzeug/http.py deleted file mode 100644 index 22197221c..000000000 --- a/pyextra/werkzeug/http.py +++ /dev/null @@ -1,1158 +0,0 @@ -# -*- coding: utf-8 -*- -""" - werkzeug.http - ~~~~~~~~~~~~~ - - Werkzeug comes with a bunch of utilities that help Werkzeug to deal with - HTTP data. 
Most of the classes and functions provided by this module are - used by the wrappers, but they are useful on their own, too, especially if - the response and request objects are not used. - - This covers some of the more HTTP centric features of WSGI, some other - utilities such as cookie handling are documented in the `werkzeug.utils` - module. - - - :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details. - :license: BSD, see LICENSE for more details. -""" -import re -import warnings -from time import time, gmtime -try: - from email.utils import parsedate_tz -except ImportError: # pragma: no cover - from email.Utils import parsedate_tz -try: - from urllib.request import parse_http_list as _parse_list_header - from urllib.parse import unquote_to_bytes as _unquote -except ImportError: # pragma: no cover - from urllib2 import parse_http_list as _parse_list_header, \ - unquote as _unquote -from datetime import datetime, timedelta -from hashlib import md5 -import base64 - -from werkzeug._internal import _cookie_quote, _make_cookie_domain, \ - _cookie_parse_impl -from werkzeug._compat import to_unicode, iteritems, text_type, \ - string_types, try_coerce_native, to_bytes, PY2, \ - integer_types - - -_cookie_charset = 'latin1' -# for explanation of "media-range", etc. see Sections 5.3.{1,2} of RFC 7231 -_accept_re = re.compile( - r'''( # media-range capturing-parenthesis - [^\s;,]+ # type/subtype - (?:[ \t]*;[ \t]* # ";" - (?: # parameter non-capturing-parenthesis - [^\s;,q][^\s;,]* # token that doesn't start with "q" - | # or - q[^\s;,=][^\s;,]* # token that is more than just "q" - ) - )* # zero or more parameters - ) # end of media-range - (?:[ \t]*;[ \t]*q= # weight is a "q" parameter - (\d*(?:\.\d+)?) # qvalue capturing-parentheses - [^,]* # "extension" accept params: who cares? - )? # accept params are optional - ''', re.VERBOSE) -_token_chars = frozenset("!#$%&'*+-.0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" - '^_`abcdefghijklmnopqrstuvwxyz|~') -_etag_re = re.compile(r'([Ww]/)?(?:"(.*?)"|(.*?))(?:\s*,\s*|$)') -_unsafe_header_chars = set('()<>@,;:\"/[]?={} \t') -_option_header_piece_re = re.compile(r''' - ;\s* - (?P - "[^"\\]*(?:\\.[^"\\]*)*" # quoted string - | - [^\s;,=*]+ # token - ) - \s* - (?: # optionally followed by =value - (?: # equals sign, possibly with encoding - \*\s*=\s* # * indicates extended notation - (?P[^\s]+?) - '(?P[^\s]*?)' - | - =\s* # basic notation - ) - (?P - "[^"\\]*(?:\\.[^"\\]*)*" # quoted string - | - [^;,]+ # token - )? - )? 
- \s* -''', flags=re.VERBOSE) -_option_header_start_mime_type = re.compile(r',\s*([^;,\s]+)([;,]\s*.+)?') - -_entity_headers = frozenset([ - 'allow', 'content-encoding', 'content-language', 'content-length', - 'content-location', 'content-md5', 'content-range', 'content-type', - 'expires', 'last-modified' -]) -_hop_by_hop_headers = frozenset([ - 'connection', 'keep-alive', 'proxy-authenticate', - 'proxy-authorization', 'te', 'trailer', 'transfer-encoding', - 'upgrade' -]) - - -HTTP_STATUS_CODES = { - 100: 'Continue', - 101: 'Switching Protocols', - 102: 'Processing', - 200: 'OK', - 201: 'Created', - 202: 'Accepted', - 203: 'Non Authoritative Information', - 204: 'No Content', - 205: 'Reset Content', - 206: 'Partial Content', - 207: 'Multi Status', - 226: 'IM Used', # see RFC 3229 - 300: 'Multiple Choices', - 301: 'Moved Permanently', - 302: 'Found', - 303: 'See Other', - 304: 'Not Modified', - 305: 'Use Proxy', - 307: 'Temporary Redirect', - 400: 'Bad Request', - 401: 'Unauthorized', - 402: 'Payment Required', # unused - 403: 'Forbidden', - 404: 'Not Found', - 405: 'Method Not Allowed', - 406: 'Not Acceptable', - 407: 'Proxy Authentication Required', - 408: 'Request Timeout', - 409: 'Conflict', - 410: 'Gone', - 411: 'Length Required', - 412: 'Precondition Failed', - 413: 'Request Entity Too Large', - 414: 'Request URI Too Long', - 415: 'Unsupported Media Type', - 416: 'Requested Range Not Satisfiable', - 417: 'Expectation Failed', - 418: 'I\'m a teapot', # see RFC 2324 - 422: 'Unprocessable Entity', - 423: 'Locked', - 424: 'Failed Dependency', - 426: 'Upgrade Required', - 428: 'Precondition Required', # see RFC 6585 - 429: 'Too Many Requests', - 431: 'Request Header Fields Too Large', - 449: 'Retry With', # proprietary MS extension - 451: 'Unavailable For Legal Reasons', - 500: 'Internal Server Error', - 501: 'Not Implemented', - 502: 'Bad Gateway', - 503: 'Service Unavailable', - 504: 'Gateway Timeout', - 505: 'HTTP Version Not Supported', - 507: 'Insufficient Storage', - 510: 'Not Extended' -} - - -def wsgi_to_bytes(data): - """coerce wsgi unicode represented bytes to real ones - - """ - if isinstance(data, bytes): - return data - return data.encode('latin1') # XXX: utf8 fallback? - - -def bytes_to_wsgi(data): - assert isinstance(data, bytes), 'data must be bytes' - if isinstance(data, str): - return data - else: - return data.decode('latin1') - - -def quote_header_value(value, extra_chars='', allow_token=True): - """Quote a header value if necessary. - - .. versionadded:: 0.5 - - :param value: the value to quote. - :param extra_chars: a list of extra characters to skip quoting. - :param allow_token: if this is enabled token values are returned - unchanged. - """ - if isinstance(value, bytes): - value = bytes_to_wsgi(value) - value = str(value) - if allow_token: - token_chars = _token_chars | set(extra_chars) - if set(value).issubset(token_chars): - return value - return '"%s"' % value.replace('\\', '\\\\').replace('"', '\\"') - - -def unquote_header_value(value, is_filename=False): - r"""Unquotes a header value. (Reversal of :func:`quote_header_value`). - This does not use the real unquoting but what browsers are actually - using for quoting. - - .. versionadded:: 0.5 - - :param value: the header value to unquote. - """ - if value and value[0] == value[-1] == '"': - # this is not the real unquoting, but fixing this so that the - # RFC is met will result in bugs with internet explorer and - # probably some other browsers as well. 
IE for example is - # uploading files with "C:\foo\bar.txt" as filename - value = value[1:-1] - - # if this is a filename and the starting characters look like - # a UNC path, then just return the value without quotes. Using the - # replace sequence below on a UNC path has the effect of turning - # the leading double slash into a single slash and then - # _fix_ie_filename() doesn't work correctly. See #458. - if not is_filename or value[:2] != '\\\\': - return value.replace('\\\\', '\\').replace('\\"', '"') - return value - - -def dump_options_header(header, options): - """The reverse function to :func:`parse_options_header`. - - :param header: the header to dump - :param options: a dict of options to append. - """ - segments = [] - if header is not None: - segments.append(header) - for key, value in iteritems(options): - if value is None: - segments.append(key) - else: - segments.append('%s=%s' % (key, quote_header_value(value))) - return '; '.join(segments) - - -def dump_header(iterable, allow_token=True): - """Dump an HTTP header again. This is the reversal of - :func:`parse_list_header`, :func:`parse_set_header` and - :func:`parse_dict_header`. This also quotes strings that include an - equals sign unless you pass it as dict of key, value pairs. - - >>> dump_header({'foo': 'bar baz'}) - 'foo="bar baz"' - >>> dump_header(('foo', 'bar baz')) - 'foo, "bar baz"' - - :param iterable: the iterable or dict of values to quote. - :param allow_token: if set to `False` tokens as values are disallowed. - See :func:`quote_header_value` for more details. - """ - if isinstance(iterable, dict): - items = [] - for key, value in iteritems(iterable): - if value is None: - items.append(key) - else: - items.append('%s=%s' % ( - key, - quote_header_value(value, allow_token=allow_token) - )) - else: - items = [quote_header_value(x, allow_token=allow_token) - for x in iterable] - return ', '.join(items) - - -def parse_list_header(value): - """Parse lists as described by RFC 2068 Section 2. - - In particular, parse comma-separated lists where the elements of - the list may include quoted-strings. A quoted-string could - contain a comma. A non-quoted string could have quotes in the - middle. Quotes are removed automatically after parsing. - - It basically works like :func:`parse_set_header` just that items - may appear multiple times and case sensitivity is preserved. - - The return value is a standard :class:`list`: - - >>> parse_list_header('token, "quoted value"') - ['token', 'quoted value'] - - To create a header from the :class:`list` again, use the - :func:`dump_header` function. - - :param value: a string with a list header. - :return: :class:`list` - """ - result = [] - for item in _parse_list_header(value): - if item[:1] == item[-1:] == '"': - item = unquote_header_value(item[1:-1]) - result.append(item) - return result - - -def parse_dict_header(value, cls=dict): - """Parse lists of key, value pairs as described by RFC 2068 Section 2 and - convert them into a python dict (or any other mapping object created from - the type with a dict like interface provided by the `cls` argument): - - >>> d = parse_dict_header('foo="is a fish", bar="as well"') - >>> type(d) is dict - True - >>> sorted(d.items()) - [('bar', 'as well'), ('foo', 'is a fish')] - - If there is no value for a key it will be `None`: - - >>> parse_dict_header('key_without_value') - {'key_without_value': None} - - To create a header from the :class:`dict` again, use the - :func:`dump_header` function. - - .. 
versionchanged:: 0.9 - Added support for `cls` argument. - - :param value: a string with a dict header. - :param cls: callable to use for storage of parsed results. - :return: an instance of `cls` - """ - result = cls() - if not isinstance(value, text_type): - # XXX: validate - value = bytes_to_wsgi(value) - for item in _parse_list_header(value): - if '=' not in item: - result[item] = None - continue - name, value = item.split('=', 1) - if value[:1] == value[-1:] == '"': - value = unquote_header_value(value[1:-1]) - result[name] = value - return result - - -def parse_options_header(value, multiple=False): - """Parse a ``Content-Type`` like header into a tuple with the content - type and the options: - - >>> parse_options_header('text/html; charset=utf8') - ('text/html', {'charset': 'utf8'}) - - This should not be used to parse ``Cache-Control`` like headers that use - a slightly different format. For these headers use the - :func:`parse_dict_header` function. - - .. versionadded:: 0.5 - - :param value: the header to parse. - :param multiple: Whether try to parse and return multiple MIME types - :return: (mimetype, options) or (mimetype, options, mimetype, options, …) - if multiple=True - """ - if not value: - return '', {} - - result = [] - - value = "," + value.replace("\n", ",") - while value: - match = _option_header_start_mime_type.match(value) - if not match: - break - result.append(match.group(1)) # mimetype - options = {} - # Parse options - rest = match.group(2) - while rest: - optmatch = _option_header_piece_re.match(rest) - if not optmatch: - break - option, encoding, _, option_value = optmatch.groups() - option = unquote_header_value(option) - if option_value is not None: - option_value = unquote_header_value( - option_value, - option == 'filename') - if encoding is not None: - option_value = _unquote(option_value).decode(encoding) - options[option] = option_value - rest = rest[optmatch.end():] - result.append(options) - if multiple is False: - return tuple(result) - value = rest - - return tuple(result) if result else ('', {}) - - -def parse_accept_header(value, cls=None): - """Parses an HTTP Accept-* header. This does not implement a complete - valid algorithm but one that supports at least value and quality - extraction. - - Returns a new :class:`Accept` object (basically a list of ``(value, quality)`` - tuples sorted by the quality with some additional accessor methods). - - The second parameter can be a subclass of :class:`Accept` that is created - with the parsed values and returned. - - :param value: the accept header string to be parsed. - :param cls: the wrapper class for the return value (can be - :class:`Accept` or a subclass thereof) - :return: an instance of `cls`. - """ - if cls is None: - cls = Accept - - if not value: - return cls(None) - - result = [] - for match in _accept_re.finditer(value): - quality = match.group(2) - if not quality: - quality = 1 - else: - quality = max(min(float(quality), 1), 0) - result.append((match.group(1), quality)) - return cls(result) - - -def parse_cache_control_header(value, on_update=None, cls=None): - """Parse a cache control header. The RFC differs between response and - request cache control, this method does not. It's your responsibility - to not use the wrong control statements. - - .. versionadded:: 0.5 - The `cls` was added. If not specified an immutable - :class:`~werkzeug.datastructures.RequestCacheControl` is returned. - - :param value: a cache control header to be parsed. 
- :param on_update: an optional callable that is called every time a value - on the :class:`~werkzeug.datastructures.CacheControl` - object is changed. - :param cls: the class for the returned object. By default - :class:`~werkzeug.datastructures.RequestCacheControl` is used. - :return: a `cls` object. - """ - if cls is None: - cls = RequestCacheControl - if not value: - return cls(None, on_update) - return cls(parse_dict_header(value), on_update) - - -def parse_set_header(value, on_update=None): - """Parse a set-like header and return a - :class:`~werkzeug.datastructures.HeaderSet` object: - - >>> hs = parse_set_header('token, "quoted value"') - - The return value is an object that treats the items case-insensitively - and keeps the order of the items: - - >>> 'TOKEN' in hs - True - >>> hs.index('quoted value') - 1 - >>> hs - HeaderSet(['token', 'quoted value']) - - To create a header from the :class:`HeaderSet` again, use the - :func:`dump_header` function. - - :param value: a set header to be parsed. - :param on_update: an optional callable that is called every time a - value on the :class:`~werkzeug.datastructures.HeaderSet` - object is changed. - :return: a :class:`~werkzeug.datastructures.HeaderSet` - """ - if not value: - return HeaderSet(None, on_update) - return HeaderSet(parse_list_header(value), on_update) - - -def parse_authorization_header(value): - """Parse an HTTP basic/digest authorization header transmitted by the web - browser. The return value is either `None` if the header was invalid or - not given, otherwise an :class:`~werkzeug.datastructures.Authorization` - object. - - :param value: the authorization header to parse. - :return: a :class:`~werkzeug.datastructures.Authorization` object or `None`. - """ - if not value: - return - value = wsgi_to_bytes(value) - try: - auth_type, auth_info = value.split(None, 1) - auth_type = auth_type.lower() - except ValueError: - return - if auth_type == b'basic': - try: - username, password = base64.b64decode(auth_info).split(b':', 1) - except Exception: - return - return Authorization('basic', {'username': bytes_to_wsgi(username), - 'password': bytes_to_wsgi(password)}) - elif auth_type == b'digest': - auth_map = parse_dict_header(auth_info) - for key in 'username', 'realm', 'nonce', 'uri', 'response': - if key not in auth_map: - return - if 'qop' in auth_map: - if not auth_map.get('nc') or not auth_map.get('cnonce'): - return - return Authorization('digest', auth_map) - - -def parse_www_authenticate_header(value, on_update=None): - """Parse an HTTP WWW-Authenticate header into a - :class:`~werkzeug.datastructures.WWWAuthenticate` object. - - :param value: a WWW-Authenticate header to parse. - :param on_update: an optional callable that is called every time a value - on the :class:`~werkzeug.datastructures.WWWAuthenticate` - object is changed. - :return: a :class:`~werkzeug.datastructures.WWWAuthenticate` object. - """ - if not value: - return WWWAuthenticate(on_update=on_update) - try: - auth_type, auth_info = value.split(None, 1) - auth_type = auth_type.lower() - except (ValueError, AttributeError): - return WWWAuthenticate(value.strip().lower(), on_update=on_update) - return WWWAuthenticate(auth_type, parse_dict_header(auth_info), - on_update) - - -def parse_if_range_header(value): - """Parses an if-range header which can be an etag or a date. Returns - a :class:`~werkzeug.datastructures.IfRange` object. - - .. 
versionadded:: 0.7 - """ - if not value: - return IfRange() - date = parse_date(value) - if date is not None: - return IfRange(date=date) - # drop weakness information - return IfRange(unquote_etag(value)[0]) - - -def parse_range_header(value, make_inclusive=True): - """Parses a range header into a :class:`~werkzeug.datastructures.Range` - object. If the header is missing or malformed `None` is returned. - `ranges` is a list of ``(start, stop)`` tuples where the ranges are - non-inclusive. - - .. versionadded:: 0.7 - """ - if not value or '=' not in value: - return None - - ranges = [] - last_end = 0 - units, rng = value.split('=', 1) - units = units.strip().lower() - - for item in rng.split(','): - item = item.strip() - if '-' not in item: - return None - if item.startswith('-'): - if last_end < 0: - return None - try: - begin = int(item) - except ValueError: - return None - end = None - last_end = -1 - elif '-' in item: - begin, end = item.split('-', 1) - begin = begin.strip() - end = end.strip() - if not begin.isdigit(): - return None - begin = int(begin) - if begin < last_end or last_end < 0: - return None - if end: - if not end.isdigit(): - return None - end = int(end) + 1 - if begin >= end: - return None - else: - end = None - last_end = end - ranges.append((begin, end)) - - return Range(units, ranges) - - -def parse_content_range_header(value, on_update=None): - """Parses a range header into a - :class:`~werkzeug.datastructures.ContentRange` object or `None` if - parsing is not possible. - - .. versionadded:: 0.7 - - :param value: a content range header to be parsed. - :param on_update: an optional callable that is called every time a value - on the :class:`~werkzeug.datastructures.ContentRange` - object is changed. - """ - if value is None: - return None - try: - units, rangedef = (value or '').strip().split(None, 1) - except ValueError: - return None - - if '/' not in rangedef: - return None - rng, length = rangedef.split('/', 1) - if length == '*': - length = None - elif length.isdigit(): - length = int(length) - else: - return None - - if rng == '*': - return ContentRange(units, None, None, length, on_update=on_update) - elif '-' not in rng: - return None - - start, stop = rng.split('-', 1) - try: - start = int(start) - stop = int(stop) + 1 - except ValueError: - return None - - if is_byte_range_valid(start, stop, length): - return ContentRange(units, start, stop, length, on_update=on_update) - - -def quote_etag(etag, weak=False): - """Quote an etag. - - :param etag: the etag to quote. - :param weak: set to `True` to tag it "weak". - """ - if '"' in etag: - raise ValueError('invalid etag') - etag = '"%s"' % etag - if weak: - etag = 'W/' + etag - return etag - - -def unquote_etag(etag): - """Unquote a single etag: - - >>> unquote_etag('W/"bar"') - ('bar', True) - >>> unquote_etag('"bar"') - ('bar', False) - - :param etag: the etag identifier to unquote. - :return: a ``(etag, weak)`` tuple. - """ - if not etag: - return None, None - etag = etag.strip() - weak = False - if etag.startswith(('W/', 'w/')): - weak = True - etag = etag[2:] - if etag[:1] == etag[-1:] == '"': - etag = etag[1:-1] - return etag, weak - - -def parse_etags(value): - """Parse an etag header. - - :param value: the tag header to parse - :return: an :class:`~werkzeug.datastructures.ETags` object. 
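A minimal sketch of the ETag helpers defined in this module (``quote_etag``, ``unquote_etag``, ``parse_etags``):

.. code-block:: python

    # Sketch only; expected values are shown in the comments.
    from werkzeug.http import quote_etag, unquote_etag, parse_etags

    print(quote_etag('abc123'))              # -> '"abc123"'
    print(quote_etag('abc123', weak=True))   # -> 'W/"abc123"'
    print(unquote_etag('W/"abc123"'))        # -> ('abc123', True)

    etags = parse_etags('"abc123", W/"def456"')
    print(etags.contains('abc123'))          # -> True (strong comparison)
    print(etags.contains_weak('def456'))     # -> True (weak comparison)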
- """ - if not value: - return ETags() - strong = [] - weak = [] - end = len(value) - pos = 0 - while pos < end: - match = _etag_re.match(value, pos) - if match is None: - break - is_weak, quoted, raw = match.groups() - if raw == '*': - return ETags(star_tag=True) - elif quoted: - raw = quoted - if is_weak: - weak.append(raw) - else: - strong.append(raw) - pos = match.end() - return ETags(strong, weak) - - -def generate_etag(data): - """Generate an etag for some data.""" - return md5(data).hexdigest() - - -def parse_date(value): - """Parse one of the following date formats into a datetime object: - - .. sourcecode:: text - - Sun, 06 Nov 1994 08:49:37 GMT ; RFC 822, updated by RFC 1123 - Sunday, 06-Nov-94 08:49:37 GMT ; RFC 850, obsoleted by RFC 1036 - Sun Nov 6 08:49:37 1994 ; ANSI C's asctime() format - - If parsing fails the return value is `None`. - - :param value: a string with a supported date format. - :return: a :class:`datetime.datetime` object. - """ - if value: - t = parsedate_tz(value.strip()) - if t is not None: - try: - year = t[0] - # unfortunately that function does not tell us if two digit - # years were part of the string, or if they were prefixed - # with two zeroes. So what we do is to assume that 69-99 - # refer to 1900, and everything below to 2000 - if year >= 0 and year <= 68: - year += 2000 - elif year >= 69 and year <= 99: - year += 1900 - return datetime(*((year,) + t[1:7])) - \ - timedelta(seconds=t[-1] or 0) - except (ValueError, OverflowError): - return None - - -def _dump_date(d, delim): - """Used for `http_date` and `cookie_date`.""" - if d is None: - d = gmtime() - elif isinstance(d, datetime): - d = d.utctimetuple() - elif isinstance(d, (integer_types, float)): - d = gmtime(d) - return '%s, %02d%s%s%s%s %02d:%02d:%02d GMT' % ( - ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')[d.tm_wday], - d.tm_mday, delim, - ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', - 'Oct', 'Nov', 'Dec')[d.tm_mon - 1], - delim, str(d.tm_year), d.tm_hour, d.tm_min, d.tm_sec - ) - - -def cookie_date(expires=None): - """Formats the time to ensure compatibility with Netscape's cookie - standard. - - Accepts a floating point number expressed in seconds since the epoch in, a - datetime object or a timetuple. All times in UTC. The :func:`parse_date` - function can be used to parse such a date. - - Outputs a string in the format ``Wdy, DD-Mon-YYYY HH:MM:SS GMT``. - - :param expires: If provided that date is used, otherwise the current. - """ - return _dump_date(expires, '-') - - -def http_date(timestamp=None): - """Formats the time to match the RFC1123 date format. - - Accepts a floating point number expressed in seconds since the epoch in, a - datetime object or a timetuple. All times in UTC. The :func:`parse_date` - function can be used to parse such a date. - - Outputs a string in the format ``Wdy, DD Mon YYYY HH:MM:SS GMT``. - - :param timestamp: If provided that date is used, otherwise the current. - """ - return _dump_date(timestamp, ' ') - - -def parse_age(value=None): - """Parses a base-10 integer count of seconds into a timedelta. - - If parsing fails, the return value is `None`. - - :param value: a string consisting of an integer represented in base-10 - :return: a :class:`datetime.timedelta` object or `None`. 
- """ - if not value: - return None - try: - seconds = int(value) - except ValueError: - return None - if seconds < 0: - return None - try: - return timedelta(seconds=seconds) - except OverflowError: - return None - - -def dump_age(age=None): - """Formats the duration as a base-10 integer. - - :param age: should be an integer number of seconds, - a :class:`datetime.timedelta` object, or, - if the age is unknown, `None` (default). - """ - if age is None: - return - if isinstance(age, timedelta): - # do the equivalent of Python 2.7's timedelta.total_seconds(), - # but disregarding fractional seconds - age = age.seconds + (age.days * 24 * 3600) - - age = int(age) - if age < 0: - raise ValueError('age cannot be negative') - - return str(age) - - -def is_resource_modified(environ, etag=None, data=None, last_modified=None, - ignore_if_range=True): - """Convenience method for conditional requests. - - :param environ: the WSGI environment of the request to be checked. - :param etag: the etag for the response for comparison. - :param data: or alternatively the data of the response to automatically - generate an etag using :func:`generate_etag`. - :param last_modified: an optional date of the last modification. - :param ignore_if_range: If `False`, `If-Range` header will be taken into - account. - :return: `True` if the resource was modified, otherwise `False`. - """ - if etag is None and data is not None: - etag = generate_etag(data) - elif data is not None: - raise TypeError('both data and etag given') - if environ['REQUEST_METHOD'] not in ('GET', 'HEAD'): - return False - - unmodified = False - if isinstance(last_modified, string_types): - last_modified = parse_date(last_modified) - - # ensure that microsecond is zero because the HTTP spec does not transmit - # that either and we might have some false positives. See issue #39 - if last_modified is not None: - last_modified = last_modified.replace(microsecond=0) - - if_range = None - if not ignore_if_range and 'HTTP_RANGE' in environ: - # http://tools.ietf.org/html/rfc7233#section-3.2 - # A server MUST ignore an If-Range header field received in a request - # that does not contain a Range header field. - if_range = parse_if_range_header(environ.get('HTTP_IF_RANGE')) - - if if_range is not None and if_range.date is not None: - modified_since = if_range.date - else: - modified_since = parse_date(environ.get('HTTP_IF_MODIFIED_SINCE')) - - if modified_since and last_modified and last_modified <= modified_since: - unmodified = True - - if etag: - etag, _ = unquote_etag(etag) - if if_range is not None and if_range.etag is not None: - unmodified = parse_etags(if_range.etag).contains(etag) - else: - if_none_match = parse_etags(environ.get('HTTP_IF_NONE_MATCH')) - if if_none_match: - # http://tools.ietf.org/html/rfc7232#section-3.2 - # "A recipient MUST use the weak comparison function when comparing - # entity-tags for If-None-Match" - unmodified = if_none_match.contains_weak(etag) - - # https://tools.ietf.org/html/rfc7232#section-3.1 - # "Origin server MUST use the strong comparison function when - # comparing entity-tags for If-Match" - if_match = parse_etags(environ.get('HTTP_IF_MATCH')) - if if_match: - unmodified = not if_match.is_strong(etag) - - return not unmodified - - -def remove_entity_headers(headers, allowed=('expires', 'content-location')): - """Remove all entity headers from a list or :class:`Headers` object. This - operation works in-place. `Expires` and `Content-Location` headers are - by default not removed. 
The reason for this is :rfc:`2616` section - 10.3.5 which specifies some entity headers that should be sent. - - .. versionchanged:: 0.5 - added `allowed` parameter. - - :param headers: a list or :class:`Headers` object. - :param allowed: a list of headers that should still be allowed even though - they are entity headers. - """ - allowed = set(x.lower() for x in allowed) - headers[:] = [(key, value) for key, value in headers if - not is_entity_header(key) or key.lower() in allowed] - - -def remove_hop_by_hop_headers(headers): - """Remove all HTTP/1.1 "Hop-by-Hop" headers from a list or - :class:`Headers` object. This operation works in-place. - - .. versionadded:: 0.5 - - :param headers: a list or :class:`Headers` object. - """ - headers[:] = [(key, value) for key, value in headers if - not is_hop_by_hop_header(key)] - - -def is_entity_header(header): - """Check if a header is an entity header. - - .. versionadded:: 0.5 - - :param header: the header to test. - :return: `True` if it's an entity header, `False` otherwise. - """ - return header.lower() in _entity_headers - - -def is_hop_by_hop_header(header): - """Check if a header is an HTTP/1.1 "Hop-by-Hop" header. - - .. versionadded:: 0.5 - - :param header: the header to test. - :return: `True` if it's an HTTP/1.1 "Hop-by-Hop" header, `False` otherwise. - """ - return header.lower() in _hop_by_hop_headers - - -def parse_cookie(header, charset='utf-8', errors='replace', cls=None): - """Parse a cookie. Either from a string or WSGI environ. - - Per default encoding errors are ignored. If you want a different behavior - you can set `errors` to ``'replace'`` or ``'strict'``. In strict mode a - :exc:`HTTPUnicodeError` is raised. - - .. versionchanged:: 0.5 - This function now returns a :class:`TypeConversionDict` instead of a - regular dict. The `cls` parameter was added. - - :param header: the header to be used to parse the cookie. Alternatively - this can be a WSGI environment. - :param charset: the charset for the cookie values. - :param errors: the error behavior for the charset decoding. - :param cls: an optional dict class to use. If this is not specified - or `None` the default :class:`TypeConversionDict` is - used. - """ - if isinstance(header, dict): - header = header.get('HTTP_COOKIE', '') - elif header is None: - header = '' - - # If the value is an unicode string it's mangled through latin1. This - # is done because on PEP 3333 on Python 3 all headers are assumed latin1 - # which however is incorrect for cookies, which are sent in page encoding. - # As a result we - if isinstance(header, text_type): - header = header.encode('latin1', 'replace') - - if cls is None: - cls = TypeConversionDict - - def _parse_pairs(): - for key, val in _cookie_parse_impl(header): - key = to_unicode(key, charset, errors, allow_none_charset=True) - val = to_unicode(val, charset, errors, allow_none_charset=True) - yield try_coerce_native(key), val - - return cls(_parse_pairs()) - - -def dump_cookie(key, value='', max_age=None, expires=None, path='/', - domain=None, secure=False, httponly=False, - charset='utf-8', sync_expires=True, max_size=4093, - samesite=None): - """Creates a new Set-Cookie header without the ``Set-Cookie`` prefix - The parameters are the same as in the cookie Morsel object in the - Python standard library but it accepts unicode data, too. - - On Python 3 the return value of this function will be a unicode - string, on Python 2 it will be a native string. 
In both cases the - return value is usually restricted to ascii as the vast majority of - values are properly escaped, but that is no guarantee. If a unicode - string is returned it's tunneled through latin1 as required by - PEP 3333. - - The return value is not ASCII safe if the key contains unicode - characters. This is technically against the specification but - happens in the wild. It's strongly recommended to not use - non-ASCII values for the keys. - - :param max_age: should be a number of seconds, or `None` (default) if - the cookie should last only as long as the client's - browser session. Additionally `timedelta` objects - are accepted, too. - :param expires: should be a `datetime` object or unix timestamp. - :param path: limits the cookie to a given path, per default it will - span the whole domain. - :param domain: Use this if you want to set a cross-domain cookie. For - example, ``domain=".example.com"`` will set a cookie - that is readable by the domain ``www.example.com``, - ``foo.example.com`` etc. Otherwise, a cookie will only - be readable by the domain that set it. - :param secure: The cookie will only be available via HTTPS - :param httponly: disallow JavaScript to access the cookie. This is an - extension to the cookie standard and probably not - supported by all browsers. - :param charset: the encoding for unicode values. - :param sync_expires: automatically set expires if max_age is defined - but expires not. - :param max_size: Warn if the final header value exceeds this size. The - default, 4093, should be safely `supported by most browsers - `_. Set to 0 to disable this check. - :param samesite: Limits the scope of the cookie such that it will only - be attached to requests if those requests are "same-site". - - .. _`cookie`: http://browsercookielimits.squawky.net/ - """ - key = to_bytes(key, charset) - value = to_bytes(value, charset) - - if path is not None: - path = iri_to_uri(path, charset) - domain = _make_cookie_domain(domain) - if isinstance(max_age, timedelta): - max_age = (max_age.days * 60 * 60 * 24) + max_age.seconds - if expires is not None: - if not isinstance(expires, string_types): - expires = cookie_date(expires) - elif max_age is not None and sync_expires: - expires = to_bytes(cookie_date(time() + max_age)) - - samesite = samesite.title() if samesite else None - if samesite not in ('Strict', 'Lax', None): - raise ValueError("invalid SameSite value; must be 'Strict', 'Lax' or None") - - buf = [key + b'=' + _cookie_quote(value)] - - # XXX: In theory all of these parameters that are not marked with `None` - # should be quoted. Because stdlib did not quote it before I did not - # want to introduce quoting there now. - for k, v, q in ((b'Domain', domain, True), - (b'Expires', expires, False,), - (b'Max-Age', max_age, False), - (b'Secure', secure, None), - (b'HttpOnly', httponly, None), - (b'Path', path, False), - (b'SameSite', samesite, False)): - if q is None: - if v: - buf.append(k) - continue - - if v is None: - continue - - tmp = bytearray(k) - if not isinstance(v, (bytes, bytearray)): - v = to_bytes(text_type(v), charset) - if q: - v = _cookie_quote(v) - tmp += b'=' + v - buf.append(bytes(tmp)) - - # The return value will be an incorrectly encoded latin1 header on - # Python 3 for consistency with the headers object and a bytestring - # on Python 2 because that's how the API makes more sense. - rv = b'; '.join(buf) - if not PY2: - rv = rv.decode('latin1') - - # Warn if the final value of the cookie is less than the limit. 
If the - # cookie is too large, then it may be silently ignored, which can be quite - # hard to debug. - cookie_size = len(rv) - - if max_size and cookie_size > max_size: - value_size = len(value) - warnings.warn( - 'The "{key}" cookie is too large: the value was {value_size} bytes' - ' but the header required {extra_size} extra bytes. The final size' - ' was {cookie_size} bytes but the limit is {max_size} bytes.' - ' Browsers may silently ignore cookies larger than this.'.format( - key=key, - value_size=value_size, - extra_size=cookie_size - value_size, - cookie_size=cookie_size, - max_size=max_size - ), - stacklevel=2 - ) - - return rv - - -def is_byte_range_valid(start, stop, length): - """Checks if a given byte content range is valid for the given length. - - .. versionadded:: 0.7 - """ - if (start is None) != (stop is None): - return False - elif start is None: - return length is None or length >= 0 - elif length is None: - return 0 <= start < stop - elif start >= stop: - return False - return 0 <= start < length - - -# circular dependency fun -from werkzeug.datastructures import Accept, HeaderSet, ETags, Authorization, \ - WWWAuthenticate, TypeConversionDict, IfRange, Range, ContentRange, \ - RequestCacheControl - - -# DEPRECATED -# backwards compatible imports -from werkzeug.datastructures import ( # noqa - MIMEAccept, CharsetAccept, LanguageAccept, Headers -) -from werkzeug.urls import iri_to_uri diff --git a/pyextra/werkzeug/local.py b/pyextra/werkzeug/local.py deleted file mode 100644 index e6d7478a0..000000000 --- a/pyextra/werkzeug/local.py +++ /dev/null @@ -1,420 +0,0 @@ -# -*- coding: utf-8 -*- -""" - werkzeug.local - ~~~~~~~~~~~~~~ - - This module implements context-local objects. - - :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details. - :license: BSD, see LICENSE for more details. -""" -import copy -from functools import update_wrapper -from werkzeug.wsgi import ClosingIterator -from werkzeug._compat import PY2, implements_bool - -# since each thread has its own greenlet we can just use those as identifiers -# for the context. If greenlets are not available we fall back to the -# current thread ident depending on where it is. -try: - from greenlet import getcurrent as get_ident -except ImportError: - try: - from thread import get_ident - except ImportError: - from _thread import get_ident - - -def release_local(local): - """Releases the contents of the local for the current context. - This makes it possible to use locals without a manager. - - Example:: - - >>> loc = Local() - >>> loc.foo = 42 - >>> release_local(loc) - >>> hasattr(loc, 'foo') - False - - With this function one can release :class:`Local` objects as well - as :class:`LocalStack` objects. However it is not possible to - release data held by proxies that way, one always has to retain - a reference to the underlying local object in order to be able - to release it. - - .. 
versionadded:: 0.6.1 - """ - local.__release_local__() - - -class Local(object): - __slots__ = ('__storage__', '__ident_func__') - - def __init__(self): - object.__setattr__(self, '__storage__', {}) - object.__setattr__(self, '__ident_func__', get_ident) - - def __iter__(self): - return iter(self.__storage__.items()) - - def __call__(self, proxy): - """Create a proxy for a name.""" - return LocalProxy(self, proxy) - - def __release_local__(self): - self.__storage__.pop(self.__ident_func__(), None) - - def __getattr__(self, name): - try: - return self.__storage__[self.__ident_func__()][name] - except KeyError: - raise AttributeError(name) - - def __setattr__(self, name, value): - ident = self.__ident_func__() - storage = self.__storage__ - try: - storage[ident][name] = value - except KeyError: - storage[ident] = {name: value} - - def __delattr__(self, name): - try: - del self.__storage__[self.__ident_func__()][name] - except KeyError: - raise AttributeError(name) - - -class LocalStack(object): - - """This class works similar to a :class:`Local` but keeps a stack - of objects instead. This is best explained with an example:: - - >>> ls = LocalStack() - >>> ls.push(42) - >>> ls.top - 42 - >>> ls.push(23) - >>> ls.top - 23 - >>> ls.pop() - 23 - >>> ls.top - 42 - - They can be force released by using a :class:`LocalManager` or with - the :func:`release_local` function but the correct way is to pop the - item from the stack after using. When the stack is empty it will - no longer be bound to the current context (and as such released). - - By calling the stack without arguments it returns a proxy that resolves to - the topmost item on the stack. - - .. versionadded:: 0.6.1 - """ - - def __init__(self): - self._local = Local() - - def __release_local__(self): - self._local.__release_local__() - - def _get__ident_func__(self): - return self._local.__ident_func__ - - def _set__ident_func__(self, value): - object.__setattr__(self._local, '__ident_func__', value) - __ident_func__ = property(_get__ident_func__, _set__ident_func__) - del _get__ident_func__, _set__ident_func__ - - def __call__(self): - def _lookup(): - rv = self.top - if rv is None: - raise RuntimeError('object unbound') - return rv - return LocalProxy(_lookup) - - def push(self, obj): - """Pushes a new item to the stack""" - rv = getattr(self._local, 'stack', None) - if rv is None: - self._local.stack = rv = [] - rv.append(obj) - return rv - - def pop(self): - """Removes the topmost item from the stack, will return the - old value or `None` if the stack was already empty. - """ - stack = getattr(self._local, 'stack', None) - if stack is None: - return None - elif len(stack) == 1: - release_local(self._local) - return stack[-1] - else: - return stack.pop() - - @property - def top(self): - """The topmost item on the stack. If the stack is empty, - `None` is returned. - """ - try: - return self._local.stack[-1] - except (AttributeError, IndexError): - return None - - -class LocalManager(object): - - """Local objects cannot manage themselves. For that you need a local - manager. You can pass a local manager multiple locals or add them later - by appending them to `manager.locals`. Every time the manager cleans up, - it will clean up all the data left in the locals for this context. - - The `ident_func` parameter can be added to override the default ident - function for the wrapped locals. - - .. versionchanged:: 0.6.1 - Instead of a manager the :func:`release_local` function can be used - as well. - - .. 
versionchanged:: 0.7 - `ident_func` was added. - """ - - def __init__(self, locals=None, ident_func=None): - if locals is None: - self.locals = [] - elif isinstance(locals, Local): - self.locals = [locals] - else: - self.locals = list(locals) - if ident_func is not None: - self.ident_func = ident_func - for local in self.locals: - object.__setattr__(local, '__ident_func__', ident_func) - else: - self.ident_func = get_ident - - def get_ident(self): - """Return the context identifier the local objects use internally for - this context. You cannot override this method to change the behavior - but use it to link other context local objects (such as SQLAlchemy's - scoped sessions) to the Werkzeug locals. - - .. versionchanged:: 0.7 - You can pass a different ident function to the local manager that - will then be propagated to all the locals passed to the - constructor. - """ - return self.ident_func() - - def cleanup(self): - """Manually clean up the data in the locals for this context. Call - this at the end of the request or use `make_middleware()`. - """ - for local in self.locals: - release_local(local) - - def make_middleware(self, app): - """Wrap a WSGI application so that cleaning up happens after - request end. - """ - def application(environ, start_response): - return ClosingIterator(app(environ, start_response), self.cleanup) - return application - - def middleware(self, func): - """Like `make_middleware` but for decorating functions. - - Example usage:: - - @manager.middleware - def application(environ, start_response): - ... - - The difference to `make_middleware` is that the function passed - will have all the arguments copied from the inner application - (name, docstring, module). - """ - return update_wrapper(self.make_middleware(func), func) - - def __repr__(self): - return '<%s storages: %d>' % ( - self.__class__.__name__, - len(self.locals) - ) - - -@implements_bool -class LocalProxy(object): - - """Acts as a proxy for a werkzeug local. Forwards all operations to - a proxied object. The only operations not supported for forwarding - are right handed operands and any kind of assignment. - - Example usage:: - - from werkzeug.local import Local - l = Local() - - # these are proxies - request = l('request') - user = l('user') - - - from werkzeug.local import LocalStack - _response_local = LocalStack() - - # this is a proxy - response = _response_local() - - Whenever something is bound to l.user / l.request the proxy objects - will forward all operations. If no object is bound a :exc:`RuntimeError` - will be raised. - - To create proxies to :class:`Local` or :class:`LocalStack` objects, - call the object as shown above. If you want to have a proxy to an - object looked up by a function, you can (as of Werkzeug 0.6.1) pass - a function to the :class:`LocalProxy` constructor:: - - session = LocalProxy(lambda: get_current_request().session) - - .. versionchanged:: 0.6.1 - The class can be instantiated with a callable as well now. - """ - __slots__ = ('__local', '__dict__', '__name__', '__wrapped__') - - def __init__(self, local, name=None): - object.__setattr__(self, '_LocalProxy__local', local) - object.__setattr__(self, '__name__', name) - if callable(local) and not hasattr(local, '__release_local__'): - # "local" is a callable that is not an instance of Local or - # LocalManager: mark it as a wrapped function. - object.__setattr__(self, '__wrapped__', local) - - def _get_current_object(self): - """Return the current object. 
This is useful if you want the real - object behind the proxy at a time for performance reasons or because - you want to pass the object into a different context. - """ - if not hasattr(self.__local, '__release_local__'): - return self.__local() - try: - return getattr(self.__local, self.__name__) - except AttributeError: - raise RuntimeError('no object bound to %s' % self.__name__) - - @property - def __dict__(self): - try: - return self._get_current_object().__dict__ - except RuntimeError: - raise AttributeError('__dict__') - - def __repr__(self): - try: - obj = self._get_current_object() - except RuntimeError: - return '<%s unbound>' % self.__class__.__name__ - return repr(obj) - - def __bool__(self): - try: - return bool(self._get_current_object()) - except RuntimeError: - return False - - def __unicode__(self): - try: - return unicode(self._get_current_object()) # noqa - except RuntimeError: - return repr(self) - - def __dir__(self): - try: - return dir(self._get_current_object()) - except RuntimeError: - return [] - - def __getattr__(self, name): - if name == '__members__': - return dir(self._get_current_object()) - return getattr(self._get_current_object(), name) - - def __setitem__(self, key, value): - self._get_current_object()[key] = value - - def __delitem__(self, key): - del self._get_current_object()[key] - - if PY2: - __getslice__ = lambda x, i, j: x._get_current_object()[i:j] - - def __setslice__(self, i, j, seq): - self._get_current_object()[i:j] = seq - - def __delslice__(self, i, j): - del self._get_current_object()[i:j] - - __setattr__ = lambda x, n, v: setattr(x._get_current_object(), n, v) - __delattr__ = lambda x, n: delattr(x._get_current_object(), n) - __str__ = lambda x: str(x._get_current_object()) - __lt__ = lambda x, o: x._get_current_object() < o - __le__ = lambda x, o: x._get_current_object() <= o - __eq__ = lambda x, o: x._get_current_object() == o - __ne__ = lambda x, o: x._get_current_object() != o - __gt__ = lambda x, o: x._get_current_object() > o - __ge__ = lambda x, o: x._get_current_object() >= o - __cmp__ = lambda x, o: cmp(x._get_current_object(), o) # noqa - __hash__ = lambda x: hash(x._get_current_object()) - __call__ = lambda x, *a, **kw: x._get_current_object()(*a, **kw) - __len__ = lambda x: len(x._get_current_object()) - __getitem__ = lambda x, i: x._get_current_object()[i] - __iter__ = lambda x: iter(x._get_current_object()) - __contains__ = lambda x, i: i in x._get_current_object() - __add__ = lambda x, o: x._get_current_object() + o - __sub__ = lambda x, o: x._get_current_object() - o - __mul__ = lambda x, o: x._get_current_object() * o - __floordiv__ = lambda x, o: x._get_current_object() // o - __mod__ = lambda x, o: x._get_current_object() % o - __divmod__ = lambda x, o: x._get_current_object().__divmod__(o) - __pow__ = lambda x, o: x._get_current_object() ** o - __lshift__ = lambda x, o: x._get_current_object() << o - __rshift__ = lambda x, o: x._get_current_object() >> o - __and__ = lambda x, o: x._get_current_object() & o - __xor__ = lambda x, o: x._get_current_object() ^ o - __or__ = lambda x, o: x._get_current_object() | o - __div__ = lambda x, o: x._get_current_object().__div__(o) - __truediv__ = lambda x, o: x._get_current_object().__truediv__(o) - __neg__ = lambda x: -(x._get_current_object()) - __pos__ = lambda x: +(x._get_current_object()) - __abs__ = lambda x: abs(x._get_current_object()) - __invert__ = lambda x: ~(x._get_current_object()) - __complex__ = lambda x: complex(x._get_current_object()) - __int__ = lambda x: 
int(x._get_current_object()) - __long__ = lambda x: long(x._get_current_object()) # noqa - __float__ = lambda x: float(x._get_current_object()) - __oct__ = lambda x: oct(x._get_current_object()) - __hex__ = lambda x: hex(x._get_current_object()) - __index__ = lambda x: x._get_current_object().__index__() - __coerce__ = lambda x, o: x._get_current_object().__coerce__(x, o) - __enter__ = lambda x: x._get_current_object().__enter__() - __exit__ = lambda x, *a, **kw: x._get_current_object().__exit__(*a, **kw) - __radd__ = lambda x, o: o + x._get_current_object() - __rsub__ = lambda x, o: o - x._get_current_object() - __rmul__ = lambda x, o: o * x._get_current_object() - __rdiv__ = lambda x, o: o / x._get_current_object() - if PY2: - __rtruediv__ = lambda x, o: x._get_current_object().__rtruediv__(o) - else: - __rtruediv__ = __rdiv__ - __rfloordiv__ = lambda x, o: o // x._get_current_object() - __rmod__ = lambda x, o: o % x._get_current_object() - __rdivmod__ = lambda x, o: x._get_current_object().__rdivmod__(o) - __copy__ = lambda x: copy.copy(x._get_current_object()) - __deepcopy__ = lambda x, memo: copy.deepcopy(x._get_current_object(), memo) diff --git a/pyextra/werkzeug/posixemulation.py b/pyextra/werkzeug/posixemulation.py deleted file mode 100644 index 8fd6314f2..000000000 --- a/pyextra/werkzeug/posixemulation.py +++ /dev/null @@ -1,106 +0,0 @@ -# -*- coding: utf-8 -*- -r""" - werkzeug.posixemulation - ~~~~~~~~~~~~~~~~~~~~~~~ - - Provides a POSIX emulation for some features that are relevant to - web applications. The main purpose is to simplify support for - systems such as Windows NT that are not 100% POSIX compatible. - - Currently this only implements a :func:`rename` function that - follows POSIX semantics. Eg: if the target file already exists it - will be replaced without asking. - - This module was introduced in 0.6.1 and is not a public interface. - It might become one in later versions of Werkzeug. - - :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details. - :license: BSD, see LICENSE for more details. 
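# A minimal sketch of the POSIX-style rename described above: write to a
# temporary file, then replace the target as atomically as the platform
# allows, even if the target already exists. The file names are placeholders.
import os
import tempfile

from werkzeug.posixemulation import rename

fd, tmp_path = tempfile.mkstemp(dir='.')
with os.fdopen(fd, 'w') as f:
    f.write('new contents\n')

# on POSIX this is plain os.rename(); on Windows the ctypes-based fallbacks
# below are used so the existing target is replaced without asking
rename(tmp_path, 'settings.cfg')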
-""" -import sys -import os -import errno -import time -import random - -from ._compat import to_unicode -from .filesystem import get_filesystem_encoding - - -can_rename_open_file = False -if os.name == 'nt': # pragma: no cover - _rename = lambda src, dst: False - _rename_atomic = lambda src, dst: False - - try: - import ctypes - - _MOVEFILE_REPLACE_EXISTING = 0x1 - _MOVEFILE_WRITE_THROUGH = 0x8 - _MoveFileEx = ctypes.windll.kernel32.MoveFileExW - - def _rename(src, dst): - src = to_unicode(src, get_filesystem_encoding()) - dst = to_unicode(dst, get_filesystem_encoding()) - if _rename_atomic(src, dst): - return True - retry = 0 - rv = False - while not rv and retry < 100: - rv = _MoveFileEx(src, dst, _MOVEFILE_REPLACE_EXISTING | - _MOVEFILE_WRITE_THROUGH) - if not rv: - time.sleep(0.001) - retry += 1 - return rv - - # new in Vista and Windows Server 2008 - _CreateTransaction = ctypes.windll.ktmw32.CreateTransaction - _CommitTransaction = ctypes.windll.ktmw32.CommitTransaction - _MoveFileTransacted = ctypes.windll.kernel32.MoveFileTransactedW - _CloseHandle = ctypes.windll.kernel32.CloseHandle - can_rename_open_file = True - - def _rename_atomic(src, dst): - ta = _CreateTransaction(None, 0, 0, 0, 0, 1000, 'Werkzeug rename') - if ta == -1: - return False - try: - retry = 0 - rv = False - while not rv and retry < 100: - rv = _MoveFileTransacted(src, dst, None, None, - _MOVEFILE_REPLACE_EXISTING | - _MOVEFILE_WRITE_THROUGH, ta) - if rv: - rv = _CommitTransaction(ta) - break - else: - time.sleep(0.001) - retry += 1 - return rv - finally: - _CloseHandle(ta) - except Exception: - pass - - def rename(src, dst): - # Try atomic or pseudo-atomic rename - if _rename(src, dst): - return - # Fall back to "move away and replace" - try: - os.rename(src, dst) - except OSError as e: - if e.errno != errno.EEXIST: - raise - old = "%s-%08x" % (dst, random.randint(0, sys.maxint)) - os.rename(dst, old) - os.rename(src, dst) - try: - os.unlink(old) - except Exception: - pass -else: - rename = os.rename - can_rename_open_file = True diff --git a/pyextra/werkzeug/routing.py b/pyextra/werkzeug/routing.py deleted file mode 100644 index 2e4e2f42c..000000000 --- a/pyextra/werkzeug/routing.py +++ /dev/null @@ -1,1792 +0,0 @@ -# -*- coding: utf-8 -*- -""" - werkzeug.routing - ~~~~~~~~~~~~~~~~ - - When it comes to combining multiple controller or view functions (however - you want to call them) you need a dispatcher. A simple way would be - applying regular expression tests on the ``PATH_INFO`` and calling - registered callback functions that return the value then. - - This module implements a much more powerful system than simple regular - expression matching because it can also convert values in the URLs and - build URLs. - - Here a simple example that creates an URL map for an application with - two subdomains (www and kb) and some URL rules: - - >>> m = Map([ - ... # Static URLs - ... Rule('/', endpoint='static/index'), - ... Rule('/about', endpoint='static/about'), - ... Rule('/help', endpoint='static/help'), - ... # Knowledge Base - ... Subdomain('kb', [ - ... Rule('/', endpoint='kb/index'), - ... Rule('/browse/', endpoint='kb/browse'), - ... Rule('/browse//', endpoint='kb/browse'), - ... Rule('/browse//', endpoint='kb/browse') - ... ]) - ... ], default_subdomain='www') - - If the application doesn't use subdomains it's perfectly fine to not set - the default subdomain and not use the `Subdomain` rule factory. The endpoint - in the rules can be anything, for example import paths or unique - identifiers. 
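# A minimal sketch of the URL map described above, with the dynamic rule parts
# written out as ``<converter:name>`` placeholders (e.g. ``<int:id>``), which
# is the syntax the rest of this module parses.
from werkzeug.routing import Map, Rule, Subdomain

url_map = Map([
    Rule('/', endpoint='static/index'),
    Rule('/about', endpoint='static/about'),
    Subdomain('kb', [
        Rule('/browse/', endpoint='kb/browse'),
        Rule('/browse/<int:id>/', endpoint='kb/browse'),
        Rule('/browse/<int:id>/<int:page>', endpoint='kb/browse'),
    ]),
], default_subdomain='www')

urls = url_map.bind('example.com')
assert urls.build('kb/browse', {'id': 42}) == 'http://kb.example.com/browse/42/'
assert urls.build('static/index', force_external=True) == 'http://www.example.com/'

kb = url_map.bind('example.com', subdomain='kb')
assert kb.match('/browse/42/23') == ('kb/browse', {'id': 42, 'page': 23})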
The WSGI application can use those endpoints to get the - handler for that URL. It doesn't have to be a string at all but it's - recommended. - - Now it's possible to create a URL adapter for one of the subdomains and - build URLs: - - >>> c = m.bind('example.com') - >>> c.build("kb/browse", dict(id=42)) - 'http://kb.example.com/browse/42/' - >>> c.build("kb/browse", dict()) - 'http://kb.example.com/browse/' - >>> c.build("kb/browse", dict(id=42, page=3)) - 'http://kb.example.com/browse/42/3' - >>> c.build("static/about") - '/about' - >>> c.build("static/index", force_external=True) - 'http://www.example.com/' - - >>> c = m.bind('example.com', subdomain='kb') - >>> c.build("static/about") - 'http://www.example.com/about' - - The first argument to bind is the server name *without* the subdomain. - Per default it will assume that the script is mounted on the root, but - often that's not the case so you can provide the real mount point as - second argument: - - >>> c = m.bind('example.com', '/applications/example') - - The third argument can be the subdomain, if not given the default - subdomain is used. For more details about binding have a look at the - documentation of the `MapAdapter`. - - And here is how you can match URLs: - - >>> c = m.bind('example.com') - >>> c.match("/") - ('static/index', {}) - >>> c.match("/about") - ('static/about', {}) - >>> c = m.bind('example.com', '/', 'kb') - >>> c.match("/") - ('kb/index', {}) - >>> c.match("/browse/42/23") - ('kb/browse', {'id': 42, 'page': 23}) - - If matching fails you get a `NotFound` exception, if the rule thinks - it's a good idea to redirect (for example because the URL was defined - to have a slash at the end but the request was missing that slash) it - will raise a `RequestRedirect` exception. Both are subclasses of the - `HTTPException` so you can use those errors as responses in the - application. - - If matching succeeded but the URL rule was incompatible to the given - method (for example there were only rules for `GET` and `HEAD` and - routing system tried to match a `POST` request) a `MethodNotAllowed` - exception is raised. - - - :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details. - :license: BSD, see LICENSE for more details. -""" -import difflib -import re -import uuid -import posixpath - -from pprint import pformat -from threading import Lock - -from werkzeug.urls import url_encode, url_quote, url_join -from werkzeug.utils import redirect, format_string -from werkzeug.exceptions import HTTPException, NotFound, MethodNotAllowed, \ - BadHost -from werkzeug._internal import _get_environ, _encode_idna -from werkzeug._compat import itervalues, iteritems, to_unicode, to_bytes, \ - text_type, string_types, native_string_result, \ - implements_to_string, wsgi_decoding_dance -from werkzeug.datastructures import ImmutableDict, MultiDict -from werkzeug.utils import cached_property - - -_rule_re = re.compile(r''' - (?P[^<]*) # static rule data - < - (?: - (?P[a-zA-Z_][a-zA-Z0-9_]*) # converter name - (?:\((?P.*?)\))? # converter arguments - \: # variable delimiter - )? - (?P[a-zA-Z_][a-zA-Z0-9_]*) # variable name - > -''', re.VERBOSE) -_simple_rule_re = re.compile(r'<([^>]+)>') -_converter_args_re = re.compile(r''' - ((?P\w+)\s*=\s*)? 
- (?P - True|False| - \d+.\d+| - \d+.| - \d+| - [\w\d_.]+| - [urUR]?(?P"[^"]*?"|'[^']*') - )\s*, -''', re.VERBOSE | re.UNICODE) - - -_PYTHON_CONSTANTS = { - 'None': None, - 'True': True, - 'False': False -} - - -def _pythonize(value): - if value in _PYTHON_CONSTANTS: - return _PYTHON_CONSTANTS[value] - for convert in int, float: - try: - return convert(value) - except ValueError: - pass - if value[:1] == value[-1:] and value[0] in '"\'': - value = value[1:-1] - return text_type(value) - - -def parse_converter_args(argstr): - argstr += ',' - args = [] - kwargs = {} - - for item in _converter_args_re.finditer(argstr): - value = item.group('stringval') - if value is None: - value = item.group('value') - value = _pythonize(value) - if not item.group('name'): - args.append(value) - else: - name = item.group('name') - kwargs[name] = value - - return tuple(args), kwargs - - -def parse_rule(rule): - """Parse a rule and return it as generator. Each iteration yields tuples - in the form ``(converter, arguments, variable)``. If the converter is - `None` it's a static url part, otherwise it's a dynamic one. - - :internal: - """ - pos = 0 - end = len(rule) - do_match = _rule_re.match - used_names = set() - while pos < end: - m = do_match(rule, pos) - if m is None: - break - data = m.groupdict() - if data['static']: - yield None, None, data['static'] - variable = data['variable'] - converter = data['converter'] or 'default' - if variable in used_names: - raise ValueError('variable name %r used twice.' % variable) - used_names.add(variable) - yield converter, data['args'] or None, variable - pos = m.end() - if pos < end: - remaining = rule[pos:] - if '>' in remaining or '<' in remaining: - raise ValueError('malformed url rule: %r' % rule) - yield None, None, remaining - - -class RoutingException(Exception): - - """Special exceptions that require the application to redirect, notifying - about missing urls, etc. - - :internal: - """ - - -class RequestRedirect(HTTPException, RoutingException): - - """Raise if the map requests a redirect. This is for example the case if - `strict_slashes` are activated and an url that requires a trailing slash. - - The attribute `new_url` contains the absolute destination url. - """ - code = 301 - - def __init__(self, new_url): - RoutingException.__init__(self, new_url) - self.new_url = new_url - - def get_response(self, environ): - return redirect(self.new_url, self.code) - - -class RequestSlash(RoutingException): - - """Internal exception.""" - - -class RequestAliasRedirect(RoutingException): - - """This rule is an alias and wants to redirect to the canonical URL.""" - - def __init__(self, matched_values): - self.matched_values = matched_values - - -@implements_to_string -class BuildError(RoutingException, LookupError): - - """Raised if the build system cannot find a URL for an endpoint with the - values provided. 
- """ - - def __init__(self, endpoint, values, method, adapter=None): - LookupError.__init__(self, endpoint, values, method) - self.endpoint = endpoint - self.values = values - self.method = method - self.adapter = adapter - - @cached_property - def suggested(self): - return self.closest_rule(self.adapter) - - def closest_rule(self, adapter): - def _score_rule(rule): - return sum([ - 0.98 * difflib.SequenceMatcher( - None, rule.endpoint, self.endpoint - ).ratio(), - 0.01 * bool(set(self.values or ()).issubset(rule.arguments)), - 0.01 * bool(rule.methods and self.method in rule.methods) - ]) - - if adapter and adapter.map._rules: - return max(adapter.map._rules, key=_score_rule) - - def __str__(self): - message = [] - message.append('Could not build url for endpoint %r' % self.endpoint) - if self.method: - message.append(' (%r)' % self.method) - if self.values: - message.append(' with values %r' % sorted(self.values.keys())) - message.append('.') - if self.suggested: - if self.endpoint == self.suggested.endpoint: - if self.method and self.method not in self.suggested.methods: - message.append(' Did you mean to use methods %r?' % sorted( - self.suggested.methods - )) - missing_values = self.suggested.arguments.union( - set(self.suggested.defaults or ()) - ) - set(self.values.keys()) - if missing_values: - message.append( - ' Did you forget to specify values %r?' % - sorted(missing_values) - ) - else: - message.append( - ' Did you mean %r instead?' % self.suggested.endpoint - ) - return u''.join(message) - - -class ValidationError(ValueError): - - """Validation error. If a rule converter raises this exception the rule - does not match the current URL and the next URL is tried. - """ - - -class RuleFactory(object): - - """As soon as you have more complex URL setups it's a good idea to use rule - factories to avoid repetitive tasks. Some of them are builtin, others can - be added by subclassing `RuleFactory` and overriding `get_rules`. - """ - - def get_rules(self, map): - """Subclasses of `RuleFactory` have to override this method and return - an iterable of rules.""" - raise NotImplementedError() - - -class Subdomain(RuleFactory): - - """All URLs provided by this factory have the subdomain set to a - specific domain. For example if you want to use the subdomain for - the current language this can be a good setup:: - - url_map = Map([ - Rule('/', endpoint='#select_language'), - Subdomain('', [ - Rule('/', endpoint='index'), - Rule('/about', endpoint='about'), - Rule('/help', endpoint='help') - ]) - ]) - - All the rules except for the ``'#select_language'`` endpoint will now - listen on a two letter long subdomain that holds the language code - for the current request. - """ - - def __init__(self, subdomain, rules): - self.subdomain = subdomain - self.rules = rules - - def get_rules(self, map): - for rulefactory in self.rules: - for rule in rulefactory.get_rules(map): - rule = rule.empty() - rule.subdomain = self.subdomain - yield rule - - -class Submount(RuleFactory): - - """Like `Subdomain` but prefixes the URL rule with a given string:: - - url_map = Map([ - Rule('/', endpoint='index'), - Submount('/blog', [ - Rule('/', endpoint='blog/index'), - Rule('/entry/', endpoint='blog/show') - ]) - ]) - - Now the rule ``'blog/show'`` matches ``/blog/entry/``. 
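# A minimal sketch of the Submount factory described above, with the dynamic
# part of the entry rule written out as an ``<int:id>`` placeholder.
from werkzeug.routing import Map, Rule, Submount

url_map = Map([
    Rule('/', endpoint='index'),
    Submount('/blog', [
        Rule('/', endpoint='blog/index'),
        Rule('/entry/<int:id>', endpoint='blog/show'),
    ]),
])

urls = url_map.bind('example.com')
assert urls.match('/blog/entry/42') == ('blog/show', {'id': 42})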
- """ - - def __init__(self, path, rules): - self.path = path.rstrip('/') - self.rules = rules - - def get_rules(self, map): - for rulefactory in self.rules: - for rule in rulefactory.get_rules(map): - rule = rule.empty() - rule.rule = self.path + rule.rule - yield rule - - -class EndpointPrefix(RuleFactory): - - """Prefixes all endpoints (which must be strings for this factory) with - another string. This can be useful for sub applications:: - - url_map = Map([ - Rule('/', endpoint='index'), - EndpointPrefix('blog/', [Submount('/blog', [ - Rule('/', endpoint='index'), - Rule('/entry/', endpoint='show') - ])]) - ]) - """ - - def __init__(self, prefix, rules): - self.prefix = prefix - self.rules = rules - - def get_rules(self, map): - for rulefactory in self.rules: - for rule in rulefactory.get_rules(map): - rule = rule.empty() - rule.endpoint = self.prefix + rule.endpoint - yield rule - - -class RuleTemplate(object): - - """Returns copies of the rules wrapped and expands string templates in - the endpoint, rule, defaults or subdomain sections. - - Here a small example for such a rule template:: - - from werkzeug.routing import Map, Rule, RuleTemplate - - resource = RuleTemplate([ - Rule('/$name/', endpoint='$name.list'), - Rule('/$name/', endpoint='$name.show') - ]) - - url_map = Map([resource(name='user'), resource(name='page')]) - - When a rule template is called the keyword arguments are used to - replace the placeholders in all the string parameters. - """ - - def __init__(self, rules): - self.rules = list(rules) - - def __call__(self, *args, **kwargs): - return RuleTemplateFactory(self.rules, dict(*args, **kwargs)) - - -class RuleTemplateFactory(RuleFactory): - - """A factory that fills in template variables into rules. Used by - `RuleTemplate` internally. - - :internal: - """ - - def __init__(self, rules, context): - self.rules = rules - self.context = context - - def get_rules(self, map): - for rulefactory in self.rules: - for rule in rulefactory.get_rules(map): - new_defaults = subdomain = None - if rule.defaults: - new_defaults = {} - for key, value in iteritems(rule.defaults): - if isinstance(value, string_types): - value = format_string(value, self.context) - new_defaults[key] = value - if rule.subdomain is not None: - subdomain = format_string(rule.subdomain, self.context) - new_endpoint = rule.endpoint - if isinstance(new_endpoint, string_types): - new_endpoint = format_string(new_endpoint, self.context) - yield Rule( - format_string(rule.rule, self.context), - new_defaults, - subdomain, - rule.methods, - rule.build_only, - new_endpoint, - rule.strict_slashes - ) - - -@implements_to_string -class Rule(RuleFactory): - - """A Rule represents one URL pattern. There are some options for `Rule` - that change the way it behaves and are passed to the `Rule` constructor. - Note that besides the rule-string all arguments *must* be keyword arguments - in order to not break the application on Werkzeug upgrades. - - `string` - Rule strings basically are just normal URL paths with placeholders in - the format ```` where the converter and the - arguments are optional. If no converter is defined the `default` - converter is used which means `string` in the normal configuration. - - URL rules that end with a slash are branch URLs, others are leaves. - If you have `strict_slashes` enabled (which is the default), all - branch URLs that are matched without a trailing slash will trigger a - redirect to the same URL with the missing slash appended. - - The converters are defined on the `Map`. 
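# A minimal sketch of the rule-string syntax described above: placeholders
# take the form ``<converter(arguments):name>``, and without a converter the
# default ``string`` converter is used. Branch URLs (trailing slash) trigger a
# redirect when requested without the slash while ``strict_slashes`` is
# enabled.
from werkzeug.routing import Map, Rule, RequestRedirect

url_map = Map([
    Rule('/pages/<page>', endpoint='page'),                 # default converter
    Rule('/archive/<int(fixed_digits=4):year>/', endpoint='archive'),
])
urls = url_map.bind('example.com')

assert urls.match('/pages/intro') == ('page', {'page': 'intro'})
assert urls.match('/archive/2018/') == ('archive', {'year': 2018})

try:
    urls.match('/archive/2018')        # branch URL without the trailing slash
except RequestRedirect as exc:
    assert exc.new_url == 'http://example.com/archive/2018/'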
- - `endpoint` - The endpoint for this rule. This can be anything. A reference to a - function, a string, a number etc. The preferred way is using a string - because the endpoint is used for URL generation. - - `defaults` - An optional dict with defaults for other rules with the same endpoint. - This is a bit tricky but useful if you want to have unique URLs:: - - url_map = Map([ - Rule('/all/', defaults={'page': 1}, endpoint='all_entries'), - Rule('/all/page/', endpoint='all_entries') - ]) - - If a user now visits ``http://example.com/all/page/1`` he will be - redirected to ``http://example.com/all/``. If `redirect_defaults` is - disabled on the `Map` instance this will only affect the URL - generation. - - `subdomain` - The subdomain rule string for this rule. If not specified the rule - only matches for the `default_subdomain` of the map. If the map is - not bound to a subdomain this feature is disabled. - - Can be useful if you want to have user profiles on different subdomains - and all subdomains are forwarded to your application:: - - url_map = Map([ - Rule('/', subdomain='', endpoint='user/homepage'), - Rule('/stats', subdomain='', endpoint='user/stats') - ]) - - `methods` - A sequence of http methods this rule applies to. If not specified, all - methods are allowed. For example this can be useful if you want different - endpoints for `POST` and `GET`. If methods are defined and the path - matches but the method matched against is not in this list or in the - list of another rule for that path the error raised is of the type - `MethodNotAllowed` rather than `NotFound`. If `GET` is present in the - list of methods and `HEAD` is not, `HEAD` is added automatically. - - .. versionchanged:: 0.6.1 - `HEAD` is now automatically added to the methods if `GET` is - present. The reason for this is that existing code often did not - work properly in servers not rewriting `HEAD` to `GET` - automatically and it was not documented how `HEAD` should be - treated. This was considered a bug in Werkzeug because of that. - - `strict_slashes` - Override the `Map` setting for `strict_slashes` only for this rule. If - not specified the `Map` setting is used. - - `build_only` - Set this to True and the rule will never match but will create a URL - that can be build. This is useful if you have resources on a subdomain - or folder that are not handled by the WSGI application (like static data) - - `redirect_to` - If given this must be either a string or callable. In case of a - callable it's called with the url adapter that triggered the match and - the values of the URL as keyword arguments and has to return the target - for the redirect, otherwise it has to be a string with placeholders in - rule syntax:: - - def foo_with_slug(adapter, id): - # ask the database for the slug for the old id. this of - # course has nothing to do with werkzeug. - return 'foo/' + Foo.get_slug_for_id(id) - - url_map = Map([ - Rule('/foo/', endpoint='foo'), - Rule('/some/old/url/', redirect_to='foo/'), - Rule('/other/old/url/', redirect_to=foo_with_slug) - ]) - - When the rule is matched the routing system will raise a - `RequestRedirect` exception with the target for the redirect. - - Keep in mind that the URL will be joined against the URL root of the - script so don't use a leading slash on the target URL unless you - really mean root of that domain. - - `alias` - If enabled this rule serves as an alias for another rule with the same - endpoint and arguments. 
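# A minimal sketch of ``redirect_to`` as described above, with the rule
# placeholders written out. ``get_slug_for_id`` stands in for an application
# specific lookup and is purely hypothetical.
from werkzeug.routing import Map, Rule, RequestRedirect


def get_slug_for_id(id):
    # hypothetical database lookup
    return 'entry-%d' % id


def foo_with_slug(adapter, id):
    return 'foo/' + get_slug_for_id(id)


url_map = Map([
    Rule('/foo/<slug>', endpoint='foo'),
    Rule('/some/old/url/<slug>', redirect_to='foo/<slug>'),
    Rule('/other/old/url/<int:id>', redirect_to=foo_with_slug),
])

urls = url_map.bind('example.com')
try:
    urls.match('/some/old/url/werkzeug')
except RequestRedirect as exc:
    assert exc.new_url == 'http://example.com/foo/werkzeug'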
- - `host` - If provided and the URL map has host matching enabled this can be - used to provide a match rule for the whole host. This also means - that the subdomain feature is disabled. - - .. versionadded:: 0.7 - The `alias` and `host` parameters were added. - """ - - def __init__(self, string, defaults=None, subdomain=None, methods=None, - build_only=False, endpoint=None, strict_slashes=None, - redirect_to=None, alias=False, host=None): - if not string.startswith('/'): - raise ValueError('urls must start with a leading slash') - self.rule = string - self.is_leaf = not string.endswith('/') - - self.map = None - self.strict_slashes = strict_slashes - self.subdomain = subdomain - self.host = host - self.defaults = defaults - self.build_only = build_only - self.alias = alias - if methods is None: - self.methods = None - else: - if isinstance(methods, str): - raise TypeError('param `methods` should be `Iterable[str]`, not `str`') - self.methods = set([x.upper() for x in methods]) - if 'HEAD' not in self.methods and 'GET' in self.methods: - self.methods.add('HEAD') - self.endpoint = endpoint - self.redirect_to = redirect_to - - if defaults: - self.arguments = set(map(str, defaults)) - else: - self.arguments = set() - self._trace = self._converters = self._regex = self._argument_weights = None - - def empty(self): - """ - Return an unbound copy of this rule. - - This can be useful if want to reuse an already bound URL for another - map. See ``get_empty_kwargs`` to override what keyword arguments are - provided to the new copy. - """ - return type(self)(self.rule, **self.get_empty_kwargs()) - - def get_empty_kwargs(self): - """ - Provides kwargs for instantiating empty copy with empty() - - Use this method to provide custom keyword arguments to the subclass of - ``Rule`` when calling ``some_rule.empty()``. Helpful when the subclass - has custom keyword arguments that are needed at instantiation. - - Must return a ``dict`` that will be provided as kwargs to the new - instance of ``Rule``, following the initial ``self.rule`` value which - is always provided as the first, required positional argument. - """ - defaults = None - if self.defaults: - defaults = dict(self.defaults) - return dict(defaults=defaults, subdomain=self.subdomain, - methods=self.methods, build_only=self.build_only, - endpoint=self.endpoint, strict_slashes=self.strict_slashes, - redirect_to=self.redirect_to, alias=self.alias, - host=self.host) - - def get_rules(self, map): - yield self - - def refresh(self): - """Rebinds and refreshes the URL. Call this if you modified the - rule in place. - - :internal: - """ - self.bind(self.map, rebind=True) - - def bind(self, map, rebind=False): - """Bind the url to a map and create a regular expression based on - the information from the rule itself and the defaults from the map. - - :internal: - """ - if self.map is not None and not rebind: - raise RuntimeError('url rule %r already bound to map %r' % - (self, self.map)) - self.map = map - if self.strict_slashes is None: - self.strict_slashes = map.strict_slashes - if self.subdomain is None: - self.subdomain = map.default_subdomain - self.compile() - - def get_converter(self, variable_name, converter_name, args, kwargs): - """Looks up the converter for the given parameter. - - .. 
versionadded:: 0.9 - """ - if converter_name not in self.map.converters: - raise LookupError('the converter %r does not exist' % converter_name) - return self.map.converters[converter_name](self.map, *args, **kwargs) - - def compile(self): - """Compiles the regular expression and stores it.""" - assert self.map is not None, 'rule not bound' - - if self.map.host_matching: - domain_rule = self.host or '' - else: - domain_rule = self.subdomain or '' - - self._trace = [] - self._converters = {} - self._static_weights = [] - self._argument_weights = [] - regex_parts = [] - - def _build_regex(rule): - index = 0 - for converter, arguments, variable in parse_rule(rule): - if converter is None: - regex_parts.append(re.escape(variable)) - self._trace.append((False, variable)) - for part in variable.split('/'): - if part: - self._static_weights.append((index, -len(part))) - else: - if arguments: - c_args, c_kwargs = parse_converter_args(arguments) - else: - c_args = () - c_kwargs = {} - convobj = self.get_converter( - variable, converter, c_args, c_kwargs) - regex_parts.append('(?P<%s>%s)' % (variable, convobj.regex)) - self._converters[variable] = convobj - self._trace.append((True, variable)) - self._argument_weights.append(convobj.weight) - self.arguments.add(str(variable)) - index = index + 1 - - _build_regex(domain_rule) - regex_parts.append('\\|') - self._trace.append((False, '|')) - _build_regex(self.is_leaf and self.rule or self.rule.rstrip('/')) - if not self.is_leaf: - self._trace.append((False, '/')) - - if self.build_only: - return - regex = r'^%s%s$' % ( - u''.join(regex_parts), - (not self.is_leaf or not self.strict_slashes) and - '(?/?)' or '' - ) - self._regex = re.compile(regex, re.UNICODE) - - def match(self, path, method=None): - """Check if the rule matches a given path. Path is a string in the - form ``"subdomain|/path"`` and is assembled by the map. If - the map is doing host matching the subdomain part will be the host - instead. - - If the rule matches a dict with the converted values is returned, - otherwise the return value is `None`. - - :internal: - """ - if not self.build_only: - m = self._regex.search(path) - if m is not None: - groups = m.groupdict() - # we have a folder like part of the url without a trailing - # slash and strict slashes enabled. raise an exception that - # tells the map to redirect to the same url but with a - # trailing slash - if self.strict_slashes and not self.is_leaf and \ - not groups.pop('__suffix__') and \ - (method is None or self.methods is None or - method in self.methods): - raise RequestSlash() - # if we are not in strict slashes mode we have to remove - # a __suffix__ - elif not self.strict_slashes: - del groups['__suffix__'] - - result = {} - for name, value in iteritems(groups): - try: - value = self._converters[name].to_python(value) - except ValidationError: - return - result[str(name)] = value - if self.defaults: - result.update(self.defaults) - - if self.alias and self.map.redirect_defaults: - raise RequestAliasRedirect(result) - - return result - - def build(self, values, append_unknown=True): - """Assembles the relative url for that rule and the subdomain. - If building doesn't work for some reasons `None` is returned. 
- - :internal: - """ - tmp = [] - add = tmp.append - processed = set(self.arguments) - for is_dynamic, data in self._trace: - if is_dynamic: - try: - add(self._converters[data].to_url(values[data])) - except ValidationError: - return - processed.add(data) - else: - add(url_quote(to_bytes(data, self.map.charset), safe='/:|+')) - domain_part, url = (u''.join(tmp)).split(u'|', 1) - - if append_unknown: - query_vars = MultiDict(values) - for key in processed: - if key in query_vars: - del query_vars[key] - - if query_vars: - url += u'?' + url_encode(query_vars, charset=self.map.charset, - sort=self.map.sort_parameters, - key=self.map.sort_key) - - return domain_part, url - - def provides_defaults_for(self, rule): - """Check if this rule has defaults for a given rule. - - :internal: - """ - return not self.build_only and self.defaults and \ - self.endpoint == rule.endpoint and self != rule and \ - self.arguments == rule.arguments - - def suitable_for(self, values, method=None): - """Check if the dict of values has enough data for url generation. - - :internal: - """ - # if a method was given explicitly and that method is not supported - # by this rule, this rule is not suitable. - if method is not None and self.methods is not None \ - and method not in self.methods: - return False - - defaults = self.defaults or () - - # all arguments required must be either in the defaults dict or - # the value dictionary otherwise it's not suitable - for key in self.arguments: - if key not in defaults and key not in values: - return False - - # in case defaults are given we ensure taht either the value was - # skipped or the value is the same as the default value. - if defaults: - for key, value in iteritems(defaults): - if key in values and value != values[key]: - return False - - return True - - def match_compare_key(self): - """The match compare key for sorting. - - Current implementation: - - 1. rules without any arguments come first for performance - reasons only as we expect them to match faster and some - common ones usually don't have any arguments (index pages etc.) - 2. rules with more static parts come first so the second argument - is the negative length of the number of the static weights. - 3. we order by static weights, which is a combination of index - and length - 4. The more complex rules come first so the next argument is the - negative length of the number of argument weights. - 5. lastly we order by the actual argument weights. - - :internal: - """ - return bool(self.arguments), -len(self._static_weights), self._static_weights,\ - -len(self._argument_weights), self._argument_weights - - def build_compare_key(self): - """The build compare key for sorting. 
- - :internal: - """ - return self.alias and 1 or 0, -len(self.arguments), \ - -len(self.defaults or ()) - - def __eq__(self, other): - return self.__class__ is other.__class__ and \ - self._trace == other._trace - - __hash__ = None - - def __ne__(self, other): - return not self.__eq__(other) - - def __str__(self): - return self.rule - - @native_string_result - def __repr__(self): - if self.map is None: - return u'<%s (unbound)>' % self.__class__.__name__ - tmp = [] - for is_dynamic, data in self._trace: - if is_dynamic: - tmp.append(u'<%s>' % data) - else: - tmp.append(data) - return u'<%s %s%s -> %s>' % ( - self.__class__.__name__, - repr((u''.join(tmp)).lstrip(u'|')).lstrip(u'u'), - self.methods is not None - and u' (%s)' % u', '.join(self.methods) - or u'', - self.endpoint - ) - - -class BaseConverter(object): - - """Base class for all converters.""" - regex = '[^/]+' - weight = 100 - - def __init__(self, map): - self.map = map - - def to_python(self, value): - return value - - def to_url(self, value): - return url_quote(value, charset=self.map.charset) - - -class UnicodeConverter(BaseConverter): - - """This converter is the default converter and accepts any string but - only one path segment. Thus the string can not include a slash. - - This is the default validator. - - Example:: - - Rule('/pages/'), - Rule('/') - - :param map: the :class:`Map`. - :param minlength: the minimum length of the string. Must be greater - or equal 1. - :param maxlength: the maximum length of the string. - :param length: the exact length of the string. - """ - - def __init__(self, map, minlength=1, maxlength=None, length=None): - BaseConverter.__init__(self, map) - if length is not None: - length = '{%d}' % int(length) - else: - if maxlength is None: - maxlength = '' - else: - maxlength = int(maxlength) - length = '{%s,%s}' % ( - int(minlength), - maxlength - ) - self.regex = '[^/]' + length - - -class AnyConverter(BaseConverter): - - """Matches one of the items provided. Items can either be Python - identifiers or strings:: - - Rule('/') - - :param map: the :class:`Map`. - :param items: this function accepts the possible items as positional - arguments. - """ - - def __init__(self, map, *items): - BaseConverter.__init__(self, map) - self.regex = '(?:%s)' % '|'.join([re.escape(x) for x in items]) - - -class PathConverter(BaseConverter): - - """Like the default :class:`UnicodeConverter`, but it also matches - slashes. This is useful for wikis and similar applications:: - - Rule('/') - Rule('//edit') - - :param map: the :class:`Map`. - """ - regex = '[^/].*?' - weight = 200 - - -class NumberConverter(BaseConverter): - - """Baseclass for `IntegerConverter` and `FloatConverter`. - - :internal: - """ - weight = 50 - - def __init__(self, map, fixed_digits=0, min=None, max=None): - BaseConverter.__init__(self, map) - self.fixed_digits = fixed_digits - self.min = min - self.max = max - - def to_python(self, value): - if (self.fixed_digits and len(value) != self.fixed_digits): - raise ValidationError() - value = self.num_convert(value) - if (self.min is not None and value < self.min) or \ - (self.max is not None and value > self.max): - raise ValidationError() - return value - - def to_url(self, value): - value = self.num_convert(value) - if self.fixed_digits: - value = ('%%0%sd' % self.fixed_digits) % value - return str(value) - - -class IntegerConverter(NumberConverter): - - """This converter only accepts integer values:: - - Rule('/page/') - - This converter does not support negative values. 
- - :param map: the :class:`Map`. - :param fixed_digits: the number of fixed digits in the URL. If you set - this to ``4`` for example, the application will - only match if the url looks like ``/0001/``. The - default is variable length. - :param min: the minimal value. - :param max: the maximal value. - """ - regex = r'\d+' - num_convert = int - - -class FloatConverter(NumberConverter): - - """This converter only accepts floating point values:: - - Rule('/probability/') - - This converter does not support negative values. - - :param map: the :class:`Map`. - :param min: the minimal value. - :param max: the maximal value. - """ - regex = r'\d+\.\d+' - num_convert = float - - def __init__(self, map, min=None, max=None): - NumberConverter.__init__(self, map, 0, min, max) - - -class UUIDConverter(BaseConverter): - - """This converter only accepts UUID strings:: - - Rule('/object/') - - .. versionadded:: 0.10 - - :param map: the :class:`Map`. - """ - regex = r'[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-' \ - r'[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}' - - def to_python(self, value): - return uuid.UUID(value) - - def to_url(self, value): - return str(value) - - -#: the default converter mapping for the map. -DEFAULT_CONVERTERS = { - 'default': UnicodeConverter, - 'string': UnicodeConverter, - 'any': AnyConverter, - 'path': PathConverter, - 'int': IntegerConverter, - 'float': FloatConverter, - 'uuid': UUIDConverter, -} - - -class Map(object): - - """The map class stores all the URL rules and some configuration - parameters. Some of the configuration values are only stored on the - `Map` instance since those affect all rules, others are just defaults - and can be overridden for each rule. Note that you have to specify all - arguments besides the `rules` as keyword arguments! - - :param rules: sequence of url rules for this map. - :param default_subdomain: The default subdomain for rules without a - subdomain defined. - :param charset: charset of the url. defaults to ``"utf-8"`` - :param strict_slashes: Take care of trailing slashes. - :param redirect_defaults: This will redirect to the default rule if it - wasn't visited that way. This helps creating - unique URLs. - :param converters: A dict of converters that adds additional converters - to the list of converters. If you redefine one - converter this will override the original one. - :param sort_parameters: If set to `True` the url parameters are sorted. - See `url_encode` for more details. - :param sort_key: The sort key function for `url_encode`. - :param encoding_errors: the error method to use for decoding - :param host_matching: if set to `True` it enables the host matching - feature and disables the subdomain one. If - enabled the `host` parameter to rules is used - instead of the `subdomain` one. - - .. versionadded:: 0.5 - `sort_parameters` and `sort_key` was added. - - .. versionadded:: 0.7 - `encoding_errors` and `host_matching` was added. - """ - - #: .. versionadded:: 0.6 - #: a dict of default converters to be used. 
- default_converters = ImmutableDict(DEFAULT_CONVERTERS) - - def __init__(self, rules=None, default_subdomain='', charset='utf-8', - strict_slashes=True, redirect_defaults=True, - converters=None, sort_parameters=False, sort_key=None, - encoding_errors='replace', host_matching=False): - self._rules = [] - self._rules_by_endpoint = {} - self._remap = True - self._remap_lock = Lock() - - self.default_subdomain = default_subdomain - self.charset = charset - self.encoding_errors = encoding_errors - self.strict_slashes = strict_slashes - self.redirect_defaults = redirect_defaults - self.host_matching = host_matching - - self.converters = self.default_converters.copy() - if converters: - self.converters.update(converters) - - self.sort_parameters = sort_parameters - self.sort_key = sort_key - - for rulefactory in rules or (): - self.add(rulefactory) - - def is_endpoint_expecting(self, endpoint, *arguments): - """Iterate over all rules and check if the endpoint expects - the arguments provided. This is for example useful if you have - some URLs that expect a language code and others that do not and - you want to wrap the builder a bit so that the current language - code is automatically added if not provided but endpoints expect - it. - - :param endpoint: the endpoint to check. - :param arguments: this function accepts one or more arguments - as positional arguments. Each one of them is - checked. - """ - self.update() - arguments = set(arguments) - for rule in self._rules_by_endpoint[endpoint]: - if arguments.issubset(rule.arguments): - return True - return False - - def iter_rules(self, endpoint=None): - """Iterate over all rules or the rules of an endpoint. - - :param endpoint: if provided only the rules for that endpoint - are returned. - :return: an iterator - """ - self.update() - if endpoint is not None: - return iter(self._rules_by_endpoint[endpoint]) - return iter(self._rules) - - def add(self, rulefactory): - """Add a new rule or factory to the map and bind it. Requires that the - rule is not bound to another map. - - :param rulefactory: a :class:`Rule` or :class:`RuleFactory` - """ - for rule in rulefactory.get_rules(self): - rule.bind(self) - self._rules.append(rule) - self._rules_by_endpoint.setdefault(rule.endpoint, []).append(rule) - self._remap = True - - def bind(self, server_name, script_name=None, subdomain=None, - url_scheme='http', default_method='GET', path_info=None, - query_args=None): - """Return a new :class:`MapAdapter` with the details specified to the - call. Note that `script_name` will default to ``'/'`` if not further - specified or `None`. The `server_name` at least is a requirement - because the HTTP RFC requires absolute URLs for redirects and so all - redirect exceptions raised by Werkzeug will contain the full canonical - URL. - - If no path_info is passed to :meth:`match` it will use the default path - info passed to bind. While this doesn't really make sense for - manual bind calls, it's useful if you bind a map to a WSGI - environment which already contains the path info. - - `subdomain` will default to the `default_subdomain` for this map if - no defined. If there is no `default_subdomain` you cannot use the - subdomain feature. - - .. versionadded:: 0.7 - `query_args` added - - .. versionadded:: 0.8 - `query_args` can now also be a string. 
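# A minimal sketch of Map.bind as described above: the server name is given
# without the subdomain, the mount point defaults to '/', and the returned
# MapAdapter does the matching and building. The host and paths used here are
# placeholders.
from werkzeug.routing import Map, Rule

url_map = Map([Rule('/about', endpoint='about')], default_subdomain='www')

adapter = url_map.bind('example.com', '/applications/example',
                       subdomain='kb', url_scheme='https')

# the rule lives on the default subdomain, so building from the 'kb' adapter
# yields a full external URL that includes the mount point
assert adapter.build('about') == \
    'https://www.example.com/applications/example/about'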
- """ - server_name = server_name.lower() - if self.host_matching: - if subdomain is not None: - raise RuntimeError('host matching enabled and a ' - 'subdomain was provided') - elif subdomain is None: - subdomain = self.default_subdomain - if script_name is None: - script_name = '/' - try: - server_name = _encode_idna(server_name) - except UnicodeError: - raise BadHost() - return MapAdapter(self, server_name, script_name, subdomain, - url_scheme, path_info, default_method, query_args) - - def bind_to_environ(self, environ, server_name=None, subdomain=None): - """Like :meth:`bind` but you can pass it an WSGI environment and it - will fetch the information from that dictionary. Note that because of - limitations in the protocol there is no way to get the current - subdomain and real `server_name` from the environment. If you don't - provide it, Werkzeug will use `SERVER_NAME` and `SERVER_PORT` (or - `HTTP_HOST` if provided) as used `server_name` with disabled subdomain - feature. - - If `subdomain` is `None` but an environment and a server name is - provided it will calculate the current subdomain automatically. - Example: `server_name` is ``'example.com'`` and the `SERVER_NAME` - in the wsgi `environ` is ``'staging.dev.example.com'`` the calculated - subdomain will be ``'staging.dev'``. - - If the object passed as environ has an environ attribute, the value of - this attribute is used instead. This allows you to pass request - objects. Additionally `PATH_INFO` added as a default of the - :class:`MapAdapter` so that you don't have to pass the path info to - the match method. - - .. versionchanged:: 0.5 - previously this method accepted a bogus `calculate_subdomain` - parameter that did not have any effect. It was removed because - of that. - - .. versionchanged:: 0.8 - This will no longer raise a ValueError when an unexpected server - name was passed. - - :param environ: a WSGI environment. - :param server_name: an optional server name hint (see above). - :param subdomain: optionally the current subdomain (see above). - """ - environ = _get_environ(environ) - - if 'HTTP_HOST' in environ: - wsgi_server_name = environ['HTTP_HOST'] - - if environ['wsgi.url_scheme'] == 'http' \ - and wsgi_server_name.endswith(':80'): - wsgi_server_name = wsgi_server_name[:-3] - elif environ['wsgi.url_scheme'] == 'https' \ - and wsgi_server_name.endswith(':443'): - wsgi_server_name = wsgi_server_name[:-4] - else: - wsgi_server_name = environ['SERVER_NAME'] - - if (environ['wsgi.url_scheme'], environ['SERVER_PORT']) not \ - in (('https', '443'), ('http', '80')): - wsgi_server_name += ':' + environ['SERVER_PORT'] - - wsgi_server_name = wsgi_server_name.lower() - - if server_name is None: - server_name = wsgi_server_name - else: - server_name = server_name.lower() - - if subdomain is None and not self.host_matching: - cur_server_name = wsgi_server_name.split('.') - real_server_name = server_name.split('.') - offset = -len(real_server_name) - if cur_server_name[offset:] != real_server_name: - # This can happen even with valid configs if the server was - # accesssed directly by IP address under some situations. - # Instead of raising an exception like in Werkzeug 0.7 or - # earlier we go by an invalid subdomain which will result - # in a 404 error on matching. 
- subdomain = '' - else: - subdomain = '.'.join(filter(None, cur_server_name[:offset])) - - def _get_wsgi_string(name): - val = environ.get(name) - if val is not None: - return wsgi_decoding_dance(val, self.charset) - - script_name = _get_wsgi_string('SCRIPT_NAME') - path_info = _get_wsgi_string('PATH_INFO') - query_args = _get_wsgi_string('QUERY_STRING') - return Map.bind(self, server_name, script_name, - subdomain, environ['wsgi.url_scheme'], - environ['REQUEST_METHOD'], path_info, - query_args=query_args) - - def update(self): - """Called before matching and building to keep the compiled rules - in the correct order after things changed. - """ - if not self._remap: - return - - with self._remap_lock: - if not self._remap: - return - - self._rules.sort(key=lambda x: x.match_compare_key()) - for rules in itervalues(self._rules_by_endpoint): - rules.sort(key=lambda x: x.build_compare_key()) - self._remap = False - - def __repr__(self): - rules = self.iter_rules() - return '%s(%s)' % (self.__class__.__name__, pformat(list(rules))) - - -class MapAdapter(object): - - """Returned by :meth:`Map.bind` or :meth:`Map.bind_to_environ` and does - the URL matching and building based on runtime information. - """ - - def __init__(self, map, server_name, script_name, subdomain, - url_scheme, path_info, default_method, query_args=None): - self.map = map - self.server_name = to_unicode(server_name) - script_name = to_unicode(script_name) - if not script_name.endswith(u'/'): - script_name += u'/' - self.script_name = script_name - self.subdomain = to_unicode(subdomain) - self.url_scheme = to_unicode(url_scheme) - self.path_info = to_unicode(path_info) - self.default_method = to_unicode(default_method) - self.query_args = query_args - - def dispatch(self, view_func, path_info=None, method=None, - catch_http_exceptions=False): - """Does the complete dispatching process. `view_func` is called with - the endpoint and a dict with the values for the view. It should - look up the view function, call it, and return a response object - or WSGI application. http exceptions are not caught by default - so that applications can display nicer error messages by just - catching them by hand. If you want to stick with the default - error messages you can pass it ``catch_http_exceptions=True`` and - it will catch the http exceptions. - - Here a small example for the dispatch usage:: - - from werkzeug.wrappers import Request, Response - from werkzeug.wsgi import responder - from werkzeug.routing import Map, Rule - - def on_index(request): - return Response('Hello from the index') - - url_map = Map([Rule('/', endpoint='index')]) - views = {'index': on_index} - - @responder - def application(environ, start_response): - request = Request(environ) - urls = url_map.bind_to_environ(environ) - return urls.dispatch(lambda e, v: views[e](request, **v), - catch_http_exceptions=True) - - Keep in mind that this method might return exception objects, too, so - use :class:`Response.force_type` to get a response object. - - :param view_func: a function that is called with the endpoint as - first argument and the value dict as second. Has - to dispatch to the actual view function with this - information. (see above) - :param path_info: the path info to use for matching. Overrides the - path info specified on binding. - :param method: the HTTP method used for matching. Overrides the - method specified on binding. - :param catch_http_exceptions: set to `True` to catch any of the - werkzeug :class:`HTTPException`\s. 
- """ - try: - try: - endpoint, args = self.match(path_info, method) - except RequestRedirect as e: - return e - return view_func(endpoint, args) - except HTTPException as e: - if catch_http_exceptions: - return e - raise - - def match(self, path_info=None, method=None, return_rule=False, - query_args=None): - """The usage is simple: you just pass the match method the current - path info as well as the method (which defaults to `GET`). The - following things can then happen: - - - you receive a `NotFound` exception that indicates that no URL is - matching. A `NotFound` exception is also a WSGI application you - can call to get a default page not found page (happens to be the - same object as `werkzeug.exceptions.NotFound`) - - - you receive a `MethodNotAllowed` exception that indicates that there - is a match for this URL but not for the current request method. - This is useful for RESTful applications. - - - you receive a `RequestRedirect` exception with a `new_url` - attribute. This exception is used to notify you about a request - Werkzeug requests from your WSGI application. This is for example the - case if you request ``/foo`` although the correct URL is ``/foo/`` - You can use the `RequestRedirect` instance as response-like object - similar to all other subclasses of `HTTPException`. - - - you get a tuple in the form ``(endpoint, arguments)`` if there is - a match (unless `return_rule` is True, in which case you get a tuple - in the form ``(rule, arguments)``) - - If the path info is not passed to the match method the default path - info of the map is used (defaults to the root URL if not defined - explicitly). - - All of the exceptions raised are subclasses of `HTTPException` so they - can be used as WSGI responses. They will all render generic error or - redirect pages. - - Here is a small example for matching: - - >>> m = Map([ - ... Rule('/', endpoint='index'), - ... Rule('/downloads/', endpoint='downloads/index'), - ... Rule('/downloads/', endpoint='downloads/show') - ... ]) - >>> urls = m.bind("example.com", "/") - >>> urls.match("/", "GET") - ('index', {}) - >>> urls.match("/downloads/42") - ('downloads/show', {'id': 42}) - - And here is what happens on redirect and missing URLs: - - >>> urls.match("/downloads") - Traceback (most recent call last): - ... - RequestRedirect: http://example.com/downloads/ - >>> urls.match("/missing") - Traceback (most recent call last): - ... - NotFound: 404 Not Found - - :param path_info: the path info to use for matching. Overrides the - path info specified on binding. - :param method: the HTTP method used for matching. Overrides the - method specified on binding. - :param return_rule: return the rule that matched instead of just the - endpoint (defaults to `False`). - :param query_args: optional query arguments that are used for - automatic redirects as string or dictionary. It's - currently not possible to use the query arguments - for URL matching. - - .. versionadded:: 0.6 - `return_rule` was added. - - .. versionadded:: 0.7 - `query_args` was added. - - .. versionchanged:: 0.8 - `query_args` can now also be a string. 
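# A minimal sketch of the match() outcomes described above, with the download
# rule's placeholder written out as ``<int:id>``.
from werkzeug.exceptions import MethodNotAllowed, NotFound
from werkzeug.routing import Map, Rule, RequestRedirect

url_map = Map([
    Rule('/', endpoint='index'),
    Rule('/downloads/', endpoint='downloads/index'),
    Rule('/downloads/<int:id>', endpoint='downloads/show'),
    Rule('/submit', endpoint='submit', methods=['POST']),
])
urls = url_map.bind('example.com', '/')

assert urls.match('/downloads/42') == ('downloads/show', {'id': 42})

try:
    urls.match('/downloads')              # missing trailing slash
except RequestRedirect as exc:
    assert exc.new_url == 'http://example.com/downloads/'

try:
    urls.match('/submit', method='GET')   # URL exists, method does not
except MethodNotAllowed as exc:
    assert 'POST' in exc.valid_methods

try:
    urls.match('/missing')                # no rule at all
except NotFound:
    pass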
- """ - self.map.update() - if path_info is None: - path_info = self.path_info - else: - path_info = to_unicode(path_info, self.map.charset) - if query_args is None: - query_args = self.query_args - method = (method or self.default_method).upper() - - path = u'%s|%s' % ( - self.map.host_matching and self.server_name or self.subdomain, - path_info and '/%s' % path_info.lstrip('/') - ) - - have_match_for = set() - for rule in self.map._rules: - try: - rv = rule.match(path, method) - except RequestSlash: - raise RequestRedirect(self.make_redirect_url( - url_quote(path_info, self.map.charset, - safe='/:|+') + '/', query_args)) - except RequestAliasRedirect as e: - raise RequestRedirect(self.make_alias_redirect_url( - path, rule.endpoint, e.matched_values, method, query_args)) - if rv is None: - continue - if rule.methods is not None and method not in rule.methods: - have_match_for.update(rule.methods) - continue - - if self.map.redirect_defaults: - redirect_url = self.get_default_redirect(rule, method, rv, - query_args) - if redirect_url is not None: - raise RequestRedirect(redirect_url) - - if rule.redirect_to is not None: - if isinstance(rule.redirect_to, string_types): - def _handle_match(match): - value = rv[match.group(1)] - return rule._converters[match.group(1)].to_url(value) - redirect_url = _simple_rule_re.sub(_handle_match, - rule.redirect_to) - else: - redirect_url = rule.redirect_to(self, **rv) - raise RequestRedirect(str(url_join('%s://%s%s%s' % ( - self.url_scheme or 'http', - self.subdomain and self.subdomain + '.' or '', - self.server_name, - self.script_name - ), redirect_url))) - - if return_rule: - return rule, rv - else: - return rule.endpoint, rv - - if have_match_for: - raise MethodNotAllowed(valid_methods=list(have_match_for)) - raise NotFound() - - def test(self, path_info=None, method=None): - """Test if a rule would match. Works like `match` but returns `True` - if the URL matches, or `False` if it does not exist. - - :param path_info: the path info to use for matching. Overrides the - path info specified on binding. - :param method: the HTTP method used for matching. Overrides the - method specified on binding. - """ - try: - self.match(path_info, method) - except RequestRedirect: - pass - except HTTPException: - return False - return True - - def allowed_methods(self, path_info=None): - """Returns the valid methods that match for a given path. - - .. versionadded:: 0.7 - """ - try: - self.match(path_info, method='--') - except MethodNotAllowed as e: - return e.valid_methods - except HTTPException as e: - pass - return [] - - def get_host(self, domain_part): - """Figures out the full host name for the given domain part. The - domain part is a subdomain in case host matching is disabled or - a full host name. - """ - if self.map.host_matching: - if domain_part is None: - return self.server_name - return to_unicode(domain_part, 'ascii') - subdomain = domain_part - if subdomain is None: - subdomain = self.subdomain - else: - subdomain = to_unicode(subdomain, 'ascii') - return (subdomain and subdomain + u'.' or u'') + self.server_name - - def get_default_redirect(self, rule, method, values, query_args): - """A helper that returns the URL to redirect to if it finds one. - This is used for default redirecting only. - - :internal: - """ - assert self.map.redirect_defaults - for r in self.map._rules_by_endpoint[rule.endpoint]: - # every rule that comes after this one, including ourself - # has a lower priority for the defaults. 
We order the ones - # with the highest priority up for building. - if r is rule: - break - if r.provides_defaults_for(rule) and \ - r.suitable_for(values, method): - values.update(r.defaults) - domain_part, path = r.build(values) - return self.make_redirect_url( - path, query_args, domain_part=domain_part) - - def encode_query_args(self, query_args): - if not isinstance(query_args, string_types): - query_args = url_encode(query_args, self.map.charset) - return query_args - - def make_redirect_url(self, path_info, query_args=None, domain_part=None): - """Creates a redirect URL. - - :internal: - """ - suffix = '' - if query_args: - suffix = '?' + self.encode_query_args(query_args) - return str('%s://%s/%s%s' % ( - self.url_scheme or 'http', - self.get_host(domain_part), - posixpath.join(self.script_name[:-1].lstrip('/'), - path_info.lstrip('/')), - suffix - )) - - def make_alias_redirect_url(self, path, endpoint, values, method, query_args): - """Internally called to make an alias redirect URL.""" - url = self.build(endpoint, values, method, append_unknown=False, - force_external=True) - if query_args: - url += '?' + self.encode_query_args(query_args) - assert url != path, 'detected invalid alias setting. No canonical ' \ - 'URL found' - return url - - def _partial_build(self, endpoint, values, method, append_unknown): - """Helper for :meth:`build`. Returns subdomain and path for the - rule that accepts this endpoint, values and method. - - :internal: - """ - # in case the method is none, try with the default method first - if method is None: - rv = self._partial_build(endpoint, values, self.default_method, - append_unknown) - if rv is not None: - return rv - - # default method did not match or a specific method is passed, - # check all and go with first result. - for rule in self.map._rules_by_endpoint.get(endpoint, ()): - if rule.suitable_for(values, method): - rv = rule.build(values, append_unknown) - if rv is not None: - return rv - - def build(self, endpoint, values=None, method=None, force_external=False, - append_unknown=True): - """Building URLs works pretty much the other way round. Instead of - `match` you call `build` and pass it the endpoint and a dict of - arguments for the placeholders. - - The `build` function also accepts an argument called `force_external` - which, if you set it to `True` will force external URLs. Per default - external URLs (include the server name) will only be used if the - target URL is on a different subdomain. - - >>> m = Map([ - ... Rule('/', endpoint='index'), - ... Rule('/downloads/', endpoint='downloads/index'), - ... Rule('/downloads/', endpoint='downloads/show') - ... ]) - >>> urls = m.bind("example.com", "/") - >>> urls.build("index", {}) - '/' - >>> urls.build("downloads/show", {'id': 42}) - '/downloads/42' - >>> urls.build("downloads/show", {'id': 42}, force_external=True) - 'http://example.com/downloads/42' - - Because URLs cannot contain non ASCII data you will always get - bytestrings back. Non ASCII characters are urlencoded with the - charset defined on the map instance. 
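# A minimal sketch of what happens when build() cannot find a rule for the
# requested endpoint: the BuildError defined earlier in this module is raised,
# and its ``suggested`` rule can be used for a friendlier error message.
from werkzeug.routing import BuildError, Map, Rule

url_map = Map([Rule('/downloads/<int:id>', endpoint='downloads/show')])
urls = url_map.bind('example.com', '/')

try:
    urls.build('downloads/list', {'id': 42})    # misspelled endpoint
except BuildError as exc:
    assert exc.suggested.endpoint == 'downloads/show'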
- - Additional values are converted to unicode and appended to the URL as - URL querystring parameters: - - >>> urls.build("index", {'q': 'My Searchstring'}) - '/?q=My+Searchstring' - - When processing those additional values, lists are furthermore - interpreted as multiple values (as per - :py:class:`werkzeug.datastructures.MultiDict`): - - >>> urls.build("index", {'q': ['a', 'b', 'c']}) - '/?q=a&q=b&q=c' - - If a rule does not exist when building a `BuildError` exception is - raised. - - The build method accepts an argument called `method` which allows you - to specify the method you want to have an URL built for if you have - different methods for the same endpoint specified. - - .. versionadded:: 0.6 - the `append_unknown` parameter was added. - - :param endpoint: the endpoint of the URL to build. - :param values: the values for the URL to build. Unhandled values are - appended to the URL as query parameters. - :param method: the HTTP method for the rule if there are different - URLs for different methods on the same endpoint. - :param force_external: enforce full canonical external URLs. If the URL - scheme is not provided, this will generate - a protocol-relative URL. - :param append_unknown: unknown parameters are appended to the generated - URL as query string argument. Disable this - if you want the builder to ignore those. - """ - self.map.update() - if values: - if isinstance(values, MultiDict): - valueiter = iteritems(values, multi=True) - else: - valueiter = iteritems(values) - values = dict((k, v) for k, v in valueiter if v is not None) - else: - values = {} - - rv = self._partial_build(endpoint, values, method, append_unknown) - if rv is None: - raise BuildError(endpoint, values, method, self) - domain_part, path = rv - - host = self.get_host(domain_part) - - # shortcut this. - if not force_external and ( - (self.map.host_matching and host == self.server_name) or - (not self.map.host_matching and domain_part == self.subdomain) - ): - return str(url_join(self.script_name, './' + path.lstrip('/'))) - return str('%s//%s%s/%s' % ( - self.url_scheme + ':' if self.url_scheme else '', - host, - self.script_name[:-1], - path.lstrip('/') - )) diff --git a/pyextra/werkzeug/security.py b/pyextra/werkzeug/security.py deleted file mode 100644 index d4c5c9f48..000000000 --- a/pyextra/werkzeug/security.py +++ /dev/null @@ -1,270 +0,0 @@ -# -*- coding: utf-8 -*- -""" - werkzeug.security - ~~~~~~~~~~~~~~~~~ - - Security related helpers such as secure password hashing tools. - - :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details. - :license: BSD, see LICENSE for more details. 
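A quick round trip through the ``MapAdapter`` API deleted above in ``routing.py``; a minimal sketch, assuming ``werkzeug`` remains importable from site-packages rather than ``pyextra``::

    from werkzeug.routing import Map, Rule

    url_map = Map([
        Rule('/', endpoint='index'),
        Rule('/downloads/<int:id>', endpoint='downloads/show'),
    ])
    adapter = url_map.bind('example.com', '/')

    # match() resolves a path to (endpoint, values)...
    assert adapter.match('/downloads/42') == ('downloads/show', {'id': 42})
    # ...and build() goes the other way, optionally as a full external URL.
    assert adapter.build('downloads/show', {'id': 42}) == '/downloads/42'
    assert adapter.build('downloads/show', {'id': 42},
                         force_external=True) == 'http://example.com/downloads/42'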
-""" -import os -import hmac -import hashlib -import posixpath -import codecs -from struct import Struct -from random import SystemRandom -from operator import xor -from itertools import starmap - -from werkzeug._compat import range_type, PY2, text_type, izip, to_bytes, \ - string_types, to_native - - -SALT_CHARS = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789' -DEFAULT_PBKDF2_ITERATIONS = 50000 - - -_pack_int = Struct('>I').pack -_builtin_safe_str_cmp = getattr(hmac, 'compare_digest', None) -_sys_rng = SystemRandom() -_os_alt_seps = list(sep for sep in [os.path.sep, os.path.altsep] - if sep not in (None, '/')) - - -def _find_hashlib_algorithms(): - algos = getattr(hashlib, 'algorithms', None) - if algos is None: - algos = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512') - rv = {} - for algo in algos: - func = getattr(hashlib, algo, None) - if func is not None: - rv[algo] = func - return rv -_hash_funcs = _find_hashlib_algorithms() - - -def pbkdf2_hex(data, salt, iterations=DEFAULT_PBKDF2_ITERATIONS, - keylen=None, hashfunc=None): - """Like :func:`pbkdf2_bin`, but returns a hex-encoded string. - - .. versionadded:: 0.9 - - :param data: the data to derive. - :param salt: the salt for the derivation. - :param iterations: the number of iterations. - :param keylen: the length of the resulting key. If not provided, - the digest size will be used. - :param hashfunc: the hash function to use. This can either be the - string name of a known hash function, or a function - from the hashlib module. Defaults to sha256. - """ - rv = pbkdf2_bin(data, salt, iterations, keylen, hashfunc) - return to_native(codecs.encode(rv, 'hex_codec')) - - -_has_native_pbkdf2 = hasattr(hashlib, 'pbkdf2_hmac') - - -def pbkdf2_bin(data, salt, iterations=DEFAULT_PBKDF2_ITERATIONS, - keylen=None, hashfunc=None): - """Returns a binary digest for the PBKDF2 hash algorithm of `data` - with the given `salt`. It iterates `iterations` times and produces a - key of `keylen` bytes. By default, SHA-256 is used as hash function; - a different hashlib `hashfunc` can be provided. - - .. versionadded:: 0.9 - - :param data: the data to derive. - :param salt: the salt for the derivation. - :param iterations: the number of iterations. - :param keylen: the length of the resulting key. If not provided - the digest size will be used. - :param hashfunc: the hash function to use. This can either be the - string name of a known hash function or a function - from the hashlib module. Defaults to sha256. - """ - if isinstance(hashfunc, string_types): - hashfunc = _hash_funcs[hashfunc] - elif not hashfunc: - hashfunc = hashlib.sha256 - data = to_bytes(data) - salt = to_bytes(salt) - - # If we're on Python with pbkdf2_hmac we can try to use it for - # compatible digests. 
- if _has_native_pbkdf2: - _test_hash = hashfunc() - if hasattr(_test_hash, 'name') and \ - _test_hash.name in _hash_funcs: - return hashlib.pbkdf2_hmac(_test_hash.name, - data, salt, iterations, - keylen) - - mac = hmac.HMAC(data, None, hashfunc) - if not keylen: - keylen = mac.digest_size - - def _pseudorandom(x, mac=mac): - h = mac.copy() - h.update(x) - return bytearray(h.digest()) - buf = bytearray() - for block in range_type(1, -(-keylen // mac.digest_size) + 1): - rv = u = _pseudorandom(salt + _pack_int(block)) - for i in range_type(iterations - 1): - u = _pseudorandom(bytes(u)) - rv = bytearray(starmap(xor, izip(rv, u))) - buf.extend(rv) - return bytes(buf[:keylen]) - - -def safe_str_cmp(a, b): - """This function compares strings in somewhat constant time. This - requires that the length of at least one string is known in advance. - - Returns `True` if the two strings are equal, or `False` if they are not. - - .. versionadded:: 0.7 - """ - if isinstance(a, text_type): - a = a.encode('utf-8') - if isinstance(b, text_type): - b = b.encode('utf-8') - - if _builtin_safe_str_cmp is not None: - return _builtin_safe_str_cmp(a, b) - - if len(a) != len(b): - return False - - rv = 0 - if PY2: - for x, y in izip(a, b): - rv |= ord(x) ^ ord(y) - else: - for x, y in izip(a, b): - rv |= x ^ y - - return rv == 0 - - -def gen_salt(length): - """Generate a random string of SALT_CHARS with specified ``length``.""" - if length <= 0: - raise ValueError('Salt length must be positive') - return ''.join(_sys_rng.choice(SALT_CHARS) for _ in range_type(length)) - - -def _hash_internal(method, salt, password): - """Internal password hash helper. Supports plaintext without salt, - unsalted and salted passwords. In case salted passwords are used - hmac is used. - """ - if method == 'plain': - return password, method - - if isinstance(password, text_type): - password = password.encode('utf-8') - - if method.startswith('pbkdf2:'): - args = method[7:].split(':') - if len(args) not in (1, 2): - raise ValueError('Invalid number of arguments for PBKDF2') - method = args.pop(0) - iterations = args and int(args[0] or 0) or DEFAULT_PBKDF2_ITERATIONS - is_pbkdf2 = True - actual_method = 'pbkdf2:%s:%d' % (method, iterations) - else: - is_pbkdf2 = False - actual_method = method - - hash_func = _hash_funcs.get(method) - if hash_func is None: - raise TypeError('invalid method %r' % method) - - if is_pbkdf2: - if not salt: - raise ValueError('Salt is required for PBKDF2') - rv = pbkdf2_hex(password, salt, iterations, - hashfunc=hash_func) - elif salt: - if isinstance(salt, text_type): - salt = salt.encode('utf-8') - rv = hmac.HMAC(salt, password, hash_func).hexdigest() - else: - h = hash_func() - h.update(password) - rv = h.hexdigest() - return rv, actual_method - - -def generate_password_hash(password, method='pbkdf2:sha256', salt_length=8): - """Hash a password with the given method and salt with a string of - the given length. The format of the string returned includes the method - that was used so that :func:`check_password_hash` can check the hash. - - The format for the hashed string looks like this:: - - method$salt$hash - - This method can **not** generate unsalted passwords but it is possible - to set param method='plain' in order to enforce plaintext passwords. - If a salt is used, hmac is used internally to salt the password. 
- - If PBKDF2 is wanted it can be enabled by setting the method to - ``pbkdf2:method:iterations`` where iterations is optional:: - - pbkdf2:sha256:80000$salt$hash - pbkdf2:sha256$salt$hash - - :param password: the password to hash. - :param method: the hash method to use (one that hashlib supports). Can - optionally be in the format ``pbkdf2:[:iterations]`` - to enable PBKDF2. - :param salt_length: the length of the salt in letters. - """ - salt = method != 'plain' and gen_salt(salt_length) or '' - h, actual_method = _hash_internal(method, salt, password) - return '%s$%s$%s' % (actual_method, salt, h) - - -def check_password_hash(pwhash, password): - """check a password against a given salted and hashed password value. - In order to support unsalted legacy passwords this method supports - plain text passwords, md5 and sha1 hashes (both salted and unsalted). - - Returns `True` if the password matched, `False` otherwise. - - :param pwhash: a hashed string like returned by - :func:`generate_password_hash`. - :param password: the plaintext password to compare against the hash. - """ - if pwhash.count('$') < 2: - return False - method, salt, hashval = pwhash.split('$', 2) - return safe_str_cmp(_hash_internal(method, salt, password)[0], hashval) - - -def safe_join(directory, *pathnames): - """Safely join `directory` and one or more untrusted `pathnames`. If this - cannot be done, this function returns ``None``. - - :param directory: the base directory. - :param pathnames: the untrusted pathnames relative to that directory. - """ - parts = [directory] - for filename in pathnames: - if filename != '': - filename = posixpath.normpath(filename) - for sep in _os_alt_seps: - if sep in filename: - return None - if os.path.isabs(filename) or \ - filename == '..' or \ - filename.startswith('../'): - return None - parts.append(filename) - return posixpath.join(*parts) diff --git a/pyextra/werkzeug/serving.py b/pyextra/werkzeug/serving.py deleted file mode 100644 index 902f1aa1c..000000000 --- a/pyextra/werkzeug/serving.py +++ /dev/null @@ -1,862 +0,0 @@ -# -*- coding: utf-8 -*- -""" - werkzeug.serving - ~~~~~~~~~~~~~~~~ - - There are many ways to serve a WSGI application. While you're developing - it you usually don't want a full blown webserver like Apache but a simple - standalone one. From Python 2.5 onwards there is the `wsgiref`_ server in - the standard library. If you're using older versions of Python you can - download the package from the cheeseshop. - - However there are some caveats. Sourcecode won't reload itself when - changed and each time you kill the server using ``^C`` you get an - `KeyboardInterrupt` error. While the latter is easy to solve the first - one can be a pain in the ass in some situations. - - The easiest way is creating a small ``start-myproject.py`` that runs the - application:: - - #!/usr/bin/env python - # -*- coding: utf-8 -*- - from myproject import make_app - from werkzeug.serving import run_simple - - app = make_app(...) - run_simple('localhost', 8080, app, use_reloader=True) - - You can also pass it a `extra_files` keyword argument with a list of - additional files (like configuration files) you want to observe. - - For bigger applications you should consider using `click` - (http://click.pocoo.org) instead of a simple start file. - - - :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details. - :license: BSD, see LICENSE for more details. 
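A minimal sketch of the round trip the ``werkzeug.security`` helpers deleted above were built for (hypothetical values; assumes the package stays importable from site-packages)::

    from werkzeug.security import (check_password_hash,
                                   generate_password_hash, safe_join)

    # Format: 'pbkdf2:sha256:50000$<salt>$<hex digest>' -- the method and salt
    # travel with the hash, so verification needs no extra state.
    pwhash = generate_password_hash('correct horse battery staple')
    assert check_password_hash(pwhash, 'correct horse battery staple')
    assert not check_password_hash(pwhash, 'wrong guess')

    # safe_join() returns None instead of raising when a path tries to escape.
    assert safe_join('/srv/uploads', 'avatars/me.png') == '/srv/uploads/avatars/me.png'
    assert safe_join('/srv/uploads', '../etc/passwd') is None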
-""" -from __future__ import with_statement - -import io -import os -import socket -import sys -import signal - - -can_fork = hasattr(os, "fork") - - -try: - import termcolor -except ImportError: - termcolor = None - -try: - import ssl -except ImportError: - class _SslDummy(object): - def __getattr__(self, name): - raise RuntimeError('SSL support unavailable') - ssl = _SslDummy() - - -def _get_openssl_crypto_module(): - try: - from OpenSSL import crypto - except ImportError: - raise TypeError('Using ad-hoc certificates requires the pyOpenSSL ' - 'library.') - else: - return crypto - - -try: - import SocketServer as socketserver - from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler -except ImportError: - import socketserver - from http.server import HTTPServer, BaseHTTPRequestHandler - -ThreadingMixIn = socketserver.ThreadingMixIn - -if can_fork: - ForkingMixIn = socketserver.ForkingMixIn -else: - class ForkingMixIn(object): - pass - -# important: do not use relative imports here or python -m will break -import werkzeug -from werkzeug._internal import _log -from werkzeug._compat import PY2, WIN, reraise, wsgi_encoding_dance -from werkzeug.urls import url_parse, url_unquote -from werkzeug.exceptions import InternalServerError - - -LISTEN_QUEUE = 128 -can_open_by_fd = not WIN and hasattr(socket, 'fromfd') - - -class DechunkedInput(io.RawIOBase): - """An input stream that handles Transfer-Encoding 'chunked'""" - - def __init__(self, rfile): - self._rfile = rfile - self._done = False - self._len = 0 - - def readable(self): - return True - - def read_chunk_len(self): - try: - line = self._rfile.readline().decode('latin1') - _len = int(line.strip(), 16) - except ValueError: - raise IOError('Invalid chunk header') - if _len < 0: - raise IOError('Negative chunk length not allowed') - return _len - - def readinto(self, buf): - read = 0 - while not self._done and read < len(buf): - if self._len == 0: - # This is the first chunk or we fully consumed the previous - # one. Read the next length of the next chunk - self._len = self.read_chunk_len() - - if self._len == 0: - # Found the final chunk of size 0. The stream is now exhausted, - # but there is still a final newline that should be consumed - self._done = True - - if self._len > 0: - # There is data (left) in this chunk, so append it to the - # buffer. If this operation fully consumes the chunk, this will - # reset self._len to 0. - n = min(len(buf), self._len) - buf[read:read + n] = self._rfile.read(n) - self._len -= n - read += n - - if self._len == 0: - # Skip the terminating newline of a chunk that has been fully - # consumed. 
This also applies to the 0-sized final chunk - terminator = self._rfile.readline() - if terminator not in (b'\n', b'\r\n', b'\r'): - raise IOError('Missing chunk terminating newline') - - return read - - -class WSGIRequestHandler(BaseHTTPRequestHandler, object): - - """A request handler that implements WSGI dispatching.""" - - @property - def server_version(self): - return 'Werkzeug/' + werkzeug.__version__ - - def make_environ(self): - request_url = url_parse(self.path) - - def shutdown_server(): - self.server.shutdown_signal = True - - url_scheme = self.server.ssl_context is None and 'http' or 'https' - path_info = url_unquote(request_url.path) - - environ = { - 'wsgi.version': (1, 0), - 'wsgi.url_scheme': url_scheme, - 'wsgi.input': self.rfile, - 'wsgi.errors': sys.stderr, - 'wsgi.multithread': self.server.multithread, - 'wsgi.multiprocess': self.server.multiprocess, - 'wsgi.run_once': False, - 'werkzeug.server.shutdown': shutdown_server, - 'SERVER_SOFTWARE': self.server_version, - 'REQUEST_METHOD': self.command, - 'SCRIPT_NAME': '', - 'PATH_INFO': wsgi_encoding_dance(path_info), - 'QUERY_STRING': wsgi_encoding_dance(request_url.query), - 'REMOTE_ADDR': self.address_string(), - 'REMOTE_PORT': self.port_integer(), - 'SERVER_NAME': self.server.server_address[0], - 'SERVER_PORT': str(self.server.server_address[1]), - 'SERVER_PROTOCOL': self.request_version - } - - for key, value in self.headers.items(): - key = key.upper().replace('-', '_') - if key not in ('CONTENT_TYPE', 'CONTENT_LENGTH'): - key = 'HTTP_' + key - environ[key] = value - - if environ.get('HTTP_TRANSFER_ENCODING', '').strip().lower() == 'chunked': - environ['wsgi.input_terminated'] = True - environ['wsgi.input'] = DechunkedInput(environ['wsgi.input']) - - if request_url.scheme and request_url.netloc: - environ['HTTP_HOST'] = request_url.netloc - - return environ - - def run_wsgi(self): - if self.headers.get('Expect', '').lower().strip() == '100-continue': - self.wfile.write(b'HTTP/1.1 100 Continue\r\n\r\n') - - self.environ = environ = self.make_environ() - headers_set = [] - headers_sent = [] - - def write(data): - assert headers_set, 'write() before start_response' - if not headers_sent: - status, response_headers = headers_sent[:] = headers_set - try: - code, msg = status.split(None, 1) - except ValueError: - code, msg = status, "" - code = int(code) - self.send_response(code, msg) - header_keys = set() - for key, value in response_headers: - self.send_header(key, value) - key = key.lower() - header_keys.add(key) - if not ('content-length' in header_keys or - environ['REQUEST_METHOD'] == 'HEAD' or - code < 200 or code in (204, 304)): - self.close_connection = True - self.send_header('Connection', 'close') - if 'server' not in header_keys: - self.send_header('Server', self.version_string()) - if 'date' not in header_keys: - self.send_header('Date', self.date_time_string()) - self.end_headers() - - assert isinstance(data, bytes), 'applications must write bytes' - self.wfile.write(data) - self.wfile.flush() - - def start_response(status, response_headers, exc_info=None): - if exc_info: - try: - if headers_sent: - reraise(*exc_info) - finally: - exc_info = None - elif headers_set: - raise AssertionError('Headers already set') - headers_set[:] = [status, response_headers] - return write - - def execute(app): - application_iter = app(environ, start_response) - try: - for data in application_iter: - write(data) - if not headers_sent: - write(b'') - finally: - if hasattr(application_iter, 'close'): - application_iter.close() - 
application_iter = None - - try: - execute(self.server.app) - except (socket.error, socket.timeout) as e: - self.connection_dropped(e, environ) - except Exception: - if self.server.passthrough_errors: - raise - from werkzeug.debug.tbtools import get_current_traceback - traceback = get_current_traceback(ignore_system_exceptions=True) - try: - # if we haven't yet sent the headers but they are set - # we roll back to be able to set them again. - if not headers_sent: - del headers_set[:] - execute(InternalServerError()) - except Exception: - pass - self.server.log('error', 'Error on request:\n%s', - traceback.plaintext) - - def handle(self): - """Handles a request ignoring dropped connections.""" - rv = None - try: - rv = BaseHTTPRequestHandler.handle(self) - except (socket.error, socket.timeout) as e: - self.connection_dropped(e) - except Exception: - if self.server.ssl_context is None or not is_ssl_error(): - raise - if self.server.shutdown_signal: - self.initiate_shutdown() - return rv - - def initiate_shutdown(self): - """A horrible, horrible way to kill the server for Python 2.6 and - later. It's the best we can do. - """ - # Windows does not provide SIGKILL, go with SIGTERM then. - sig = getattr(signal, 'SIGKILL', signal.SIGTERM) - # reloader active - if os.environ.get('WERKZEUG_RUN_MAIN') == 'true': - os.kill(os.getpid(), sig) - # python 2.7 - self.server._BaseServer__shutdown_request = True - # python 2.6 - self.server._BaseServer__serving = False - - def connection_dropped(self, error, environ=None): - """Called if the connection was closed by the client. By default - nothing happens. - """ - - def handle_one_request(self): - """Handle a single HTTP request.""" - self.raw_requestline = self.rfile.readline() - if not self.raw_requestline: - self.close_connection = 1 - elif self.parse_request(): - return self.run_wsgi() - - def send_response(self, code, message=None): - """Send the response header and log the response code.""" - self.log_request(code) - if message is None: - message = code in self.responses and self.responses[code][0] or '' - if self.request_version != 'HTTP/0.9': - hdr = "%s %d %s\r\n" % (self.protocol_version, code, message) - self.wfile.write(hdr.encode('ascii')) - - def version_string(self): - return BaseHTTPRequestHandler.version_string(self).strip() - - def address_string(self): - if getattr(self, 'environ', None): - return self.environ['REMOTE_ADDR'] - else: - return self.client_address[0] - - def port_integer(self): - return self.client_address[1] - - def log_request(self, code='-', size='-'): - msg = self.requestline - code = str(code) - - if termcolor: - color = termcolor.colored - - if code[0] == '1': # 1xx - Informational - msg = color(msg, attrs=['bold']) - elif code[0] == '2': # 2xx - Success - msg = color(msg, color='white') - elif code == '304': # 304 - Resource Not Modified - msg = color(msg, color='cyan') - elif code[0] == '3': # 3xx - Redirection - msg = color(msg, color='green') - elif code == '404': # 404 - Resource Not Found - msg = color(msg, color='yellow') - elif code[0] == '4': # 4xx - Client Error - msg = color(msg, color='red', attrs=['bold']) - else: # 5xx, or any other response - msg = color(msg, color='magenta', attrs=['bold']) - - self.log('info', '"%s" %s %s', msg, code, size) - - def log_error(self, *args): - self.log('error', *args) - - def log_message(self, format, *args): - self.log('info', format, *args) - - def log(self, type, message, *args): - _log(type, '%s - - [%s] %s\n' % (self.address_string(), - self.log_date_time_string(), 
- message % args)) - - -#: backwards compatible name if someone is subclassing it -BaseRequestHandler = WSGIRequestHandler - - -def generate_adhoc_ssl_pair(cn=None): - from random import random - crypto = _get_openssl_crypto_module() - - # pretty damn sure that this is not actually accepted by anyone - if cn is None: - cn = '*' - - cert = crypto.X509() - cert.set_serial_number(int(random() * sys.maxsize)) - cert.gmtime_adj_notBefore(0) - cert.gmtime_adj_notAfter(60 * 60 * 24 * 365) - - subject = cert.get_subject() - subject.CN = cn - subject.O = 'Dummy Certificate' # noqa: E741 - - issuer = cert.get_issuer() - issuer.CN = 'Untrusted Authority' - issuer.O = 'Self-Signed' # noqa: E741 - - pkey = crypto.PKey() - pkey.generate_key(crypto.TYPE_RSA, 2048) - cert.set_pubkey(pkey) - cert.sign(pkey, 'sha256') - - return cert, pkey - - -def make_ssl_devcert(base_path, host=None, cn=None): - """Creates an SSL key for development. This should be used instead of - the ``'adhoc'`` key which generates a new cert on each server start. - It accepts a path for where it should store the key and cert and - either a host or CN. If a host is given it will use the CN - ``*.host/CN=host``. - - For more information see :func:`run_simple`. - - .. versionadded:: 0.9 - - :param base_path: the path to the certificate and key. The extension - ``.crt`` is added for the certificate, ``.key`` is - added for the key. - :param host: the name of the host. This can be used as an alternative - for the `cn`. - :param cn: the `CN` to use. - """ - from OpenSSL import crypto - if host is not None: - cn = '*.%s/CN=%s' % (host, host) - cert, pkey = generate_adhoc_ssl_pair(cn=cn) - - cert_file = base_path + '.crt' - pkey_file = base_path + '.key' - - with open(cert_file, 'wb') as f: - f.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert)) - with open(pkey_file, 'wb') as f: - f.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey)) - - return cert_file, pkey_file - - -def generate_adhoc_ssl_context(): - """Generates an adhoc SSL context for the development server.""" - crypto = _get_openssl_crypto_module() - import tempfile - import atexit - - cert, pkey = generate_adhoc_ssl_pair() - cert_handle, cert_file = tempfile.mkstemp() - pkey_handle, pkey_file = tempfile.mkstemp() - atexit.register(os.remove, pkey_file) - atexit.register(os.remove, cert_file) - - os.write(cert_handle, crypto.dump_certificate(crypto.FILETYPE_PEM, cert)) - os.write(pkey_handle, crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey)) - os.close(cert_handle) - os.close(pkey_handle) - ctx = load_ssl_context(cert_file, pkey_file) - return ctx - - -def load_ssl_context(cert_file, pkey_file=None, protocol=None): - """Loads SSL context from cert/private key files and optional protocol. - Many parameters are directly taken from the API of - :py:class:`ssl.SSLContext`. - - :param cert_file: Path of the certificate to use. - :param pkey_file: Path of the private key to use. If not given, the key - will be obtained from the certificate file. - :param protocol: One of the ``PROTOCOL_*`` constants in the stdlib ``ssl`` - module. Defaults to ``PROTOCOL_SSLv23``. 
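A rough sketch of how the development-certificate helpers above combine with ``run_simple`` (hypothetical paths; generating the certificate requires pyOpenSSL)::

    from werkzeug.serving import make_ssl_devcert, run_simple

    def app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'hello over https']

    # Writes ./devcert.crt and ./devcert.key once; reusing them avoids the
    # 'adhoc' context, which regenerates a certificate on every start.
    cert_file, pkey_file = make_ssl_devcert('./devcert', host='localhost')
    run_simple('localhost', 8443, app, ssl_context=(cert_file, pkey_file))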
- """ - if protocol is None: - protocol = ssl.PROTOCOL_SSLv23 - ctx = _SSLContext(protocol) - ctx.load_cert_chain(cert_file, pkey_file) - return ctx - - -class _SSLContext(object): - - '''A dummy class with a small subset of Python3's ``ssl.SSLContext``, only - intended to be used with and by Werkzeug.''' - - def __init__(self, protocol): - self._protocol = protocol - self._certfile = None - self._keyfile = None - self._password = None - - def load_cert_chain(self, certfile, keyfile=None, password=None): - self._certfile = certfile - self._keyfile = keyfile or certfile - self._password = password - - def wrap_socket(self, sock, **kwargs): - return ssl.wrap_socket(sock, keyfile=self._keyfile, - certfile=self._certfile, - ssl_version=self._protocol, **kwargs) - - -def is_ssl_error(error=None): - """Checks if the given error (or the current one) is an SSL error.""" - exc_types = (ssl.SSLError,) - try: - from OpenSSL.SSL import Error - exc_types += (Error,) - except ImportError: - pass - - if error is None: - error = sys.exc_info()[1] - return isinstance(error, exc_types) - - -def select_ip_version(host, port): - """Returns AF_INET4 or AF_INET6 depending on where to connect to.""" - # disabled due to problems with current ipv6 implementations - # and various operating systems. Probably this code also is - # not supposed to work, but I can't come up with any other - # ways to implement this. - # try: - # info = socket.getaddrinfo(host, port, socket.AF_UNSPEC, - # socket.SOCK_STREAM, 0, - # socket.AI_PASSIVE) - # if info: - # return info[0][0] - # except socket.gaierror: - # pass - if ':' in host and hasattr(socket, 'AF_INET6'): - return socket.AF_INET6 - return socket.AF_INET - - -def get_sockaddr(host, port, family): - """Returns a fully qualified socket address, that can properly used by - socket.bind""" - try: - res = socket.getaddrinfo(host, port, family, - socket.SOCK_STREAM, socket.SOL_TCP) - except socket.gaierror: - return (host, port) - return res[0][4] - - -class BaseWSGIServer(HTTPServer, object): - - """Simple single-threaded, single-process WSGI server.""" - multithread = False - multiprocess = False - request_queue_size = LISTEN_QUEUE - - def __init__(self, host, port, app, handler=None, - passthrough_errors=False, ssl_context=None, fd=None): - if handler is None: - handler = WSGIRequestHandler - - self.address_family = select_ip_version(host, port) - - if fd is not None: - real_sock = socket.fromfd(fd, self.address_family, - socket.SOCK_STREAM) - port = 0 - HTTPServer.__init__(self, get_sockaddr(host, int(port), - self.address_family), handler) - self.app = app - self.passthrough_errors = passthrough_errors - self.shutdown_signal = False - self.host = host - self.port = self.socket.getsockname()[1] - - # Patch in the original socket. 
- if fd is not None: - self.socket.close() - self.socket = real_sock - self.server_address = self.socket.getsockname() - - if ssl_context is not None: - if isinstance(ssl_context, tuple): - ssl_context = load_ssl_context(*ssl_context) - if ssl_context == 'adhoc': - ssl_context = generate_adhoc_ssl_context() - # If we are on Python 2 the return value from socket.fromfd - # is an internal socket object but what we need for ssl wrap - # is the wrapper around it :( - sock = self.socket - if PY2 and not isinstance(sock, socket.socket): - sock = socket.socket(sock.family, sock.type, sock.proto, sock) - self.socket = ssl_context.wrap_socket(sock, server_side=True) - self.ssl_context = ssl_context - else: - self.ssl_context = None - - def log(self, type, message, *args): - _log(type, message, *args) - - def serve_forever(self): - self.shutdown_signal = False - try: - HTTPServer.serve_forever(self) - except KeyboardInterrupt: - pass - finally: - self.server_close() - - def handle_error(self, request, client_address): - if self.passthrough_errors: - raise - return HTTPServer.handle_error(self, request, client_address) - - def get_request(self): - con, info = self.socket.accept() - return con, info - - -class ThreadedWSGIServer(ThreadingMixIn, BaseWSGIServer): - - """A WSGI server that does threading.""" - multithread = True - daemon_threads = True - - -class ForkingWSGIServer(ForkingMixIn, BaseWSGIServer): - - """A WSGI server that does forking.""" - multiprocess = True - - def __init__(self, host, port, app, processes=40, handler=None, - passthrough_errors=False, ssl_context=None, fd=None): - if not can_fork: - raise ValueError('Your platform does not support forking.') - BaseWSGIServer.__init__(self, host, port, app, handler, - passthrough_errors, ssl_context, fd) - self.max_children = processes - - -def make_server(host=None, port=None, app=None, threaded=False, processes=1, - request_handler=None, passthrough_errors=False, - ssl_context=None, fd=None): - """Create a new server instance that is either threaded, or forks - or just processes one request after another. - """ - if threaded and processes > 1: - raise ValueError("cannot have a multithreaded and " - "multi process server.") - elif threaded: - return ThreadedWSGIServer(host, port, app, request_handler, - passthrough_errors, ssl_context, fd=fd) - elif processes > 1: - return ForkingWSGIServer(host, port, app, processes, request_handler, - passthrough_errors, ssl_context, fd=fd) - else: - return BaseWSGIServer(host, port, app, request_handler, - passthrough_errors, ssl_context, fd=fd) - - -def is_running_from_reloader(): - """Checks if the application is running from within the Werkzeug - reloader subprocess. - - .. versionadded:: 0.10 - """ - return os.environ.get('WERKZEUG_RUN_MAIN') == 'true' - - -def run_simple(hostname, port, application, use_reloader=False, - use_debugger=False, use_evalex=True, - extra_files=None, reloader_interval=1, - reloader_type='auto', threaded=False, - processes=1, request_handler=None, static_files=None, - passthrough_errors=False, ssl_context=None): - """Start a WSGI application. Optional features include a reloader, - multithreading and fork support. - - This function has a command-line interface too:: - - python -m werkzeug.serving --help - - .. versionadded:: 0.5 - `static_files` was added to simplify serving of static files as well - as `passthrough_errors`. - - .. versionadded:: 0.6 - support for SSL was added. - - .. 
versionadded:: 0.8 - Added support for automatically loading a SSL context from certificate - file and private key. - - .. versionadded:: 0.9 - Added command-line interface. - - .. versionadded:: 0.10 - Improved the reloader and added support for changing the backend - through the `reloader_type` parameter. See :ref:`reloader` - for more information. - - :param hostname: The host for the application. eg: ``'localhost'`` - :param port: The port for the server. eg: ``8080`` - :param application: the WSGI application to execute - :param use_reloader: should the server automatically restart the python - process if modules were changed? - :param use_debugger: should the werkzeug debugging system be used? - :param use_evalex: should the exception evaluation feature be enabled? - :param extra_files: a list of files the reloader should watch - additionally to the modules. For example configuration - files. - :param reloader_interval: the interval for the reloader in seconds. - :param reloader_type: the type of reloader to use. The default is - auto detection. Valid values are ``'stat'`` and - ``'watchdog'``. See :ref:`reloader` for more - information. - :param threaded: should the process handle each request in a separate - thread? - :param processes: if greater than 1 then handle each request in a new process - up to this maximum number of concurrent processes. - :param request_handler: optional parameter that can be used to replace - the default one. You can use this to replace it - with a different - :class:`~BaseHTTPServer.BaseHTTPRequestHandler` - subclass. - :param static_files: a list or dict of paths for static files. This works - exactly like :class:`SharedDataMiddleware`, it's actually - just wrapping the application in that middleware before - serving. - :param passthrough_errors: set this to `True` to disable the error catching. - This means that the server will die on errors but - it can be useful to hook debuggers in (pdb etc.) - :param ssl_context: an SSL context for the connection. Either an - :class:`ssl.SSLContext`, a tuple in the form - ``(cert_file, pkey_file)``, the string ``'adhoc'`` if - the server should automatically create one, or ``None`` - to disable SSL (which is the default). - """ - if not isinstance(port, int): - raise TypeError('port must be an integer') - if use_debugger: - from werkzeug.debug import DebuggedApplication - application = DebuggedApplication(application, use_evalex) - if static_files: - from werkzeug.wsgi import SharedDataMiddleware - application = SharedDataMiddleware(application, static_files) - - def log_startup(sock): - display_hostname = hostname not in ('', '*') and hostname or 'localhost' - if ':' in display_hostname: - display_hostname = '[%s]' % display_hostname - quit_msg = '(Press CTRL+C to quit)' - port = sock.getsockname()[1] - _log('info', ' * Running on %s://%s:%d/ %s', - ssl_context is None and 'http' or 'https', - display_hostname, port, quit_msg) - - def inner(): - try: - fd = int(os.environ['WERKZEUG_SERVER_FD']) - except (LookupError, ValueError): - fd = None - srv = make_server(hostname, port, application, threaded, - processes, request_handler, - passthrough_errors, ssl_context, - fd=fd) - if fd is None: - log_startup(srv.socket) - srv.serve_forever() - - if use_reloader: - # If we're not running already in the subprocess that is the - # reloader we want to open up a socket early to make sure the - # port is actually available. 
- if os.environ.get('WERKZEUG_RUN_MAIN') != 'true': - if port == 0 and not can_open_by_fd: - raise ValueError('Cannot bind to a random port with enabled ' - 'reloader if the Python interpreter does ' - 'not support socket opening by fd.') - - # Create and destroy a socket so that any exceptions are - # raised before we spawn a separate Python interpreter and - # lose this ability. - address_family = select_ip_version(hostname, port) - s = socket.socket(address_family, socket.SOCK_STREAM) - s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - s.bind(get_sockaddr(hostname, port, address_family)) - if hasattr(s, 'set_inheritable'): - s.set_inheritable(True) - - # If we can open the socket by file descriptor, then we can just - # reuse this one and our socket will survive the restarts. - if can_open_by_fd: - os.environ['WERKZEUG_SERVER_FD'] = str(s.fileno()) - s.listen(LISTEN_QUEUE) - log_startup(s) - else: - s.close() - - # Do not use relative imports, otherwise "python -m werkzeug.serving" - # breaks. - from werkzeug._reloader import run_with_reloader - run_with_reloader(inner, extra_files, reloader_interval, - reloader_type) - else: - inner() - - -def run_with_reloader(*args, **kwargs): - # People keep using undocumented APIs. Do not use this function - # please, we do not guarantee that it continues working. - from werkzeug._reloader import run_with_reloader - return run_with_reloader(*args, **kwargs) - - -def main(): - '''A simple command-line interface for :py:func:`run_simple`.''' - - # in contrast to argparse, this works at least under Python < 2.7 - import optparse - from werkzeug.utils import import_string - - parser = optparse.OptionParser( - usage='Usage: %prog [options] app_module:app_object') - parser.add_option('-b', '--bind', dest='address', - help='The hostname:port the app should listen on.') - parser.add_option('-d', '--debug', dest='use_debugger', - action='store_true', default=False, - help='Use Werkzeug\'s debugger.') - parser.add_option('-r', '--reload', dest='use_reloader', - action='store_true', default=False, - help='Reload Python process if modules change.') - options, args = parser.parse_args() - - hostname, port = None, None - if options.address: - address = options.address.split(':') - hostname = address[0] - if len(address) > 1: - port = address[1] - - if len(args) != 1: - sys.stdout.write('No application supplied, or too much. See --help\n') - sys.exit(1) - app = import_string(args[0]) - - run_simple( - hostname=(hostname or '127.0.0.1'), port=int(port or 5000), - application=app, use_reloader=options.use_reloader, - use_debugger=options.use_debugger - ) - -if __name__ == '__main__': - main() diff --git a/pyextra/werkzeug/test.py b/pyextra/werkzeug/test.py deleted file mode 100644 index acd21793d..000000000 --- a/pyextra/werkzeug/test.py +++ /dev/null @@ -1,948 +0,0 @@ -# -*- coding: utf-8 -*- -""" - werkzeug.test - ~~~~~~~~~~~~~ - - This module implements a client to WSGI applications for testing. - - :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details. - :license: BSD, see LICENSE for more details. 
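For reference, the usual development entry point built on the ``serving`` module deleted above is only a few lines; a minimal sketch::

    from werkzeug.serving import run_simple
    from werkzeug.wrappers import Request, Response

    @Request.application
    def application(request):
        return Response('Hello from the dev server: %s' % request.path)

    if __name__ == '__main__':
        # Reloader and debugger as described in the run_simple() docstring.
        run_simple('localhost', 8080, application,
                   use_reloader=True, use_debugger=True)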
-""" -import sys -import mimetypes -from time import time -from random import random -from itertools import chain -from tempfile import TemporaryFile -from io import BytesIO - -try: - from urllib2 import Request as U2Request -except ImportError: - from urllib.request import Request as U2Request -try: - from http.cookiejar import CookieJar -except ImportError: # Py2 - from cookielib import CookieJar - -from werkzeug._compat import iterlists, iteritems, itervalues, to_bytes, \ - string_types, text_type, reraise, wsgi_encoding_dance, \ - make_literal_wrapper -from werkzeug._internal import _empty_stream, _get_environ -from werkzeug.wrappers import BaseRequest -from werkzeug.urls import url_encode, url_fix, iri_to_uri, url_unquote, \ - url_unparse, url_parse -from werkzeug.wsgi import get_host, get_current_url, ClosingIterator -from werkzeug.utils import dump_cookie, get_content_type -from werkzeug.datastructures import FileMultiDict, MultiDict, \ - CombinedMultiDict, Headers, FileStorage, CallbackDict -from werkzeug.http import dump_options_header, parse_options_header - - -def stream_encode_multipart(values, use_tempfile=True, threshold=1024 * 500, - boundary=None, charset='utf-8'): - """Encode a dict of values (either strings or file descriptors or - :class:`FileStorage` objects.) into a multipart encoded string stored - in a file descriptor. - """ - if boundary is None: - boundary = '---------------WerkzeugFormPart_%s%s' % (time(), random()) - _closure = [BytesIO(), 0, False] - - if use_tempfile: - def write_binary(string): - stream, total_length, on_disk = _closure - if on_disk: - stream.write(string) - else: - length = len(string) - if length + _closure[1] <= threshold: - stream.write(string) - else: - new_stream = TemporaryFile('wb+') - new_stream.write(stream.getvalue()) - new_stream.write(string) - _closure[0] = new_stream - _closure[2] = True - _closure[1] = total_length + length - else: - write_binary = _closure[0].write - - def write(string): - write_binary(string.encode(charset)) - - if not isinstance(values, MultiDict): - values = MultiDict(values) - - for key, values in iterlists(values): - for value in values: - write('--%s\r\nContent-Disposition: form-data; name="%s"' % - (boundary, key)) - reader = getattr(value, 'read', None) - if reader is not None: - filename = getattr(value, 'filename', - getattr(value, 'name', None)) - content_type = getattr(value, 'content_type', None) - if content_type is None: - content_type = filename and \ - mimetypes.guess_type(filename)[0] or \ - 'application/octet-stream' - if filename is not None: - write('; filename="%s"\r\n' % filename) - else: - write('\r\n') - write('Content-Type: %s\r\n\r\n' % content_type) - while 1: - chunk = reader(16384) - if not chunk: - break - write_binary(chunk) - else: - if not isinstance(value, string_types): - value = str(value) - - value = to_bytes(value, charset) - write('\r\n\r\n') - write_binary(value) - write('\r\n') - write('--%s--\r\n' % boundary) - - length = int(_closure[0].tell()) - _closure[0].seek(0) - return _closure[0], length, boundary - - -def encode_multipart(values, boundary=None, charset='utf-8'): - """Like `stream_encode_multipart` but returns a tuple in the form - (``boundary``, ``data``) where data is a bytestring. 
- """ - stream, length, boundary = stream_encode_multipart( - values, use_tempfile=False, boundary=boundary, charset=charset) - return boundary, stream.read() - - -def File(fd, filename=None, mimetype=None): - """Backwards compat.""" - from warnings import warn - warn(DeprecationWarning('werkzeug.test.File is deprecated, use the ' - 'EnvironBuilder or FileStorage instead')) - return FileStorage(fd, filename=filename, content_type=mimetype) - - -class _TestCookieHeaders(object): - - """A headers adapter for cookielib - """ - - def __init__(self, headers): - self.headers = headers - - def getheaders(self, name): - headers = [] - name = name.lower() - for k, v in self.headers: - if k.lower() == name: - headers.append(v) - return headers - - def get_all(self, name, default=None): - rv = [] - for k, v in self.headers: - if k.lower() == name.lower(): - rv.append(v) - return rv or default or [] - - -class _TestCookieResponse(object): - - """Something that looks like a httplib.HTTPResponse, but is actually just an - adapter for our test responses to make them available for cookielib. - """ - - def __init__(self, headers): - self.headers = _TestCookieHeaders(headers) - - def info(self): - return self.headers - - -class _TestCookieJar(CookieJar): - - """A cookielib.CookieJar modified to inject and read cookie headers from - and to wsgi environments, and wsgi application responses. - """ - - def inject_wsgi(self, environ): - """Inject the cookies as client headers into the server's wsgi - environment. - """ - cvals = [] - for cookie in self: - cvals.append('%s=%s' % (cookie.name, cookie.value)) - if cvals: - environ['HTTP_COOKIE'] = '; '.join(cvals) - - def extract_wsgi(self, environ, headers): - """Extract the server's set-cookie headers as cookies into the - cookie jar. - """ - self.extract_cookies( - _TestCookieResponse(headers), - U2Request(get_current_url(environ)), - ) - - -def _iter_data(data): - """Iterates over a `dict` or :class:`MultiDict` yielding all keys and - values. - This is used to iterate over the data passed to the - :class:`EnvironBuilder`. - """ - if isinstance(data, MultiDict): - for key, values in iterlists(data): - for value in values: - yield key, value - else: - for key, values in iteritems(data): - if isinstance(values, list): - for value in values: - yield key, value - else: - yield key, values - - -class EnvironBuilder(object): - - """This class can be used to conveniently create a WSGI environment - for testing purposes. It can be used to quickly create WSGI environments - or request objects from arbitrary data. - - The signature of this class is also used in some other places as of - Werkzeug 0.5 (:func:`create_environ`, :meth:`BaseResponse.from_values`, - :meth:`Client.open`). Because of this most of the functionality is - available through the constructor alone. - - Files and regular form data can be manipulated independently of each - other with the :attr:`form` and :attr:`files` attributes, but are - passed with the same argument to the constructor: `data`. - - `data` can be any of these values: - - - a `str` or `bytes` object: The object is converted into an - :attr:`input_stream`, the :attr:`content_length` is set and you have to - provide a :attr:`content_type`. - - a `dict` or :class:`MultiDict`: The keys have to be strings. The values - have to be either any of the following objects, or a list of any of the - following objects: - - - a :class:`file`-like object: These are converted into - :class:`FileStorage` objects automatically. 
- - a `tuple`: The :meth:`~FileMultiDict.add_file` method is called - with the key and the unpacked `tuple` items as positional - arguments. - - a `str`: The string is set as form data for the associated key. - - a file-like object: The object content is loaded in memory and then - handled like a regular `str` or a `bytes`. - - .. versionadded:: 0.6 - `path` and `base_url` can now be unicode strings that are encoded using - the :func:`iri_to_uri` function. - - :param path: the path of the request. In the WSGI environment this will - end up as `PATH_INFO`. If the `query_string` is not defined - and there is a question mark in the `path` everything after - it is used as query string. - :param base_url: the base URL is a URL that is used to extract the WSGI - URL scheme, host (server name + server port) and the - script root (`SCRIPT_NAME`). - :param query_string: an optional string or dict with URL parameters. - :param method: the HTTP method to use, defaults to `GET`. - :param input_stream: an optional input stream. Do not specify this and - `data`. As soon as an input stream is set you can't - modify :attr:`args` and :attr:`files` unless you - set the :attr:`input_stream` to `None` again. - :param content_type: The content type for the request. As of 0.5 you - don't have to provide this when specifying files - and form data via `data`. - :param content_length: The content length for the request. You don't - have to specify this when providing data via - `data`. - :param errors_stream: an optional error stream that is used for - `wsgi.errors`. Defaults to :data:`stderr`. - :param multithread: controls `wsgi.multithread`. Defaults to `False`. - :param multiprocess: controls `wsgi.multiprocess`. Defaults to `False`. - :param run_once: controls `wsgi.run_once`. Defaults to `False`. - :param headers: an optional list or :class:`Headers` object of headers. - :param data: a string or dict of form data or a file-object. - See explanation above. - :param environ_base: an optional dict of environment defaults. - :param environ_overrides: an optional dict of environment overrides. - :param charset: the charset used to encode unicode data. - """ - - #: the server protocol to use. defaults to HTTP/1.1 - server_protocol = 'HTTP/1.1' - - #: the wsgi version to use. 
defaults to (1, 0) - wsgi_version = (1, 0) - - #: the default request class for :meth:`get_request` - request_class = BaseRequest - - def __init__(self, path='/', base_url=None, query_string=None, - method='GET', input_stream=None, content_type=None, - content_length=None, errors_stream=None, multithread=False, - multiprocess=False, run_once=False, headers=None, data=None, - environ_base=None, environ_overrides=None, charset='utf-8', - mimetype=None): - path_s = make_literal_wrapper(path) - if query_string is None and path_s('?') in path: - path, query_string = path.split(path_s('?'), 1) - self.charset = charset - self.path = iri_to_uri(path) - if base_url is not None: - base_url = url_fix(iri_to_uri(base_url, charset), charset) - self.base_url = base_url - if isinstance(query_string, (bytes, text_type)): - self.query_string = query_string - else: - if query_string is None: - query_string = MultiDict() - elif not isinstance(query_string, MultiDict): - query_string = MultiDict(query_string) - self.args = query_string - self.method = method - if headers is None: - headers = Headers() - elif not isinstance(headers, Headers): - headers = Headers(headers) - self.headers = headers - if content_type is not None: - self.content_type = content_type - if errors_stream is None: - errors_stream = sys.stderr - self.errors_stream = errors_stream - self.multithread = multithread - self.multiprocess = multiprocess - self.run_once = run_once - self.environ_base = environ_base - self.environ_overrides = environ_overrides - self.input_stream = input_stream - self.content_length = content_length - self.closed = False - - if data: - if input_stream is not None: - raise TypeError('can\'t provide input stream and data') - if hasattr(data, 'read'): - data = data.read() - if isinstance(data, text_type): - data = data.encode(self.charset) - if isinstance(data, bytes): - self.input_stream = BytesIO(data) - if self.content_length is None: - self.content_length = len(data) - else: - for key, value in _iter_data(data): - if isinstance(value, (tuple, dict)) or \ - hasattr(value, 'read'): - self._add_file_from_data(key, value) - else: - self.form.setlistdefault(key).append(value) - - if mimetype is not None: - self.mimetype = mimetype - - def _add_file_from_data(self, key, value): - """Called in the EnvironBuilder to add files from the data dict.""" - if isinstance(value, tuple): - self.files.add_file(key, *value) - elif isinstance(value, dict): - from warnings import warn - warn(DeprecationWarning('it\'s no longer possible to pass dicts ' - 'as `data`. 
Use tuples or FileStorage ' - 'objects instead'), stacklevel=2) - value = dict(value) - mimetype = value.pop('mimetype', None) - if mimetype is not None: - value['content_type'] = mimetype - self.files.add_file(key, **value) - else: - self.files.add_file(key, value) - - def _get_base_url(self): - return url_unparse((self.url_scheme, self.host, - self.script_root, '', '')).rstrip('/') + '/' - - def _set_base_url(self, value): - if value is None: - scheme = 'http' - netloc = 'localhost' - script_root = '' - else: - scheme, netloc, script_root, qs, anchor = url_parse(value) - if qs or anchor: - raise ValueError('base url must not contain a query string ' - 'or fragment') - self.script_root = script_root.rstrip('/') - self.host = netloc - self.url_scheme = scheme - - base_url = property(_get_base_url, _set_base_url, doc=''' - The base URL is a URL that is used to extract the WSGI - URL scheme, host (server name + server port) and the - script root (`SCRIPT_NAME`).''') - del _get_base_url, _set_base_url - - def _get_content_type(self): - ct = self.headers.get('Content-Type') - if ct is None and not self._input_stream: - if self._files: - return 'multipart/form-data' - elif self._form: - return 'application/x-www-form-urlencoded' - return None - return ct - - def _set_content_type(self, value): - if value is None: - self.headers.pop('Content-Type', None) - else: - self.headers['Content-Type'] = value - - content_type = property(_get_content_type, _set_content_type, doc=''' - The content type for the request. Reflected from and to the - :attr:`headers`. Do not set if you set :attr:`files` or - :attr:`form` for auto detection.''') - del _get_content_type, _set_content_type - - def _get_content_length(self): - return self.headers.get('Content-Length', type=int) - - def _get_mimetype(self): - ct = self.content_type - if ct: - return ct.split(';')[0].strip() - - def _set_mimetype(self, value): - self.content_type = get_content_type(value, self.charset) - - def _get_mimetype_params(self): - def on_update(d): - self.headers['Content-Type'] = \ - dump_options_header(self.mimetype, d) - d = parse_options_header(self.headers.get('content-type', ''))[1] - return CallbackDict(d, on_update) - - mimetype = property(_get_mimetype, _set_mimetype, doc=''' - The mimetype (content type without charset etc.) - - .. versionadded:: 0.14 - ''') - mimetype_params = property(_get_mimetype_params, doc=''' - The mimetype parameters as dict. For example if the content - type is ``text/html; charset=utf-8`` the params would be - ``{'charset': 'utf-8'}``. - - .. versionadded:: 0.14 - ''') - del _get_mimetype, _set_mimetype, _get_mimetype_params - - def _set_content_length(self, value): - if value is None: - self.headers.pop('Content-Length', None) - else: - self.headers['Content-Length'] = str(value) - - content_length = property(_get_content_length, _set_content_length, doc=''' - The content length as integer. Reflected from and to the - :attr:`headers`. 
Do not set if you set :attr:`files` or - :attr:`form` for auto detection.''') - del _get_content_length, _set_content_length - - def form_property(name, storage, doc): - key = '_' + name - - def getter(self): - if self._input_stream is not None: - raise AttributeError('an input stream is defined') - rv = getattr(self, key) - if rv is None: - rv = storage() - setattr(self, key, rv) - - return rv - - def setter(self, value): - self._input_stream = None - setattr(self, key, value) - return property(getter, setter, doc=doc) - - form = form_property('form', MultiDict, doc=''' - A :class:`MultiDict` of form values.''') - files = form_property('files', FileMultiDict, doc=''' - A :class:`FileMultiDict` of uploaded files. You can use the - :meth:`~FileMultiDict.add_file` method to add new files to the - dict.''') - del form_property - - def _get_input_stream(self): - return self._input_stream - - def _set_input_stream(self, value): - self._input_stream = value - self._form = self._files = None - - input_stream = property(_get_input_stream, _set_input_stream, doc=''' - An optional input stream. If you set this it will clear - :attr:`form` and :attr:`files`.''') - del _get_input_stream, _set_input_stream - - def _get_query_string(self): - if self._query_string is None: - if self._args is not None: - return url_encode(self._args, charset=self.charset) - return '' - return self._query_string - - def _set_query_string(self, value): - self._query_string = value - self._args = None - - query_string = property(_get_query_string, _set_query_string, doc=''' - The query string. If you set this to a string :attr:`args` will - no longer be available.''') - del _get_query_string, _set_query_string - - def _get_args(self): - if self._query_string is not None: - raise AttributeError('a query string is defined') - if self._args is None: - self._args = MultiDict() - return self._args - - def _set_args(self, value): - self._query_string = None - self._args = value - - args = property(_get_args, _set_args, doc=''' - The URL arguments as :class:`MultiDict`.''') - del _get_args, _set_args - - @property - def server_name(self): - """The server name (read-only, use :attr:`host` to set)""" - return self.host.split(':', 1)[0] - - @property - def server_port(self): - """The server port as integer (read-only, use :attr:`host` to set)""" - pieces = self.host.split(':', 1) - if len(pieces) == 2 and pieces[1].isdigit(): - return int(pieces[1]) - elif self.url_scheme == 'https': - return 443 - return 80 - - def __del__(self): - try: - self.close() - except Exception: - pass - - def close(self): - """Closes all files. If you put real :class:`file` objects into the - :attr:`files` dict you can call this method to automatically close - them all in one go. 
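For completeness, the builder is typically used either for a one-off environ or wrapped into a request object; a short sketch (hypothetical payload; ``get_environ`` and ``get_request`` follow just below)::

    from werkzeug.test import EnvironBuilder

    builder = EnvironBuilder(path='/upload?lang=en', method='POST',
                             data={'comment': 'hello'})
    try:
        environ = builder.get_environ()   # plain WSGI environ dict
        request = builder.get_request()   # BaseRequest wrapping a fresh environ
        assert request.args['lang'] == 'en'
        assert request.form['comment'] == 'hello'
    finally:
        builder.close()                   # closes any file objects in .files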
- """ - if self.closed: - return - try: - files = itervalues(self.files) - except AttributeError: - files = () - for f in files: - try: - f.close() - except Exception: - pass - self.closed = True - - def get_environ(self): - """Return the built environ.""" - input_stream = self.input_stream - content_length = self.content_length - - mimetype = self.mimetype - content_type = self.content_type - - if input_stream is not None: - start_pos = input_stream.tell() - input_stream.seek(0, 2) - end_pos = input_stream.tell() - input_stream.seek(start_pos) - content_length = end_pos - start_pos - elif mimetype == 'multipart/form-data': - values = CombinedMultiDict([self.form, self.files]) - input_stream, content_length, boundary = \ - stream_encode_multipart(values, charset=self.charset) - content_type = mimetype + '; boundary="%s"' % boundary - elif mimetype == 'application/x-www-form-urlencoded': - # XXX: py2v3 review - values = url_encode(self.form, charset=self.charset) - values = values.encode('ascii') - content_length = len(values) - input_stream = BytesIO(values) - else: - input_stream = _empty_stream - - result = {} - if self.environ_base: - result.update(self.environ_base) - - def _path_encode(x): - return wsgi_encoding_dance(url_unquote(x, self.charset), self.charset) - - qs = wsgi_encoding_dance(self.query_string) - - result.update({ - 'REQUEST_METHOD': self.method, - 'SCRIPT_NAME': _path_encode(self.script_root), - 'PATH_INFO': _path_encode(self.path), - 'QUERY_STRING': qs, - 'SERVER_NAME': self.server_name, - 'SERVER_PORT': str(self.server_port), - 'HTTP_HOST': self.host, - 'SERVER_PROTOCOL': self.server_protocol, - 'CONTENT_TYPE': content_type or '', - 'CONTENT_LENGTH': str(content_length or '0'), - 'wsgi.version': self.wsgi_version, - 'wsgi.url_scheme': self.url_scheme, - 'wsgi.input': input_stream, - 'wsgi.errors': self.errors_stream, - 'wsgi.multithread': self.multithread, - 'wsgi.multiprocess': self.multiprocess, - 'wsgi.run_once': self.run_once - }) - for key, value in self.headers.to_wsgi_list(): - result['HTTP_%s' % key.upper().replace('-', '_')] = value - if self.environ_overrides: - result.update(self.environ_overrides) - return result - - def get_request(self, cls=None): - """Returns a request with the data. If the request class is not - specified :attr:`request_class` is used. - - :param cls: The request wrapper to use. - """ - if cls is None: - cls = self.request_class - return cls(self.get_environ()) - - -class ClientRedirectError(Exception): - - """ - If a redirect loop is detected when using follow_redirects=True with - the :cls:`Client`, then this exception is raised. - """ - - -class Client(object): - - """This class allows to send requests to a wrapped application. - - The response wrapper can be a class or factory function that takes - three arguments: app_iter, status and headers. The default response - wrapper just returns a tuple. - - Example:: - - class ClientResponse(BaseResponse): - ... - - client = Client(MyApplication(), response_wrapper=ClientResponse) - - The use_cookies parameter indicates whether cookies should be stored and - sent for subsequent requests. This is True by default, but passing False - will disable this behaviour. - - If you want to request some subdomain of your application you may set - `allow_subdomain_redirects` to `True` as if not no external redirects - are allowed. - - .. versionadded:: 0.5 - `use_cookies` is new in this version. Older versions did not provide - builtin cookie support. - - .. 
versionadded:: 0.14 - The `mimetype` parameter was added. - """ - - def __init__(self, application, response_wrapper=None, use_cookies=True, - allow_subdomain_redirects=False): - self.application = application - self.response_wrapper = response_wrapper - if use_cookies: - self.cookie_jar = _TestCookieJar() - else: - self.cookie_jar = None - self.allow_subdomain_redirects = allow_subdomain_redirects - - def set_cookie(self, server_name, key, value='', max_age=None, - expires=None, path='/', domain=None, secure=None, - httponly=False, charset='utf-8'): - """Sets a cookie in the client's cookie jar. The server name - is required and has to match the one that is also passed to - the open call. - """ - assert self.cookie_jar is not None, 'cookies disabled' - header = dump_cookie(key, value, max_age, expires, path, domain, - secure, httponly, charset) - environ = create_environ(path, base_url='http://' + server_name) - headers = [('Set-Cookie', header)] - self.cookie_jar.extract_wsgi(environ, headers) - - def delete_cookie(self, server_name, key, path='/', domain=None): - """Deletes a cookie in the test client.""" - self.set_cookie(server_name, key, expires=0, max_age=0, - path=path, domain=domain) - - def run_wsgi_app(self, environ, buffered=False): - """Runs the wrapped WSGI app with the given environment.""" - if self.cookie_jar is not None: - self.cookie_jar.inject_wsgi(environ) - rv = run_wsgi_app(self.application, environ, buffered=buffered) - if self.cookie_jar is not None: - self.cookie_jar.extract_wsgi(environ, rv[2]) - return rv - - def resolve_redirect(self, response, new_location, environ, buffered=False): - """Resolves a single redirect and triggers the request again - directly on this redirect client. - """ - scheme, netloc, script_root, qs, anchor = url_parse(new_location) - base_url = url_unparse((scheme, netloc, '', '', '')).rstrip('/') + '/' - - cur_server_name = netloc.split(':', 1)[0].split('.') - real_server_name = get_host(environ).rsplit(':', 1)[0].split('.') - if cur_server_name == ['']: - # this is a local redirect having autocorrect_location_header=False - cur_server_name = real_server_name - base_url = EnvironBuilder(environ).base_url - - if self.allow_subdomain_redirects: - allowed = cur_server_name[-len(real_server_name):] == real_server_name - else: - allowed = cur_server_name == real_server_name - - if not allowed: - raise RuntimeError('%r does not support redirect to ' - 'external targets' % self.__class__) - - status_code = int(response[1].split(None, 1)[0]) - if status_code == 307: - method = environ['REQUEST_METHOD'] - else: - method = 'GET' - - # For redirect handling we temporarily disable the response - # wrapper. This is not threadsafe but not a real concern - # since the test client must not be shared anyways. - old_response_wrapper = self.response_wrapper - self.response_wrapper = None - try: - return self.open(path=script_root, base_url=base_url, - query_string=qs, as_tuple=True, - buffered=buffered, method=method) - finally: - self.response_wrapper = old_response_wrapper - - def open(self, *args, **kwargs): - """Takes the same arguments as the :class:`EnvironBuilder` class with - some additions: You can provide a :class:`EnvironBuilder` or a WSGI - environment as only argument instead of the :class:`EnvironBuilder` - arguments and two optional keyword arguments (`as_tuple`, `buffered`) - that change the type of the return value or the way the application is - executed. - - .. 
versionchanged:: 0.5 - If a dict is provided as file in the dict for the `data` parameter - the content type has to be called `content_type` now instead of - `mimetype`. This change was made for consistency with - :class:`werkzeug.FileWrapper`. - - The `follow_redirects` parameter was added to :func:`open`. - - Additional parameters: - - :param as_tuple: Returns a tuple in the form ``(environ, result)`` - :param buffered: Set this to True to buffer the application run. - This will automatically close the application for - you as well. - :param follow_redirects: Set this to True if the `Client` should - follow HTTP redirects. - """ - as_tuple = kwargs.pop('as_tuple', False) - buffered = kwargs.pop('buffered', False) - follow_redirects = kwargs.pop('follow_redirects', False) - environ = None - if not kwargs and len(args) == 1: - if isinstance(args[0], EnvironBuilder): - environ = args[0].get_environ() - elif isinstance(args[0], dict): - environ = args[0] - if environ is None: - builder = EnvironBuilder(*args, **kwargs) - try: - environ = builder.get_environ() - finally: - builder.close() - - response = self.run_wsgi_app(environ, buffered=buffered) - - # handle redirects - redirect_chain = [] - while 1: - status_code = int(response[1].split(None, 1)[0]) - if status_code not in (301, 302, 303, 305, 307) \ - or not follow_redirects: - break - new_location = response[2]['location'] - new_redirect_entry = (new_location, status_code) - if new_redirect_entry in redirect_chain: - raise ClientRedirectError('loop detected') - redirect_chain.append(new_redirect_entry) - environ, response = self.resolve_redirect(response, new_location, - environ, - buffered=buffered) - - if self.response_wrapper is not None: - response = self.response_wrapper(*response) - if as_tuple: - return environ, response - return response - - def get(self, *args, **kw): - """Like open but method is enforced to GET.""" - kw['method'] = 'GET' - return self.open(*args, **kw) - - def patch(self, *args, **kw): - """Like open but method is enforced to PATCH.""" - kw['method'] = 'PATCH' - return self.open(*args, **kw) - - def post(self, *args, **kw): - """Like open but method is enforced to POST.""" - kw['method'] = 'POST' - return self.open(*args, **kw) - - def head(self, *args, **kw): - """Like open but method is enforced to HEAD.""" - kw['method'] = 'HEAD' - return self.open(*args, **kw) - - def put(self, *args, **kw): - """Like open but method is enforced to PUT.""" - kw['method'] = 'PUT' - return self.open(*args, **kw) - - def delete(self, *args, **kw): - """Like open but method is enforced to DELETE.""" - kw['method'] = 'DELETE' - return self.open(*args, **kw) - - def options(self, *args, **kw): - """Like open but method is enforced to OPTIONS.""" - kw['method'] = 'OPTIONS' - return self.open(*args, **kw) - - def trace(self, *args, **kw): - """Like open but method is enforced to TRACE.""" - kw['method'] = 'TRACE' - return self.open(*args, **kw) - - def __repr__(self): - return '<%s %r>' % ( - self.__class__.__name__, - self.application - ) - - -def create_environ(*args, **kwargs): - """Create a new WSGI environ dict based on the values passed. The first - parameter should be the path of the request which defaults to '/'. The - second one can either be an absolute path (in that case the host is - localhost:80) or a full path to the request with scheme, netloc port and - the path to the script. - - This accepts the same arguments as the :class:`EnvironBuilder` - constructor. - - .. 
versionchanged:: 0.5 - This function is now a thin wrapper over :class:`EnvironBuilder` which - was added in 0.5. The `headers`, `environ_base`, `environ_overrides` - and `charset` parameters were added. - """ - builder = EnvironBuilder(*args, **kwargs) - try: - return builder.get_environ() - finally: - builder.close() - - -def run_wsgi_app(app, environ, buffered=False): - """Return a tuple in the form (app_iter, status, headers) of the - application output. This works best if you pass it an application that - returns an iterator all the time. - - Sometimes applications may use the `write()` callable returned - by the `start_response` function. This tries to resolve such edge - cases automatically. But if you don't get the expected output you - should set `buffered` to `True` which enforces buffering. - - If passed an invalid WSGI application the behavior of this function is - undefined. Never pass non-conforming WSGI applications to this function. - - :param app: the application to execute. - :param buffered: set to `True` to enforce buffering. - :return: tuple in the form ``(app_iter, status, headers)`` - """ - environ = _get_environ(environ) - response = [] - buffer = [] - - def start_response(status, headers, exc_info=None): - if exc_info is not None: - reraise(*exc_info) - response[:] = [status, headers] - return buffer.append - - app_rv = app(environ, start_response) - close_func = getattr(app_rv, 'close', None) - app_iter = iter(app_rv) - - # when buffering we emit the close call early and convert the - # application iterator into a regular list - if buffered: - try: - app_iter = list(app_iter) - finally: - if close_func is not None: - close_func() - - # otherwise we iterate the application iter until we have a response, chain - # the already received data with the already collected data and wrap it in - # a new `ClosingIterator` if we need to restore a `close` callable from the - # original return value. - else: - while not response: - buffer.append(next(app_iter)) - if buffer: - app_iter = chain(buffer, app_iter) - if close_func is not None and app_iter is not app_rv: - app_iter = ClosingIterator(app_iter, close_func) - - return app_iter, response[0], Headers(response[1]) diff --git a/pyextra/werkzeug/testapp.py b/pyextra/werkzeug/testapp.py deleted file mode 100644 index 595555a09..000000000 --- a/pyextra/werkzeug/testapp.py +++ /dev/null @@ -1,230 +0,0 @@ -# -*- coding: utf-8 -*- -""" - werkzeug.testapp - ~~~~~~~~~~~~~~~~ - - Provide a small test application that can be used to test a WSGI server - and check it for WSGI compliance. - - :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details. - :license: BSD, see LICENSE for more details. 
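
    The application can also be exercised without starting a server, using
    the test helpers shown above (an illustrative sketch)::

        from werkzeug.test import create_environ, run_wsgi_app
        from werkzeug.testapp import test_app

        environ = create_environ('/', 'http://localhost:5000/')
        app_iter, status, headers = run_wsgi_app(test_app, environ)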
-""" -import os -import sys -import werkzeug -from textwrap import wrap -from werkzeug.wrappers import BaseRequest as Request, BaseResponse as Response -from werkzeug.utils import escape -import base64 - -logo = Response(base64.b64decode(''' -R0lGODlhoACgAOMIAAEDACwpAEpCAGdgAJaKAM28AOnVAP3rAP///////// -//////////////////////yH5BAEKAAgALAAAAACgAKAAAAT+EMlJq704680R+F0ojmRpnuj0rWnrv -nB8rbRs33gu0bzu/0AObxgsGn3D5HHJbCUFyqZ0ukkSDlAidctNFg7gbI9LZlrBaHGtzAae0eloe25 -7w9EDOX2fst/xenyCIn5/gFqDiVVDV4aGeYiKkhSFjnCQY5OTlZaXgZp8nJ2ekaB0SQOjqphrpnOiq -ncEn65UsLGytLVmQ6m4sQazpbtLqL/HwpnER8bHyLrLOc3Oz8PRONPU1crXN9na263dMt/g4SzjMeX -m5yDpLqgG7OzJ4u8lT/P69ej3JPn69kHzN2OIAHkB9RUYSFCFQYQJFTIkCDBiwoXWGnowaLEjRm7+G -p9A7Hhx4rUkAUaSLJlxHMqVMD/aSycSZkyTplCqtGnRAM5NQ1Ly5OmzZc6gO4d6DGAUKA+hSocWYAo -SlM6oUWX2O/o0KdaVU5vuSQLAa0ADwQgMEMB2AIECZhVSnTno6spgbtXmHcBUrQACcc2FrTrWS8wAf -78cMFBgwIBgbN+qvTt3ayikRBk7BoyGAGABAdYyfdzRQGV3l4coxrqQ84GpUBmrdR3xNIDUPAKDBSA -ADIGDhhqTZIWaDcrVX8EsbNzbkvCOxG8bN5w8ly9H8jyTJHC6DFndQydbguh2e/ctZJFXRxMAqqPVA -tQH5E64SPr1f0zz7sQYjAHg0In+JQ11+N2B0XXBeeYZgBZFx4tqBToiTCPv0YBgQv8JqA6BEf6RhXx -w1ENhRBnWV8ctEX4Ul2zc3aVGcQNC2KElyTDYyYUWvShdjDyMOGMuFjqnII45aogPhz/CodUHFwaDx -lTgsaOjNyhGWJQd+lFoAGk8ObghI0kawg+EV5blH3dr+digkYuAGSaQZFHFz2P/cTaLmhF52QeSb45 -Jwxd+uSVGHlqOZpOeJpCFZ5J+rkAkFjQ0N1tah7JJSZUFNsrkeJUJMIBi8jyaEKIhKPomnC91Uo+NB -yyaJ5umnnpInIFh4t6ZSpGaAVmizqjpByDegYl8tPE0phCYrhcMWSv+uAqHfgH88ak5UXZmlKLVJhd -dj78s1Fxnzo6yUCrV6rrDOkluG+QzCAUTbCwf9SrmMLzK6p+OPHx7DF+bsfMRq7Ec61Av9i6GLw23r -idnZ+/OO0a99pbIrJkproCQMA17OPG6suq3cca5ruDfXCCDoS7BEdvmJn5otdqscn+uogRHHXs8cbh -EIfYaDY1AkrC0cqwcZpnM6ludx72x0p7Fo/hZAcpJDjax0UdHavMKAbiKltMWCF3xxh9k25N/Viud8 -ba78iCvUkt+V6BpwMlErmcgc502x+u1nSxJSJP9Mi52awD1V4yB/QHONsnU3L+A/zR4VL/indx/y64 -gqcj+qgTeweM86f0Qy1QVbvmWH1D9h+alqg254QD8HJXHvjQaGOqEqC22M54PcftZVKVSQG9jhkv7C -JyTyDoAJfPdu8v7DRZAxsP/ky9MJ3OL36DJfCFPASC3/aXlfLOOON9vGZZHydGf8LnxYJuuVIbl83y -Az5n/RPz07E+9+zw2A2ahz4HxHo9Kt79HTMx1Q7ma7zAzHgHqYH0SoZWyTuOLMiHwSfZDAQTn0ajk9 -YQqodnUYjByQZhZak9Wu4gYQsMyEpIOAOQKze8CmEF45KuAHTvIDOfHJNipwoHMuGHBnJElUoDmAyX -c2Qm/R8Ah/iILCCJOEokGowdhDYc/yoL+vpRGwyVSCWFYZNljkhEirGXsalWcAgOdeAdoXcktF2udb -qbUhjWyMQxYO01o6KYKOr6iK3fE4MaS+DsvBsGOBaMb0Y6IxADaJhFICaOLmiWTlDAnY1KzDG4ambL -cWBA8mUzjJsN2KjSaSXGqMCVXYpYkj33mcIApyhQf6YqgeNAmNvuC0t4CsDbSshZJkCS1eNisKqlyG -cF8G2JeiDX6tO6Mv0SmjCa3MFb0bJaGPMU0X7c8XcpvMaOQmCajwSeY9G0WqbBmKv34DsMIEztU6Y2 -KiDlFdt6jnCSqx7Dmt6XnqSKaFFHNO5+FmODxMCWBEaco77lNDGXBM0ECYB/+s7nKFdwSF5hgXumQe -EZ7amRg39RHy3zIjyRCykQh8Zo2iviRKyTDn/zx6EefptJj2Cw+Ep2FSc01U5ry4KLPYsTyWnVGnvb -UpyGlhjBUljyjHhWpf8OFaXwhp9O4T1gU9UeyPPa8A2l0p1kNqPXEVRm1AOs1oAGZU596t6SOR2mcB -Oco1srWtkaVrMUzIErrKri85keKqRQYX9VX0/eAUK1hrSu6HMEX3Qh2sCh0q0D2CtnUqS4hj62sE/z -aDs2Sg7MBS6xnQeooc2R2tC9YrKpEi9pLXfYXp20tDCpSP8rKlrD4axprb9u1Df5hSbz9QU0cRpfgn -kiIzwKucd0wsEHlLpe5yHXuc6FrNelOl7pY2+11kTWx7VpRu97dXA3DO1vbkhcb4zyvERYajQgAADs -='''), mimetype='image/png') - - -TEMPLATE = u'''\ - -WSGI Information - -
WSGI Information

  This page displays all available information about the WSGI server and
  the underlying Python interpreter.

Python Interpreter

  Python Version:    %(python_version)s
  Platform:          %(platform)s [%(os)s]
  API Version:       %(api_version)s
  Byteorder:         %(byteorder)s
  Werkzeug Version:  %(werkzeug_version)s

WSGI Environment

  %(wsgi_env)s

Installed Eggs

  The following python packages were installed on the system as
  Python eggs:

    %(python_eggs)s

System Path

  The following paths are the current contents of the load path. The
  following entries are looked up for Python packages. Note that not
  all items in this path are folders. Gray and underlined items are
  entries pointing to invalid resources or used by custom import hooks
  such as the zip importer.

  Items with a bright background were expanded for display from a relative
  path. If you encounter such paths in the output you might want to check
  your setup as relative paths are usually problematic in multithreaded
  environments.

    %(sys_path)s
-''' - - -def iter_sys_path(): - if os.name == 'posix': - def strip(x): - prefix = os.path.expanduser('~') - if x.startswith(prefix): - x = '~' + x[len(prefix):] - return x - else: - strip = lambda x: x - - cwd = os.path.abspath(os.getcwd()) - for item in sys.path: - path = os.path.join(cwd, item or os.path.curdir) - yield strip(os.path.normpath(path)), \ - not os.path.isdir(path), path != item - - -def render_testapp(req): - try: - import pkg_resources - except ImportError: - eggs = () - else: - eggs = sorted(pkg_resources.working_set, - key=lambda x: x.project_name.lower()) - python_eggs = [] - for egg in eggs: - try: - version = egg.version - except (ValueError, AttributeError): - version = 'unknown' - python_eggs.append('<li>%s <small>[%s]</small>' % (
- escape(egg.project_name), - escape(version) - )) - - wsgi_env = [] - sorted_environ = sorted(req.environ.items(), - key=lambda x: repr(x[0]).lower()) - for key, value in sorted_environ: - wsgi_env.append('<tr><th>%s<td><code class="inline">%s</code>' % ( - escape(str(key)), - ' '.join(wrap(escape(repr(value)))) - )) - - sys_path = [] - for item, virtual, expanded in iter_sys_path(): - class_ = [] - if virtual: - class_.append('virtual') - if expanded: - class_.append('exp') - sys_path.append('<li%s>%s' % ( - class_ and ' class="%s"' % ' '.join(class_) or '', - escape(item) - )) - - return (TEMPLATE % { - 'python_version': '<br>
    '.join(escape(sys.version).splitlines()), - 'platform': escape(sys.platform), - 'os': escape(os.name), - 'api_version': sys.api_version, - 'byteorder': sys.byteorder, - 'werkzeug_version': werkzeug.__version__, - 'python_eggs': '\n'.join(python_eggs), - 'wsgi_env': '\n'.join(wsgi_env), - 'sys_path': '\n'.join(sys_path) - }).encode('utf-8') - - -def test_app(environ, start_response): - """Simple test application that dumps the environment. You can use - it to check if Werkzeug is working properly: - - .. sourcecode:: pycon - - >>> from werkzeug.serving import run_simple - >>> from werkzeug.testapp import test_app - >>> run_simple('localhost', 3000, test_app) - * Running on http://localhost:3000/ - - The application displays important information from the WSGI environment, - the Python interpreter and the installed libraries. - """ - req = Request(environ, populate_request=False) - if req.args.get('resource') == 'logo': - response = logo - else: - response = Response(render_testapp(req), mimetype='text/html') - return response(environ, start_response) - - -if __name__ == '__main__': - from werkzeug.serving import run_simple - run_simple('localhost', 5000, test_app, use_reloader=True) diff --git a/pyextra/werkzeug/urls.py b/pyextra/werkzeug/urls.py deleted file mode 100644 index 5bd9a40d8..000000000 --- a/pyextra/werkzeug/urls.py +++ /dev/null @@ -1,1007 +0,0 @@ -# -*- coding: utf-8 -*- -""" - werkzeug.urls - ~~~~~~~~~~~~~ - - ``werkzeug.urls`` used to provide several wrapper functions for Python 2 - urlparse, whose main purpose were to work around the behavior of the Py2 - stdlib and its lack of unicode support. While this was already a somewhat - inconvenient situation, it got even more complicated because Python 3's - ``urllib.parse`` actually does handle unicode properly. In other words, - this module would wrap two libraries with completely different behavior. So - now this module contains a 2-and-3-compatible backport of Python 3's - ``urllib.parse``, which is mostly API-compatible. - - :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details. - :license: BSD, see LICENSE for more details. -""" -import os -import re -from werkzeug._compat import text_type, PY2, to_unicode, \ - to_native, implements_to_string, try_coerce_native, \ - normalize_string_tuple, make_literal_wrapper, \ - fix_tuple_repr -from werkzeug._internal import _encode_idna, _decode_idna -from werkzeug.datastructures import MultiDict, iter_multi_items -from collections import namedtuple - - -# A regular expression for what a valid schema looks like -_scheme_re = re.compile(r'^[a-zA-Z0-9+-.]+$') - -# Characters that are safe in any part of an URL. -_always_safe = (b'abcdefghijklmnopqrstuvwxyz' - b'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_.-+') - -_hexdigits = '0123456789ABCDEFabcdef' -_hextobyte = dict( - ((a + b).encode(), int(a + b, 16)) - for a in _hexdigits for b in _hexdigits -) -_bytetohex = [ - ('%%%02X' % char).encode('ascii') for char in range(256) -] - - -_URLTuple = fix_tuple_repr(namedtuple( - '_URLTuple', - ['scheme', 'netloc', 'path', 'query', 'fragment'] -)) - - -class BaseURL(_URLTuple): - - '''Superclass of :py:class:`URL` and :py:class:`BytesURL`.''' - __slots__ = () - - def replace(self, **kwargs): - """Return an URL with the same values, except for those parameters - given new values by whichever keyword arguments are specified.""" - return self._replace(**kwargs) - - @property - def host(self): - """The host part of the URL if available, otherwise `None`. 
The - host is either the hostname or the IP address mentioned in the - URL. It will not contain the port. - """ - return self._split_host()[0] - - @property - def ascii_host(self): - """Works exactly like :attr:`host` but will return a result that - is restricted to ASCII. If it finds a netloc that is not ASCII - it will attempt to idna decode it. This is useful for socket - operations when the URL might include internationalized characters. - """ - rv = self.host - if rv is not None and isinstance(rv, text_type): - try: - rv = _encode_idna(rv) - except UnicodeError: - rv = rv.encode('ascii', 'ignore') - return to_native(rv, 'ascii', 'ignore') - - @property - def port(self): - """The port in the URL as an integer if it was present, `None` - otherwise. This does not fill in default ports. - """ - try: - rv = int(to_native(self._split_host()[1])) - if 0 <= rv <= 65535: - return rv - except (ValueError, TypeError): - pass - - @property - def auth(self): - """The authentication part in the URL if available, `None` - otherwise. - """ - return self._split_netloc()[0] - - @property - def username(self): - """The username if it was part of the URL, `None` otherwise. - This undergoes URL decoding and will always be a unicode string. - """ - rv = self._split_auth()[0] - if rv is not None: - return _url_unquote_legacy(rv) - - @property - def raw_username(self): - """The username if it was part of the URL, `None` otherwise. - Unlike :attr:`username` this one is not being decoded. - """ - return self._split_auth()[0] - - @property - def password(self): - """The password if it was part of the URL, `None` otherwise. - This undergoes URL decoding and will always be a unicode string. - """ - rv = self._split_auth()[1] - if rv is not None: - return _url_unquote_legacy(rv) - - @property - def raw_password(self): - """The password if it was part of the URL, `None` otherwise. - Unlike :attr:`password` this one is not being decoded. - """ - return self._split_auth()[1] - - def decode_query(self, *args, **kwargs): - """Decodes the query part of the URL. Ths is a shortcut for - calling :func:`url_decode` on the query argument. The arguments and - keyword arguments are forwarded to :func:`url_decode` unchanged. - """ - return url_decode(self.query, *args, **kwargs) - - def join(self, *args, **kwargs): - """Joins this URL with another one. This is just a convenience - function for calling into :meth:`url_join` and then parsing the - return value again. - """ - return url_parse(url_join(self, *args, **kwargs)) - - def to_url(self): - """Returns a URL string or bytes depending on the type of the - information stored. This is just a convenience function - for calling :meth:`url_unparse` for this URL. - """ - return url_unparse(self) - - def decode_netloc(self): - """Decodes the netloc part into a string.""" - rv = _decode_idna(self.host or '') - - if ':' in rv: - rv = '[%s]' % rv - port = self.port - if port is not None: - rv = '%s:%d' % (rv, port) - auth = ':'.join(filter(None, [ - _url_unquote_legacy(self.raw_username or '', '/:%@'), - _url_unquote_legacy(self.raw_password or '', '/:%@'), - ])) - if auth: - rv = '%s@%s' % (auth, rv) - return rv - - def to_uri_tuple(self): - """Returns a :class:`BytesURL` tuple that holds a URI. This will - encode all the information in the URL properly to ASCII using the - rules a web browser would follow. - - It's usually more interesting to directly call :meth:`iri_to_uri` which - will return a string. 
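
        For instance (an illustrative sketch; the punycode form matches the
        examples used elsewhere in this module)::

            url_parse(u'http://\u2603.net/path').to_uri_tuple()
            # -> a BytesURL whose netloc is the ASCII punycode b'xn--n3h.net'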
- """ - return url_parse(iri_to_uri(self).encode('ascii')) - - def to_iri_tuple(self): - """Returns a :class:`URL` tuple that holds a IRI. This will try - to decode as much information as possible in the URL without - losing information similar to how a web browser does it for the - URL bar. - - It's usually more interesting to directly call :meth:`uri_to_iri` which - will return a string. - """ - return url_parse(uri_to_iri(self)) - - def get_file_location(self, pathformat=None): - """Returns a tuple with the location of the file in the form - ``(server, location)``. If the netloc is empty in the URL or - points to localhost, it's represented as ``None``. - - The `pathformat` by default is autodetection but needs to be set - when working with URLs of a specific system. The supported values - are ``'windows'`` when working with Windows or DOS paths and - ``'posix'`` when working with posix paths. - - If the URL does not point to to a local file, the server and location - are both represented as ``None``. - - :param pathformat: The expected format of the path component. - Currently ``'windows'`` and ``'posix'`` are - supported. Defaults to ``None`` which is - autodetect. - """ - if self.scheme != 'file': - return None, None - - path = url_unquote(self.path) - host = self.netloc or None - - if pathformat is None: - if os.name == 'nt': - pathformat = 'windows' - else: - pathformat = 'posix' - - if pathformat == 'windows': - if path[:1] == '/' and path[1:2].isalpha() and path[2:3] in '|:': - path = path[1:2] + ':' + path[3:] - windows_share = path[:3] in ('\\' * 3, '/' * 3) - import ntpath - path = ntpath.normpath(path) - # Windows shared drives are represented as ``\\host\\directory``. - # That results in a URL like ``file://///host/directory``, and a - # path like ``///host/directory``. We need to special-case this - # because the path contains the hostname. - if windows_share and host is None: - parts = path.lstrip('\\').split('\\', 1) - if len(parts) == 2: - host, path = parts - else: - host = parts[0] - path = '' - elif pathformat == 'posix': - import posixpath - path = posixpath.normpath(path) - else: - raise TypeError('Invalid path format %s' % repr(pathformat)) - - if host in ('127.0.0.1', '::1', 'localhost'): - host = None - - return host, path - - def _split_netloc(self): - if self._at in self.netloc: - return self.netloc.split(self._at, 1) - return None, self.netloc - - def _split_auth(self): - auth = self._split_netloc()[0] - if not auth: - return None, None - if self._colon not in auth: - return auth, None - return auth.split(self._colon, 1) - - def _split_host(self): - rv = self._split_netloc()[1] - if not rv: - return None, None - - if not rv.startswith(self._lbracket): - if self._colon in rv: - return rv.split(self._colon, 1) - return rv, None - - idx = rv.find(self._rbracket) - if idx < 0: - return rv, None - - host = rv[1:idx] - rest = rv[idx + 1:] - if rest.startswith(self._colon): - return host, rest[1:] - return host, None - - -@implements_to_string -class URL(BaseURL): - - """Represents a parsed URL. This behaves like a regular tuple but - also has some extra attributes that give further insight into the - URL. 
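
    For example (an illustrative sketch)::

        url = url_parse(u'http://user:pw@example.com:8080/page?q=1#top')
        url.host                  # u'example.com'
        url.port                  # 8080
        url.decode_query()['q']   # u'1'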
- """ - __slots__ = () - _at = '@' - _colon = ':' - _lbracket = '[' - _rbracket = ']' - - def __str__(self): - return self.to_url() - - def encode_netloc(self): - """Encodes the netloc part to an ASCII safe URL as bytes.""" - rv = self.ascii_host or '' - if ':' in rv: - rv = '[%s]' % rv - port = self.port - if port is not None: - rv = '%s:%d' % (rv, port) - auth = ':'.join(filter(None, [ - url_quote(self.raw_username or '', 'utf-8', 'strict', '/:%'), - url_quote(self.raw_password or '', 'utf-8', 'strict', '/:%'), - ])) - if auth: - rv = '%s@%s' % (auth, rv) - return to_native(rv) - - def encode(self, charset='utf-8', errors='replace'): - """Encodes the URL to a tuple made out of bytes. The charset is - only being used for the path, query and fragment. - """ - return BytesURL( - self.scheme.encode('ascii'), - self.encode_netloc(), - self.path.encode(charset, errors), - self.query.encode(charset, errors), - self.fragment.encode(charset, errors) - ) - - -class BytesURL(BaseURL): - - """Represents a parsed URL in bytes.""" - __slots__ = () - _at = b'@' - _colon = b':' - _lbracket = b'[' - _rbracket = b']' - - def __str__(self): - return self.to_url().decode('utf-8', 'replace') - - def encode_netloc(self): - """Returns the netloc unchanged as bytes.""" - return self.netloc - - def decode(self, charset='utf-8', errors='replace'): - """Decodes the URL to a tuple made out of strings. The charset is - only being used for the path, query and fragment. - """ - return URL( - self.scheme.decode('ascii'), - self.decode_netloc(), - self.path.decode(charset, errors), - self.query.decode(charset, errors), - self.fragment.decode(charset, errors) - ) - - -def _unquote_to_bytes(string, unsafe=''): - if isinstance(string, text_type): - string = string.encode('utf-8') - if isinstance(unsafe, text_type): - unsafe = unsafe.encode('utf-8') - unsafe = frozenset(bytearray(unsafe)) - bits = iter(string.split(b'%')) - result = bytearray(next(bits, b'')) - for item in bits: - try: - char = _hextobyte[item[:2]] - if char in unsafe: - raise KeyError() - result.append(char) - result.extend(item[2:]) - except KeyError: - result.extend(b'%') - result.extend(item) - return bytes(result) - - -def _url_encode_impl(obj, charset, encode_keys, sort, key): - iterable = iter_multi_items(obj) - if sort: - iterable = sorted(iterable, key=key) - for key, value in iterable: - if value is None: - continue - if not isinstance(key, bytes): - key = text_type(key).encode(charset) - if not isinstance(value, bytes): - value = text_type(value).encode(charset) - yield url_quote_plus(key) + '=' + url_quote_plus(value) - - -def _url_unquote_legacy(value, unsafe=''): - try: - return url_unquote(value, charset='utf-8', - errors='strict', unsafe=unsafe) - except UnicodeError: - return url_unquote(value, charset='latin1', unsafe=unsafe) - - -def url_parse(url, scheme=None, allow_fragments=True): - """Parses a URL from a string into a :class:`URL` tuple. If the URL - is lacking a scheme it can be provided as second argument. Otherwise, - it is ignored. Optionally fragments can be stripped from the URL - by setting `allow_fragments` to `False`. - - The inverse of this function is :func:`url_unparse`. - - :param url: the URL to parse. - :param scheme: the default schema to use if the URL is schemaless. - :param allow_fragments: if set to `False` a fragment will be removed - from the URL. 
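
    A short illustrative sketch::

        parts = url_parse(u'https://example.com/a/b?x=1#top')
        url_unparse(parts)                   # round-trips to the input string
        parts.replace(path=u'/c').to_url()   # u'https://example.com/c?x=1#top'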
- """ - s = make_literal_wrapper(url) - is_text_based = isinstance(url, text_type) - - if scheme is None: - scheme = s('') - netloc = query = fragment = s('') - i = url.find(s(':')) - if i > 0 and _scheme_re.match(to_native(url[:i], errors='replace')): - # make sure "iri" is not actually a port number (in which case - # "scheme" is really part of the path) - rest = url[i + 1:] - if not rest or any(c not in s('0123456789') for c in rest): - # not a port number - scheme, url = url[:i].lower(), rest - - if url[:2] == s('//'): - delim = len(url) - for c in s('/?#'): - wdelim = url.find(c, 2) - if wdelim >= 0: - delim = min(delim, wdelim) - netloc, url = url[2:delim], url[delim:] - if (s('[') in netloc and s(']') not in netloc) or \ - (s(']') in netloc and s('[') not in netloc): - raise ValueError('Invalid IPv6 URL') - - if allow_fragments and s('#') in url: - url, fragment = url.split(s('#'), 1) - if s('?') in url: - url, query = url.split(s('?'), 1) - - result_type = is_text_based and URL or BytesURL - return result_type(scheme, netloc, url, query, fragment) - - -def url_quote(string, charset='utf-8', errors='strict', safe='/:', unsafe=''): - """URL encode a single string with a given encoding. - - :param s: the string to quote. - :param charset: the charset to be used. - :param safe: an optional sequence of safe characters. - :param unsafe: an optional sequence of unsafe characters. - - .. versionadded:: 0.9.2 - The `unsafe` parameter was added. - """ - if not isinstance(string, (text_type, bytes, bytearray)): - string = text_type(string) - if isinstance(string, text_type): - string = string.encode(charset, errors) - if isinstance(safe, text_type): - safe = safe.encode(charset, errors) - if isinstance(unsafe, text_type): - unsafe = unsafe.encode(charset, errors) - safe = frozenset(bytearray(safe) + _always_safe) - frozenset(bytearray(unsafe)) - rv = bytearray() - for char in bytearray(string): - if char in safe: - rv.append(char) - else: - rv.extend(_bytetohex[char]) - return to_native(bytes(rv)) - - -def url_quote_plus(string, charset='utf-8', errors='strict', safe=''): - """URL encode a single string with the given encoding and convert - whitespace to "+". - - :param s: The string to quote. - :param charset: The charset to be used. - :param safe: An optional sequence of safe characters. - """ - return url_quote(string, charset, errors, safe + ' ', '+').replace(' ', '+') - - -def url_unparse(components): - """The reverse operation to :meth:`url_parse`. This accepts arbitrary - as well as :class:`URL` tuples and returns a URL as a string. - - :param components: the parsed URL as tuple which should be converted - into a URL string. - """ - scheme, netloc, path, query, fragment = \ - normalize_string_tuple(components) - s = make_literal_wrapper(scheme) - url = s('') - - # We generally treat file:///x and file:/x the same which is also - # what browsers seem to do. This also allows us to ignore a schema - # register for netloc utilization or having to differenciate between - # empty and missing netloc. - if netloc or (scheme and path.startswith(s('/'))): - if path and path[:1] != s('/'): - path = s('/') + path - url = s('//') + (netloc or s('')) + path - elif path: - url += path - if scheme: - url = scheme + s(':') + url - if query: - url = url + s('?') + query - if fragment: - url = url + s('#') + fragment - return url - - -def url_unquote(string, charset='utf-8', errors='replace', unsafe=''): - """URL decode a single string with a given encoding. 
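
    For example (illustrative)::

        url_unquote('f%C3%BCr%20Elise')                # -> u'f\xfcr Elise'
        url_unquote('f%C3%BCr%20Elise', charset=None)  # -> b'f\xc3\xbcr Elise'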
If the charset - is set to `None` no unicode decoding is performed and raw bytes - are returned. - - :param s: the string to unquote. - :param charset: the charset of the query string. If set to `None` - no unicode decoding will take place. - :param errors: the error handling for the charset decoding. - """ - rv = _unquote_to_bytes(string, unsafe) - if charset is not None: - rv = rv.decode(charset, errors) - return rv - - -def url_unquote_plus(s, charset='utf-8', errors='replace'): - """URL decode a single string with the given `charset` and decode "+" to - whitespace. - - Per default encoding errors are ignored. If you want a different behavior - you can set `errors` to ``'replace'`` or ``'strict'``. In strict mode a - :exc:`HTTPUnicodeError` is raised. - - :param s: The string to unquote. - :param charset: the charset of the query string. If set to `None` - no unicode decoding will take place. - :param errors: The error handling for the `charset` decoding. - """ - if isinstance(s, text_type): - s = s.replace(u'+', u' ') - else: - s = s.replace(b'+', b' ') - return url_unquote(s, charset, errors) - - -def url_fix(s, charset='utf-8'): - r"""Sometimes you get an URL by a user that just isn't a real URL because - it contains unsafe characters like ' ' and so on. This function can fix - some of the problems in a similar way browsers handle data entered by the - user: - - >>> url_fix(u'http://de.wikipedia.org/wiki/Elf (Begriffskl\xe4rung)') - 'http://de.wikipedia.org/wiki/Elf%20(Begriffskl%C3%A4rung)' - - :param s: the string with the URL to fix. - :param charset: The target charset for the URL if the url was given as - unicode string. - """ - # First step is to switch to unicode processing and to convert - # backslashes (which are invalid in URLs anyways) to slashes. This is - # consistent with what Chrome does. - s = to_unicode(s, charset, 'replace').replace('\\', '/') - - # For the specific case that we look like a malformed windows URL - # we want to fix this up manually: - if s.startswith('file://') and s[7:8].isalpha() and s[8:10] in (':/', '|/'): - s = 'file:///' + s[7:] - - url = url_parse(s) - path = url_quote(url.path, charset, safe='/%+$!*\'(),') - qs = url_quote_plus(url.query, charset, safe=':&%=+$!*\'(),') - anchor = url_quote_plus(url.fragment, charset, safe=':&%=+$!*\'(),') - return to_native(url_unparse((url.scheme, url.encode_netloc(), - path, qs, anchor))) - - -def uri_to_iri(uri, charset='utf-8', errors='replace'): - r""" - Converts a URI in a given charset to a IRI. - - Examples for URI versus IRI: - - >>> uri_to_iri(b'http://xn--n3h.net/') - u'http://\u2603.net/' - >>> uri_to_iri(b'http://%C3%BCser:p%C3%A4ssword@xn--n3h.net/p%C3%A5th') - u'http://\xfcser:p\xe4ssword@\u2603.net/p\xe5th' - - Query strings are left unchanged: - - >>> uri_to_iri('/?foo=24&x=%26%2f') - u'/?foo=24&x=%26%2f' - - .. versionadded:: 0.6 - - :param uri: The URI to convert. - :param charset: The charset of the URI. - :param errors: The error handling on decode. - """ - if isinstance(uri, tuple): - uri = url_unparse(uri) - uri = url_parse(to_unicode(uri, charset)) - path = url_unquote(uri.path, charset, errors, '%/;?') - query = url_unquote(uri.query, charset, errors, '%;/?:@&=+,$#') - fragment = url_unquote(uri.fragment, charset, errors, '%;/?:@&=+,$#') - return url_unparse((uri.scheme, uri.decode_netloc(), - path, query, fragment)) - - -def iri_to_uri(iri, charset='utf-8', errors='strict', safe_conversion=False): - r""" - Converts any unicode based IRI to an acceptable ASCII URI. 
Werkzeug always - uses utf-8 URLs internally because this is what browsers and HTTP do as - well. In some places where it accepts an URL it also accepts a unicode IRI - and converts it into a URI. - - Examples for IRI versus URI: - - >>> iri_to_uri(u'http://☃.net/') - 'http://xn--n3h.net/' - >>> iri_to_uri(u'http://üser:pässword@☃.net/påth') - 'http://%C3%BCser:p%C3%A4ssword@xn--n3h.net/p%C3%A5th' - - There is a general problem with IRI and URI conversion with some - protocols that appear in the wild that are in violation of the URI - specification. In places where Werkzeug goes through a forced IRI to - URI conversion it will set the `safe_conversion` flag which will - not perform a conversion if the end result is already ASCII. This - can mean that the return value is not an entirely correct URI but - it will not destroy such invalid URLs in the process. - - As an example consider the following two IRIs:: - - magnet:?xt=uri:whatever - itms-services://?action=download-manifest - - The internal representation after parsing of those URLs is the same - and there is no way to reconstruct the original one. If safe - conversion is enabled however this function becomes a noop for both of - those strings as they both can be considered URIs. - - .. versionadded:: 0.6 - - .. versionchanged:: 0.9.6 - The `safe_conversion` parameter was added. - - :param iri: The IRI to convert. - :param charset: The charset for the URI. - :param safe_conversion: indicates if a safe conversion should take place. - For more information see the explanation above. - """ - if isinstance(iri, tuple): - iri = url_unparse(iri) - - if safe_conversion: - try: - native_iri = to_native(iri) - ascii_iri = to_native(iri).encode('ascii') - if ascii_iri.split() == [ascii_iri]: - return native_iri - except UnicodeError: - pass - - iri = url_parse(to_unicode(iri, charset, errors)) - - netloc = iri.encode_netloc() - path = url_quote(iri.path, charset, errors, '/:~+%') - query = url_quote(iri.query, charset, errors, '%&[]:;$*()+,!?*/=') - fragment = url_quote(iri.fragment, charset, errors, '=%&[]:;$()+,!?*/') - - return to_native(url_unparse((iri.scheme, netloc, - path, query, fragment))) - - -def url_decode(s, charset='utf-8', decode_keys=False, include_empty=True, - errors='replace', separator='&', cls=None): - """ - Parse a querystring and return it as :class:`MultiDict`. There is a - difference in key decoding on different Python versions. On Python 3 - keys will always be fully decoded whereas on Python 2, keys will - remain bytestrings if they fit into ASCII. On 2.x keys can be forced - to be unicode by setting `decode_keys` to `True`. - - If the charset is set to `None` no unicode decoding will happen and - raw bytes will be returned. - - Per default a missing value for a key will default to an empty key. If - you don't want that behavior you can set `include_empty` to `False`. - - Per default encoding errors are ignored. If you want a different behavior - you can set `errors` to ``'replace'`` or ``'strict'``. In strict mode a - `HTTPUnicodeError` is raised. - - .. versionchanged:: 0.5 - In previous versions ";" and "&" could be used for url decoding. - This changed in 0.5 where only "&" is supported. If you want to - use ";" instead a different `separator` can be provided. - - The `cls` parameter was added. - - :param s: a string with the query string to decode. - :param charset: the charset of the query string. If set to `None` - no unicode decoding will take place. 
- :param decode_keys: Used on Python 2.x to control whether keys should - be forced to be unicode objects. If set to `True` - then keys will be unicode in all cases. Otherwise, - they remain `str` if they fit into ASCII. - :param include_empty: Set to `False` if you don't want empty values to - appear in the dict. - :param errors: the decoding error behavior. - :param separator: the pair separator to be used, defaults to ``&`` - :param cls: an optional dict class to use. If this is not specified - or `None` the default :class:`MultiDict` is used. - """ - if cls is None: - cls = MultiDict - if isinstance(s, text_type) and not isinstance(separator, text_type): - separator = separator.decode(charset or 'ascii') - elif isinstance(s, bytes) and not isinstance(separator, bytes): - separator = separator.encode(charset or 'ascii') - return cls(_url_decode_impl(s.split(separator), charset, decode_keys, - include_empty, errors)) - - -def url_decode_stream(stream, charset='utf-8', decode_keys=False, - include_empty=True, errors='replace', separator='&', - cls=None, limit=None, return_iterator=False): - """Works like :func:`url_decode` but decodes a stream. The behavior - of stream and limit follows functions like - :func:`~werkzeug.wsgi.make_line_iter`. The generator of pairs is - directly fed to the `cls` so you can consume the data while it's - parsed. - - .. versionadded:: 0.8 - - :param stream: a stream with the encoded querystring - :param charset: the charset of the query string. If set to `None` - no unicode decoding will take place. - :param decode_keys: Used on Python 2.x to control whether keys should - be forced to be unicode objects. If set to `True`, - keys will be unicode in all cases. Otherwise, they - remain `str` if they fit into ASCII. - :param include_empty: Set to `False` if you don't want empty values to - appear in the dict. - :param errors: the decoding error behavior. - :param separator: the pair separator to be used, defaults to ``&`` - :param cls: an optional dict class to use. If this is not specified - or `None` the default :class:`MultiDict` is used. - :param limit: the content length of the URL data. Not necessary if - a limited stream is provided. - :param return_iterator: if set to `True` the `cls` argument is ignored - and an iterator over all decoded pairs is - returned - """ - from werkzeug.wsgi import make_chunk_iter - if return_iterator: - cls = lambda x: x - elif cls is None: - cls = MultiDict - pair_iter = make_chunk_iter(stream, separator, limit) - return cls(_url_decode_impl(pair_iter, charset, decode_keys, - include_empty, errors)) - - -def _url_decode_impl(pair_iter, charset, decode_keys, include_empty, errors): - for pair in pair_iter: - if not pair: - continue - s = make_literal_wrapper(pair) - equal = s('=') - if equal in pair: - key, value = pair.split(equal, 1) - else: - if not include_empty: - continue - key = pair - value = s('') - key = url_unquote_plus(key, charset, errors) - if charset is not None and PY2 and not decode_keys: - key = try_coerce_native(key) - yield key, url_unquote_plus(value, charset, errors) - - -def url_encode(obj, charset='utf-8', encode_keys=False, sort=False, key=None, - separator=b'&'): - """URL encode a dict/`MultiDict`. If a value is `None` it will not appear - in the result string. Per default only values are encoded into the target - charset strings. If `encode_keys` is set to ``True`` unicode keys are - supported too. - - If `sort` is set to `True` the items are sorted by `key` or the default - sorting algorithm. - - .. 
versionadded:: 0.5 - `sort`, `key`, and `separator` were added. - - :param obj: the object to encode into a query string. - :param charset: the charset of the query string. - :param encode_keys: set to `True` if you have unicode keys. (Ignored on - Python 3.x) - :param sort: set to `True` if you want parameters to be sorted by `key`. - :param separator: the separator to be used for the pairs. - :param key: an optional function to be used for sorting. For more details - check out the :func:`sorted` documentation. - """ - separator = to_native(separator, 'ascii') - return separator.join(_url_encode_impl(obj, charset, encode_keys, sort, key)) - - -def url_encode_stream(obj, stream=None, charset='utf-8', encode_keys=False, - sort=False, key=None, separator=b'&'): - """Like :meth:`url_encode` but writes the results to a stream - object. If the stream is `None` a generator over all encoded - pairs is returned. - - .. versionadded:: 0.8 - - :param obj: the object to encode into a query string. - :param stream: a stream to write the encoded object into or `None` if - an iterator over the encoded pairs should be returned. In - that case the separator argument is ignored. - :param charset: the charset of the query string. - :param encode_keys: set to `True` if you have unicode keys. (Ignored on - Python 3.x) - :param sort: set to `True` if you want parameters to be sorted by `key`. - :param separator: the separator to be used for the pairs. - :param key: an optional function to be used for sorting. For more details - check out the :func:`sorted` documentation. - """ - separator = to_native(separator, 'ascii') - gen = _url_encode_impl(obj, charset, encode_keys, sort, key) - if stream is None: - return gen - for idx, chunk in enumerate(gen): - if idx: - stream.write(separator) - stream.write(chunk) - - -def url_join(base, url, allow_fragments=True): - """Join a base URL and a possibly relative URL to form an absolute - interpretation of the latter. - - :param base: the base URL for the join operation. - :param url: the URL to join. - :param allow_fragments: indicates whether fragments should be allowed. - """ - if isinstance(base, tuple): - base = url_unparse(base) - if isinstance(url, tuple): - url = url_unparse(url) - - base, url = normalize_string_tuple((base, url)) - s = make_literal_wrapper(base) - - if not base: - return url - if not url: - return base - - bscheme, bnetloc, bpath, bquery, bfragment = \ - url_parse(base, allow_fragments=allow_fragments) - scheme, netloc, path, query, fragment = \ - url_parse(url, bscheme, allow_fragments) - if scheme != bscheme: - return url - if netloc: - return url_unparse((scheme, netloc, path, query, fragment)) - netloc = bnetloc - - if path[:1] == s('/'): - segments = path.split(s('/')) - elif not path: - segments = bpath.split(s('/')) - if not query: - query = bquery - else: - segments = bpath.split(s('/'))[:-1] + path.split(s('/')) - - # If the rightmost part is "./" we want to keep the slash but - # remove the dot. - if segments[-1] == s('.'): - segments[-1] = s('') - - # Resolve ".." and "." - segments = [segment for segment in segments if segment != s('.')] - while 1: - i = 1 - n = len(segments) - 1 - while i < n: - if segments[i] == s('..') and \ - segments[i - 1] not in (s(''), s('..')): - del segments[i - 1:i + 1] - break - i += 1 - else: - break - - # Remove trailing ".." 
if the URL is absolute - unwanted_marker = [s(''), s('..')] - while segments[:2] == unwanted_marker: - del segments[1] - - path = s('/').join(segments) - return url_unparse((scheme, netloc, path, query, fragment)) - - -class Href(object): - - """Implements a callable that constructs URLs with the given base. The - function can be called with any number of positional and keyword - arguments which than are used to assemble the URL. Works with URLs - and posix paths. - - Positional arguments are appended as individual segments to - the path of the URL: - - >>> href = Href('/foo') - >>> href('bar', 23) - '/foo/bar/23' - >>> href('foo', bar=23) - '/foo/foo?bar=23' - - If any of the arguments (positional or keyword) evaluates to `None` it - will be skipped. If no keyword arguments are given the last argument - can be a :class:`dict` or :class:`MultiDict` (or any other dict subclass), - otherwise the keyword arguments are used for the query parameters, cutting - off the first trailing underscore of the parameter name: - - >>> href(is_=42) - '/foo?is=42' - >>> href({'foo': 'bar'}) - '/foo?foo=bar' - - Combining of both methods is not allowed: - - >>> href({'foo': 'bar'}, bar=42) - Traceback (most recent call last): - ... - TypeError: keyword arguments and query-dicts can't be combined - - Accessing attributes on the href object creates a new href object with - the attribute name as prefix: - - >>> bar_href = href.bar - >>> bar_href("blub") - '/foo/bar/blub' - - If `sort` is set to `True` the items are sorted by `key` or the default - sorting algorithm: - - >>> href = Href("/", sort=True) - >>> href(a=1, b=2, c=3) - '/?a=1&b=2&c=3' - - .. versionadded:: 0.5 - `sort` and `key` were added. - """ - - def __init__(self, base='./', charset='utf-8', sort=False, key=None): - if not base: - base = './' - self.base = base - self.charset = charset - self.sort = sort - self.key = key - - def __getattr__(self, name): - if name[:2] == '__': - raise AttributeError(name) - base = self.base - if base[-1:] != '/': - base += '/' - return Href(url_join(base, name), self.charset, self.sort, self.key) - - def __call__(self, *path, **query): - if path and isinstance(path[-1], dict): - if query: - raise TypeError('keyword arguments and query-dicts ' - 'can\'t be combined') - query, path = path[-1], path[:-1] - elif query: - query = dict([(k.endswith('_') and k[:-1] or k, v) - for k, v in query.items()]) - path = '/'.join([to_unicode(url_quote(x, self.charset), 'ascii') - for x in path if x is not None]).lstrip('/') - rv = self.base - if path: - if not rv.endswith('/'): - rv += '/' - rv = url_join(rv, './' + path) - if query: - rv += '?' + to_unicode(url_encode(query, self.charset, sort=self.sort, - key=self.key), 'ascii') - return to_native(rv) diff --git a/pyextra/werkzeug/useragents.py b/pyextra/werkzeug/useragents.py deleted file mode 100644 index 4d7d41f1c..000000000 --- a/pyextra/werkzeug/useragents.py +++ /dev/null @@ -1,212 +0,0 @@ -# -*- coding: utf-8 -*- -""" - werkzeug.useragents - ~~~~~~~~~~~~~~~~~~~ - - This module provides a helper to inspect user agent strings. This module - is far from complete but should work for most of the currently available - browsers. - - - :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details. - :license: BSD, see LICENSE for more details. -""" -import re - - -class UserAgentParser(object): - - """A simple user agent parser. 
Used by the `UserAgent`.""" - - platforms = ( - ('cros', 'chromeos'), - ('iphone|ios', 'iphone'), - ('ipad', 'ipad'), - (r'darwin|mac|os\s*x', 'macos'), - ('win', 'windows'), - (r'android', 'android'), - ('netbsd', 'netbsd'), - ('openbsd', 'openbsd'), - ('freebsd', 'freebsd'), - ('dragonfly', 'dragonflybsd'), - ('(sun|i86)os', 'solaris'), - (r'x11|lin(\b|ux)?', 'linux'), - (r'nintendo\s+wii', 'wii'), - ('irix', 'irix'), - ('hp-?ux', 'hpux'), - ('aix', 'aix'), - ('sco|unix_sv', 'sco'), - ('bsd', 'bsd'), - ('amiga', 'amiga'), - ('blackberry|playbook', 'blackberry'), - ('symbian', 'symbian') - ) - browsers = ( - ('googlebot', 'google'), - ('msnbot', 'msn'), - ('yahoo', 'yahoo'), - ('ask jeeves', 'ask'), - (r'aol|america\s+online\s+browser', 'aol'), - ('opera', 'opera'), - ('edge', 'edge'), - ('chrome', 'chrome'), - ('seamonkey', 'seamonkey'), - ('firefox|firebird|phoenix|iceweasel', 'firefox'), - ('galeon', 'galeon'), - ('safari|version', 'safari'), - ('webkit', 'webkit'), - ('camino', 'camino'), - ('konqueror', 'konqueror'), - ('k-meleon', 'kmeleon'), - ('netscape', 'netscape'), - (r'msie|microsoft\s+internet\s+explorer|trident/.+? rv:', 'msie'), - ('lynx', 'lynx'), - ('links', 'links'), - ('Baiduspider', 'baidu'), - ('bingbot', 'bing'), - ('mozilla', 'mozilla') - ) - - _browser_version_re = r'(?:%s)[/\sa-z(]*(\d+[.\da-z]+)?' - _language_re = re.compile( - r'(?:;\s*|\s+)(\b\w{2}\b(?:-\b\w{2}\b)?)\s*;|' - r'(?:\(|\[|;)\s*(\b\w{2}\b(?:-\b\w{2}\b)?)\s*(?:\]|\)|;)' - ) - - def __init__(self): - self.platforms = [(b, re.compile(a, re.I)) for a, b in self.platforms] - self.browsers = [(b, re.compile(self._browser_version_re % a, re.I)) - for a, b in self.browsers] - - def __call__(self, user_agent): - for platform, regex in self.platforms: - match = regex.search(user_agent) - if match is not None: - break - else: - platform = None - for browser, regex in self.browsers: - match = regex.search(user_agent) - if match is not None: - version = match.group(1) - break - else: - browser = version = None - match = self._language_re.search(user_agent) - if match is not None: - language = match.group(1) or match.group(2) - else: - language = None - return platform, browser, version, language - - -class UserAgent(object): - - """Represents a user agent. Pass it a WSGI environment or a user agent - string and you can inspect some of the details from the user agent - string via the attributes. The following attributes exist: - - .. attribute:: string - - the raw user agent string - - .. attribute:: platform - - the browser platform. The following platforms are currently - recognized: - - - `aix` - - `amiga` - - `android` - - `blackberry` - - `bsd` - - `chromeos` - - `dragonflybsd` - - `freebsd` - - `hpux` - - `ipad` - - `iphone` - - `irix` - - `linux` - - `macos` - - `netbsd` - - `openbsd` - - `sco` - - `solaris` - - `symbian` - - `wii` - - `windows` - - .. attribute:: browser - - the name of the browser. The following browsers are currently - recognized: - - - `aol` * - - `ask` * - - `baidu` * - - `bing` * - - `camino` - - `chrome` - - `firefox` - - `galeon` - - `google` * - - `kmeleon` - - `konqueror` - - `links` - - `lynx` - - `mozilla` - - `msie` - - `msn` - - `netscape` - - `opera` - - `safari` - - `seamonkey` - - `webkit` - - `yahoo` * - - (Browsers marked with a star (``*``) are crawlers.) - - .. attribute:: version - - the version of the browser - - .. 
attribute:: language - - the language of the browser - """ - - _parser = UserAgentParser() - - def __init__(self, environ_or_string): - if isinstance(environ_or_string, dict): - environ_or_string = environ_or_string.get('HTTP_USER_AGENT', '') - self.string = environ_or_string - self.platform, self.browser, self.version, self.language = \ - self._parser(environ_or_string) - - def to_header(self): - return self.string - - def __str__(self): - return self.string - - def __nonzero__(self): - return bool(self.browser) - - __bool__ = __nonzero__ - - def __repr__(self): - return '<%s %r/%s>' % ( - self.__class__.__name__, - self.browser, - self.version - ) - - -# conceptionally this belongs in this module but because we want to lazily -# load the user agent module (which happens in wrappers.py) we have to import -# it afterwards. The class itself has the module set to this module so -# pickle, inspect and similar modules treat the object as if it was really -# implemented here. -from werkzeug.wrappers import UserAgentMixin # noqa diff --git a/pyextra/werkzeug/utils.py b/pyextra/werkzeug/utils.py deleted file mode 100644 index 918e7e5b6..000000000 --- a/pyextra/werkzeug/utils.py +++ /dev/null @@ -1,628 +0,0 @@ -# -*- coding: utf-8 -*- -""" - werkzeug.utils - ~~~~~~~~~~~~~~ - - This module implements various utilities for WSGI applications. Most of - them are used by the request and response wrappers but especially for - middleware development it makes sense to use them without the wrappers. - - :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details. - :license: BSD, see LICENSE for more details. -""" -import re -import os -import sys -import pkgutil -try: - from html.entities import name2codepoint -except ImportError: - from htmlentitydefs import name2codepoint - -from werkzeug._compat import unichr, text_type, string_types, iteritems, \ - reraise, PY2 -from werkzeug._internal import _DictAccessorProperty, \ - _parse_signature, _missing - - -_format_re = re.compile(r'\$(?:(%s)|\{(%s)\})' % (('[a-zA-Z_][a-zA-Z0-9_]*',) * 2)) -_entity_re = re.compile(r'&([^;]+);') -_filename_ascii_strip_re = re.compile(r'[^A-Za-z0-9_.-]') -_windows_device_files = ('CON', 'AUX', 'COM1', 'COM2', 'COM3', 'COM4', 'LPT1', - 'LPT2', 'LPT3', 'PRN', 'NUL') - - -class cached_property(property): - - """A decorator that converts a function into a lazy property. The - function wrapped is called the first time to retrieve the result - and then that calculated result is used the next time you access - the value:: - - class Foo(object): - - @cached_property - def foo(self): - # calculate something important here - return 42 - - The class has to have a `__dict__` in order for this property to - work. - """ - - # implementation detail: A subclass of python's builtin property - # decorator, we override __get__ to check for a cached value. If one - # choses to invoke __get__ by hand the property will still work as - # expected because the lookup logic is replicated in __get__ for - # manual invocation. 
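    #
    # Usage note (illustrative): because the computed value is stored in the
    # instance ``__dict__`` under the property name, popping that key, e.g.
    # ``foo.__dict__.pop('foo', None)``, or assigning a new value makes the
    # next access recompute the result.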
- - def __init__(self, func, name=None, doc=None): - self.__name__ = name or func.__name__ - self.__module__ = func.__module__ - self.__doc__ = doc or func.__doc__ - self.func = func - - def __set__(self, obj, value): - obj.__dict__[self.__name__] = value - - def __get__(self, obj, type=None): - if obj is None: - return self - value = obj.__dict__.get(self.__name__, _missing) - if value is _missing: - value = self.func(obj) - obj.__dict__[self.__name__] = value - return value - - -class environ_property(_DictAccessorProperty): - - """Maps request attributes to environment variables. This works not only - for the Werzeug request object, but also any other class with an - environ attribute: - - >>> class Test(object): - ... environ = {'key': 'value'} - ... test = environ_property('key') - >>> var = Test() - >>> var.test - 'value' - - If you pass it a second value it's used as default if the key does not - exist, the third one can be a converter that takes a value and converts - it. If it raises :exc:`ValueError` or :exc:`TypeError` the default value - is used. If no default value is provided `None` is used. - - Per default the property is read only. You have to explicitly enable it - by passing ``read_only=False`` to the constructor. - """ - - read_only = True - - def lookup(self, obj): - return obj.environ - - -class header_property(_DictAccessorProperty): - - """Like `environ_property` but for headers.""" - - def lookup(self, obj): - return obj.headers - - -class HTMLBuilder(object): - - """Helper object for HTML generation. - - Per default there are two instances of that class. The `html` one, and - the `xhtml` one for those two dialects. The class uses keyword parameters - and positional parameters to generate small snippets of HTML. - - Keyword parameters are converted to XML/SGML attributes, positional - arguments are used as children. Because Python accepts positional - arguments before keyword arguments it's a good idea to use a list with the - star-syntax for some children: - - >>> html.p(class_='foo', *[html.a('foo', href='foo.html'), ' ', - ... html.a('bar', href='bar.html')]) - u'
<p class="foo"><a href="foo.html">foo</a> <a href="bar.html">bar</a></p>
    ' - - This class works around some browser limitations and can not be used for - arbitrary SGML/XML generation. For that purpose lxml and similar - libraries exist. - - Calling the builder escapes the string passed: - - >>> html.p(html("")) - u'
<p>&lt;foo&gt;</p>
    ' - """ - - _entity_re = re.compile(r'&([^;]+);') - _entities = name2codepoint.copy() - _entities['apos'] = 39 - _empty_elements = set([ - 'area', 'base', 'basefont', 'br', 'col', 'command', 'embed', 'frame', - 'hr', 'img', 'input', 'keygen', 'isindex', 'link', 'meta', 'param', - 'source', 'wbr' - ]) - _boolean_attributes = set([ - 'selected', 'checked', 'compact', 'declare', 'defer', 'disabled', - 'ismap', 'multiple', 'nohref', 'noresize', 'noshade', 'nowrap' - ]) - _plaintext_elements = set(['textarea']) - _c_like_cdata = set(['script', 'style']) - - def __init__(self, dialect): - self._dialect = dialect - - def __call__(self, s): - return escape(s) - - def __getattr__(self, tag): - if tag[:2] == '__': - raise AttributeError(tag) - - def proxy(*children, **arguments): - buffer = '<' + tag - for key, value in iteritems(arguments): - if value is None: - continue - if key[-1] == '_': - key = key[:-1] - if key in self._boolean_attributes: - if not value: - continue - if self._dialect == 'xhtml': - value = '="' + key + '"' - else: - value = '' - else: - value = '="' + escape(value) + '"' - buffer += ' ' + key + value - if not children and tag in self._empty_elements: - if self._dialect == 'xhtml': - buffer += ' />' - else: - buffer += '>' - return buffer - buffer += '>' - - children_as_string = ''.join([text_type(x) for x in children - if x is not None]) - - if children_as_string: - if tag in self._plaintext_elements: - children_as_string = escape(children_as_string) - elif tag in self._c_like_cdata and self._dialect == 'xhtml': - children_as_string = '/**/' - buffer += children_as_string + '' - return buffer - return proxy - - def __repr__(self): - return '<%s for %r>' % ( - self.__class__.__name__, - self._dialect - ) - - -html = HTMLBuilder('html') -xhtml = HTMLBuilder('xhtml') - - -def get_content_type(mimetype, charset): - """Returns the full content type string with charset for a mimetype. - - If the mimetype represents text the charset will be appended as charset - parameter, otherwise the mimetype is returned unchanged. - - :param mimetype: the mimetype to be used as content type. - :param charset: the charset to be appended in case it was a text mimetype. - :return: the content type. - """ - if mimetype.startswith('text/') or \ - mimetype == 'application/xml' or \ - (mimetype.startswith('application/') and - mimetype.endswith('+xml')): - mimetype += '; charset=' + charset - return mimetype - - -def format_string(string, context): - """String-template format a string: - - >>> format_string('$foo and ${foo}s', dict(foo=42)) - '42 and 42s' - - This does not do any attribute lookup etc. For more advanced string - formattings have a look at the `werkzeug.template` module. - - :param string: the format string. - :param context: a dict with the variables to insert. - """ - def lookup_arg(match): - x = context[match.group(1) or match.group(2)] - if not isinstance(x, string_types): - x = type(string)(x) - return x - return _format_re.sub(lookup_arg, string) - - -def secure_filename(filename): - r"""Pass it a filename and it will return a secure version of it. This - filename can then safely be stored on a regular file system and passed - to :func:`os.path.join`. The filename returned is an ASCII only string - for maximum portability. - - On windows systems the function also makes sure that the file is not - named after one of the special device files. 
- - >>> secure_filename("My cool movie.mov") - 'My_cool_movie.mov' - >>> secure_filename("../../../etc/passwd") - 'etc_passwd' - >>> secure_filename(u'i contain cool \xfcml\xe4uts.txt') - 'i_contain_cool_umlauts.txt' - - The function might return an empty filename. It's your responsibility - to ensure that the filename is unique and that you generate random - filename if the function returned an empty one. - - .. versionadded:: 0.5 - - :param filename: the filename to secure - """ - if isinstance(filename, text_type): - from unicodedata import normalize - filename = normalize('NFKD', filename).encode('ascii', 'ignore') - if not PY2: - filename = filename.decode('ascii') - for sep in os.path.sep, os.path.altsep: - if sep: - filename = filename.replace(sep, ' ') - filename = str(_filename_ascii_strip_re.sub('', '_'.join( - filename.split()))).strip('._') - - # on nt a couple of special files are present in each folder. We - # have to ensure that the target file is not such a filename. In - # this case we prepend an underline - if os.name == 'nt' and filename and \ - filename.split('.')[0].upper() in _windows_device_files: - filename = '_' + filename - - return filename - - -def escape(s, quote=None): - """Replace special characters "&", "<", ">" and (") to HTML-safe sequences. - - There is a special handling for `None` which escapes to an empty string. - - .. versionchanged:: 0.9 - `quote` is now implicitly on. - - :param s: the string to escape. - :param quote: ignored. - """ - if s is None: - return '' - elif hasattr(s, '__html__'): - return text_type(s.__html__()) - elif not isinstance(s, string_types): - s = text_type(s) - if quote is not None: - from warnings import warn - warn(DeprecationWarning('quote parameter is implicit now'), stacklevel=2) - s = s.replace('&', '&').replace('<', '<') \ - .replace('>', '>').replace('"', """) - return s - - -def unescape(s): - """The reverse function of `escape`. This unescapes all the HTML - entities, not only the XML entities inserted by `escape`. - - :param s: the string to unescape. - """ - def handle_match(m): - name = m.group(1) - if name in HTMLBuilder._entities: - return unichr(HTMLBuilder._entities[name]) - try: - if name[:2] in ('#x', '#X'): - return unichr(int(name[2:], 16)) - elif name.startswith('#'): - return unichr(int(name[1:])) - except ValueError: - pass - return u'' - return _entity_re.sub(handle_match, s) - - -def redirect(location, code=302, Response=None): - """Returns a response object (a WSGI application) that, if called, - redirects the client to the target location. Supported codes are 301, - 302, 303, 305, and 307. 300 is not supported because it's not a real - redirect and 304 because it's the answer for a request with a request - with defined If-Modified-Since headers. - - .. versionadded:: 0.6 - The location can now be a unicode string that is encoded using - the :func:`iri_to_uri` function. - - .. versionadded:: 0.10 - The class used for the Response object can now be passed in. - - :param location: the location the response should redirect to. - :param code: the redirect status code. defaults to 302. - :param class Response: a Response class to use when instantiating a - response. The default is :class:`werkzeug.wrappers.Response` if - unspecified. - """ - if Response is None: - from werkzeug.wrappers import Response - - display_location = escape(location) - if isinstance(location, text_type): - # Safe conversion is necessary here as we might redirect - # to a broken URI scheme (for instance itms-services). 
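Not from the original file; a few illustrative assertions exercising the helpers defined above. The literal inputs are invented and the expected outputs follow the docstrings::

    from werkzeug.utils import secure_filename, escape, unescape

    assert secure_filename('../../etc/passwd') == 'etc_passwd'    # path separators stripped
    assert secure_filename(u'm\xfcsli.txt') == 'musli.txt'        # non-ASCII normalised away
    assert escape(u'<b>"quoted"</b>') == u'&lt;b&gt;&quot;quoted&quot;&lt;/b&gt;'
    assert unescape(u'&lt;b&gt; &amp; &#62;') == u'<b> & >'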
- from werkzeug.urls import iri_to_uri - location = iri_to_uri(location, safe_conversion=True) - response = Response( - '\n' - 'Redirecting...\n' - '
<h1>Redirecting...</h1>\n' - '<p>
    You should be redirected automatically to target URL: ' - '%s. If not click the link.' % - (escape(location), display_location), code, mimetype='text/html') - response.headers['Location'] = location - return response - - -def append_slash_redirect(environ, code=301): - """Redirects to the same URL but with a slash appended. The behavior - of this function is undefined if the path ends with a slash already. - - :param environ: the WSGI environment for the request that triggers - the redirect. - :param code: the status code for the redirect. - """ - new_path = environ['PATH_INFO'].strip('/') + '/' - query_string = environ.get('QUERY_STRING') - if query_string: - new_path += '?' + query_string - return redirect(new_path, code) - - -def import_string(import_name, silent=False): - """Imports an object based on a string. This is useful if you want to - use import paths as endpoints or something similar. An import path can - be specified either in dotted notation (``xml.sax.saxutils.escape``) - or with a colon as object delimiter (``xml.sax.saxutils:escape``). - - If `silent` is True the return value will be `None` if the import fails. - - :param import_name: the dotted name for the object to import. - :param silent: if set to `True` import errors are ignored and - `None` is returned instead. - :return: imported object - """ - # force the import name to automatically convert to strings - # __import__ is not able to handle unicode strings in the fromlist - # if the module is a package - import_name = str(import_name).replace(':', '.') - try: - try: - __import__(import_name) - except ImportError: - if '.' not in import_name: - raise - else: - return sys.modules[import_name] - - module_name, obj_name = import_name.rsplit('.', 1) - try: - module = __import__(module_name, None, None, [obj_name]) - except ImportError: - # support importing modules not yet set up by the parent module - # (or package for that matter) - module = import_string(module_name) - - try: - return getattr(module, obj_name) - except AttributeError as e: - raise ImportError(e) - - except ImportError as e: - if not silent: - reraise( - ImportStringError, - ImportStringError(import_name, e), - sys.exc_info()[2]) - - -def find_modules(import_path, include_packages=False, recursive=False): - """Finds all the modules below a package. This can be useful to - automatically import all views / controllers so that their metaclasses / - function decorators have a chance to register themselves on the - application. - - Packages are not returned unless `include_packages` is `True`. This can - also recursively list modules but in that case it will import all the - packages to get the correct load path of that module. - - :param import_path: the dotted name for the package to find child modules. - :param include_packages: set to `True` if packages should be returned, too. - :param recursive: set to `True` if recursion should happen. - :return: generator - """ - module = import_string(import_path) - path = getattr(module, '__path__', None) - if path is None: - raise ValueError('%r is not a package' % import_path) - basename = module.__name__ + '.' - for importer, modname, ispkg in pkgutil.iter_modules(path): - modname = basename + modname - if ispkg: - if include_packages: - yield modname - if recursive: - for item in find_modules(modname, include_packages, True): - yield item - else: - yield modname - - -def validate_arguments(func, args, kwargs, drop_extra=True): - """Checks if the function accepts the arguments and keyword arguments. 
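Editorial sketch (not part of the deleted source): the dotted and colon notations accepted by ``import_string`` above, plus ``find_modules`` over a standard-library package; only stdlib names are used::

    from werkzeug.utils import import_string, find_modules

    xml_escape = import_string('xml.sax.saxutils:escape')          # colon form: module:attribute
    assert xml_escape('<tag>') == '&lt;tag&gt;'
    assert import_string('xml.sax.saxutils.escape') is xml_escape  # dotted form resolves the same object
    assert import_string('no.such.module', silent=True) is None    # failure silenced instead of raised

    assert 'xml.sax.saxutils' in list(find_modules('xml.sax'))     # child modules of a package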
- Returns a new ``(args, kwargs)`` tuple that can safely be passed to - the function without causing a `TypeError` because the function signature - is incompatible. If `drop_extra` is set to `True` (which is the default) - any extra positional or keyword arguments are dropped automatically. - - The exception raised provides three attributes: - - `missing` - A set of argument names that the function expected but where - missing. - - `extra` - A dict of keyword arguments that the function can not handle but - where provided. - - `extra_positional` - A list of values that where given by positional argument but the - function cannot accept. - - This can be useful for decorators that forward user submitted data to - a view function:: - - from werkzeug.utils import ArgumentValidationError, validate_arguments - - def sanitize(f): - def proxy(request): - data = request.values.to_dict() - try: - args, kwargs = validate_arguments(f, (request,), data) - except ArgumentValidationError: - raise BadRequest('The browser failed to transmit all ' - 'the data expected.') - return f(*args, **kwargs) - return proxy - - :param func: the function the validation is performed against. - :param args: a tuple of positional arguments. - :param kwargs: a dict of keyword arguments. - :param drop_extra: set to `False` if you don't want extra arguments - to be silently dropped. - :return: tuple in the form ``(args, kwargs)``. - """ - parser = _parse_signature(func) - args, kwargs, missing, extra, extra_positional = parser(args, kwargs)[:5] - if missing: - raise ArgumentValidationError(tuple(missing)) - elif (extra or extra_positional) and not drop_extra: - raise ArgumentValidationError(None, extra, extra_positional) - return tuple(args), kwargs - - -def bind_arguments(func, args, kwargs): - """Bind the arguments provided into a dict. When passed a function, - a tuple of arguments and a dict of keyword arguments `bind_arguments` - returns a dict of names as the function would see it. This can be useful - to implement a cache decorator that uses the function arguments to build - the cache key based on the values of the arguments. - - :param func: the function the arguments should be bound for. - :param args: tuple of positional arguments. - :param kwargs: a dict of keyword arguments. - :return: a :class:`dict` of bound keyword arguments. - """ - args, kwargs, missing, extra, extra_positional, \ - arg_spec, vararg_var, kwarg_var = _parse_signature(func)(args, kwargs) - values = {} - for (name, has_default, default), value in zip(arg_spec, args): - values[name] = value - if vararg_var is not None: - values[vararg_var] = tuple(extra_positional) - elif extra_positional: - raise TypeError('too many positional arguments') - if kwarg_var is not None: - multikw = set(extra) & set([x[0] for x in arg_spec]) - if multikw: - raise TypeError('got multiple values for keyword argument ' + - repr(next(iter(multikw)))) - values[kwarg_var] = extra - elif extra: - raise TypeError('got unexpected keyword argument ' + - repr(next(iter(extra)))) - return values - - -class ArgumentValidationError(ValueError): - - """Raised if :func:`validate_arguments` fails to validate""" - - def __init__(self, missing=None, extra=None, extra_positional=None): - self.missing = set(missing or ()) - self.extra = extra or {} - self.extra_positional = extra_positional or [] - ValueError.__init__(self, 'function arguments invalid. 
(' - '%d missing, %d additional)' % ( - len(self.missing), - len(self.extra) + len(self.extra_positional) - )) - - -class ImportStringError(ImportError): - - """Provides information about a failed :func:`import_string` attempt.""" - - #: String in dotted notation that failed to be imported. - import_name = None - #: Wrapped exception. - exception = None - - def __init__(self, import_name, exception): - self.import_name = import_name - self.exception = exception - - msg = ( - 'import_string() failed for %r. Possible reasons are:\n\n' - '- missing __init__.py in a package;\n' - '- package or module path not included in sys.path;\n' - '- duplicated package or module name taking precedence in ' - 'sys.path;\n' - '- missing module, class, function or variable;\n\n' - 'Debugged import:\n\n%s\n\n' - 'Original exception:\n\n%s: %s') - - name = '' - tracked = [] - for part in import_name.replace(':', '.').split('.'): - name += (name and '.') + part - imported = import_string(name, silent=True) - if imported: - tracked.append((name, getattr(imported, '__file__', None))) - else: - track = ['- %r found in %r.' % (n, i) for n, i in tracked] - track.append('- %r not found.' % name) - msg = msg % (import_name, '\n'.join(track), - exception.__class__.__name__, str(exception)) - break - - ImportError.__init__(self, msg) - - def __repr__(self): - return '<%s(%r, %r)>' % (self.__class__.__name__, self.import_name, - self.exception) - - -# DEPRECATED -# these objects were previously in this module as well. we import -# them here for backwards compatibility with old pickles. -from werkzeug.datastructures import ( # noqa - MultiDict, CombinedMultiDict, Headers, EnvironHeaders) -from werkzeug.http import parse_cookie, dump_cookie # noqa diff --git a/pyextra/werkzeug/websocket.py b/pyextra/werkzeug/websocket.py deleted file mode 100644 index 764698b70..000000000 --- a/pyextra/werkzeug/websocket.py +++ /dev/null @@ -1,337 +0,0 @@ -import re -import errno -import socket -import struct -import collections - -from base64 import b64decode, b64encode -from hashlib import sha1 - - -WS_KEY = b'258EAFA5-E914-47DA-95CA-C5AB0DC85B11' - - -def pack_message(message): - """Pack the message inside ``00`` and ``FF`` - As per the dataframing section (5.3) for the websocket spec - """ - if isinstance(message, unicode): - message = message.encode('utf-8') - elif not isinstance(message, str): - message = str(message) - packed = "\x00%s\xFF" % message - return packed - - -def encode_hybi(buf, opcode, base64=False): - """ Encode a HyBi style WebSocket frame. 
- Optional opcode: - 0x0 - continuation - 0x1 - text frame (base64 encode buf) - 0x2 - binary frame (use raw buf) - 0x8 - connection close - 0x9 - ping - 0xA - pong - """ - if base64: - buf = b64encode(buf) - b1 = 0x80 | (opcode & 0x0f) # FIN + opcode - payload_len = len(buf) - if payload_len <= 125: - header = struct.pack('>BB', b1, payload_len) - elif payload_len > 125 and payload_len < 65536: - header = struct.pack('>BBH', b1, 126, payload_len) - elif payload_len >= 65536: - header = struct.pack('>BBQ', b1, 127, payload_len) - return header + buf, len(header), 0 - - -def decode_hybi(buf, base64=False): - """Decode HyBi style WebSocket packets.""" - f = {'fin' : 0, - 'opcode' : 0, - 'mask' : 0, - 'hlen' : 2, - 'length' : 0, - 'payload' : None, - 'left' : 0, - 'close_code' : None, - 'close_reason' : None} - - blen = len(buf) - f['left'] = blen - - if blen < f['hlen']: - return f # Incomplete frame header - - b1, b2 = struct.unpack_from(">BB", buf) - f['opcode'] = b1 & 0x0f - f['fin'] = (b1 & 0x80) >> 7 - has_mask = (b2 & 0x80) >> 7 - - f['length'] = b2 & 0x7f - - if f['length'] == 126: - f['hlen'] = 4 - if blen < f['hlen']: - return f # Incomplete frame header - (f['length'],) = struct.unpack_from('>xxH', buf) - elif f['length'] == 127: - f['hlen'] = 10 - if blen < f['hlen']: - return f # Incomplete frame header - (f['length'],) = struct.unpack_from('>xxQ', buf) - - full_len = f['hlen'] + has_mask * 4 + f['length'] - - if blen < full_len: # Incomplete frame - return f # Incomplete frame header - - # Number of bytes that are part of the next frame(s) - f['left'] = blen - full_len - - # Process 1 frame - if has_mask: - # unmask payload - f['mask'] = buf[f['hlen']:f['hlen']+4] - b = c = '' - if f['length'] >= 4: - data = struct.unpack('= 2: - f['close_code'] = struct.unpack_from(">H", f['payload']) - if f['length'] > 3: - f['close_reason'] = f['payload'][2:] - - return f - - -class WebSocketWSGI(object): - - def __init__(self, handler): - self.handler = handler - - def verify_client(self, ws): - pass - - def __call__(self, environ, start_response): - if not (environ.get('HTTP_CONNECTION').find('Upgrade') != -1 and - environ['HTTP_UPGRADE'].lower() == 'websocket'): - # need to check a few more things here for true compliance - start_response('400 Bad Request', [('Connection','close')]) - return [] - - sock = environ['gunicorn.socket'] - - ws = WebSocket(sock, environ) - - handshake_reply = ("HTTP/1.1 101 Switching Protocols\r\n" - "Upgrade: websocket\r\n" - "Connection: Upgrade\r\n") - - path = environ['PATH_INFO'] - key = environ.get('HTTP_SEC_WEBSOCKET_KEY') - if key: - ws_key = b64decode(key) - if len(ws_key) != 16: - start_response('400 Bad Request', [('Connection','close')]) - return [] - - protocols = [] - subprotocols = environ.get('HTTP_SEC_WEBSOCKET_PROTOCOL') - ws_protocols = [] - if subprotocols: - for s in subprotocols.split(','): - s = s.strip() - if s in protocols: - ws_protocols.append(s) - if ws_protocols: - handshake_reply += 'Sec-WebSocket-Protocol: %s\r\n' % ', '.join(ws_protocols) - - exts = [] - extensions = environ.get('HTTP_SEC_WEBSOCKET_EXTENSIONS') - ws_extensions = [] - if extensions: - for ext in extensions.split(','): - ext = ext.strip() - if ext in exts: - ws_extensions.append(ext) - if ws_extensions: - handshake_reply += 'Sec-WebSocket-Extensions: %s\r\n' % ', '.join(ws_extensions) - - handshake_reply += ( - "Sec-WebSocket-Origin: %s\r\n" - "Sec-WebSocket-Location: ws://%s%s\r\n" - "Sec-WebSocket-Version: %s\r\n" - "Sec-WebSocket-Accept: %s\r\n\r\n" - % ( - 
environ.get('HTTP_ORIGIN'), - environ.get('HTTP_HOST'), - path, - ws.version, - b64encode(sha1(key + WS_KEY).digest()) - )) - - else: - - handshake_reply += ( - "WebSocket-Origin: %s\r\n" - "WebSocket-Location: ws://%s%s\r\n\r\n" % ( - environ.get('HTTP_ORIGIN'), - environ.get('HTTP_HOST'), - path)) - - sock.sendall(handshake_reply) - - try: - self.handler(ws) - except socket.error as e: - if e[0] != errno.EPIPE: - raise - - # use this undocumented feature of grainbows to ensure that it - # doesn't barf on the fact that we didn't call start_response - return ALREADY_HANDLED - - -class WebSocket(object): - - def __init__(self, sock, environ, version=76): - self._socket = sock - try: - version = int(environ.get('HTTP_SEC_WEBSOCKET_VERSION')) - except (ValueError, TypeError): - version = 76 - self.version = version - self.closed = False - self.accepted = False - self._buf = b'' - self._msgs = collections.deque() - - def _parse_messages(self): - """ Parses for messages in the buffer *buf*. It is assumed that - the buffer contains the start character for a message, but that it - may contain only part of the rest of the message. - Returns an array of messages, and the buffer remainder that - didn't contain any full messages.""" - msgs = [] - end_idx = 0 - buf = self._buf - while buf: - if self.version in (7, 8, 13): - frame = decode_hybi(buf, base64=False) - - if frame['payload'] == None: - break - else: - if frame['opcode'] == 0x8: # connection close - self.closed = True - break - else: - msgs.append(frame['payload']); - if frame['left']: - buf = buf[-frame['left']:] - else: - buf = b'' - - else: - frame_type = ord(buf[0]) - if frame_type == 0: - # Normal message. - end_idx = buf.find("\xFF") - if end_idx == -1: #pragma NO COVER - break - msgs.append(buf[1:end_idx].decode('utf-8', 'replace')) - buf = buf[end_idx+1:] - elif frame_type == 255: - # Closing handshake. - assert ord(buf[1]) == 0, "Unexpected closing handshake: %r" % buf - self.closed = True - break - else: - raise ValueError("Don't understand how to parse this type of message: %r" % buf) - self._buf = buf - return msgs - - def send(self, message): - """Send a message to the browser. - *message* should be convertable to a string; unicode objects should be - encodable as utf-8. Raises socket.error with errno of 32 - (broken pipe) if the socket has already been closed by the client. - """ - if self.version in (7, 8, 13): - packed, lenhead, lentail = encode_hybi( - message, opcode=0x01, base64=False) - else: - packed = pack_message(message) - self._socket.sendall(packed) - - def wait(self): - """Waits for and deserializes messages. - Returns a single message; the oldest not yet processed. If the client - has already closed the connection, returns None. This is different - from normal socket behavior because the empty string is a valid - websocket message.""" - while not self._msgs: - # Websocket might be closed already. 
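Editorial aside, not part of the deleted file: the header math behind ``encode_hybi`` above for a short unmasked text frame. It mirrors the byte layout rather than importing the deleted module::

    import struct

    payload = b'hello'
    b1 = 0x80 | 0x01                               # FIN bit plus opcode 0x1 (text frame)
    header = struct.pack('>BB', b1, len(payload))  # short form, payload <= 125 bytes
    frame = header + payload
    assert frame == b'\x81\x05hello'               # same bytes encode_hybi(payload, 0x1) returns as its frame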
- if self.closed: - return None - # no parsed messages, must mean buf needs more data - delta = self._socket.recv(8096) - if delta == '': - return None - self._buf += delta - msgs = self._parse_messages() - self._msgs.extend(msgs) - return self._msgs.popleft() - - def _send_closing_frame(self, ignore_send_errors=False): - """Sends the closing frame to the client, if required.""" - if self.version in (7, 8, 13) and not self.closed: - msg = '' - #if code != None: - # msg = struct.pack(">H%ds" % (len(reason)), code) - - buf, h, t = encode_hybi(msg, opcode=0x08, base64=False) - self._socket.sendall(buf) - self.closed = True - - elif self.version == 76 and not self.closed: - try: - self._socket.sendall("\xff\x00") - except socket.error: - # Sometimes, like when the remote side cuts off the connection, - # we don't care about this. - if not ignore_send_errors: #pragma NO COVER - raise - self.closed = True - - def close(self): - """Forcibly close the websocket; generally it is preferable to - return from the handler method.""" - self._send_closing_frame() - self._socket.shutdown(True) - self._socket.close() diff --git a/pyextra/werkzeug/wrappers.py b/pyextra/werkzeug/wrappers.py deleted file mode 100644 index 92dfa5dac..000000000 --- a/pyextra/werkzeug/wrappers.py +++ /dev/null @@ -1,2028 +0,0 @@ -# -*- coding: utf-8 -*- -""" - werkzeug.wrappers - ~~~~~~~~~~~~~~~~~ - - The wrappers are simple request and response objects which you can - subclass to do whatever you want them to do. The request object contains - the information transmitted by the client (webbrowser) and the response - object contains all the information sent back to the browser. - - An important detail is that the request object is created with the WSGI - environ and will act as high-level proxy whereas the response object is an - actual WSGI application. - - Like everything else in Werkzeug these objects will work correctly with - unicode data. Incoming form data parsed by the response object will be - decoded into an unicode object if possible and if it makes sense. - - - :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details. - :license: BSD, see LICENSE for more details. 
-""" -from functools import update_wrapper -from datetime import datetime, timedelta -from warnings import warn - -from werkzeug.http import HTTP_STATUS_CODES, \ - parse_accept_header, parse_cache_control_header, parse_etags, \ - parse_date, generate_etag, is_resource_modified, unquote_etag, \ - quote_etag, parse_set_header, parse_authorization_header, \ - parse_www_authenticate_header, remove_entity_headers, \ - parse_options_header, dump_options_header, http_date, \ - parse_if_range_header, parse_cookie, dump_cookie, \ - parse_range_header, parse_content_range_header, dump_header, \ - parse_age, dump_age -from werkzeug.urls import url_decode, iri_to_uri, url_join -from werkzeug.formparser import FormDataParser, default_stream_factory -from werkzeug.utils import cached_property, environ_property, \ - header_property, get_content_type -from werkzeug.wsgi import get_current_url, get_host, \ - ClosingIterator, get_input_stream, get_content_length, _RangeWrapper -from werkzeug.datastructures import MultiDict, CombinedMultiDict, Headers, \ - EnvironHeaders, ImmutableMultiDict, ImmutableTypeConversionDict, \ - ImmutableList, MIMEAccept, CharsetAccept, LanguageAccept, \ - ResponseCacheControl, RequestCacheControl, CallbackDict, \ - ContentRange, iter_multi_items -from werkzeug._internal import _get_environ -from werkzeug._compat import to_bytes, string_types, text_type, \ - integer_types, wsgi_decoding_dance, wsgi_get_bytes, \ - to_unicode, to_native, BytesIO - - -def _run_wsgi_app(*args): - """This function replaces itself to ensure that the test module is not - imported unless required. DO NOT USE! - """ - global _run_wsgi_app - from werkzeug.test import run_wsgi_app as _run_wsgi_app - return _run_wsgi_app(*args) - - -def _warn_if_string(iterable): - """Helper for the response objects to check if the iterable returned - to the WSGI server is not a string. - """ - if isinstance(iterable, string_types): - warn(Warning('response iterable was set to a string. This appears ' - 'to work but means that the server will send the ' - 'data to the client char, by char. This is almost ' - 'never intended behavior, use response.data to assign ' - 'strings to the response object.'), stacklevel=2) - - -def _assert_not_shallow(request): - if request.shallow: - raise RuntimeError('A shallow request tried to consume ' - 'form data. If you really want to do ' - 'that, set `shallow` to False.') - - -def _iter_encoded(iterable, charset): - for item in iterable: - if isinstance(item, text_type): - yield item.encode(charset) - else: - yield item - - -def _clean_accept_ranges(accept_ranges): - if accept_ranges is True: - return "bytes" - elif accept_ranges is False: - return "none" - elif isinstance(accept_ranges, text_type): - return to_native(accept_ranges) - raise ValueError("Invalid accept_ranges value") - - -class BaseRequest(object): - - """Very basic request object. This does not implement advanced stuff like - entity tag parsing or cache controls. The request object is created with - the WSGI environment as first argument and will add itself to the WSGI - environment as ``'werkzeug.request'`` unless it's created with - `populate_request` set to False. - - There are a couple of mixins available that add additional functionality - to the request object, there is also a class called `Request` which - subclasses `BaseRequest` and all the important mixins. - - It's a good idea to create a custom subclass of the :class:`BaseRequest` - and add missing functionality either via mixins or direct implementation. 
- Here an example for such subclasses:: - - from werkzeug.wrappers import BaseRequest, ETagRequestMixin - - class Request(BaseRequest, ETagRequestMixin): - pass - - Request objects are **read only**. As of 0.5 modifications are not - allowed in any place. Unlike the lower level parsing functions the - request object will use immutable objects everywhere possible. - - Per default the request object will assume all the text data is `utf-8` - encoded. Please refer to `the unicode chapter `_ for more - details about customizing the behavior. - - Per default the request object will be added to the WSGI - environment as `werkzeug.request` to support the debugging system. - If you don't want that, set `populate_request` to `False`. - - If `shallow` is `True` the environment is initialized as shallow - object around the environ. Every operation that would modify the - environ in any way (such as consuming form data) raises an exception - unless the `shallow` attribute is explicitly set to `False`. This - is useful for middlewares where you don't want to consume the form - data by accident. A shallow request is not populated to the WSGI - environment. - - .. versionchanged:: 0.5 - read-only mode was enforced by using immutables classes for all - data. - """ - - #: the charset for the request, defaults to utf-8 - charset = 'utf-8' - - #: the error handling procedure for errors, defaults to 'replace' - encoding_errors = 'replace' - - #: the maximum content length. This is forwarded to the form data - #: parsing function (:func:`parse_form_data`). When set and the - #: :attr:`form` or :attr:`files` attribute is accessed and the - #: parsing fails because more than the specified value is transmitted - #: a :exc:`~werkzeug.exceptions.RequestEntityTooLarge` exception is raised. - #: - #: Have a look at :ref:`dealing-with-request-data` for more details. - #: - #: .. versionadded:: 0.5 - max_content_length = None - - #: the maximum form field size. This is forwarded to the form data - #: parsing function (:func:`parse_form_data`). When set and the - #: :attr:`form` or :attr:`files` attribute is accessed and the - #: data in memory for post data is longer than the specified value a - #: :exc:`~werkzeug.exceptions.RequestEntityTooLarge` exception is raised. - #: - #: Have a look at :ref:`dealing-with-request-data` for more details. - #: - #: .. versionadded:: 0.5 - max_form_memory_size = None - - #: the class to use for `args` and `form`. The default is an - #: :class:`~werkzeug.datastructures.ImmutableMultiDict` which supports - #: multiple values per key. alternatively it makes sense to use an - #: :class:`~werkzeug.datastructures.ImmutableOrderedMultiDict` which - #: preserves order or a :class:`~werkzeug.datastructures.ImmutableDict` - #: which is the fastest but only remembers the last key. It is also - #: possible to use mutable structures, but this is not recommended. - #: - #: .. versionadded:: 0.6 - parameter_storage_class = ImmutableMultiDict - - #: the type to be used for list values from the incoming WSGI environment. - #: By default an :class:`~werkzeug.datastructures.ImmutableList` is used - #: (for example for :attr:`access_list`). - #: - #: .. versionadded:: 0.6 - list_storage_class = ImmutableList - - #: the type to be used for dict values from the incoming WSGI environment. - #: By default an - #: :class:`~werkzeug.datastructures.ImmutableTypeConversionDict` is used - #: (for example for :attr:`cookies`). - #: - #: .. 
versionadded:: 0.6 - dict_storage_class = ImmutableTypeConversionDict - - #: The form data parser that shoud be used. Can be replaced to customize - #: the form date parsing. - form_data_parser_class = FormDataParser - - #: Optionally a list of hosts that is trusted by this request. By default - #: all hosts are trusted which means that whatever the client sends the - #: host is will be accepted. - #: - #: This is the recommended setup as a webserver should manually be set up - #: to only route correct hosts to the application, and remove the - #: `X-Forwarded-Host` header if it is not being used (see - #: :func:`werkzeug.wsgi.get_host`). - #: - #: .. versionadded:: 0.9 - trusted_hosts = None - - #: Indicates whether the data descriptor should be allowed to read and - #: buffer up the input stream. By default it's enabled. - #: - #: .. versionadded:: 0.9 - disable_data_descriptor = False - - def __init__(self, environ, populate_request=True, shallow=False): - self.environ = environ - if populate_request and not shallow: - self.environ['werkzeug.request'] = self - self.shallow = shallow - - def __repr__(self): - # make sure the __repr__ even works if the request was created - # from an invalid WSGI environment. If we display the request - # in a debug session we don't want the repr to blow up. - args = [] - try: - args.append("'%s'" % to_native(self.url, self.url_charset)) - args.append('[%s]' % self.method) - except Exception: - args.append('(invalid WSGI environ)') - - return '<%s %s>' % ( - self.__class__.__name__, - ' '.join(args) - ) - - @property - def url_charset(self): - """The charset that is assumed for URLs. Defaults to the value - of :attr:`charset`. - - .. versionadded:: 0.6 - """ - return self.charset - - @classmethod - def from_values(cls, *args, **kwargs): - """Create a new request object based on the values provided. If - environ is given missing values are filled from there. This method is - useful for small scripts when you need to simulate a request from an URL. - Do not use this method for unittesting, there is a full featured client - object (:class:`Client`) that allows to create multipart requests, - support for cookies etc. - - This accepts the same options as the - :class:`~werkzeug.test.EnvironBuilder`. - - .. versionchanged:: 0.5 - This method now accepts the same arguments as - :class:`~werkzeug.test.EnvironBuilder`. Because of this the - `environ` parameter is now called `environ_overrides`. - - :return: request object - """ - from werkzeug.test import EnvironBuilder - charset = kwargs.pop('charset', cls.charset) - kwargs['charset'] = charset - builder = EnvironBuilder(*args, **kwargs) - try: - return builder.get_request(cls) - finally: - builder.close() - - @classmethod - def application(cls, f): - """Decorate a function as responder that accepts the request as first - argument. This works like the :func:`responder` decorator but the - function is passed the request object as first argument and the - request object will be closed automatically:: - - @Request.application - def my_wsgi_app(request): - return Response('Hello World!') - - As of Werkzeug 0.14 HTTP exceptions are automatically caught and - converted to responses instead of failing. - - :param f: the WSGI callable to decorate - :return: a new WSGI callable - """ - #: return a callable that wraps the -2nd argument with the request - #: and calls the function with all the arguments up to that one and - #: the request. The return value is then called with the latest - #: two arguments. 
This makes it possible to use this decorator for - #: both methods and standalone WSGI functions. - from werkzeug.exceptions import HTTPException - - def application(*args): - request = cls(args[-2]) - with request: - try: - resp = f(*args[:-2] + (request,)) - except HTTPException as e: - resp = e.get_response(args[-2]) - return resp(*args[-2:]) - - return update_wrapper(application, f) - - def _get_file_stream(self, total_content_length, content_type, filename=None, - content_length=None): - """Called to get a stream for the file upload. - - This must provide a file-like class with `read()`, `readline()` - and `seek()` methods that is both writeable and readable. - - The default implementation returns a temporary file if the total - content length is higher than 500KB. Because many browsers do not - provide a content length for the files only the total content - length matters. - - :param total_content_length: the total content length of all the - data in the request combined. This value - is guaranteed to be there. - :param content_type: the mimetype of the uploaded file. - :param filename: the filename of the uploaded file. May be `None`. - :param content_length: the length of this file. This value is usually - not provided because webbrowsers do not provide - this value. - """ - return default_stream_factory( - total_content_length=total_content_length, - content_type=content_type, - filename=filename, - content_length=content_length) - - @property - def want_form_data_parsed(self): - """Returns True if the request method carries content. As of - Werkzeug 0.9 this will be the case if a content type is transmitted. - - .. versionadded:: 0.8 - """ - return bool(self.environ.get('CONTENT_TYPE')) - - def make_form_data_parser(self): - """Creates the form data parser. Instantiates the - :attr:`form_data_parser_class` with some parameters. - - .. versionadded:: 0.8 - """ - return self.form_data_parser_class(self._get_file_stream, - self.charset, - self.encoding_errors, - self.max_form_memory_size, - self.max_content_length, - self.parameter_storage_class) - - def _load_form_data(self): - """Method used internally to retrieve submitted data. After calling - this sets `form` and `files` on the request object to multi dicts - filled with the incoming form data. As a matter of fact the input - stream will be empty afterwards. You can also call this method to - force the parsing of the form data. - - .. versionadded:: 0.8 - """ - # abort early if we have already consumed the stream - if 'form' in self.__dict__: - return - - _assert_not_shallow(self) - - if self.want_form_data_parsed: - content_type = self.environ.get('CONTENT_TYPE', '') - content_length = get_content_length(self.environ) - mimetype, options = parse_options_header(content_type) - parser = self.make_form_data_parser() - data = parser.parse(self._get_stream_for_parsing(), - mimetype, content_length, options) - else: - data = (self.stream, self.parameter_storage_class(), - self.parameter_storage_class()) - - # inject the values into the instance dict so that we bypass - # our cached_property non-data descriptor. - d = self.__dict__ - d['stream'], d['form'], d['files'] = data - - def _get_stream_for_parsing(self): - """This is the same as accessing :attr:`stream` with the difference - that if it finds cached data from calling :meth:`get_data` first it - will create a new stream out of the cached data. - - .. 
versionadded:: 0.9.3 - """ - cached_data = getattr(self, '_cached_data', None) - if cached_data is not None: - return BytesIO(cached_data) - return self.stream - - def close(self): - """Closes associated resources of this request object. This - closes all file handles explicitly. You can also use the request - object in a with statement which will automatically close it. - - .. versionadded:: 0.9 - """ - files = self.__dict__.get('files') - for key, value in iter_multi_items(files or ()): - value.close() - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, tb): - self.close() - - @cached_property - def stream(self): - """ - If the incoming form data was not encoded with a known mimetype - the data is stored unmodified in this stream for consumption. Most - of the time it is a better idea to use :attr:`data` which will give - you that data as a string. The stream only returns the data once. - - Unlike :attr:`input_stream` this stream is properly guarded that you - can't accidentally read past the length of the input. Werkzeug will - internally always refer to this stream to read data which makes it - possible to wrap this object with a stream that does filtering. - - .. versionchanged:: 0.9 - This stream is now always available but might be consumed by the - form parser later on. Previously the stream was only set if no - parsing happened. - """ - _assert_not_shallow(self) - return get_input_stream(self.environ) - - input_stream = environ_property('wsgi.input', """ - The WSGI input stream. - - In general it's a bad idea to use this one because you can easily read past - the boundary. Use the :attr:`stream` instead. - """) - - @cached_property - def args(self): - """The parsed URL parameters (the part in the URL after the question - mark). - - By default an - :class:`~werkzeug.datastructures.ImmutableMultiDict` - is returned from this function. This can be changed by setting - :attr:`parameter_storage_class` to a different type. This might - be necessary if the order of the form data is important. - """ - return url_decode(wsgi_get_bytes(self.environ.get('QUERY_STRING', '')), - self.url_charset, errors=self.encoding_errors, - cls=self.parameter_storage_class) - - @cached_property - def data(self): - """ - Contains the incoming request data as string in case it came with - a mimetype Werkzeug does not handle. - """ - - if self.disable_data_descriptor: - raise AttributeError('data descriptor is disabled') - # XXX: this should eventually be deprecated. - - # We trigger form data parsing first which means that the descriptor - # will not cache the data that would otherwise be .form or .files - # data. This restores the behavior that was there in Werkzeug - # before 0.9. New code should use :meth:`get_data` explicitly as - # this will make behavior explicit. - return self.get_data(parse_form_data=True) - - def get_data(self, cache=True, as_text=False, parse_form_data=False): - """This reads the buffered incoming data from the client into one - bytestring. By default this is cached but that behavior can be - changed by setting `cache` to `False`. - - Usually it's a bad idea to call this method without checking the - content length first as a client could send dozens of megabytes or more - to cause memory problems on the server. - - Note that if the form data was already parsed this method will not - return anything as form data parsing does not cache the data like - this method does. To implicitly invoke form data parsing function - set `parse_form_data` to `True`. 
When this is done the return value - of this method will be an empty string if the form parser handles - the data. This generally is not necessary as if the whole data is - cached (which is the default) the form parser will used the cached - data to parse the form data. Please be generally aware of checking - the content length first in any case before calling this method - to avoid exhausting server memory. - - If `as_text` is set to `True` the return value will be a decoded - unicode string. - - .. versionadded:: 0.9 - """ - rv = getattr(self, '_cached_data', None) - if rv is None: - if parse_form_data: - self._load_form_data() - rv = self.stream.read() - if cache: - self._cached_data = rv - if as_text: - rv = rv.decode(self.charset, self.encoding_errors) - return rv - - @cached_property - def form(self): - """The form parameters. By default an - :class:`~werkzeug.datastructures.ImmutableMultiDict` - is returned from this function. This can be changed by setting - :attr:`parameter_storage_class` to a different type. This might - be necessary if the order of the form data is important. - - Please keep in mind that file uploads will not end up here, but instead - in the :attr:`files` attribute. - - .. versionchanged:: 0.9 - - Previous to Werkzeug 0.9 this would only contain form data for POST - and PUT requests. - """ - self._load_form_data() - return self.form - - @cached_property - def values(self): - """A :class:`werkzeug.datastructures.CombinedMultiDict` that combines - :attr:`args` and :attr:`form`.""" - args = [] - for d in self.args, self.form: - if not isinstance(d, MultiDict): - d = MultiDict(d) - args.append(d) - return CombinedMultiDict(args) - - @cached_property - def files(self): - """:class:`~werkzeug.datastructures.MultiDict` object containing - all uploaded files. Each key in :attr:`files` is the name from the - ````. Each value in :attr:`files` is a - Werkzeug :class:`~werkzeug.datastructures.FileStorage` object. - - It basically behaves like a standard file object you know from Python, - with the difference that it also has a - :meth:`~werkzeug.datastructures.FileStorage.save` function that can - store the file on the filesystem. - - Note that :attr:`files` will only contain data if the request method was - POST, PUT or PATCH and the ``
<form>
    `` that posted to the request had - ``enctype="multipart/form-data"``. It will be empty otherwise. - - See the :class:`~werkzeug.datastructures.MultiDict` / - :class:`~werkzeug.datastructures.FileStorage` documentation for - more details about the used data structure. - """ - self._load_form_data() - return self.files - - @cached_property - def cookies(self): - """A :class:`dict` with the contents of all cookies transmitted with - the request.""" - return parse_cookie(self.environ, self.charset, - self.encoding_errors, - cls=self.dict_storage_class) - - @cached_property - def headers(self): - """The headers from the WSGI environ as immutable - :class:`~werkzeug.datastructures.EnvironHeaders`. - """ - return EnvironHeaders(self.environ) - - @cached_property - def path(self): - """Requested path as unicode. This works a bit like the regular path - info in the WSGI environment but will always include a leading slash, - even if the URL root is accessed. - """ - raw_path = wsgi_decoding_dance(self.environ.get('PATH_INFO') or '', - self.charset, self.encoding_errors) - return '/' + raw_path.lstrip('/') - - @cached_property - def full_path(self): - """Requested path as unicode, including the query string.""" - return self.path + u'?' + to_unicode(self.query_string, self.url_charset) - - @cached_property - def script_root(self): - """The root path of the script without the trailing slash.""" - raw_path = wsgi_decoding_dance(self.environ.get('SCRIPT_NAME') or '', - self.charset, self.encoding_errors) - return raw_path.rstrip('/') - - @cached_property - def url(self): - """The reconstructed current URL as IRI. - See also: :attr:`trusted_hosts`. - """ - return get_current_url(self.environ, - trusted_hosts=self.trusted_hosts) - - @cached_property - def base_url(self): - """Like :attr:`url` but without the querystring - See also: :attr:`trusted_hosts`. - """ - return get_current_url(self.environ, strip_querystring=True, - trusted_hosts=self.trusted_hosts) - - @cached_property - def url_root(self): - """The full URL root (with hostname), this is the application - root as IRI. - See also: :attr:`trusted_hosts`. - """ - return get_current_url(self.environ, True, - trusted_hosts=self.trusted_hosts) - - @cached_property - def host_url(self): - """Just the host with scheme as IRI. - See also: :attr:`trusted_hosts`. - """ - return get_current_url(self.environ, host_only=True, - trusted_hosts=self.trusted_hosts) - - @cached_property - def host(self): - """Just the host including the port if available. - See also: :attr:`trusted_hosts`. - """ - return get_host(self.environ, trusted_hosts=self.trusted_hosts) - - query_string = environ_property( - 'QUERY_STRING', '', read_only=True, - load_func=wsgi_get_bytes, doc='The URL parameters as raw bytestring.') - method = environ_property( - 'REQUEST_METHOD', 'GET', read_only=True, - load_func=lambda x: x.upper(), - doc="The request method. (For example ``'GET'`` or ``'POST'``).") - - @cached_property - def access_route(self): - """If a forwarded header exists this is a list of all ip addresses - from the client ip to the last proxy server. 
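Editorial sketch, not from the deleted file: a few of the request properties documented above, exercised through ``from_values``; the URL and forwarding header are invented::

    from werkzeug.wrappers import Request

    req = Request.from_values(
        '/search?q=werkzeug',
        base_url='http://example.com/app',
        headers={'X-Forwarded-For': '10.0.0.1, 10.0.0.2'},
    )
    assert req.method == 'GET'
    assert req.path == u'/search'
    assert req.args['q'] == u'werkzeug'
    assert req.script_root == u'/app'
    assert list(req.access_route) == ['10.0.0.1', '10.0.0.2']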
- """ - if 'HTTP_X_FORWARDED_FOR' in self.environ: - addr = self.environ['HTTP_X_FORWARDED_FOR'].split(',') - return self.list_storage_class([x.strip() for x in addr]) - elif 'REMOTE_ADDR' in self.environ: - return self.list_storage_class([self.environ['REMOTE_ADDR']]) - return self.list_storage_class() - - @property - def remote_addr(self): - """The remote address of the client.""" - return self.environ.get('REMOTE_ADDR') - - remote_user = environ_property('REMOTE_USER', doc=''' - If the server supports user authentication, and the script is - protected, this attribute contains the username the user has - authenticated as.''') - - scheme = environ_property('wsgi.url_scheme', doc=''' - URL scheme (http or https). - - .. versionadded:: 0.7''') - - @property - def is_xhr(self): - """True if the request was triggered via a JavaScript XMLHttpRequest. - This only works with libraries that support the ``X-Requested-With`` - header and set it to "XMLHttpRequest". Libraries that do that are - prototype, jQuery and Mochikit and probably some more. - - .. deprecated:: 0.13 - ``X-Requested-With`` is not standard and is unreliable. - """ - warn(DeprecationWarning( - 'Request.is_xhr is deprecated. Given that the X-Requested-With ' - 'header is not a part of any spec, it is not reliable' - ), stacklevel=2) - return self.environ.get( - 'HTTP_X_REQUESTED_WITH', '' - ).lower() == 'xmlhttprequest' - - is_secure = property(lambda x: x.environ['wsgi.url_scheme'] == 'https', - doc='`True` if the request is secure.') - is_multithread = environ_property('wsgi.multithread', doc=''' - boolean that is `True` if the application is served by - a multithreaded WSGI server.''') - is_multiprocess = environ_property('wsgi.multiprocess', doc=''' - boolean that is `True` if the application is served by - a WSGI server that spawns multiple processes.''') - is_run_once = environ_property('wsgi.run_once', doc=''' - boolean that is `True` if the application will be executed only - once in a process lifetime. This is the case for CGI for example, - but it's not guaranteed that the execution only happens one time.''') - - -class BaseResponse(object): - - """Base response class. The most important fact about a response object - is that it's a regular WSGI application. It's initialized with a couple - of response parameters (headers, body, status code etc.) and will start a - valid WSGI response when called with the environ and start response - callable. - - Because it's a WSGI application itself processing usually ends before the - actual response is sent to the server. This helps debugging systems - because they can catch all the exceptions before responses are started. - - Here a small example WSGI application that takes advantage of the - response objects:: - - from werkzeug.wrappers import BaseResponse as Response - - def index(): - return Response('Index page') - - def application(environ, start_response): - path = environ.get('PATH_INFO') or '/' - if path == '/': - response = index() - else: - response = Response('Not Found', status=404) - return response(environ, start_response) - - Like :class:`BaseRequest` which object is lacking a lot of functionality - implemented in mixins. This gives you a better control about the actual - API of your response objects, so you can create subclasses and add custom - functionality. A full featured response object is available as - :class:`Response` which implements a couple of useful mixins. 
- - To enforce a new type of already existing responses you can use the - :meth:`force_type` method. This is useful if you're working with different - subclasses of response objects and you want to post process them with a - known interface. - - Per default the response object will assume all the text data is `utf-8` - encoded. Please refer to `the unicode chapter `_ for more - details about customizing the behavior. - - Response can be any kind of iterable or string. If it's a string it's - considered being an iterable with one item which is the string passed. - Headers can be a list of tuples or a - :class:`~werkzeug.datastructures.Headers` object. - - Special note for `mimetype` and `content_type`: For most mime types - `mimetype` and `content_type` work the same, the difference affects - only 'text' mimetypes. If the mimetype passed with `mimetype` is a - mimetype starting with `text/`, the charset parameter of the response - object is appended to it. In contrast the `content_type` parameter is - always added as header unmodified. - - .. versionchanged:: 0.5 - the `direct_passthrough` parameter was added. - - :param response: a string or response iterable. - :param status: a string with a status or an integer with the status code. - :param headers: a list of headers or a - :class:`~werkzeug.datastructures.Headers` object. - :param mimetype: the mimetype for the response. See notice above. - :param content_type: the content type for the response. See notice above. - :param direct_passthrough: if set to `True` :meth:`iter_encoded` is not - called before iteration which makes it - possible to pass special iterators through - unchanged (see :func:`wrap_file` for more - details.) - """ - - #: the charset of the response. - charset = 'utf-8' - - #: the default status if none is provided. - default_status = 200 - - #: the default mimetype if none is provided. - default_mimetype = 'text/plain' - - #: if set to `False` accessing properties on the response object will - #: not try to consume the response iterator and convert it into a list. - #: - #: .. versionadded:: 0.6.2 - #: - #: That attribute was previously called `implicit_seqence_conversion`. - #: (Notice the typo). If you did use this feature, you have to adapt - #: your code to the name change. - implicit_sequence_conversion = True - - #: Should this response object correct the location header to be RFC - #: conformant? This is true by default. - #: - #: .. versionadded:: 0.8 - autocorrect_location_header = True - - #: Should this response object automatically set the content-length - #: header if possible? This is true by default. - #: - #: .. versionadded:: 0.8 - automatically_set_content_length = True - - #: Warn if a cookie header exceeds this size. The default, 4093, should be - #: safely `supported by most browsers `_. A cookie larger than - #: this size will still be sent, but it may be ignored or handled - #: incorrectly by some browsers. Set to 0 to disable this check. - #: - #: .. versionadded:: 0.13 - #: - #: .. 
_`cookie`: http://browsercookielimits.squawky.net/ - max_cookie_size = 4093 - - def __init__(self, response=None, status=None, headers=None, - mimetype=None, content_type=None, direct_passthrough=False): - if isinstance(headers, Headers): - self.headers = headers - elif not headers: - self.headers = Headers() - else: - self.headers = Headers(headers) - - if content_type is None: - if mimetype is None and 'content-type' not in self.headers: - mimetype = self.default_mimetype - if mimetype is not None: - mimetype = get_content_type(mimetype, self.charset) - content_type = mimetype - if content_type is not None: - self.headers['Content-Type'] = content_type - if status is None: - status = self.default_status - if isinstance(status, integer_types): - self.status_code = status - else: - self.status = status - - self.direct_passthrough = direct_passthrough - self._on_close = [] - - # we set the response after the headers so that if a class changes - # the charset attribute, the data is set in the correct charset. - if response is None: - self.response = [] - elif isinstance(response, (text_type, bytes, bytearray)): - self.set_data(response) - else: - self.response = response - - def call_on_close(self, func): - """Adds a function to the internal list of functions that should - be called as part of closing down the response. Since 0.7 this - function also returns the function that was passed so that this - can be used as a decorator. - - .. versionadded:: 0.6 - """ - self._on_close.append(func) - return func - - def __repr__(self): - if self.is_sequence: - body_info = '%d bytes' % sum(map(len, self.iter_encoded())) - else: - body_info = 'streamed' if self.is_streamed else 'likely-streamed' - return '<%s %s [%s]>' % ( - self.__class__.__name__, - body_info, - self.status - ) - - @classmethod - def force_type(cls, response, environ=None): - """Enforce that the WSGI response is a response object of the current - type. Werkzeug will use the :class:`BaseResponse` internally in many - situations like the exceptions. If you call :meth:`get_response` on an - exception you will get back a regular :class:`BaseResponse` object, even - if you are using a custom subclass. - - This method can enforce a given response type, and it will also - convert arbitrary WSGI callables into response objects if an environ - is provided:: - - # convert a Werkzeug response object into an instance of the - # MyResponseClass subclass. - response = MyResponseClass.force_type(response) - - # convert any WSGI application into a response object - response = MyResponseClass.force_type(response, environ) - - This is especially useful if you want to post-process responses in - the main dispatcher and use functionality provided by your subclass. - - Keep in mind that this will modify response objects in place if - possible! - - :param response: a response object or wsgi application. - :param environ: a WSGI environment object. - :return: a response object. - """ - if not isinstance(response, BaseResponse): - if environ is None: - raise TypeError('cannot convert WSGI application into ' - 'response objects without an environ') - response = BaseResponse(*_run_wsgi_app(response, environ)) - response.__class__ = cls - return response - - @classmethod - def from_app(cls, app, environ, buffered=False): - """Create a new response object from an application output. This - works best if you pass it an application that returns a generator all - the time. 
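A brief illustrative sketch (editorial, not part of the original file) of ``from_app`` capturing the output of a throwaway WSGI callable; ``hello_app`` is invented for the example and ``buffered=True`` forces the iterable to be consumed up front::

    from werkzeug.test import create_environ
    from werkzeug.wrappers import BaseResponse

    def hello_app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'hello']

    resp = BaseResponse.from_app(hello_app, create_environ('/'), buffered=True)
    assert resp.status_code == 200
    assert resp.get_data() == b'hello'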
Sometimes applications may use the `write()` callable - returned by the `start_response` function. This tries to resolve such - edge cases automatically. But if you don't get the expected output - you should set `buffered` to `True` which enforces buffering. - - :param app: the WSGI application to execute. - :param environ: the WSGI environment to execute against. - :param buffered: set to `True` to enforce buffering. - :return: a response object. - """ - return cls(*_run_wsgi_app(app, environ, buffered)) - - def _get_status_code(self): - return self._status_code - - def _set_status_code(self, code): - self._status_code = code - try: - self._status = '%d %s' % (code, HTTP_STATUS_CODES[code].upper()) - except KeyError: - self._status = '%d UNKNOWN' % code - status_code = property(_get_status_code, _set_status_code, - doc='The HTTP Status code as number') - del _get_status_code, _set_status_code - - def _get_status(self): - return self._status - - def _set_status(self, value): - try: - self._status = to_native(value) - except AttributeError: - raise TypeError('Invalid status argument') - - try: - self._status_code = int(self._status.split(None, 1)[0]) - except ValueError: - self._status_code = 0 - self._status = '0 %s' % self._status - except IndexError: - raise ValueError('Empty status argument') - status = property(_get_status, _set_status, doc='The HTTP Status code') - del _get_status, _set_status - - def get_data(self, as_text=False): - """The string representation of the request body. Whenever you call - this property the request iterable is encoded and flattened. This - can lead to unwanted behavior if you stream big data. - - This behavior can be disabled by setting - :attr:`implicit_sequence_conversion` to `False`. - - If `as_text` is set to `True` the return value will be a decoded - unicode string. - - .. versionadded:: 0.9 - """ - self._ensure_sequence() - rv = b''.join(self.iter_encoded()) - if as_text: - rv = rv.decode(self.charset) - return rv - - def set_data(self, value): - """Sets a new string as response. The value set must either by a - unicode or bytestring. If a unicode string is set it's encoded - automatically to the charset of the response (utf-8 by default). - - .. versionadded:: 0.9 - """ - # if an unicode string is set, it's encoded directly so that we - # can set the content length - if isinstance(value, text_type): - value = value.encode(self.charset) - else: - value = bytes(value) - self.response = [value] - if self.automatically_set_content_length: - self.headers['Content-Length'] = str(len(value)) - - data = property(get_data, set_data, doc=''' - A descriptor that calls :meth:`get_data` and :meth:`set_data`. This - should not be used and will eventually get deprecated. - ''') - - def calculate_content_length(self): - """Returns the content length if available or `None` otherwise.""" - try: - self._ensure_sequence() - except RuntimeError: - return None - return sum(len(x) for x in self.iter_encoded()) - - def _ensure_sequence(self, mutable=False): - """This method can be called by methods that need a sequence. If - `mutable` is true, it will also ensure that the response sequence - is a standard Python list. - - .. versionadded:: 0.6 - """ - if self.is_sequence: - # if we need a mutable object, we ensure it's a list. 
- if mutable and not isinstance(self.response, list): - self.response = list(self.response) - return - if self.direct_passthrough: - raise RuntimeError('Attempted implicit sequence conversion ' - 'but the response object is in direct ' - 'passthrough mode.') - if not self.implicit_sequence_conversion: - raise RuntimeError('The response object required the iterable ' - 'to be a sequence, but the implicit ' - 'conversion was disabled. Call ' - 'make_sequence() yourself.') - self.make_sequence() - - def make_sequence(self): - """Converts the response iterator in a list. By default this happens - automatically if required. If `implicit_sequence_conversion` is - disabled, this method is not automatically called and some properties - might raise exceptions. This also encodes all the items. - - .. versionadded:: 0.6 - """ - if not self.is_sequence: - # if we consume an iterable we have to ensure that the close - # method of the iterable is called if available when we tear - # down the response - close = getattr(self.response, 'close', None) - self.response = list(self.iter_encoded()) - if close is not None: - self.call_on_close(close) - - def iter_encoded(self): - """Iter the response encoded with the encoding of the response. - If the response object is invoked as WSGI application the return - value of this method is used as application iterator unless - :attr:`direct_passthrough` was activated. - """ - if __debug__: - _warn_if_string(self.response) - # Encode in a separate function so that self.response is fetched - # early. This allows us to wrap the response with the return - # value from get_app_iter or iter_encoded. - return _iter_encoded(self.response, self.charset) - - def set_cookie(self, key, value='', max_age=None, expires=None, - path='/', domain=None, secure=False, httponly=False, - samesite=None): - """Sets a cookie. The parameters are the same as in the cookie `Morsel` - object in the Python standard library but it accepts unicode data, too. - - A warning is raised if the size of the cookie header exceeds - :attr:`max_cookie_size`, but the header will still be set. - - :param key: the key (name) of the cookie to be set. - :param value: the value of the cookie. - :param max_age: should be a number of seconds, or `None` (default) if - the cookie should last only as long as the client's - browser session. - :param expires: should be a `datetime` object or UNIX timestamp. - :param path: limits the cookie to a given path, per default it will - span the whole domain. - :param domain: if you want to set a cross-domain cookie. For example, - ``domain=".example.com"`` will set a cookie that is - readable by the domain ``www.example.com``, - ``foo.example.com`` etc. Otherwise, a cookie will only - be readable by the domain that set it. - :param secure: If `True`, the cookie will only be available via HTTPS - :param httponly: disallow JavaScript to access the cookie. This is an - extension to the cookie standard and probably not - supported by all browsers. - :param samesite: Limits the scope of the cookie such that it will only - be attached to requests if those requests are - "same-site". - """ - self.headers.add('Set-Cookie', dump_cookie( - key, - value=value, - max_age=max_age, - expires=expires, - path=path, - domain=domain, - secure=secure, - httponly=httponly, - charset=self.charset, - max_size=self.max_cookie_size, - samesite=samesite - )) - - def delete_cookie(self, key, path='/', domain=None): - """Delete a cookie. Fails silently if key doesn't exist. 
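As an aside, a short sketch of the cookie helpers documented above; it is not part of the original module and assumes the full `Response` class exported by `werkzeug.wrappers`::

    from werkzeug.wrappers import Response

    resp = Response('logged in')
    # keyword arguments mirror the set_cookie() parameters documented above
    resp.set_cookie('session_id', 'abc123', max_age=3600, path='/',
                    secure=True, httponly=True, samesite='Lax')
    print(resp.headers.getlist('Set-Cookie'))

    # delete_cookie() just emits an already expired cookie for the same key
    resp.delete_cookie('session_id')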
- - :param key: the key (name) of the cookie to be deleted. - :param path: if the cookie that should be deleted was limited to a - path, the path has to be defined here. - :param domain: if the cookie that should be deleted was limited to a - domain, that domain has to be defined here. - """ - self.set_cookie(key, expires=0, max_age=0, path=path, domain=domain) - - @property - def is_streamed(self): - """If the response is streamed (the response is not an iterable with - a length information) this property is `True`. In this case streamed - means that there is no information about the number of iterations. - This is usually `True` if a generator is passed to the response object. - - This is useful for checking before applying some sort of post - filtering that should not take place for streamed responses. - """ - try: - len(self.response) - except (TypeError, AttributeError): - return True - return False - - @property - def is_sequence(self): - """If the iterator is buffered, this property will be `True`. A - response object will consider an iterator to be buffered if the - response attribute is a list or tuple. - - .. versionadded:: 0.6 - """ - return isinstance(self.response, (tuple, list)) - - def close(self): - """Close the wrapped response if possible. You can also use the object - in a with statement which will automatically close it. - - .. versionadded:: 0.9 - Can now be used in a with statement. - """ - if hasattr(self.response, 'close'): - self.response.close() - for func in self._on_close: - func() - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, tb): - self.close() - - def freeze(self): - """Call this method if you want to make your response object ready for - being pickled. This buffers the generator if there is one. It will - also set the `Content-Length` header to the length of the body. - - .. versionchanged:: 0.6 - The `Content-Length` header is now set. - """ - # we explicitly set the length to a list of the *encoded* response - # iterator. Even if the implicit sequence conversion is disabled. - self.response = list(self.iter_encoded()) - self.headers['Content-Length'] = str(sum(map(len, self.response))) - - def get_wsgi_headers(self, environ): - """This is automatically called right before the response is started - and returns headers modified for the given environment. It returns a - copy of the headers from the response with some modifications applied - if necessary. - - For example the location header (if present) is joined with the root - URL of the environment. Also the content length is automatically set - to zero here for certain status codes. - - .. versionchanged:: 0.6 - Previously that function was called `fix_headers` and modified - the response object in place. Also since 0.6, IRIs in location - and content-location headers are handled properly. - - Also starting with 0.6, Werkzeug will attempt to set the content - length if it is able to figure it out on its own. This is the - case if all the strings in the response iterable are already - encoded and the iterable is buffered. - - :param environ: the WSGI environment of the request. - :return: returns a new :class:`~werkzeug.datastructures.Headers` - object. - """ - headers = Headers(self.headers) - location = None - content_location = None - content_length = None - status = self.status_code - - # iterate over the headers to find all values in one go. Because - # get_wsgi_headers is used each response that gives us a tiny - # speedup. 
- for key, value in headers: - ikey = key.lower() - if ikey == u'location': - location = value - elif ikey == u'content-location': - content_location = value - elif ikey == u'content-length': - content_length = value - - # make sure the location header is an absolute URL - if location is not None: - old_location = location - if isinstance(location, text_type): - # Safe conversion is necessary here as we might redirect - # to a broken URI scheme (for instance itms-services). - location = iri_to_uri(location, safe_conversion=True) - - if self.autocorrect_location_header: - current_url = get_current_url(environ, root_only=True) - if isinstance(current_url, text_type): - current_url = iri_to_uri(current_url) - location = url_join(current_url, location) - if location != old_location: - headers['Location'] = location - - # make sure the content location is a URL - if content_location is not None and \ - isinstance(content_location, text_type): - headers['Content-Location'] = iri_to_uri(content_location) - - if status in (304, 412): - remove_entity_headers(headers) - - # if we can determine the content length automatically, we - # should try to do that. But only if this does not involve - # flattening the iterator or encoding of unicode strings in - # the response. We however should not do that if we have a 304 - # response. - if self.automatically_set_content_length and \ - self.is_sequence and content_length is None and \ - status not in (204, 304) and \ - not (100 <= status < 200): - try: - content_length = sum(len(to_bytes(x, 'ascii')) - for x in self.response) - except UnicodeError: - # aha, something non-bytestringy in there, too bad, we - # can't safely figure out the length of the response. - pass - else: - headers['Content-Length'] = str(content_length) - - return headers - - def get_app_iter(self, environ): - """Returns the application iterator for the given environ. Depending - on the request method and the current status code the return value - might be an empty response rather than the one from the response. - - If the request method is `HEAD` or the status code is in a range - where the HTTP specification requires an empty response, an empty - iterable is returned. - - .. versionadded:: 0.6 - - :param environ: the WSGI environment of the request. - :return: a response iterable. - """ - status = self.status_code - if environ['REQUEST_METHOD'] == 'HEAD' or \ - 100 <= status < 200 or status in (204, 304, 412): - iterable = () - elif self.direct_passthrough: - if __debug__: - _warn_if_string(self.response) - return self.response - else: - iterable = self.iter_encoded() - return ClosingIterator(iterable, self.close) - - def get_wsgi_response(self, environ): - """Returns the final WSGI response as tuple. The first item in - the tuple is the application iterator, the second the status and - the third the list of headers. The response returned is created - specially for the given environment. For example if the request - method in the WSGI environment is ``'HEAD'`` the response will - be empty and only the headers and status code will be present. - - .. versionadded:: 0.6 - - :param environ: the WSGI environment of the request. - :return: an ``(app_iter, status, headers)`` tuple. - """ - headers = self.get_wsgi_headers(environ) - app_iter = self.get_app_iter(environ) - return app_iter, self.status, headers.to_wsgi_list() - - def __call__(self, environ, start_response): - """Process this response as WSGI application. - - :param environ: the WSGI environment. 
- :param start_response: the response callable provided by the WSGI - server. - :return: an application iterator - """ - app_iter, status, headers = self.get_wsgi_response(environ) - start_response(status, headers) - return app_iter - - -class AcceptMixin(object): - - """A mixin for classes with an :attr:`~BaseResponse.environ` attribute - to get all the HTTP accept headers as - :class:`~werkzeug.datastructures.Accept` objects (or subclasses - thereof). - """ - - @cached_property - def accept_mimetypes(self): - """List of mimetypes this client supports as - :class:`~werkzeug.datastructures.MIMEAccept` object. - """ - return parse_accept_header(self.environ.get('HTTP_ACCEPT'), MIMEAccept) - - @cached_property - def accept_charsets(self): - """List of charsets this client supports as - :class:`~werkzeug.datastructures.CharsetAccept` object. - """ - return parse_accept_header(self.environ.get('HTTP_ACCEPT_CHARSET'), - CharsetAccept) - - @cached_property - def accept_encodings(self): - """List of encodings this client accepts. Encodings in a HTTP term - are compression encodings such as gzip. For charsets have a look at - :attr:`accept_charset`. - """ - return parse_accept_header(self.environ.get('HTTP_ACCEPT_ENCODING')) - - @cached_property - def accept_languages(self): - """List of languages this client accepts as - :class:`~werkzeug.datastructures.LanguageAccept` object. - - .. versionchanged 0.5 - In previous versions this was a regular - :class:`~werkzeug.datastructures.Accept` object. - """ - return parse_accept_header(self.environ.get('HTTP_ACCEPT_LANGUAGE'), - LanguageAccept) - - -class ETagRequestMixin(object): - - """Add entity tag and cache descriptors to a request object or object with - a WSGI environment available as :attr:`~BaseRequest.environ`. This not - only provides access to etags but also to the cache control header. - """ - - @cached_property - def cache_control(self): - """A :class:`~werkzeug.datastructures.RequestCacheControl` object - for the incoming cache control headers. - """ - cache_control = self.environ.get('HTTP_CACHE_CONTROL') - return parse_cache_control_header(cache_control, None, - RequestCacheControl) - - @cached_property - def if_match(self): - """An object containing all the etags in the `If-Match` header. - - :rtype: :class:`~werkzeug.datastructures.ETags` - """ - return parse_etags(self.environ.get('HTTP_IF_MATCH')) - - @cached_property - def if_none_match(self): - """An object containing all the etags in the `If-None-Match` header. - - :rtype: :class:`~werkzeug.datastructures.ETags` - """ - return parse_etags(self.environ.get('HTTP_IF_NONE_MATCH')) - - @cached_property - def if_modified_since(self): - """The parsed `If-Modified-Since` header as datetime object.""" - return parse_date(self.environ.get('HTTP_IF_MODIFIED_SINCE')) - - @cached_property - def if_unmodified_since(self): - """The parsed `If-Unmodified-Since` header as datetime object.""" - return parse_date(self.environ.get('HTTP_IF_UNMODIFIED_SINCE')) - - @cached_property - def if_range(self): - """The parsed `If-Range` header. - - .. versionadded:: 0.7 - - :rtype: :class:`~werkzeug.datastructures.IfRange` - """ - return parse_if_range_header(self.environ.get('HTTP_IF_RANGE')) - - @cached_property - def range(self): - """The parsed `Range` header. - - .. 
versionadded:: 0.7 - - :rtype: :class:`~werkzeug.datastructures.Range` - """ - return parse_range_header(self.environ.get('HTTP_RANGE')) - - -class UserAgentMixin(object): - - """Adds a `user_agent` attribute to the request object which contains the - parsed user agent of the browser that triggered the request as a - :class:`~werkzeug.useragents.UserAgent` object. - """ - - @cached_property - def user_agent(self): - """The current user agent.""" - from werkzeug.useragents import UserAgent - return UserAgent(self.environ) - - -class AuthorizationMixin(object): - - """Adds an :attr:`authorization` property that represents the parsed - value of the `Authorization` header as - :class:`~werkzeug.datastructures.Authorization` object. - """ - - @cached_property - def authorization(self): - """The `Authorization` object in parsed form.""" - header = self.environ.get('HTTP_AUTHORIZATION') - return parse_authorization_header(header) - - -class StreamOnlyMixin(object): - - """If mixed in before the request object this will change the bahavior - of it to disable handling of form parsing. This disables the - :attr:`files`, :attr:`form` attributes and will just provide a - :attr:`stream` attribute that however is always available. - - .. versionadded:: 0.9 - """ - - disable_data_descriptor = True - want_form_data_parsed = False - - -class ETagResponseMixin(object): - - """Adds extra functionality to a response object for etag and cache - handling. This mixin requires an object with at least a `headers` - object that implements a dict like interface similar to - :class:`~werkzeug.datastructures.Headers`. - - If you want the :meth:`freeze` method to automatically add an etag, you - have to mixin this method before the response base class. The default - response class does not do that. - """ - - @property - def cache_control(self): - """The Cache-Control general-header field is used to specify - directives that MUST be obeyed by all caching mechanisms along the - request/response chain. - """ - def on_update(cache_control): - if not cache_control and 'cache-control' in self.headers: - del self.headers['cache-control'] - elif cache_control: - self.headers['Cache-Control'] = cache_control.to_header() - return parse_cache_control_header(self.headers.get('cache-control'), - on_update, - ResponseCacheControl) - - def _wrap_response(self, start, length): - """Wrap existing Response in case of Range Request context.""" - if self.status_code == 206: - self.response = _RangeWrapper(self.response, start, length) - - def _is_range_request_processable(self, environ): - """Return ``True`` if `Range` header is present and if underlying - resource is considered unchanged when compared with `If-Range` header. - """ - return ( - 'HTTP_IF_RANGE' not in environ - or not is_resource_modified( - environ, self.headers.get('etag'), None, - self.headers.get('last-modified'), ignore_if_range=False - ) - ) and 'HTTP_RANGE' in environ - - def _process_range_request(self, environ, complete_length=None, accept_ranges=None): - """Handle Range Request related headers (RFC7233). If `Accept-Ranges` - header is valid, and Range Request is processable, we set the headers - as described by the RFC, and wrap the underlying response in a - RangeWrapper. - - Returns ``True`` if Range Request can be fulfilled, ``False`` otherwise. - - :raises: :class:`~werkzeug.exceptions.RequestedRangeNotSatisfiable` - if `Range` header could not be parsed or satisfied. 
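The usual entry point for the range and conditional machinery described above is `make_conditional` (defined below). An illustrative sketch, not part of the original module, assuming Werkzeug 0.14 and the `create_environ` helper from `werkzeug.test`::

    from werkzeug.test import create_environ
    from werkzeug.wrappers import Response

    data = b'0123456789' * 100  # 1000 bytes of payload

    # a client asking only for the first 100 bytes gets a 206 partial response
    environ = create_environ('/data', headers={'Range': 'bytes=0-99'})
    resp = Response(data, mimetype='application/octet-stream')
    resp.make_conditional(environ, accept_ranges=True, complete_length=len(data))
    assert resp.status_code == 206
    assert resp.headers['Content-Range'] == 'bytes 0-99/1000'

    # a client revalidating with a matching If-None-Match gets 304 Not Modified
    resp = Response(data)
    resp.add_etag()
    environ = create_environ('/data', headers={'If-None-Match': resp.headers['ETag']})
    resp.make_conditional(environ)
    assert resp.status_code == 304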
- """ - from werkzeug.exceptions import RequestedRangeNotSatisfiable - if accept_ranges is None: - return False - self.headers['Accept-Ranges'] = accept_ranges - if not self._is_range_request_processable(environ) or complete_length is None: - return False - parsed_range = parse_range_header(environ.get('HTTP_RANGE')) - if parsed_range is None: - raise RequestedRangeNotSatisfiable(complete_length) - range_tuple = parsed_range.range_for_length(complete_length) - content_range_header = parsed_range.to_content_range_header(complete_length) - if range_tuple is None or content_range_header is None: - raise RequestedRangeNotSatisfiable(complete_length) - content_length = range_tuple[1] - range_tuple[0] - # Be sure not to send 206 response - # if requested range is the full content. - if content_length != complete_length: - self.headers['Content-Length'] = content_length - self.content_range = content_range_header - self.status_code = 206 - self._wrap_response(range_tuple[0], content_length) - return True - return False - - def make_conditional(self, request_or_environ, accept_ranges=False, - complete_length=None): - """Make the response conditional to the request. This method works - best if an etag was defined for the response already. The `add_etag` - method can be used to do that. If called without etag just the date - header is set. - - This does nothing if the request method in the request or environ is - anything but GET or HEAD. - - For optimal performance when handling range requests, it's recommended - that your response data object implements `seekable`, `seek` and `tell` - methods as described by :py:class:`io.IOBase`. Objects returned by - :meth:`~werkzeug.wsgi.wrap_file` automatically implement those methods. - - It does not remove the body of the response because that's something - the :meth:`__call__` function does for us automatically. - - Returns self so that you can do ``return resp.make_conditional(req)`` - but modifies the object in-place. - - :param request_or_environ: a request object or WSGI environment to be - used to make the response conditional - against. - :param accept_ranges: This parameter dictates the value of - `Accept-Ranges` header. If ``False`` (default), - the header is not set. If ``True``, it will be set - to ``"bytes"``. If ``None``, it will be set to - ``"none"``. If it's a string, it will use this - value. - :param complete_length: Will be used only in valid Range Requests. - It will set `Content-Range` complete length - value and compute `Content-Length` real value. - This parameter is mandatory for successful - Range Requests completion. - :raises: :class:`~werkzeug.exceptions.RequestedRangeNotSatisfiable` - if `Range` header could not be parsed or satisfied. - """ - environ = _get_environ(request_or_environ) - if environ['REQUEST_METHOD'] in ('GET', 'HEAD'): - # if the date is not in the headers, add it now. We however - # will not override an already existing header. Unfortunately - # this header will be overriden by many WSGI servers including - # wsgiref. 
- if 'date' not in self.headers: - self.headers['Date'] = http_date() - accept_ranges = _clean_accept_ranges(accept_ranges) - is206 = self._process_range_request(environ, complete_length, accept_ranges) - if not is206 and not is_resource_modified( - environ, self.headers.get('etag'), None, - self.headers.get('last-modified') - ): - if parse_etags(environ.get('HTTP_IF_MATCH')): - self.status_code = 412 - else: - self.status_code = 304 - if self.automatically_set_content_length and 'content-length' not in self.headers: - length = self.calculate_content_length() - if length is not None: - self.headers['Content-Length'] = length - return self - - def add_etag(self, overwrite=False, weak=False): - """Add an etag for the current response if there is none yet.""" - if overwrite or 'etag' not in self.headers: - self.set_etag(generate_etag(self.get_data()), weak) - - def set_etag(self, etag, weak=False): - """Set the etag, and override the old one if there was one.""" - self.headers['ETag'] = quote_etag(etag, weak) - - def get_etag(self): - """Return a tuple in the form ``(etag, is_weak)``. If there is no - ETag the return value is ``(None, None)``. - """ - return unquote_etag(self.headers.get('ETag')) - - def freeze(self, no_etag=False): - """Call this method if you want to make your response object ready for - pickeling. This buffers the generator if there is one. This also - sets the etag unless `no_etag` is set to `True`. - """ - if not no_etag: - self.add_etag() - super(ETagResponseMixin, self).freeze() - - accept_ranges = header_property('Accept-Ranges', doc=''' - The `Accept-Ranges` header. Even though the name would indicate - that multiple values are supported, it must be one string token only. - - The values ``'bytes'`` and ``'none'`` are common. - - .. versionadded:: 0.7''') - - def _get_content_range(self): - def on_update(rng): - if not rng: - del self.headers['content-range'] - else: - self.headers['Content-Range'] = rng.to_header() - rv = parse_content_range_header(self.headers.get('content-range'), - on_update) - # always provide a content range object to make the descriptor - # more user friendly. It provides an unset() method that can be - # used to remove the header quickly. - if rv is None: - rv = ContentRange(None, None, None, on_update=on_update) - return rv - - def _set_content_range(self, value): - if not value: - del self.headers['content-range'] - elif isinstance(value, string_types): - self.headers['Content-Range'] = value - else: - self.headers['Content-Range'] = value.to_header() - content_range = property(_get_content_range, _set_content_range, doc=''' - The `Content-Range` header as - :class:`~werkzeug.datastructures.ContentRange` object. Even if the - header is not set it wil provide such an object for easier - manipulation. - - .. versionadded:: 0.7''') - del _get_content_range, _set_content_range - - -class ResponseStream(object): - - """A file descriptor like object used by the :class:`ResponseStreamMixin` to - represent the body of the stream. It directly pushes into the response - iterable of the response object. 
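An illustrative sketch, not part of the original module: the full `Response` class mixes in `ResponseStreamMixin` (below), so the body can be written incrementally through its `stream` attribute::

    from werkzeug.wrappers import Response

    resp = Response(mimetype='text/plain')
    resp.stream.write(b'Hello ')   # appends to the response iterable
    resp.stream.write(b'World!')   # and drops any stale Content-Length
    assert resp.get_data() == b'Hello World!'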
- """ - - mode = 'wb+' - - def __init__(self, response): - self.response = response - self.closed = False - - def write(self, value): - if self.closed: - raise ValueError('I/O operation on closed file') - self.response._ensure_sequence(mutable=True) - self.response.response.append(value) - self.response.headers.pop('Content-Length', None) - return len(value) - - def writelines(self, seq): - for item in seq: - self.write(item) - - def close(self): - self.closed = True - - def flush(self): - if self.closed: - raise ValueError('I/O operation on closed file') - - def isatty(self): - if self.closed: - raise ValueError('I/O operation on closed file') - return False - - def tell(self): - self.response._ensure_sequence() - return sum(map(len, self.response.response)) - - @property - def encoding(self): - return self.response.charset - - -class ResponseStreamMixin(object): - - """Mixin for :class:`BaseRequest` subclasses. Classes that inherit from - this mixin will automatically get a :attr:`stream` property that provides - a write-only interface to the response iterable. - """ - - @cached_property - def stream(self): - """The response iterable as write-only stream.""" - return ResponseStream(self) - - -class CommonRequestDescriptorsMixin(object): - - """A mixin for :class:`BaseRequest` subclasses. Request objects that - mix this class in will automatically get descriptors for a couple of - HTTP headers with automatic type conversion. - - .. versionadded:: 0.5 - """ - - content_type = environ_property('CONTENT_TYPE', doc=''' - The Content-Type entity-header field indicates the media type of - the entity-body sent to the recipient or, in the case of the HEAD - method, the media type that would have been sent had the request - been a GET.''') - - @cached_property - def content_length(self): - """The Content-Length entity-header field indicates the size of the - entity-body in bytes or, in the case of the HEAD method, the size of - the entity-body that would have been sent had the request been a - GET. - """ - return get_content_length(self.environ) - - content_encoding = environ_property('HTTP_CONTENT_ENCODING', doc=''' - The Content-Encoding entity-header field is used as a modifier to the - media-type. When present, its value indicates what additional content - codings have been applied to the entity-body, and thus what decoding - mechanisms must be applied in order to obtain the media-type - referenced by the Content-Type header field. - - .. versionadded:: 0.9''') - content_md5 = environ_property('HTTP_CONTENT_MD5', doc=''' - The Content-MD5 entity-header field, as defined in RFC 1864, is an - MD5 digest of the entity-body for the purpose of providing an - end-to-end message integrity check (MIC) of the entity-body. (Note: - a MIC is good for detecting accidental modification of the - entity-body in transit, but is not proof against malicious attacks.) - - .. 
versionadded:: 0.9''') - referrer = environ_property('HTTP_REFERER', doc=''' - The Referer[sic] request-header field allows the client to specify, - for the server's benefit, the address (URI) of the resource from which - the Request-URI was obtained (the "referrer", although the header - field is misspelled).''') - date = environ_property('HTTP_DATE', None, parse_date, doc=''' - The Date general-header field represents the date and time at which - the message was originated, having the same semantics as orig-date - in RFC 822.''') - max_forwards = environ_property('HTTP_MAX_FORWARDS', None, int, doc=''' - The Max-Forwards request-header field provides a mechanism with the - TRACE and OPTIONS methods to limit the number of proxies or gateways - that can forward the request to the next inbound server.''') - - def _parse_content_type(self): - if not hasattr(self, '_parsed_content_type'): - self._parsed_content_type = \ - parse_options_header(self.environ.get('CONTENT_TYPE', '')) - - @property - def mimetype(self): - """Like :attr:`content_type`, but without parameters (eg, without - charset, type etc.) and always lowercase. For example if the content - type is ``text/HTML; charset=utf-8`` the mimetype would be - ``'text/html'``. - """ - self._parse_content_type() - return self._parsed_content_type[0].lower() - - @property - def mimetype_params(self): - """The mimetype parameters as dict. For example if the content - type is ``text/html; charset=utf-8`` the params would be - ``{'charset': 'utf-8'}``. - """ - self._parse_content_type() - return self._parsed_content_type[1] - - @cached_property - def pragma(self): - """The Pragma general-header field is used to include - implementation-specific directives that might apply to any recipient - along the request/response chain. All pragma directives specify - optional behavior from the viewpoint of the protocol; however, some - systems MAY require that behavior be consistent with the directives. - """ - return parse_set_header(self.environ.get('HTTP_PRAGMA', '')) - - -class CommonResponseDescriptorsMixin(object): - - """A mixin for :class:`BaseResponse` subclasses. Response objects that - mix this class in will automatically get descriptors for a couple of - HTTP headers with automatic type conversion. - """ - - def _get_mimetype(self): - ct = self.headers.get('content-type') - if ct: - return ct.split(';')[0].strip() - - def _set_mimetype(self, value): - self.headers['Content-Type'] = get_content_type(value, self.charset) - - def _get_mimetype_params(self): - def on_update(d): - self.headers['Content-Type'] = \ - dump_options_header(self.mimetype, d) - d = parse_options_header(self.headers.get('content-type', ''))[1] - return CallbackDict(d, on_update) - - mimetype = property(_get_mimetype, _set_mimetype, doc=''' - The mimetype (content type without charset etc.)''') - mimetype_params = property(_get_mimetype_params, doc=''' - The mimetype parameters as dict. For example if the content - type is ``text/html; charset=utf-8`` the params would be - ``{'charset': 'utf-8'}``. - - .. 
versionadded:: 0.5 - ''') - location = header_property('Location', doc=''' - The Location response-header field is used to redirect the recipient - to a location other than the Request-URI for completion of the request - or identification of a new resource.''') - age = header_property('Age', None, parse_age, dump_age, doc=''' - The Age response-header field conveys the sender's estimate of the - amount of time since the response (or its revalidation) was - generated at the origin server. - - Age values are non-negative decimal integers, representing time in - seconds.''') - content_type = header_property('Content-Type', doc=''' - The Content-Type entity-header field indicates the media type of the - entity-body sent to the recipient or, in the case of the HEAD method, - the media type that would have been sent had the request been a GET. - ''') - content_length = header_property('Content-Length', None, int, str, doc=''' - The Content-Length entity-header field indicates the size of the - entity-body, in decimal number of OCTETs, sent to the recipient or, - in the case of the HEAD method, the size of the entity-body that would - have been sent had the request been a GET.''') - content_location = header_property('Content-Location', doc=''' - The Content-Location entity-header field MAY be used to supply the - resource location for the entity enclosed in the message when that - entity is accessible from a location separate from the requested - resource's URI.''') - content_encoding = header_property('Content-Encoding', doc=''' - The Content-Encoding entity-header field is used as a modifier to the - media-type. When present, its value indicates what additional content - codings have been applied to the entity-body, and thus what decoding - mechanisms must be applied in order to obtain the media-type - referenced by the Content-Type header field.''') - content_md5 = header_property('Content-MD5', doc=''' - The Content-MD5 entity-header field, as defined in RFC 1864, is an - MD5 digest of the entity-body for the purpose of providing an - end-to-end message integrity check (MIC) of the entity-body. (Note: - a MIC is good for detecting accidental modification of the - entity-body in transit, but is not proof against malicious attacks.) - ''') - date = header_property('Date', None, parse_date, http_date, doc=''' - The Date general-header field represents the date and time at which - the message was originated, having the same semantics as orig-date - in RFC 822.''') - expires = header_property('Expires', None, parse_date, http_date, doc=''' - The Expires entity-header field gives the date/time after which the - response is considered stale. 
A stale cache entry may not normally be - returned by a cache.''') - last_modified = header_property('Last-Modified', None, parse_date, - http_date, doc=''' - The Last-Modified entity-header field indicates the date and time at - which the origin server believes the variant was last modified.''') - - def _get_retry_after(self): - value = self.headers.get('retry-after') - if value is None: - return - elif value.isdigit(): - return datetime.utcnow() + timedelta(seconds=int(value)) - return parse_date(value) - - def _set_retry_after(self, value): - if value is None: - if 'retry-after' in self.headers: - del self.headers['retry-after'] - return - elif isinstance(value, datetime): - value = http_date(value) - else: - value = str(value) - self.headers['Retry-After'] = value - - retry_after = property(_get_retry_after, _set_retry_after, doc=''' - The Retry-After response-header field can be used with a 503 (Service - Unavailable) response to indicate how long the service is expected - to be unavailable to the requesting client. - - Time in seconds until expiration or date.''') - - def _set_property(name, doc=None): - def fget(self): - def on_update(header_set): - if not header_set and name in self.headers: - del self.headers[name] - elif header_set: - self.headers[name] = header_set.to_header() - return parse_set_header(self.headers.get(name), on_update) - - def fset(self, value): - if not value: - del self.headers[name] - elif isinstance(value, string_types): - self.headers[name] = value - else: - self.headers[name] = dump_header(value) - return property(fget, fset, doc=doc) - - vary = _set_property('Vary', doc=''' - The Vary field value indicates the set of request-header fields that - fully determines, while the response is fresh, whether a cache is - permitted to use the response to reply to a subsequent request - without revalidation.''') - content_language = _set_property('Content-Language', doc=''' - The Content-Language entity-header field describes the natural - language(s) of the intended audience for the enclosed entity. Note - that this might not be equivalent to all the languages used within - the entity-body.''') - allow = _set_property('Allow', doc=''' - The Allow entity-header field lists the set of methods supported - by the resource identified by the Request-URI. The purpose of this - field is strictly to inform the recipient of valid methods - associated with the resource. 
An Allow header field MUST be - present in a 405 (Method Not Allowed) response.''') - - del _set_property, _get_mimetype, _set_mimetype, _get_retry_after, \ - _set_retry_after - - -class WWWAuthenticateMixin(object): - - """Adds a :attr:`www_authenticate` property to a response object.""" - - @property - def www_authenticate(self): - """The `WWW-Authenticate` header in a parsed form.""" - def on_update(www_auth): - if not www_auth and 'www-authenticate' in self.headers: - del self.headers['www-authenticate'] - elif www_auth: - self.headers['WWW-Authenticate'] = www_auth.to_header() - header = self.headers.get('www-authenticate') - return parse_www_authenticate_header(header, on_update) - - -class Request(BaseRequest, AcceptMixin, ETagRequestMixin, - UserAgentMixin, AuthorizationMixin, - CommonRequestDescriptorsMixin): - - """Full featured request object implementing the following mixins: - - - :class:`AcceptMixin` for accept header parsing - - :class:`ETagRequestMixin` for etag and cache control handling - - :class:`UserAgentMixin` for user agent introspection - - :class:`AuthorizationMixin` for http auth handling - - :class:`CommonRequestDescriptorsMixin` for common headers - """ - - -class PlainRequest(StreamOnlyMixin, Request): - - """A request object without special form parsing capabilities. - - .. versionadded:: 0.9 - """ - - -class Response(BaseResponse, ETagResponseMixin, ResponseStreamMixin, - CommonResponseDescriptorsMixin, - WWWAuthenticateMixin): - - """Full featured response object implementing the following mixins: - - - :class:`ETagResponseMixin` for etag and cache control handling - - :class:`ResponseStreamMixin` to add support for the `stream` property - - :class:`CommonResponseDescriptorsMixin` for various HTTP descriptors - - :class:`WWWAuthenticateMixin` for HTTP authentication support - """ diff --git a/pyextra/werkzeug/wsgi.py b/pyextra/werkzeug/wsgi.py deleted file mode 100644 index c30021a7b..000000000 --- a/pyextra/werkzeug/wsgi.py +++ /dev/null @@ -1,1364 +0,0 @@ -# -*- coding: utf-8 -*- -""" - werkzeug.wsgi - ~~~~~~~~~~~~~ - - This module implements WSGI related helpers. - - :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details. - :license: BSD, see LICENSE for more details. -""" -import io -try: - import httplib -except ImportError: - from http import client as httplib -import mimetypes -import os -import posixpath -import re -import socket -from datetime import datetime -from functools import partial, update_wrapper -from itertools import chain -from time import mktime, time -from zlib import adler32 - -from werkzeug._compat import BytesIO, PY2, implements_iterator, iteritems, \ - make_literal_wrapper, string_types, text_type, to_bytes, to_unicode, \ - try_coerce_native, wsgi_get_bytes -from werkzeug._internal import _empty_stream, _encode_idna -from werkzeug.filesystem import get_filesystem_encoding -from werkzeug.http import http_date, is_resource_modified, \ - is_hop_by_hop_header -from werkzeug.urls import uri_to_iri, url_join, url_parse, url_quote -from werkzeug.datastructures import EnvironHeaders - - -def responder(f): - """Marks a function as responder. Decorate a function with it and it - will automatically call the return value as WSGI application. 
- - Example:: - - @responder - def application(environ, start_response): - return Response('Hello World!') - """ - return update_wrapper(lambda *a: f(*a)(*a[-2:]), f) - - -def get_current_url(environ, root_only=False, strip_querystring=False, - host_only=False, trusted_hosts=None): - """A handy helper function that recreates the full URL as IRI for the - current request or parts of it. Here's an example: - - >>> from werkzeug.test import create_environ - >>> env = create_environ("/?param=foo", "http://localhost/script") - >>> get_current_url(env) - 'http://localhost/script/?param=foo' - >>> get_current_url(env, root_only=True) - 'http://localhost/script/' - >>> get_current_url(env, host_only=True) - 'http://localhost/' - >>> get_current_url(env, strip_querystring=True) - 'http://localhost/script/' - - This optionally it verifies that the host is in a list of trusted hosts. - If the host is not in there it will raise a - :exc:`~werkzeug.exceptions.SecurityError`. - - Note that the string returned might contain unicode characters as the - representation is an IRI not an URI. If you need an ASCII only - representation you can use the :func:`~werkzeug.urls.iri_to_uri` - function: - - >>> from werkzeug.urls import iri_to_uri - >>> iri_to_uri(get_current_url(env)) - 'http://localhost/script/?param=foo' - - :param environ: the WSGI environment to get the current URL from. - :param root_only: set `True` if you only want the root URL. - :param strip_querystring: set to `True` if you don't want the querystring. - :param host_only: set to `True` if the host URL should be returned. - :param trusted_hosts: a list of trusted hosts, see :func:`host_is_trusted` - for more information. - """ - tmp = [environ['wsgi.url_scheme'], '://', get_host(environ, trusted_hosts)] - cat = tmp.append - if host_only: - return uri_to_iri(''.join(tmp) + '/') - cat(url_quote(wsgi_get_bytes(environ.get('SCRIPT_NAME', ''))).rstrip('/')) - cat('/') - if not root_only: - cat(url_quote(wsgi_get_bytes(environ.get('PATH_INFO', '')).lstrip(b'/'))) - if not strip_querystring: - qs = get_query_string(environ) - if qs: - cat('?' + qs) - return uri_to_iri(''.join(tmp)) - - -def host_is_trusted(hostname, trusted_list): - """Checks if a host is trusted against a list. This also takes care - of port normalization. - - .. versionadded:: 0.9 - - :param hostname: the hostname to check - :param trusted_list: a list of hostnames to check against. If a - hostname starts with a dot it will match against - all subdomains as well. - """ - if not hostname: - return False - - if isinstance(trusted_list, string_types): - trusted_list = [trusted_list] - - def _normalize(hostname): - if ':' in hostname: - hostname = hostname.rsplit(':', 1)[0] - return _encode_idna(hostname) - - try: - hostname = _normalize(hostname) - except UnicodeError: - return False - for ref in trusted_list: - if ref.startswith('.'): - ref = ref[1:] - suffix_match = True - else: - suffix_match = False - try: - ref = _normalize(ref) - except UnicodeError: - return False - if ref == hostname: - return True - if suffix_match and hostname.endswith(b'.' + ref): - return True - return False - - -def get_host(environ, trusted_hosts=None): - """Return the real host for the given WSGI environment. This first checks - the `X-Forwarded-Host` header, then the normal `Host` header, and finally - the `SERVER_NAME` environment variable (using the first one it finds). - - Optionally it verifies that the host is in a list of trusted hosts. 
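For example (an illustrative sketch, not part of the original docstring; it assumes the `create_environ` helper from `werkzeug.test`)::

    from werkzeug.exceptions import SecurityError
    from werkzeug.test import create_environ
    from werkzeug.wsgi import get_host, host_is_trusted

    env = create_environ('/', 'http://example.com/')
    assert get_host(env) == 'example.com'
    # a leading dot matches the domain itself and all of its subdomains,
    # and ports are normalized away before comparison
    assert host_is_trusted('example.com:443', ['.example.com'])

    try:
        get_host(env, trusted_hosts=['trusted.internal'])
    except SecurityError:
        print('host rejected')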
- If the host is not in there it will raise a - :exc:`~werkzeug.exceptions.SecurityError`. - - :param environ: the WSGI environment to get the host of. - :param trusted_hosts: a list of trusted hosts, see :func:`host_is_trusted` - for more information. - """ - if 'HTTP_X_FORWARDED_HOST' in environ: - rv = environ['HTTP_X_FORWARDED_HOST'].split(',', 1)[0].strip() - elif 'HTTP_HOST' in environ: - rv = environ['HTTP_HOST'] - else: - rv = environ['SERVER_NAME'] - if (environ['wsgi.url_scheme'], environ['SERVER_PORT']) not \ - in (('https', '443'), ('http', '80')): - rv += ':' + environ['SERVER_PORT'] - if trusted_hosts is not None: - if not host_is_trusted(rv, trusted_hosts): - from werkzeug.exceptions import SecurityError - raise SecurityError('Host "%s" is not trusted' % rv) - return rv - - -def get_content_length(environ): - """Returns the content length from the WSGI environment as - integer. If it's not available or chunked transfer encoding is used, - ``None`` is returned. - - .. versionadded:: 0.9 - - :param environ: the WSGI environ to fetch the content length from. - """ - if environ.get('HTTP_TRANSFER_ENCODING', '') == 'chunked': - return None - - content_length = environ.get('CONTENT_LENGTH') - if content_length is not None: - try: - return max(0, int(content_length)) - except (ValueError, TypeError): - pass - - -def get_input_stream(environ, safe_fallback=True): - """Returns the input stream from the WSGI environment and wraps it - in the most sensible way possible. The stream returned is not the - raw WSGI stream in most cases but one that is safe to read from - without taking into account the content length. - - If content length is not set, the stream will be empty for safety reasons. - If the WSGI server supports chunked or infinite streams, it should set - the ``wsgi.input_terminated`` value in the WSGI environ to indicate that. - - .. versionadded:: 0.9 - - :param environ: the WSGI environ to fetch the stream from. - :param safe_fallback: use an empty stream as a safe fallback when the - content length is not set. Disabling this allows infinite streams, - which can be a denial-of-service risk. - """ - stream = environ['wsgi.input'] - content_length = get_content_length(environ) - - # A wsgi extension that tells us if the input is terminated. In - # that case we return the stream unchanged as we know we can safely - # read it until the end. - if environ.get('wsgi.input_terminated'): - return stream - - # If the request doesn't specify a content length, returning the stream is - # potentially dangerous because it could be infinite, malicious or not. If - # safe_fallback is true, return an empty stream instead for safety. - if content_length is None: - return safe_fallback and _empty_stream or stream - - # Otherwise limit the stream to the content length - return LimitedStream(stream, content_length) - - -def get_query_string(environ): - """Returns the `QUERY_STRING` from the WSGI environment. This also takes - care about the WSGI decoding dance on Python 3 environments as a - native string. The string returned will be restricted to ASCII - characters. - - .. versionadded:: 0.9 - - :param environ: the WSGI environment object to get the query string from. - """ - qs = wsgi_get_bytes(environ.get('QUERY_STRING', '')) - # QUERY_STRING really should be ascii safe but some browsers - # will send us some unicode stuff (I am looking at you IE). - # In that case we want to urllib quote it badly. 
- return try_coerce_native(url_quote(qs, safe=':&%=+$!*\'(),')) - - -def get_path_info(environ, charset='utf-8', errors='replace'): - """Returns the `PATH_INFO` from the WSGI environment and properly - decodes it. This also takes care about the WSGI decoding dance - on Python 3 environments. if the `charset` is set to `None` a - bytestring is returned. - - .. versionadded:: 0.9 - - :param environ: the WSGI environment object to get the path from. - :param charset: the charset for the path info, or `None` if no - decoding should be performed. - :param errors: the decoding error handling. - """ - path = wsgi_get_bytes(environ.get('PATH_INFO', '')) - return to_unicode(path, charset, errors, allow_none_charset=True) - - -def get_script_name(environ, charset='utf-8', errors='replace'): - """Returns the `SCRIPT_NAME` from the WSGI environment and properly - decodes it. This also takes care about the WSGI decoding dance - on Python 3 environments. if the `charset` is set to `None` a - bytestring is returned. - - .. versionadded:: 0.9 - - :param environ: the WSGI environment object to get the path from. - :param charset: the charset for the path, or `None` if no - decoding should be performed. - :param errors: the decoding error handling. - """ - path = wsgi_get_bytes(environ.get('SCRIPT_NAME', '')) - return to_unicode(path, charset, errors, allow_none_charset=True) - - -def pop_path_info(environ, charset='utf-8', errors='replace'): - """Removes and returns the next segment of `PATH_INFO`, pushing it onto - `SCRIPT_NAME`. Returns `None` if there is nothing left on `PATH_INFO`. - - If the `charset` is set to `None` a bytestring is returned. - - If there are empty segments (``'/foo//bar``) these are ignored but - properly pushed to the `SCRIPT_NAME`: - - >>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'} - >>> pop_path_info(env) - 'a' - >>> env['SCRIPT_NAME'] - '/foo/a' - >>> pop_path_info(env) - 'b' - >>> env['SCRIPT_NAME'] - '/foo/a/b' - - .. versionadded:: 0.5 - - .. versionchanged:: 0.9 - The path is now decoded and a charset and encoding - parameter can be provided. - - :param environ: the WSGI environment that is modified. - """ - path = environ.get('PATH_INFO') - if not path: - return None - - script_name = environ.get('SCRIPT_NAME', '') - - # shift multiple leading slashes over - old_path = path - path = path.lstrip('/') - if path != old_path: - script_name += '/' * (len(old_path) - len(path)) - - if '/' not in path: - environ['PATH_INFO'] = '' - environ['SCRIPT_NAME'] = script_name + path - rv = wsgi_get_bytes(path) - else: - segment, path = path.split('/', 1) - environ['PATH_INFO'] = '/' + path - environ['SCRIPT_NAME'] = script_name + segment - rv = wsgi_get_bytes(segment) - - return to_unicode(rv, charset, errors, allow_none_charset=True) - - -def peek_path_info(environ, charset='utf-8', errors='replace'): - """Returns the next segment on the `PATH_INFO` or `None` if there - is none. Works like :func:`pop_path_info` without modifying the - environment: - - >>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'} - >>> peek_path_info(env) - 'a' - >>> peek_path_info(env) - 'a' - - If the `charset` is set to `None` a bytestring is returned. - - .. versionadded:: 0.5 - - .. versionchanged:: 0.9 - The path is now decoded and a charset and encoding - parameter can be provided. - - :param environ: the WSGI environment that is checked. 
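Combining the two helpers above, as an illustrative sketch that is not part of the original module; `make_env` is just a stand-in for a real WSGI environ::

    from werkzeug.wsgi import peek_path_info, pop_path_info

    def make_env(script_name, path_info):
        # only the two keys these helpers touch are needed for the demo
        return {'SCRIPT_NAME': script_name, 'PATH_INFO': path_info}

    env = make_env('/app', '/users/42/profile')
    assert peek_path_info(env) == 'users'   # look ahead, environ unchanged
    assert pop_path_info(env) == 'users'    # consume the segment
    assert env['SCRIPT_NAME'] == '/app/users'
    assert env['PATH_INFO'] == '/42/profile'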
- """ - segments = environ.get('PATH_INFO', '').lstrip('/').split('/', 1) - if segments: - return to_unicode(wsgi_get_bytes(segments[0]), - charset, errors, allow_none_charset=True) - - -def extract_path_info(environ_or_baseurl, path_or_url, charset='utf-8', - errors='replace', collapse_http_schemes=True): - """Extracts the path info from the given URL (or WSGI environment) and - path. The path info returned is a unicode string, not a bytestring - suitable for a WSGI environment. The URLs might also be IRIs. - - If the path info could not be determined, `None` is returned. - - Some examples: - - >>> extract_path_info('http://example.com/app', '/app/hello') - u'/hello' - >>> extract_path_info('http://example.com/app', - ... 'https://example.com/app/hello') - u'/hello' - >>> extract_path_info('http://example.com/app', - ... 'https://example.com/app/hello', - ... collapse_http_schemes=False) is None - True - - Instead of providing a base URL you can also pass a WSGI environment. - - .. versionadded:: 0.6 - - :param environ_or_baseurl: a WSGI environment dict, a base URL or - base IRI. This is the root of the - application. - :param path_or_url: an absolute path from the server root, a - relative path (in which case it's the path info) - or a full URL. Also accepts IRIs and unicode - parameters. - :param charset: the charset for byte data in URLs - :param errors: the error handling on decode - :param collapse_http_schemes: if set to `False` the algorithm does - not assume that http and https on the - same server point to the same - resource. - """ - def _normalize_netloc(scheme, netloc): - parts = netloc.split(u'@', 1)[-1].split(u':', 1) - if len(parts) == 2: - netloc, port = parts - if (scheme == u'http' and port == u'80') or \ - (scheme == u'https' and port == u'443'): - port = None - else: - netloc = parts[0] - port = None - if port is not None: - netloc += u':' + port - return netloc - - # make sure whatever we are working on is a IRI and parse it - path = uri_to_iri(path_or_url, charset, errors) - if isinstance(environ_or_baseurl, dict): - environ_or_baseurl = get_current_url(environ_or_baseurl, - root_only=True) - base_iri = uri_to_iri(environ_or_baseurl, charset, errors) - base_scheme, base_netloc, base_path = url_parse(base_iri)[:3] - cur_scheme, cur_netloc, cur_path, = \ - url_parse(url_join(base_iri, path))[:3] - - # normalize the network location - base_netloc = _normalize_netloc(base_scheme, base_netloc) - cur_netloc = _normalize_netloc(cur_scheme, cur_netloc) - - # is that IRI even on a known HTTP scheme? - if collapse_http_schemes: - for scheme in base_scheme, cur_scheme: - if scheme not in (u'http', u'https'): - return None - else: - if not (base_scheme in (u'http', u'https') and - base_scheme == cur_scheme): - return None - - # are the netlocs compatible? - if base_netloc != cur_netloc: - return None - - # are we below the application path? - base_path = base_path.rstrip(u'/') - if not cur_path.startswith(base_path): - return None - - return u'/' + cur_path[len(base_path):].lstrip(u'/') - - -class ProxyMiddleware(object): - """This middleware routes some requests to the provided WSGI app and - proxies some requests to an external server. This is not something that - can generally be done on the WSGI layer and some HTTP requests will not - tunnel through correctly (for instance websocket requests cannot be - proxied through WSGI). As a result this is only really useful for some - basic requests that can be forwarded. 
- - Example configuration:: - - app = ProxyMiddleware(app, { - '/static/': { - 'target': 'http://127.0.0.1:5001/', - } - }) - - For each host options can be specified. The following options are - supported: - - ``target``: - the target URL to dispatch to - ``remove_prefix``: - if set to `True` the prefix is chopped off the URL before - dispatching it to the server. - ``host``: - When set to ``''`` which is the default the host header is - automatically rewritten to the URL of the target. If set to `None` - then the host header is unmodified from the client request. Any - other value overwrites the host header with that value. - ``headers``: - An optional dictionary of headers that should be sent with the - request to the target host. - ``ssl_context``: - In case this is an HTTPS target host then an SSL context can be - provided here (:class:`ssl.SSLContext`). This can be used for instance - to disable SSL verification. - - In this case everything below ``'/static/'`` is proxied to the server on - port 5001. The host header is automatically rewritten and so are request - URLs (eg: the leading `/static/` prefix here gets chopped off). - - .. versionadded:: 0.14 - """ - - def __init__(self, app, targets, chunk_size=2 << 13, timeout=10): - def _set_defaults(opts): - opts.setdefault('remove_prefix', False) - opts.setdefault('host', '') - opts.setdefault('headers', {}) - opts.setdefault('ssl_context', None) - return opts - self.app = app - self.targets = dict(('/%s/' % k.strip('/'), _set_defaults(v)) - for k, v in iteritems(targets)) - self.chunk_size = chunk_size - self.timeout = timeout - - def proxy_to(self, opts, path, prefix): - target = url_parse(opts['target']) - - def application(environ, start_response): - headers = list(EnvironHeaders(environ).items()) - headers[:] = [(k, v) for k, v in headers - if not is_hop_by_hop_header(k) and - k.lower() not in ('content-length', 'host')] - headers.append(('Connection', 'close')) - if opts['host'] == '': - headers.append(('Host', target.ascii_host)) - elif opts['host'] is None: - headers.append(('Host', environ['HTTP_HOST'])) - else: - headers.append(('Host', opts['host'])) - headers.extend(opts['headers'].items()) - - remote_path = path - if opts['remove_prefix']: - remote_path = '%s/%s' % ( - target.path.rstrip('/'), - remote_path[len(prefix):].lstrip('/') - ) - - content_length = environ.get('CONTENT_LENGTH') - chunked = False - if content_length not in ('', None): - headers.append(('Content-Length', content_length)) - elif content_length is not None: - headers.append(('Transfer-Encoding', 'chunked')) - chunked = True - - try: - if target.scheme == 'http': - con = httplib.HTTPConnection( - target.ascii_host, target.port or 80, - timeout=self.timeout) - elif target.scheme == 'https': - con = httplib.HTTPSConnection( - target.ascii_host, target.port or 443, - timeout=self.timeout, - context=opts['ssl_context']) - con.connect() - con.putrequest(environ['REQUEST_METHOD'], url_quote(remote_path), - skip_host=True) - - for k, v in headers: - if k.lower() == 'connection': - v = 'close' - con.putheader(k, v) - con.endheaders() - - stream = get_input_stream(environ) - while 1: - data = stream.read(self.chunk_size) - if not data: - break - if chunked: - con.send(b'%x\r\n%s\r\n' % (len(data), data)) - else: - con.send(data) - - resp = con.getresponse() - except socket.error: - from werkzeug.exceptions import BadGateway - return BadGateway()(environ, start_response) - - start_response('%d %s' % (resp.status, resp.reason), - [(k.title(), v) for k, v in 
resp.getheaders() - if not is_hop_by_hop_header(k)]) - - def read(): - while 1: - try: - data = resp.read(self.chunk_size) - except socket.error: - break - if not data: - break - yield data - return read() - return application - - def __call__(self, environ, start_response): - path = environ['PATH_INFO'] - app = self.app - for prefix, opts in iteritems(self.targets): - if path.startswith(prefix): - app = self.proxy_to(opts, path, prefix) - break - return app(environ, start_response) - - -class SharedDataMiddleware(object): - - """A WSGI middleware that provides static content for development - environments or simple server setups. Usage is quite simple:: - - import os - from werkzeug.wsgi import SharedDataMiddleware - - app = SharedDataMiddleware(app, { - '/shared': os.path.join(os.path.dirname(__file__), 'shared') - }) - - The contents of the folder ``./shared`` will now be available on - ``http://example.com/shared/``. This is pretty useful during development - because a standalone media server is not required. One can also mount - files on the root folder and still continue to use the application because - the shared data middleware forwards all unhandled requests to the - application, even if the requests are below one of the shared folders. - - If `pkg_resources` is available you can also tell the middleware to serve - files from package data:: - - app = SharedDataMiddleware(app, { - '/shared': ('myapplication', 'shared_files') - }) - - This will then serve the ``shared_files`` folder in the `myapplication` - Python package. - - The optional `disallow` parameter can be a list of :func:`~fnmatch.fnmatch` - rules for files that are not accessible from the web. If `cache` is set to - `False` no caching headers are sent. - - Currently the middleware does not support non ASCII filenames. If the - encoding on the file system happens to be the encoding of the URI it may - work but this could also be by accident. We strongly suggest using ASCII - only file names for static files. - - The middleware will guess the mimetype using the Python `mimetype` - module. If it's unable to figure out the charset it will fall back - to `fallback_mimetype`. - - .. versionchanged:: 0.5 - The cache timeout is configurable now. - - .. versionadded:: 0.6 - The `fallback_mimetype` parameter was added. - - :param app: the application to wrap. If you don't want to wrap an - application you can pass it :exc:`NotFound`. - :param exports: a list or dict of exported files and folders. - :param disallow: a list of :func:`~fnmatch.fnmatch` rules. - :param fallback_mimetype: the fallback mimetype for unknown files. - :param cache: enable or disable caching headers. - :param cache_timeout: the cache timeout in seconds for the headers. 
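A hedged configuration sketch tying the SharedDataMiddleware options above together; the mount points, directory, package name and pattern are hypothetical::

    import os

    from werkzeug.exceptions import NotFound
    from werkzeug.wsgi import SharedDataMiddleware

    # Serve static files only: NotFound() is itself a valid WSGI application,
    # so unmatched or disallowed paths simply produce a 404.
    app = SharedDataMiddleware(
        NotFound(),
        {
            '/files': os.path.join(os.path.dirname(__file__), 'files'),
            '/pkg': ('myapplication', 'shared_files'),  # hypothetical package,
                                                        # served via pkg_resources
        },
        disallow='*.py',               # a single fnmatch pattern
        cache_timeout=60 * 60,         # one hour instead of the 12 hour default
        fallback_mimetype='application/octet-stream',
    )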
- """ - - def __init__(self, app, exports, disallow=None, cache=True, - cache_timeout=60 * 60 * 12, fallback_mimetype='text/plain'): - self.app = app - self.exports = [] - self.cache = cache - self.cache_timeout = cache_timeout - if hasattr(exports, 'items'): - exports = iteritems(exports) - for key, value in exports: - if isinstance(value, tuple): - loader = self.get_package_loader(*value) - elif isinstance(value, string_types): - if os.path.isfile(value): - loader = self.get_file_loader(value) - else: - loader = self.get_directory_loader(value) - else: - raise TypeError('unknown def %r' % value) - self.exports.append((key, loader)) - if disallow is not None: - from fnmatch import fnmatch - self.is_allowed = lambda x: not fnmatch(x, disallow) - self.fallback_mimetype = fallback_mimetype - - def is_allowed(self, filename): - """Subclasses can override this method to disallow the access to - certain files. However by providing `disallow` in the constructor - this method is overwritten. - """ - return True - - def _opener(self, filename): - return lambda: ( - open(filename, 'rb'), - datetime.utcfromtimestamp(os.path.getmtime(filename)), - int(os.path.getsize(filename)) - ) - - def get_file_loader(self, filename): - return lambda x: (os.path.basename(filename), self._opener(filename)) - - def get_package_loader(self, package, package_path): - from pkg_resources import DefaultProvider, ResourceManager, \ - get_provider - loadtime = datetime.utcnow() - provider = get_provider(package) - manager = ResourceManager() - filesystem_bound = isinstance(provider, DefaultProvider) - - def loader(path): - if path is None: - return None, None - path = posixpath.join(package_path, path) - if not provider.has_resource(path): - return None, None - basename = posixpath.basename(path) - if filesystem_bound: - return basename, self._opener( - provider.get_resource_filename(manager, path)) - s = provider.get_resource_string(manager, path) - return basename, lambda: ( - BytesIO(s), - loadtime, - len(s) - ) - return loader - - def get_directory_loader(self, directory): - def loader(path): - if path is not None: - path = os.path.join(directory, path) - else: - path = directory - if os.path.isfile(path): - return os.path.basename(path), self._opener(path) - return None, None - return loader - - def generate_etag(self, mtime, file_size, real_filename): - if not isinstance(real_filename, bytes): - real_filename = real_filename.encode(get_filesystem_encoding()) - return 'wzsdm-%d-%s-%s' % ( - mktime(mtime.timetuple()), - file_size, - adler32(real_filename) & 0xffffffff - ) - - def __call__(self, environ, start_response): - cleaned_path = get_path_info(environ) - if PY2: - cleaned_path = cleaned_path.encode(get_filesystem_encoding()) - # sanitize the path for non unix systems - cleaned_path = cleaned_path.strip('/') - for sep in os.sep, os.altsep: - if sep and sep != '/': - cleaned_path = cleaned_path.replace(sep, '/') - path = '/' + '/'.join(x for x in cleaned_path.split('/') - if x and x != '..') - file_loader = None - for search_path, loader in self.exports: - if search_path == path: - real_filename, file_loader = loader(None) - if file_loader is not None: - break - if not search_path.endswith('/'): - search_path += '/' - if path.startswith(search_path): - real_filename, file_loader = loader(path[len(search_path):]) - if file_loader is not None: - break - if file_loader is None or not self.is_allowed(real_filename): - return self.app(environ, start_response) - - guessed_type = mimetypes.guess_type(real_filename) - 
mime_type = guessed_type[0] or self.fallback_mimetype - f, mtime, file_size = file_loader() - - headers = [('Date', http_date())] - if self.cache: - timeout = self.cache_timeout - etag = self.generate_etag(mtime, file_size, real_filename) - headers += [ - ('Etag', '"%s"' % etag), - ('Cache-Control', 'max-age=%d, public' % timeout) - ] - if not is_resource_modified(environ, etag, last_modified=mtime): - f.close() - start_response('304 Not Modified', headers) - return [] - headers.append(('Expires', http_date(time() + timeout))) - else: - headers.append(('Cache-Control', 'public')) - - headers.extend(( - ('Content-Type', mime_type), - ('Content-Length', str(file_size)), - ('Last-Modified', http_date(mtime)) - )) - start_response('200 OK', headers) - return wrap_file(environ, f) - - -class DispatcherMiddleware(object): - - """Allows one to mount middlewares or applications in a WSGI application. - This is useful if you want to combine multiple WSGI applications:: - - app = DispatcherMiddleware(app, { - '/app2': app2, - '/app3': app3 - }) - """ - - def __init__(self, app, mounts=None): - self.app = app - self.mounts = mounts or {} - - def __call__(self, environ, start_response): - script = environ.get('PATH_INFO', '') - path_info = '' - while '/' in script: - if script in self.mounts: - app = self.mounts[script] - break - script, last_item = script.rsplit('/', 1) - path_info = '/%s%s' % (last_item, path_info) - else: - app = self.mounts.get(script, self.app) - original_script_name = environ.get('SCRIPT_NAME', '') - environ['SCRIPT_NAME'] = original_script_name + script - environ['PATH_INFO'] = path_info - return app(environ, start_response) - - -@implements_iterator -class ClosingIterator(object): - - """The WSGI specification requires that all middlewares and gateways - respect the `close` callback of an iterator. Because it is useful to add - another close action to a returned iterator and adding a custom iterator - is a boring task this class can be used for that:: - - return ClosingIterator(app(environ, start_response), [cleanup_session, - cleanup_locals]) - - If there is just one close function it can be passed instead of the list. - - A closing iterator is not needed if the application uses response objects - and finishes the processing if the response is started:: - - try: - return response(environ, start_response) - finally: - cleanup_session() - cleanup_locals() - """ - - def __init__(self, iterable, callbacks=None): - iterator = iter(iterable) - self._next = partial(next, iterator) - if callbacks is None: - callbacks = [] - elif callable(callbacks): - callbacks = [callbacks] - else: - callbacks = list(callbacks) - iterable_close = getattr(iterator, 'close', None) - if iterable_close: - callbacks.insert(0, iterable_close) - self._callbacks = callbacks - - def __iter__(self): - return self - - def __next__(self): - return self._next() - - def close(self): - for callback in self._callbacks: - callback() - - -def wrap_file(environ, file, buffer_size=8192): - """Wraps a file. This uses the WSGI server's file wrapper if available - or otherwise the generic :class:`FileWrapper`. - - .. versionadded:: 0.5 - - If the file wrapper from the WSGI server is used it's important to not - iterate over it from inside the application but to pass it through - unchanged. If you want to pass out a file wrapper inside a response - object you have to set :attr:`~BaseResponse.direct_passthrough` to `True`. - - More information about file wrappers are available in :pep:`333`. 
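The mount table shown in the DispatcherMiddleware docstring above is sketched more concretely below; the interesting part is how SCRIPT_NAME and PATH_INFO are rewritten before the mounted application runs. The application names are hypothetical::

    from werkzeug.wrappers import Request, Response
    from werkzeug.wsgi import DispatcherMiddleware

    @Request.application
    def frontend(request):
        return Response('frontend saw %s' % request.path)

    @Request.application
    def backend(request):
        # For a request to /backend/jobs/7 the dispatcher moves '/backend'
        # onto SCRIPT_NAME, so script_root is '/backend' and path is '/jobs/7'.
        return Response('backend saw %s (mounted at %s)'
                        % (request.path, request.script_root))

    application = DispatcherMiddleware(frontend, {'/backend': backend})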
- - :param file: a :class:`file`-like object with a :meth:`~file.read` method. - :param buffer_size: number of bytes for one iteration. - """ - return environ.get('wsgi.file_wrapper', FileWrapper)(file, buffer_size) - - -@implements_iterator -class FileWrapper(object): - - """This class can be used to convert a :class:`file`-like object into - an iterable. It yields `buffer_size` blocks until the file is fully - read. - - You should not use this class directly but rather use the - :func:`wrap_file` function that uses the WSGI server's file wrapper - support if it's available. - - .. versionadded:: 0.5 - - If you're using this object together with a :class:`BaseResponse` you have - to use the `direct_passthrough` mode. - - :param file: a :class:`file`-like object with a :meth:`~file.read` method. - :param buffer_size: number of bytes for one iteration. - """ - - def __init__(self, file, buffer_size=8192): - self.file = file - self.buffer_size = buffer_size - - def close(self): - if hasattr(self.file, 'close'): - self.file.close() - - def seekable(self): - if hasattr(self.file, 'seekable'): - return self.file.seekable() - if hasattr(self.file, 'seek'): - return True - return False - - def seek(self, *args): - if hasattr(self.file, 'seek'): - self.file.seek(*args) - - def tell(self): - if hasattr(self.file, 'tell'): - return self.file.tell() - return None - - def __iter__(self): - return self - - def __next__(self): - data = self.file.read(self.buffer_size) - if data: - return data - raise StopIteration() - - -@implements_iterator -class _RangeWrapper(object): - # private for now, but should we make it public in the future ? - - """This class can be used to convert an iterable object into - an iterable that will only yield a piece of the underlying content. - It yields blocks until the underlying stream range is fully read. - The yielded blocks will have a size that can't exceed the original - iterator defined block size, but that can be smaller. - - If you're using this object together with a :class:`BaseResponse` you have - to use the `direct_passthrough` mode. - - :param iterable: an iterable object with a :meth:`__next__` method. - :param start_byte: byte from which read will start. - :param byte_range: how many bytes to read. 
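A minimal sketch of the wrap_file()/direct_passthrough pattern described above; the file path is hypothetical::

    from werkzeug.wrappers import Response
    from werkzeug.wsgi import wrap_file

    def send_report(environ, start_response):
        f = open('/tmp/report.pdf', 'rb')  # hypothetical file
        # direct_passthrough keeps the response object from iterating over the
        # wrapper itself, so a server supplied wsgi.file_wrapper stays effective.
        response = Response(wrap_file(environ, f), mimetype='application/pdf',
                            direct_passthrough=True)
        return response(environ, start_response)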
- """ - - def __init__(self, iterable, start_byte=0, byte_range=None): - self.iterable = iter(iterable) - self.byte_range = byte_range - self.start_byte = start_byte - self.end_byte = None - if byte_range is not None: - self.end_byte = self.start_byte + self.byte_range - self.read_length = 0 - self.seekable = hasattr(iterable, 'seekable') and iterable.seekable() - self.end_reached = False - - def __iter__(self): - return self - - def _next_chunk(self): - try: - chunk = next(self.iterable) - self.read_length += len(chunk) - return chunk - except StopIteration: - self.end_reached = True - raise - - def _first_iteration(self): - chunk = None - if self.seekable: - self.iterable.seek(self.start_byte) - self.read_length = self.iterable.tell() - contextual_read_length = self.read_length - else: - while self.read_length <= self.start_byte: - chunk = self._next_chunk() - if chunk is not None: - chunk = chunk[self.start_byte - self.read_length:] - contextual_read_length = self.start_byte - return chunk, contextual_read_length - - def _next(self): - if self.end_reached: - raise StopIteration() - chunk = None - contextual_read_length = self.read_length - if self.read_length == 0: - chunk, contextual_read_length = self._first_iteration() - if chunk is None: - chunk = self._next_chunk() - if self.end_byte is not None and self.read_length >= self.end_byte: - self.end_reached = True - return chunk[:self.end_byte - contextual_read_length] - return chunk - - def __next__(self): - chunk = self._next() - if chunk: - return chunk - self.end_reached = True - raise StopIteration() - - def close(self): - if hasattr(self.iterable, 'close'): - self.iterable.close() - - -def _make_chunk_iter(stream, limit, buffer_size): - """Helper for the line and chunk iter functions.""" - if isinstance(stream, (bytes, bytearray, text_type)): - raise TypeError('Passed a string or byte object instead of ' - 'true iterator or stream.') - if not hasattr(stream, 'read'): - for item in stream: - if item: - yield item - return - if not isinstance(stream, LimitedStream) and limit is not None: - stream = LimitedStream(stream, limit) - _read = stream.read - while 1: - item = _read(buffer_size) - if not item: - break - yield item - - -def make_line_iter(stream, limit=None, buffer_size=10 * 1024, - cap_at_buffer=False): - """Safely iterates line-based over an input stream. If the input stream - is not a :class:`LimitedStream` the `limit` parameter is mandatory. - - This uses the stream's :meth:`~file.read` method internally as opposite - to the :meth:`~file.readline` method that is unsafe and can only be used - in violation of the WSGI specification. The same problem applies to the - `__iter__` function of the input stream which calls :meth:`~file.readline` - without arguments. - - If you need line-by-line processing it's strongly recommended to iterate - over the input stream using this helper function. - - .. versionchanged:: 0.8 - This function now ensures that the limit was reached. - - .. versionadded:: 0.9 - added support for iterators as input stream. - - .. versionadded:: 0.11.10 - added support for the `cap_at_buffer` parameter. - - :param stream: the stream or iterate to iterate over. - :param limit: the limit in bytes for the stream. (Usually - content length. Not necessary if the `stream` - is a :class:`LimitedStream`. - :param buffer_size: The optional buffer size. - :param cap_at_buffer: if this is set chunks are split if they are longer - than the buffer size. 
Internally this is implemented - that the buffer size might be exhausted by a factor - of two however. - """ - _iter = _make_chunk_iter(stream, limit, buffer_size) - - first_item = next(_iter, '') - if not first_item: - return - - s = make_literal_wrapper(first_item) - empty = s('') - cr = s('\r') - lf = s('\n') - crlf = s('\r\n') - - _iter = chain((first_item,), _iter) - - def _iter_basic_lines(): - _join = empty.join - buffer = [] - while 1: - new_data = next(_iter, '') - if not new_data: - break - new_buf = [] - buf_size = 0 - for item in chain(buffer, new_data.splitlines(True)): - new_buf.append(item) - buf_size += len(item) - if item and item[-1:] in crlf: - yield _join(new_buf) - new_buf = [] - elif cap_at_buffer and buf_size >= buffer_size: - rv = _join(new_buf) - while len(rv) >= buffer_size: - yield rv[:buffer_size] - rv = rv[buffer_size:] - new_buf = [rv] - buffer = new_buf - if buffer: - yield _join(buffer) - - # This hackery is necessary to merge 'foo\r' and '\n' into one item - # of 'foo\r\n' if we were unlucky and we hit a chunk boundary. - previous = empty - for item in _iter_basic_lines(): - if item == lf and previous[-1:] == cr: - previous += item - item = empty - if previous: - yield previous - previous = item - if previous: - yield previous - - -def make_chunk_iter(stream, separator, limit=None, buffer_size=10 * 1024, - cap_at_buffer=False): - """Works like :func:`make_line_iter` but accepts a separator - which divides chunks. If you want newline based processing - you should use :func:`make_line_iter` instead as it - supports arbitrary newline markers. - - .. versionadded:: 0.8 - - .. versionadded:: 0.9 - added support for iterators as input stream. - - .. versionadded:: 0.11.10 - added support for the `cap_at_buffer` parameter. - - :param stream: the stream or iterate to iterate over. - :param separator: the separator that divides chunks. - :param limit: the limit in bytes for the stream. (Usually - content length. Not necessary if the `stream` - is otherwise already limited). - :param buffer_size: The optional buffer size. - :param cap_at_buffer: if this is set chunks are split if they are longer - than the buffer size. Internally this is implemented - that the buffer size might be exhausted by a factor - of two however. - """ - _iter = _make_chunk_iter(stream, limit, buffer_size) - - first_item = next(_iter, '') - if not first_item: - return - - _iter = chain((first_item,), _iter) - if isinstance(first_item, text_type): - separator = to_unicode(separator) - _split = re.compile(r'(%s)' % re.escape(separator)).split - _join = u''.join - else: - separator = to_bytes(separator) - _split = re.compile(b'(' + re.escape(separator) + b')').split - _join = b''.join - - buffer = [] - while 1: - new_data = next(_iter, '') - if not new_data: - break - chunks = _split(new_data) - new_buf = [] - buf_size = 0 - for item in chain(buffer, chunks): - if item == separator: - yield _join(new_buf) - new_buf = [] - buf_size = 0 - else: - buf_size += len(item) - new_buf.append(item) - - if cap_at_buffer and buf_size >= buffer_size: - rv = _join(new_buf) - while len(rv) >= buffer_size: - yield rv[:buffer_size] - rv = rv[buffer_size:] - new_buf = [rv] - buf_size = len(rv) - - buffer = new_buf - if buffer: - yield _join(buffer) - - -@implements_iterator -class LimitedStream(io.IOBase): - - """Wraps a stream so that it doesn't read more than n bytes. 
If the - stream is exhausted and the caller tries to get more bytes from it - :func:`on_exhausted` is called which by default returns an empty - string. The return value of that function is forwarded - to the reader function. So if it returns an empty string - :meth:`read` will return an empty string as well. - - The limit however must never be higher than what the stream can - output. Otherwise :meth:`readlines` will try to read past the - limit. - - .. admonition:: Note on WSGI compliance - - calls to :meth:`readline` and :meth:`readlines` are not - WSGI compliant because it passes a size argument to the - readline methods. Unfortunately the WSGI PEP is not safely - implementable without a size argument to :meth:`readline` - because there is no EOF marker in the stream. As a result - of that the use of :meth:`readline` is discouraged. - - For the same reason iterating over the :class:`LimitedStream` - is not portable. It internally calls :meth:`readline`. - - We strongly suggest using :meth:`read` only or using the - :func:`make_line_iter` which safely iterates line-based - over a WSGI input stream. - - :param stream: the stream to wrap. - :param limit: the limit for the stream, must not be longer than - what the string can provide if the stream does not - end with `EOF` (like `wsgi.input`) - """ - - def __init__(self, stream, limit): - self._read = stream.read - self._readline = stream.readline - self._pos = 0 - self.limit = limit - - def __iter__(self): - return self - - @property - def is_exhausted(self): - """If the stream is exhausted this attribute is `True`.""" - return self._pos >= self.limit - - def on_exhausted(self): - """This is called when the stream tries to read past the limit. - The return value of this function is returned from the reading - function. - """ - # Read null bytes from the stream so that we get the - # correct end of stream marker. - return self._read(0) - - def on_disconnect(self): - """What should happen if a disconnect is detected? The return - value of this function is returned from read functions in case - the client went away. By default a - :exc:`~werkzeug.exceptions.ClientDisconnected` exception is raised. - """ - from werkzeug.exceptions import ClientDisconnected - raise ClientDisconnected() - - def exhaust(self, chunk_size=1024 * 64): - """Exhaust the stream. This consumes all the data left until the - limit is reached. - - :param chunk_size: the size for a chunk. It will read the chunk - until the stream is exhausted and throw away - the results. - """ - to_read = self.limit - self._pos - chunk = chunk_size - while to_read > 0: - chunk = min(to_read, chunk) - self.read(chunk) - to_read -= chunk - - def read(self, size=None): - """Read `size` bytes or if size is not provided everything is read. - - :param size: the number of bytes read. 
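The class docstring above recommends make_line_iter over calling readline on ``wsgi.input``; a usage sketch for that helper, assuming a plain WSGI callable::

    from werkzeug.wsgi import get_input_stream, make_line_iter

    def count_lines(environ, start_response):
        stream = get_input_stream(environ)
        # An explicit limit is only needed when the stream is not already a
        # LimitedStream; CONTENT_LENGTH is the usual source for it.
        limit = int(environ.get('CONTENT_LENGTH') or 0)
        total = sum(1 for _line in make_line_iter(stream, limit=limit))
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [('%d lines received\n' % total).encode('ascii')]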
- """ - if self._pos >= self.limit: - return self.on_exhausted() - if size is None or size == -1: # -1 is for consistence with file - size = self.limit - to_read = min(self.limit - self._pos, size) - try: - read = self._read(to_read) - except (IOError, ValueError): - return self.on_disconnect() - if to_read and len(read) != to_read: - return self.on_disconnect() - self._pos += len(read) - return read - - def readline(self, size=None): - """Reads one line from the stream.""" - if self._pos >= self.limit: - return self.on_exhausted() - if size is None: - size = self.limit - self._pos - else: - size = min(size, self.limit - self._pos) - try: - line = self._readline(size) - except (ValueError, IOError): - return self.on_disconnect() - if size and not line: - return self.on_disconnect() - self._pos += len(line) - return line - - def readlines(self, size=None): - """Reads a file into a list of strings. It calls :meth:`readline` - until the file is read to the end. It does support the optional - `size` argument if the underlaying stream supports it for - `readline`. - """ - last_pos = self._pos - result = [] - if size is not None: - end = min(self.limit, last_pos + size) - else: - end = self.limit - while 1: - if size is not None: - size -= last_pos - self._pos - if self._pos >= end: - break - result.append(self.readline(size)) - if size is not None: - last_pos = self._pos - return result - - def tell(self): - """Returns the position of the stream. - - .. versionadded:: 0.9 - """ - return self._pos - - def __next__(self): - line = self.readline() - if not line: - raise StopIteration() - return line - - def readable(self): - return True