diff --git a/.circleci/config.yml b/.circleci/config.yml index db815191..9101dabc 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -88,6 +88,41 @@ executors: macos: xcode: 11.4 jobs: + build: + docker: + - image: circleci/python:3.7 + steps: + - checkout + - restore_cache: + keys: + - poetry-{{ checksum "poetry.lock" }} + - run: + name: Install Tools + command: | + curl -sSL https://raw.githubusercontent.com/python-poetry/poetry/master/get-poetry.py | python + - run: + name: Build + command: | + source $HOME/.poetry/env + poetry install + poetry run poetry-dynamic-versioning + poetry build + - save_cache: + key: poetry-{{ checksum "poetry.lock" }} + paths: + - ~/.poetry + - store_artifacts: + path: dist + - store_artifacts: + path: output + - persist_to_workspace: + # Must be an absolute path, or relative path from working_directory. This is a directory on the container which is + # taken to be the root directory of the workspace. + root: . + # Must be relative path from root + paths: + - dist + - output test-unit: parameters: os: @@ -97,7 +132,8 @@ jobs: executor: << parameters.os >> steps: - checkout - #- python/load-cache + - attach_workspace: + at: /tmp/workspace - run: name: install deps command: | @@ -109,18 +145,15 @@ jobs: eval "$(pyenv init -)" pyenv install << parameters.python-version >> pyenv global << parameters.python-version >> - virtualenv .venv - pip install --upgrade virtualenv - source .venv/bin/activate - pip install -r requirements.txt - pip install -e . 
+ pyenv local << parameters.python-version >> + pip install /tmp/workspace/dist/* + pip install pytest + pip install pytest-cov + pip install pytest-expect + pip install pyaml + pip install mock mkdir test-results || true - - python/save-cache - - run: - command: | - source .venv/bin/activate python -m pytest --junitxml=test-results/results.xml --cov=solnlib --cov-report=html tests - name: Test - store_test_results: path: test-results - store_artifacts: @@ -166,6 +199,8 @@ jobs: echo PASSWORD=Chang3d! >>$SPLUNK_HOME/etc/system/local/user-seed.conf /opt/splunk/bin/splunk start --accept-license #- python/load-cache + - attach_workspace: + at: /tmp/workspace - run: name: Install deps command: | @@ -177,18 +212,14 @@ jobs: eval "$(pyenv init -)" pyenv install << parameters.python-version >> pyenv global << parameters.python-version >> - virtualenv .venv - pip install --upgrade virtualenv - source .venv/bin/activate - pip install -r requirements.txt - pip install -e . + pyenv local << parameters.python-version >> + pip install /tmp/workspace/dist/* + pip install pytest + pip install pytest-cov + pip install pytest-expect + pip install pyaml mkdir test-results || true - - python/save-cache - - run: - command: | - source .venv/bin/activate SPLUNK_HOME=/opt/splunk/ python -m pytest --junitxml=test-results/results.xml -v examples - name: Test - store_test_results: path: test-results - store_artifacts: @@ -202,21 +233,29 @@ jobs: steps: - setup_remote_docker: docker_layer_caching: true - - attach_workspace: - at: /tmp/workspace - checkout + - restore_cache: + keys: + - poetry-{{ checksum "poetry.lock" }} + - run: + name: Install Tools + command: | + curl -sSL https://raw.githubusercontent.com/python-poetry/poetry/master/get-poetry.py | python - run: - name: "Publish on PyPI" + name: Build command: | - PATH=$PATH:/usr/local/go/bin - pip install twine - python setup.py sdist bdist_wheel - twine upload dist/* + source $HOME/.poetry/env + poetry install + poetry run 
poetry-dynamic-versioning + poetry publish --build -u ${TWINE_USERNAME} -p ${TWINE_PASSWORD} workflows: main: jobs: + - build - test-unit: + requires: + - build matrix: parameters: os: [linux] @@ -225,6 +264,8 @@ workflows: branches: only: /.*/ - test-splunk: + requires: + - build matrix: parameters: os: [linux] diff --git a/MANIFEST.in b/MANIFEST.in deleted file mode 100644 index e1455fca..00000000 --- a/MANIFEST.in +++ /dev/null @@ -1,4 +0,0 @@ -include LICENSE -include README.txt -include versioneer.py -include solnlib/_version.py diff --git a/README.md b/README.md new file mode 100644 index 00000000..5f871d72 --- /dev/null +++ b/README.md @@ -0,0 +1,18 @@ +# README + +Splunk Solutions SDK is an open source packaged solution for getting data into Splunk using modular inputs. +This SDK is used by Splunk Add-on builder, and Splunk UCC based add-ons and is intended for use by partner +developers. This SDK/Library extends the Splunk SDK for python + +## Support + +Splunk Solutions SDK is an open source product developed by Splunkers. This SDK is not "Supported Software" by Splunk, Inc. 
Issues and defects can be reported +via the public issue tracker. + +## Contributing + +We do not accept external contributions at this time. + +## License + +* Configuration and documentation licensed subject to [APACHE-2.0](LICENSE) diff --git a/README.txt b/README.txt deleted file mode 100644 index e69de29b..00000000 diff --git a/examples/test__kvstore.py b/examples/test__kvstore.py index a97eba8c..fd9d998a 100644 --- a/examples/test__kvstore.py +++ b/examples/test__kvstore.py @@ -10,9 +10,9 @@ sys.path.insert(0, op.dirname(op.dirname(op.abspath(__file__)))) from solnlib.credentials import get_session_key import context -from solnlib.packages.splunklib import binding -from solnlib.packages.splunklib import client -from solnlib.packages.splunklib.binding import HTTPError +from splunklib import binding +from splunklib import client +from splunklib.binding import HTTPError def test_kvstore(): diff --git a/poetry.lock b/poetry.lock new file mode 100644 index 00000000..60e75173 --- /dev/null +++ b/poetry.lock @@ -0,0 +1,827 @@ +[[package]] +category = "dev" +description = "Atomic file writes." 
+marker = "python_version >= \"2.7\" and python_version < \"3.0\" or python_version >= \"3.7\" and python_version < \"4.0\" and sys_platform == \"win32\"" +name = "atomicwrites" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +version = "1.4.0" + +[[package]] +category = "dev" +description = "Classes Without Boilerplate" +marker = "python_version >= \"2.7\" and python_version < \"3.0\" or python_version >= \"3.7\" and python_version < \"4.0\"" +name = "attrs" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +version = "20.2.0" + +[package.extras] +dev = ["coverage (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "zope.interface", "sphinx", "sphinx-rtd-theme", "pre-commit"] +docs = ["sphinx", "sphinx-rtd-theme", "zope.interface"] +tests = ["coverage (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "zope.interface"] +tests_no_zope = ["coverage (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six"] + +[[package]] +category = "dev" +description = "Backport of functools.lru_cache" +marker = "python_version >= \"2.7\" and python_version < \"3.0\"" +name = "backports.functools-lru-cache" +optional = false +python-versions = ">=2.6" +version = "1.6.1" + +[package.extras] +docs = ["sphinx", "jaraco.packaging (>=3.2)", "rst.linker (>=1.9)"] +testing = ["pytest (>=3.5,<3.7.3 || >3.7.3)", "pytest-checkdocs (>=1.2.3)", "pytest-flake8", "pytest-black-multipy", "pytest-cov"] + +[[package]] +category = "main" +description = "Python package for providing Mozilla's CA Bundle." +name = "certifi" +optional = false +python-versions = "*" +version = "2020.6.20" + +[[package]] +category = "main" +description = "Universal encoding detector for Python 2 and 3" +name = "chardet" +optional = false +python-versions = "*" +version = "3.0.4" + +[[package]] +category = "dev" +description = "Cross-platform colored terminal text." 
+marker = "python_version >= \"2.7\" and python_version < \"3.0\" and sys_platform == \"win32\" or python_version >= \"3.7\" and python_version < \"4.0\" and sys_platform == \"win32\"" +name = "colorama" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +version = "0.4.3" + +[[package]] +category = "dev" +description = "Updated configparser from Python 3.7 for Python 2.6+." +marker = "python_version >= \"2.7\" and python_version < \"3.0\"" +name = "configparser" +optional = false +python-versions = ">=2.6" +version = "4.0.2" + +[package.extras] +docs = ["sphinx", "jaraco.packaging (>=3.2)", "rst.linker (>=1.9)"] +testing = ["pytest (>=3.5,<3.7.3 || >3.7.3)", "pytest-checkdocs (>=1.2)", "pytest-flake8", "pytest-black-multipy"] + +[[package]] +category = "dev" +description = "Backports and enhancements for the contextlib module" +marker = "python_version >= \"2.7\" and python_version < \"3.0\"" +name = "contextlib2" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +version = "0.6.0.post1" + +[[package]] +category = "dev" +description = "Code coverage measurement for Python" +name = "coverage" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4" +version = "5.3" + +[package.extras] +toml = ["toml"] + +[[package]] +category = "dev" +description = "Dynamic version generation" +marker = "python_version >= \"3.7\" and python_version < \"4.0\"" +name = "dunamai" +optional = false +python-versions = ">=3.5,<4.0" +version = "1.3.0" + +[[package]] +category = "dev" +description = "Python function signatures from PEP362 for Python 2.6, 2.7 and 3.2+" +marker = "python_version >= \"2.7\" and python_version < \"3.0\"" +name = "funcsigs" +optional = false +python-versions = "*" +version = "1.0.2" + +[[package]] +category = "main" +description = "Clean single-source support for Python 3 and 2" +name = "future" +optional = false +python-versions = ">=2.6, !=3.0.*, 
!=3.1.*, !=3.2.*" +version = "0.18.2" + +[[package]] +category = "main" +description = "Internationalized Domain Names in Applications (IDNA)" +name = "idna" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +version = "2.10" + +[[package]] +category = "dev" +description = "Read metadata from Python packages" +marker = "python_version >= \"2.7\" and python_version < \"3.0\" or python_version >= \"3.7\" and python_version < \"3.8\"" +name = "importlib-metadata" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7" +version = "1.7.0" + +[package.dependencies] +zipp = ">=0.5" + +[package.dependencies.configparser] +python = "<3" +version = ">=3.5" + +[package.dependencies.contextlib2] +python = "<3" +version = "*" + +[package.dependencies.pathlib2] +python = "<3" +version = "*" + +[package.extras] +docs = ["sphinx", "rst.linker"] +testing = ["packaging", "pep517", "importlib-resources (>=1.3)"] + +[[package]] +category = "dev" +description = "iniconfig: brain-dead simple config-ini parsing" +marker = "python_version >= \"3.7\" and python_version < \"4.0\"" +name = "iniconfig" +optional = false +python-versions = "*" +version = "1.0.1" + +[[package]] +category = "dev" +description = "A very fast and expressive template engine." +marker = "python_version >= \"3.7\" and python_version < \"4.0\"" +name = "jinja2" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +version = "2.11.2" + +[package.dependencies] +MarkupSafe = ">=0.23" + +[package.extras] +i18n = ["Babel (>=0.8)"] + +[[package]] +category = "dev" +description = "Safely add untrusted strings to HTML/XML markup." 
+marker = "python_version >= \"3.7\" and python_version < \"4.0\"" +name = "markupsafe" +optional = false +python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*" +version = "1.1.1" + +[[package]] +category = "dev" +description = "Rolling backport of unittest.mock for all Pythons" +marker = "python_version >= \"2.7\" and python_version < \"3.0\"" +name = "mock" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +version = "3.0.5" + +[package.dependencies] +six = "*" + +[package.dependencies.funcsigs] +python = "<3.3" +version = ">=1" + +[package.extras] +build = ["twine", "wheel", "blurb"] +docs = ["sphinx"] +test = ["pytest", "pytest-cov"] + +[[package]] +category = "dev" +description = "Rolling backport of unittest.mock for all Pythons" +marker = "python_version >= \"3.7\" and python_version < \"4.0\"" +name = "mock" +optional = false +python-versions = ">=3.6" +version = "4.0.2" + +[package.extras] +build = ["twine", "wheel", "blurb"] +docs = ["sphinx"] +test = ["pytest", "pytest-cov"] + +[[package]] +category = "dev" +description = "More routines for operating on iterables, beyond itertools" +marker = "python_version >= \"2.7\" and python_version <= \"2.7\"" +name = "more-itertools" +optional = false +python-versions = "*" +version = "5.0.0" + +[package.dependencies] +six = ">=1.0.0,<2.0.0" + +[[package]] +category = "dev" +description = "More routines for operating on iterables, beyond itertools" +marker = "python_version >= \"3.7\" and python_version < \"4.0\"" +name = "more-itertools" +optional = false +python-versions = ">=3.5" +version = "8.5.0" + +[[package]] +category = "dev" +description = "Core utilities for Python packages" +marker = "python_version >= \"2.7\" and python_version < \"3.0\" or python_version >= \"3.7\" and python_version < \"4.0\"" +name = "packaging" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +version = "20.4" + +[package.dependencies] +pyparsing = ">=2.0.2" +six = 
"*" + +[[package]] +category = "dev" +description = "Object-oriented filesystem paths" +marker = "python_version >= \"2.7\" and python_version < \"3.0\"" +name = "pathlib2" +optional = false +python-versions = "*" +version = "2.3.5" + +[package.dependencies] +six = "*" + +[package.dependencies.scandir] +python = "<3.5" +version = "*" + +[[package]] +category = "dev" +description = "plugin and hook calling mechanisms for python" +marker = "python_version >= \"2.7\" and python_version < \"3.0\" or python_version >= \"3.7\" and python_version < \"4.0\"" +name = "pluggy" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +version = "0.13.1" + +[package.dependencies] +[package.dependencies.importlib-metadata] +python = "<3.8" +version = ">=0.12" + +[package.extras] +dev = ["pre-commit", "tox"] + +[[package]] +category = "dev" +description = "Plugin for Poetry to enable dynamic versioning based on VCS tags" +marker = "python_version >= \"3.7\" and python_version < \"4.0\"" +name = "poetry-dynamic-versioning" +optional = false +python-versions = ">=3.5,<4.0" +version = "0.8.3" + +[package.dependencies] +dunamai = ">=1.3,<2.0" +jinja2 = ">=2.11.1,<3.0.0" +tomlkit = ">=0.4" + +[[package]] +category = "dev" +description = "library with cross-python path, ini-parsing, io, code, log facilities" +marker = "python_version >= \"2.7\" and python_version < \"3.0\" or python_version >= \"3.7\" and python_version < \"4.0\"" +name = "py" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +version = "1.9.0" + +[[package]] +category = "dev" +description = "Python parsing module" +marker = "python_version >= \"2.7\" and python_version < \"3.0\" or python_version >= \"3.7\" and python_version < \"4.0\"" +name = "pyparsing" +optional = false +python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" +version = "2.4.7" + +[[package]] +category = "dev" +description = "pytest: simple powerful testing with Python" +name = "pytest" 
+optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7" +version = "4.6.11" + +[package.dependencies] +atomicwrites = ">=1.0" +attrs = ">=17.4.0" +packaging = "*" +pluggy = ">=0.12,<1.0" +py = ">=1.5.0" +six = ">=1.10.0" +wcwidth = "*" + +[package.dependencies.colorama] +python = "<3.4.0 || >=3.5.0" +version = "*" + +[package.dependencies.funcsigs] +python = "<3.0" +version = ">=1.0" + +[package.dependencies.importlib-metadata] +python = "<3.8" +version = ">=0.12" + +[package.dependencies.more-itertools] +python = "<2.8" +version = ">=4.0.0,<6.0.0" + +[package.dependencies.pathlib2] +python = "<3.6" +version = ">=2.2.0" + +[package.extras] +testing = ["argcomplete", "hypothesis (>=3.56)", "nose", "requests", "mock"] + +[[package]] +category = "dev" +description = "pytest: simple powerful testing with Python" +name = "pytest" +optional = false +python-versions = ">=3.5" +version = "6.0.2" + +[package.dependencies] +atomicwrites = ">=1.0" +attrs = ">=17.4.0" +colorama = "*" +iniconfig = "*" +more-itertools = ">=4.0.0" +packaging = "*" +pluggy = ">=0.12,<1.0" +py = ">=1.8.2" +toml = "*" + +[package.dependencies.importlib-metadata] +python = "<3.8" +version = ">=0.12" + +[package.extras] +checkqa_mypy = ["mypy (0.780)"] +testing = ["argcomplete", "hypothesis (>=3.56)", "mock", "nose", "requests", "xmlschema"] + +[[package]] +category = "dev" +description = "Pytest plugin for measuring coverage." 
+name = "pytest-cov" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +version = "2.10.1" + +[package.dependencies] +coverage = ">=4.4" +pytest = ">=4.6" + +[package.extras] +testing = ["fields", "hunter", "process-tests (2.0.2)", "six", "pytest-xdist", "virtualenv"] + +[[package]] +category = "dev" +description = "py.test plugin to store test expectations and mark tests based on them" +name = "pytest-expect" +optional = false +python-versions = "*" +version = "1.1.0" + +[package.dependencies] +pytest = "*" +u-msgpack-python = "*" + +[[package]] +category = "dev" +description = "YAML parser and emitter for Python" +name = "pyyaml" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +version = "5.3.1" + +[[package]] +category = "main" +description = "Python HTTP for Humans." +name = "requests" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +version = "2.24.0" + +[package.dependencies] +certifi = ">=2017.4.17" +chardet = ">=3.0.2,<4" +idna = ">=2.5,<3" +urllib3 = ">=1.21.1,<1.25.0 || >1.25.0,<1.25.1 || >1.25.1,<1.26" + +[package.extras] +security = ["pyOpenSSL (>=0.14)", "cryptography (>=1.3.4)"] +socks = ["PySocks (>=1.5.6,<1.5.7 || >1.5.7)", "win-inet-pton"] + +[[package]] +category = "dev" +description = "scandir, a better directory iterator and faster os.walk()" +marker = "python_version >= \"2.7\" and python_version < \"3.0\"" +name = "scandir" +optional = false +python-versions = "*" +version = "1.10.0" + +[[package]] +category = "main" +description = "Python Data Structures for Humans" +name = "schematics" +optional = false +python-versions = "*" +version = "2.1.0" + +[[package]] +category = "dev" +description = "Python 2 and 3 compatibility utilities" +marker = "python_version >= \"2.7\" and python_version < \"3.0\" or python_version >= \"2.7\" and python_version <= \"2.7\" or python_version >= \"3.7\" and python_version < \"4.0\"" +name = 
"six" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +version = "1.15.0" + +[[package]] +category = "main" +description = "Sorted Containers -- Sorted List, Sorted Dict, Sorted Set" +name = "sortedcontainers" +optional = false +python-versions = "*" +version = "2.2.2" + +[[package]] +category = "main" +description = "The Splunk Software Development Kit for Python." +name = "splunk-sdk" +optional = false +python-versions = "*" +version = "1.6.14" + +[[package]] +category = "dev" +description = "Python Library for Tom's Obvious, Minimal Language" +marker = "python_version >= \"3.7\" and python_version < \"4.0\"" +name = "toml" +optional = false +python-versions = "*" +version = "0.10.1" + +[[package]] +category = "dev" +description = "Style preserving TOML library" +marker = "python_version >= \"3.7\" and python_version < \"4.0\"" +name = "tomlkit" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +version = "0.7.0" + +[[package]] +category = "dev" +description = "A portable, lightweight MessagePack serializer and deserializer written in pure Python." +name = "u-msgpack-python" +optional = false +python-versions = "*" +version = "2.7.0" + +[[package]] +category = "main" +description = "HTTP library with thread-safe connection pooling, file post, and more." 
+name = "urllib3" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4" +version = "1.25.10" + +[package.extras] +brotli = ["brotlipy (>=0.6.0)"] +secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "pyOpenSSL (>=0.14)", "ipaddress"] +socks = ["PySocks (>=1.5.6,<1.5.7 || >1.5.7,<2.0)"] + +[[package]] +category = "dev" +description = "Measures the displayed width of unicode strings in a terminal" +marker = "python_version >= \"2.7\" and python_version < \"3.0\"" +name = "wcwidth" +optional = false +python-versions = "*" +version = "0.2.5" + +[package.dependencies] +[package.dependencies."backports.functools-lru-cache"] +python = "<3.2" +version = ">=1.2.1" + +[[package]] +category = "dev" +description = "Backport of pathlib-compatible object wrapper for zip files" +marker = "python_version >= \"2.7\" and python_version < \"3.0\"" +name = "zipp" +optional = false +python-versions = ">=2.7" +version = "1.2.0" + +[package.dependencies] +[package.dependencies.contextlib2] +python = "<3.4" +version = "*" + +[package.extras] +docs = ["sphinx", "jaraco.packaging (>=3.2)", "rst.linker (>=1.9)"] +testing = ["pathlib2", "unittest2", "jaraco.itertools", "func-timeout"] + +[[package]] +category = "dev" +description = "Backport of pathlib-compatible object wrapper for zip files" +marker = "python_version >= \"3.7\" and python_version < \"3.8\"" +name = "zipp" +optional = false +python-versions = ">=3.6" +version = "3.1.0" + +[package.extras] +docs = ["sphinx", "jaraco.packaging (>=3.2)", "rst.linker (>=1.9)"] +testing = ["jaraco.itertools", "func-timeout"] + +[metadata] +content-hash = "fae8fa049e3912dc86cd2d31672987ded8c9da635448988bac9f21d82282550a" +lock-version = "1.0" +python-versions = "~2.7 || ^3.7" + +[metadata.files] +atomicwrites = [ + {file = "atomicwrites-1.4.0-py2.py3-none-any.whl", hash = "sha256:6d1784dea7c0c8d4a5172b6c620f40b6e4cbfdf96d783691f2e1302a7b88e197"}, + {file = "atomicwrites-1.4.0.tar.gz", hash = 
"sha256:ae70396ad1a434f9c7046fd2dd196fc04b12f9e91ffb859164193be8b6168a7a"}, +] +attrs = [ + {file = "attrs-20.2.0-py2.py3-none-any.whl", hash = "sha256:fce7fc47dfc976152e82d53ff92fa0407700c21acd20886a13777a0d20e655dc"}, + {file = "attrs-20.2.0.tar.gz", hash = "sha256:26b54ddbbb9ee1d34d5d3668dd37d6cf74990ab23c828c2888dccdceee395594"}, +] +"backports.functools-lru-cache" = [ + {file = "backports.functools_lru_cache-1.6.1-py2.py3-none-any.whl", hash = "sha256:0bada4c2f8a43d533e4ecb7a12214d9420e66eb206d54bf2d682581ca4b80848"}, + {file = "backports.functools_lru_cache-1.6.1.tar.gz", hash = "sha256:8fde5f188da2d593bd5bc0be98d9abc46c95bb8a9dde93429570192ee6cc2d4a"}, +] +certifi = [ + {file = "certifi-2020.6.20-py2.py3-none-any.whl", hash = "sha256:8fc0819f1f30ba15bdb34cceffb9ef04d99f420f68eb75d901e9560b8749fc41"}, + {file = "certifi-2020.6.20.tar.gz", hash = "sha256:5930595817496dd21bb8dc35dad090f1c2cd0adfaf21204bf6732ca5d8ee34d3"}, +] +chardet = [ + {file = "chardet-3.0.4-py2.py3-none-any.whl", hash = "sha256:fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed4531e3e15460124c106691"}, + {file = "chardet-3.0.4.tar.gz", hash = "sha256:84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae"}, +] +colorama = [ + {file = "colorama-0.4.3-py2.py3-none-any.whl", hash = "sha256:7d73d2a99753107a36ac6b455ee49046802e59d9d076ef8e47b61499fa29afff"}, + {file = "colorama-0.4.3.tar.gz", hash = "sha256:e96da0d330793e2cb9485e9ddfd918d456036c7149416295932478192f4436a1"}, +] +configparser = [ + {file = "configparser-4.0.2-py2.py3-none-any.whl", hash = "sha256:254c1d9c79f60c45dfde850850883d5aaa7f19a23f13561243a050d5a7c3fe4c"}, + {file = "configparser-4.0.2.tar.gz", hash = "sha256:c7d282687a5308319bf3d2e7706e575c635b0a470342641c93bea0ea3b5331df"}, +] +contextlib2 = [ + {file = "contextlib2-0.6.0.post1-py2.py3-none-any.whl", hash = "sha256:3355078a159fbb44ee60ea80abd0d87b80b78c248643b49aa6d94673b413609b"}, + {file = "contextlib2-0.6.0.post1.tar.gz", hash = 
"sha256:01f490098c18b19d2bd5bb5dc445b2054d2fa97f09a4280ba2c5f3c394c8162e"}, +] +coverage = [ + {file = "coverage-5.3-cp27-cp27m-macosx_10_13_intel.whl", hash = "sha256:bd3166bb3b111e76a4f8e2980fa1addf2920a4ca9b2b8ca36a3bc3dedc618270"}, + {file = "coverage-5.3-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:9342dd70a1e151684727c9c91ea003b2fb33523bf19385d4554f7897ca0141d4"}, + {file = "coverage-5.3-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:63808c30b41f3bbf65e29f7280bf793c79f54fb807057de7e5238ffc7cc4d7b9"}, + {file = "coverage-5.3-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:4d6a42744139a7fa5b46a264874a781e8694bb32f1d76d8137b68138686f1729"}, + {file = "coverage-5.3-cp27-cp27m-win32.whl", hash = "sha256:86e9f8cd4b0cdd57b4ae71a9c186717daa4c5a99f3238a8723f416256e0b064d"}, + {file = "coverage-5.3-cp27-cp27m-win_amd64.whl", hash = "sha256:7858847f2d84bf6e64c7f66498e851c54de8ea06a6f96a32a1d192d846734418"}, + {file = "coverage-5.3-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:530cc8aaf11cc2ac7430f3614b04645662ef20c348dce4167c22d99bec3480e9"}, + {file = "coverage-5.3-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:381ead10b9b9af5f64646cd27107fb27b614ee7040bb1226f9c07ba96625cbb5"}, + {file = "coverage-5.3-cp35-cp35m-macosx_10_13_x86_64.whl", hash = "sha256:71b69bd716698fa62cd97137d6f2fdf49f534decb23a2c6fc80813e8b7be6822"}, + {file = "coverage-5.3-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:1d44bb3a652fed01f1f2c10d5477956116e9b391320c94d36c6bf13b088a1097"}, + {file = "coverage-5.3-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:1c6703094c81fa55b816f5ae542c6ffc625fec769f22b053adb42ad712d086c9"}, + {file = "coverage-5.3-cp35-cp35m-win32.whl", hash = "sha256:cedb2f9e1f990918ea061f28a0f0077a07702e3819602d3507e2ff98c8d20636"}, + {file = "coverage-5.3-cp35-cp35m-win_amd64.whl", hash = "sha256:7f43286f13d91a34fadf61ae252a51a130223c52bfefb50310d5b2deb062cf0f"}, + {file = "coverage-5.3-cp36-cp36m-macosx_10_13_x86_64.whl", hash = 
"sha256:c851b35fc078389bc16b915a0a7c1d5923e12e2c5aeec58c52f4aa8085ac8237"}, + {file = "coverage-5.3-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:aac1ba0a253e17889550ddb1b60a2063f7474155465577caa2a3b131224cfd54"}, + {file = "coverage-5.3-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:2b31f46bf7b31e6aa690d4c7a3d51bb262438c6dcb0d528adde446531d0d3bb7"}, + {file = "coverage-5.3-cp36-cp36m-win32.whl", hash = "sha256:c5f17ad25d2c1286436761b462e22b5020d83316f8e8fcb5deb2b3151f8f1d3a"}, + {file = "coverage-5.3-cp36-cp36m-win_amd64.whl", hash = "sha256:aef72eae10b5e3116bac6957de1df4d75909fc76d1499a53fb6387434b6bcd8d"}, + {file = "coverage-5.3-cp37-cp37m-macosx_10_13_x86_64.whl", hash = "sha256:e8caf961e1b1a945db76f1b5fa9c91498d15f545ac0ababbe575cfab185d3bd8"}, + {file = "coverage-5.3-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:29a6272fec10623fcbe158fdf9abc7a5fa032048ac1d8631f14b50fbfc10d17f"}, + {file = "coverage-5.3-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:2d43af2be93ffbad25dd959899b5b809618a496926146ce98ee0b23683f8c51c"}, + {file = "coverage-5.3-cp37-cp37m-win32.whl", hash = "sha256:c3888a051226e676e383de03bf49eb633cd39fc829516e5334e69b8d81aae751"}, + {file = "coverage-5.3-cp37-cp37m-win_amd64.whl", hash = "sha256:9669179786254a2e7e57f0ecf224e978471491d660aaca833f845b72a2df3709"}, + {file = "coverage-5.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0203acd33d2298e19b57451ebb0bed0ab0c602e5cf5a818591b4918b1f97d516"}, + {file = "coverage-5.3-cp38-cp38-manylinux1_i686.whl", hash = "sha256:582ddfbe712025448206a5bc45855d16c2e491c2dd102ee9a2841418ac1c629f"}, + {file = "coverage-5.3-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:0f313707cdecd5cd3e217fc68c78a960b616604b559e9ea60cc16795c4304259"}, + {file = "coverage-5.3-cp38-cp38-win32.whl", hash = "sha256:78e93cc3571fd928a39c0b26767c986188a4118edc67bc0695bc7a284da22e82"}, + {file = "coverage-5.3-cp38-cp38-win_amd64.whl", hash = "sha256:8f264ba2701b8c9f815b272ad568d555ef98dfe1576802ab3149c3629a9f2221"}, + 
{file = "coverage-5.3-cp39-cp39-macosx_10_13_x86_64.whl", hash = "sha256:50691e744714856f03a86df3e2bff847c2acede4c191f9a1da38f088df342978"}, + {file = "coverage-5.3-cp39-cp39-manylinux1_i686.whl", hash = "sha256:9361de40701666b034c59ad9e317bae95c973b9ff92513dd0eced11c6adf2e21"}, + {file = "coverage-5.3-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:c1b78fb9700fc961f53386ad2fd86d87091e06ede5d118b8a50dea285a071c24"}, + {file = "coverage-5.3-cp39-cp39-win32.whl", hash = "sha256:cb7df71de0af56000115eafd000b867d1261f786b5eebd88a0ca6360cccfaca7"}, + {file = "coverage-5.3-cp39-cp39-win_amd64.whl", hash = "sha256:47a11bdbd8ada9b7ee628596f9d97fbd3851bd9999d398e9436bd67376dbece7"}, + {file = "coverage-5.3.tar.gz", hash = "sha256:280baa8ec489c4f542f8940f9c4c2181f0306a8ee1a54eceba071a449fb870a0"}, +] +dunamai = [ + {file = "dunamai-1.3.0-py3-none-any.whl", hash = "sha256:6f7b313a3e817ed08069147f0e25fbc7e85cf1c1394709d896cb76e552cabbff"}, + {file = "dunamai-1.3.0.tar.gz", hash = "sha256:5685cfe8c69fda7a4cf6e1f33de6ce5f0655c1e53c5caf8a680dfc81cd53f6b8"}, +] +funcsigs = [ + {file = "funcsigs-1.0.2-py2.py3-none-any.whl", hash = "sha256:330cc27ccbf7f1e992e69fef78261dc7c6569012cf397db8d3de0234e6c937ca"}, + {file = "funcsigs-1.0.2.tar.gz", hash = "sha256:a7bb0f2cf3a3fd1ab2732cb49eba4252c2af4240442415b4abce3b87022a8f50"}, +] +future = [ + {file = "future-0.18.2.tar.gz", hash = "sha256:b1bead90b70cf6ec3f0710ae53a525360fa360d306a86583adc6bf83a4db537d"}, +] +idna = [ + {file = "idna-2.10-py2.py3-none-any.whl", hash = "sha256:b97d804b1e9b523befed77c48dacec60e6dcb0b5391d57af6a65a312a90648c0"}, + {file = "idna-2.10.tar.gz", hash = "sha256:b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6"}, +] +importlib-metadata = [ + {file = "importlib_metadata-1.7.0-py2.py3-none-any.whl", hash = "sha256:dc15b2969b4ce36305c51eebe62d418ac7791e9a157911d58bfb1f9ccd8e2070"}, + {file = "importlib_metadata-1.7.0.tar.gz", hash = 
"sha256:90bb658cdbbf6d1735b6341ce708fc7024a3e14e99ffdc5783edea9f9b077f83"}, +] +iniconfig = [ + {file = "iniconfig-1.0.1-py3-none-any.whl", hash = "sha256:80cf40c597eb564e86346103f609d74efce0f6b4d4f30ec8ce9e2c26411ba437"}, + {file = "iniconfig-1.0.1.tar.gz", hash = "sha256:e5f92f89355a67de0595932a6c6c02ab4afddc6fcdc0bfc5becd0d60884d3f69"}, +] +jinja2 = [ + {file = "Jinja2-2.11.2-py2.py3-none-any.whl", hash = "sha256:f0a4641d3cf955324a89c04f3d94663aa4d638abe8f733ecd3582848e1c37035"}, + {file = "Jinja2-2.11.2.tar.gz", hash = "sha256:89aab215427ef59c34ad58735269eb58b1a5808103067f7bb9d5836c651b3bb0"}, +] +markupsafe = [ + {file = "MarkupSafe-1.1.1-cp27-cp27m-macosx_10_6_intel.whl", hash = "sha256:09027a7803a62ca78792ad89403b1b7a73a01c8cb65909cd876f7fcebd79b161"}, + {file = "MarkupSafe-1.1.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:e249096428b3ae81b08327a63a485ad0878de3fb939049038579ac0ef61e17e7"}, + {file = "MarkupSafe-1.1.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:500d4957e52ddc3351cabf489e79c91c17f6e0899158447047588650b5e69183"}, + {file = "MarkupSafe-1.1.1-cp27-cp27m-win32.whl", hash = "sha256:b2051432115498d3562c084a49bba65d97cf251f5a331c64a12ee7e04dacc51b"}, + {file = "MarkupSafe-1.1.1-cp27-cp27m-win_amd64.whl", hash = "sha256:98c7086708b163d425c67c7a91bad6e466bb99d797aa64f965e9d25c12111a5e"}, + {file = "MarkupSafe-1.1.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:cd5df75523866410809ca100dc9681e301e3c27567cf498077e8551b6d20e42f"}, + {file = "MarkupSafe-1.1.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:43a55c2930bbc139570ac2452adf3d70cdbb3cfe5912c71cdce1c2c6bbd9c5d1"}, + {file = "MarkupSafe-1.1.1-cp34-cp34m-macosx_10_6_intel.whl", hash = "sha256:1027c282dad077d0bae18be6794e6b6b8c91d58ed8a8d89a89d59693b9131db5"}, + {file = "MarkupSafe-1.1.1-cp34-cp34m-manylinux1_i686.whl", hash = "sha256:62fe6c95e3ec8a7fad637b7f3d372c15ec1caa01ab47926cfdf7a75b40e0eac1"}, + {file = "MarkupSafe-1.1.1-cp34-cp34m-manylinux1_x86_64.whl", hash = 
"sha256:88e5fcfb52ee7b911e8bb6d6aa2fd21fbecc674eadd44118a9cc3863f938e735"}, + {file = "MarkupSafe-1.1.1-cp34-cp34m-win32.whl", hash = "sha256:ade5e387d2ad0d7ebf59146cc00c8044acbd863725f887353a10df825fc8ae21"}, + {file = "MarkupSafe-1.1.1-cp34-cp34m-win_amd64.whl", hash = "sha256:09c4b7f37d6c648cb13f9230d847adf22f8171b1ccc4d5682398e77f40309235"}, + {file = "MarkupSafe-1.1.1-cp35-cp35m-macosx_10_6_intel.whl", hash = "sha256:79855e1c5b8da654cf486b830bd42c06e8780cea587384cf6545b7d9ac013a0b"}, + {file = "MarkupSafe-1.1.1-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:c8716a48d94b06bb3b2524c2b77e055fb313aeb4ea620c8dd03a105574ba704f"}, + {file = "MarkupSafe-1.1.1-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:7c1699dfe0cf8ff607dbdcc1e9b9af1755371f92a68f706051cc8c37d447c905"}, + {file = "MarkupSafe-1.1.1-cp35-cp35m-win32.whl", hash = "sha256:6dd73240d2af64df90aa7c4e7481e23825ea70af4b4922f8ede5b9e35f78a3b1"}, + {file = "MarkupSafe-1.1.1-cp35-cp35m-win_amd64.whl", hash = "sha256:9add70b36c5666a2ed02b43b335fe19002ee5235efd4b8a89bfcf9005bebac0d"}, + {file = "MarkupSafe-1.1.1-cp36-cp36m-macosx_10_6_intel.whl", hash = "sha256:24982cc2533820871eba85ba648cd53d8623687ff11cbb805be4ff7b4c971aff"}, + {file = "MarkupSafe-1.1.1-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:00bc623926325b26bb9605ae9eae8a215691f33cae5df11ca5424f06f2d1f473"}, + {file = "MarkupSafe-1.1.1-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:717ba8fe3ae9cc0006d7c451f0bb265ee07739daf76355d06366154ee68d221e"}, + {file = "MarkupSafe-1.1.1-cp36-cp36m-win32.whl", hash = "sha256:535f6fc4d397c1563d08b88e485c3496cf5784e927af890fb3c3aac7f933ec66"}, + {file = "MarkupSafe-1.1.1-cp36-cp36m-win_amd64.whl", hash = "sha256:b1282f8c00509d99fef04d8ba936b156d419be841854fe901d8ae224c59f0be5"}, + {file = "MarkupSafe-1.1.1-cp37-cp37m-macosx_10_6_intel.whl", hash = "sha256:8defac2f2ccd6805ebf65f5eeb132adcf2ab57aa11fdf4c0dd5169a004710e7d"}, + {file = "MarkupSafe-1.1.1-cp37-cp37m-manylinux1_i686.whl", hash = 
"sha256:46c99d2de99945ec5cb54f23c8cd5689f6d7177305ebff350a58ce5f8de1669e"}, + {file = "MarkupSafe-1.1.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:ba59edeaa2fc6114428f1637ffff42da1e311e29382d81b339c1817d37ec93c6"}, + {file = "MarkupSafe-1.1.1-cp37-cp37m-win32.whl", hash = "sha256:b00c1de48212e4cc9603895652c5c410df699856a2853135b3967591e4beebc2"}, + {file = "MarkupSafe-1.1.1-cp37-cp37m-win_amd64.whl", hash = "sha256:9bf40443012702a1d2070043cb6291650a0841ece432556f784f004937f0f32c"}, + {file = "MarkupSafe-1.1.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6788b695d50a51edb699cb55e35487e430fa21f1ed838122d722e0ff0ac5ba15"}, + {file = "MarkupSafe-1.1.1-cp38-cp38-manylinux1_i686.whl", hash = "sha256:cdb132fc825c38e1aeec2c8aa9338310d29d337bebbd7baa06889d09a60a1fa2"}, + {file = "MarkupSafe-1.1.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:13d3144e1e340870b25e7b10b98d779608c02016d5184cfb9927a9f10c689f42"}, + {file = "MarkupSafe-1.1.1-cp38-cp38-win32.whl", hash = "sha256:596510de112c685489095da617b5bcbbac7dd6384aeebeda4df6025d0256a81b"}, + {file = "MarkupSafe-1.1.1-cp38-cp38-win_amd64.whl", hash = "sha256:e8313f01ba26fbbe36c7be1966a7b7424942f670f38e666995b88d012765b9be"}, + {file = "MarkupSafe-1.1.1.tar.gz", hash = "sha256:29872e92839765e546828bb7754a68c418d927cd064fd4708fab9fe9c8bb116b"}, +] +mock = [ + {file = "mock-3.0.5-py2.py3-none-any.whl", hash = "sha256:d157e52d4e5b938c550f39eb2fd15610db062441a9c2747d3dbfa9298211d0f8"}, + {file = "mock-3.0.5.tar.gz", hash = "sha256:83657d894c90d5681d62155c82bda9c1187827525880eda8ff5df4ec813437c3"}, + {file = "mock-4.0.2-py3-none-any.whl", hash = "sha256:3f9b2c0196c60d21838f307f5825a7b86b678cedc58ab9e50a8988187b4d81e0"}, + {file = "mock-4.0.2.tar.gz", hash = "sha256:dd33eb70232b6118298d516bbcecd26704689c386594f0f3c4f13867b2c56f72"}, +] +more-itertools = [ + {file = "more-itertools-5.0.0.tar.gz", hash = "sha256:38a936c0a6d98a38bcc2d03fdaaedaba9f412879461dd2ceff8d37564d6522e4"}, + {file = 
"more_itertools-5.0.0-py2-none-any.whl", hash = "sha256:c0a5785b1109a6bd7fac76d6837fd1feca158e54e521ccd2ae8bfe393cc9d4fc"}, + {file = "more_itertools-5.0.0-py3-none-any.whl", hash = "sha256:fe7a7cae1ccb57d33952113ff4fa1bc5f879963600ed74918f1236e212ee50b9"}, + {file = "more-itertools-8.5.0.tar.gz", hash = "sha256:6f83822ae94818eae2612063a5101a7311e68ae8002005b5e05f03fd74a86a20"}, + {file = "more_itertools-8.5.0-py3-none-any.whl", hash = "sha256:9b30f12df9393f0d28af9210ff8efe48d10c94f73e5daf886f10c4b0b0b4f03c"}, +] +packaging = [ + {file = "packaging-20.4-py2.py3-none-any.whl", hash = "sha256:998416ba6962ae7fbd6596850b80e17859a5753ba17c32284f67bfff33784181"}, + {file = "packaging-20.4.tar.gz", hash = "sha256:4357f74f47b9c12db93624a82154e9b120fa8293699949152b22065d556079f8"}, +] +pathlib2 = [ + {file = "pathlib2-2.3.5-py2.py3-none-any.whl", hash = "sha256:0ec8205a157c80d7acc301c0b18fbd5d44fe655968f5d947b6ecef5290fc35db"}, + {file = "pathlib2-2.3.5.tar.gz", hash = "sha256:6cd9a47b597b37cc57de1c05e56fb1a1c9cc9fab04fe78c29acd090418529868"}, +] +pluggy = [ + {file = "pluggy-0.13.1-py2.py3-none-any.whl", hash = "sha256:966c145cd83c96502c3c3868f50408687b38434af77734af1e9ca461a4081d2d"}, + {file = "pluggy-0.13.1.tar.gz", hash = "sha256:15b2acde666561e1298d71b523007ed7364de07029219b604cf808bfa1c765b0"}, +] +poetry-dynamic-versioning = [ + {file = "poetry-dynamic-versioning-0.8.3.tar.gz", hash = "sha256:493cf972312947ffce99c9f0e38b504312960b9f616c11954fb3c62cc24c2337"}, + {file = "poetry_dynamic_versioning-0.8.3-py3-none-any.whl", hash = "sha256:75dab648b63d68c359f4d921e00c97ec4d347a8cda21393ca9b61f370d9de004"}, +] +py = [ + {file = "py-1.9.0-py2.py3-none-any.whl", hash = "sha256:366389d1db726cd2fcfc79732e75410e5fe4d31db13692115529d34069a043c2"}, + {file = "py-1.9.0.tar.gz", hash = "sha256:9ca6883ce56b4e8da7e79ac18787889fa5206c79dcc67fb065376cd2fe03f342"}, +] +pyparsing = [ + {file = "pyparsing-2.4.7-py2.py3-none-any.whl", hash = 
"sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b"}, + {file = "pyparsing-2.4.7.tar.gz", hash = "sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1"}, +] +pytest = [ + {file = "pytest-4.6.11-py2.py3-none-any.whl", hash = "sha256:a00a7d79cbbdfa9d21e7d0298392a8dd4123316bfac545075e6f8f24c94d8c97"}, + {file = "pytest-4.6.11.tar.gz", hash = "sha256:50fa82392f2120cc3ec2ca0a75ee615be4c479e66669789771f1758332be4353"}, + {file = "pytest-6.0.2-py3-none-any.whl", hash = "sha256:0e37f61339c4578776e090c3b8f6b16ce4db333889d65d0efb305243ec544b40"}, + {file = "pytest-6.0.2.tar.gz", hash = "sha256:c8f57c2a30983f469bf03e68cdfa74dc474ce56b8f280ddcb080dfd91df01043"}, +] +pytest-cov = [ + {file = "pytest-cov-2.10.1.tar.gz", hash = "sha256:47bd0ce14056fdd79f93e1713f88fad7bdcc583dcd7783da86ef2f085a0bb88e"}, + {file = "pytest_cov-2.10.1-py2.py3-none-any.whl", hash = "sha256:45ec2d5182f89a81fc3eb29e3d1ed3113b9e9a873bcddb2a71faaab066110191"}, +] +pytest-expect = [ + {file = "pytest-expect-1.1.0.tar.gz", hash = "sha256:36b4462704450798197d090809a05f4e13649d9cba9acdc557ce9517da1fd847"}, +] +pyyaml = [ + {file = "PyYAML-5.3.1-cp27-cp27m-win32.whl", hash = "sha256:74809a57b329d6cc0fdccee6318f44b9b8649961fa73144a98735b0aaf029f1f"}, + {file = "PyYAML-5.3.1-cp27-cp27m-win_amd64.whl", hash = "sha256:240097ff019d7c70a4922b6869d8a86407758333f02203e0fc6ff79c5dcede76"}, + {file = "PyYAML-5.3.1-cp35-cp35m-win32.whl", hash = "sha256:4f4b913ca1a7319b33cfb1369e91e50354d6f07a135f3b901aca02aa95940bd2"}, + {file = "PyYAML-5.3.1-cp35-cp35m-win_amd64.whl", hash = "sha256:cc8955cfbfc7a115fa81d85284ee61147059a753344bc51098f3ccd69b0d7e0c"}, + {file = "PyYAML-5.3.1-cp36-cp36m-win32.whl", hash = "sha256:7739fc0fa8205b3ee8808aea45e968bc90082c10aef6ea95e855e10abf4a37b2"}, + {file = "PyYAML-5.3.1-cp36-cp36m-win_amd64.whl", hash = "sha256:69f00dca373f240f842b2931fb2c7e14ddbacd1397d57157a9b005a6a9942648"}, + {file = "PyYAML-5.3.1-cp37-cp37m-win32.whl", hash = 
"sha256:d13155f591e6fcc1ec3b30685d50bf0711574e2c0dfffd7644babf8b5102ca1a"}, + {file = "PyYAML-5.3.1-cp37-cp37m-win_amd64.whl", hash = "sha256:73f099454b799e05e5ab51423c7bcf361c58d3206fa7b0d555426b1f4d9a3eaf"}, + {file = "PyYAML-5.3.1-cp38-cp38-win32.whl", hash = "sha256:06a0d7ba600ce0b2d2fe2e78453a470b5a6e000a985dd4a4e54e436cc36b0e97"}, + {file = "PyYAML-5.3.1-cp38-cp38-win_amd64.whl", hash = "sha256:95f71d2af0ff4227885f7a6605c37fd53d3a106fcab511b8860ecca9fcf400ee"}, + {file = "PyYAML-5.3.1.tar.gz", hash = "sha256:b8eac752c5e14d3eca0e6dd9199cd627518cb5ec06add0de9d32baeee6fe645d"}, +] +requests = [ + {file = "requests-2.24.0-py2.py3-none-any.whl", hash = "sha256:fe75cc94a9443b9246fc7049224f75604b113c36acb93f87b80ed42c44cbb898"}, + {file = "requests-2.24.0.tar.gz", hash = "sha256:b3559a131db72c33ee969480840fff4bb6dd111de7dd27c8ee1f820f4f00231b"}, +] +scandir = [ + {file = "scandir-1.10.0-cp27-cp27m-win32.whl", hash = "sha256:92c85ac42f41ffdc35b6da57ed991575bdbe69db895507af88b9f499b701c188"}, + {file = "scandir-1.10.0-cp27-cp27m-win_amd64.whl", hash = "sha256:cb925555f43060a1745d0a321cca94bcea927c50114b623d73179189a4e100ac"}, + {file = "scandir-1.10.0-cp34-cp34m-win32.whl", hash = "sha256:2c712840c2e2ee8dfaf36034080108d30060d759c7b73a01a52251cc8989f11f"}, + {file = "scandir-1.10.0-cp34-cp34m-win_amd64.whl", hash = "sha256:2586c94e907d99617887daed6c1d102b5ca28f1085f90446554abf1faf73123e"}, + {file = "scandir-1.10.0-cp35-cp35m-win32.whl", hash = "sha256:2b8e3888b11abb2217a32af0766bc06b65cc4a928d8727828ee68af5a967fa6f"}, + {file = "scandir-1.10.0-cp35-cp35m-win_amd64.whl", hash = "sha256:8c5922863e44ffc00c5c693190648daa6d15e7c1207ed02d6f46a8dcc2869d32"}, + {file = "scandir-1.10.0-cp36-cp36m-win32.whl", hash = "sha256:2ae41f43797ca0c11591c0c35f2f5875fa99f8797cb1a1fd440497ec0ae4b022"}, + {file = "scandir-1.10.0-cp36-cp36m-win_amd64.whl", hash = "sha256:7d2d7a06a252764061a020407b997dd036f7bd6a175a5ba2b345f0a357f0b3f4"}, + {file = "scandir-1.10.0-cp37-cp37m-win32.whl", hash 
= "sha256:67f15b6f83e6507fdc6fca22fedf6ef8b334b399ca27c6b568cbfaa82a364173"}, + {file = "scandir-1.10.0-cp37-cp37m-win_amd64.whl", hash = "sha256:b24086f2375c4a094a6b51e78b4cf7ca16c721dcee2eddd7aa6494b42d6d519d"}, + {file = "scandir-1.10.0.tar.gz", hash = "sha256:4d4631f6062e658e9007ab3149a9b914f3548cb38bfb021c64f39a025ce578ae"}, +] +schematics = [ + {file = "schematics-2.1.0-py2.py3-none-any.whl", hash = "sha256:8fcc6182606fd0b24410a1dbb066d9bbddbe8da9c9509f47b743495706239283"}, + {file = "schematics-2.1.0.tar.gz", hash = "sha256:a40b20635c0e43d18d3aff76220f6cd95ea4decb3f37765e49529b17d81b0439"}, +] +six = [ + {file = "six-1.15.0-py2.py3-none-any.whl", hash = "sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced"}, + {file = "six-1.15.0.tar.gz", hash = "sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259"}, +] +sortedcontainers = [ + {file = "sortedcontainers-2.2.2-py2.py3-none-any.whl", hash = "sha256:c633ebde8580f241f274c1f8994a665c0e54a17724fecd0cae2f079e09c36d3f"}, + {file = "sortedcontainers-2.2.2.tar.gz", hash = "sha256:4e73a757831fc3ca4de2859c422564239a31d8213d09a2a666e375807034d2ba"}, +] +splunk-sdk = [ + {file = "splunk-sdk-1.6.14.tar.gz", hash = "sha256:9f0fa01cbf706f3777d055d3d3dc05b0d3b51ec42978e5917d49cd09a7e7750f"}, +] +toml = [ + {file = "toml-0.10.1-py2.py3-none-any.whl", hash = "sha256:bda89d5935c2eac546d648028b9901107a595863cb36bae0c73ac804a9b4ce88"}, + {file = "toml-0.10.1.tar.gz", hash = "sha256:926b612be1e5ce0634a2ca03470f95169cf16f939018233a670519cb4ac58b0f"}, +] +tomlkit = [ + {file = "tomlkit-0.7.0-py2.py3-none-any.whl", hash = "sha256:6babbd33b17d5c9691896b0e68159215a9387ebfa938aa3ac42f4a4beeb2b831"}, + {file = "tomlkit-0.7.0.tar.gz", hash = "sha256:ac57f29693fab3e309ea789252fcce3061e19110085aa31af5446ca749325618"}, +] +u-msgpack-python = [ + {file = "u-msgpack-python-2.7.0.tar.gz", hash = "sha256:996e4c4454771f0ff0fd2a7566b1a159d305d3611cd755addf444e3533e2bc54"}, + {file = 
"u_msgpack_python-2.7.0-py2.py3-none-any.whl", hash = "sha256:2a53251deed0184b0357e740916a6b5dd015b6f5c8b338379b67e40745e2e783"}, +] +urllib3 = [ + {file = "urllib3-1.25.10-py2.py3-none-any.whl", hash = "sha256:e7983572181f5e1522d9c98453462384ee92a0be7fac5f1413a1e35c56cc0461"}, + {file = "urllib3-1.25.10.tar.gz", hash = "sha256:91056c15fa70756691db97756772bb1eb9678fa585d9184f24534b100dc60f4a"}, +] +wcwidth = [ + {file = "wcwidth-0.2.5-py2.py3-none-any.whl", hash = "sha256:beb4802a9cebb9144e99086eff703a642a13d6a0052920003a230f3294bbe784"}, + {file = "wcwidth-0.2.5.tar.gz", hash = "sha256:c4d647b99872929fdb7bdcaa4fbe7f01413ed3d98077df798530e5b04f116c83"}, +] +zipp = [ + {file = "zipp-1.2.0-py2.py3-none-any.whl", hash = "sha256:e0d9e63797e483a30d27e09fffd308c59a700d365ec34e93cc100844168bf921"}, + {file = "zipp-1.2.0.tar.gz", hash = "sha256:c70410551488251b0fee67b460fb9a536af8d6f9f008ad10ac51f615b6a521b1"}, + {file = "zipp-3.1.0-py3-none-any.whl", hash = "sha256:aa36550ff0c0b7ef7fa639055d797116ee891440eac1a56f378e2d3179e0320b"}, + {file = "zipp-3.1.0.tar.gz", hash = "sha256:c599e4d75c98f6798c509911d08a22e6c021d074469042177c8c86fb92eefd96"}, +] diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 00000000..9bb4df17 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,35 @@ +[tool.poetry] +name = "solnlib" +version = "0.0.0" +description = "The Splunk Software Development Kit for Splunk Solutions" +authors = ["Splunk "] +license = "Apache-2.0" + +[tool.poetry.dependencies] +python = "~2.7 || ^3.7" +requests = "^2.24" +future = "^0" +splunk-sdk = "^1.6" +schematics = "^2.1" +sortedcontainers = "^2.2" + +[tool.poetry.dev-dependencies] +mock = [ + { version = "^3", python = "^2.7" }, + { version = "^4", python = "^3.7" } +] +pytest-expect = "^1.1.0" +pytest = [ + { version = "^4.6", python = "^2.7" }, + { version = "^6.0", python = "^3.7" } +] +pytest-cov = "^2" +poetry-dynamic-versioning = { version = "^0.8", python = "^3.7" } +pyyaml = "^5.3" + 
+[tool.poetry-dynamic-versioning] +enable = true + +[build-system] +requires = ["poetry>=1.0.0"] +build-backend = "poetry.masonry.api" \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index ed5c5a87..3d07adbd 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1,25 @@ -setuptools-lint==0.4.1 -pytest==2.9.1 -pytest-cov==2.2.1 -mock==2.0.0 -requests==2.24.0 -future==0.18.2 -pytest-expect \ No newline at end of file +certifi==2020.6.20 \ + --hash=sha256:8fc0819f1f30ba15bdb34cceffb9ef04d99f420f68eb75d901e9560b8749fc41 \ + --hash=sha256:5930595817496dd21bb8dc35dad090f1c2cd0adfaf21204bf6732ca5d8ee34d3 +chardet==3.0.4 \ + --hash=sha256:fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed4531e3e15460124c106691 \ + --hash=sha256:84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae +future==0.18.2 \ + --hash=sha256:b1bead90b70cf6ec3f0710ae53a525360fa360d306a86583adc6bf83a4db537d +idna==2.10 \ + --hash=sha256:b97d804b1e9b523befed77c48dacec60e6dcb0b5391d57af6a65a312a90648c0 \ + --hash=sha256:b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6 +requests==2.24.0 \ + --hash=sha256:fe75cc94a9443b9246fc7049224f75604b113c36acb93f87b80ed42c44cbb898 \ + --hash=sha256:b3559a131db72c33ee969480840fff4bb6dd111de7dd27c8ee1f820f4f00231b +schematics==2.1.0 \ + --hash=sha256:8fcc6182606fd0b24410a1dbb066d9bbddbe8da9c9509f47b743495706239283 \ + --hash=sha256:a40b20635c0e43d18d3aff76220f6cd95ea4decb3f37765e49529b17d81b0439 +sortedcontainers==2.2.2 \ + --hash=sha256:c633ebde8580f241f274c1f8994a665c0e54a17724fecd0cae2f079e09c36d3f \ + --hash=sha256:4e73a757831fc3ca4de2859c422564239a31d8213d09a2a666e375807034d2ba +splunk-sdk==1.6.14 \ + --hash=sha256:9f0fa01cbf706f3777d055d3d3dc05b0d3b51ec42978e5917d49cd09a7e7750f +urllib3==1.25.10 \ + --hash=sha256:e7983572181f5e1522d9c98453462384ee92a0be7fac5f1413a1e35c56cc0461 \ + --hash=sha256:91056c15fa70756691db97756772bb1eb9678fa585d9184f24534b100dc60f4a diff --git a/setup.cfg b/setup.cfg deleted file mode 
100644 index 44c78c29..00000000 --- a/setup.cfg +++ /dev/null @@ -1,26 +0,0 @@ -[bdist_wheel] -universal = 1 - -[flake8] -exclude = docs - -[aliases] -# Define setup.py command aliases here -test = pytest - -[tool:pytest] -collect_ignore = ['setup.py'] - -[metadata] -# ensure that the LICENSE file is included in the built wheels -license_file = LICENSE - -[devpi:upload] -formats=sdist,bdist_wheel - -[versioneer] -VCS = git -style = pep440 -versionfile_source = solnlib/_version.py -versionfile_build = solnlib/_version.py -tag_prefix = v \ No newline at end of file diff --git a/setup.py b/setup.py deleted file mode 100644 index 566477bb..00000000 --- a/setup.py +++ /dev/null @@ -1,143 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2016 Splunk, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"): you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import re -import os.path as op -from setuptools import setup, Command, find_packages -import versioneer - -with open('solnlib/__init__.py', 'r') as fd: - version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', - fd.read(), re.MULTILINE).group(1) - -if not version: - raise RuntimeError('Cannot find version information') - - -class TestCommand(Command): - ''' - Command to run the whole test suite. - ''' - description = 'Run full test suite.' 
- user_options = [] - - def initialize_options(self): - pass - - def finalize_options(self): - pass - - def run(self): - import pytest - tests_dir = op.sep.join([op.dirname(op.abspath(__file__)), 'tests']) - pytest.main(['-v', tests_dir]) - - -class JTestCommand(Command): - ''' - Command to run the whole test suite with junit report. - ''' - description = 'Run full test suite with junit report.' - user_options = [] - - def initialize_options(self): - pass - - def finalize_options(self): - pass - - def run(self): - import pytest - tests_dir = op.sep.join([op.dirname(op.abspath(__file__)), 'tests']) - pytest.main(['-v', '--junitxml=junit_report.xml', tests_dir]) - - -class CoverageCommand(Command): - ''' - Command to run the whole coverage. - ''' - description = 'Run full coverage.' - user_options = [] - - def initialize_options(self): - pass - - def finalize_options(self): - pass - - def run(self): - import pytest - - tests_dir = op.sep.join([op.dirname(op.abspath(__file__)), 'tests']) - pytest.main(['-v', '--cov=solnlib', tests_dir]) - - -class CoverageHtmlCommand(Command): - ''' - Command to run the whole coverage. - ''' - description = 'Run full coverage.' 
- user_options = [] - - def initialize_options(self): - pass - - def finalize_options(self): - pass - - def run(self): - import pytest - - tests_dir = op.sep.join([op.dirname(op.abspath(__file__)), 'tests']) - pytest.main(['-v', '--cov=solnlib', '--cov-report=html', tests_dir]) - - -setup( - name='solnlib', - - description='The Splunk Software Development Kit for Splunk Solutions', - - author='Splunk, Inc.', - - author_email='Shanghai-TA-dev@splunk.com', - - license='http://www.apache.org/licenses/LICENSE-2.0', - - url='https://git.splunk.com/scm/solnsc/lib-solutions-python.git', - - packages=find_packages(exclude=['tests', 'examples']), - - package_data={'': ['LICENSE']}, - - install_requires=[ - 'requests' - ], - version=versioneer.get_version(), - cmdclass={'test': TestCommand, - 'jtest': JTestCommand, - 'cov': CoverageCommand, - 'cov_html': CoverageHtmlCommand}, - - classifiers=[ - 'Programming Language :: Python', - "Development Status :: 6 - Mature", - 'Environment :: Other Environment', - 'Intended Audience :: Developers', - 'License :: OSI Approved :: Apache Software License', - 'Operating System :: OS Independent', - 'Topic :: Software Development :: Libraries :: Python Modules', - 'Topic :: Software Development :: Libraries :: Application Frameworks'] -) diff --git a/solnlib/_version.py b/solnlib/_version.py deleted file mode 100644 index 1c66cb39..00000000 --- a/solnlib/_version.py +++ /dev/null @@ -1,520 +0,0 @@ - -# This file helps to compute a version number in source trees obtained from -# git-archive tarball (such as those provided by githubs download-from-tag -# feature). Distribution tarballs (built by setup.py sdist) and build -# directories (produced by setup.py build) will contain a much shorter file -# that just contains the computed version number. - -# This file is released into the public domain. 
Generated by -# versioneer-0.18 (https://github.com/warner/python-versioneer) - -"""Git implementation of _version.py.""" - -import errno -import os -import re -import subprocess -import sys - - -def get_keywords(): - """Get the keywords needed to look up the version information.""" - # these strings will be replaced by git during git-archive. - # setup.py/versioneer.py will grep for the variable names, so they must - # each be defined on a line of their own. _version.py will just call - # get_keywords(). - git_refnames = "$Format:%d$" - git_full = "$Format:%H$" - git_date = "$Format:%ci$" - keywords = {"refnames": git_refnames, "full": git_full, "date": git_date} - return keywords - - -class VersioneerConfig: - """Container for Versioneer configuration parameters.""" - - -def get_config(): - """Create, populate and return the VersioneerConfig() object.""" - # these strings are filled in when 'setup.py versioneer' creates - # _version.py - cfg = VersioneerConfig() - cfg.VCS = "git" - cfg.style = "pep440" - cfg.tag_prefix = "None" - cfg.parentdir_prefix = "None" - cfg.versionfile_source = "solnlib/_version.py" - cfg.verbose = False - return cfg - - -class NotThisMethod(Exception): - """Exception raised if a method is not valid for the current scenario.""" - - -LONG_VERSION_PY = {} -HANDLERS = {} - - -def register_vcs_handler(vcs, method): # decorator - """Decorator to mark a method as the handler for a particular VCS.""" - def decorate(f): - """Store f in HANDLERS[vcs][method].""" - if vcs not in HANDLERS: - HANDLERS[vcs] = {} - HANDLERS[vcs][method] = f - return f - return decorate - - -def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, - env=None): - """Call the given command(s).""" - assert isinstance(commands, list) - p = None - for c in commands: - try: - dispcmd = str([c] + args) - # remember shell=False, so use git.cmd on windows, not just git - p = subprocess.Popen([c] + args, cwd=cwd, env=env, - stdout=subprocess.PIPE, - 
stderr=(subprocess.PIPE if hide_stderr - else None)) - break - except EnvironmentError: - e = sys.exc_info()[1] - if e.errno == errno.ENOENT: - continue - if verbose: - print("unable to run %s" % dispcmd) - print(e) - return None, None - else: - if verbose: - print("unable to find command, tried %s" % (commands,)) - return None, None - stdout = p.communicate()[0].strip() - if sys.version_info[0] >= 3: - stdout = stdout.decode() - if p.returncode != 0: - if verbose: - print("unable to run %s (error)" % dispcmd) - print("stdout was %s" % stdout) - return None, p.returncode - return stdout, p.returncode - - -def versions_from_parentdir(parentdir_prefix, root, verbose): - """Try to determine the version from the parent directory name. - - Source tarballs conventionally unpack into a directory that includes both - the project name and a version string. We will also support searching up - two directory levels for an appropriately named parent directory - """ - rootdirs = [] - - for i in range(3): - dirname = os.path.basename(root) - if dirname.startswith(parentdir_prefix): - return {"version": dirname[len(parentdir_prefix):], - "full-revisionid": None, - "dirty": False, "error": None, "date": None} - else: - rootdirs.append(root) - root = os.path.dirname(root) # up a level - - if verbose: - print("Tried directories %s but none started with prefix %s" % - (str(rootdirs), parentdir_prefix)) - raise NotThisMethod("rootdir doesn't start with parentdir_prefix") - - -@register_vcs_handler("git", "get_keywords") -def git_get_keywords(versionfile_abs): - """Extract version information from the given file.""" - # the code embedded in _version.py can just fetch the value of these - # keywords. When used from setup.py, we don't want to import _version.py, - # so we do it with a regexp instead. This function is not used from - # _version.py. 
- keywords = {} - try: - f = open(versionfile_abs, "r") - for line in f.readlines(): - if line.strip().startswith("git_refnames ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["refnames"] = mo.group(1) - if line.strip().startswith("git_full ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["full"] = mo.group(1) - if line.strip().startswith("git_date ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["date"] = mo.group(1) - f.close() - except EnvironmentError: - pass - return keywords - - -@register_vcs_handler("git", "keywords") -def git_versions_from_keywords(keywords, tag_prefix, verbose): - """Get version information from git keywords.""" - if not keywords: - raise NotThisMethod("no keywords at all, weird") - date = keywords.get("date") - if date is not None: - # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant - # datestamp. However we prefer "%ci" (which expands to an "ISO-8601 - # -like" string, which we must then edit to make compliant), because - # it's been around since git-1.5.3, and it's too difficult to - # discover which version we're using, or to work around using an - # older one. - date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) - refnames = keywords["refnames"].strip() - if refnames.startswith("$Format"): - if verbose: - print("keywords are unexpanded, not using") - raise NotThisMethod("unexpanded keywords, not a git-archive tarball") - refs = set([r.strip() for r in refnames.strip("()").split(",")]) - # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of - # just "foo-1.0". If we see a "tag: " prefix, prefer those. - TAG = "tag: " - tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) - if not tags: - # Either we're using git < 1.8.3, or there really are no tags. We use - # a heuristic: assume all version tags have a digit. 
The old git %d - # expansion behaves like git log --decorate=short and strips out the - # refs/heads/ and refs/tags/ prefixes that would let us distinguish - # between branches and tags. By ignoring refnames without digits, we - # filter out many common branch names like "release" and - # "stabilization", as well as "HEAD" and "master". - tags = set([r for r in refs if re.search(r'\d', r)]) - if verbose: - print("discarding '%s', no digits" % ",".join(refs - tags)) - if verbose: - print("likely tags: %s" % ",".join(sorted(tags))) - for ref in sorted(tags): - # sorting will prefer e.g. "2.0" over "2.0rc1" - if ref.startswith(tag_prefix): - r = ref[len(tag_prefix):] - if verbose: - print("picking %s" % r) - return {"version": r, - "full-revisionid": keywords["full"].strip(), - "dirty": False, "error": None, - "date": date} - # no suitable tags, so version is "0+unknown", but full hex is still there - if verbose: - print("no suitable tags, using unknown + full revision id") - return {"version": "0+unknown", - "full-revisionid": keywords["full"].strip(), - "dirty": False, "error": "no suitable tags", "date": None} - - -@register_vcs_handler("git", "pieces_from_vcs") -def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): - """Get version from 'git describe' in the root of the source tree. - - This only gets called if the git-archive 'subst' keywords were *not* - expanded, and _version.py hasn't already been rewritten with a short - version string, meaning we're inside a checked out source tree. 
- """ - GITS = ["git"] - if sys.platform == "win32": - GITS = ["git.cmd", "git.exe"] - - out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, - hide_stderr=True) - if rc != 0: - if verbose: - print("Directory %s not under git control" % root) - raise NotThisMethod("'git rev-parse --git-dir' returned error") - - # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] - # if there isn't one, this yields HEX[-dirty] (no NUM) - describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty", - "--always", "--long", - "--match", "%s*" % tag_prefix], - cwd=root) - # --long was added in git-1.5.5 - if describe_out is None: - raise NotThisMethod("'git describe' failed") - describe_out = describe_out.strip() - full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) - if full_out is None: - raise NotThisMethod("'git rev-parse' failed") - full_out = full_out.strip() - - pieces = {} - pieces["long"] = full_out - pieces["short"] = full_out[:7] # maybe improved later - pieces["error"] = None - - # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] - # TAG might have hyphens. - git_describe = describe_out - - # look for -dirty suffix - dirty = git_describe.endswith("-dirty") - pieces["dirty"] = dirty - if dirty: - git_describe = git_describe[:git_describe.rindex("-dirty")] - - # now we have TAG-NUM-gHEX or HEX - - if "-" in git_describe: - # TAG-NUM-gHEX - mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) - if not mo: - # unparseable. Maybe git-describe is misbehaving? 
- pieces["error"] = ("unable to parse git-describe output: '%s'" - % describe_out) - return pieces - - # tag - full_tag = mo.group(1) - if not full_tag.startswith(tag_prefix): - if verbose: - fmt = "tag '%s' doesn't start with prefix '%s'" - print(fmt % (full_tag, tag_prefix)) - pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" - % (full_tag, tag_prefix)) - return pieces - pieces["closest-tag"] = full_tag[len(tag_prefix):] - - # distance: number of commits since tag - pieces["distance"] = int(mo.group(2)) - - # commit: short hex revision ID - pieces["short"] = mo.group(3) - - else: - # HEX: no tags - pieces["closest-tag"] = None - count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], - cwd=root) - pieces["distance"] = int(count_out) # total number of commits - - # commit date: see ISO-8601 comment in git_versions_from_keywords() - date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], - cwd=root)[0].strip() - pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) - - return pieces - - -def plus_or_dot(pieces): - """Return a + if we don't already have one, else return a .""" - if "+" in pieces.get("closest-tag", ""): - return "." - return "+" - - -def render_pep440(pieces): - """Build up version string, with post-release "local version identifier". - - Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you - get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty - - Exceptions: - 1: no tags. git_describe was just HEX. 
0+untagged.DISTANCE.gHEX[.dirty] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += plus_or_dot(pieces) - rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) - if pieces["dirty"]: - rendered += ".dirty" - else: - # exception #1 - rendered = "0+untagged.%d.g%s" % (pieces["distance"], - pieces["short"]) - if pieces["dirty"]: - rendered += ".dirty" - return rendered - - -def render_pep440_pre(pieces): - """TAG[.post.devDISTANCE] -- No -dirty. - - Exceptions: - 1: no tags. 0.post.devDISTANCE - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"]: - rendered += ".post.dev%d" % pieces["distance"] - else: - # exception #1 - rendered = "0.post.dev%d" % pieces["distance"] - return rendered - - -def render_pep440_post(pieces): - """TAG[.postDISTANCE[.dev0]+gHEX] . - - The ".dev0" means dirty. Note that .dev0 sorts backwards - (a dirty tree will appear "older" than the corresponding clean one), - but you shouldn't be releasing software with -dirty anyways. - - Exceptions: - 1: no tags. 0.postDISTANCE[.dev0] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += ".post%d" % pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - rendered += plus_or_dot(pieces) - rendered += "g%s" % pieces["short"] - else: - # exception #1 - rendered = "0.post%d" % pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - rendered += "+g%s" % pieces["short"] - return rendered - - -def render_pep440_old(pieces): - """TAG[.postDISTANCE[.dev0]] . - - The ".dev0" means dirty. - - Eexceptions: - 1: no tags. 
0.postDISTANCE[.dev0] - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"] or pieces["dirty"]: - rendered += ".post%d" % pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - else: - # exception #1 - rendered = "0.post%d" % pieces["distance"] - if pieces["dirty"]: - rendered += ".dev0" - return rendered - - -def render_git_describe(pieces): - """TAG[-DISTANCE-gHEX][-dirty]. - - Like 'git describe --tags --dirty --always'. - - Exceptions: - 1: no tags. HEX[-dirty] (note: no 'g' prefix) - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - if pieces["distance"]: - rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) - else: - # exception #1 - rendered = pieces["short"] - if pieces["dirty"]: - rendered += "-dirty" - return rendered - - -def render_git_describe_long(pieces): - """TAG-DISTANCE-gHEX[-dirty]. - - Like 'git describe --tags --dirty --always -long'. - The distance/hash is unconditional. - - Exceptions: - 1: no tags. 
HEX[-dirty] (note: no 'g' prefix) - """ - if pieces["closest-tag"]: - rendered = pieces["closest-tag"] - rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) - else: - # exception #1 - rendered = pieces["short"] - if pieces["dirty"]: - rendered += "-dirty" - return rendered - - -def render(pieces, style): - """Render the given version pieces into the requested style.""" - if pieces["error"]: - return {"version": "unknown", - "full-revisionid": pieces.get("long"), - "dirty": None, - "error": pieces["error"], - "date": None} - - if not style or style == "default": - style = "pep440" # the default - - if style == "pep440": - rendered = render_pep440(pieces) - elif style == "pep440-pre": - rendered = render_pep440_pre(pieces) - elif style == "pep440-post": - rendered = render_pep440_post(pieces) - elif style == "pep440-old": - rendered = render_pep440_old(pieces) - elif style == "git-describe": - rendered = render_git_describe(pieces) - elif style == "git-describe-long": - rendered = render_git_describe_long(pieces) - else: - raise ValueError("unknown style '%s'" % style) - - return {"version": rendered, "full-revisionid": pieces["long"], - "dirty": pieces["dirty"], "error": None, - "date": pieces.get("date")} - - -def get_versions(): - """Get version information or return default if unable to do so.""" - # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have - # __file__, we can work backwards from there to the root. Some - # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which - # case we can only use expanded keywords. - - cfg = get_config() - verbose = cfg.verbose - - try: - return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, - verbose) - except NotThisMethod: - pass - - try: - root = os.path.realpath(__file__) - # versionfile_source is the relative path from the top of the source - # tree (where the .git directory might live) to this file. Invert - # this to find the root from __file__. 
- for i in cfg.versionfile_source.split('/'): - root = os.path.dirname(root) - except NameError: - return {"version": "0+unknown", "full-revisionid": None, - "dirty": None, - "error": "unable to find root of source tree", - "date": None} - - try: - pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) - return render(pieces, cfg.style) - except NotThisMethod: - pass - - try: - if cfg.parentdir_prefix: - return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) - except NotThisMethod: - pass - - return {"version": "0+unknown", "full-revisionid": None, - "dirty": None, - "error": "unable to compute version", "date": None} diff --git a/solnlib/acl.py b/solnlib/acl.py index 88da2f67..4bf931a5 100644 --- a/solnlib/acl.py +++ b/solnlib/acl.py @@ -19,7 +19,7 @@ import json from . import splunk_rest_client as rest_client -from .packages.splunklib import binding +from splunklib import binding from .utils import retry __all__ = ['ACLException', diff --git a/solnlib/api_documenter.py b/solnlib/api_documenter.py index b9df3078..f95d59ea 100644 --- a/solnlib/api_documenter.py +++ b/solnlib/api_documenter.py @@ -100,7 +100,7 @@ import tempfile from . import splunk_rest_client as rest -from .packages import yaml +import yaml __all__ = ['api', 'api_model', diff --git a/solnlib/conf_manager.py b/solnlib/conf_manager.py index 03e9bb44..d2e43682 100644 --- a/solnlib/conf_manager.py +++ b/solnlib/conf_manager.py @@ -25,7 +25,7 @@ from . 
import splunk_rest_client as rest_client from .credentials import CredentialManager from .credentials import CredentialNotExistException -from .packages.splunklib import binding +from splunklib import binding from .utils import retry __all__ = ['ConfStanzaNotExistException', diff --git a/solnlib/credentials.py b/solnlib/credentials.py index 9ca7de7c..2ed6829c 100644 --- a/solnlib/credentials.py +++ b/solnlib/credentials.py @@ -24,7 +24,7 @@ from .net_utils import is_valid_hostname from .net_utils import is_valid_port from .net_utils import is_valid_scheme -from .packages.splunklib import binding +from splunklib import binding from .splunkenv import get_splunkd_access_info from .utils import retry diff --git a/solnlib/hec_config.py b/solnlib/hec_config.py index 05c79ea3..21aa0177 100644 --- a/solnlib/hec_config.py +++ b/solnlib/hec_config.py @@ -13,7 +13,7 @@ # under the License. from . import splunk_rest_client as rest_client -from .packages.splunklib import binding +from splunklib import binding from .utils import retry __all__ = ['HECConfig'] diff --git a/solnlib/log.py b/solnlib/log.py index 93468211..639d1ef6 100644 --- a/solnlib/log.py +++ b/solnlib/log.py @@ -21,7 +21,7 @@ import os.path as op from threading import Lock -from .packages.splunklib.six import with_metaclass +from splunklib.six import with_metaclass from .pattern import Singleton from .splunkenv import make_splunkhome_path diff --git a/solnlib/modular_input/__init__.py b/solnlib/modular_input/__init__.py index 4a655613..58ed0ea0 100644 --- a/solnlib/modular_input/__init__.py +++ b/solnlib/modular_input/__init__.py @@ -26,7 +26,7 @@ from .event_writer import HECEventWriter from .modular_input import ModularInput from .modular_input import ModularInputException -from ..packages.splunklib.modularinput.argument import Argument +from splunklib.modularinput.argument import Argument __all__ = ['EventException', 'XMLEvent', diff --git a/solnlib/modular_input/checkpointer.py 
b/solnlib/modular_input/checkpointer.py index 9f46808c..5eb342d2 100644 --- a/solnlib/modular_input/checkpointer.py +++ b/solnlib/modular_input/checkpointer.py @@ -27,8 +27,8 @@ from abc import ABCMeta, abstractmethod from .. import splunk_rest_client as rest_client -from ..packages.splunklib import binding -from ..packages.splunklib.six import with_metaclass +from splunklib import binding +from splunklib.six import with_metaclass from ..utils import retry __all__ = ['CheckpointerException', diff --git a/solnlib/modular_input/event_writer.py b/solnlib/modular_input/event_writer.py index 9815227a..bd1fd2e8 100644 --- a/solnlib/modular_input/event_writer.py +++ b/solnlib/modular_input/event_writer.py @@ -28,8 +28,8 @@ from .. import splunk_rest_client as rest_client from .. import utils from ..hec_config import HECConfig -from ..packages.splunklib import binding -from ..packages.splunklib.six import with_metaclass +from splunklib import binding +from splunklib.six import with_metaclass from ..splunkenv import get_splunkd_access_info from ..utils import retry from random import randint diff --git a/solnlib/modular_input/modular_input.py b/solnlib/modular_input/modular_input.py index 7307fb13..13ead318 100644 --- a/solnlib/modular_input/modular_input.py +++ b/solnlib/modular_input/modular_input.py @@ -31,18 +31,18 @@ from urllib import parse as urlparse from abc import ABCMeta, abstractmethod -from ..packages.splunklib.six import with_metaclass +from splunklib.six import with_metaclass try: import xml.etree.cElementTree as ET except ImportError: import xml.etree.ElementTree as ET -from ..packages.splunklib import binding -from ..packages.splunklib.modularinput.argument import Argument -from ..packages.splunklib.modularinput.scheme import Scheme -from ..packages.splunklib.modularinput.input_definition import InputDefinition -from ..packages.splunklib.modularinput.validation_definition import ValidationDefinition +from splunklib import binding +from 
splunklib.modularinput.argument import Argument +from splunklib.modularinput.scheme import Scheme +from splunklib.modularinput.input_definition import InputDefinition +from splunklib.modularinput.validation_definition import ValidationDefinition from .. import utils from . import checkpointer diff --git a/solnlib/packages/__init__.py b/solnlib/packages/__init__.py deleted file mode 100644 index 6b2d4e9f..00000000 --- a/solnlib/packages/__init__.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright 2016 Splunk, Inc. -# -# Licensed under the Apache License, Version 2.0 (the 'License'): you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an 'AS IS' BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from __future__ import absolute_import - -import sys - -try: - from . import requests -except ImportError: - import requests - - sys.modules['%s.requests' % __name__] = requests - -try: - from . import splunklib -except ImportError: - import splunklib - - sys.modules['%s.splunklib' % __name__] = splunklib - -try: - from . import sortedcontainers -except ImportError: - import sortedcontainers - - sys.modules['%s.sortedcontainers' % __name__] = sortedcontainers - -try: - from . import schematics -except ImportError: - import schematics - - sys.modules['%s.schematics' % __name__] = schematics - -if sys.version_info[0] >= 3: - try: - from . import yamlpy3 as yaml - except ImportError: - import yaml - - sys.modules['%s.yaml' % __name__] = yaml -else: - try: - from . 
import yamlpy2 as yaml - except ImportError: - import yaml - - sys.modules['%s.yaml' % __name__] = yaml - diff --git a/solnlib/packages/requests/__init__.py b/solnlib/packages/requests/__init__.py deleted file mode 100644 index 9a899df6..00000000 --- a/solnlib/packages/requests/__init__.py +++ /dev/null @@ -1,131 +0,0 @@ -# -*- coding: utf-8 -*- - -# __ -# /__) _ _ _ _ _/ _ -# / ( (- (/ (/ (- _) / _) -# / - -""" -Requests HTTP Library -~~~~~~~~~~~~~~~~~~~~~ - -Requests is an HTTP library, written in Python, for human beings. Basic GET -usage: - - >>> import requests - >>> r = requests.get('https://www.python.org') - >>> r.status_code - 200 - >>> 'Python is a programming language' in r.content - True - -... or POST: - - >>> payload = dict(key1='value1', key2='value2') - >>> r = requests.post('https://httpbin.org/post', data=payload) - >>> print(r.text) - { - ... - "form": { - "key2": "value2", - "key1": "value1" - }, - ... - } - -The other HTTP methods are supported - see `requests.api`. Full documentation -is at . - -:copyright: (c) 2017 by Kenneth Reitz. -:license: Apache 2.0, see LICENSE for more details. -""" - -import urllib3 -import chardet -import warnings -from .exceptions import RequestsDependencyWarning - - -def check_compatibility(urllib3_version, chardet_version): - urllib3_version = urllib3_version.split('.') - assert urllib3_version != ['dev'] # Verify urllib3 isn't installed from git. - - # Sometimes, urllib3 only reports its version as 16.1. - if len(urllib3_version) == 2: - urllib3_version.append('0') - - # Check urllib3 for compatibility. - major, minor, patch = urllib3_version # noqa: F811 - major, minor, patch = int(major), int(minor), int(patch) - # urllib3 >= 1.21.1, <= 1.25 - assert major == 1 - assert minor >= 21 - assert minor <= 25 - - # Check chardet for compatibility. 
- major, minor, patch = chardet_version.split('.')[:3] - major, minor, patch = int(major), int(minor), int(patch) - # chardet >= 3.0.2, < 3.1.0 - assert major == 3 - assert minor < 1 - assert patch >= 2 - - -def _check_cryptography(cryptography_version): - # cryptography < 1.3.4 - try: - cryptography_version = list(map(int, cryptography_version.split('.'))) - except ValueError: - return - - if cryptography_version < [1, 3, 4]: - warning = 'Old version of cryptography ({}) may cause slowdown.'.format(cryptography_version) - warnings.warn(warning, RequestsDependencyWarning) - -# Check imported dependencies for compatibility. -try: - check_compatibility(urllib3.__version__, chardet.__version__) -except (AssertionError, ValueError): - warnings.warn("urllib3 ({}) or chardet ({}) doesn't match a supported " - "version!".format(urllib3.__version__, chardet.__version__), - RequestsDependencyWarning) - -# Attempt to enable urllib3's SNI support, if possible -try: - from urllib3.contrib import pyopenssl - pyopenssl.inject_into_urllib3() - - # Check cryptography version - from cryptography import __version__ as cryptography_version - _check_cryptography(cryptography_version) -except ImportError: - pass - -# urllib3's DependencyWarnings should be silenced. -from urllib3.exceptions import DependencyWarning -warnings.simplefilter('ignore', DependencyWarning) - -from .__version__ import __title__, __description__, __url__, __version__ -from .__version__ import __build__, __author__, __author_email__, __license__ -from .__version__ import __copyright__, __cake__ - -from . import utils -from . 
import packages -from .models import Request, Response, PreparedRequest -from .api import request, get, head, post, patch, put, delete, options -from .sessions import session, Session -from .status_codes import codes -from .exceptions import ( - RequestException, Timeout, URLRequired, - TooManyRedirects, HTTPError, ConnectionError, - FileModeWarning, ConnectTimeout, ReadTimeout -) - -# Set default logging handler to avoid "No handler found" warnings. -import logging -from logging import NullHandler - -logging.getLogger(__name__).addHandler(NullHandler()) - -# FileModeWarnings go off per the default. -warnings.simplefilter('default', FileModeWarning, append=True) diff --git a/solnlib/packages/requests/__version__.py b/solnlib/packages/requests/__version__.py deleted file mode 100644 index 9844f740..00000000 --- a/solnlib/packages/requests/__version__.py +++ /dev/null @@ -1,14 +0,0 @@ -# .-. .-. .-. . . .-. .-. .-. .-. -# |( |- |.| | | |- `-. | `-. -# ' ' `-' `-`.`-' `-' `-' ' `-' - -__title__ = 'requests' -__description__ = 'Python HTTP for Humans.' 
-__url__ = 'http://python-requests.org' -__version__ = '2.22.0' -__build__ = 0x022200 -__author__ = 'Kenneth Reitz' -__author_email__ = 'me@kennethreitz.org' -__license__ = 'Apache 2.0' -__copyright__ = 'Copyright 2019 Kenneth Reitz' -__cake__ = u'\u2728 \U0001f370 \u2728' diff --git a/solnlib/packages/requests/_internal_utils.py b/solnlib/packages/requests/_internal_utils.py deleted file mode 100644 index 759d9a56..00000000 --- a/solnlib/packages/requests/_internal_utils.py +++ /dev/null @@ -1,42 +0,0 @@ -# -*- coding: utf-8 -*- - -""" -requests._internal_utils -~~~~~~~~~~~~~~ - -Provides utility functions that are consumed internally by Requests -which depend on extremely few external helpers (such as compat) -""" - -from .compat import is_py2, builtin_str, str - - -def to_native_string(string, encoding='ascii'): - """Given a string object, regardless of type, returns a representation of - that string in the native string type, encoding and decoding where - necessary. This assumes ASCII unless told otherwise. - """ - if isinstance(string, builtin_str): - out = string - else: - if is_py2: - out = string.encode(encoding) - else: - out = string.decode(encoding) - - return out - - -def unicode_is_ascii(u_string): - """Determine if unicode string only contains ASCII characters. - - :param str u_string: unicode string to check. Must be unicode - and not Python 2 `str`. - :rtype: bool - """ - assert isinstance(u_string, str) - try: - u_string.encode('ascii') - return True - except UnicodeEncodeError: - return False diff --git a/solnlib/packages/requests/adapters.py b/solnlib/packages/requests/adapters.py deleted file mode 100644 index fa4d9b3c..00000000 --- a/solnlib/packages/requests/adapters.py +++ /dev/null @@ -1,533 +0,0 @@ -# -*- coding: utf-8 -*- - -""" -requests.adapters -~~~~~~~~~~~~~~~~~ - -This module contains the transport adapters that Requests uses to define -and maintain connections. 
-""" - -import os.path -import socket - -from urllib3.poolmanager import PoolManager, proxy_from_url -from urllib3.response import HTTPResponse -from urllib3.util import parse_url -from urllib3.util import Timeout as TimeoutSauce -from urllib3.util.retry import Retry -from urllib3.exceptions import ClosedPoolError -from urllib3.exceptions import ConnectTimeoutError -from urllib3.exceptions import HTTPError as _HTTPError -from urllib3.exceptions import MaxRetryError -from urllib3.exceptions import NewConnectionError -from urllib3.exceptions import ProxyError as _ProxyError -from urllib3.exceptions import ProtocolError -from urllib3.exceptions import ReadTimeoutError -from urllib3.exceptions import SSLError as _SSLError -from urllib3.exceptions import ResponseError -from urllib3.exceptions import LocationValueError - -from .models import Response -from .compat import urlparse, basestring -from .utils import (DEFAULT_CA_BUNDLE_PATH, extract_zipped_paths, - get_encoding_from_headers, prepend_scheme_if_needed, - get_auth_from_url, urldefragauth, select_proxy) -from .structures import CaseInsensitiveDict -from .cookies import extract_cookies_to_jar -from .exceptions import (ConnectionError, ConnectTimeout, ReadTimeout, SSLError, - ProxyError, RetryError, InvalidSchema, InvalidProxyURL, - InvalidURL) -from .auth import _basic_auth_str - -try: - from urllib3.contrib.socks import SOCKSProxyManager -except ImportError: - def SOCKSProxyManager(*args, **kwargs): - raise InvalidSchema("Missing dependencies for SOCKS support.") - -DEFAULT_POOLBLOCK = False -DEFAULT_POOLSIZE = 10 -DEFAULT_RETRIES = 0 -DEFAULT_POOL_TIMEOUT = None - - -class BaseAdapter(object): - """The Base Transport Adapter""" - - def __init__(self): - super(BaseAdapter, self).__init__() - - def send(self, request, stream=False, timeout=None, verify=True, - cert=None, proxies=None): - """Sends PreparedRequest object. Returns Response object. - - :param request: The :class:`PreparedRequest ` being sent. 
- :param stream: (optional) Whether to stream the request content. - :param timeout: (optional) How long to wait for the server to send - data before giving up, as a float, or a :ref:`(connect timeout, - read timeout) ` tuple. - :type timeout: float or tuple - :param verify: (optional) Either a boolean, in which case it controls whether we verify - the server's TLS certificate, or a string, in which case it must be a path - to a CA bundle to use - :param cert: (optional) Any user-provided SSL certificate to be trusted. - :param proxies: (optional) The proxies dictionary to apply to the request. - """ - raise NotImplementedError - - def close(self): - """Cleans up adapter specific items.""" - raise NotImplementedError - - -class HTTPAdapter(BaseAdapter): - """The built-in HTTP Adapter for urllib3. - - Provides a general-case interface for Requests sessions to contact HTTP and - HTTPS urls by implementing the Transport Adapter interface. This class will - usually be created by the :class:`Session ` class under the - covers. - - :param pool_connections: The number of urllib3 connection pools to cache. - :param pool_maxsize: The maximum number of connections to save in the pool. - :param max_retries: The maximum number of retries each connection - should attempt. Note, this applies only to failed DNS lookups, socket - connections and connection timeouts, never to requests where data has - made it to the server. By default, Requests does not retry failed - connections. If you need granular control over the conditions under - which we retry a request, import urllib3's ``Retry`` class and pass - that instead. - :param pool_block: Whether the connection pool should block for connections. 
- - Usage:: - - >>> import requests - >>> s = requests.Session() - >>> a = requests.adapters.HTTPAdapter(max_retries=3) - >>> s.mount('http://', a) - """ - __attrs__ = ['max_retries', 'config', '_pool_connections', '_pool_maxsize', - '_pool_block'] - - def __init__(self, pool_connections=DEFAULT_POOLSIZE, - pool_maxsize=DEFAULT_POOLSIZE, max_retries=DEFAULT_RETRIES, - pool_block=DEFAULT_POOLBLOCK): - if max_retries == DEFAULT_RETRIES: - self.max_retries = Retry(0, read=False) - else: - self.max_retries = Retry.from_int(max_retries) - self.config = {} - self.proxy_manager = {} - - super(HTTPAdapter, self).__init__() - - self._pool_connections = pool_connections - self._pool_maxsize = pool_maxsize - self._pool_block = pool_block - - self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block) - - def __getstate__(self): - return {attr: getattr(self, attr, None) for attr in self.__attrs__} - - def __setstate__(self, state): - # Can't handle by adding 'proxy_manager' to self.__attrs__ because - # self.poolmanager uses a lambda function, which isn't pickleable. - self.proxy_manager = {} - self.config = {} - - for attr, value in state.items(): - setattr(self, attr, value) - - self.init_poolmanager(self._pool_connections, self._pool_maxsize, - block=self._pool_block) - - def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs): - """Initializes a urllib3 PoolManager. - - This method should not be called from user code, and is only - exposed for use when subclassing the - :class:`HTTPAdapter `. - - :param connections: The number of urllib3 connection pools to cache. - :param maxsize: The maximum number of connections to save in the pool. - :param block: Block when no free connections are available. - :param pool_kwargs: Extra keyword arguments used to initialize the Pool Manager. 
- """ - # save these values for pickling - self._pool_connections = connections - self._pool_maxsize = maxsize - self._pool_block = block - - self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize, - block=block, strict=True, **pool_kwargs) - - def proxy_manager_for(self, proxy, **proxy_kwargs): - """Return urllib3 ProxyManager for the given proxy. - - This method should not be called from user code, and is only - exposed for use when subclassing the - :class:`HTTPAdapter `. - - :param proxy: The proxy to return a urllib3 ProxyManager for. - :param proxy_kwargs: Extra keyword arguments used to configure the Proxy Manager. - :returns: ProxyManager - :rtype: urllib3.ProxyManager - """ - if proxy in self.proxy_manager: - manager = self.proxy_manager[proxy] - elif proxy.lower().startswith('socks'): - username, password = get_auth_from_url(proxy) - manager = self.proxy_manager[proxy] = SOCKSProxyManager( - proxy, - username=username, - password=password, - num_pools=self._pool_connections, - maxsize=self._pool_maxsize, - block=self._pool_block, - **proxy_kwargs - ) - else: - proxy_headers = self.proxy_headers(proxy) - manager = self.proxy_manager[proxy] = proxy_from_url( - proxy, - proxy_headers=proxy_headers, - num_pools=self._pool_connections, - maxsize=self._pool_maxsize, - block=self._pool_block, - **proxy_kwargs) - - return manager - - def cert_verify(self, conn, url, verify, cert): - """Verify a SSL certificate. This method should not be called from user - code, and is only exposed for use when subclassing the - :class:`HTTPAdapter `. - - :param conn: The urllib3 connection object associated with the cert. - :param url: The requested URL. - :param verify: Either a boolean, in which case it controls whether we verify - the server's TLS certificate, or a string, in which case it must be a path - to a CA bundle to use - :param cert: The SSL certificate to verify. 
- """ - if url.lower().startswith('https') and verify: - - cert_loc = None - - # Allow self-specified cert location. - if verify is not True: - cert_loc = verify - - if not cert_loc: - cert_loc = extract_zipped_paths(DEFAULT_CA_BUNDLE_PATH) - - if not cert_loc or not os.path.exists(cert_loc): - raise IOError("Could not find a suitable TLS CA certificate bundle, " - "invalid path: {}".format(cert_loc)) - - conn.cert_reqs = 'CERT_REQUIRED' - - if not os.path.isdir(cert_loc): - conn.ca_certs = cert_loc - else: - conn.ca_cert_dir = cert_loc - else: - conn.cert_reqs = 'CERT_NONE' - conn.ca_certs = None - conn.ca_cert_dir = None - - if cert: - if not isinstance(cert, basestring): - conn.cert_file = cert[0] - conn.key_file = cert[1] - else: - conn.cert_file = cert - conn.key_file = None - if conn.cert_file and not os.path.exists(conn.cert_file): - raise IOError("Could not find the TLS certificate file, " - "invalid path: {}".format(conn.cert_file)) - if conn.key_file and not os.path.exists(conn.key_file): - raise IOError("Could not find the TLS key file, " - "invalid path: {}".format(conn.key_file)) - - def build_response(self, req, resp): - """Builds a :class:`Response ` object from a urllib3 - response. This should not be called from user code, and is only exposed - for use when subclassing the - :class:`HTTPAdapter ` - - :param req: The :class:`PreparedRequest ` used to generate the response. - :param resp: The urllib3 response object. - :rtype: requests.Response - """ - response = Response() - - # Fallback to None if there's no status_code, for whatever reason. - response.status_code = getattr(resp, 'status', None) - - # Make headers case-insensitive. - response.headers = CaseInsensitiveDict(getattr(resp, 'headers', {})) - - # Set encoding. 
- response.encoding = get_encoding_from_headers(response.headers) - response.raw = resp - response.reason = response.raw.reason - - if isinstance(req.url, bytes): - response.url = req.url.decode('utf-8') - else: - response.url = req.url - - # Add new cookies from the server. - extract_cookies_to_jar(response.cookies, req, resp) - - # Give the Response some context. - response.request = req - response.connection = self - - return response - - def get_connection(self, url, proxies=None): - """Returns a urllib3 connection for the given URL. This should not be - called from user code, and is only exposed for use when subclassing the - :class:`HTTPAdapter `. - - :param url: The URL to connect to. - :param proxies: (optional) A Requests-style dictionary of proxies used on this request. - :rtype: urllib3.ConnectionPool - """ - proxy = select_proxy(url, proxies) - - if proxy: - proxy = prepend_scheme_if_needed(proxy, 'http') - proxy_url = parse_url(proxy) - if not proxy_url.host: - raise InvalidProxyURL("Please check proxy URL. It is malformed" - " and could be missing the host.") - proxy_manager = self.proxy_manager_for(proxy) - conn = proxy_manager.connection_from_url(url) - else: - # Only scheme should be lower case - parsed = urlparse(url) - url = parsed.geturl() - conn = self.poolmanager.connection_from_url(url) - - return conn - - def close(self): - """Disposes of any internal state. - - Currently, this closes the PoolManager and any active ProxyManager, - which closes any pooled connections. - """ - self.poolmanager.clear() - for proxy in self.proxy_manager.values(): - proxy.clear() - - def request_url(self, request, proxies): - """Obtain the url to use when making the final request. - - If the message is being sent through a HTTP proxy, the full URL has to - be used. Otherwise, we should only use the path portion of the URL. - - This should not be called from user code, and is only exposed for use - when subclassing the - :class:`HTTPAdapter `. 
- - :param request: The :class:`PreparedRequest ` being sent. - :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs. - :rtype: str - """ - proxy = select_proxy(request.url, proxies) - scheme = urlparse(request.url).scheme - - is_proxied_http_request = (proxy and scheme != 'https') - using_socks_proxy = False - if proxy: - proxy_scheme = urlparse(proxy).scheme.lower() - using_socks_proxy = proxy_scheme.startswith('socks') - - url = request.path_url - if is_proxied_http_request and not using_socks_proxy: - url = urldefragauth(request.url) - - return url - - def add_headers(self, request, **kwargs): - """Add any headers needed by the connection. As of v2.0 this does - nothing by default, but is left for overriding by users that subclass - the :class:`HTTPAdapter `. - - This should not be called from user code, and is only exposed for use - when subclassing the - :class:`HTTPAdapter `. - - :param request: The :class:`PreparedRequest ` to add headers to. - :param kwargs: The keyword arguments from the call to send(). - """ - pass - - def proxy_headers(self, proxy): - """Returns a dictionary of the headers to add to any request sent - through a proxy. This works with urllib3 magic to ensure that they are - correctly sent to the proxy, rather than in a tunnelled request if - CONNECT is being used. - - This should not be called from user code, and is only exposed for use - when subclassing the - :class:`HTTPAdapter `. - - :param proxy: The url of the proxy being used for this request. - :rtype: dict - """ - headers = {} - username, password = get_auth_from_url(proxy) - - if username: - headers['Proxy-Authorization'] = _basic_auth_str(username, - password) - - return headers - - def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None): - """Sends PreparedRequest object. Returns Response object. - - :param request: The :class:`PreparedRequest ` being sent. 
- :param stream: (optional) Whether to stream the request content. - :param timeout: (optional) How long to wait for the server to send - data before giving up, as a float, or a :ref:`(connect timeout, - read timeout) ` tuple. - :type timeout: float or tuple or urllib3 Timeout object - :param verify: (optional) Either a boolean, in which case it controls whether - we verify the server's TLS certificate, or a string, in which case it - must be a path to a CA bundle to use - :param cert: (optional) Any user-provided SSL certificate to be trusted. - :param proxies: (optional) The proxies dictionary to apply to the request. - :rtype: requests.Response - """ - - try: - conn = self.get_connection(request.url, proxies) - except LocationValueError as e: - raise InvalidURL(e, request=request) - - self.cert_verify(conn, request.url, verify, cert) - url = self.request_url(request, proxies) - self.add_headers(request, stream=stream, timeout=timeout, verify=verify, cert=cert, proxies=proxies) - - chunked = not (request.body is None or 'Content-Length' in request.headers) - - if isinstance(timeout, tuple): - try: - connect, read = timeout - timeout = TimeoutSauce(connect=connect, read=read) - except ValueError as e: - # this may raise a string formatting error. - err = ("Invalid timeout {}. Pass a (connect, read) " - "timeout tuple, or a single float to set " - "both timeouts to the same value".format(timeout)) - raise ValueError(err) - elif isinstance(timeout, TimeoutSauce): - pass - else: - timeout = TimeoutSauce(connect=timeout, read=timeout) - - try: - if not chunked: - resp = conn.urlopen( - method=request.method, - url=url, - body=request.body, - headers=request.headers, - redirect=False, - assert_same_host=False, - preload_content=False, - decode_content=False, - retries=self.max_retries, - timeout=timeout - ) - - # Send the request. 
- else: - if hasattr(conn, 'proxy_pool'): - conn = conn.proxy_pool - - low_conn = conn._get_conn(timeout=DEFAULT_POOL_TIMEOUT) - - try: - low_conn.putrequest(request.method, - url, - skip_accept_encoding=True) - - for header, value in request.headers.items(): - low_conn.putheader(header, value) - - low_conn.endheaders() - - for i in request.body: - low_conn.send(hex(len(i))[2:].encode('utf-8')) - low_conn.send(b'\r\n') - low_conn.send(i) - low_conn.send(b'\r\n') - low_conn.send(b'0\r\n\r\n') - - # Receive the response from the server - try: - # For Python 2.7, use buffering of HTTP responses - r = low_conn.getresponse(buffering=True) - except TypeError: - # For compatibility with Python 3.3+ - r = low_conn.getresponse() - - resp = HTTPResponse.from_httplib( - r, - pool=conn, - connection=low_conn, - preload_content=False, - decode_content=False - ) - except: - # If we hit any problems here, clean up the connection. - # Then, reraise so that we can handle the actual exception. - low_conn.close() - raise - - except (ProtocolError, socket.error) as err: - raise ConnectionError(err, request=request) - - except MaxRetryError as e: - if isinstance(e.reason, ConnectTimeoutError): - # TODO: Remove this in 3.0.0: see #2811 - if not isinstance(e.reason, NewConnectionError): - raise ConnectTimeout(e, request=request) - - if isinstance(e.reason, ResponseError): - raise RetryError(e, request=request) - - if isinstance(e.reason, _ProxyError): - raise ProxyError(e, request=request) - - if isinstance(e.reason, _SSLError): - # This branch is for urllib3 v1.22 and later. 
- raise SSLError(e, request=request) - - raise ConnectionError(e, request=request) - - except ClosedPoolError as e: - raise ConnectionError(e, request=request) - - except _ProxyError as e: - raise ProxyError(e) - - except (_SSLError, _HTTPError) as e: - if isinstance(e, _SSLError): - # This branch is for urllib3 versions earlier than v1.22 - raise SSLError(e, request=request) - elif isinstance(e, ReadTimeoutError): - raise ReadTimeout(e, request=request) - else: - raise - - return self.build_response(request, resp) diff --git a/solnlib/packages/requests/api.py b/solnlib/packages/requests/api.py deleted file mode 100644 index ef71d075..00000000 --- a/solnlib/packages/requests/api.py +++ /dev/null @@ -1,158 +0,0 @@ -# -*- coding: utf-8 -*- - -""" -requests.api -~~~~~~~~~~~~ - -This module implements the Requests API. - -:copyright: (c) 2012 by Kenneth Reitz. -:license: Apache2, see LICENSE for more details. -""" - -from . import sessions - - -def request(method, url, **kwargs): - """Constructs and sends a :class:`Request `. - - :param method: method for the new :class:`Request` object. - :param url: URL for the new :class:`Request` object. - :param params: (optional) Dictionary, list of tuples or bytes to send - in the query string for the :class:`Request`. - :param data: (optional) Dictionary, list of tuples, bytes, or file-like - object to send in the body of the :class:`Request`. - :param json: (optional) A JSON serializable Python object to send in the body of the :class:`Request`. - :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`. - :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`. - :param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': file-tuple}``) for multipart encoding upload. 
- ``file-tuple`` can be a 2-tuple ``('filename', fileobj)``, 3-tuple ``('filename', fileobj, 'content_type')`` - or a 4-tuple ``('filename', fileobj, 'content_type', custom_headers)``, where ``'content-type'`` is a string - defining the content type of the given file and ``custom_headers`` a dict-like object containing additional headers - to add for the file. - :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth. - :param timeout: (optional) How many seconds to wait for the server to send data - before giving up, as a float, or a :ref:`(connect timeout, read - timeout) ` tuple. - :type timeout: float or tuple - :param allow_redirects: (optional) Boolean. Enable/disable GET/OPTIONS/POST/PUT/PATCH/DELETE/HEAD redirection. Defaults to ``True``. - :type allow_redirects: bool - :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy. - :param verify: (optional) Either a boolean, in which case it controls whether we verify - the server's TLS certificate, or a string, in which case it must be a path - to a CA bundle to use. Defaults to ``True``. - :param stream: (optional) if ``False``, the response content will be immediately downloaded. - :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair. - :return: :class:`Response ` object - :rtype: requests.Response - - Usage:: - - >>> import requests - >>> req = requests.request('GET', 'https://httpbin.org/get') - - """ - - # By using the 'with' statement we are sure the session is closed, thus we - # avoid leaving sockets open which can trigger a ResourceWarning in some - # cases, and look like a memory leak in others. - with sessions.Session() as session: - return session.request(method=method, url=url, **kwargs) - - -def get(url, params=None, **kwargs): - r"""Sends a GET request. - - :param url: URL for the new :class:`Request` object. 
- :param params: (optional) Dictionary, list of tuples or bytes to send - in the query string for the :class:`Request`. - :param \*\*kwargs: Optional arguments that ``request`` takes. - :return: :class:`Response ` object - :rtype: requests.Response - """ - - kwargs.setdefault('allow_redirects', True) - return request('get', url, params=params, **kwargs) - - -def options(url, **kwargs): - r"""Sends an OPTIONS request. - - :param url: URL for the new :class:`Request` object. - :param \*\*kwargs: Optional arguments that ``request`` takes. - :return: :class:`Response ` object - :rtype: requests.Response - """ - - kwargs.setdefault('allow_redirects', True) - return request('options', url, **kwargs) - - -def head(url, **kwargs): - r"""Sends a HEAD request. - - :param url: URL for the new :class:`Request` object. - :param \*\*kwargs: Optional arguments that ``request`` takes. - :return: :class:`Response ` object - :rtype: requests.Response - """ - - kwargs.setdefault('allow_redirects', False) - return request('head', url, **kwargs) - - -def post(url, data=None, json=None, **kwargs): - r"""Sends a POST request. - - :param url: URL for the new :class:`Request` object. - :param data: (optional) Dictionary, list of tuples, bytes, or file-like - object to send in the body of the :class:`Request`. - :param json: (optional) json data to send in the body of the :class:`Request`. - :param \*\*kwargs: Optional arguments that ``request`` takes. - :return: :class:`Response ` object - :rtype: requests.Response - """ - - return request('post', url, data=data, json=json, **kwargs) - - -def put(url, data=None, **kwargs): - r"""Sends a PUT request. - - :param url: URL for the new :class:`Request` object. - :param data: (optional) Dictionary, list of tuples, bytes, or file-like - object to send in the body of the :class:`Request`. - :param json: (optional) json data to send in the body of the :class:`Request`. - :param \*\*kwargs: Optional arguments that ``request`` takes. 
- :return: :class:`Response ` object - :rtype: requests.Response - """ - - return request('put', url, data=data, **kwargs) - - -def patch(url, data=None, **kwargs): - r"""Sends a PATCH request. - - :param url: URL for the new :class:`Request` object. - :param data: (optional) Dictionary, list of tuples, bytes, or file-like - object to send in the body of the :class:`Request`. - :param json: (optional) json data to send in the body of the :class:`Request`. - :param \*\*kwargs: Optional arguments that ``request`` takes. - :return: :class:`Response ` object - :rtype: requests.Response - """ - - return request('patch', url, data=data, **kwargs) - - -def delete(url, **kwargs): - r"""Sends a DELETE request. - - :param url: URL for the new :class:`Request` object. - :param \*\*kwargs: Optional arguments that ``request`` takes. - :return: :class:`Response ` object - :rtype: requests.Response - """ - - return request('delete', url, **kwargs) diff --git a/solnlib/packages/requests/auth.py b/solnlib/packages/requests/auth.py deleted file mode 100644 index bdde51c7..00000000 --- a/solnlib/packages/requests/auth.py +++ /dev/null @@ -1,305 +0,0 @@ -# -*- coding: utf-8 -*- - -""" -requests.auth -~~~~~~~~~~~~~ - -This module contains the authentication handlers for Requests. -""" - -import os -import re -import time -import hashlib -import threading -import warnings - -from base64 import b64encode - -from .compat import urlparse, str, basestring -from .cookies import extract_cookies_to_jar -from ._internal_utils import to_native_string -from .utils import parse_dict_header - -CONTENT_TYPE_FORM_URLENCODED = 'application/x-www-form-urlencoded' -CONTENT_TYPE_MULTI_PART = 'multipart/form-data' - - -def _basic_auth_str(username, password): - """Returns a Basic Auth string.""" - - # "I want us to put a big-ol' comment on top of it that - # says that this behaviour is dumb but we need to preserve - # it because people are relying on it." 
- # - Lukasa - # - # These are here solely to maintain backwards compatibility - # for things like ints. This will be removed in 3.0.0. - if not isinstance(username, basestring): - warnings.warn( - "Non-string usernames will no longer be supported in Requests " - "3.0.0. Please convert the object you've passed in ({!r}) to " - "a string or bytes object in the near future to avoid " - "problems.".format(username), - category=DeprecationWarning, - ) - username = str(username) - - if not isinstance(password, basestring): - warnings.warn( - "Non-string passwords will no longer be supported in Requests " - "3.0.0. Please convert the object you've passed in ({!r}) to " - "a string or bytes object in the near future to avoid " - "problems.".format(password), - category=DeprecationWarning, - ) - password = str(password) - # -- End Removal -- - - if isinstance(username, str): - username = username.encode('latin1') - - if isinstance(password, str): - password = password.encode('latin1') - - authstr = 'Basic ' + to_native_string( - b64encode(b':'.join((username, password))).strip() - ) - - return authstr - - -class AuthBase(object): - """Base class that all auth implementations derive from""" - - def __call__(self, r): - raise NotImplementedError('Auth hooks must be callable.') - - -class HTTPBasicAuth(AuthBase): - """Attaches HTTP Basic Authentication to the given Request object.""" - - def __init__(self, username, password): - self.username = username - self.password = password - - def __eq__(self, other): - return all([ - self.username == getattr(other, 'username', None), - self.password == getattr(other, 'password', None) - ]) - - def __ne__(self, other): - return not self == other - - def __call__(self, r): - r.headers['Authorization'] = _basic_auth_str(self.username, self.password) - return r - - -class HTTPProxyAuth(HTTPBasicAuth): - """Attaches HTTP Proxy Authentication to a given Request object.""" - - def __call__(self, r): - r.headers['Proxy-Authorization'] = 
_basic_auth_str(self.username, self.password) - return r - - -class HTTPDigestAuth(AuthBase): - """Attaches HTTP Digest Authentication to the given Request object.""" - - def __init__(self, username, password): - self.username = username - self.password = password - # Keep state in per-thread local storage - self._thread_local = threading.local() - - def init_per_thread_state(self): - # Ensure state is initialized just once per-thread - if not hasattr(self._thread_local, 'init'): - self._thread_local.init = True - self._thread_local.last_nonce = '' - self._thread_local.nonce_count = 0 - self._thread_local.chal = {} - self._thread_local.pos = None - self._thread_local.num_401_calls = None - - def build_digest_header(self, method, url): - """ - :rtype: str - """ - - realm = self._thread_local.chal['realm'] - nonce = self._thread_local.chal['nonce'] - qop = self._thread_local.chal.get('qop') - algorithm = self._thread_local.chal.get('algorithm') - opaque = self._thread_local.chal.get('opaque') - hash_utf8 = None - - if algorithm is None: - _algorithm = 'MD5' - else: - _algorithm = algorithm.upper() - # lambdas assume digest modules are imported at the top level - if _algorithm == 'MD5' or _algorithm == 'MD5-SESS': - def md5_utf8(x): - if isinstance(x, str): - x = x.encode('utf-8') - return hashlib.md5(x).hexdigest() - hash_utf8 = md5_utf8 - elif _algorithm == 'SHA': - def sha_utf8(x): - if isinstance(x, str): - x = x.encode('utf-8') - return hashlib.sha1(x).hexdigest() - hash_utf8 = sha_utf8 - elif _algorithm == 'SHA-256': - def sha256_utf8(x): - if isinstance(x, str): - x = x.encode('utf-8') - return hashlib.sha256(x).hexdigest() - hash_utf8 = sha256_utf8 - elif _algorithm == 'SHA-512': - def sha512_utf8(x): - if isinstance(x, str): - x = x.encode('utf-8') - return hashlib.sha512(x).hexdigest() - hash_utf8 = sha512_utf8 - - KD = lambda s, d: hash_utf8("%s:%s" % (s, d)) - - if hash_utf8 is None: - return None - - # XXX not implemented yet - entdig = None - p_parsed = 
urlparse(url) - #: path is request-uri defined in RFC 2616 which should not be empty - path = p_parsed.path or "/" - if p_parsed.query: - path += '?' + p_parsed.query - - A1 = '%s:%s:%s' % (self.username, realm, self.password) - A2 = '%s:%s' % (method, path) - - HA1 = hash_utf8(A1) - HA2 = hash_utf8(A2) - - if nonce == self._thread_local.last_nonce: - self._thread_local.nonce_count += 1 - else: - self._thread_local.nonce_count = 1 - ncvalue = '%08x' % self._thread_local.nonce_count - s = str(self._thread_local.nonce_count).encode('utf-8') - s += nonce.encode('utf-8') - s += time.ctime().encode('utf-8') - s += os.urandom(8) - - cnonce = (hashlib.sha1(s).hexdigest()[:16]) - if _algorithm == 'MD5-SESS': - HA1 = hash_utf8('%s:%s:%s' % (HA1, nonce, cnonce)) - - if not qop: - respdig = KD(HA1, "%s:%s" % (nonce, HA2)) - elif qop == 'auth' or 'auth' in qop.split(','): - noncebit = "%s:%s:%s:%s:%s" % ( - nonce, ncvalue, cnonce, 'auth', HA2 - ) - respdig = KD(HA1, noncebit) - else: - # XXX handle auth-int. - return None - - self._thread_local.last_nonce = nonce - - # XXX should the partial digests be encoded too? - base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \ - 'response="%s"' % (self.username, realm, nonce, path, respdig) - if opaque: - base += ', opaque="%s"' % opaque - if algorithm: - base += ', algorithm="%s"' % algorithm - if entdig: - base += ', digest="%s"' % entdig - if qop: - base += ', qop="auth", nc=%s, cnonce="%s"' % (ncvalue, cnonce) - - return 'Digest %s' % (base) - - def handle_redirect(self, r, **kwargs): - """Reset num_401_calls counter on redirects.""" - if r.is_redirect: - self._thread_local.num_401_calls = 1 - - def handle_401(self, r, **kwargs): - """ - Takes the given response and tries digest-auth, if needed. 
- - :rtype: requests.Response - """ - - # If response is not 4xx, do not auth - # See https://github.com/requests/requests/issues/3772 - if not 400 <= r.status_code < 500: - self._thread_local.num_401_calls = 1 - return r - - if self._thread_local.pos is not None: - # Rewind the file position indicator of the body to where - # it was to resend the request. - r.request.body.seek(self._thread_local.pos) - s_auth = r.headers.get('www-authenticate', '') - - if 'digest' in s_auth.lower() and self._thread_local.num_401_calls < 2: - - self._thread_local.num_401_calls += 1 - pat = re.compile(r'digest ', flags=re.IGNORECASE) - self._thread_local.chal = parse_dict_header(pat.sub('', s_auth, count=1)) - - # Consume content and release the original connection - # to allow our new request to reuse the same one. - r.content - r.close() - prep = r.request.copy() - extract_cookies_to_jar(prep._cookies, r.request, r.raw) - prep.prepare_cookies(prep._cookies) - - prep.headers['Authorization'] = self.build_digest_header( - prep.method, prep.url) - _r = r.connection.send(prep, **kwargs) - _r.history.append(r) - _r.request = prep - - return _r - - self._thread_local.num_401_calls = 1 - return r - - def __call__(self, r): - # Initialize per-thread state, if needed - self.init_per_thread_state() - # If we have a saved nonce, skip the 401 - if self._thread_local.last_nonce: - r.headers['Authorization'] = self.build_digest_header(r.method, r.url) - try: - self._thread_local.pos = r.body.tell() - except AttributeError: - # In the case of HTTPDigestAuth being reused and the body of - # the previous request was a file-like object, pos has the - # file position of the previous body. Ensure it's set to - # None. 
- self._thread_local.pos = None - r.register_hook('response', self.handle_401) - r.register_hook('response', self.handle_redirect) - self._thread_local.num_401_calls = 1 - - return r - - def __eq__(self, other): - return all([ - self.username == getattr(other, 'username', None), - self.password == getattr(other, 'password', None) - ]) - - def __ne__(self, other): - return not self == other diff --git a/solnlib/packages/requests/certs.py b/solnlib/packages/requests/certs.py deleted file mode 100644 index d1a378d7..00000000 --- a/solnlib/packages/requests/certs.py +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -""" -requests.certs -~~~~~~~~~~~~~~ - -This module returns the preferred default CA certificate bundle. There is -only one — the one from the certifi package. - -If you are packaging Requests, e.g., for a Linux distribution or a managed -environment, you can change the definition of where() to return a separately -packaged CA bundle. -""" -from certifi import where - -if __name__ == '__main__': - print(where()) diff --git a/solnlib/packages/requests/compat.py b/solnlib/packages/requests/compat.py deleted file mode 100644 index c44b35ef..00000000 --- a/solnlib/packages/requests/compat.py +++ /dev/null @@ -1,70 +0,0 @@ -# -*- coding: utf-8 -*- - -""" -requests.compat -~~~~~~~~~~~~~~~ - -This module handles import compatibility issues between Python 2 and -Python 3. -""" - -import chardet - -import sys - -# ------- -# Pythons -# ------- - -# Syntax sugar. -_ver = sys.version_info - -#: Python 2.x? -is_py2 = (_ver[0] == 2) - -#: Python 3.x? 
-is_py3 = (_ver[0] == 3) - -try: - import simplejson as json -except ImportError: - import json - -# --------- -# Specifics -# --------- - -if is_py2: - from urllib import ( - quote, unquote, quote_plus, unquote_plus, urlencode, getproxies, - proxy_bypass, proxy_bypass_environment, getproxies_environment) - from urlparse import urlparse, urlunparse, urljoin, urlsplit, urldefrag - from urllib2 import parse_http_list - import cookielib - from Cookie import Morsel - from StringIO import StringIO - from collections import Callable, Mapping, MutableMapping, OrderedDict - - - builtin_str = str - bytes = str - str = unicode - basestring = basestring - numeric_types = (int, long, float) - integer_types = (int, long) - -elif is_py3: - from urllib.parse import urlparse, urlunparse, urljoin, urlsplit, urlencode, quote, unquote, quote_plus, unquote_plus, urldefrag - from urllib.request import parse_http_list, getproxies, proxy_bypass, proxy_bypass_environment, getproxies_environment - from http import cookiejar as cookielib - from http.cookies import Morsel - from io import StringIO - from collections import OrderedDict - from collections.abc import Callable, Mapping, MutableMapping - - builtin_str = str - str = str - bytes = bytes - basestring = (str, bytes) - numeric_types = (int, float) - integer_types = (int,) diff --git a/solnlib/packages/requests/cookies.py b/solnlib/packages/requests/cookies.py deleted file mode 100644 index 56fccd9c..00000000 --- a/solnlib/packages/requests/cookies.py +++ /dev/null @@ -1,549 +0,0 @@ -# -*- coding: utf-8 -*- - -""" -requests.cookies -~~~~~~~~~~~~~~~~ - -Compatibility code to be able to use `cookielib.CookieJar` with requests. - -requests.utils imports from here, so be careful with imports. 
-""" - -import copy -import time -import calendar - -from ._internal_utils import to_native_string -from .compat import cookielib, urlparse, urlunparse, Morsel, MutableMapping - -try: - import threading -except ImportError: - import dummy_threading as threading - - -class MockRequest(object): - """Wraps a `requests.Request` to mimic a `urllib2.Request`. - - The code in `cookielib.CookieJar` expects this interface in order to correctly - manage cookie policies, i.e., determine whether a cookie can be set, given the - domains of the request and the cookie. - - The original request object is read-only. The client is responsible for collecting - the new headers via `get_new_headers()` and interpreting them appropriately. You - probably want `get_cookie_header`, defined below. - """ - - def __init__(self, request): - self._r = request - self._new_headers = {} - self.type = urlparse(self._r.url).scheme - - def get_type(self): - return self.type - - def get_host(self): - return urlparse(self._r.url).netloc - - def get_origin_req_host(self): - return self.get_host() - - def get_full_url(self): - # Only return the response's URL if the user hadn't set the Host - # header - if not self._r.headers.get('Host'): - return self._r.url - # If they did set it, retrieve it and reconstruct the expected domain - host = to_native_string(self._r.headers['Host'], encoding='utf-8') - parsed = urlparse(self._r.url) - # Reconstruct the URL as we expect it - return urlunparse([ - parsed.scheme, host, parsed.path, parsed.params, parsed.query, - parsed.fragment - ]) - - def is_unverifiable(self): - return True - - def has_header(self, name): - return name in self._r.headers or name in self._new_headers - - def get_header(self, name, default=None): - return self._r.headers.get(name, self._new_headers.get(name, default)) - - def add_header(self, key, val): - """cookielib has no legitimate use for this method; add it back if you find one.""" - raise NotImplementedError("Cookie headers should be 
added with add_unredirected_header()") - - def add_unredirected_header(self, name, value): - self._new_headers[name] = value - - def get_new_headers(self): - return self._new_headers - - @property - def unverifiable(self): - return self.is_unverifiable() - - @property - def origin_req_host(self): - return self.get_origin_req_host() - - @property - def host(self): - return self.get_host() - - -class MockResponse(object): - """Wraps a `httplib.HTTPMessage` to mimic a `urllib.addinfourl`. - - ...what? Basically, expose the parsed HTTP headers from the server response - the way `cookielib` expects to see them. - """ - - def __init__(self, headers): - """Make a MockResponse for `cookielib` to read. - - :param headers: a httplib.HTTPMessage or analogous carrying the headers - """ - self._headers = headers - - def info(self): - return self._headers - - def getheaders(self, name): - self._headers.getheaders(name) - - -def extract_cookies_to_jar(jar, request, response): - """Extract the cookies from the response into a CookieJar. - - :param jar: cookielib.CookieJar (not necessarily a RequestsCookieJar) - :param request: our own requests.Request object - :param response: urllib3.HTTPResponse object - """ - if not (hasattr(response, '_original_response') and - response._original_response): - return - # the _original_response field is the wrapped httplib.HTTPResponse object, - req = MockRequest(request) - # pull out the HTTPMessage with the headers and put it in the mock: - res = MockResponse(response._original_response.msg) - jar.extract_cookies(res, req) - - -def get_cookie_header(jar, request): - """ - Produce an appropriate Cookie header string to be sent with `request`, or None. - - :rtype: str - """ - r = MockRequest(request) - jar.add_cookie_header(r) - return r.get_new_headers().get('Cookie') - - -def remove_cookie_by_name(cookiejar, name, domain=None, path=None): - """Unsets a cookie by name, by default over all domains and paths. - - Wraps CookieJar.clear(), is O(n). 
- """ - clearables = [] - for cookie in cookiejar: - if cookie.name != name: - continue - if domain is not None and domain != cookie.domain: - continue - if path is not None and path != cookie.path: - continue - clearables.append((cookie.domain, cookie.path, cookie.name)) - - for domain, path, name in clearables: - cookiejar.clear(domain, path, name) - - -class CookieConflictError(RuntimeError): - """There are two cookies that meet the criteria specified in the cookie jar. - Use .get and .set and include domain and path args in order to be more specific. - """ - - -class RequestsCookieJar(cookielib.CookieJar, MutableMapping): - """Compatibility class; is a cookielib.CookieJar, but exposes a dict - interface. - - This is the CookieJar we create by default for requests and sessions that - don't specify one, since some clients may expect response.cookies and - session.cookies to support dict operations. - - Requests does not use the dict interface internally; it's just for - compatibility with external client code. All requests code should work - out of the box with externally provided instances of ``CookieJar``, e.g. - ``LWPCookieJar`` and ``FileCookieJar``. - - Unlike a regular CookieJar, this class is pickleable. - - .. warning:: dictionary operations that are normally O(1) may be O(n). - """ - - def get(self, name, default=None, domain=None, path=None): - """Dict-like get() that also supports optional domain and path args in - order to resolve naming collisions from using one cookie jar over - multiple domains. - - .. warning:: operation is O(n), not O(1). - """ - try: - return self._find_no_duplicates(name, domain, path) - except KeyError: - return default - - def set(self, name, value, **kwargs): - """Dict-like set() that also supports optional domain and path args in - order to resolve naming collisions from using one cookie jar over - multiple domains. 
- """ - # support client code that unsets cookies by assignment of a None value: - if value is None: - remove_cookie_by_name(self, name, domain=kwargs.get('domain'), path=kwargs.get('path')) - return - - if isinstance(value, Morsel): - c = morsel_to_cookie(value) - else: - c = create_cookie(name, value, **kwargs) - self.set_cookie(c) - return c - - def iterkeys(self): - """Dict-like iterkeys() that returns an iterator of names of cookies - from the jar. - - .. seealso:: itervalues() and iteritems(). - """ - for cookie in iter(self): - yield cookie.name - - def keys(self): - """Dict-like keys() that returns a list of names of cookies from the - jar. - - .. seealso:: values() and items(). - """ - return list(self.iterkeys()) - - def itervalues(self): - """Dict-like itervalues() that returns an iterator of values of cookies - from the jar. - - .. seealso:: iterkeys() and iteritems(). - """ - for cookie in iter(self): - yield cookie.value - - def values(self): - """Dict-like values() that returns a list of values of cookies from the - jar. - - .. seealso:: keys() and items(). - """ - return list(self.itervalues()) - - def iteritems(self): - """Dict-like iteritems() that returns an iterator of name-value tuples - from the jar. - - .. seealso:: iterkeys() and itervalues(). - """ - for cookie in iter(self): - yield cookie.name, cookie.value - - def items(self): - """Dict-like items() that returns a list of name-value tuples from the - jar. Allows client-code to call ``dict(RequestsCookieJar)`` and get a - vanilla python dict of key value pairs. - - .. seealso:: keys() and values(). 
- """ - return list(self.iteritems()) - - def list_domains(self): - """Utility method to list all the domains in the jar.""" - domains = [] - for cookie in iter(self): - if cookie.domain not in domains: - domains.append(cookie.domain) - return domains - - def list_paths(self): - """Utility method to list all the paths in the jar.""" - paths = [] - for cookie in iter(self): - if cookie.path not in paths: - paths.append(cookie.path) - return paths - - def multiple_domains(self): - """Returns True if there are multiple domains in the jar. - Returns False otherwise. - - :rtype: bool - """ - domains = [] - for cookie in iter(self): - if cookie.domain is not None and cookie.domain in domains: - return True - domains.append(cookie.domain) - return False # there is only one domain in jar - - def get_dict(self, domain=None, path=None): - """Takes as an argument an optional domain and path and returns a plain - old Python dict of name-value pairs of cookies that meet the - requirements. - - :rtype: dict - """ - dictionary = {} - for cookie in iter(self): - if ( - (domain is None or cookie.domain == domain) and - (path is None or cookie.path == path) - ): - dictionary[cookie.name] = cookie.value - return dictionary - - def __contains__(self, name): - try: - return super(RequestsCookieJar, self).__contains__(name) - except CookieConflictError: - return True - - def __getitem__(self, name): - """Dict-like __getitem__() for compatibility with client code. Throws - exception if there are more than one cookie with name. In that case, - use the more explicit get() method instead. - - .. warning:: operation is O(n), not O(1). - """ - return self._find_no_duplicates(name) - - def __setitem__(self, name, value): - """Dict-like __setitem__ for compatibility with client code. Throws - exception if there is already a cookie of that name in the jar. In that - case, use the more explicit set() method instead. 
- """ - self.set(name, value) - - def __delitem__(self, name): - """Deletes a cookie given a name. Wraps ``cookielib.CookieJar``'s - ``remove_cookie_by_name()``. - """ - remove_cookie_by_name(self, name) - - def set_cookie(self, cookie, *args, **kwargs): - if hasattr(cookie.value, 'startswith') and cookie.value.startswith('"') and cookie.value.endswith('"'): - cookie.value = cookie.value.replace('\\"', '') - return super(RequestsCookieJar, self).set_cookie(cookie, *args, **kwargs) - - def update(self, other): - """Updates this jar with cookies from another CookieJar or dict-like""" - if isinstance(other, cookielib.CookieJar): - for cookie in other: - self.set_cookie(copy.copy(cookie)) - else: - super(RequestsCookieJar, self).update(other) - - def _find(self, name, domain=None, path=None): - """Requests uses this method internally to get cookie values. - - If there are conflicting cookies, _find arbitrarily chooses one. - See _find_no_duplicates if you want an exception thrown if there are - conflicting cookies. - - :param name: a string containing name of cookie - :param domain: (optional) string containing domain of cookie - :param path: (optional) string containing path of cookie - :return: cookie.value - """ - for cookie in iter(self): - if cookie.name == name: - if domain is None or cookie.domain == domain: - if path is None or cookie.path == path: - return cookie.value - - raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path)) - - def _find_no_duplicates(self, name, domain=None, path=None): - """Both ``__get_item__`` and ``get`` call this function: it's never - used elsewhere in Requests. 
- - :param name: a string containing name of cookie - :param domain: (optional) string containing domain of cookie - :param path: (optional) string containing path of cookie - :raises KeyError: if cookie is not found - :raises CookieConflictError: if there are multiple cookies - that match name and optionally domain and path - :return: cookie.value - """ - toReturn = None - for cookie in iter(self): - if cookie.name == name: - if domain is None or cookie.domain == domain: - if path is None or cookie.path == path: - if toReturn is not None: # if there are multiple cookies that meet passed in criteria - raise CookieConflictError('There are multiple cookies with name, %r' % (name)) - toReturn = cookie.value # we will eventually return this as long as no cookie conflict - - if toReturn: - return toReturn - raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path)) - - def __getstate__(self): - """Unlike a normal CookieJar, this class is pickleable.""" - state = self.__dict__.copy() - # remove the unpickleable RLock object - state.pop('_cookies_lock') - return state - - def __setstate__(self, state): - """Unlike a normal CookieJar, this class is pickleable.""" - self.__dict__.update(state) - if '_cookies_lock' not in self.__dict__: - self._cookies_lock = threading.RLock() - - def copy(self): - """Return a copy of this RequestsCookieJar.""" - new_cj = RequestsCookieJar() - new_cj.set_policy(self.get_policy()) - new_cj.update(self) - return new_cj - - def get_policy(self): - """Return the CookiePolicy instance used.""" - return self._policy - - -def _copy_cookie_jar(jar): - if jar is None: - return None - - if hasattr(jar, 'copy'): - # We're dealing with an instance of RequestsCookieJar - return jar.copy() - # We're dealing with a generic CookieJar instance - new_jar = copy.copy(jar) - new_jar.clear() - for cookie in jar: - new_jar.set_cookie(copy.copy(cookie)) - return new_jar - - -def create_cookie(name, value, **kwargs): - """Make a cookie from underspecified 
parameters. - - By default, the pair of `name` and `value` will be set for the domain '' - and sent on every request (this is sometimes called a "supercookie"). - """ - result = { - 'version': 0, - 'name': name, - 'value': value, - 'port': None, - 'domain': '', - 'path': '/', - 'secure': False, - 'expires': None, - 'discard': True, - 'comment': None, - 'comment_url': None, - 'rest': {'HttpOnly': None}, - 'rfc2109': False, - } - - badargs = set(kwargs) - set(result) - if badargs: - err = 'create_cookie() got unexpected keyword arguments: %s' - raise TypeError(err % list(badargs)) - - result.update(kwargs) - result['port_specified'] = bool(result['port']) - result['domain_specified'] = bool(result['domain']) - result['domain_initial_dot'] = result['domain'].startswith('.') - result['path_specified'] = bool(result['path']) - - return cookielib.Cookie(**result) - - -def morsel_to_cookie(morsel): - """Convert a Morsel object into a Cookie containing the one k/v pair.""" - - expires = None - if morsel['max-age']: - try: - expires = int(time.time() + int(morsel['max-age'])) - except ValueError: - raise TypeError('max-age: %s must be integer' % morsel['max-age']) - elif morsel['expires']: - time_template = '%a, %d-%b-%Y %H:%M:%S GMT' - expires = calendar.timegm( - time.strptime(morsel['expires'], time_template) - ) - return create_cookie( - comment=morsel['comment'], - comment_url=bool(morsel['comment']), - discard=False, - domain=morsel['domain'], - expires=expires, - name=morsel.key, - path=morsel['path'], - port=None, - rest={'HttpOnly': morsel['httponly']}, - rfc2109=False, - secure=bool(morsel['secure']), - value=morsel.value, - version=morsel['version'] or 0, - ) - - -def cookiejar_from_dict(cookie_dict, cookiejar=None, overwrite=True): - """Returns a CookieJar from a key/value dictionary. - - :param cookie_dict: Dict of key/values to insert into CookieJar. - :param cookiejar: (optional) A cookiejar to add the cookies to. 
- :param overwrite: (optional) If False, will not replace cookies - already in the jar with new ones. - :rtype: CookieJar - """ - if cookiejar is None: - cookiejar = RequestsCookieJar() - - if cookie_dict is not None: - names_from_jar = [cookie.name for cookie in cookiejar] - for name in cookie_dict: - if overwrite or (name not in names_from_jar): - cookiejar.set_cookie(create_cookie(name, cookie_dict[name])) - - return cookiejar - - -def merge_cookies(cookiejar, cookies): - """Add cookies to cookiejar and returns a merged CookieJar. - - :param cookiejar: CookieJar object to add the cookies to. - :param cookies: Dictionary or CookieJar object to be added. - :rtype: CookieJar - """ - if not isinstance(cookiejar, cookielib.CookieJar): - raise ValueError('You can only merge into CookieJar') - - if isinstance(cookies, dict): - cookiejar = cookiejar_from_dict( - cookies, cookiejar=cookiejar, overwrite=False) - elif isinstance(cookies, cookielib.CookieJar): - try: - cookiejar.update(cookies) - except AttributeError: - for cookie_in_jar in cookies: - cookiejar.set_cookie(cookie_in_jar) - - return cookiejar diff --git a/solnlib/packages/requests/exceptions.py b/solnlib/packages/requests/exceptions.py deleted file mode 100644 index a80cad80..00000000 --- a/solnlib/packages/requests/exceptions.py +++ /dev/null @@ -1,126 +0,0 @@ -# -*- coding: utf-8 -*- - -""" -requests.exceptions -~~~~~~~~~~~~~~~~~~~ - -This module contains the set of Requests' exceptions. -""" -from urllib3.exceptions import HTTPError as BaseHTTPError - - -class RequestException(IOError): - """There was an ambiguous exception that occurred while handling your - request. 
- """ - - def __init__(self, *args, **kwargs): - """Initialize RequestException with `request` and `response` objects.""" - response = kwargs.pop('response', None) - self.response = response - self.request = kwargs.pop('request', None) - if (response is not None and not self.request and - hasattr(response, 'request')): - self.request = self.response.request - super(RequestException, self).__init__(*args, **kwargs) - - -class HTTPError(RequestException): - """An HTTP error occurred.""" - - -class ConnectionError(RequestException): - """A Connection error occurred.""" - - -class ProxyError(ConnectionError): - """A proxy error occurred.""" - - -class SSLError(ConnectionError): - """An SSL error occurred.""" - - -class Timeout(RequestException): - """The request timed out. - - Catching this error will catch both - :exc:`~requests.exceptions.ConnectTimeout` and - :exc:`~requests.exceptions.ReadTimeout` errors. - """ - - -class ConnectTimeout(ConnectionError, Timeout): - """The request timed out while trying to connect to the remote server. - - Requests that produced this error are safe to retry. - """ - - -class ReadTimeout(Timeout): - """The server did not send any data in the allotted amount of time.""" - - -class URLRequired(RequestException): - """A valid URL is required to make a request.""" - - -class TooManyRedirects(RequestException): - """Too many redirects.""" - - -class MissingSchema(RequestException, ValueError): - """The URL schema (e.g. 
http or https) is missing.""" - - -class InvalidSchema(RequestException, ValueError): - """See defaults.py for valid schemas.""" - - -class InvalidURL(RequestException, ValueError): - """The URL provided was somehow invalid.""" - - -class InvalidHeader(RequestException, ValueError): - """The header value provided was somehow invalid.""" - - -class InvalidProxyURL(InvalidURL): - """The proxy URL provided is invalid.""" - - -class ChunkedEncodingError(RequestException): - """The server declared chunked encoding but sent an invalid chunk.""" - - -class ContentDecodingError(RequestException, BaseHTTPError): - """Failed to decode response content""" - - -class StreamConsumedError(RequestException, TypeError): - """The content for this response was already consumed""" - - -class RetryError(RequestException): - """Custom retries logic failed""" - - -class UnrewindableBodyError(RequestException): - """Requests encountered an error when trying to rewind a body""" - -# Warnings - - -class RequestsWarning(Warning): - """Base warning for Requests.""" - pass - - -class FileModeWarning(RequestsWarning, DeprecationWarning): - """A file was opened in text mode, but Requests determined its binary length.""" - pass - - -class RequestsDependencyWarning(RequestsWarning): - """An imported dependency doesn't match the expected version range.""" - pass diff --git a/solnlib/packages/requests/help.py b/solnlib/packages/requests/help.py deleted file mode 100644 index e53d35ef..00000000 --- a/solnlib/packages/requests/help.py +++ /dev/null @@ -1,119 +0,0 @@ -"""Module containing bug report helper(s).""" -from __future__ import print_function - -import json -import platform -import sys -import ssl - -import idna -import urllib3 -import chardet - -from . 
import __version__ as requests_version - -try: - from urllib3.contrib import pyopenssl -except ImportError: - pyopenssl = None - OpenSSL = None - cryptography = None -else: - import OpenSSL - import cryptography - - -def _implementation(): - """Return a dict with the Python implementation and version. - - Provide both the name and the version of the Python implementation - currently running. For example, on CPython 2.7.5 it will return - {'name': 'CPython', 'version': '2.7.5'}. - - This function works best on CPython and PyPy: in particular, it probably - doesn't work for Jython or IronPython. Future investigation should be done - to work out the correct shape of the code for those platforms. - """ - implementation = platform.python_implementation() - - if implementation == 'CPython': - implementation_version = platform.python_version() - elif implementation == 'PyPy': - implementation_version = '%s.%s.%s' % (sys.pypy_version_info.major, - sys.pypy_version_info.minor, - sys.pypy_version_info.micro) - if sys.pypy_version_info.releaselevel != 'final': - implementation_version = ''.join([ - implementation_version, sys.pypy_version_info.releaselevel - ]) - elif implementation == 'Jython': - implementation_version = platform.python_version() # Complete Guess - elif implementation == 'IronPython': - implementation_version = platform.python_version() # Complete Guess - else: - implementation_version = 'Unknown' - - return {'name': implementation, 'version': implementation_version} - - -def info(): - """Generate information for a bug report.""" - try: - platform_info = { - 'system': platform.system(), - 'release': platform.release(), - } - except IOError: - platform_info = { - 'system': 'Unknown', - 'release': 'Unknown', - } - - implementation_info = _implementation() - urllib3_info = {'version': urllib3.__version__} - chardet_info = {'version': chardet.__version__} - - pyopenssl_info = { - 'version': None, - 'openssl_version': '', - } - if OpenSSL: - pyopenssl_info = { - 
'version': OpenSSL.__version__, - 'openssl_version': '%x' % OpenSSL.SSL.OPENSSL_VERSION_NUMBER, - } - cryptography_info = { - 'version': getattr(cryptography, '__version__', ''), - } - idna_info = { - 'version': getattr(idna, '__version__', ''), - } - - system_ssl = ssl.OPENSSL_VERSION_NUMBER - system_ssl_info = { - 'version': '%x' % system_ssl if system_ssl is not None else '' - } - - return { - 'platform': platform_info, - 'implementation': implementation_info, - 'system_ssl': system_ssl_info, - 'using_pyopenssl': pyopenssl is not None, - 'pyOpenSSL': pyopenssl_info, - 'urllib3': urllib3_info, - 'chardet': chardet_info, - 'cryptography': cryptography_info, - 'idna': idna_info, - 'requests': { - 'version': requests_version, - }, - } - - -def main(): - """Pretty-print the bug information as JSON.""" - print(json.dumps(info(), sort_keys=True, indent=2)) - - -if __name__ == '__main__': - main() diff --git a/solnlib/packages/requests/hooks.py b/solnlib/packages/requests/hooks.py deleted file mode 100644 index 7a51f212..00000000 --- a/solnlib/packages/requests/hooks.py +++ /dev/null @@ -1,34 +0,0 @@ -# -*- coding: utf-8 -*- - -""" -requests.hooks -~~~~~~~~~~~~~~ - -This module provides the capabilities for the Requests hooks system. - -Available hooks: - -``response``: - The response generated from a Request. 
-""" -HOOKS = ['response'] - - -def default_hooks(): - return {event: [] for event in HOOKS} - -# TODO: response is the only one - - -def dispatch_hook(key, hooks, hook_data, **kwargs): - """Dispatches a hook dictionary on a given piece of data.""" - hooks = hooks or {} - hooks = hooks.get(key) - if hooks: - if hasattr(hooks, '__call__'): - hooks = [hooks] - for hook in hooks: - _hook_data = hook(hook_data, **kwargs) - if _hook_data is not None: - hook_data = _hook_data - return hook_data diff --git a/solnlib/packages/requests/models.py b/solnlib/packages/requests/models.py deleted file mode 100644 index 62dcd0b7..00000000 --- a/solnlib/packages/requests/models.py +++ /dev/null @@ -1,953 +0,0 @@ -# -*- coding: utf-8 -*- - -""" -requests.models -~~~~~~~~~~~~~~~ - -This module contains the primary objects that power Requests. -""" - -import datetime -import sys - -# Import encoding now, to avoid implicit import later. -# Implicit import within threads may cause LookupError when standard library is in a ZIP, -# such as in Embedded Python. See https://github.com/requests/requests/issues/3578. 
-import encodings.idna - -from urllib3.fields import RequestField -from urllib3.filepost import encode_multipart_formdata -from urllib3.util import parse_url -from urllib3.exceptions import ( - DecodeError, ReadTimeoutError, ProtocolError, LocationParseError) - -from io import UnsupportedOperation -from .hooks import default_hooks -from .structures import CaseInsensitiveDict - -from .auth import HTTPBasicAuth -from .cookies import cookiejar_from_dict, get_cookie_header, _copy_cookie_jar -from .exceptions import ( - HTTPError, MissingSchema, InvalidURL, ChunkedEncodingError, - ContentDecodingError, ConnectionError, StreamConsumedError) -from ._internal_utils import to_native_string, unicode_is_ascii -from .utils import ( - guess_filename, get_auth_from_url, requote_uri, - stream_decode_response_unicode, to_key_val_list, parse_header_links, - iter_slices, guess_json_utf, super_len, check_header_validity) -from .compat import ( - Callable, Mapping, - cookielib, urlunparse, urlsplit, urlencode, str, bytes, - is_py2, chardet, builtin_str, basestring) -from .compat import json as complexjson -from .status_codes import codes - -#: The set of HTTP status codes that indicate an automatically -#: processable redirect. -REDIRECT_STATI = ( - codes.moved, # 301 - codes.found, # 302 - codes.other, # 303 - codes.temporary_redirect, # 307 - codes.permanent_redirect, # 308 -) - -DEFAULT_REDIRECT_LIMIT = 30 -CONTENT_CHUNK_SIZE = 10 * 1024 -ITER_CHUNK_SIZE = 512 - - -class RequestEncodingMixin(object): - @property - def path_url(self): - """Build the path URL to use.""" - - url = [] - - p = urlsplit(self.url) - - path = p.path - if not path: - path = '/' - - url.append(path) - - query = p.query - if query: - url.append('?') - url.append(query) - - return ''.join(url) - - @staticmethod - def _encode_params(data): - """Encode parameters in a piece of data. - - Will successfully encode parameters when passed as a dict or a list of - 2-tuples. 
Order is retained if data is a list of 2-tuples but arbitrary - if parameters are supplied as a dict. - """ - - if isinstance(data, (str, bytes)): - return data - elif hasattr(data, 'read'): - return data - elif hasattr(data, '__iter__'): - result = [] - for k, vs in to_key_val_list(data): - if isinstance(vs, basestring) or not hasattr(vs, '__iter__'): - vs = [vs] - for v in vs: - if v is not None: - result.append( - (k.encode('utf-8') if isinstance(k, str) else k, - v.encode('utf-8') if isinstance(v, str) else v)) - return urlencode(result, doseq=True) - else: - return data - - @staticmethod - def _encode_files(files, data): - """Build the body for a multipart/form-data request. - - Will successfully encode files when passed as a dict or a list of - tuples. Order is retained if data is a list of tuples but arbitrary - if parameters are supplied as a dict. - The tuples may be 2-tuples (filename, fileobj), 3-tuples (filename, fileobj, contentype) - or 4-tuples (filename, fileobj, contentype, custom_headers). - """ - if (not files): - raise ValueError("Files must be provided.") - elif isinstance(data, basestring): - raise ValueError("Data must not be a string.") - - new_fields = [] - fields = to_key_val_list(data or {}) - files = to_key_val_list(files or {}) - - for field, val in fields: - if isinstance(val, basestring) or not hasattr(val, '__iter__'): - val = [val] - for v in val: - if v is not None: - # Don't call str() on bytestrings: in Py3 it all goes wrong. 
- if not isinstance(v, bytes): - v = str(v) - - new_fields.append( - (field.decode('utf-8') if isinstance(field, bytes) else field, - v.encode('utf-8') if isinstance(v, str) else v)) - - for (k, v) in files: - # support for explicit filename - ft = None - fh = None - if isinstance(v, (tuple, list)): - if len(v) == 2: - fn, fp = v - elif len(v) == 3: - fn, fp, ft = v - else: - fn, fp, ft, fh = v - else: - fn = guess_filename(v) or k - fp = v - - if isinstance(fp, (str, bytes, bytearray)): - fdata = fp - elif hasattr(fp, 'read'): - fdata = fp.read() - elif fp is None: - continue - else: - fdata = fp - - rf = RequestField(name=k, data=fdata, filename=fn, headers=fh) - rf.make_multipart(content_type=ft) - new_fields.append(rf) - - body, content_type = encode_multipart_formdata(new_fields) - - return body, content_type - - -class RequestHooksMixin(object): - def register_hook(self, event, hook): - """Properly register a hook.""" - - if event not in self.hooks: - raise ValueError('Unsupported event specified, with event name "%s"' % (event)) - - if isinstance(hook, Callable): - self.hooks[event].append(hook) - elif hasattr(hook, '__iter__'): - self.hooks[event].extend(h for h in hook if isinstance(h, Callable)) - - def deregister_hook(self, event, hook): - """Deregister a previously registered hook. - Returns True if the hook existed, False if not. - """ - - try: - self.hooks[event].remove(hook) - return True - except ValueError: - return False - - -class Request(RequestHooksMixin): - """A user-created :class:`Request ` object. - - Used to prepare a :class:`PreparedRequest `, which is sent to the server. - - :param method: HTTP method to use. - :param url: URL to send. - :param headers: dictionary of headers to send. - :param files: dictionary of {filename: fileobject} files to multipart upload. - :param data: the body to attach to the request. If a dictionary or - list of tuples ``[(key, value)]`` is provided, form-encoding will - take place. 
- :param json: json for the body to attach to the request (if files or data is not specified). - :param params: URL parameters to append to the URL. If a dictionary or - list of tuples ``[(key, value)]`` is provided, form-encoding will - take place. - :param auth: Auth handler or (user, pass) tuple. - :param cookies: dictionary or CookieJar of cookies to attach to this request. - :param hooks: dictionary of callback hooks, for internal usage. - - Usage:: - - >>> import requests - >>> req = requests.Request('GET', 'https://httpbin.org/get') - >>> req.prepare() - - """ - - def __init__(self, - method=None, url=None, headers=None, files=None, data=None, - params=None, auth=None, cookies=None, hooks=None, json=None): - - # Default empty dicts for dict params. - data = [] if data is None else data - files = [] if files is None else files - headers = {} if headers is None else headers - params = {} if params is None else params - hooks = {} if hooks is None else hooks - - self.hooks = default_hooks() - for (k, v) in list(hooks.items()): - self.register_hook(event=k, hook=v) - - self.method = method - self.url = url - self.headers = headers - self.files = files - self.data = data - self.json = json - self.params = params - self.auth = auth - self.cookies = cookies - - def __repr__(self): - return '' % (self.method) - - def prepare(self): - """Constructs a :class:`PreparedRequest ` for transmission and returns it.""" - p = PreparedRequest() - p.prepare( - method=self.method, - url=self.url, - headers=self.headers, - files=self.files, - data=self.data, - json=self.json, - params=self.params, - auth=self.auth, - cookies=self.cookies, - hooks=self.hooks, - ) - return p - - -class PreparedRequest(RequestEncodingMixin, RequestHooksMixin): - """The fully mutable :class:`PreparedRequest ` object, - containing the exact bytes that will be sent to the server. - - Generated from either a :class:`Request ` object or manually. 
- - Usage:: - - >>> import requests - >>> req = requests.Request('GET', 'https://httpbin.org/get') - >>> r = req.prepare() - - - >>> s = requests.Session() - >>> s.send(r) - - """ - - def __init__(self): - #: HTTP verb to send to the server. - self.method = None - #: HTTP URL to send the request to. - self.url = None - #: dictionary of HTTP headers. - self.headers = None - # The `CookieJar` used to create the Cookie header will be stored here - # after prepare_cookies is called - self._cookies = None - #: request body to send to the server. - self.body = None - #: dictionary of callback hooks, for internal usage. - self.hooks = default_hooks() - #: integer denoting starting position of a readable file-like body. - self._body_position = None - - def prepare(self, - method=None, url=None, headers=None, files=None, data=None, - params=None, auth=None, cookies=None, hooks=None, json=None): - """Prepares the entire request with the given parameters.""" - - self.prepare_method(method) - self.prepare_url(url, params) - self.prepare_headers(headers) - self.prepare_cookies(cookies) - self.prepare_body(data, files, json) - self.prepare_auth(auth, url) - - # Note that prepare_auth must be last to enable authentication schemes - # such as OAuth to work on a fully prepared request. - - # This MUST go after prepare_auth. 
Authenticators could add a hook - self.prepare_hooks(hooks) - - def __repr__(self): - return '' % (self.method) - - def copy(self): - p = PreparedRequest() - p.method = self.method - p.url = self.url - p.headers = self.headers.copy() if self.headers is not None else None - p._cookies = _copy_cookie_jar(self._cookies) - p.body = self.body - p.hooks = self.hooks - p._body_position = self._body_position - return p - - def prepare_method(self, method): - """Prepares the given HTTP method.""" - self.method = method - if self.method is not None: - self.method = to_native_string(self.method.upper()) - - @staticmethod - def _get_idna_encoded_host(host): - import idna - - try: - host = idna.encode(host, uts46=True).decode('utf-8') - except idna.IDNAError: - raise UnicodeError - return host - - def prepare_url(self, url, params): - """Prepares the given HTTP URL.""" - #: Accept objects that have string representations. - #: We're unable to blindly call unicode/str functions - #: as this will include the bytestring indicator (b'') - #: on python 3.x. - #: https://github.com/requests/requests/pull/2238 - if isinstance(url, bytes): - url = url.decode('utf8') - else: - url = unicode(url) if is_py2 else str(url) - - # Remove leading whitespaces from url - url = url.lstrip() - - # Don't do any URL preparation for non-HTTP schemes like `mailto`, - # `data` etc to work around exceptions from `url_parse`, which - # handles RFC 3986 only. - if ':' in url and not url.lower().startswith('http'): - self.url = url - return - - # Support for unicode domain names and paths. - try: - scheme, auth, host, port, path, query, fragment = parse_url(url) - except LocationParseError as e: - raise InvalidURL(*e.args) - - if not scheme: - error = ("Invalid URL {0!r}: No schema supplied. 
Perhaps you meant http://{0}?") - error = error.format(to_native_string(url, 'utf8')) - - raise MissingSchema(error) - - if not host: - raise InvalidURL("Invalid URL %r: No host supplied" % url) - - # In general, we want to try IDNA encoding the hostname if the string contains - # non-ASCII characters. This allows users to automatically get the correct IDNA - # behaviour. For strings containing only ASCII characters, we need to also verify - # it doesn't start with a wildcard (*), before allowing the unencoded hostname. - if not unicode_is_ascii(host): - try: - host = self._get_idna_encoded_host(host) - except UnicodeError: - raise InvalidURL('URL has an invalid label.') - elif host.startswith(u'*'): - raise InvalidURL('URL has an invalid label.') - - # Carefully reconstruct the network location - netloc = auth or '' - if netloc: - netloc += '@' - netloc += host - if port: - netloc += ':' + str(port) - - # Bare domains aren't valid URLs. - if not path: - path = '/' - - if is_py2: - if isinstance(scheme, str): - scheme = scheme.encode('utf-8') - if isinstance(netloc, str): - netloc = netloc.encode('utf-8') - if isinstance(path, str): - path = path.encode('utf-8') - if isinstance(query, str): - query = query.encode('utf-8') - if isinstance(fragment, str): - fragment = fragment.encode('utf-8') - - if isinstance(params, (str, bytes)): - params = to_native_string(params) - - enc_params = self._encode_params(params) - if enc_params: - if query: - query = '%s&%s' % (query, enc_params) - else: - query = enc_params - - url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment])) - self.url = url - - def prepare_headers(self, headers): - """Prepares the given HTTP headers.""" - - self.headers = CaseInsensitiveDict() - if headers: - for header in headers.items(): - # Raise exception on invalid header value. 
- check_header_validity(header) - name, value = header - self.headers[to_native_string(name)] = value - - def prepare_body(self, data, files, json=None): - """Prepares the given HTTP body data.""" - - # Check if file, fo, generator, iterator. - # If not, run through normal process. - - # Nottin' on you. - body = None - content_type = None - - if not data and json is not None: - # urllib3 requires a bytes-like body. Python 2's json.dumps - # provides this natively, but Python 3 gives a Unicode string. - content_type = 'application/json' - body = complexjson.dumps(json) - if not isinstance(body, bytes): - body = body.encode('utf-8') - - is_stream = all([ - hasattr(data, '__iter__'), - not isinstance(data, (basestring, list, tuple, Mapping)) - ]) - - try: - length = super_len(data) - except (TypeError, AttributeError, UnsupportedOperation): - length = None - - if is_stream: - body = data - - if getattr(body, 'tell', None) is not None: - # Record the current file position before reading. - # This will allow us to rewind a file in the event - # of a redirect. - try: - self._body_position = body.tell() - except (IOError, OSError): - # This differentiates from None, allowing us to catch - # a failed `tell()` later when trying to rewind the body - self._body_position = object() - - if files: - raise NotImplementedError('Streamed bodies and files are mutually exclusive.') - - if length: - self.headers['Content-Length'] = builtin_str(length) - else: - self.headers['Transfer-Encoding'] = 'chunked' - else: - # Multi-part file uploads. - if files: - (body, content_type) = self._encode_files(files, data) - else: - if data: - body = self._encode_params(data) - if isinstance(data, basestring) or hasattr(data, 'read'): - content_type = None - else: - content_type = 'application/x-www-form-urlencoded' - - self.prepare_content_length(body) - - # Add content-type if it wasn't explicitly provided. 
- if content_type and ('content-type' not in self.headers): - self.headers['Content-Type'] = content_type - - self.body = body - - def prepare_content_length(self, body): - """Prepare Content-Length header based on request method and body""" - if body is not None: - length = super_len(body) - if length: - # If length exists, set it. Otherwise, we fallback - # to Transfer-Encoding: chunked. - self.headers['Content-Length'] = builtin_str(length) - elif self.method not in ('GET', 'HEAD') and self.headers.get('Content-Length') is None: - # Set Content-Length to 0 for methods that can have a body - # but don't provide one. (i.e. not GET or HEAD) - self.headers['Content-Length'] = '0' - - def prepare_auth(self, auth, url=''): - """Prepares the given HTTP auth data.""" - - # If no Auth is explicitly provided, extract it from the URL first. - if auth is None: - url_auth = get_auth_from_url(self.url) - auth = url_auth if any(url_auth) else None - - if auth: - if isinstance(auth, tuple) and len(auth) == 2: - # special-case basic HTTP auth - auth = HTTPBasicAuth(*auth) - - # Allow auth to make its changes. - r = auth(self) - - # Update self to reflect the auth changes. - self.__dict__.update(r.__dict__) - - # Recompute Content-Length - self.prepare_content_length(self.body) - - def prepare_cookies(self, cookies): - """Prepares the given HTTP cookie data. - - This function eventually generates a ``Cookie`` header from the - given cookies using cookielib. Due to cookielib's design, the header - will not be regenerated if it already exists, meaning this function - can only be called once for the life of the - :class:`PreparedRequest ` object. Any subsequent calls - to ``prepare_cookies`` will have no actual effect, unless the "Cookie" - header is removed beforehand. 
- """ - if isinstance(cookies, cookielib.CookieJar): - self._cookies = cookies - else: - self._cookies = cookiejar_from_dict(cookies) - - cookie_header = get_cookie_header(self._cookies, self) - if cookie_header is not None: - self.headers['Cookie'] = cookie_header - - def prepare_hooks(self, hooks): - """Prepares the given hooks.""" - # hooks can be passed as None to the prepare method and to this - # method. To prevent iterating over None, simply use an empty list - # if hooks is False-y - hooks = hooks or [] - for event in hooks: - self.register_hook(event, hooks[event]) - - -class Response(object): - """The :class:`Response ` object, which contains a - server's response to an HTTP request. - """ - - __attrs__ = [ - '_content', 'status_code', 'headers', 'url', 'history', - 'encoding', 'reason', 'cookies', 'elapsed', 'request' - ] - - def __init__(self): - self._content = False - self._content_consumed = False - self._next = None - - #: Integer Code of responded HTTP Status, e.g. 404 or 200. - self.status_code = None - - #: Case-insensitive Dictionary of Response Headers. - #: For example, ``headers['content-encoding']`` will return the - #: value of a ``'Content-Encoding'`` response header. - self.headers = CaseInsensitiveDict() - - #: File-like object representation of response (for advanced usage). - #: Use of ``raw`` requires that ``stream=True`` be set on the request. - # This requirement does not apply for use internally to Requests. - self.raw = None - - #: Final URL location of Response. - self.url = None - - #: Encoding to decode with when accessing r.text. - self.encoding = None - - #: A list of :class:`Response ` objects from - #: the history of the Request. Any redirect responses will end - #: up here. The list is sorted from the oldest to the most recent request. - self.history = [] - - #: Textual reason of responded HTTP Status, e.g. "Not Found" or "OK". - self.reason = None - - #: A CookieJar of Cookies the server sent back. 
- self.cookies = cookiejar_from_dict({}) - - #: The amount of time elapsed between sending the request - #: and the arrival of the response (as a timedelta). - #: This property specifically measures the time taken between sending - #: the first byte of the request and finishing parsing the headers. It - #: is therefore unaffected by consuming the response content or the - #: value of the ``stream`` keyword argument. - self.elapsed = datetime.timedelta(0) - - #: The :class:`PreparedRequest ` object to which this - #: is a response. - self.request = None - - def __enter__(self): - return self - - def __exit__(self, *args): - self.close() - - def __getstate__(self): - # Consume everything; accessing the content attribute makes - # sure the content has been fully read. - if not self._content_consumed: - self.content - - return {attr: getattr(self, attr, None) for attr in self.__attrs__} - - def __setstate__(self, state): - for name, value in state.items(): - setattr(self, name, value) - - # pickled objects do not have .raw - setattr(self, '_content_consumed', True) - setattr(self, 'raw', None) - - def __repr__(self): - return '' % (self.status_code) - - def __bool__(self): - """Returns True if :attr:`status_code` is less than 400. - - This attribute checks if the status code of the response is between - 400 and 600 to see if there was a client error or a server error. If - the status code, is between 200 and 400, this will return True. This - is **not** a check to see if the response code is ``200 OK``. - """ - return self.ok - - def __nonzero__(self): - """Returns True if :attr:`status_code` is less than 400. - - This attribute checks if the status code of the response is between - 400 and 600 to see if there was a client error or a server error. If - the status code, is between 200 and 400, this will return True. This - is **not** a check to see if the response code is ``200 OK``. 
- """ - return self.ok - - def __iter__(self): - """Allows you to use a response as an iterator.""" - return self.iter_content(128) - - @property - def ok(self): - """Returns True if :attr:`status_code` is less than 400, False if not. - - This attribute checks if the status code of the response is between - 400 and 600 to see if there was a client error or a server error. If - the status code is between 200 and 400, this will return True. This - is **not** a check to see if the response code is ``200 OK``. - """ - try: - self.raise_for_status() - except HTTPError: - return False - return True - - @property - def is_redirect(self): - """True if this Response is a well-formed HTTP redirect that could have - been processed automatically (by :meth:`Session.resolve_redirects`). - """ - return ('location' in self.headers and self.status_code in REDIRECT_STATI) - - @property - def is_permanent_redirect(self): - """True if this Response one of the permanent versions of redirect.""" - return ('location' in self.headers and self.status_code in (codes.moved_permanently, codes.permanent_redirect)) - - @property - def next(self): - """Returns a PreparedRequest for the next request in a redirect chain, if there is one.""" - return self._next - - @property - def apparent_encoding(self): - """The apparent encoding, provided by the chardet library.""" - return chardet.detect(self.content)['encoding'] - - def iter_content(self, chunk_size=1, decode_unicode=False): - """Iterates over the response data. When stream=True is set on the - request, this avoids reading the content at once into memory for - large responses. The chunk size is the number of bytes it should - read into memory. This is not necessarily the length of each item - returned as decoding can take place. - - chunk_size must be of type int or None. A value of None will - function differently depending on the value of `stream`. - stream=True will read data as it arrives in whatever size the - chunks are received. 
If stream=False, data is returned as - a single chunk. - - If decode_unicode is True, content will be decoded using the best - available encoding based on the response. - """ - - def generate(): - # Special case for urllib3. - if hasattr(self.raw, 'stream'): - try: - for chunk in self.raw.stream(chunk_size, decode_content=True): - yield chunk - except ProtocolError as e: - raise ChunkedEncodingError(e) - except DecodeError as e: - raise ContentDecodingError(e) - except ReadTimeoutError as e: - raise ConnectionError(e) - else: - # Standard file-like object. - while True: - chunk = self.raw.read(chunk_size) - if not chunk: - break - yield chunk - - self._content_consumed = True - - if self._content_consumed and isinstance(self._content, bool): - raise StreamConsumedError() - elif chunk_size is not None and not isinstance(chunk_size, int): - raise TypeError("chunk_size must be an int, it is instead a %s." % type(chunk_size)) - # simulate reading small chunks of the content - reused_chunks = iter_slices(self._content, chunk_size) - - stream_chunks = generate() - - chunks = reused_chunks if self._content_consumed else stream_chunks - - if decode_unicode: - chunks = stream_decode_response_unicode(chunks, self) - - return chunks - - def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=False, delimiter=None): - """Iterates over the response data, one line at a time. When - stream=True is set on the request, this avoids reading the - content at once into memory for large responses. - - .. note:: This method is not reentrant safe. 
- """ - - pending = None - - for chunk in self.iter_content(chunk_size=chunk_size, decode_unicode=decode_unicode): - - if pending is not None: - chunk = pending + chunk - - if delimiter: - lines = chunk.split(delimiter) - else: - lines = chunk.splitlines() - - if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]: - pending = lines.pop() - else: - pending = None - - for line in lines: - yield line - - if pending is not None: - yield pending - - @property - def content(self): - """Content of the response, in bytes.""" - - if self._content is False: - # Read the contents. - if self._content_consumed: - raise RuntimeError( - 'The content for this response was already consumed') - - if self.status_code == 0 or self.raw is None: - self._content = None - else: - self._content = b''.join(self.iter_content(CONTENT_CHUNK_SIZE)) or b'' - - self._content_consumed = True - # don't need to release the connection; that's been handled by urllib3 - # since we exhausted the data. - return self._content - - @property - def text(self): - """Content of the response, in unicode. - - If Response.encoding is None, encoding will be guessed using - ``chardet``. - - The encoding of the response content is determined based solely on HTTP - headers, following RFC 2616 to the letter. If you can take advantage of - non-HTTP knowledge to make a better guess at the encoding, you should - set ``r.encoding`` appropriately before accessing this property. - """ - - # Try charset from content-type - content = None - encoding = self.encoding - - if not self.content: - return str('') - - # Fallback to auto-detected encoding. - if self.encoding is None: - encoding = self.apparent_encoding - - # Decode unicode from given encoding. - try: - content = str(self.content, encoding, errors='replace') - except (LookupError, TypeError): - # A LookupError is raised if the encoding was not found which could - # indicate a misspelling or similar mistake. 
- # - # A TypeError can be raised if encoding is None - # - # So we try blindly encoding. - content = str(self.content, errors='replace') - - return content - - def json(self, **kwargs): - r"""Returns the json-encoded content of a response, if any. - - :param \*\*kwargs: Optional arguments that ``json.loads`` takes. - :raises ValueError: If the response body does not contain valid json. - """ - - if not self.encoding and self.content and len(self.content) > 3: - # No encoding set. JSON RFC 4627 section 3 states we should expect - # UTF-8, -16 or -32. Detect which one to use; If the detection or - # decoding fails, fall back to `self.text` (using chardet to make - # a best guess). - encoding = guess_json_utf(self.content) - if encoding is not None: - try: - return complexjson.loads( - self.content.decode(encoding), **kwargs - ) - except UnicodeDecodeError: - # Wrong UTF codec detected; usually because it's not UTF-8 - # but some other 8-bit codec. This is an RFC violation, - # and the server didn't bother to tell us what codec *was* - # used. - pass - return complexjson.loads(self.text, **kwargs) - - @property - def links(self): - """Returns the parsed header links of the response, if any.""" - - header = self.headers.get('link') - - # l = MultiDict() - l = {} - - if header: - links = parse_header_links(header) - - for link in links: - key = link.get('rel') or link.get('url') - l[key] = link - - return l - - def raise_for_status(self): - """Raises stored :class:`HTTPError`, if one occurred.""" - - http_error_msg = '' - if isinstance(self.reason, bytes): - # We attempt to decode utf-8 first because some servers - # choose to localize their reason strings. If the string - # isn't utf-8, we fall back to iso-8859-1 for all other - # encodings. 
(See PR #3538) - try: - reason = self.reason.decode('utf-8') - except UnicodeDecodeError: - reason = self.reason.decode('iso-8859-1') - else: - reason = self.reason - - if 400 <= self.status_code < 500: - http_error_msg = u'%s Client Error: %s for url: %s' % (self.status_code, reason, self.url) - - elif 500 <= self.status_code < 600: - http_error_msg = u'%s Server Error: %s for url: %s' % (self.status_code, reason, self.url) - - if http_error_msg: - raise HTTPError(http_error_msg, response=self) - - def close(self): - """Releases the connection back to the pool. Once this method has been - called the underlying ``raw`` object must not be accessed again. - - *Note: Should not normally need to be called explicitly.* - """ - if not self._content_consumed: - self.raw.close() - - release_conn = getattr(self.raw, 'release_conn', None) - if release_conn is not None: - release_conn() diff --git a/solnlib/packages/requests/packages.py b/solnlib/packages/requests/packages.py deleted file mode 100644 index 7232fe0f..00000000 --- a/solnlib/packages/requests/packages.py +++ /dev/null @@ -1,14 +0,0 @@ -import sys - -# This code exists for backwards compatibility reasons. -# I don't like it either. Just look the other way. :) - -for package in ('urllib3', 'idna', 'chardet'): - locals()[package] = __import__(package) - # This traversal is apparently necessary such that the identities are - # preserved (requests.packages.urllib3.* is urllib3.*) - for mod in list(sys.modules): - if mod == package or mod.startswith(package + '.'): - sys.modules['requests.packages.' + mod] = sys.modules[mod] - -# Kinda cool, though, right? 
diff --git a/solnlib/packages/requests/sessions.py b/solnlib/packages/requests/sessions.py deleted file mode 100644 index d73d700f..00000000 --- a/solnlib/packages/requests/sessions.py +++ /dev/null @@ -1,770 +0,0 @@ -# -*- coding: utf-8 -*- - -""" -requests.session -~~~~~~~~~~~~~~~~ - -This module provides a Session object to manage and persist settings across -requests (cookies, auth, proxies). -""" -import os -import sys -import time -from datetime import timedelta - -from .auth import _basic_auth_str -from .compat import cookielib, is_py3, OrderedDict, urljoin, urlparse, Mapping -from .cookies import ( - cookiejar_from_dict, extract_cookies_to_jar, RequestsCookieJar, merge_cookies) -from .models import Request, PreparedRequest, DEFAULT_REDIRECT_LIMIT -from .hooks import default_hooks, dispatch_hook -from ._internal_utils import to_native_string -from .utils import to_key_val_list, default_headers, DEFAULT_PORTS -from .exceptions import ( - TooManyRedirects, InvalidSchema, ChunkedEncodingError, ContentDecodingError) - -from .structures import CaseInsensitiveDict -from .adapters import HTTPAdapter - -from .utils import ( - requote_uri, get_environ_proxies, get_netrc_auth, should_bypass_proxies, - get_auth_from_url, rewind_body -) - -from .status_codes import codes - -# formerly defined here, reexposed here for backward compatibility -from .models import REDIRECT_STATI - -# Preferred clock, based on which one is more accurate on a given system. -if sys.platform == 'win32': - try: # Python 3.4+ - preferred_clock = time.perf_counter - except AttributeError: # Earlier than Python 3. - preferred_clock = time.clock -else: - preferred_clock = time.time - - -def merge_setting(request_setting, session_setting, dict_class=OrderedDict): - """Determines appropriate setting for a given request, taking into account - the explicit setting on that request, and the setting in the session. 
If a - setting is a dictionary, they will be merged together using `dict_class` - """ - - if session_setting is None: - return request_setting - - if request_setting is None: - return session_setting - - # Bypass if not a dictionary (e.g. verify) - if not ( - isinstance(session_setting, Mapping) and - isinstance(request_setting, Mapping) - ): - return request_setting - - merged_setting = dict_class(to_key_val_list(session_setting)) - merged_setting.update(to_key_val_list(request_setting)) - - # Remove keys that are set to None. Extract keys first to avoid altering - # the dictionary during iteration. - none_keys = [k for (k, v) in merged_setting.items() if v is None] - for key in none_keys: - del merged_setting[key] - - return merged_setting - - -def merge_hooks(request_hooks, session_hooks, dict_class=OrderedDict): - """Properly merges both requests and session hooks. - - This is necessary because when request_hooks == {'response': []}, the - merge breaks Session hooks entirely. - """ - if session_hooks is None or session_hooks.get('response') == []: - return request_hooks - - if request_hooks is None or request_hooks.get('response') == []: - return session_hooks - - return merge_setting(request_hooks, session_hooks, dict_class) - - -class SessionRedirectMixin(object): - - def get_redirect_target(self, resp): - """Receives a Response. Returns a redirect URI or ``None``""" - # Due to the nature of how requests processes redirects this method will - # be called at least once upon the original response and at least twice - # on each subsequent redirect response (if any). - # If a custom mixin is used to handle this logic, it may be advantageous - # to cache the redirect location onto the response object as a private - # attribute. 
- if resp.is_redirect: - location = resp.headers['location'] - # Currently the underlying http module on py3 decode headers - # in latin1, but empirical evidence suggests that latin1 is very - # rarely used with non-ASCII characters in HTTP headers. - # It is more likely to get UTF8 header rather than latin1. - # This causes incorrect handling of UTF8 encoded location headers. - # To solve this, we re-encode the location in latin1. - if is_py3: - location = location.encode('latin1') - return to_native_string(location, 'utf8') - return None - - def should_strip_auth(self, old_url, new_url): - """Decide whether Authorization header should be removed when redirecting""" - old_parsed = urlparse(old_url) - new_parsed = urlparse(new_url) - if old_parsed.hostname != new_parsed.hostname: - return True - # Special case: allow http -> https redirect when using the standard - # ports. This isn't specified by RFC 7235, but is kept to avoid - # breaking backwards compatibility with older versions of requests - # that allowed any redirects on the same host. - if (old_parsed.scheme == 'http' and old_parsed.port in (80, None) - and new_parsed.scheme == 'https' and new_parsed.port in (443, None)): - return False - - # Handle default port usage corresponding to scheme. - changed_port = old_parsed.port != new_parsed.port - changed_scheme = old_parsed.scheme != new_parsed.scheme - default_port = (DEFAULT_PORTS.get(old_parsed.scheme, None), None) - if (not changed_scheme and old_parsed.port in default_port - and new_parsed.port in default_port): - return False - - # Standard case: root URI must match - return changed_port or changed_scheme - - def resolve_redirects(self, resp, req, stream=False, timeout=None, - verify=True, cert=None, proxies=None, yield_requests=False, **adapter_kwargs): - """Receives a Response. 
Returns a generator of Responses or Requests.""" - - hist = [] # keep track of history - - url = self.get_redirect_target(resp) - previous_fragment = urlparse(req.url).fragment - while url: - prepared_request = req.copy() - - # Update history and keep track of redirects. - # resp.history must ignore the original request in this loop - hist.append(resp) - resp.history = hist[1:] - - try: - resp.content # Consume socket so it can be released - except (ChunkedEncodingError, ContentDecodingError, RuntimeError): - resp.raw.read(decode_content=False) - - if len(resp.history) >= self.max_redirects: - raise TooManyRedirects('Exceeded %s redirects.' % self.max_redirects, response=resp) - - # Release the connection back into the pool. - resp.close() - - # Handle redirection without scheme (see: RFC 1808 Section 4) - if url.startswith('//'): - parsed_rurl = urlparse(resp.url) - url = '%s:%s' % (to_native_string(parsed_rurl.scheme), url) - - # Normalize url case and attach previous fragment if needed (RFC 7231 7.1.2) - parsed = urlparse(url) - if parsed.fragment == '' and previous_fragment: - parsed = parsed._replace(fragment=previous_fragment) - elif parsed.fragment: - previous_fragment = parsed.fragment - url = parsed.geturl() - - # Facilitate relative 'location' headers, as allowed by RFC 7231. - # (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource') - # Compliant with RFC3986, we percent encode the url. 
- if not parsed.netloc: - url = urljoin(resp.url, requote_uri(url)) - else: - url = requote_uri(url) - - prepared_request.url = to_native_string(url) - - self.rebuild_method(prepared_request, resp) - - # https://github.com/requests/requests/issues/1084 - if resp.status_code not in (codes.temporary_redirect, codes.permanent_redirect): - # https://github.com/requests/requests/issues/3490 - purged_headers = ('Content-Length', 'Content-Type', 'Transfer-Encoding') - for header in purged_headers: - prepared_request.headers.pop(header, None) - prepared_request.body = None - - headers = prepared_request.headers - try: - del headers['Cookie'] - except KeyError: - pass - - # Extract any cookies sent on the response to the cookiejar - # in the new request. Because we've mutated our copied prepared - # request, use the old one that we haven't yet touched. - extract_cookies_to_jar(prepared_request._cookies, req, resp.raw) - merge_cookies(prepared_request._cookies, self.cookies) - prepared_request.prepare_cookies(prepared_request._cookies) - - # Rebuild auth and proxy information. - proxies = self.rebuild_proxies(prepared_request, proxies) - self.rebuild_auth(prepared_request, resp) - - # A failed tell() sets `_body_position` to `object()`. This non-None - # value ensures `rewindable` will be True, allowing us to raise an - # UnrewindableBodyError, instead of hanging the connection. - rewindable = ( - prepared_request._body_position is not None and - ('Content-Length' in headers or 'Transfer-Encoding' in headers) - ) - - # Attempt to rewind consumed file-like object. - if rewindable: - rewind_body(prepared_request) - - # Override the original request. 
- req = prepared_request - - if yield_requests: - yield req - else: - - resp = self.send( - req, - stream=stream, - timeout=timeout, - verify=verify, - cert=cert, - proxies=proxies, - allow_redirects=False, - **adapter_kwargs - ) - - extract_cookies_to_jar(self.cookies, prepared_request, resp.raw) - - # extract redirect url, if any, for the next loop - url = self.get_redirect_target(resp) - yield resp - - def rebuild_auth(self, prepared_request, response): - """When being redirected we may want to strip authentication from the - request to avoid leaking credentials. This method intelligently removes - and reapplies authentication where possible to avoid credential loss. - """ - headers = prepared_request.headers - url = prepared_request.url - - if 'Authorization' in headers and self.should_strip_auth(response.request.url, url): - # If we get redirected to a new host, we should strip out any - # authentication headers. - del headers['Authorization'] - - # .netrc might have more auth for us on our new host. - new_auth = get_netrc_auth(url) if self.trust_env else None - if new_auth is not None: - prepared_request.prepare_auth(new_auth) - - return - - def rebuild_proxies(self, prepared_request, proxies): - """This method re-evaluates the proxy configuration by considering the - environment variables. If we are redirected to a URL covered by - NO_PROXY, we strip the proxy configuration. Otherwise, we set missing - proxy keys for this URL (in case they were stripped by a previous - redirect). - - This method also replaces the Proxy-Authorization header where - necessary. 
- - :rtype: dict - """ - proxies = proxies if proxies is not None else {} - headers = prepared_request.headers - url = prepared_request.url - scheme = urlparse(url).scheme - new_proxies = proxies.copy() - no_proxy = proxies.get('no_proxy') - - bypass_proxy = should_bypass_proxies(url, no_proxy=no_proxy) - if self.trust_env and not bypass_proxy: - environ_proxies = get_environ_proxies(url, no_proxy=no_proxy) - - proxy = environ_proxies.get(scheme, environ_proxies.get('all')) - - if proxy: - new_proxies.setdefault(scheme, proxy) - - if 'Proxy-Authorization' in headers: - del headers['Proxy-Authorization'] - - try: - username, password = get_auth_from_url(new_proxies[scheme]) - except KeyError: - username, password = None, None - - if username and password: - headers['Proxy-Authorization'] = _basic_auth_str(username, password) - - return new_proxies - - def rebuild_method(self, prepared_request, response): - """When being redirected we may want to change the method of the request - based on certain specs or browser behavior. - """ - method = prepared_request.method - - # https://tools.ietf.org/html/rfc7231#section-6.4.4 - if response.status_code == codes.see_other and method != 'HEAD': - method = 'GET' - - # Do what the browsers do, despite standards... - # First, turn 302s into GETs. - if response.status_code == codes.found and method != 'HEAD': - method = 'GET' - - # Second, if a POST is responded to with a 301, turn it into a GET. - # This bizarre behaviour is explained in Issue 1704. - if response.status_code == codes.moved and method == 'POST': - method = 'GET' - - prepared_request.method = method - - -class Session(SessionRedirectMixin): - """A Requests session. - - Provides cookie persistence, connection-pooling, and configuration. 
- - Basic Usage:: - - >>> import requests - >>> s = requests.Session() - >>> s.get('https://httpbin.org/get') - - - Or as a context manager:: - - >>> with requests.Session() as s: - >>> s.get('https://httpbin.org/get') - - """ - - __attrs__ = [ - 'headers', 'cookies', 'auth', 'proxies', 'hooks', 'params', 'verify', - 'cert', 'prefetch', 'adapters', 'stream', 'trust_env', - 'max_redirects', - ] - - def __init__(self): - - #: A case-insensitive dictionary of headers to be sent on each - #: :class:`Request ` sent from this - #: :class:`Session `. - self.headers = default_headers() - - #: Default Authentication tuple or object to attach to - #: :class:`Request `. - self.auth = None - - #: Dictionary mapping protocol or protocol and host to the URL of the proxy - #: (e.g. {'http': 'foo.bar:3128', 'http://host.name': 'foo.bar:4012'}) to - #: be used on each :class:`Request `. - self.proxies = {} - - #: Event-handling hooks. - self.hooks = default_hooks() - - #: Dictionary of querystring data to attach to each - #: :class:`Request `. The dictionary values may be lists for - #: representing multivalued query parameters. - self.params = {} - - #: Stream response content default. - self.stream = False - - #: SSL Verification default. - self.verify = True - - #: SSL client certificate default, if String, path to ssl client - #: cert file (.pem). If Tuple, ('cert', 'key') pair. - self.cert = None - - #: Maximum number of redirects allowed. If the request exceeds this - #: limit, a :class:`TooManyRedirects` exception is raised. - #: This defaults to requests.models.DEFAULT_REDIRECT_LIMIT, which is - #: 30. - self.max_redirects = DEFAULT_REDIRECT_LIMIT - - #: Trust environment settings for proxy configuration, default - #: authentication and similar. - self.trust_env = True - - #: A CookieJar containing all currently outstanding cookies set on this - #: session. 
By default it is a - #: :class:`RequestsCookieJar `, but - #: may be any other ``cookielib.CookieJar`` compatible object. - self.cookies = cookiejar_from_dict({}) - - # Default connection adapters. - self.adapters = OrderedDict() - self.mount('https://', HTTPAdapter()) - self.mount('http://', HTTPAdapter()) - - def __enter__(self): - return self - - def __exit__(self, *args): - self.close() - - def prepare_request(self, request): - """Constructs a :class:`PreparedRequest ` for - transmission and returns it. The :class:`PreparedRequest` has settings - merged from the :class:`Request ` instance and those of the - :class:`Session`. - - :param request: :class:`Request` instance to prepare with this - session's settings. - :rtype: requests.PreparedRequest - """ - cookies = request.cookies or {} - - # Bootstrap CookieJar. - if not isinstance(cookies, cookielib.CookieJar): - cookies = cookiejar_from_dict(cookies) - - # Merge with session cookies - merged_cookies = merge_cookies( - merge_cookies(RequestsCookieJar(), self.cookies), cookies) - - # Set environment's basic authentication if not explicitly set. - auth = request.auth - if self.trust_env and not auth and not self.auth: - auth = get_netrc_auth(request.url) - - p = PreparedRequest() - p.prepare( - method=request.method.upper(), - url=request.url, - files=request.files, - data=request.data, - json=request.json, - headers=merge_setting(request.headers, self.headers, dict_class=CaseInsensitiveDict), - params=merge_setting(request.params, self.params), - auth=merge_setting(auth, self.auth), - cookies=merged_cookies, - hooks=merge_hooks(request.hooks, self.hooks), - ) - return p - - def request(self, method, url, - params=None, data=None, headers=None, cookies=None, files=None, - auth=None, timeout=None, allow_redirects=True, proxies=None, - hooks=None, stream=None, verify=None, cert=None, json=None): - """Constructs a :class:`Request `, prepares it and sends it. - Returns :class:`Response ` object. 
- - :param method: method for the new :class:`Request` object. - :param url: URL for the new :class:`Request` object. - :param params: (optional) Dictionary or bytes to be sent in the query - string for the :class:`Request`. - :param data: (optional) Dictionary, list of tuples, bytes, or file-like - object to send in the body of the :class:`Request`. - :param json: (optional) json to send in the body of the - :class:`Request`. - :param headers: (optional) Dictionary of HTTP Headers to send with the - :class:`Request`. - :param cookies: (optional) Dict or CookieJar object to send with the - :class:`Request`. - :param files: (optional) Dictionary of ``'filename': file-like-objects`` - for multipart encoding upload. - :param auth: (optional) Auth tuple or callable to enable - Basic/Digest/Custom HTTP Auth. - :param timeout: (optional) How long to wait for the server to send - data before giving up, as a float, or a :ref:`(connect timeout, - read timeout) ` tuple. - :type timeout: float or tuple - :param allow_redirects: (optional) Set to True by default. - :type allow_redirects: bool - :param proxies: (optional) Dictionary mapping protocol or protocol and - hostname to the URL of the proxy. - :param stream: (optional) whether to immediately download the response - content. Defaults to ``False``. - :param verify: (optional) Either a boolean, in which case it controls whether we verify - the server's TLS certificate, or a string, in which case it must be a path - to a CA bundle to use. Defaults to ``True``. - :param cert: (optional) if String, path to ssl client cert file (.pem). - If Tuple, ('cert', 'key') pair. - :rtype: requests.Response - """ - # Create the Request. 
- req = Request( - method=method.upper(), - url=url, - headers=headers, - files=files, - data=data or {}, - json=json, - params=params or {}, - auth=auth, - cookies=cookies, - hooks=hooks, - ) - prep = self.prepare_request(req) - - proxies = proxies or {} - - settings = self.merge_environment_settings( - prep.url, proxies, stream, verify, cert - ) - - # Send the request. - send_kwargs = { - 'timeout': timeout, - 'allow_redirects': allow_redirects, - } - send_kwargs.update(settings) - resp = self.send(prep, **send_kwargs) - - return resp - - def get(self, url, **kwargs): - r"""Sends a GET request. Returns :class:`Response` object. - - :param url: URL for the new :class:`Request` object. - :param \*\*kwargs: Optional arguments that ``request`` takes. - :rtype: requests.Response - """ - - kwargs.setdefault('allow_redirects', True) - return self.request('GET', url, **kwargs) - - def options(self, url, **kwargs): - r"""Sends a OPTIONS request. Returns :class:`Response` object. - - :param url: URL for the new :class:`Request` object. - :param \*\*kwargs: Optional arguments that ``request`` takes. - :rtype: requests.Response - """ - - kwargs.setdefault('allow_redirects', True) - return self.request('OPTIONS', url, **kwargs) - - def head(self, url, **kwargs): - r"""Sends a HEAD request. Returns :class:`Response` object. - - :param url: URL for the new :class:`Request` object. - :param \*\*kwargs: Optional arguments that ``request`` takes. - :rtype: requests.Response - """ - - kwargs.setdefault('allow_redirects', False) - return self.request('HEAD', url, **kwargs) - - def post(self, url, data=None, json=None, **kwargs): - r"""Sends a POST request. Returns :class:`Response` object. - - :param url: URL for the new :class:`Request` object. - :param data: (optional) Dictionary, list of tuples, bytes, or file-like - object to send in the body of the :class:`Request`. - :param json: (optional) json to send in the body of the :class:`Request`. 
- :param \*\*kwargs: Optional arguments that ``request`` takes. - :rtype: requests.Response - """ - - return self.request('POST', url, data=data, json=json, **kwargs) - - def put(self, url, data=None, **kwargs): - r"""Sends a PUT request. Returns :class:`Response` object. - - :param url: URL for the new :class:`Request` object. - :param data: (optional) Dictionary, list of tuples, bytes, or file-like - object to send in the body of the :class:`Request`. - :param \*\*kwargs: Optional arguments that ``request`` takes. - :rtype: requests.Response - """ - - return self.request('PUT', url, data=data, **kwargs) - - def patch(self, url, data=None, **kwargs): - r"""Sends a PATCH request. Returns :class:`Response` object. - - :param url: URL for the new :class:`Request` object. - :param data: (optional) Dictionary, list of tuples, bytes, or file-like - object to send in the body of the :class:`Request`. - :param \*\*kwargs: Optional arguments that ``request`` takes. - :rtype: requests.Response - """ - - return self.request('PATCH', url, data=data, **kwargs) - - def delete(self, url, **kwargs): - r"""Sends a DELETE request. Returns :class:`Response` object. - - :param url: URL for the new :class:`Request` object. - :param \*\*kwargs: Optional arguments that ``request`` takes. - :rtype: requests.Response - """ - - return self.request('DELETE', url, **kwargs) - - def send(self, request, **kwargs): - """Send a given PreparedRequest. - - :rtype: requests.Response - """ - # Set defaults that the hooks can utilize to ensure they always have - # the correct parameters to reproduce the previous request. - kwargs.setdefault('stream', self.stream) - kwargs.setdefault('verify', self.verify) - kwargs.setdefault('cert', self.cert) - kwargs.setdefault('proxies', self.proxies) - - # It's possible that users might accidentally send a Request object. - # Guard against that specific failure case. 
- if isinstance(request, Request): - raise ValueError('You can only send PreparedRequests.') - - # Set up variables needed for resolve_redirects and dispatching of hooks - allow_redirects = kwargs.pop('allow_redirects', True) - stream = kwargs.get('stream') - hooks = request.hooks - - # Get the appropriate adapter to use - adapter = self.get_adapter(url=request.url) - - # Start time (approximately) of the request - start = preferred_clock() - - # Send the request - r = adapter.send(request, **kwargs) - - # Total elapsed time of the request (approximately) - elapsed = preferred_clock() - start - r.elapsed = timedelta(seconds=elapsed) - - # Response manipulation hooks - r = dispatch_hook('response', hooks, r, **kwargs) - - # Persist cookies - if r.history: - - # If the hooks create history then we want those cookies too - for resp in r.history: - extract_cookies_to_jar(self.cookies, resp.request, resp.raw) - - extract_cookies_to_jar(self.cookies, request, r.raw) - - # Redirect resolving generator. - gen = self.resolve_redirects(r, request, **kwargs) - - # Resolve redirects if allowed. - history = [resp for resp in gen] if allow_redirects else [] - - # Shuffle things around if there's history. - if history: - # Insert the first (original) request at the start - history.insert(0, r) - # Get the last request made - r = history.pop() - r.history = history - - # If redirects aren't being followed, store the response on the Request for Response.next(). - if not allow_redirects: - try: - r._next = next(self.resolve_redirects(r, request, yield_requests=True, **kwargs)) - except StopIteration: - pass - - if not stream: - r.content - - return r - - def merge_environment_settings(self, url, proxies, stream, verify, cert): - """ - Check the environment and merge it with some settings. - - :rtype: dict - """ - # Gather clues from the surrounding environment. - if self.trust_env: - # Set environment's proxies. 
- no_proxy = proxies.get('no_proxy') if proxies is not None else None - env_proxies = get_environ_proxies(url, no_proxy=no_proxy) - for (k, v) in env_proxies.items(): - proxies.setdefault(k, v) - - # Look for requests environment configuration and be compatible - # with cURL. - if verify is True or verify is None: - verify = (os.environ.get('REQUESTS_CA_BUNDLE') or - os.environ.get('CURL_CA_BUNDLE')) - - # Merge all the kwargs. - proxies = merge_setting(proxies, self.proxies) - stream = merge_setting(stream, self.stream) - verify = merge_setting(verify, self.verify) - cert = merge_setting(cert, self.cert) - - return {'verify': verify, 'proxies': proxies, 'stream': stream, - 'cert': cert} - - def get_adapter(self, url): - """ - Returns the appropriate connection adapter for the given URL. - - :rtype: requests.adapters.BaseAdapter - """ - for (prefix, adapter) in self.adapters.items(): - - if url.lower().startswith(prefix.lower()): - return adapter - - # Nothing matches :-/ - raise InvalidSchema("No connection adapters were found for '%s'" % url) - - def close(self): - """Closes all adapters and as such the session""" - for v in self.adapters.values(): - v.close() - - def mount(self, prefix, adapter): - """Registers a connection adapter to a prefix. - - Adapters are sorted in descending order by prefix length. - """ - self.adapters[prefix] = adapter - keys_to_move = [k for k in self.adapters if len(k) < len(prefix)] - - for key in keys_to_move: - self.adapters[key] = self.adapters.pop(key) - - def __getstate__(self): - state = {attr: getattr(self, attr, None) for attr in self.__attrs__} - return state - - def __setstate__(self, state): - for attr, value in state.items(): - setattr(self, attr, value) - - -def session(): - """ - Returns a :class:`Session` for context-management. - - .. deprecated:: 1.0.0 - - This method has been deprecated since version 1.0.0 and is only kept for - backwards compatibility. 
New code should use :class:`~requests.sessions.Session` - to create a session. This may be removed at a future date. - - :rtype: Session - """ - return Session() diff --git a/solnlib/packages/requests/status_codes.py b/solnlib/packages/requests/status_codes.py deleted file mode 100644 index 813e8c4e..00000000 --- a/solnlib/packages/requests/status_codes.py +++ /dev/null @@ -1,120 +0,0 @@ -# -*- coding: utf-8 -*- - -r""" -The ``codes`` object defines a mapping from common names for HTTP statuses -to their numerical codes, accessible either as attributes or as dictionary -items. - ->>> requests.codes['temporary_redirect'] -307 ->>> requests.codes.teapot -418 ->>> requests.codes['\o/'] -200 - -Some codes have multiple names, and both upper- and lower-case versions of -the names are allowed. For example, ``codes.ok``, ``codes.OK``, and -``codes.okay`` all correspond to the HTTP status code 200. -""" - -from .structures import LookupDict - -_codes = { - - # Informational. - 100: ('continue',), - 101: ('switching_protocols',), - 102: ('processing',), - 103: ('checkpoint',), - 122: ('uri_too_long', 'request_uri_too_long'), - 200: ('ok', 'okay', 'all_ok', 'all_okay', 'all_good', '\\o/', '✓'), - 201: ('created',), - 202: ('accepted',), - 203: ('non_authoritative_info', 'non_authoritative_information'), - 204: ('no_content',), - 205: ('reset_content', 'reset'), - 206: ('partial_content', 'partial'), - 207: ('multi_status', 'multiple_status', 'multi_stati', 'multiple_stati'), - 208: ('already_reported',), - 226: ('im_used',), - - # Redirection. - 300: ('multiple_choices',), - 301: ('moved_permanently', 'moved', '\\o-'), - 302: ('found',), - 303: ('see_other', 'other'), - 304: ('not_modified',), - 305: ('use_proxy',), - 306: ('switch_proxy',), - 307: ('temporary_redirect', 'temporary_moved', 'temporary'), - 308: ('permanent_redirect', - 'resume_incomplete', 'resume',), # These 2 to be removed in 3.0 - - # Client Error. 
- 400: ('bad_request', 'bad'), - 401: ('unauthorized',), - 402: ('payment_required', 'payment'), - 403: ('forbidden',), - 404: ('not_found', '-o-'), - 405: ('method_not_allowed', 'not_allowed'), - 406: ('not_acceptable',), - 407: ('proxy_authentication_required', 'proxy_auth', 'proxy_authentication'), - 408: ('request_timeout', 'timeout'), - 409: ('conflict',), - 410: ('gone',), - 411: ('length_required',), - 412: ('precondition_failed', 'precondition'), - 413: ('request_entity_too_large',), - 414: ('request_uri_too_large',), - 415: ('unsupported_media_type', 'unsupported_media', 'media_type'), - 416: ('requested_range_not_satisfiable', 'requested_range', 'range_not_satisfiable'), - 417: ('expectation_failed',), - 418: ('im_a_teapot', 'teapot', 'i_am_a_teapot'), - 421: ('misdirected_request',), - 422: ('unprocessable_entity', 'unprocessable'), - 423: ('locked',), - 424: ('failed_dependency', 'dependency'), - 425: ('unordered_collection', 'unordered'), - 426: ('upgrade_required', 'upgrade'), - 428: ('precondition_required', 'precondition'), - 429: ('too_many_requests', 'too_many'), - 431: ('header_fields_too_large', 'fields_too_large'), - 444: ('no_response', 'none'), - 449: ('retry_with', 'retry'), - 450: ('blocked_by_windows_parental_controls', 'parental_controls'), - 451: ('unavailable_for_legal_reasons', 'legal_reasons'), - 499: ('client_closed_request',), - - # Server Error. 
- 500: ('internal_server_error', 'server_error', '/o\\', '✗'), - 501: ('not_implemented',), - 502: ('bad_gateway',), - 503: ('service_unavailable', 'unavailable'), - 504: ('gateway_timeout',), - 505: ('http_version_not_supported', 'http_version'), - 506: ('variant_also_negotiates',), - 507: ('insufficient_storage',), - 509: ('bandwidth_limit_exceeded', 'bandwidth'), - 510: ('not_extended',), - 511: ('network_authentication_required', 'network_auth', 'network_authentication'), -} - -codes = LookupDict(name='status_codes') - -def _init(): - for code, titles in _codes.items(): - for title in titles: - setattr(codes, title, code) - if not title.startswith(('\\', '/')): - setattr(codes, title.upper(), code) - - def doc(code): - names = ', '.join('``%s``' % n for n in _codes[code]) - return '* %d: %s' % (code, names) - - global __doc__ - __doc__ = (__doc__ + '\n' + - '\n'.join(doc(code) for code in sorted(_codes)) - if __doc__ is not None else None) - -_init() diff --git a/solnlib/packages/requests/structures.py b/solnlib/packages/requests/structures.py deleted file mode 100644 index da930e28..00000000 --- a/solnlib/packages/requests/structures.py +++ /dev/null @@ -1,103 +0,0 @@ -# -*- coding: utf-8 -*- - -""" -requests.structures -~~~~~~~~~~~~~~~~~~~ - -Data structures that power Requests. -""" - -from .compat import OrderedDict, Mapping, MutableMapping - - -class CaseInsensitiveDict(MutableMapping): - """A case-insensitive ``dict``-like object. - - Implements all methods and operations of - ``MutableMapping`` as well as dict's ``copy``. Also - provides ``lower_items``. - - All keys are expected to be strings. The structure remembers the - case of the last key to be set, and ``iter(instance)``, - ``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()`` - will contain case-sensitive keys. 
However, querying and contains - testing is case insensitive:: - - cid = CaseInsensitiveDict() - cid['Accept'] = 'application/json' - cid['aCCEPT'] == 'application/json' # True - list(cid) == ['Accept'] # True - - For example, ``headers['content-encoding']`` will return the - value of a ``'Content-Encoding'`` response header, regardless - of how the header name was originally stored. - - If the constructor, ``.update``, or equality comparison - operations are given keys that have equal ``.lower()``s, the - behavior is undefined. - """ - - def __init__(self, data=None, **kwargs): - self._store = OrderedDict() - if data is None: - data = {} - self.update(data, **kwargs) - - def __setitem__(self, key, value): - # Use the lowercased key for lookups, but store the actual - # key alongside the value. - self._store[key.lower()] = (key, value) - - def __getitem__(self, key): - return self._store[key.lower()][1] - - def __delitem__(self, key): - del self._store[key.lower()] - - def __iter__(self): - return (casedkey for casedkey, mappedvalue in self._store.values()) - - def __len__(self): - return len(self._store) - - def lower_items(self): - """Like iteritems(), but with all lowercase keys.""" - return ( - (lowerkey, keyval[1]) - for (lowerkey, keyval) - in self._store.items() - ) - - def __eq__(self, other): - if isinstance(other, Mapping): - other = CaseInsensitiveDict(other) - else: - return NotImplemented - # Compare insensitively - return dict(self.lower_items()) == dict(other.lower_items()) - - # Copy is required - def copy(self): - return CaseInsensitiveDict(self._store.values()) - - def __repr__(self): - return str(dict(self.items())) - - -class LookupDict(dict): - """Dictionary lookup object.""" - - def __init__(self, name=None): - self.name = name - super(LookupDict, self).__init__() - - def __repr__(self): - return '' % (self.name) - - def __getitem__(self, key): - # We allow fall-through here, so values default to None - - return self.__dict__.get(key, None) - 
- def get(self, key, default=None): - return self.__dict__.get(key, default) diff --git a/solnlib/packages/requests/utils.py b/solnlib/packages/requests/utils.py deleted file mode 100644 index 8170a8d2..00000000 --- a/solnlib/packages/requests/utils.py +++ /dev/null @@ -1,977 +0,0 @@ -# -*- coding: utf-8 -*- - -""" -requests.utils -~~~~~~~~~~~~~~ - -This module provides utility functions that are used within Requests -that are also useful for external consumption. -""" - -import codecs -import contextlib -import io -import os -import re -import socket -import struct -import sys -import tempfile -import warnings -import zipfile - -from .__version__ import __version__ -from . import certs -# to_native_string is unused here, but imported here for backwards compatibility -from ._internal_utils import to_native_string -from .compat import parse_http_list as _parse_list_header -from .compat import ( - quote, urlparse, bytes, str, OrderedDict, unquote, getproxies, - proxy_bypass, urlunparse, basestring, integer_types, is_py3, - proxy_bypass_environment, getproxies_environment, Mapping) -from .cookies import cookiejar_from_dict -from .structures import CaseInsensitiveDict -from .exceptions import ( - InvalidURL, InvalidHeader, FileModeWarning, UnrewindableBodyError) - -NETRC_FILES = ('.netrc', '_netrc') - -DEFAULT_CA_BUNDLE_PATH = certs.where() - -DEFAULT_PORTS = {'http': 80, 'https': 443} - - -if sys.platform == 'win32': - # provide a proxy_bypass version on Windows without DNS lookups - - def proxy_bypass_registry(host): - try: - if is_py3: - import winreg - else: - import _winreg as winreg - except ImportError: - return False - - try: - internetSettings = winreg.OpenKey(winreg.HKEY_CURRENT_USER, - r'Software\Microsoft\Windows\CurrentVersion\Internet Settings') - # ProxyEnable could be REG_SZ or REG_DWORD, normalizing it - proxyEnable = int(winreg.QueryValueEx(internetSettings, - 'ProxyEnable')[0]) - # ProxyOverride is almost always a string - proxyOverride = 
winreg.QueryValueEx(internetSettings, - 'ProxyOverride')[0] - except OSError: - return False - if not proxyEnable or not proxyOverride: - return False - - # make a check value list from the registry entry: replace the - # '' string by the localhost entry and the corresponding - # canonical entry. - proxyOverride = proxyOverride.split(';') - # now check if we match one of the registry values. - for test in proxyOverride: - if test == '': - if '.' not in host: - return True - test = test.replace(".", r"\.") # mask dots - test = test.replace("*", r".*") # change glob sequence - test = test.replace("?", r".") # change glob char - if re.match(test, host, re.I): - return True - return False - - def proxy_bypass(host): # noqa - """Return True, if the host should be bypassed. - - Checks proxy settings gathered from the environment, if specified, - or the registry. - """ - if getproxies_environment(): - return proxy_bypass_environment(host) - else: - return proxy_bypass_registry(host) - - -def dict_to_sequence(d): - """Returns an internal sequence dictionary update.""" - - if hasattr(d, 'items'): - d = d.items() - - return d - - -def super_len(o): - total_length = None - current_position = 0 - - if hasattr(o, '__len__'): - total_length = len(o) - - elif hasattr(o, 'len'): - total_length = o.len - - elif hasattr(o, 'fileno'): - try: - fileno = o.fileno() - except io.UnsupportedOperation: - pass - else: - total_length = os.fstat(fileno).st_size - - # Having used fstat to determine the file length, we need to - # confirm that this file was opened up in binary mode. - if 'b' not in o.mode: - warnings.warn(( - "Requests has determined the content-length for this " - "request using the binary size of the file: however, the " - "file has been opened in text mode (i.e. without the 'b' " - "flag in the mode). This may lead to an incorrect " - "content-length. 
In Requests 3.0, support will be removed " - "for files in text mode."), - FileModeWarning - ) - - if hasattr(o, 'tell'): - try: - current_position = o.tell() - except (OSError, IOError): - # This can happen in some weird situations, such as when the file - # is actually a special file descriptor like stdin. In this - # instance, we don't know what the length is, so set it to zero and - # let requests chunk it instead. - if total_length is not None: - current_position = total_length - else: - if hasattr(o, 'seek') and total_length is None: - # StringIO and BytesIO have seek but no useable fileno - try: - # seek to end of file - o.seek(0, 2) - total_length = o.tell() - - # seek back to current position to support - # partially read file-like objects - o.seek(current_position or 0) - except (OSError, IOError): - total_length = 0 - - if total_length is None: - total_length = 0 - - return max(0, total_length - current_position) - - -def get_netrc_auth(url, raise_errors=False): - """Returns the Requests tuple auth for a given url from netrc.""" - - try: - from netrc import netrc, NetrcParseError - - netrc_path = None - - for f in NETRC_FILES: - try: - loc = os.path.expanduser('~/{}'.format(f)) - except KeyError: - # os.path.expanduser can fail when $HOME is undefined and - # getpwuid fails. See https://bugs.python.org/issue20164 & - # https://github.com/requests/requests/issues/1846 - return - - if os.path.exists(loc): - netrc_path = loc - break - - # Abort early if there isn't one. - if netrc_path is None: - return - - ri = urlparse(url) - - # Strip port numbers from netloc. This weird `if...encode`` dance is - # used for Python 3.2, which doesn't support unicode literals. 
- splitstr = b':' - if isinstance(url, str): - splitstr = splitstr.decode('ascii') - host = ri.netloc.split(splitstr)[0] - - try: - _netrc = netrc(netrc_path).authenticators(host) - if _netrc: - # Return with login / password - login_i = (0 if _netrc[0] else 1) - return (_netrc[login_i], _netrc[2]) - except (NetrcParseError, IOError): - # If there was a parsing error or a permissions issue reading the file, - # we'll just skip netrc auth unless explicitly asked to raise errors. - if raise_errors: - raise - - # AppEngine hackiness. - except (ImportError, AttributeError): - pass - - -def guess_filename(obj): - """Tries to guess the filename of the given object.""" - name = getattr(obj, 'name', None) - if (name and isinstance(name, basestring) and name[0] != '<' and - name[-1] != '>'): - return os.path.basename(name) - - -def extract_zipped_paths(path): - """Replace nonexistent paths that look like they refer to a member of a zip - archive with the location of an extracted copy of the target, or else - just return the provided path unchanged. 
- """ - if os.path.exists(path): - # this is already a valid path, no need to do anything further - return path - - # find the first valid part of the provided path and treat that as a zip archive - # assume the rest of the path is the name of a member in the archive - archive, member = os.path.split(path) - while archive and not os.path.exists(archive): - archive, prefix = os.path.split(archive) - member = '/'.join([prefix, member]) - - if not zipfile.is_zipfile(archive): - return path - - zip_file = zipfile.ZipFile(archive) - if member not in zip_file.namelist(): - return path - - # we have a valid zip archive and a valid member of that archive - tmp = tempfile.gettempdir() - extracted_path = os.path.join(tmp, *member.split('/')) - if not os.path.exists(extracted_path): - extracted_path = zip_file.extract(member, path=tmp) - - return extracted_path - - -def from_key_val_list(value): - """Take an object and test to see if it can be represented as a - dictionary. Unless it can not be represented as such, return an - OrderedDict, e.g., - - :: - - >>> from_key_val_list([('key', 'val')]) - OrderedDict([('key', 'val')]) - >>> from_key_val_list('string') - ValueError: cannot encode objects that are not 2-tuples - >>> from_key_val_list({'key': 'val'}) - OrderedDict([('key', 'val')]) - - :rtype: OrderedDict - """ - if value is None: - return None - - if isinstance(value, (str, bytes, bool, int)): - raise ValueError('cannot encode objects that are not 2-tuples') - - return OrderedDict(value) - - -def to_key_val_list(value): - """Take an object and test to see if it can be represented as a - dictionary. If it can be, return a list of tuples, e.g., - - :: - - >>> to_key_val_list([('key', 'val')]) - [('key', 'val')] - >>> to_key_val_list({'key': 'val'}) - [('key', 'val')] - >>> to_key_val_list('string') - ValueError: cannot encode objects that are not 2-tuples. 
- - :rtype: list - """ - if value is None: - return None - - if isinstance(value, (str, bytes, bool, int)): - raise ValueError('cannot encode objects that are not 2-tuples') - - if isinstance(value, Mapping): - value = value.items() - - return list(value) - - -# From mitsuhiko/werkzeug (used with permission). -def parse_list_header(value): - """Parse lists as described by RFC 2068 Section 2. - - In particular, parse comma-separated lists where the elements of - the list may include quoted-strings. A quoted-string could - contain a comma. A non-quoted string could have quotes in the - middle. Quotes are removed automatically after parsing. - - It basically works like :func:`parse_set_header` just that items - may appear multiple times and case sensitivity is preserved. - - The return value is a standard :class:`list`: - - >>> parse_list_header('token, "quoted value"') - ['token', 'quoted value'] - - To create a header from the :class:`list` again, use the - :func:`dump_header` function. - - :param value: a string with a list header. - :return: :class:`list` - :rtype: list - """ - result = [] - for item in _parse_list_header(value): - if item[:1] == item[-1:] == '"': - item = unquote_header_value(item[1:-1]) - result.append(item) - return result - - -# From mitsuhiko/werkzeug (used with permission). -def parse_dict_header(value): - """Parse lists of key, value pairs as described by RFC 2068 Section 2 and - convert them into a python dict: - - >>> d = parse_dict_header('foo="is a fish", bar="as well"') - >>> type(d) is dict - True - >>> sorted(d.items()) - [('bar', 'as well'), ('foo', 'is a fish')] - - If there is no value for a key it will be `None`: - - >>> parse_dict_header('key_without_value') - {'key_without_value': None} - - To create a header from the :class:`dict` again, use the - :func:`dump_header` function. - - :param value: a string with a dict header. 
- :return: :class:`dict` - :rtype: dict - """ - result = {} - for item in _parse_list_header(value): - if '=' not in item: - result[item] = None - continue - name, value = item.split('=', 1) - if value[:1] == value[-1:] == '"': - value = unquote_header_value(value[1:-1]) - result[name] = value - return result - - -# From mitsuhiko/werkzeug (used with permission). -def unquote_header_value(value, is_filename=False): - r"""Unquotes a header value. (Reversal of :func:`quote_header_value`). - This does not use the real unquoting but what browsers are actually - using for quoting. - - :param value: the header value to unquote. - :rtype: str - """ - if value and value[0] == value[-1] == '"': - # this is not the real unquoting, but fixing this so that the - # RFC is met will result in bugs with internet explorer and - # probably some other browsers as well. IE for example is - # uploading files with "C:\foo\bar.txt" as filename - value = value[1:-1] - - # if this is a filename and the starting characters look like - # a UNC path, then just return the value without quotes. Using the - # replace sequence below on a UNC path has the effect of turning - # the leading double slash into a single slash and then - # _fix_ie_filename() doesn't work correctly. See #458. - if not is_filename or value[:2] != '\\\\': - return value.replace('\\\\', '\\').replace('\\"', '"') - return value - - -def dict_from_cookiejar(cj): - """Returns a key/value dictionary from a CookieJar. - - :param cj: CookieJar object to extract cookies from. - :rtype: dict - """ - - cookie_dict = {} - - for cookie in cj: - cookie_dict[cookie.name] = cookie.value - - return cookie_dict - - -def add_dict_to_cookiejar(cj, cookie_dict): - """Returns a CookieJar from a key/value dictionary. - - :param cj: CookieJar to insert cookies into. - :param cookie_dict: Dict of key/values to insert into CookieJar. 
- :rtype: CookieJar - """ - - return cookiejar_from_dict(cookie_dict, cj) - - -def get_encodings_from_content(content): - """Returns encodings from given content string. - - :param content: bytestring to extract encodings from. - """ - warnings.warn(( - 'In requests 3.0, get_encodings_from_content will be removed. For ' - 'more information, please see the discussion on issue #2266. (This' - ' warning should only appear once.)'), - DeprecationWarning) - - charset_re = re.compile(r']', flags=re.I) - pragma_re = re.compile(r']', flags=re.I) - xml_re = re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]') - - return (charset_re.findall(content) + - pragma_re.findall(content) + - xml_re.findall(content)) - - -def _parse_content_type_header(header): - """Returns content type and parameters from given header - - :param header: string - :return: tuple containing content type and dictionary of - parameters - """ - - tokens = header.split(';') - content_type, params = tokens[0].strip(), tokens[1:] - params_dict = {} - items_to_strip = "\"' " - - for param in params: - param = param.strip() - if param: - key, value = param, True - index_of_equals = param.find("=") - if index_of_equals != -1: - key = param[:index_of_equals].strip(items_to_strip) - value = param[index_of_equals + 1:].strip(items_to_strip) - params_dict[key.lower()] = value - return content_type, params_dict - - -def get_encoding_from_headers(headers): - """Returns encodings from given HTTP Header Dict. - - :param headers: dictionary to extract encoding from. 
- :rtype: str - """ - - content_type = headers.get('content-type') - - if not content_type: - return None - - content_type, params = _parse_content_type_header(content_type) - - if 'charset' in params: - return params['charset'].strip("'\"") - - if 'text' in content_type: - return 'ISO-8859-1' - - -def stream_decode_response_unicode(iterator, r): - """Stream decodes a iterator.""" - - if r.encoding is None: - for item in iterator: - yield item - return - - decoder = codecs.getincrementaldecoder(r.encoding)(errors='replace') - for chunk in iterator: - rv = decoder.decode(chunk) - if rv: - yield rv - rv = decoder.decode(b'', final=True) - if rv: - yield rv - - -def iter_slices(string, slice_length): - """Iterate over slices of a string.""" - pos = 0 - if slice_length is None or slice_length <= 0: - slice_length = len(string) - while pos < len(string): - yield string[pos:pos + slice_length] - pos += slice_length - - -def get_unicode_from_response(r): - """Returns the requested content back in unicode. - - :param r: Response object to get unicode content from. - - Tried: - - 1. charset from content-type - 2. fall back and replace all unicode characters - - :rtype: str - """ - warnings.warn(( - 'In requests 3.0, get_unicode_from_response will be removed. For ' - 'more information, please see the discussion on issue #2266. 
(This' - ' warning should only appear once.)'), - DeprecationWarning) - - tried_encodings = [] - - # Try charset from content-type - encoding = get_encoding_from_headers(r.headers) - - if encoding: - try: - return str(r.content, encoding) - except UnicodeError: - tried_encodings.append(encoding) - - # Fall back: - try: - return str(r.content, encoding, errors='replace') - except TypeError: - return r.content - - -# The unreserved URI characters (RFC 3986) -UNRESERVED_SET = frozenset( - "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + "0123456789-._~") - - -def unquote_unreserved(uri): - """Un-escape any percent-escape sequences in a URI that are unreserved - characters. This leaves all reserved, illegal and non-ASCII bytes encoded. - - :rtype: str - """ - parts = uri.split('%') - for i in range(1, len(parts)): - h = parts[i][0:2] - if len(h) == 2 and h.isalnum(): - try: - c = chr(int(h, 16)) - except ValueError: - raise InvalidURL("Invalid percent-escape sequence: '%s'" % h) - - if c in UNRESERVED_SET: - parts[i] = c + parts[i][2:] - else: - parts[i] = '%' + parts[i] - else: - parts[i] = '%' + parts[i] - return ''.join(parts) - - -def requote_uri(uri): - """Re-quote the given URI. - - This function passes the given URI through an unquote/quote cycle to - ensure that it is fully and consistently quoted. - - :rtype: str - """ - safe_with_percent = "!#$%&'()*+,/:;=?@[]~" - safe_without_percent = "!#$&'()*+,/:;=?@[]~" - try: - # Unquote only the unreserved characters - # Then quote only illegal characters (do not quote reserved, - # unreserved, or '%') - return quote(unquote_unreserved(uri), safe=safe_with_percent) - except InvalidURL: - # We couldn't unquote the given URI, so let's try quoting it, but - # there may be unquoted '%'s in the URI. We need to make sure they're - # properly quoted so they do not cause issues elsewhere. 
- return quote(uri, safe=safe_without_percent) - - -def address_in_network(ip, net): - """This function allows you to check if an IP belongs to a network subnet - - Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24 - returns False if ip = 192.168.1.1 and net = 192.168.100.0/24 - - :rtype: bool - """ - ipaddr = struct.unpack('=L', socket.inet_aton(ip))[0] - netaddr, bits = net.split('/') - netmask = struct.unpack('=L', socket.inet_aton(dotted_netmask(int(bits))))[0] - network = struct.unpack('=L', socket.inet_aton(netaddr))[0] & netmask - return (ipaddr & netmask) == (network & netmask) - - -def dotted_netmask(mask): - """Converts mask from /xx format to xxx.xxx.xxx.xxx - - Example: if mask is 24 function returns 255.255.255.0 - - :rtype: str - """ - bits = 0xffffffff ^ (1 << 32 - mask) - 1 - return socket.inet_ntoa(struct.pack('>I', bits)) - - -def is_ipv4_address(string_ip): - """ - :rtype: bool - """ - try: - socket.inet_aton(string_ip) - except socket.error: - return False - return True - - -def is_valid_cidr(string_network): - """ - Very simple check of the cidr format in no_proxy variable. - - :rtype: bool - """ - if string_network.count('/') == 1: - try: - mask = int(string_network.split('/')[1]) - except ValueError: - return False - - if mask < 1 or mask > 32: - return False - - try: - socket.inet_aton(string_network.split('/')[0]) - except socket.error: - return False - else: - return False - return True - - -@contextlib.contextmanager -def set_environ(env_name, value): - """Set the environment variable 'env_name' to 'value' - - Save previous value, yield, and then restore the previous value stored in - the environment variable 'env_name'. 
- - If 'value' is None, do nothing""" - value_changed = value is not None - if value_changed: - old_value = os.environ.get(env_name) - os.environ[env_name] = value - try: - yield - finally: - if value_changed: - if old_value is None: - del os.environ[env_name] - else: - os.environ[env_name] = old_value - - -def should_bypass_proxies(url, no_proxy): - """ - Returns whether we should bypass proxies or not. - - :rtype: bool - """ - # Prioritize lowercase environment variables over uppercase - # to keep a consistent behaviour with other http projects (curl, wget). - get_proxy = lambda k: os.environ.get(k) or os.environ.get(k.upper()) - - # First check whether no_proxy is defined. If it is, check that the URL - # we're getting isn't in the no_proxy list. - no_proxy_arg = no_proxy - if no_proxy is None: - no_proxy = get_proxy('no_proxy') - parsed = urlparse(url) - - if parsed.hostname is None: - # URLs don't always have hostnames, e.g. file:/// urls. - return True - - if no_proxy: - # We need to check whether we match here. We need to see if we match - # the end of the hostname, both with and without the port. - no_proxy = ( - host for host in no_proxy.replace(' ', '').split(',') if host - ) - - if is_ipv4_address(parsed.hostname): - for proxy_ip in no_proxy: - if is_valid_cidr(proxy_ip): - if address_in_network(parsed.hostname, proxy_ip): - return True - elif parsed.hostname == proxy_ip: - # If no_proxy ip was defined in plain IP notation instead of cidr notation & - # matches the IP of the index - return True - else: - host_with_port = parsed.hostname - if parsed.port: - host_with_port += ':{}'.format(parsed.port) - - for host in no_proxy: - if parsed.hostname.endswith(host) or host_with_port.endswith(host): - # The URL does match something in no_proxy, so we don't want - # to apply the proxies on this URL. - return True - - with set_environ('no_proxy', no_proxy_arg): - # parsed.hostname can be `None` in cases such as a file URI. 
- try: - bypass = proxy_bypass(parsed.hostname) - except (TypeError, socket.gaierror): - bypass = False - - if bypass: - return True - - return False - - -def get_environ_proxies(url, no_proxy=None): - """ - Return a dict of environment proxies. - - :rtype: dict - """ - if should_bypass_proxies(url, no_proxy=no_proxy): - return {} - else: - return getproxies() - - -def select_proxy(url, proxies): - """Select a proxy for the url, if applicable. - - :param url: The url being for the request - :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs - """ - proxies = proxies or {} - urlparts = urlparse(url) - if urlparts.hostname is None: - return proxies.get(urlparts.scheme, proxies.get('all')) - - proxy_keys = [ - urlparts.scheme + '://' + urlparts.hostname, - urlparts.scheme, - 'all://' + urlparts.hostname, - 'all', - ] - proxy = None - for proxy_key in proxy_keys: - if proxy_key in proxies: - proxy = proxies[proxy_key] - break - - return proxy - - -def default_user_agent(name="python-requests"): - """ - Return a string representing the default user agent. - - :rtype: str - """ - return '%s/%s' % (name, __version__) - - -def default_headers(): - """ - :rtype: requests.structures.CaseInsensitiveDict - """ - return CaseInsensitiveDict({ - 'User-Agent': default_user_agent(), - 'Accept-Encoding': ', '.join(('gzip', 'deflate')), - 'Accept': '*/*', - 'Connection': 'keep-alive', - }) - - -def parse_header_links(value): - """Return a list of parsed link headers proxies. - - i.e. 
Link: ; rel=front; type="image/jpeg",; rel=back;type="image/jpeg" - - :rtype: list - """ - - links = [] - - replace_chars = ' \'"' - - value = value.strip(replace_chars) - if not value: - return links - - for val in re.split(', *<', value): - try: - url, params = val.split(';', 1) - except ValueError: - url, params = val, '' - - link = {'url': url.strip('<> \'"')} - - for param in params.split(';'): - try: - key, value = param.split('=') - except ValueError: - break - - link[key.strip(replace_chars)] = value.strip(replace_chars) - - links.append(link) - - return links - - -# Null bytes; no need to recreate these on each call to guess_json_utf -_null = '\x00'.encode('ascii') # encoding to ASCII for Python 3 -_null2 = _null * 2 -_null3 = _null * 3 - - -def guess_json_utf(data): - """ - :rtype: str - """ - # JSON always starts with two ASCII characters, so detection is as - # easy as counting the nulls and from their location and count - # determine the encoding. Also detect a BOM, if present. - sample = data[:4] - if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE): - return 'utf-32' # BOM included - if sample[:3] == codecs.BOM_UTF8: - return 'utf-8-sig' # BOM included, MS style (discouraged) - if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE): - return 'utf-16' # BOM included - nullcount = sample.count(_null) - if nullcount == 0: - return 'utf-8' - if nullcount == 2: - if sample[::2] == _null2: # 1st and 3rd are null - return 'utf-16-be' - if sample[1::2] == _null2: # 2nd and 4th are null - return 'utf-16-le' - # Did not detect 2 valid UTF-16 ascii-range characters - if nullcount == 3: - if sample[:3] == _null3: - return 'utf-32-be' - if sample[1:] == _null3: - return 'utf-32-le' - # Did not detect a valid UTF-32 ascii-range character - return None - - -def prepend_scheme_if_needed(url, new_scheme): - """Given a URL that may or may not have a scheme, prepend the given scheme. - Does not replace a present scheme with the one provided as an argument. 
- - :rtype: str - """ - scheme, netloc, path, params, query, fragment = urlparse(url, new_scheme) - - # urlparse is a finicky beast, and sometimes decides that there isn't a - # netloc present. Assume that it's being over-cautious, and switch netloc - # and path if urlparse decided there was no netloc. - if not netloc: - netloc, path = path, netloc - - return urlunparse((scheme, netloc, path, params, query, fragment)) - - -def get_auth_from_url(url): - """Given a url with authentication components, extract them into a tuple of - username,password. - - :rtype: (str,str) - """ - parsed = urlparse(url) - - try: - auth = (unquote(parsed.username), unquote(parsed.password)) - except (AttributeError, TypeError): - auth = ('', '') - - return auth - - -# Moved outside of function to avoid recompile every call -_CLEAN_HEADER_REGEX_BYTE = re.compile(b'^\\S[^\\r\\n]*$|^$') -_CLEAN_HEADER_REGEX_STR = re.compile(r'^\S[^\r\n]*$|^$') - - -def check_header_validity(header): - """Verifies that header value is a string which doesn't contain - leading whitespace or return characters. This prevents unintended - header injection. - - :param header: tuple, in the format (name, value). - """ - name, value = header - - if isinstance(value, bytes): - pat = _CLEAN_HEADER_REGEX_BYTE - else: - pat = _CLEAN_HEADER_REGEX_STR - try: - if not pat.match(value): - raise InvalidHeader("Invalid return character or leading space in header: %s" % name) - except TypeError: - raise InvalidHeader("Value for header {%s: %s} must be of type str or " - "bytes, not %s" % (name, value, type(value))) - - -def urldefragauth(url): - """ - Given a url remove the fragment and the authentication part. 
- - :rtype: str - """ - scheme, netloc, path, params, query, fragment = urlparse(url) - - # see func:`prepend_scheme_if_needed` - if not netloc: - netloc, path = path, netloc - - netloc = netloc.rsplit('@', 1)[-1] - - return urlunparse((scheme, netloc, path, params, query, '')) - - -def rewind_body(prepared_request): - """Move file pointer back to its recorded starting position - so it can be read again on redirect. - """ - body_seek = getattr(prepared_request.body, 'seek', None) - if body_seek is not None and isinstance(prepared_request._body_position, integer_types): - try: - body_seek(prepared_request._body_position) - except (IOError, OSError): - raise UnrewindableBodyError("An error occurred when rewinding request " - "body for redirect.") - else: - raise UnrewindableBodyError("Unable to rewind request body for redirect.") diff --git a/solnlib/packages/schematics/LICENSE b/solnlib/packages/schematics/LICENSE deleted file mode 100644 index 54ff6b41..00000000 --- a/solnlib/packages/schematics/LICENSE +++ /dev/null @@ -1,29 +0,0 @@ -(The BSD License) - -Copyright (c) 2013–2016 Schematics Authors; see AUTHORS for details -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - - 1. Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - - 3. Neither the name of Schematics nor the names of its contributors may be - used to endorse or promote products derived from this software without - specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/solnlib/packages/schematics/__init__.py b/solnlib/packages/schematics/__init__.py deleted file mode 100644 index 439f4f4c..00000000 --- a/solnlib/packages/schematics/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# -*- coding: utf-8 -*- - -__version__ = '2.1.0' - -# TODO: remove deprecated API -from . import deprecated -deprecated.patch_all() - -from . import types -from .models import Model, ModelMeta - -types.compound.Model = Model -types.compound.ModelMeta = ModelMeta - -__all__ = ['Model'] diff --git a/solnlib/packages/schematics/common.py b/solnlib/packages/schematics/common.py deleted file mode 100644 index ac12157f..00000000 --- a/solnlib/packages/schematics/common.py +++ /dev/null @@ -1,26 +0,0 @@ -""" -Define constants and expose the compatibility overrides to all modules. 
-""" - -from __future__ import unicode_literals, absolute_import - -from .compat import * -from .compat import __all__ as compat_exports -from .util import Constant - - -NATIVE = Constant('NATIVE', 0) -PRIMITIVE = Constant('PRIMITIVE', 1) - -DROP = Constant('DROP', 0) -NONEMPTY = Constant('NONEMPTY', 1) -NOT_NONE = Constant('NOT_NONE', 2) -DEFAULT = Constant('DEFAULT', 10) -ALL = Constant('ALL', 99) - - -__all__ = ['NATIVE', 'PRIMITIVE', - 'DROP', 'NONEMPTY', 'NOT_NONE', 'DEFAULT', 'ALL'] + compat_exports -if PY2: - # Python 2 names cannot be unicode - __all__ = [n.encode('ascii') for n in __all__] diff --git a/solnlib/packages/schematics/compat.py b/solnlib/packages/schematics/compat.py deleted file mode 100644 index 7d7c1be7..00000000 --- a/solnlib/packages/schematics/compat.py +++ /dev/null @@ -1,91 +0,0 @@ -# pylint: skip-file - -from __future__ import absolute_import - -import functools -import operator -import sys - - -__all__ = ['PY2', 'PY3', 'string_type', 'iteritems', 'metaclass', 'py_native_string', 'reraise', 'str_compat'] - - -PY2 = sys.version_info[0] == 2 -PY3 = sys.version_info[0] == 3 - - -if PY2: - __all__ += ['bytes', 'str', 'map', 'zip', 'range'] - bytes = str - str = unicode - string_type = basestring - range = xrange - from itertools import imap as map - from itertools import izip as zip - iteritems = operator.methodcaller('iteritems') - itervalues = operator.methodcaller('itervalues') - - # reraise code taken from werzeug BSD license at https://github.com/pallets/werkzeug/blob/master/LICENSE - exec('def reraise(tp, value, tb=None):\n raise tp, value, tb') -else: - string_type = str - iteritems = operator.methodcaller('items') - itervalues = operator.methodcaller('values') - - # reraise code taken from werzeug BSD license at https://github.com/pallets/werkzeug/blob/master/LICENSE - def reraise(tp, value, tb=None): - if value.__traceback__ is not tb: - raise value.with_traceback(tb) - raise value - - -def metaclass(metaclass): - def 
make_class(cls): - attrs = cls.__dict__.copy() - if attrs.get('__dict__'): - del attrs['__dict__'] - del attrs['__weakref__'] - return metaclass(cls.__name__, cls.__bases__, attrs) - return make_class - - -def py_native_string(source): - """ - Converts Unicode strings to bytestrings on Python 2. The intended usage is to - wrap a function or a string in cases where Python 2 expects a native string. - """ - if PY2: - if isinstance(source, str): - return source.encode('ascii') - elif callable(source): - @functools.wraps(source) - def new_func(*args, **kwargs): - rv = source(*args, **kwargs) - if isinstance(rv, str): - rv = rv.encode('unicode-escape') - return rv - return new_func - return source - - -def str_compat(class_): - """ - On Python 2, patches the ``__str__`` and ``__repr__`` methods on the given class - so that the class can be written for Python 3 and Unicode. - """ - if PY2: - if '__str__' in class_.__dict__ and '__unicode__' not in class_.__dict__: - class_.__unicode__ = class_.__str__ - class_.__str__ = py_native_string(class_.__unicode__) - return class_ - - -def repr_compat(class_): - if PY2: - if '__repr__' in class_.__dict__: - class_.__repr__ = py_native_string(class_.__repr__) - return class_ - - -def _dict(mapping): - return dict((key, mapping[key]) for key in mapping) diff --git a/solnlib/packages/schematics/contrib/__init__.py b/solnlib/packages/schematics/contrib/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/solnlib/packages/schematics/contrib/enum_type.py b/solnlib/packages/schematics/contrib/enum_type.py deleted file mode 100644 index 36bf4582..00000000 --- a/solnlib/packages/schematics/contrib/enum_type.py +++ /dev/null @@ -1,79 +0,0 @@ -"""Type supporting native Python3 enum. It depends either on Py3.4+ or e.g. enum34. 
-""" -from __future__ import unicode_literals, absolute_import - -try: - from enum import Enum -except ImportError: - pass - -from ..exceptions import ConversionError -from ..translator import _ -from ..types import BaseType -from ..compat import string_type - - -class EnumType(BaseType): - """A field type allowing to use native enums as values. - Restricts values to enum members and (optionally) enum values. - `use_values` - if set to True allows do assign enumerated values to the field. - - >>> import enum - >>> class E(enum.Enum): - ... A = 1 - ... B = 2 - >>> from schematics import Model - >>> class AModel(Model): - ... foo = EnumType(E) - >>> a = AModel() - >>> a.foo = E.A - >>> a.foo.value == 1 - """ - MESSAGES = { - 'convert': _("Couldn't interpret '{0}' as member of {1}."), - } - - def __init__(self, enum, use_values=False, **kwargs): - """ - :param enum: Enum class to which restrict values assigned to the field. - :param use_values: If true, also values of the enum (right-hand side) can be assigned here. - Other args are passed to superclass. 
- """ - self._enum_class = enum - self._use_values = use_values - super(EnumType, self).__init__(**kwargs) - - def to_native(self, value, context=None): - if isinstance(value, self._enum_class): - return value - else: - by_name = self._find_by_name(value) - if by_name: - return by_name - by_value = self._find_by_value(value) - if by_value: - return by_value - raise ConversionError(self.messages['convert'].format(value, self._enum_class)) - - def _find_by_name(self, value): - if isinstance(value, string_type): - try: - return self._enum_class[value] - except KeyError: - pass - - def _find_by_value(self, value): - if not self._use_values: - return - for member in self._enum_class: - if member.value == value: - return member - - def to_primitive(self, value, context=None): - if isinstance(value, Enum): - if self._use_values: - return value.value - else: - return value.name - else: - return str(value) diff --git a/solnlib/packages/schematics/contrib/machine.py b/solnlib/packages/schematics/contrib/machine.py deleted file mode 100644 index 0d09aee1..00000000 --- a/solnlib/packages/schematics/contrib/machine.py +++ /dev/null @@ -1,64 +0,0 @@ - -import functools - -from ..transforms import convert, to_primitive -from ..validate import validate - - -def _callback_wrap(data, schema, transform, *args, **kwargs): - return transform(schema, data, *args, **kwargs) - - -class Machine(object): - """ A poor man's state machine. 
""" - - states = ('raw', 'converted', 'validated', 'serialized') - transitions = ( - {'trigger': 'init', 'to': 'raw'}, - {'trigger': 'convert', 'from': 'raw', 'to': 'converted'}, - {'trigger': 'validate', 'from': 'converted', 'to': 'validated'}, - {'trigger': 'serialize', 'from': 'validated', 'to': 'serialized'} - ) - callbacks = { - 'convert': functools.partial(_callback_wrap, transform=convert, partial=True), - 'validate': functools.partial(_callback_wrap, transform=validate, convert=False, partial=False), - 'serialize': functools.partial(_callback_wrap, transform=to_primitive) - } - - def __init__(self, data, *args): - self.state = self._transition(trigger='init')['to'] - self.data = data - self.args = args - - def __getattr__(self, name): - return functools.partial(self.trigger, name) - - def _transition(self, trigger=None, src_state=None, dst_state=None): - try: - return next(self._transitions(trigger=trigger, src_state=src_state, - dst_state=dst_state)) - except StopIteration: - return None - - def _transitions(self, trigger=None, src_state=None, dst_state=None): - def pred(d, key, var): - return d.get(key) == var if var is not None else True - return (d for d in self.transitions if - pred(d, 'trigger', trigger) and - pred(d, 'from', src_state) and - pred(d, 'to', dst_state) - ) - - def trigger(self, trigger): - transition = self._transition(trigger=trigger, src_state=self.state) - if not transition: - raise AttributeError(trigger) - callback = self.callbacks.get(trigger) - self.data = callback(self.data, *self.args) if callback else self.data - self.state = transition['to'] - - def can(self, state): - return bool(self._transition(src_state=self.state, dst_state=state)) - - def cannot(self, state): - return not self.can(state) diff --git a/solnlib/packages/schematics/contrib/mongo.py b/solnlib/packages/schematics/contrib/mongo.py deleted file mode 100644 index 650b3497..00000000 --- a/solnlib/packages/schematics/contrib/mongo.py +++ /dev/null @@ -1,47 +0,0 @@ 
-"""This module contains fields that depend on importing `bson`. `bson` is -a part of the pymongo distribution. -""" - -from __future__ import unicode_literals, absolute_import - -import bson - -from ..common import * -from ..translator import _ -from ..types import BaseType -from ..exceptions import ConversionError - -__all__ = ['ObjectIdType'] - - -class ObjectIdType(BaseType): - - """An field wrapper around MongoDB ObjectIds. It is correct to say they're - bson fields, but I am unaware of bson being used outside MongoDB. - - `auto_fill` is disabled by default for ObjectIdType's as they are - typically obtained after a successful save to Mongo. - """ - - MESSAGES = { - 'convert': _("Couldn't interpret value as an ObjectId."), - } - - def __init__(self, auto_fill=False, **kwargs): - self.auto_fill = auto_fill - super(ObjectIdType, self).__init__(**kwargs) - - def to_native(self, value, context=None): - if not isinstance(value, bson.objectid.ObjectId): - try: - value = bson.objectid.ObjectId(str(value)) - except bson.objectid.InvalidId: - raise ConversionError(self.messages['convert']) - return value - - def to_primitive(self, value, context=None): - return str(value) - -if PY2: - # Python 2 names cannot be unicode - __all__ = [n.encode('ascii') for n in __all__] diff --git a/solnlib/packages/schematics/datastructures.py b/solnlib/packages/schematics/datastructures.py deleted file mode 100644 index 2a061f11..00000000 --- a/solnlib/packages/schematics/datastructures.py +++ /dev/null @@ -1,341 +0,0 @@ -# -*- coding: utf-8 -*- -# pylint: skip-file - -from __future__ import unicode_literals, absolute_import -from collections import Mapping, Sequence - -from .compat import * - -__all__ = [] - - -class DataObject(object): - """ - An object for holding data as attributes. 
- - ``DataObject`` can be instantiated like ``dict``:: - - >>> d = DataObject({'one': 1, 'two': 2}, three=3) - >>> d.__dict__ - {'one': 1, 'two': 2, 'three': 3} - - Attributes are accessible via the regular dot notation (``d.x``) as well as - the subscription syntax (``d['x']``):: - - >>> d.one == d['one'] == 1 - True - - To convert a ``DataObject`` into a dictionary, use ``d._to_dict()``. - - ``DataObject`` implements the following collection-like operations: - - * iteration through attributes as name-value pairs - * ``'x' in d`` for membership tests - * ``len(d)`` to get the number of attributes - - Additionally, the following methods are equivalent to their ``dict` counterparts: - ``_clear``, ``_get``, ``_keys``, ``_items``, ``_pop``, ``_setdefault``, ``_update``. - - An advantage of ``DataObject`` over ``dict` subclasses is that every method name - in ``DataObject`` begins with an underscore, so attributes like ``"update"`` or - ``"values"`` are valid. - """ - - def __init__(self, *args, **kwargs): - source = args[0] if args else {} - self._update(source, **kwargs) - - def __repr__(self): - return self.__class__.__name__ + '(%s)' % repr(self.__dict__) - - def _copy(self): - return self.__class__(self) - - __copy__ = _copy - - def __eq__(self, other): - return isinstance(other, DataObject) and self.__dict__ == other.__dict__ - - def __iter__(self): - return iter(self.__dict__.items()) - - def _update(self, source=None, **kwargs): - if isinstance(source, DataObject): - source = source.__dict__ - self.__dict__.update(source, **kwargs) - - def _setdefaults(self, source): - if isinstance(source, dict): - source = source.items() - for name, value in source: - self._setdefault(name, value) - return self - - def _to_dict(self): - d = dict(self.__dict__) - for k, v in d.items(): - if isinstance(v, DataObject): - d[k] = v._to_dict() - return d - - def __setitem__(self, key, value): self.__dict__[key] = value - def __getitem__(self, key): return self.__dict__[key] - def 
__delitem__(self, key): del self.__dict__[key] - def __len__(self): return len(self.__dict__) - def __contains__(self, key): return key in self.__dict__ - - def _clear(self): return self.__dict__.clear() - def _get(self, *args): return self.__dict__.get(*args) - def _items(self): return self.__dict__.items() - def _keys(self): return self.__dict__.keys() - def _pop(self, *args): return self.__dict__.pop(*args) - def _setdefault(self, *args): return self.__dict__.setdefault(*args) - - - -class Context(DataObject): - - _fields = () - - def __init__(self, *args, **kwargs): - super(Context, self).__init__(*args, **kwargs) - if self._fields: - unknowns = [name for name in self._keys() if name not in self._fields] - if unknowns: - raise ValueError('Unexpected field names: %r' % unknowns) - - @classmethod - def _new(cls, *args, **kwargs): - if len(args) > len(cls._fields): - raise TypeError('Too many positional arguments') - return cls(zip(cls._fields, args), **kwargs) - - @classmethod - def _make(cls, obj): - if obj is None: - return cls() - elif isinstance(obj, cls): - return obj - else: - return cls(obj) - - def __setattr__(self, name, value): - if name in self: - raise TypeError("Field '{0}' already set".format(name)) - super(Context, self).__setattr__(name, value) - - def _branch(self, **kwargs): - if not kwargs: - return self - items = dict(((k, v) for k, v in kwargs.items() if v is not None and v != self[k])) - if items: - return self.__class__(self, **items) - else: - return self - - def _setdefaults(self, source): - if not isinstance(source, dict): - source = source.__dict__ - new_values = source.copy() - new_values.update(self.__dict__) - self.__dict__.update(new_values) - return self - - def __bool__(self): - return True - - __nonzero__ = __bool__ - - -try: - from collections import ChainMap -except ImportError: - """ Code extracted from CPython 3 stdlib: - 
https://github.com/python/cpython/blob/85f2c89ee8223590ba08e3aea97476f76c7e3734/Lib/collections/__init__.py#L852 - - """ - from collections import MutableMapping - - class ChainMap(MutableMapping): - ''' A ChainMap groups multiple dicts (or other mappings) together - to create a single, updateable view. - The underlying mappings are stored in a list. That list is public and can - be accessed or updated using the *maps* attribute. There is no other - state. - Lookups search the underlying mappings successively until a key is found. - In contrast, writes, updates, and deletions only operate on the first - mapping. - ''' - - def __init__(self, *maps): - '''Initialize a ChainMap by setting *maps* to the given mappings. - If no mappings are provided, a single empty dictionary is used. - ''' - self.maps = list(maps) or [{}] # always at least one map - - def __missing__(self, key): - raise KeyError(key) - - def __getitem__(self, key): - for mapping in self.maps: - try: - return mapping[key] # can't use 'key in mapping' with defaultdict - except KeyError: - pass - return self.__missing__(key) # support subclasses that define __missing__ - - def get(self, key, default=None): - return self[key] if key in self else default - - def __len__(self): - return len(set().union(*self.maps)) # reuses stored hash values if possible - - def __iter__(self): - return iter(set().union(*self.maps)) - - def __contains__(self, key): - return any(key in m for m in self.maps) - - def __bool__(self): - return any(self.maps) - - # @_recursive_repr() - def __repr__(self): - return '{0.__class__.__name__}({1})'.format( - self, ', '.join(map(repr, self.maps))) - - @classmethod - def fromkeys(cls, iterable, *args): - 'Create a ChainMap with a single dict created from the iterable.' 
- return cls(dict.fromkeys(iterable, *args)) - - def copy(self): - 'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]' - return self.__class__(self.maps[0].copy(), *self.maps[1:]) - - __copy__ = copy - - def new_child(self, m=None): # like Django's Context.push() - '''New ChainMap with a new map followed by all previous maps. - If no map is provided, an empty dict is used. - ''' - if m is None: - m = {} - return self.__class__(m, *self.maps) - - @property - def parents(self): # like Django's Context.pop() - 'New ChainMap from maps[1:].' - return self.__class__(*self.maps[1:]) - - def __setitem__(self, key, value): - self.maps[0][key] = value - - def __delitem__(self, key): - try: - del self.maps[0][key] - except KeyError: - raise KeyError('Key not found in the first mapping: {!r}'.format(key)) - - def popitem(self): - 'Remove and return an item pair from maps[0]. Raise KeyError is maps[0] is empty.' - try: - return self.maps[0].popitem() - except KeyError: - raise KeyError('No keys found in the first mapping.') - - def pop(self, key, *args): - 'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].' - try: - return self.maps[0].pop(key, *args) - except KeyError: - raise KeyError('Key not found in the first mapping: {!r}'.format(key)) - - def clear(self): - 'Clear maps[0], leaving maps[1:] intact.' 
- self.maps[0].clear() - -try: - from types import MappingProxyType -except ImportError: - from collections import Mapping - - class MappingProxyType(Mapping): - def __init__(self, map): - self._map = map - - def __len__(self): - return len(self._map) - - def __iter__(self): - return iter(self._map) - - def __getitem__(self, key): - return self._map[key] - - def __repr__(self): - return '{0.__class__.__name__}({1})'.format(self, self._map) - - -class FrozenDict(Mapping): - - def __init__(self, value): - self._value = dict(value) - - def __getitem__(self, key): - return self._value[key] - - def __iter__(self): - return iter(self._value) - - def __len__(self): - return len(self._value) - - def __hash__(self): - if not hasattr(self, "_hash"): - _hash = 0 - for k, v in self._value.items(): - _hash ^= hash(k) - _hash ^= hash(v) - self._hash = _hash - return self._hash - - def __repr__(self): - return repr(self._value) - - def __str__(self): - return str(self._value) - - -class FrozenList(Sequence): - - def __init__(self, value): - self._list = list(value) - - def __getitem__(self, index): - return self._list[index] - - def __len__(self): - return len(self._list) - - def __hash__(self): - if not hasattr(self, "_hash"): - _hash = 0 - for e in self._list: - _hash ^= hash(e) - self._hash = _hash - return self._hash - - def __repr__(self): - return repr(self._list) - - def __str__(self): - return str(self._list) - - def __eq__(self, other): - if len(self) != len(other): - return False - for i in range(len(self)): - if self[i] != other[i]: - return False - return True diff --git a/solnlib/packages/schematics/deprecated.py b/solnlib/packages/schematics/deprecated.py deleted file mode 100644 index 4f877669..00000000 --- a/solnlib/packages/schematics/deprecated.py +++ /dev/null @@ -1,154 +0,0 @@ - -import warnings -import functools - -from collections import OrderedDict - -from .compat import iteritems -from .types.serializable import Serializable -from . 
import transforms - - -class SchematicsDeprecationWarning(DeprecationWarning): - pass - - -def deprecated(func): - @functools.wraps(func) - def new_func(*args, **kwargs): - warnings.warn( - "Call to deprecated function {0}.".format(func.__name__), - category=SchematicsDeprecationWarning, - stacklevel=2 - ) - return func(*args, **kwargs) - return new_func - - -class SchemaCompatibilityMixin(object): - """Compatibility layer for previous deprecated Schematics Model API.""" - - @property - @deprecated - def __name__(self): - return self.name - - @property - @deprecated - def _options(self): - return self.options - - @property - @deprecated - def _validator_functions(self): - return self.validators - - @property - @deprecated - def _fields(self): - return self.fields - - @property - @deprecated - def _valid_input_keys(self): - return self.valid_input_keys - - @property - @deprecated - def _serializables(self): - return OrderedDict((k, t) for k, t in iteritems(self.fields) if isinstance(t, Serializable)) - - -class class_property(property): - def __get__(self, instance, type=None): - if instance is None: - return super(class_property, self).__get__(type, type) - return super(class_property, self).__get__(instance, type) - - -class ModelCompatibilityMixin(object): - """Compatibility layer for previous deprecated Schematics Model API.""" - - @class_property - @deprecated - def _valid_input_keys(cls): - return cls._schema.valid_input_keys - - @class_property - @deprecated - def _options(cls): - return cls._schema.options - - @class_property - @deprecated - def fields(cls): - return cls._schema.fields - - @class_property - @deprecated - def _fields(cls): - return cls._schema.fields - - @class_property - @deprecated - def _field_list(cls): - return list(iteritems(cls._schema.fields)) - - @class_property - @deprecated - def _serializables(cls): - return cls._schema._serializables - - @class_property - @deprecated - def _validator_functions(cls): - return 
cls._schema.validators - - @classmethod - @deprecated - def convert(cls, raw_data, context=None, **kw): - return transforms.convert(cls._schema, raw_data, oo=True, - context=context, **kw) - - -class BaseErrorV1Mixin(object): - - @property - @deprecated - def messages(self): - """ an alias for errors, provided for compatibility with V1. """ - return self.errors - - -def patch_models(): - global models_Model - from . import schema - from . import models - models_Model = models.Model - class Model(ModelCompatibilityMixin, models.Model): - __doc__ = models.Model.__doc__ - models.Model = Model - models.ModelOptions = schema.SchemaOptions # deprecated alias - - -def patch_schema(): - global schema_Schema - from . import schema - schema_Schema = schema.Schema - class Schema(SchemaCompatibilityMixin, schema.Schema): - __doc__ = schema.Schema.__doc__ - schema.Schema = Schema - - -def patch_exceptions(): - from . import exceptions - exceptions.BaseError.messages = BaseErrorV1Mixin.messages - exceptions.ModelConversionError = exceptions.DataError # v1 - exceptions.ModelValidationError = exceptions.DataError # v1 - exceptions.StopValidation = exceptions.StopValidationError # v1 - - -def patch_all(): - patch_schema() - patch_models() - patch_exceptions() diff --git a/solnlib/packages/schematics/exceptions.py b/solnlib/packages/schematics/exceptions.py deleted file mode 100644 index 51b91933..00000000 --- a/solnlib/packages/schematics/exceptions.py +++ /dev/null @@ -1,252 +0,0 @@ -# -*- coding: utf-8 -*- - -from __future__ import unicode_literals, absolute_import - -import json - -from collections import Sequence, Mapping - -from .translator import LazyText -from .common import * -from .compat import string_type, str_compat -from .datastructures import FrozenDict, FrozenList - -__all__ = [ - 'BaseError', 'ErrorMessage', 'FieldError', 'ConversionError', - 'ValidationError', 'StopValidationError', 'CompoundError', 'DataError', - 'MockCreationError', 'UndefinedValueError', 
'UnknownFieldError'] - - -@str_compat -class BaseError(Exception): - - def __init__(self, errors): - """ - The base class for all Schematics errors. - - message should be a human-readable message, - while errors is a machine-readable list, or dictionary. - - if None is passed as the message, and error is populated, - the primitive representation will be serialized. - - the Python logging module expects exceptions to be hashable - and therefore immutable. As a result, it is not possible to - mutate BaseError's error list or dict after initialization. - """ - errors = self._freeze(errors) - super(BaseError, self).__init__(errors) - - @property - def errors(self): - return self.args[0] - - def to_primitive(self): - """ - converts the errors dict to a primitive representation of dicts, - list and strings. - """ - if not hasattr(self, "_primitive"): - self._primitive = self._to_primitive(self.errors) - return self._primitive - - @staticmethod - def _freeze(obj): - """ freeze common data structures to something immutable. """ - if isinstance(obj, dict): - return FrozenDict(obj) - elif isinstance(obj, list): - return FrozenList(obj) - else: - return obj - - @classmethod - def _to_primitive(cls, obj): - """ recursive to_primitive for basic data types. 
""" - if isinstance(obj, string_type): - return obj - if isinstance(obj, Sequence): - return [cls._to_primitive(e) for e in obj] - elif isinstance(obj, Mapping): - return dict( - (k, cls._to_primitive(v)) for k, v in obj.items() - ) - else: - return str(obj) - - def __str__(self): - return json.dumps(self.to_primitive()) - - def __repr__(self): - return "%s(%s)" % (self.__class__.__name__, repr(self.errors)) - - def __hash__(self): - return hash(self.errors) - - def __eq__(self, other): - if type(self) is type(other): - return self.errors == other.errors - else: - return self.errors == other - return False - - def __ne__(self, other): - return not (self == other) - - -@str_compat -class ErrorMessage(object): - - def __init__(self, summary, info=None): - self.type = None - self.summary = summary - self.info = info - - def __repr__(self): - return "%s(%s, %s)" % ( - self.__class__.__name__, - repr(self.summary), - repr(self.info) - ) - - def __str__(self): - if self.info: - return '%s: %s' % (self.summary, self._info_as_str()) - else: - return '%s' % self.summary - - def _info_as_str(self): - if isinstance(self.info, int): - return str(self.info) - elif isinstance(self.info, string_type): - return '"%s"' % self.info - else: - return str(self.info) - - def __eq__(self, other): - if isinstance(other, ErrorMessage): - return ( - self.summary == other.summary and - self.type == other.type and - self.info == other.info - ) - elif isinstance(other, string_type): - return self.summary == other - else: - return False - - def __ne__(self, other): - return not (self == other) - - def __hash__(self): - return hash((self.summary, self.type, self.info)) - - -class FieldError(BaseError, Sequence): - - type = None - - def __init__(self, *args, **kwargs): - - if type(self) is FieldError: - raise NotImplementedError("Please raise either ConversionError or ValidationError.") - if len(args) == 0: - raise TypeError("Please provide at least one error or error message.") - if kwargs: - 
items = [ErrorMessage(*args, **kwargs)] - elif len(args) == 1: - arg = args[0] - if isinstance(arg, list): - items = list(arg) - else: - items = [arg] - else: - items = args - errors = [] - for item in items: - if isinstance(item, (string_type, LazyText)): - errors.append(ErrorMessage(str(item))) - elif isinstance(item, tuple): - errors.append(ErrorMessage(*item)) - elif isinstance(item, ErrorMessage): - errors.append(item) - elif isinstance(item, self.__class__): - errors.extend(item.errors) - else: - raise TypeError("'{0}()' object is neither a {1} nor an error message."\ - .format(type(item).__name__, type(self).__name__)) - for error in errors: - error.type = self.type or type(self) - - super(FieldError, self).__init__(errors) - - def __contains__(self, value): - return value in self.errors - - def __getitem__(self, index): - return self.errors[index] - - def __iter__(self): - return iter(self.errors) - - def __len__(self): - return len(self.errors) - - -class ConversionError(FieldError, TypeError): - """ Exception raised when data cannot be converted to the correct python type """ - pass - - -class ValidationError(FieldError, ValueError): - """Exception raised when invalid data is encountered.""" - pass - - -class StopValidationError(ValidationError): - """Exception raised when no more validation need occur.""" - type = ValidationError - - -class CompoundError(BaseError): - - def __init__(self, errors): - if not isinstance(errors, dict): - raise TypeError("Compound errors must be reported as a dictionary.") - for key, value in errors.items(): - if isinstance(value, CompoundError): - errors[key] = value.errors - else: - errors[key] = value - super(CompoundError, self).__init__(errors) - - -class DataError(CompoundError): - - def __init__(self, errors, partial_data=None): - super(DataError, self).__init__(errors) - self.partial_data = partial_data - - -class MockCreationError(ValueError): - """Exception raised when a mock value cannot be generated.""" - pass - - 
-class UndefinedValueError(AttributeError, KeyError): - """Exception raised when accessing a field with an undefined value.""" - def __init__(self, model, name): - msg = "'%s' instance has no value for field '%s'" % (model.__class__.__name__, name) - super(UndefinedValueError, self).__init__(msg) - - -class UnknownFieldError(KeyError): - """Exception raised when attempting to access a nonexistent field using the subscription syntax.""" - def __init__(self, model, name): - msg = "Model '%s' has no field named '%s'" % (model.__class__.__name__, name) - super(UnknownFieldError, self).__init__(msg) - - -if PY2: - # Python 2 names cannot be unicode - __all__ = [n.encode('ascii') for n in __all__] diff --git a/solnlib/packages/schematics/iteration.py b/solnlib/packages/schematics/iteration.py deleted file mode 100644 index 8a3d3a1e..00000000 --- a/solnlib/packages/schematics/iteration.py +++ /dev/null @@ -1,87 +0,0 @@ -""" -Core loop over the data structures according to a defined schema. -""" - -from __future__ import unicode_literals, absolute_import -from collections import namedtuple - -from .compat import iteritems -from .undefined import Undefined - -try: - # optional type checking - import typing - if typing.TYPE_CHECKING: - from typing import Mapping, Tuple, Callable, Optional, Any, Iterable - from .schema import Schema -except ImportError: - pass - -Atom = namedtuple('Atom', ('name', 'field', 'value')) -Atom.__new__.__defaults__ = (None,) * len(Atom._fields) - - -def atoms(schema, mapping, keys=tuple(Atom._fields), filter=None): - # type: (Schema, Mapping, Tuple[str, str, str], Optional[Callable[[Atom], bool]]) -> Iterable[Atom] - """ - Iterator for the atomic components of a model definition and relevant - data that creates a 3-tuple of the field's name, its type instance and - its value. - - :type schema: schematics.schema.Schema - :param schema: - The Schema definition. 
- :type mapping: Mapping - :param mapping: - The structure where fields from schema are mapped to values. The only - expectation for this structure is that it implements a ``Mapping`` - interface. - :type keys: Tuple[str, str, str] - :param keys: - Tuple specifying the output of the iterator. Valid keys are: - `name`: the field name - `field`: the field descriptor object - `value`: the current value set on the field - Specifying invalid keys will raise an exception. - :type filter: Optional[Callable[[Atom], bool]] - :param filter: - Function to filter out atoms from the iteration. - - :rtype: Iterable[Atom] - """ - if not set(keys).issubset(Atom._fields): - raise TypeError('invalid key specified') - - has_name = 'name' in keys - has_field = 'field' in keys - has_value = (mapping is not None) and ('value' in keys) - - for field_name, field in iteritems(schema.fields): - value = Undefined - - if has_value: - try: - value = mapping[field_name] - except Exception: - value = Undefined - - atom_tuple = Atom( - name=field_name if has_name else None, - field=field if has_field else None, - value=value) - if filter is None: - yield atom_tuple - elif filter(atom_tuple): - yield atom_tuple - - -class atom_filter: - """Group for the default filter functions.""" - - @staticmethod - def has_setter(atom): - return getattr(atom.field, 'fset', None) is not None - - @staticmethod - def not_setter(atom): - return not atom_filter.has_setter(atom) diff --git a/solnlib/packages/schematics/models.py b/solnlib/packages/schematics/models.py deleted file mode 100644 index 13295ab6..00000000 --- a/solnlib/packages/schematics/models.py +++ /dev/null @@ -1,449 +0,0 @@ -# -*- coding: utf-8 -*- - -from __future__ import unicode_literals, absolute_import - -from copy import deepcopy -import inspect -from collections import OrderedDict -from types import FunctionType - -from .common import * -from .compat import str_compat, repr_compat, _dict -from .datastructures import Context, ChainMap, 
MappingProxyType -from .exceptions import * -from .iteration import atoms -from .transforms import ( - export_loop, convert, - to_native, to_primitive, -) -from .validate import validate, prepare_validator -from .types import BaseType -from .types.serializable import Serializable -from .undefined import Undefined -from .util import get_ident -from . import schema - -__all__ = [] - - -class FieldDescriptor(object): - """ - ``FieldDescriptor`` instances serve as field accessors on models. - """ - - def __init__(self, name): - """ - :param name: - The field's name - """ - self.name = name - - def __get__(self, instance, cls): - """ - For a model instance, returns the field's current value. - For a model class, returns the field's type object. - """ - if instance is None: - return cls._fields[self.name] - else: - value = instance._data.get(self.name, Undefined) - if value is Undefined: - raise UndefinedValueError(instance, self.name) - else: - return value - - def __set__(self, instance, value): - """ - Sets the field's value. - """ - field = instance._fields[self.name] - value = field.pre_setattr(value) - instance._data.converted[self.name] = value - - def __delete__(self, instance): - """ - Deletes the field's value. - """ - del instance._data[self.name] - - -class ModelMeta(type): - """ - Metaclass for Models. - """ - - def __new__(mcs, name, bases, attrs): - """ - This metaclass parses the declarative Model into a corresponding Schema, - then adding it as the `_schema` attribute to the host class. 
- """ - - # Structures used to accumulate meta info - fields = OrderedDict() - validator_functions = {} # Model level - options_members = {} - - # Accumulate metas info from parent classes - for base in reversed(bases): - if hasattr(base, '_schema'): - fields.update(deepcopy(base._schema.fields)) - options_members.update(dict(base._schema.options)) - validator_functions.update(base._schema.validators) - - # Parse this class's attributes into schema structures - for key, value in iteritems(attrs): - if key.startswith('validate_') and isinstance(value, (FunctionType, classmethod)): - validator_functions[key[9:]] = prepare_validator(value, 4) - if isinstance(value, BaseType): - fields[key] = value - elif isinstance(value, Serializable): - fields[key] = value - - # Convert declared fields into descriptors for new class - fields = OrderedDict(sorted( - (kv for kv in fields.items()), - key=lambda i: i[1]._position_hint, - )) - for key, field in iteritems(fields): - if isinstance(field, BaseType): - attrs[key] = FieldDescriptor(key) - elif isinstance(field, Serializable): - attrs[key] = field - - klass = type.__new__(mcs, name, bases, attrs) - klass = repr_compat(str_compat(klass)) - - # Parse schema options - options = mcs._read_options(name, bases, attrs, options_members) - - # Parse meta data into new schema - klass._schema = schema.Schema(name, model=klass, options=options, - validators=validator_functions, *(schema.Field(k, t) for k, t in iteritems(fields))) - - return klass - - @classmethod - def _read_options(mcs, name, bases, attrs, options_members): - """ - Parses model `Options` class into a `SchemaOptions` instance. 
- """ - options_class = attrs.get('__optionsclass__', schema.SchemaOptions) - if 'Options' in attrs: - for key, value in inspect.getmembers(attrs['Options']): - if key.startswith("__"): - continue - elif key.startswith("_"): - extras = options_members.get("extras", {}).copy() - extras.update({key: value}) - options_members["extras"] = extras - elif key == "roles": - roles = options_members.get("roles", {}).copy() - roles.update(value) - options_members[key] = roles - else: - options_members[key] = value - return options_class(**options_members) - - -class ModelDict(ChainMap): - - __slots__ = ['_unsafe', '_converted', '__valid', '_valid'] - - def __init__(self, unsafe=None, converted=None, valid=None): - self._unsafe = unsafe if unsafe is not None else {} - self._converted = converted if converted is not None else {} - self.__valid = valid if valid is not None else {} - self._valid = MappingProxyType(self.__valid) - super(ModelDict, self).__init__(self._unsafe, self._converted, self._valid) - - @property - def unsafe(self): - return self._unsafe - - @unsafe.setter - def unsafe(self, value): - self._unsafe = value - self.maps[0] = self._unsafe - - @property - def converted(self): - return self._converted - - @converted.setter - def converted(self, value): - self._converted = value - self.maps[1] = self._converted - - @property - def valid(self): - return self._valid - - @valid.setter - def valid(self, value): - self._valid = MappingProxyType(value) - self.maps[2] = self._valid - - def __delitem__(self, key): - did_delete = False - for data in [self.__valid, self._converted, self._unsafe]: - try: - del data[key] - did_delete = True - except KeyError: - pass - if not did_delete: - raise KeyError(key) - - def __repr__(self): - return repr(dict(self)) - - -@metaclass(ModelMeta) -class Model(object): - - """ - Enclosure for fields and validation. Same pattern deployed by Django - models, SQLAlchemy declarative extension and other developer friendly - libraries. 
- - :param Mapping raw_data: - The data to be imported into the model instance. - :param Mapping deserialize_mapping: - Can be used to provide alternative input names for fields. Values may be - strings or lists of strings, keyed by the actual field name. - :param bool partial: - Allow partial data to validate. Essentially drops the ``required=True`` - settings from field definitions. Default: True - :param bool strict: - Complain about unrecognized keys. Default: True - """ - - def __init__(self, raw_data=None, trusted_data=None, deserialize_mapping=None, - init=True, partial=True, strict=True, validate=False, app_data=None, - lazy=False, **kwargs): - kwargs.setdefault('init_values', init) - kwargs.setdefault('apply_defaults', init) - - if lazy: - self._data = ModelDict(unsafe=raw_data, valid=trusted_data) - return - - self._data = ModelDict(valid=trusted_data) - data = self._convert(raw_data, - trusted_data=trusted_data, mapping=deserialize_mapping, - partial=partial, strict=strict, validate=validate, new=True, - app_data=app_data, **kwargs) - self._data.converted = data - if validate: - self.validate(partial=partial, app_data=app_data, **kwargs) - - def validate(self, partial=False, convert=True, app_data=None, **kwargs): - """ - Validates the state of the model. If the data is invalid, raises a ``DataError`` - with error messages. - - :param bool partial: - Allow partial data to validate. Essentially drops the ``required=True`` - settings from field definitions. Default: False - :param convert: - Controls whether to perform import conversion before validating. - Can be turned off to skip an unnecessary conversion step if all values - are known to have the right datatypes (e.g., when validating immediately - after the initial import). 
Default: True - """ - if not self._data.converted and partial: - return # no new input data to validate - try: - data = self._convert(validate=True, - partial=partial, convert=convert, app_data=app_data, **kwargs) - self._data.valid = data - except DataError as e: - valid = dict(self._data.valid) - valid.update(e.partial_data) - self._data.valid = valid - raise - finally: - self._data.converted = {} - - def import_data(self, raw_data, recursive=False, **kwargs): - """ - Converts and imports the raw data into an existing model instance. - - :param raw_data: - The data to be imported. - """ - data = self._convert(raw_data, trusted_data=_dict(self), recursive=recursive, **kwargs) - self._data.converted.update(data) - if kwargs.get('validate'): - self.validate(convert=False) - return self - - def _convert(self, raw_data=None, context=None, **kwargs): - """ - Converts the instance raw data into richer Python constructs according - to the fields on the model, validating data if requested. - - :param raw_data: - New data to be imported and converted - """ - raw_data = _dict(raw_data) if raw_data else self._data.converted - kwargs['trusted_data'] = kwargs.get('trusted_data') or {} - kwargs['convert'] = getattr(context, 'convert', kwargs.get('convert', True)) - if self._data.unsafe: - self._data.unsafe.update(raw_data) - raw_data = self._data.unsafe - self._data.unsafe = {} - kwargs['convert'] = True - should_validate = getattr(context, 'validate', kwargs.get('validate', False)) - func = validate if should_validate else convert - return func(self._schema, self, raw_data=raw_data, oo=True, context=context, **kwargs) - - def export(self, field_converter=None, role=None, app_data=None, **kwargs): - return export_loop(self._schema, self, field_converter=field_converter, - role=role, app_data=app_data, **kwargs) - - def to_native(self, role=None, app_data=None, **kwargs): - return to_native(self._schema, self, role=role, app_data=app_data, **kwargs) - - def to_primitive(self, 
role=None, app_data=None, **kwargs): - return to_primitive(self._schema, self, role=role, app_data=app_data, **kwargs) - - def serialize(self, *args, **kwargs): - raw_data = self._data.converted - try: - self.validate(apply_defaults=True) - except DataError: - pass - data = self.to_primitive(*args, **kwargs) - self._data.converted = raw_data - return data - - def atoms(self): - """ - Iterator for the atomic components of a model definition and relevant - data that creates a 3-tuple of the field's name, its type instance and - its value. - """ - return atoms(self._schema, self) - - def __iter__(self): - return (k for k in self._schema.fields if k in self._data - and getattr(self._schema.fields[k], 'fset', None) is None) - - def keys(self): - return list(iter(self)) - - def items(self): - return [(k, self._data[k]) for k in self] - - def values(self): - return [self._data[k] for k in self] - - def get(self, key, default=None): - return getattr(self, key, default) - - @classmethod - def _append_field(cls, field_name, field_type): - """ - Add a new field to this class. - - :type field_name: str - :param field_name: - The name of the field to add. - :type field_type: BaseType - :param field_type: - The type to use for the field. - """ - cls._schema.append_field(schema.Field(field_name, field_type)) - setattr(cls, field_name, FieldDescriptor(field_name)) - - @classmethod - def get_mock_object(cls, context=None, overrides={}): - """Get a mock object. 
- - :param dict context: - :param dict overrides: overrides for the model - """ - context = Context._make(context) - context._setdefault('memo', set()) - context.memo.add(cls) - values = {} - for name, field in cls.fields.items(): - if name in overrides: - continue - if getattr(field, 'model_class', None) in context.memo: - continue - try: - values[name] = field.mock(context) - except MockCreationError as exc: - raise MockCreationError('%s: %s' % (name, exc.message)) - values.update(overrides) - return cls(values) - - def __getitem__(self, name): - if name in self._schema.fields: - return getattr(self, name) - else: - raise UnknownFieldError(self, name) - - def __setitem__(self, name, value): - if name in self._schema.fields: - return setattr(self, name, value) - else: - raise UnknownFieldError(self, name) - - def __delitem__(self, name): - if name in self._schema.fields: - return delattr(self, name) - else: - raise UnknownFieldError(self, name) - - def __contains__(self, name): - return (name in self._data and getattr(self, name, Undefined) is not Undefined) \ - or name in self._serializables - - def __len__(self): - return len(self._data) - - def __eq__(self, other, memo=set()): - if self is other: - return True - if type(self) is not type(other): - return NotImplemented - key = (id(self), id(other), get_ident()) - if key in memo: - return True - else: - memo.add(key) - try: - for k in self: - if self.get(k) != other.get(k): - return False - return True - finally: - memo.remove(key) - - def __ne__(self, other): - return not self == other - - def __repr__(self): - model = self.__class__.__name__ - info = self._repr_info() - if info: - return '<%s: %s>' % (model, info) - else: - return '<%s instance>' % model - - def _repr_info(self): - """ - Subclasses may implement this method to augment the ``__repr__()`` output for the instance:: - - class Person(Model): - ... - def _repr_info(self): - return self.name - - >>> Person({'name': 'Mr. 
Pink'}) - - """ - return None diff --git a/solnlib/packages/schematics/role.py b/solnlib/packages/schematics/role.py deleted file mode 100644 index be7cf782..00000000 --- a/solnlib/packages/schematics/role.py +++ /dev/null @@ -1,113 +0,0 @@ -import collections - -from .compat import str_compat, repr_compat - - -@repr_compat -@str_compat -class Role(collections.Set): - - """ - A ``Role`` object can be used to filter specific fields against a sequence. - - The ``Role`` contains two things: a set of names and a function. - The function describes how to filter, taking a field name as input and then - returning ``True`` or ``False`` to indicate that field should or should not - be skipped. - - A ``Role`` can be operated on as a ``Set`` object representing the fields - it has an opinion on. When Roles are combined with other roles, only the - filtering behavior of the first role is used. - """ - - def __init__(self, function, fields): - self.function = function - self.fields = set(fields) - - def _from_iterable(self, iterable): - return Role(self.function, iterable) - - def __contains__(self, value): - return value in self.fields - - def __iter__(self): - return iter(self.fields) - - def __len__(self): - return len(self.fields) - - def __eq__(self, other): - return (self.function.__name__ == other.function.__name__ and - self.fields == other.fields) - - def __str__(self): - return '%s(%s)' % (self.function.__name__, - ', '.join("'%s'" % f for f in self.fields)) - - def __repr__(self): - return '' % str(self) - - # edit role fields - def __add__(self, other): - fields = self.fields.union(other) - return self._from_iterable(fields) - - def __sub__(self, other): - fields = self.fields.difference(other) - return self._from_iterable(fields) - - # apply role to field - def __call__(self, name, value): - return self.function(name, value, self.fields) - - # static filter functions - @staticmethod - def wholelist(name, value, seq): - """ - Accepts a field name, value, and a field 
list. This function - implements acceptance of all fields by never requesting a field be - skipped, thus returns False for all input. - - :param name: - The field name to inspect. - :param value: - The field's value. - :param seq: - The list of fields associated with the ``Role``. - """ - return False - - @staticmethod - def whitelist(name, value, seq): - """ - Implements the behavior of a whitelist by requesting a field be skipped - whenever its name is not in the list of fields. - - :param name: - The field name to inspect. - :param value: - The field's value. - :param seq: - The list of fields associated with the ``Role``. - """ - - if seq is not None and len(seq) > 0: - return name not in seq - return True - - @staticmethod - def blacklist(name, value, seq): - """ - Implements the behavior of a blacklist by requesting a field be skipped - whenever its name is found in the list of fields. - - :param name: - The field name to inspect. - :param value: - The field's value. - :param seq: - The list of fields associated with the ``Role``. 
- """ - if seq is not None and len(seq) > 0: - return name in seq - return False diff --git a/solnlib/packages/schematics/schema.py b/solnlib/packages/schematics/schema.py deleted file mode 100644 index 63c3014a..00000000 --- a/solnlib/packages/schematics/schema.py +++ /dev/null @@ -1,66 +0,0 @@ - -import itertools -import inspect - -from collections import OrderedDict - -from .compat import itervalues -from .common import DEFAULT, NONEMPTY -from .types import BaseType -from .types.serializable import Serializable - - -class Schema(object): - - def __init__(self, name, *fields, **kw): - self.name = name - self.model = kw.get('model', None) - self.options = kw.get('options', SchemaOptions()) - self.validators = kw.get('validators', {}) - self.fields = OrderedDict() - for field in fields: - self.append_field(field) - - @property - def valid_input_keys(self): - return set(itertools.chain(*(t.get_input_keys() for t in itervalues(self.fields)))) - - def append_field(self, field): - self.fields[field.name] = field.type - field.type._setup(field.name, self.model) # TODO: remove model reference - - -class SchemaOptions(object): - - def __init__(self, namespace=None, roles=None, export_level=DEFAULT, - serialize_when_none=None, export_order=False, extras=None): - self.namespace = namespace - self.roles = roles or {} - self.export_level = export_level - if serialize_when_none is True: - self.export_level = DEFAULT - elif serialize_when_none is False: - self.export_level = NONEMPTY - self.export_order = export_order - self.extras = extras or {} - - for key, value in self.extras.items(): - setattr(self, key, value) - - def __iter__(self): - for key, value in inspect.getmembers(self): - if not key.startswith("_"): - yield key, value - - -class Field(object): - - __slots__ = ('name', 'type') - - def __init__(self, name, field_type): - assert isinstance(field_type, (BaseType, Serializable)) - self.name = name - self.type = field_type - - def is_settable(self): - return 
getattr(self.type, 'fset', None) is not None diff --git a/solnlib/packages/schematics/transforms.py b/solnlib/packages/schematics/transforms.py deleted file mode 100644 index d9d557fb..00000000 --- a/solnlib/packages/schematics/transforms.py +++ /dev/null @@ -1,436 +0,0 @@ -# -*- coding: utf-8 -*- - -from __future__ import unicode_literals, absolute_import - -import itertools -import types -from collections import OrderedDict - -from .common import * -from .datastructures import Context -from .exceptions import * -from .undefined import Undefined -from .util import listify -from .iteration import atoms, atom_filter -from .role import Role - -__all__ = [] - - -### -# Transform loops -### - - -def import_loop(schema, mutable, raw_data=None, field_converter=None, trusted_data=None, - mapping=None, partial=False, strict=False, init_values=False, - apply_defaults=False, convert=True, validate=False, new=False, - oo=False, recursive=False, app_data=None, context=None): - """ - The import loop is designed to take untrusted data and convert it into the - native types, as described in ``schema``. It does this by calling - ``field_converter`` on every field. - - Errors are aggregated and returned by throwing a ``ModelConversionError``. - - :param schema: - The Schema to use as source for validation. - :param mutable: - A mapping or instance that can be changed during validation by Schema - functions. - :param raw_data: - A mapping to be converted into types according to ``schema``. - :param field_converter: - This function is applied to every field found in ``instance_or_dict``. - :param trusted_data: - A ``dict``-like structure that may contain already validated data. - :param partial: - Allow partial data to validate; useful for PATCH requests. - Essentially drops the ``required=True`` arguments from field - definitions. Default: False - :param strict: - Complain about unrecognized keys. 
Default: False - :param apply_defaults: - Whether to set fields to their default values when not present in input data. - :param app_data: - An arbitrary container for application-specific data that needs to - be available during the conversion. - :param context: - A ``Context`` object that encapsulates configuration options and ``app_data``. - The context object is created upon the initial invocation of ``import_loop`` - and is then propagated through the entire process. - """ - if raw_data is None: - raw_data = mutable - got_data = raw_data is not None - - context = Context._make(context) - try: - context.initialized - except: - if type(field_converter) is types.FunctionType: - field_converter = BasicConverter(field_converter) - context._setdefaults({ - 'initialized': True, - 'field_converter': field_converter, - 'trusted_data': trusted_data or {}, - 'mapping': mapping or {}, - 'partial': partial, - 'strict': strict, - 'init_values': init_values, - 'apply_defaults': apply_defaults, - 'convert': convert, - 'validate': validate, - 'new': new, - 'oo': oo, - 'recursive': recursive, - 'app_data': app_data if app_data is not None else {} - }) - - raw_data = context.field_converter.pre(schema, raw_data, context) - - _field_converter = context.field_converter - _model_mapping = context.mapping.get('model_mapping') - - data = dict(context.trusted_data) if context.trusted_data else {} - errors = {} - - if got_data and context.validate: - errors = _mutate(schema, mutable, raw_data, context) - - if got_data: - # Determine all acceptable field input names - all_fields = schema._valid_input_keys - if context.mapping: - mapped_keys = (set(itertools.chain(*( - listify(input_keys) for target_key, input_keys in context.mapping.items() - if target_key != 'model_mapping')))) - all_fields = all_fields | mapped_keys - if context.strict: - # Check for rogues if strict is set - rogue_fields = set(raw_data) - all_fields - if rogue_fields: - for field in rogue_fields: - errors[field] = 
'Rogue field' - - atoms_filter = None - if not context.validate: - # optimization: convert without validate doesn't require to touch setters - atoms_filter = atom_filter.not_setter - for field_name, field, value in atoms(schema, raw_data, filter=atoms_filter): - serialized_field_name = field.serialized_name or field_name - - if got_data and value is Undefined: - for key in field.get_input_keys(context.mapping): - if key and key != field_name and key in raw_data: - value = raw_data[key] - break - - if value is Undefined: - if field_name in data: - continue - if context.apply_defaults: - value = field.default - if value is Undefined and context.init_values: - value = None - - if got_data: - if field.is_compound: - if context.trusted_data and context.recursive: - td = context.trusted_data.get(field_name) - if not all(hasattr(td, attr) for attr in ('keys', '__getitem__')): - td = {field_name: td} - else: - td = {} - if _model_mapping: - submap = _model_mapping.get(field_name) - else: - submap = {} - field_context = context._branch(trusted_data=td, mapping=submap) - else: - field_context = context - try: - value = _field_converter(field, value, field_context) - except (FieldError, CompoundError) as exc: - errors[serialized_field_name] = exc - if context.apply_defaults: - value = field.default - if value is not Undefined: - data[field_name] = value - if isinstance(exc, DataError): - data[field_name] = exc.partial_data - continue - - if value is Undefined: - continue - - data[field_name] = value - - if not context.validate: - for field_name, field, value in atoms(schema, raw_data, filter=atom_filter.has_setter): - data[field_name] = value - - if errors: - raise DataError(errors, data) - - data = context.field_converter.post(schema, data, context) - - return data - - -def _mutate(schema, mutable, raw_data, context): - """ - Mutates the converted data before validation. Allows Schema fields to modify - and create data values on mutable. 
- """ - errors = {} - for field_name, field, value in atoms(schema, raw_data, filter=atom_filter.has_setter): - if value is Undefined: - continue - try: - value = context.field_converter(field, value, context) - field.__set__(mutable, value) - except (FieldError, CompoundError) as exc: - serialized_field_name = field.serialized_name or field_name - errors[serialized_field_name] = exc - continue - except AttributeError: - pass - raw_data.update((key, mutable[key]) for key in mutable) - return errors - - -def export_loop(schema, instance_or_dict, field_converter=None, role=None, raise_error_on_role=True, - export_level=None, app_data=None, context=None): - """ - The export_loop function is intended to be a general loop definition that - can be used for any form of data shaping, such as application of roles or - how a field is transformed. - - :param schema: - The Schema to use as source for validation. - :param instance_or_dict: - The structure where fields from schema are mapped to values. The only - expectation for this structure is that it implements a ``dict`` - interface. - :param field_converter: - This function is applied to every field found in ``instance_or_dict``. - :param role: - The role used to determine if fields should be left out of the - transformation. - :param raise_error_on_role: - This parameter enforces strict behavior which requires substructures - to have the same role definition as their parent structures. - :param app_data: - An arbitrary container for application-specific data that needs to - be available during the conversion. - :param context: - A ``Context`` object that encapsulates configuration options and ``app_data``. - The context object is created upon the initial invocation of ``import_loop`` - and is then propagated through the entire process. 
- """ - context = Context._make(context) - try: - context.initialized - except: - if type(field_converter) is types.FunctionType: - field_converter = BasicConverter(field_converter) - context._setdefaults({ - 'initialized': True, - 'field_converter': field_converter, - 'role': role, - 'raise_error_on_role': raise_error_on_role, - 'export_level': export_level, - 'app_data': app_data if app_data is not None else {} - }) - - instance_or_dict = context.field_converter.pre(schema, instance_or_dict, context) - - if schema._options.export_order: - data = OrderedDict() - else: - data = {} - - filter_func = (context.role if callable(context.role) else - schema._options.roles.get(context.role)) - if filter_func is None: - if context.role and context.raise_error_on_role: - error_msg = '%s Model has no role "%s"' - raise ValueError(error_msg % (schema.__name__, context.role)) - else: - filter_func = schema._options.roles.get("default") - - _field_converter = context.field_converter - - for field_name, field, value in atoms(schema, instance_or_dict): - serialized_name = field.serialized_name or field_name - - if filter_func is not None and filter_func(field_name, value): - continue - - _export_level = field.get_export_level(context) - - if _export_level == DROP: - continue - - elif value is not None and value is not Undefined: - value = _field_converter(field, value, context) - - if value is Undefined: - if _export_level <= DEFAULT: - continue - elif value is None: - if _export_level <= NOT_NONE: - continue - elif field.is_compound and len(value) == 0: - if _export_level <= NONEMPTY: - continue - - if value is Undefined: - value = None - - data[serialized_name] = value - - data = context.field_converter.post(schema, data, context) - - return data - - -### -# Field filtering -### - - -def wholelist(*field_list): - """ - Returns a function that evicts nothing. Exists mainly to be an explicit - allowance of all fields instead of a using an empty blacklist. 
- """ - return Role(Role.wholelist, field_list) - - -def whitelist(*field_list): - """ - Returns a function that operates as a whitelist for the provided list of - fields. - - A whitelist is a list of fields explicitly named that are allowed. - """ - return Role(Role.whitelist, field_list) - - -def blacklist(*field_list): - """ - Returns a function that operates as a blacklist for the provided list of - fields. - - A blacklist is a list of fields explicitly named that are not allowed. - """ - return Role(Role.blacklist, field_list) - - -### -# Field converter interface -### - - -class Converter(object): - - def __call__(self, field, value, context): - raise NotImplementedError - - def pre(self, model_class, instance_or_dict, context): - return instance_or_dict - - def post(self, model_class, data, context): - return data - - -class BasicConverter(Converter): - - def __init__(self, func): - self.func = func - - def __call__(self, *args): - return self.func(*args) - - -### -# Standard export converters -### - - -@BasicConverter -def to_native_converter(field, value, context): - return field.export(value, NATIVE, context) - - -@BasicConverter -def to_primitive_converter(field, value, context): - return field.export(value, PRIMITIVE, context) - - -### -# Standard import converters -### - - -@BasicConverter -def import_converter(field, value, context): - field.check_required(value, context) - if value is None or value is Undefined: - return value - return field.convert(value, context) - - -@BasicConverter -def validation_converter(field, value, context): - field.check_required(value, context) - if value is None or value is Undefined: - return value - return field.validate(value, context) - - -### -# Context stub factories -### - - -def get_import_context(field_converter=import_converter, **options): - import_options = { - 'field_converter': field_converter, - 'partial': False, - 'strict': False, - 'convert': True, - 'validate': False, - 'new': False, - 'oo': False - } - 
import_options.update(options) - return Context(**import_options) - - -def get_export_context(field_converter=to_native_converter, **options): - export_options = { - 'field_converter': field_converter, - 'export_level': None - } - export_options.update(options) - return Context(**export_options) - - -### -# Import and export functions -### - - -def convert(cls, mutable, raw_data=None, **kwargs): - return import_loop(cls, mutable, raw_data, import_converter, **kwargs) - - -def to_native(cls, instance_or_dict, **kwargs): - return export_loop(cls, instance_or_dict, to_native_converter, **kwargs) - - -def to_primitive(cls, instance_or_dict, **kwargs): - return export_loop(cls, instance_or_dict, to_primitive_converter, **kwargs) diff --git a/solnlib/packages/schematics/translator.py b/solnlib/packages/schematics/translator.py deleted file mode 100644 index cfc1f5a4..00000000 --- a/solnlib/packages/schematics/translator.py +++ /dev/null @@ -1,34 +0,0 @@ -# -*- coding: utf-8 -*- - -from .compat import str_compat - - -@str_compat -class LazyText(object): - def __init__(self, message): - self.message = message - - def __str__(self): - translator = _.real_translator - return translator(self.message) if translator else self.message - - def __mod__(self, other): - return str(self) % other - - def format(self, *args, **kwargs): - return str(self).format(*args, **kwargs) - - -class Translator(object): - """A placeholder which could call a function like lazy_gettext and make messages translatable.""" - def __init__(self): - self.real_translator = None - - def __call__(self, message, lazy=True, *args, **kwargs): - return LazyText(message) if lazy else str(LazyText(message)) - - def register_translator(self, new_translator): - self.real_translator = new_translator - -_ = Translator() -register_translator = _.register_translator diff --git a/solnlib/packages/schematics/types/__init__.py b/solnlib/packages/schematics/types/__init__.py deleted file mode 100644 index 5413da9b..00000000 
--- a/solnlib/packages/schematics/types/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ - -from .base import * -from .compound import * -from .serializable import * -from .net import * -from .union import * diff --git a/solnlib/packages/schematics/types/base.py b/solnlib/packages/schematics/types/base.py deleted file mode 100644 index 4e4e2205..00000000 --- a/solnlib/packages/schematics/types/base.py +++ /dev/null @@ -1,1195 +0,0 @@ -# -*- coding: utf-8 -*- - -from __future__ import unicode_literals, absolute_import - -try: - import typing -except ImportError: - pass - -import copy -import datetime -import decimal -import itertools -import numbers -import random -import re -import string -import uuid -from collections import Iterable, OrderedDict - -from ..common import * -from ..exceptions import * -from ..translator import _ -from ..undefined import Undefined -from ..util import listify -from ..validate import prepare_validator, get_validation_context - -__all__ = [ - 'BaseType', 'UUIDType', 'StringType', 'MultilingualStringType', - 'NumberType', 'IntType', 'LongType', 'FloatType', 'DecimalType', - 'HashType', 'MD5Type', 'SHA1Type', 'BooleanType', 'GeoPointType', - 'DateType', 'DateTimeType', 'UTCDateTimeType', 'TimestampType', - 'TimedeltaType'] - - -def fill_template(template, min_length, max_length): - return template % random_string( - get_value_in( - min_length, - max_length, - padding=len(template) - 2, - required_length=1)) - - -def get_range_endpoints(min_length, max_length, padding=0, required_length=0): - if min_length is None: - min_length = 0 - if max_length is None: - max_length = max(min_length * 2, 16) - - if padding: - max_length = max_length - padding - min_length = max(min_length - padding, 0) - - if max_length < required_length: - raise MockCreationError( - 'This field is too short to hold the mock data') - - min_length = max(min_length, required_length) - if max_length < min_length: - raise MockCreationError('Minimum is greater than maximum') - - 
return min_length, max_length - - -def get_value_in(min_length, max_length, padding=0, required_length=0): - return random.randint( - *get_range_endpoints(min_length, max_length, padding, required_length)) - - -_alphanumeric = string.ascii_letters + string.digits - - -def random_string(length, chars=_alphanumeric): - return ''.join(random.choice(chars) for _ in range(length)) - - -_last_position_hint = -1 -_next_position_hint = itertools.count() - - -class TypeMeta(type): - - """ - Meta class for BaseType. Merges `MESSAGES` dict and accumulates - validator methods. - """ - - def __new__(mcs, name, bases, attrs): - messages = {} - validators = OrderedDict() - - for base in reversed(bases): - if hasattr(base, 'MESSAGES'): - messages.update(base.MESSAGES) - - if hasattr(base, "_validators"): - validators.update(base._validators) - - if 'MESSAGES' in attrs: - messages.update(attrs['MESSAGES']) - - attrs['MESSAGES'] = messages - - for attr_name, attr in attrs.items(): - if attr_name.startswith("validate_"): - validators[attr_name] = 1 - attrs[attr_name] = prepare_validator(attr, 3) - - attrs["_validators"] = validators - - return type.__new__(mcs, name, bases, attrs) - - -@metaclass(TypeMeta) -class BaseType(object): - - """A base class for Types in a Schematics model. Instances of this - class may be added to subclasses of ``Model`` to define a model schema. - - Validators that need to access variables on the instance - can be defined be implementing methods whose names start with ``validate_`` - and accept one parameter (in addition to ``self``) - - :param required: - Invalidate field when value is None or is not supplied. Default: - False. - :param default: - When no data is provided default to this value. May be a callable. - Default: None. - :param serialized_name: - The name of this field defaults to the class attribute used in the - model. However if the field has another name in foreign data set this - argument. 
Serialized data will use this value for the key name too. - :param deserialize_from: - A name or list of named fields for which foreign data sets are - searched to provide a value for the given field. This only effects - inbound data. - :param choices: - A list of valid choices. This is the last step of the validator - chain. - :param validators: - A list of callables. Each callable receives the value after it has been - converted into a rich python type. Default: [] - :param serialize_when_none: - Dictates if the field should appear in the serialized data even if the - value is None. Default: None. - :param messages: - Override the error messages with a dict. You can also do this by - subclassing the Type and defining a `MESSAGES` dict attribute on the - class. A metaclass will merge all the `MESSAGES` and override the - resulting dict with instance level `messages` and assign to - `self.messages`. - :param metadata: - Dictionary for storing custom metadata associated with the field. - To encourage compatibility with external tools, we suggest these keys - for common metadata: - - *label* : Brief human-readable label - - *description* : Explanation of the purpose of the field. Used for - help, tooltips, documentation, etc. 
- """ - - primitive_type = None - native_type = None - - MESSAGES = { - 'required': _("This field is required."), - 'choices': _("Value must be one of {0}."), - } - - EXPORT_METHODS = { - NATIVE: 'to_native', - PRIMITIVE: 'to_primitive', - } - - def __init__(self, required=False, default=Undefined, serialized_name=None, - choices=None, validators=None, deserialize_from=None, - export_level=None, serialize_when_none=None, - messages=None, metadata=None): - super(BaseType, self).__init__() - - self.required = required - self._default = default - self.serialized_name = serialized_name - if choices and (isinstance(choices, string_type) or not isinstance(choices, Iterable)): - raise TypeError('"choices" must be a non-string Iterable') - self.choices = choices - self.deserialize_from = listify(deserialize_from) - - self.validators = [getattr(self, validator_name) for validator_name in self._validators] - if validators: - self.validators += (prepare_validator(func, 2) for func in validators) - - self._set_export_level(export_level, serialize_when_none) - - self.messages = dict(self.MESSAGES, **(messages or {})) - self.metadata = metadata or {} - self._position_hint = next(_next_position_hint) # For ordering of fields - - self.name = None - self.owner_model = None - self.parent_field = None - self.typeclass = self.__class__ - self.is_compound = False - - self.export_mapping = dict( - (format, getattr(self, fname)) for format, fname in self.EXPORT_METHODS.items()) - - def __repr__(self): - type_ = "%s(%s) instance" % (self.__class__.__name__, self._repr_info() or '') - model = " on %s" % self.owner_model.__name__ if self.owner_model else '' - field = " as '%s'" % self.name if self.name else '' - return "<%s>" % (type_ + model + field) - - def _repr_info(self): - return None - - def __call__(self, value, context=None): - return self.convert(value, context) - - def __deepcopy__(self, memo): - return copy.copy(self) - - def _mock(self, context=None): - return None - - def 
_setup(self, field_name, owner_model): - """Perform late-stage setup tasks that are run after the containing model - has been created. - """ - self.name = field_name - self.owner_model = owner_model - self._input_keys = self._get_input_keys() - - def _set_export_level(self, export_level, serialize_when_none): - if export_level is not None: - self.export_level = export_level - elif serialize_when_none is True: - self.export_level = DEFAULT - elif serialize_when_none is False: - self.export_level = NONEMPTY - else: - self.export_level = None - - def get_export_level(self, context): - if self.owner_model: - level = self.owner_model._options.export_level - else: - level = DEFAULT - if self.export_level is not None: - level = self.export_level - if context.export_level is not None: - level = context.export_level - return level - - def get_input_keys(self, mapping=None): - if mapping: - return self._get_input_keys(mapping) - else: - return self._input_keys - - def _get_input_keys(self, mapping=None): - input_keys = [self.name] - if self.serialized_name: - input_keys.append(self.serialized_name) - if mapping and self.name in mapping: - input_keys.extend(listify(mapping[self.name])) - if self.deserialize_from: - input_keys.extend(self.deserialize_from) - return input_keys - - @property - def default(self): - default = self._default - if callable(default): - default = default() - return default - - def pre_setattr(self, value): - return value - - def convert(self, value, context=None): - return self.to_native(value, context) - - def export(self, value, format, context=None): - return self.export_mapping[format](value, context) - - def to_primitive(self, value, context=None): - """Convert internal data to a value safe to serialize. - """ - return value - - def to_native(self, value, context=None): - """ - Convert untrusted data to a richer Python construct. 
- """ - return value - - def validate(self, value, context=None): - """ - Validate the field and return a converted value or raise a - ``ValidationError`` with a list of errors raised by the validation - chain. Stop the validation process from continuing through the - validators by raising ``StopValidationError`` instead of ``ValidationError``. - - """ - context = context or get_validation_context() - - if context.convert: - value = self.convert(value, context) - elif self.is_compound: - self.convert(value, context) - - errors = [] - for validator in self.validators: - try: - validator(value, context) - except ValidationError as exc: - errors.append(exc) - if isinstance(exc, StopValidationError): - break - if errors: - raise ValidationError(errors) - - return value - - def check_required(self, value, context): - if self.required and (value is None or value is Undefined): - if self.name is None or context and not context.partial: - raise ConversionError(self.messages['required']) - - def validate_choices(self, value, context): - if self.choices is not None: - if value not in self.choices: - raise ValidationError(self.messages['choices'].format(str(self.choices))) - - def mock(self, context=None): - if not self.required and not random.choice([True, False]): - return self.default - if self.choices is not None: - return random.choice(self.choices) - return self._mock(context) - - -class UUIDType(BaseType): - - """A field that stores a valid UUID value. - """ - - primitive_type = str - native_type = uuid.UUID - - MESSAGES = { - 'convert': _("Couldn't interpret '{0}' value as UUID."), - } - - def __init__(self, **kwargs): - # type: (...) 
-> uuid.UUID - super(UUIDType, self).__init__(**kwargs) - - def _mock(self, context=None): - return uuid.uuid4() - - def to_native(self, value, context=None): - if not isinstance(value, uuid.UUID): - try: - value = uuid.UUID(value) - except (TypeError, ValueError): - raise ConversionError(self.messages['convert'].format(value)) - return value - - def to_primitive(self, value, context=None): - return str(value) - - -class StringType(BaseType): - - """A Unicode string field.""" - - primitive_type = str - native_type = str - allow_casts = (int, bytes) - - MESSAGES = { - 'convert': _("Couldn't interpret '{0}' as string."), - 'decode': _("Invalid UTF-8 data."), - 'max_length': _("String value is too long."), - 'min_length': _("String value is too short."), - 'regex': _("String value did not match validation regex."), - } - - def __init__(self, regex=None, max_length=None, min_length=None, **kwargs): - # type: (...) -> typing.Text - - self.regex = re.compile(regex) if regex else None - self.max_length = max_length - self.min_length = min_length - - super(StringType, self).__init__(**kwargs) - - def _mock(self, context=None): - return random_string(get_value_in(self.min_length, self.max_length)) - - def to_native(self, value, context=None): - if isinstance(value, str): - return value - if isinstance(value, self.allow_casts): - if isinstance(value, bytes): - try: - return str(value, 'utf-8') - except UnicodeError: - raise ConversionError(self.messages['decode'].format(value)) - elif isinstance(value, bool): - pass - else: - return str(value) - raise ConversionError(self.messages['convert'].format(value)) - - def validate_length(self, value, context=None): - length = len(value) - if self.max_length is not None and length > self.max_length: - raise ValidationError(self.messages['max_length']) - - if self.min_length is not None and length < self.min_length: - raise ValidationError(self.messages['min_length']) - - def validate_regex(self, value, context=None): - if self.regex 
is not None and self.regex.match(value) is None: - raise ValidationError(self.messages['regex']) - - -class NumberType(BaseType): - - """A generic number field. - Converts to and validates against `number_type` parameter. - """ - - primitive_type = None - native_type = None - number_type = None - MESSAGES = { - 'number_coerce': _("Value '{0}' is not {1}."), - 'number_min': _("{0} value should be greater than or equal to {1}."), - 'number_max': _("{0} value should be less than or equal to {1}."), - } - - def __init__(self, min_value=None, max_value=None, strict=False, **kwargs): - # type: (...) -> typing.Union[int, float] - - self.min_value = min_value - self.max_value = max_value - self.strict = strict - - super(NumberType, self).__init__(**kwargs) - - def _mock(self, context=None): - number = random.uniform( - *get_range_endpoints(self.min_value, self.max_value) - ) - return self.native_type(number) if self.native_type else number - - def to_native(self, value, context=None): - if isinstance(value, bool): - value = int(value) - if isinstance(value, self.native_type): - return value - try: - native_value = self.native_type(value) - except (TypeError, ValueError): - pass - else: - if self.native_type is float: # Float conversion is strict enough. - return native_value - if not self.strict and native_value == value: # Match numeric types. 
- return native_value - if isinstance(value, (string_type, numbers.Integral)): - return native_value - - raise ConversionError(self.messages['number_coerce'] - .format(value, self.number_type.lower())) - - def validate_range(self, value, context=None): - if self.min_value is not None and value < self.min_value: - raise ValidationError(self.messages['number_min'] - .format(self.number_type, self.min_value)) - - if self.max_value is not None and value > self.max_value: - raise ValidationError(self.messages['number_max'] - .format(self.number_type, self.max_value)) - - return value - - -class IntType(NumberType): - - """A field that validates input as an Integer - """ - - primitive_type = int - native_type = int - number_type = 'Int' - - def __init__(self, **kwargs): - # type: (...) -> int - super(IntType, self).__init__(**kwargs) - - -LongType = IntType - - -class FloatType(NumberType): - - """A field that validates input as a Float - """ - - primitive_type = float - native_type = float - number_type = 'Float' - - def __init__(self, **kwargs): - # type: (...) -> float - super(FloatType, self).__init__(**kwargs) - - -class DecimalType(NumberType): - - """A fixed-point decimal number field. 
- """ - - primitive_type = str - native_type = decimal.Decimal - number_type = 'Decimal' - - def to_primitive(self, value, context=None): - return str(value) - - def to_native(self, value, context=None): - if isinstance(value, decimal.Decimal): - return value - - if not isinstance(value, (string_type, bool)): - value = str(value) - try: - value = decimal.Decimal(value) - except (TypeError, decimal.InvalidOperation): - raise ConversionError(self.messages['number_coerce'].format( - value, self.number_type.lower())) - - return value - - -class HashType(StringType): - - MESSAGES = { - 'hash_length': _("Hash value is wrong length."), - 'hash_hex': _("Hash value is not hexadecimal."), - } - - def _mock(self, context=None): - return random_string(self.LENGTH, string.hexdigits) - - def to_native(self, value, context=None): - value = super(HashType, self).to_native(value, context) - - if len(value) != self.LENGTH: - raise ValidationError(self.messages['hash_length']) - try: - int(value, 16) - except ValueError: - raise ConversionError(self.messages['hash_hex']) - return value - - -class MD5Type(HashType): - - """A field that validates input as resembling an MD5 hash. - """ - - LENGTH = 32 - - -class SHA1Type(HashType): - - """A field that validates input as resembling an SHA1 hash. - """ - - LENGTH = 40 - - -class BooleanType(BaseType): - - """A boolean field type. In addition to ``True`` and ``False``, coerces these - values: - - + For ``True``: "True", "true", "1" - + For ``False``: "False", "false", "0" - - """ - - primitive_type = bool - native_type = bool - - TRUE_VALUES = ('True', 'true', '1') - FALSE_VALUES = ('False', 'false', '0') - - def __init__(self, **kwargs): - # type: (...) 
-> bool - super(BooleanType, self).__init__(**kwargs) - - def _mock(self, context=None): - return random.choice([True, False]) - - def to_native(self, value, context=None): - if isinstance(value, string_type): - if value in self.TRUE_VALUES: - value = True - elif value in self.FALSE_VALUES: - value = False - - elif isinstance(value, int) and value in [0, 1]: - value = bool(value) - - if not isinstance(value, bool): - raise ConversionError(_("Must be either true or false.")) - - return value - - -class DateType(BaseType): - - """Defaults to converting to and from ISO8601 date values. - """ - - primitive_type = str - native_type = datetime.date - - SERIALIZED_FORMAT = '%Y-%m-%d' - MESSAGES = { - 'parse': _("Could not parse {0}. Should be ISO 8601 (YYYY-MM-DD)."), - 'parse_formats': _('Could not parse {0}. Valid formats: {1}'), - } - - def __init__(self, formats=None, **kwargs): - # type: (...) -> datetime.date - - if formats: - self.formats = listify(formats) - self.conversion_errmsg = self.MESSAGES['parse_formats'] - else: - self.formats = ['%Y-%m-%d'] - self.conversion_errmsg = self.MESSAGES['parse'] - - self.serialized_format = self.SERIALIZED_FORMAT - - super(DateType, self).__init__(**kwargs) - - def _mock(self, context=None): - return datetime.date( - year=random.randrange(600) + 1900, - month=random.randrange(12) + 1, - day=random.randrange(28) + 1, - ) - - def to_native(self, value, context=None): - if isinstance(value, datetime.datetime): - return value.date() - if isinstance(value, datetime.date): - return value - - for fmt in self.formats: - try: - return datetime.datetime.strptime(value, fmt).date() - except (ValueError, TypeError): - continue - else: - raise ConversionError(self.conversion_errmsg.format(value, ", ".join(self.formats))) - - def to_primitive(self, value, context=None): - return value.strftime(self.serialized_format) - - -class DateTimeType(BaseType): - - """A field that holds a combined date and time value. 
- - The built-in parser accepts input values conforming to the ISO 8601 format - ``--
T:[:][]``. A space may be substituted - for the delimiter ``T``. The time zone designator ```` may be either ``Z`` - or ``±[:][]``. - - Values are stored as standard ``datetime.datetime`` instances with the time zone - offset in the ``tzinfo`` component if available. Raw values that do not specify a time - zone will be converted to naive ``datetime`` objects unless ``tzd='utc'`` is in effect. - - Unix timestamps are also valid input values and will be converted to UTC datetimes. - - :param formats: - (Optional) A value or iterable of values suitable as ``datetime.datetime.strptime`` format - strings, for example ``('%Y-%m-%dT%H:%M:%S', '%Y-%m-%dT%H:%M:%S.%f')``. If the parameter is - present, ``strptime()`` will be used for parsing instead of the built-in parser. - :param serialized_format: - The output format suitable for Python ``strftime``. Default: ``'%Y-%m-%dT%H:%M:%S.%f%z'`` - :param parser: - (Optional) An external function to use for parsing instead of the built-in parser. It should - return a ``datetime.datetime`` instance. - :param tzd: - Sets the time zone policy. - Default: ``'allow'`` - ============== ====================================================================== - ``'require'`` Values must specify a time zone. - ``'allow'`` Values both with and without a time zone designator are allowed. - ``'utc'`` Like ``allow``, but values with no time zone information are assumed - to be in UTC. - ``'reject'`` Values must not specify a time zone. This also prohibits timestamps. - ============== ====================================================================== - :param convert_tz: - Indicates whether values with a time zone designator should be automatically converted to UTC. - Default: ``False`` - * ``True``: Convert the datetime to UTC based on its time zone offset. - * ``False``: Don't convert. Keep the original time and offset intact. - :param drop_tzinfo: - Can be set to automatically remove the ``tzinfo`` objects. 
This option should generally - be used in conjunction with the ``convert_tz`` option unless you only care about local - wall clock times. Default: ``False`` - * ``True``: Discard the ``tzinfo`` components and make naive ``datetime`` objects instead. - * ``False``: Preserve the ``tzinfo`` components if present. - """ - - primitive_type = str - native_type = datetime.datetime - - SERIALIZED_FORMAT = '%Y-%m-%dT%H:%M:%S.%f%z' - - MESSAGES = { - 'parse': _('Could not parse {0}. Should be ISO 8601 or timestamp.'), - 'parse_formats': _('Could not parse {0}. Valid formats: {1}'), - 'parse_external': _('Could not parse {0}.'), - 'parse_tzd_require': _('Could not parse {0}. Time zone offset required.'), - 'parse_tzd_reject': _('Could not parse {0}. Time zone offset not allowed.'), - 'tzd_require': _('Could not convert {0}. Time zone required but not found.'), - 'tzd_reject': _('Could not convert {0}. Time zone offsets not allowed.'), - 'validate_tzd_require': _('Time zone information required but not found.'), - 'validate_tzd_reject': _('Time zone information not allowed.'), - 'validate_utc_none': _('Time zone must be UTC but was None.'), - 'validate_utc_wrong': _('Time zone must be UTC.'), - } - - REGEX = re.compile(r""" - (?P\d{4})-(?P\d\d)-(?P\d\d)(?:T|\ ) - (?P\d\d):(?P\d\d) - (?::(?P\d\d)(?:(?:\.|,)(?P\d{1,6}))?)? - (?:(?P(?P[+−-])(?P\d\d):?(?P\d\d)?) 
- |(?PZ))?$""", re.X) - - TIMEDELTA_ZERO = datetime.timedelta(0) - - class fixed_timezone(datetime.tzinfo): - def utcoffset(self, dt): return self.offset - def fromutc(self, dt): return dt + self.offset - def dst(self, dt): return None - def tzname(self, dt): return self.str - def __str__(self): return self.str - def __repr__(self, info=''): return '{0}({1})'.format(type(self).__name__, info) - - class utc_timezone(fixed_timezone): - offset = datetime.timedelta(0) - name = str = 'UTC' - - class offset_timezone(fixed_timezone): - def __init__(self, hours=0, minutes=0): - self.offset = datetime.timedelta(hours=hours, minutes=minutes) - total_seconds = self.offset.days * 86400 + self.offset.seconds - self.str = '{0:s}{1:02d}:{2:02d}'.format( - '+' if total_seconds >= 0 else '-', - int(abs(total_seconds) / 3600), - int(abs(total_seconds) % 3600 / 60)) - def __repr__(self): - return DateTimeType.fixed_timezone.__repr__(self, self.str) - - UTC = utc_timezone() - EPOCH = datetime.datetime(1970, 1, 1, tzinfo=UTC) - - def __init__(self, formats=None, serialized_format=None, parser=None, - tzd='allow', convert_tz=False, drop_tzinfo=False, **kwargs): - # type: (...) 
-> datetime.datetime - - if tzd not in ('require', 'allow', 'utc', 'reject'): - raise ValueError("DateTimeType.__init__() got an invalid value for parameter 'tzd'") - self.formats = listify(formats) - self.serialized_format = serialized_format or self.SERIALIZED_FORMAT - self.parser = parser - self.tzd = tzd - self.convert_tz = convert_tz - self.drop_tzinfo = drop_tzinfo - - super(DateTimeType, self).__init__(**kwargs) - - def _mock(self, context=None): - dt = datetime.datetime( - year=random.randrange(600) + 1900, - month=random.randrange(12) + 1, - day=random.randrange(28) + 1, - hour=random.randrange(24), - minute=random.randrange(60), - second=random.randrange(60), - microsecond=random.randrange(1000000)) - - if self.tzd == 'reject' or \ - self.drop_tzinfo or \ - self.tzd == 'allow' and random.randrange(2): - return dt - elif self.convert_tz: - return dt.replace(tzinfo=self.UTC) - else: - return dt.replace(tzinfo=self.offset_timezone(hours=random.randrange(-12, 15), - minutes=random.choice([0, 30, 45]))) - - def to_native(self, value, context=None): - - if isinstance(value, datetime.datetime): - if value.tzinfo is None: - if not self.drop_tzinfo: - if self.tzd == 'require': - raise ConversionError(self.messages['tzd_require'].format(value)) - if self.tzd == 'utc': - value = value.replace(tzinfo=self.UTC) - else: - if self.tzd == 'reject': - raise ConversionError(self.messages['tzd_reject'].format(value)) - if self.convert_tz: - value = value.astimezone(self.UTC) - if self.drop_tzinfo: - value = value.replace(tzinfo=None) - return value - - if self.formats: - # Delegate to datetime.datetime.strptime() using provided format strings. - for fmt in self.formats: - try: - dt = datetime.datetime.strptime(value, fmt) - break - except (ValueError, TypeError): - continue - else: - raise ConversionError(self.messages['parse_formats'].format(value, ", ".join(self.formats))) - elif self.parser: - # Delegate to external parser. 
- try: - dt = self.parser(value) - except: - raise ConversionError(self.messages['parse_external'].format(value)) - else: - # Use built-in parser. - try: - value = float(value) - except ValueError: - dt = self.from_string(value) - except TypeError: - raise ConversionError(self.messages['parse'].format(value)) - else: - dt = self.from_timestamp(value) - if not dt: - raise ConversionError(self.messages['parse'].format(value)) - - if dt.tzinfo is None: - if self.tzd == 'require': - raise ConversionError(self.messages['parse_tzd_require'].format(value)) - if self.tzd == 'utc' and not self.drop_tzinfo: - dt = dt.replace(tzinfo=self.UTC) - else: - if self.tzd == 'reject': - raise ConversionError(self.messages['parse_tzd_reject'].format(value)) - if self.convert_tz: - dt = dt.astimezone(self.UTC) - if self.drop_tzinfo: - dt = dt.replace(tzinfo=None) - - return dt - - def from_string(self, value): - match = self.REGEX.match(value) - if not match: - return None - parts = dict(((k, v) for k, v in match.groupdict().items() if v is not None)) - p = lambda name: int(parts.get(name, 0)) - microsecond = p('sec_frac') and p('sec_frac') * 10 ** (6 - len(parts['sec_frac'])) - if 'tzd_utc' in parts: - tz = self.UTC - elif 'tzd_offset' in parts: - tz_sign = 1 if parts['tzd_sign'] == '+' else -1 - tz_offset = (p('tzd_hour') * 60 + p('tzd_minute')) * tz_sign - if tz_offset == 0: - tz = self.UTC - else: - tz = self.offset_timezone(minutes=tz_offset) - else: - tz = None - try: - return datetime.datetime(p('year'), p('month'), p('day'), - p('hour'), p('minute'), p('second'), - microsecond, tz) - except (ValueError, TypeError): - return None - - def from_timestamp(self, value): - try: - return datetime.datetime(1970, 1, 1, tzinfo=self.UTC) + datetime.timedelta(seconds=value) - except (ValueError, TypeError): - return None - - def to_primitive(self, value, context=None): - if callable(self.serialized_format): - return self.serialized_format(value) - return 
value.strftime(self.serialized_format) - - def validate_tz(self, value, context=None): - if value.tzinfo is None: - if not self.drop_tzinfo: - if self.tzd == 'require': - raise ValidationError(self.messages['validate_tzd_require']) - if self.tzd == 'utc': - raise ValidationError(self.messages['validate_utc_none']) - else: - if self.drop_tzinfo: - raise ValidationError(self.messages['validate_tzd_reject']) - if self.tzd == 'reject': - raise ValidationError(self.messages['validate_tzd_reject']) - if self.convert_tz \ - and value.tzinfo.utcoffset(value) != self.TIMEDELTA_ZERO: - raise ValidationError(self.messages['validate_utc_wrong']) - - -class UTCDateTimeType(DateTimeType): - - """A variant of ``DateTimeType`` that normalizes everything to UTC and stores values - as naive ``datetime`` instances. By default sets ``tzd='utc'``, ``convert_tz=True``, - and ``drop_tzinfo=True``. The standard export format always includes the UTC time - zone designator ``"Z"``. - """ - - SERIALIZED_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ' - - def __init__(self, formats=None, parser=None, tzd='utc', convert_tz=True, drop_tzinfo=True, **kwargs): - # type: (...) -> datetime.datetime - super(UTCDateTimeType, self).__init__(formats=formats, parser=parser, tzd=tzd, - convert_tz=convert_tz, drop_tzinfo=drop_tzinfo, **kwargs) - - -class TimestampType(DateTimeType): - - """A variant of ``DateTimeType`` that exports itself as a Unix timestamp - instead of an ISO 8601 string. Always sets ``tzd='require'`` and - ``convert_tz=True``. - """ - - primitive_type = float - - def __init__(self, formats=None, parser=None, drop_tzinfo=False, **kwargs): - # type: (...) 
-> datetime.datetime - super(TimestampType, self).__init__(formats=formats, parser=parser, tzd='require', - convert_tz=True, drop_tzinfo=drop_tzinfo, **kwargs) - - def to_primitive(self, value, context=None): - if value.tzinfo is None: - value = value.replace(tzinfo=self.UTC) - else: - value = value.astimezone(self.UTC) - delta = value - self.EPOCH - return delta.total_seconds() - - -class TimedeltaType(BaseType): - - """Converts Python Timedelta objects into the corresponding value in seconds. - """ - - primitive_type = float - native_type = datetime.timedelta - - MESSAGES = { - 'convert': _("Couldn't interpret '{0}' value as Timedelta."), - } - - DAYS = 'days' - SECONDS = 'seconds' - MICROSECONDS = 'microseconds' - MILLISECONDS = 'milliseconds' - MINUTES = 'minutes' - HOURS = 'hours' - WEEKS = 'weeks' - - def __init__(self, precision='seconds', **kwargs): - # type: (...) -> datetime.timedelta - precision = precision.lower() - units = (self.DAYS, self.SECONDS, self.MICROSECONDS, self.MILLISECONDS, - self.MINUTES, self.HOURS, self.WEEKS) - if precision not in units: - raise ValueError("TimedeltaType.__init__() got an invalid value for parameter 'precision'") - self.precision = precision - super(TimedeltaType, self).__init__(**kwargs) - - def _mock(self, context=None): - return datetime.timedelta(seconds=random.random() * 1000) - - def to_native(self, value, context=None): - if isinstance(value, datetime.timedelta): - return value - try: - return datetime.timedelta(**{self.precision: float(value)}) - except (ValueError, TypeError): - raise ConversionError(self.messages['convert'].format(value)) - - def to_primitive(self, value, context=None): - base_unit = datetime.timedelta(**{self.precision: 1}) - return int(value.total_seconds() / base_unit.total_seconds()) - - -class GeoPointType(BaseType): - - """A list storing a latitude and longitude. 
- """ - - primitive_type = list - native_type = list - - MESSAGES = { - 'point_min': _("{0} value {1} should be greater than or equal to {2}."), - 'point_max': _("{0} value {1} should be less than or equal to {2}."), - } - - def _mock(self, context=None): - return (random.randrange(-90, 90), random.randrange(-180, 180)) - - @classmethod - def _normalize(cls, value): - if isinstance(value, dict): - # py3: ensure list and not view - return list(value.values()) - else: - return list(value) - - def to_native(self, value, context=None): - """Make sure that a geo-value is of type (x, y) - """ - if not isinstance(value, (tuple, list, dict)): - raise ConversionError(_('GeoPointType can only accept tuples, lists, or dicts')) - elements = self._normalize(value) - if not len(elements) == 2: - raise ConversionError(_('Value must be a two-dimensional point')) - if not all(isinstance(v, (float, int)) for v in elements): - raise ConversionError(_('Both values in point must be float or int')) - return value - - def validate_range(self, value, context=None): - latitude, longitude = self._normalize(value) - if latitude < -90: - raise ValidationError( - self.messages['point_min'].format('Latitude', latitude, '-90') - ) - if latitude > 90: - raise ValidationError( - self.messages['point_max'].format('Latitude', latitude, '90') - ) - if longitude < -180: - raise ValidationError( - self.messages['point_min'].format('Longitude', longitude, -180) - ) - if longitude > 180: - raise ValidationError( - self.messages['point_max'].format('Longitude', longitude, 180) - ) - - -class MultilingualStringType(BaseType): - - """ - A multilanguage string field, stored as a dict with {'locale': 'localized_value'}. - - Minimum and maximum lengths apply to each of the localized values. - - At least one of ``default_locale`` or ``context.app_data['locale']`` must be defined - when calling ``.to_primitive``. 
- - """ - - primitive_type = str - native_type = str - - allow_casts = (int, bytes) - - MESSAGES = { - 'convert': _("Couldn't interpret value as string."), - 'max_length': _("String value in locale {0} is too long."), - 'min_length': _("String value in locale {0} is too short."), - 'locale_not_found': _("No requested locale was available."), - 'no_locale': _("No default or explicit locales were given."), - 'regex_locale': _("Name of locale {0} did not match validation regex."), - 'regex_localized': _("String value in locale {0} did not match validation regex."), - } - - LOCALE_REGEX = r'^[a-z]{2}(:?_[A-Z]{2})?$' - - def __init__(self, regex=None, max_length=None, min_length=None, - default_locale=None, locale_regex=LOCALE_REGEX, **kwargs): - self.regex = re.compile(regex) if regex else None - self.max_length = max_length - self.min_length = min_length - self.default_locale = default_locale - self.locale_regex = re.compile(locale_regex) if locale_regex else None - - super(MultilingualStringType, self).__init__(**kwargs) - - def _mock(self, context=None): - return random_string(get_value_in(self.min_length, self.max_length)) - - def to_native(self, value, context=None): - """Make sure a MultilingualStringType value is a dict or None.""" - - if not (value is None or isinstance(value, dict)): - raise ConversionError(_('Value must be a dict or None')) - - return value - - def to_primitive(self, value, context=None): - """ - Use a combination of ``default_locale`` and ``context.app_data['locale']`` to return - the best localized string. 
- - """ - if value is None: - return None - - context_locale = None - if context and 'locale' in context.app_data: - context_locale = context.app_data['locale'] - - # Build a list of all possible locales to try - possible_locales = [] - for locale in (context_locale, self.default_locale): - if not locale: - continue - - if isinstance(locale, string_type): - possible_locales.append(locale) - else: - possible_locales.extend(locale) - - if not possible_locales: - raise ConversionError(self.messages['no_locale']) - - for locale in possible_locales: - if locale in value: - localized = value[locale] - break - else: - raise ConversionError(self.messages['locale_not_found']) - - if not isinstance(localized, str): - if isinstance(localized, self.allow_casts): - if isinstance(localized, bytes): - localized = str(localized, 'utf-8') - else: - localized = str(localized) - else: - raise ConversionError(self.messages['convert']) - - return localized - - def validate_length(self, value, context=None): - for locale, localized in value.items(): - len_of_value = len(localized) if localized else 0 - - if self.max_length is not None and len_of_value > self.max_length: - raise ValidationError(self.messages['max_length'].format(locale)) - - if self.min_length is not None and len_of_value < self.min_length: - raise ValidationError(self.messages['min_length'].format(locale)) - - def validate_regex(self, value, context=None): - if self.regex is None and self.locale_regex is None: - return - - for locale, localized in value.items(): - if self.regex is not None and self.regex.match(localized) is None: - raise ValidationError( - self.messages['regex_localized'].format(locale)) - - if self.locale_regex is not None and self.locale_regex.match(locale) is None: - raise ValidationError( - self.messages['regex_locale'].format(locale)) - - -if PY2: - # Python 2 names cannot be unicode - __all__ = [n.encode('ascii') for n in __all__] diff --git a/solnlib/packages/schematics/types/compound.py 
b/solnlib/packages/schematics/types/compound.py deleted file mode 100644 index bc7b33fd..00000000 --- a/solnlib/packages/schematics/types/compound.py +++ /dev/null @@ -1,453 +0,0 @@ -# -*- coding: utf-8 -*- - -from __future__ import unicode_literals, absolute_import - -try: - import typing -except ImportError: - pass -else: - T = typing.TypeVar("T") - -from collections import Iterable, Sequence, Mapping -import itertools - -from ..common import * -from ..exceptions import * -from ..transforms import ( - export_loop, - get_import_context, get_export_context, - to_native_converter, to_primitive_converter) -from ..translator import _ -from ..util import get_all_subclasses, import_string - -from .base import BaseType, get_value_in - -__all__ = ['CompoundType', 'MultiType', 'ModelType', 'ListType', 'DictType', - 'PolyModelType'] - - -class CompoundType(BaseType): - - def __init__(self, **kwargs): - super(CompoundType, self).__init__(**kwargs) - self.is_compound = True - try: - self.field.parent_field = self - except AttributeError: - pass - - def _setup(self, field_name, owner_model): - # Recursively set up inner fields. 
- if hasattr(self, 'field'): - self.field._setup(None, owner_model) - super(CompoundType, self)._setup(field_name, owner_model) - - def convert(self, value, context=None): - context = context or get_import_context() - return self._convert(value, context) - - def _convert(self, value, context): - raise NotImplementedError - - def export(self, value, format, context=None): - context = context or get_export_context() - return self._export(value, format, context) - - def _export(self, value, format, context): - raise NotImplementedError - - def to_native(self, value, context=None): - context = context or get_export_context(to_native_converter) - return to_native_converter(self, value, context) - - def to_primitive(self, value, context=None): - context = context or get_export_context(to_primitive_converter) - return to_primitive_converter(self, value, context) - - def _init_field(self, field, options): - """ - Instantiate the inner field that represents each element within this compound type. - In case the inner field is itself a compound type, its inner field can be provided - as the ``nested_field`` keyword argument. - """ - if not isinstance(field, BaseType): - nested_field = options.pop('nested_field', None) or options.pop('compound_field', None) - if nested_field: - field = field(field=nested_field, **options) - else: - field = field(**options) - return field - -MultiType = CompoundType - - -class ModelType(CompoundType): - """A field that can hold an instance of the specified model.""" - - primitive_type = dict - - @property - def native_type(self): - return self.model_class - - @property - def fields(self): - return self.model_class.fields - - @property - def model_class(self): - if self._model_class: - return self._model_class - - model_class = import_string(self.model_name) - self._model_class = model_class - return model_class - - def __init__(self, - model_spec, # type: typing.Type[T] - **kwargs): - # type: (...) 
-> T - - if isinstance(model_spec, ModelMeta): - self._model_class = model_spec - self.model_name = self.model_class.__name__ - elif isinstance(model_spec, string_type): - self._model_class = None - self.model_name = model_spec - else: - raise TypeError("ModelType: Expected a model, got an argument " - "of the type '{}'.".format(model_spec.__class__.__name__)) - - super(ModelType, self).__init__(**kwargs) - - def _repr_info(self): - return self.model_class.__name__ - - def _mock(self, context=None): - return self.model_class.get_mock_object(context) - - def _setup(self, field_name, owner_model): - # Resolve possible name-based model reference. - if not self._model_class: - if self.model_name == owner_model.__name__: - self._model_class = owner_model - else: - pass # Intentionally left blank, it will be setup later. - super(ModelType, self)._setup(field_name, owner_model) - - def pre_setattr(self, value): - if value is not None \ - and not isinstance(value, Model): - if not isinstance(value, dict): - raise ConversionError(_('Model conversion requires a model or dict')) - value = self.model_class(value) - return value - - def _convert(self, value, context): - field_model_class = self.model_class - if isinstance(value, field_model_class): - model_class = type(value) - elif isinstance(value, dict): - model_class = field_model_class - else: - raise ConversionError( - _("Input must be a mapping or '%s' instance") % field_model_class.__name__) - if context.convert and context.oo: - return model_class(value, context=context) - else: - return model_class.convert(value, context=context) - - def _export(self, value, format, context): - if isinstance(value, Model): - model_class = type(value) - else: - model_class = self.model_class - return export_loop(model_class, value, context=context) - - -class ListType(CompoundType): - """A field for storing a list of items, all of which must conform to the type - specified by the ``field`` parameter. - - Use it like this:: - - ... 
- categories = ListType(StringType) - """ - - primitive_type = list - native_type = list - - def __init__(self, - field, # type: T - min_size=None, max_size=None, **kwargs): - # type: (...) -> typing.List[T] - - self.field = self._init_field(field, kwargs) - self.min_size = min_size - self.max_size = max_size - - validators = [self.check_length] + kwargs.pop("validators", []) - - super(ListType, self).__init__(validators=validators, **kwargs) - - @property - def model_class(self): - return self.field.model_class - - def _repr_info(self): - return self.field.__class__.__name__ - - def _mock(self, context=None): - random_length = get_value_in(self.min_size, self.max_size) - - return [self.field._mock(context) for dummy in range(random_length)] - - def _coerce(self, value): - if isinstance(value, list): - return value - elif isinstance(value, (string_type, Mapping)): # unacceptable iterables - pass - elif isinstance(value, Sequence): - return value - elif isinstance(value, Iterable): - return value - raise ConversionError(_('Could not interpret the value as a list')) - - def _convert(self, value, context): - value = self._coerce(value) - data = [] - errors = {} - for index, item in enumerate(value): - try: - data.append(context.field_converter(self.field, item, context)) - except BaseError as exc: - errors[index] = exc - if errors: - raise CompoundError(errors) - return data - - def check_length(self, value, context): - list_length = len(value) if value else 0 - - if self.min_size is not None and list_length < self.min_size: - message = ({ - True: _('Please provide at least %d item.'), - False: _('Please provide at least %d items.'), - }[self.min_size == 1]) % self.min_size - raise ValidationError(message) - - if self.max_size is not None and list_length > self.max_size: - message = ({ - True: _('Please provide no more than %d item.'), - False: _('Please provide no more than %d items.'), - }[self.max_size == 1]) % self.max_size - raise ValidationError(message) - - def 
_export(self, list_instance, format, context): - """Loops over each item in the model and applies either the field - transform or the multitype transform. Essentially functions the same - as `transforms.export_loop`. - """ - data = [] - _export_level = self.field.get_export_level(context) - if _export_level == DROP: - return data - for value in list_instance: - shaped = self.field.export(value, format, context) - if shaped is None: - if _export_level <= NOT_NONE: - continue - elif self.field.is_compound and len(shaped) == 0: - if _export_level <= NONEMPTY: - continue - data.append(shaped) - return data - - -class DictType(CompoundType): - """A field for storing a mapping of items, the values of which must conform to the type - specified by the ``field`` parameter. - - Use it like this:: - - ... - categories = DictType(StringType) - - """ - - primitive_type = dict - native_type = dict - - def __init__(self, field, coerce_key=None, **kwargs): - # type: (...) -> typing.Dict[str, T] - - self.field = self._init_field(field, kwargs) - self.coerce_key = coerce_key or str - super(DictType, self).__init__(**kwargs) - - @property - def model_class(self): - return self.field.model_class - - def _repr_info(self): - return self.field.__class__.__name__ - - def _convert(self, value, context, safe=False): - if not isinstance(value, Mapping): - raise ConversionError(_('Only mappings may be used in a DictType')) - - data = {} - errors = {} - for k, v in iteritems(value): - try: - data[self.coerce_key(k)] = context.field_converter(self.field, v, context) - except BaseError as exc: - errors[k] = exc - if errors: - raise CompoundError(errors) - return data - - def _export(self, dict_instance, format, context): - """Loops over each item in the model and applies either the field - transform or the multitype transform. Essentially functions the same - as `transforms.export_loop`. 
- """ - data = {} - _export_level = self.field.get_export_level(context) - if _export_level == DROP: - return data - for key, value in iteritems(dict_instance): - shaped = self.field.export(value, format, context) - if shaped is None: - if _export_level <= NOT_NONE: - continue - elif self.field.is_compound and len(shaped) == 0: - if _export_level <= NONEMPTY: - continue - data[key] = shaped - return data - - -class PolyModelType(CompoundType): - """A field that accepts an instance of any of the specified models.""" - - primitive_type = dict - native_type = None # cannot be determined from a PolyModelType instance - - def __init__(self, model_spec, **kwargs): - - if isinstance(model_spec, (ModelMeta, string_type)): - self.model_classes = (model_spec,) - allow_subclasses = True - elif isinstance(model_spec, Iterable): - self.model_classes = tuple(model_spec) - allow_subclasses = False - else: - raise Exception("The first argument to PolyModelType.__init__() " - "must be a model or an iterable.") - - self.claim_function = kwargs.pop("claim_function", None) - self.allow_subclasses = kwargs.pop("allow_subclasses", allow_subclasses) - - CompoundType.__init__(self, **kwargs) - - def _setup(self, field_name, owner_model): - # Resolve possible name-based model references. 
- resolved_classes = [] - for m in self.model_classes: - if isinstance(m, string_type): - if m == owner_model.__name__: - resolved_classes.append(owner_model) - else: - raise Exception("PolyModelType: Unable to resolve model '{}'.".format(m)) - else: - resolved_classes.append(m) - self.model_classes = tuple(resolved_classes) - super(PolyModelType, self)._setup(field_name, owner_model) - - def is_allowed_model(self, model_instance): - if self.allow_subclasses: - if isinstance(model_instance, self.model_classes): - return True - else: - if model_instance.__class__ in self.model_classes: - return True - return False - - def _convert(self, value, context): - - if value is None: - return None - - if not context.validate: - if self.is_allowed_model(value): - return value - if not isinstance(value, dict): - if len(self.model_classes) > 1: - instanceof_msg = 'one of: {}'.format(', '.join( - cls.__name__ for cls in self.model_classes)) - else: - instanceof_msg = self.model_classes[0].__name__ - raise ConversionError(_('Please use a mapping for this field or ' - 'an instance of {}').format(instanceof_msg)) - - model_class = self.find_model(value) - return model_class(value, context=context) - - def find_model(self, data): - """Finds the intended type by consulting potential classes or `claim_function`.""" - - if self.claim_function: - kls = self.claim_function(self, data) - if not kls: - raise Exception("Input for polymorphic field did not match any model") - return kls - - fallback = None - matching_classes = [] - for kls in self._get_candidates(): - try: - # If a model defines a _claim_polymorphic method, use - # it to see if the model matches the data. - kls_claim = kls._claim_polymorphic - except AttributeError: - # The first model that doesn't define the hook can be - # used as a default if there's no match. 
- if not fallback: - fallback = kls - else: - if kls_claim(data): - matching_classes.append(kls) - - if not matching_classes and fallback: - return fallback - - elif len(matching_classes) != 1: - raise Exception("Got ambiguous input for polymorphic field") - - return matching_classes[0] - - def _export(self, model_instance, format, context): - - model_class = model_instance.__class__ - if not self.is_allowed_model(model_instance): - raise Exception("Cannot export: {} is not an allowed type".format(model_class)) - - return model_instance.export(context=context) - - def _get_candidates(self): - candidates = self.model_classes - - if self.allow_subclasses: - candidates = itertools.chain.from_iterable( - ([m] + get_all_subclasses(m) for m in candidates) - ) - - return candidates - - -if PY2: - # Python 2 names cannot be unicode - __all__ = [n.encode('ascii') for n in __all__] diff --git a/solnlib/packages/schematics/types/net.py b/solnlib/packages/schematics/types/net.py deleted file mode 100644 index fc0e21fb..00000000 --- a/solnlib/packages/schematics/types/net.py +++ /dev/null @@ -1,289 +0,0 @@ -# -*- coding: utf-8 -*- - -from __future__ import unicode_literals, absolute_import - -import random -import re - -try: # PY3 - from urllib.request import urlopen - from urllib.parse import urlunsplit, quote as urlquote - from urllib.error import URLError -except ImportError: # PY2 - from urllib2 import urlopen, URLError - from urlparse import urlunsplit - from urllib import quote as urlquote - -from ..common import * -from ..exceptions import ValidationError, StopValidationError -from ..translator import _ - -from .base import StringType, fill_template - -__all__ = ['IPAddressType', 'IPv4Type', 'IPv6Type', 'MACAddressType', 'URLType', 'EmailType'] - - -### Character ranges - -HEX = '0-9A-F' -ALPHA = 'A-Z' -ALPHANUM = 'A-Z0-9' - - -### IP address patterns - -IPV4_OCTET = '( 25[0-5] | 2[0-4][0-9] | [0-1]?[0-9]{1,2} )' -IPV4 = r'( ((%(oct)s\.){3} %(oct)s) )' % {'oct': 
IPV4_OCTET} - -IPV6_H16 = '[%s]{1,4}' % HEX -IPV6_L32 = '(%(h16)s:%(h16)s|%(ipv4)s)' % {'h16': IPV6_H16, 'ipv4': IPV4} -IPV6 = r"""( - (%(h16)s:){6}%(l32)s | - :: (%(h16)s:){5}%(l32)s | - ( %(h16)s )? :: (%(h16)s:){4}%(l32)s | - ( (%(h16)s:){,1}%(h16)s )? :: (%(h16)s:){3}%(l32)s | - ( (%(h16)s:){,2}%(h16)s )? :: (%(h16)s:){2}%(l32)s | - ( (%(h16)s:){,3}%(h16)s )? :: (%(h16)s:){1}%(l32)s | - ( (%(h16)s:){,4}%(h16)s )? :: %(l32)s | - ( (%(h16)s:){,5}%(h16)s )? :: %(h16)s | - ( (%(h16)s:){,6}%(h16)s )? :: )""" % {'h16': IPV6_H16, - 'l32': IPV6_L32} - - -class IPAddressType(StringType): - """A field that stores a valid IPv4 or IPv6 address.""" - - VERSION = None - REGEX = re.compile('^%s|%s$' % (IPV4, IPV6), re.I + re.X) - - @classmethod - def valid_ip(cls, value): - return bool(cls.REGEX.match(value)) - - def validate_(self, value, context=None): - if not self.valid_ip(value): - raise ValidationError(_('Invalid IP%s address') % (self.VERSION or '')) - - def _mock(self, context=None): - return random.choice([IPv4Type, IPv6Type])(required=self.required).mock() - - -class IPv4Type(IPAddressType): - """A field that stores a valid IPv4 address.""" - - VERSION = 'v4' - REGEX = re.compile('^%s$' % IPV4, re.I + re.X) - - def _mock(self, context=None): - return '.'.join(str(random.randrange(256)) for _ in range(4)) - - -class IPv6Type(IPAddressType): - """A field that stores a valid IPv6 address.""" - - VERSION = 'v6' - REGEX = re.compile(r'^%s$' % IPV6, re.I + re.X) - - def _mock(self, context=None): - return '2001:db8:' + ':'.join( - '%x' % (random.randrange(1 << 16)) for _ in range(6) - ) - - -### MAC address - -class MACAddressType(StringType): - """A field that stores a valid MAC address.""" - - REGEX = re.compile(r""" - ( - ^([0-9a-f]{2}[-]){5}([0-9a-f]{2})$ - |^([0-9a-f]{2}[:]){5}([0-9a-f]{2})$ - |^([0-9a-f]{12}) - |^([0-9a-f]{6}[-:]([0-9a-f]{6}))$ - |^([0-9a-f]{4}(\.[0-9a-f]{4}){2})$ - ) - """, re.I + re.X) - - def _mock(self, context=None): - return ':'.join('%02x' % 
(random.randrange(256)) for _ in range(6)) - - def validate_(self, value, context=None): - if not bool(self.REGEX.match(value)): - raise ValidationError(_('Invalid MAC address')) - - def to_primitive(self, value, context=None): - value = value.replace(':', '').replace('.', '').replace('-', '') - return ':'.join(value[i:i+2] for i in range(0, len(value), 2)) - - -### URI patterns - -GEN_DELIMS = set(':/?#[]@') -SUB_DELIMS = set('!$&\'()*+,;=') -UNRESERVED = set('-_.~') -PCHAR = SUB_DELIMS | UNRESERVED | set('%:@') -QUERY_EXTRAS = set('[]') # nonstandard - -VALID_CHARS = GEN_DELIMS | SUB_DELIMS | UNRESERVED | set('%') -VALID_CHAR_STRING = py_native_string(str.join('', VALID_CHARS)) -UNSAFE_CHAR_STRING = '\x00-\x20<>{}|"`\\^\x7F-\x9F' - -def _chrcls(allowed_chars): - """ - Given a subset of the URL-compatible special characters ``!#$%&'()*+,-./:;=?@[]_~``, - returns a regex character class matching any URL-compatible character apart from the - special characters not present in the provided set. - """ - return ('^' - + UNSAFE_CHAR_STRING - + str.join('', VALID_CHARS - allowed_chars).replace('%', '%%') - .replace(']', r'\]') - .replace('-', r'\-')) - -URI_PATTERNS = { - 'scheme' : r'[%s]+' % ('A-Z0-9.+-'), - 'user' : r'[%s]+' % _chrcls(UNRESERVED | SUB_DELIMS | set('%:')), - 'port' : r'[0-9]{2,5}', - 'host4' : IPV4, - 'host6' : r'[%s]+' % (HEX + ':'), - 'hostn' : r'[%s]+' % _chrcls(set('.-')), - 'path' : r'[%s]*' % _chrcls(PCHAR | set('/')), - 'query' : r'[%s]*' % _chrcls(PCHAR | set('/?') | QUERY_EXTRAS), - 'frag' : r'[%s]*' % _chrcls(PCHAR | set('/?')), -} - - -class URLType(StringType): - - """A field that validates the input as a URL. - - :param fqdn: - if ``True`` the validation function will ensure hostname in URL - is a Fully Qualified Domain Name. - :param verify_exists: - if ``True`` the validation function will make sure - the URL is accessible (server responds with HTTP 2xx). 
- """ - - MESSAGES = { - 'invalid_url': "Not a well-formed URL.", - 'not_found': "URL could not be retrieved.", - } - - URL_REGEX = re.compile(r"""^( - (?P %(scheme)s ) :// - ( (?P %(user)s ) @ )? - (\[ (?P %(host6)s ) ] - | (?P %(host4)s ) - | (?P %(hostn)s ) ) - ( : (?P %(port)s ) )? - (?P / %(path)s )? - (\? (?P %(query)s ) )? - (\# (?P %(frag)s ) )?)$ - """ % URI_PATTERNS, re.I + re.X) - - TLD_REGEX = re.compile(r'^( ([a-z]{2,}) | (xn--[a-z0-9]{4,}) )$', re.I + re.X) - - def __init__(self, fqdn=True, verify_exists=False, **kwargs): - self.schemes = ['http', 'https'] - self.fqdn = fqdn - self.verify_exists = verify_exists - super(URLType, self).__init__(**kwargs) - - def _mock(self, context=None): - return fill_template('http://a%s.ZZ', self.min_length, self.max_length) - - def valid_url(self, value): - match = self.URL_REGEX.match(value) - if not match: - return False - url = match.groupdict() - - if url['scheme'].lower() not in self.schemes: - return False - if url['host6']: - if IPv6Type.valid_ip(url['host6']): - return url - else: - return False - if url['host4']: - return url - - try: - hostname = url['hostn'].encode('ascii').decode('ascii') - except UnicodeError: - try: - hostname = url['hostn'].encode('idna').decode('ascii') - except UnicodeError: - return False - - if hostname[-1] == '.': - hostname = hostname[:-1] - if len(hostname) > 253: - return False - - labels = hostname.split('.') - for label in labels: - if not 0 < len(label) < 64: - return False - if '-' in (label[0], label[-1]): - return False - if self.fqdn: - if len(labels) == 1 \ - or not self.TLD_REGEX.match(labels[-1]): - return False - - url['hostn_enc'] = hostname - - return url - - def validate_(self, value, context=None): - url = self.valid_url(value) - if not url: - raise StopValidationError(self.messages['invalid_url']) - if self.verify_exists: - url_string = urlquote(urlunsplit(( - url['scheme'], - (url['host6'] or url['host4'] or url['hostn_enc']) + ':' + (url['port'] or ''), - 
url['path'], - url['query'], - url['frag']) - ).encode('utf-8'), safe=VALID_CHAR_STRING) - try: - urlopen(url_string) - except URLError: - raise StopValidationError(self.messages['not_found']) - - -class EmailType(StringType): - - """A field that validates input as an E-Mail-Address. - """ - - MESSAGES = { - 'email': _("Not a well-formed email address.") - } - - EMAIL_REGEX = re.compile(r"""^( - ( ( [%(atext)s]+ (\.[%(atext)s]+)* ) | ("( [%(qtext)s\s] | \\[%(vchar)s\s] )*") ) - @((?!-)[A-Z0-9-]{1,63}(?>> from schematics.models import serializable - >>> class Location(Model): - ... country_code = StringType() - ... @serializable - ... def country_name(self): - ... return {'us': 'United States'}[self.country_code] - ... - >>> location = Location({'country_code': 'us'}) - >>> location.serialize() - {'country_name': 'United States', 'country_code': 'us'} - >>> - :param type: - A custom subclass of `BaseType` for enforcing a certain type - on serialization. - :param serialized_name: - The name of this field in the serialized output. - """ - if isinstance(arg, FunctionType): - decorator = True - func = arg - serialized_type = BaseType - elif arg is None or isinstance(arg, (BaseType, TypeMeta)): - decorator = False - serialized_type = arg or kwargs.pop("type", BaseType) - else: - raise TypeError("The argument to 'serializable' must be a function or a type.") - - if isinstance(serialized_type, BaseType): - # `serialized_type` is already a type instance, - # so update it with the options found in `kwargs`. 
- serialized_type._set_export_level(kwargs.pop('export_level', None), - kwargs.pop("serialize_when_none", None)) - for name, value in kwargs.items(): - setattr(serialized_type, name, value) - else: - serialized_type = serialized_type(**kwargs) - - if decorator: - return Serializable(type=serialized_type, fget=func) - else: - return partial(Serializable, type=serialized_type) - - -def calculated(type, fget, fset=None): - return Serializable(type=type, fget=fget, fset=fset) - - -class Serializable(object): - - def __init__(self, fget, type, fset=None): - self.type = type - self.fget = fget - self.fset = fset - - def __getattr__(self, name): - return getattr(self.type, name) - - def __get__(self, instance, cls): - if instance is None: - return self - else: - value = self.fget(instance) - if value is Undefined: - raise UndefinedValueError(instance, self.name) - else: - return value - - def __set__(self, instance, value): - if self.fset is None: - raise AttributeError("can't set attribute %s" % self.name) - value = self.type.pre_setattr(value) - self.fset(instance, value) - - def setter(self, fset): - self.fset = fset - return self - - def _repr_info(self): - return self.type.__class__.__name__ - - def __deepcopy__(self, memo): - return self.__class__(self.fget, type=copy.deepcopy(self.type), fset=self.fset) - - def __repr__(self): - type_ = "%s(%s) instance" % (self.__class__.__name__, self._repr_info() or '') - model = " on %s" % self.owner_model.__name__ if self.owner_model else '' - field = " as '%s'" % self.name if self.name else '' - return "<%s>" % (type_ + model + field) - - -if PY2: - # Python 2 names cannot be unicode - __all__ = [n.encode('ascii') for n in __all__] diff --git a/solnlib/packages/schematics/types/union.py b/solnlib/packages/schematics/types/union.py deleted file mode 100644 index 5d21119c..00000000 --- a/solnlib/packages/schematics/types/union.py +++ /dev/null @@ -1,102 +0,0 @@ -# -*- coding: utf-8 -*- - -from __future__ import 
unicode_literals, absolute_import - -import inspect -from collections import OrderedDict - -from ..common import * -from ..exceptions import ConversionError -from ..translator import _ -from ..transforms import get_import_context, get_export_context -from .base import BaseType - -__all__ = ['UnionType'] - - -def _valid_init_args(type_): - args = set() - for cls in type_.__mro__: - args.update(inspect.getargspec(cls.__init__).args[1:]) - if cls is BaseType: - break - return args - -def _filter_kwargs(valid_args, kwargs): - return dict((k, v) for k, v in kwargs.items() if k in valid_args) - - -class UnionType(BaseType): - - types = None - - MESSAGES = { - 'convert': _("Couldn't interpret value '{0}' as any of {1}."), - } - - _baseclass_args = _valid_init_args(BaseType) - - def __init__(self, types=None, resolver=None, **kwargs): - - self._types = OrderedDict() - types = types or self.types - if resolver: - self.resolve = resolver - - for type_ in types: - if isinstance(type_, type) and issubclass(type_, BaseType): - type_ = type_(**_filter_kwargs(_valid_init_args(type_), kwargs)) - elif not isinstance(type_, BaseType): - raise TypeError("Got '%s' instance instead of a Schematics type" % type_.__class__.__name__) - self._types[type_.__class__] = type_ - self.typenames = tuple((cls.__name__ for cls in self._types)) - - super(UnionType, self).__init__(**_filter_kwargs(self._baseclass_args, kwargs)) - - def resolve(self, value, context): - for field in self._types.values(): - try: - value = field.convert(value, context) - except ConversionError: - pass - else: - return field, value - return None - - def _resolve(self, value, context): - response = self.resolve(value, context) - if isinstance(response, type): - field = self._types[response] - try: - response = field, field.convert(value, context) - except ConversionError: - pass - if isinstance(response, tuple): - return response - raise ConversionError(self.messages['convert'].format(value, self.typenames)) - - def 
convert(self, value, context=None): - context = context or get_import_context() - field, native_value = self._resolve(value, context) - return native_value - - def validate(self, value, context=None): - field, _ = self._resolve(value, context) - return field.validate(value, context) - - def _export(self, value, format, context=None): - field, _ = self._resolve(value, context) - return field._export(value, format, context) - - def to_native(self, value, context=None): - field, _ = self._resolve(value, context) - return field.to_native(value, context) - - def to_primitive(self, value, context=None): - field, _ = self._resolve(value, context) - return field.to_primitive(value, context) - - -if PY2: - # Python 2 names cannot be unicode - __all__ = [n.encode('ascii') for n in __all__] diff --git a/solnlib/packages/schematics/undefined.py b/solnlib/packages/schematics/undefined.py deleted file mode 100644 index 05c85edb..00000000 --- a/solnlib/packages/schematics/undefined.py +++ /dev/null @@ -1,60 +0,0 @@ -""" -A type and singleton value (like None) to represent fields that -have not been initialized. 
-""" - -from __future__ import unicode_literals, absolute_import - - -class UndefinedType(object): - - _instance = None - - def __str__(self): - return 'Undefined' - - def __repr__(self): - return 'Undefined' - - def __eq__(self, other): - return self is other - - def __ne__(self, other): - return self is not other - - def __bool__(self): - return False - - __nonzero__ = __bool__ - - def __lt__(self, other): - self._cmp_err(other, '<') - - def __gt__(self, other): - self._cmp_err(other, '>') - - def __le__(self, other): - self._cmp_err(other, '<=') - - def __ge__(self, other): - self._cmp_err(other, '>=') - - def _cmp_err(self, other, op): - raise TypeError("unorderable types: {0}() {1} {2}()".format( - self.__class__.__name__, op, other.__class__.__name__)) - - def __new__(cls, *args, **kwargs): - if cls._instance is None: - cls._instance = object.__new__(cls) - elif cls is not UndefinedType: - raise TypeError("type 'UndefinedType' is not an acceptable base type") - return cls._instance - - def __init__(self): - pass - - def __setattr__(self, name, value): - raise TypeError("'UndefinedType' object does not support attribute assignment") - - -Undefined = UndefinedType() diff --git a/solnlib/packages/schematics/util.py b/solnlib/packages/schematics/util.py deleted file mode 100644 index 5ce8d759..00000000 --- a/solnlib/packages/schematics/util.py +++ /dev/null @@ -1,176 +0,0 @@ -# -*- coding: utf-8 -*- - -from __future__ import unicode_literals, absolute_import - -import collections -import sys - -from .compat import * - -if PY2: - try: - from thread import get_ident - except ImportError: - from dummy_thread import get_ident -else: - try: - from _thread import get_ident - except ImportError: - from _dummy_thread import get_ident - -__all__ = ['get_ident', 'setdefault', 'Constant', 'listify', - 'get_all_subclasses', 'ImportStringError', 'import_string'] - - -def setdefault(obj, attr, value, search_mro=False, overwrite_none=False): - if search_mro: - exists = 
hasattr(obj, attr) - else: - exists = attr in obj.__dict__ - if exists and overwrite_none: - if getattr(obj, attr) is None: - exists = False - if exists: - value = getattr(obj, attr) - else: - setattr(obj, attr, value) - return value - - -class Constant(int): - - def __new__(cls, name, value): - return int.__new__(cls, value) - - def __init__(self, name, value): - self.name = name - int.__init__(self) - - def __repr__(self): - return self.name - - __str__ = __repr__ - - -def listify(value): - if isinstance(value, list): - return value - elif value is None: - return [] - elif isinstance(value, string_type): - return [value] - elif isinstance(value, collections.Sequence): - return list(value) - else: - return [value] - - -def get_all_subclasses(cls): - all_subclasses = [] - - for subclass in cls.__subclasses__(): - all_subclasses.append(subclass) - all_subclasses.extend(get_all_subclasses(subclass)) - - return all_subclasses - - -class ImportStringError(ImportError): - - """Provides information about a failed :func:`import_string` attempt. - - Code taken from werzeug BSD license at https://github.com/pallets/werkzeug/blob/master/LICENSE - """ - - #: String in dotted notation that failed to be imported. - import_name = None - #: Wrapped exception. - exception = None - - def __init__(self, import_name, exception): - self.import_name = import_name - self.exception = exception - - msg = ( - 'import_string() failed for %r. 
Possible reasons are:\n\n' - '- missing __init__.py in a package;\n' - '- package or module path not included in sys.path;\n' - '- duplicated package or module name taking precedence in ' - 'sys.path;\n' - '- missing module, class, function or variable;\n\n' - 'Debugged import:\n\n%s\n\n' - 'Original exception:\n\n%s: %s') - - name = '' - tracked = [] - for part in import_name.replace(':', '.').split('.'): - name += (name and '.') + part - imported = import_string(name, silent=True) - if imported: - tracked.append((name, getattr(imported, '__file__', None))) - else: - track = ['- %r found in %r.' % (n, i) for n, i in tracked] - track.append('- %r not found.' % name) - msg = msg % (import_name, '\n'.join(track), - exception.__class__.__name__, str(exception)) - break - - ImportError.__init__(self, msg) - - def __repr__(self): - return '<%s(%r, %r)>' % (self.__class__.__name__, self.import_name, - self.exception) - - -def import_string(import_name, silent=False): - """Imports an object based on a string. This is useful if you want to - use import paths as endpoints or something similar. An import path can - be specified either in dotted notation (``xml.sax.saxutils.escape``) - or with a colon as object delimiter (``xml.sax.saxutils:escape``). - - If `silent` is True the return value will be `None` if the import fails. - - Code taken from werzeug BSD license at https://github.com/pallets/werkzeug/blob/master/LICENSE - - :param import_name: the dotted name for the object to import. - :param silent: if set to `True` import errors are ignored and - `None` is returned instead. - :return: imported object - """ - # force the import name to automatically convert to strings - # __import__ is not able to handle unicode strings in the fromlist - # if the module is a package - import_name = str(import_name).replace(':', '.') - try: - try: - __import__(import_name) - except ImportError: - if '.' 
not in import_name: - raise - else: - return sys.modules[import_name] - - module_name, obj_name = import_name.rsplit('.', 1) - try: - module = __import__(module_name, None, None, [obj_name]) - except ImportError: - # support importing modules not yet set up by the parent module - # (or package for that matter) - module = import_string(module_name) - - try: - return getattr(module, obj_name) - except AttributeError as e: - raise ImportError(e) - - except ImportError as e: - if not silent: - reraise( - ImportStringError, - ImportStringError(import_name, e), - sys.exc_info()[2]) - - -if PY2: - # Python 2 names cannot be unicode - __all__ = [n.encode('ascii') for n in __all__] diff --git a/solnlib/packages/schematics/validate.py b/solnlib/packages/schematics/validate.py deleted file mode 100644 index bd996675..00000000 --- a/solnlib/packages/schematics/validate.py +++ /dev/null @@ -1,130 +0,0 @@ -# -*- coding: utf-8 -*- - -from __future__ import unicode_literals, absolute_import - -import inspect -import functools - -from .common import * -from .datastructures import Context -from .exceptions import FieldError, DataError -from .transforms import import_loop, validation_converter -from .undefined import Undefined -from .iteration import atoms - -__all__ = [] - - -def validate(schema, mutable, raw_data=None, trusted_data=None, - partial=False, strict=False, convert=True, context=None, **kwargs): - """ - Validate some untrusted data using a model. Trusted data can be passed in - the `trusted_data` parameter. - - :param schema: - The Schema to use as source for validation. - :param mutable: - A mapping or instance that can be changed during validation by Schema - functions. - :param raw_data: - A mapping or instance containing new data to be validated. - :param partial: - Allow partial data to validate; useful for PATCH requests. - Essentially drops the ``required=True`` arguments from field - definitions. Default: False - :param strict: - Complain about unrecognized keys. 
Default: False - :param trusted_data: - A ``dict``-like structure that may contain already validated data. - :param convert: - Controls whether to perform import conversion before validating. - Can be turned off to skip an unnecessary conversion step if all values - are known to have the right datatypes (e.g., when validating immediately - after the initial import). Default: True - - :returns: data - ``dict`` containing the valid raw_data plus ``trusted_data``. - If errors are found, they are raised as a ValidationError with a list - of errors attached. - """ - if raw_data is None: - raw_data = mutable - - context = context or get_validation_context(partial=partial, strict=strict, - convert=convert) - - errors = {} - try: - data = import_loop(schema, mutable, raw_data, trusted_data=trusted_data, - context=context, **kwargs) - except DataError as exc: - errors = dict(exc.errors) - data = exc.partial_data - - errors.update(_validate_model(schema, mutable, data, context)) - - if errors: - raise DataError(errors, data) - - return data - - -def _validate_model(schema, mutable, data, context): - """ - Validate data using model level methods. - - :param schema: - The Schema to validate ``data`` against. - :param mutable: - A mapping or instance that will be passed to the validator containing - the original data and that can be mutated. - :param data: - A dict with data to validate. Invalid items are removed from it. - :returns: - Errors of the fields that did not pass validation. 
- """ - errors = {} - invalid_fields = [] - - has_validator = lambda atom: ( - atom.value is not Undefined and - atom.name in schema._validator_functions - ) - for field_name, field, value in atoms(schema, data, filter=has_validator): - try: - schema._validator_functions[field_name](mutable, data, value, context) - except (FieldError, DataError) as exc: - serialized_field_name = field.serialized_name or field_name - errors[serialized_field_name] = exc.errors - invalid_fields.append(field_name) - - for field_name in invalid_fields: - data.pop(field_name) - - return errors - - -def get_validation_context(**options): - validation_options = { - 'field_converter': validation_converter, - 'partial': False, - 'strict': False, - 'convert': True, - 'validate': True, - 'new': False, - } - validation_options.update(options) - return Context(**validation_options) - - -def prepare_validator(func, argcount): - if isinstance(func, classmethod): - func = func.__get__(object).__func__ - if len(inspect.getargspec(func).args) < argcount: - @functools.wraps(func) - def newfunc(*args, **kwargs): - if not kwargs or kwargs.pop('context', 0) is 0: - args = args[:-1] - return func(*args, **kwargs) - return newfunc - return func diff --git a/solnlib/packages/sortedcontainers/LICENSE b/solnlib/packages/sortedcontainers/LICENSE deleted file mode 100644 index f89eee22..00000000 --- a/solnlib/packages/sortedcontainers/LICENSE +++ /dev/null @@ -1,14 +0,0 @@ -Copyright 2014-2016 Grant Jenks - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. - diff --git a/solnlib/packages/sortedcontainers/__init__.py b/solnlib/packages/sortedcontainers/__init__.py deleted file mode 100644 index 59436397..00000000 --- a/solnlib/packages/sortedcontainers/__init__.py +++ /dev/null @@ -1,74 +0,0 @@ -"""Sorted Containers -- Sorted List, Sorted Dict, Sorted Set - -Sorted Containers is an Apache2 licensed containers library, written in -pure-Python, and fast as C-extensions. - -Python's standard library is great until you need a sorted collections -type. Many will attest that you can get really far without one, but the moment -you **really need** a sorted list, dict, or set, you're faced with a dozen -different implementations, most using C-extensions without great documentation -and benchmarking. - -In Python, we can do better. And we can do it in pure-Python! - -:: - - >>> from sortedcontainers import SortedList - >>> sl = SortedList(['e', 'a', 'c', 'd', 'b']) - >>> sl - SortedList(['a', 'b', 'c', 'd', 'e']) - >>> sl *= 1000000 - >>> sl.count('c') - 1000000 - >>> sl[-3:] - ['e', 'e', 'e'] - >>> from sortedcontainers import SortedDict - >>> sd = SortedDict({'c': 3, 'a': 1, 'b': 2}) - >>> sd - SortedDict({'a': 1, 'b': 2, 'c': 3}) - >>> sd.popitem(index=-1) - ('c', 3) - >>> from sortedcontainers import SortedSet - >>> ss = SortedSet('abracadabra') - >>> ss - SortedSet(['a', 'b', 'c', 'd', 'r']) - >>> ss.bisect_left('c') - 2 - -Sorted Containers takes all of the work out of Python sorted types - making -your deployment and use of Python easy. There's no need to install a C compiler -or pre-build and distribute custom extensions. Performance is a feature and -testing has 100% coverage with unit tests and hours of stress. - -:copyright: (c) 2014-2018 by Grant Jenks. -:license: Apache 2.0, see LICENSE for more details. 
- -""" - - -from .sortedlist import SortedList, SortedKeyList, SortedListWithKey -from .sortedset import SortedSet -from .sorteddict import ( - SortedDict, - SortedKeysView, - SortedItemsView, - SortedValuesView, -) - -__all__ = [ - 'SortedList', - 'SortedKeyList', - 'SortedListWithKey', - 'SortedDict', - 'SortedKeysView', - 'SortedItemsView', - 'SortedValuesView', - 'SortedSet', -] - -__title__ = 'sortedcontainers' -__version__ = '2.1.0' -__build__ = 0x020100 -__author__ = 'Grant Jenks' -__license__ = 'Apache 2.0' -__copyright__ = '2014-2018, Grant Jenks' diff --git a/solnlib/packages/sortedcontainers/sorteddict.py b/solnlib/packages/sortedcontainers/sorteddict.py deleted file mode 100644 index ba9ed72d..00000000 --- a/solnlib/packages/sortedcontainers/sorteddict.py +++ /dev/null @@ -1,800 +0,0 @@ -"""Sorted Dict -============== - -:doc:`Sorted Containers` is an Apache2 licensed Python sorted -collections library, written in pure-Python, and fast as C-extensions. The -:doc:`introduction` is the best way to get started. - -Sorted dict implementations: - -.. currentmodule:: sortedcontainers - -* :class:`SortedDict` -* :class:`SortedKeysView` -* :class:`SortedItemsView` -* :class:`SortedValuesView` - -""" - -import sys -import warnings - -from .sortedlist import SortedList, recursive_repr -from .sortedset import SortedSet - -############################################################################### -# BEGIN Python 2/3 Shims -############################################################################### - -try: - from collections.abc import ItemsView, KeysView, ValuesView, Sequence -except ImportError: - from collections import ItemsView, KeysView, ValuesView, Sequence - -############################################################################### -# END Python 2/3 Shims -############################################################################### - - -class SortedDict(dict): - """Sorted dict is a sorted mutable mapping. 
- - Sorted dict keys are maintained in sorted order. The design of sorted dict - is simple: sorted dict inherits from dict to store items and maintains a - sorted list of keys. - - Sorted dict keys must be hashable and comparable. The hash and total - ordering of keys must not change while they are stored in the sorted dict. - - Mutable mapping methods: - - * :func:`SortedDict.__getitem__` (inherited from dict) - * :func:`SortedDict.__setitem__` - * :func:`SortedDict.__delitem__` - * :func:`SortedDict.__iter__` - * :func:`SortedDict.__len__` (inherited from dict) - - Methods for adding items: - - * :func:`SortedDict.setdefault` - * :func:`SortedDict.update` - - Methods for removing items: - - * :func:`SortedDict.clear` - * :func:`SortedDict.pop` - * :func:`SortedDict.popitem` - - Methods for looking up items: - - * :func:`SortedDict.__contains__` (inherited from dict) - * :func:`SortedDict.get` (inherited from dict) - * :func:`SortedDict.peekitem` - - Methods for views: - - * :func:`SortedDict.keys` - * :func:`SortedDict.items` - * :func:`SortedDict.values` - - Methods for miscellany: - - * :func:`SortedDict.copy` - * :func:`SortedDict.fromkeys` - * :func:`SortedDict.__reversed__` - * :func:`SortedDict.__eq__` (inherited from dict) - * :func:`SortedDict.__ne__` (inherited from dict) - * :func:`SortedDict.__repr__` - * :func:`SortedDict._check` - - Sorted list methods available (applies to keys): - - * :func:`SortedList.bisect_left` - * :func:`SortedList.bisect_right` - * :func:`SortedList.count` - * :func:`SortedList.index` - * :func:`SortedList.irange` - * :func:`SortedList.islice` - * :func:`SortedList._reset` - - Additional sorted list methods available, if key-function used: - - * :func:`SortedKeyList.bisect_key_left` - * :func:`SortedKeyList.bisect_key_right` - * :func:`SortedKeyList.irange_key` - - Sorted dicts may only be compared for equality and inequality. - - """ - def __init__(self, *args, **kwargs): - """Initialize sorted dict instance. 
- - Optional key-function argument defines a callable that, like the `key` - argument to the built-in `sorted` function, extracts a comparison key - from each dictionary key. If no function is specified, the default - compares the dictionary keys directly. The key-function argument must - be provided as a positional argument and must come before all other - arguments. - - Optional iterable argument provides an initial sequence of pairs to - initialize the sorted dict. Each pair in the sequence defines the key - and corresponding value. If a key is seen more than once, the last - value associated with it is stored in the new sorted dict. - - Optional mapping argument provides an initial mapping of items to - initialize the sorted dict. - - If keyword arguments are given, the keywords themselves, with their - associated values, are added as items to the dictionary. If a key is - specified both in the positional argument and as a keyword argument, - the value associated with the keyword is stored in the - sorted dict. - - Sorted dict keys must be hashable, per the requirement for Python's - dictionaries. Keys (or the result of the key-function) must also be - comparable, per the requirement for sorted lists. - - >>> d = {'alpha': 1, 'beta': 2} - >>> SortedDict([('alpha', 1), ('beta', 2)]) == d - True - >>> SortedDict({'alpha': 1, 'beta': 2}) == d - True - >>> SortedDict(alpha=1, beta=2) == d - True - - """ - if args and (args[0] is None or callable(args[0])): - _key = self._key = args[0] - args = args[1:] - else: - _key = self._key = None - - self._list = SortedList(key=_key) - - # Calls to super() are expensive so cache references to dict methods on - # sorted dict instances. 
- - _dict = super(SortedDict, self) - self._dict_clear = _dict.clear - self._dict_delitem = _dict.__delitem__ - self._dict_iter = _dict.__iter__ - self._dict_pop = _dict.pop - self._dict_setitem = _dict.__setitem__ - self._dict_update = _dict.update - - # Reaching through ``self._list`` repeatedly adds unnecessary overhead - # so cache references to sorted list methods. - - _list = self._list - self._list_add = _list.add - self._list_clear = _list.clear - self._list_iter = _list.__iter__ - self._list_reversed = _list.__reversed__ - self._list_pop = _list.pop - self._list_remove = _list.remove - self._list_update = _list.update - - # Expose some sorted list methods publicly. - - self.bisect_left = _list.bisect_left - self.bisect = _list.bisect_right - self.bisect_right = _list.bisect_right - self.index = _list.index - self.irange = _list.irange - self.islice = _list.islice - self._reset = _list._reset - - if _key is not None: - self.bisect_key_left = _list.bisect_key_left - self.bisect_key_right = _list.bisect_key_right - self.bisect_key = _list.bisect_key - self.irange_key = _list.irange_key - - self._update(*args, **kwargs) - - - @property - def key(self): - """Function used to extract comparison key from keys. - - Sorted dict compares keys directly when the key function is none. - - """ - return self._key - - - @property - def iloc(self): - """Cached reference of sorted keys view. - - Deprecated in version 2 of Sorted Containers. Use - :func:`SortedDict.keys` instead. - - """ - # pylint: disable=attribute-defined-outside-init - try: - return self._iloc - except AttributeError: - warnings.warn( - 'sorted_dict.iloc is deprecated.' - ' Use SortedDict.keys() instead.', - DeprecationWarning, - stacklevel=2, - ) - _iloc = self._iloc = SortedKeysView(self) - return _iloc - - - def clear(self): - - """Remove all items from sorted dict. 
- - Runtime complexity: `O(n)` - - """ - self._dict_clear() - self._list_clear() - - - def __delitem__(self, key): - """Remove item from sorted dict identified by `key`. - - ``sd.__delitem__(key)`` <==> ``del sd[key]`` - - Runtime complexity: `O(log(n))` -- approximate. - - >>> sd = SortedDict({'a': 1, 'b': 2, 'c': 3}) - >>> del sd['b'] - >>> sd - SortedDict({'a': 1, 'c': 3}) - >>> del sd['z'] - Traceback (most recent call last): - ... - KeyError: 'z' - - :param key: `key` for item lookup - :raises KeyError: if key not found - - """ - self._dict_delitem(key) - self._list_remove(key) - - - def __iter__(self): - """Return an iterator over the keys of the sorted dict. - - ``sd.__iter__()`` <==> ``iter(sd)`` - - Iterating the sorted dict while adding or deleting items may raise a - :exc:`RuntimeError` or fail to iterate over all keys. - - """ - return self._list_iter() - - - def __reversed__(self): - """Return a reverse iterator over the keys of the sorted dict. - - ``sd.__reversed__()`` <==> ``reversed(sd)`` - - Iterating the sorted dict while adding or deleting items may raise a - :exc:`RuntimeError` or fail to iterate over all keys. - - """ - return self._list_reversed() - - - def __setitem__(self, key, value): - """Store item in sorted dict with `key` and corresponding `value`. - - ``sd.__setitem__(key, value)`` <==> ``sd[key] = value`` - - Runtime complexity: `O(log(n))` -- approximate. - - >>> sd = SortedDict() - >>> sd['c'] = 3 - >>> sd['a'] = 1 - >>> sd['b'] = 2 - >>> sd - SortedDict({'a': 1, 'b': 2, 'c': 3}) - - :param key: key for item - :param value: value for item - - """ - if key not in self: - self._list_add(key) - self._dict_setitem(key, value) - - _setitem = __setitem__ - - - def copy(self): - """Return a shallow copy of the sorted dict. 
- - Runtime complexity: `O(n)` - - :return: new sorted dict - - """ - return self.__class__(self._key, self.items()) - - __copy__ = copy - - - @classmethod - def fromkeys(cls, iterable, value=None): - """Return a new sorted dict initailized from `iterable` and `value`. - - Items in the sorted dict have keys from `iterable` and values equal to - `value`. - - Runtime complexity: `O(n*log(n))` - - :return: new sorted dict - - """ - return cls((key, value) for key in iterable) - - - def keys(self): - """Return new sorted keys view of the sorted dict's keys. - - See :class:`SortedKeysView` for details. - - :return: new sorted keys view - - """ - return SortedKeysView(self) - - - def items(self): - """Return new sorted items view of the sorted dict's items. - - See :class:`SortedItemsView` for details. - - :return: new sorted items view - - """ - return SortedItemsView(self) - - - def values(self): - """Return new sorted values view of the sorted dict's values. - - See :class:`SortedValuesView` for details. - - :return: new sorted values view - - """ - return SortedValuesView(self) - - - if sys.hexversion < 0x03000000: - def __make_raise_attributeerror(original, alternate): - # pylint: disable=no-self-argument - message = ( - 'SortedDict.{original}() is not implemented.' - ' Use SortedDict.{alternate}() instead.' 
- ).format(original=original, alternate=alternate) - def method(self): - # pylint: disable=missing-docstring,unused-argument - raise AttributeError(message) - method.__name__ = original - method.__doc__ = message - return property(method) - - iteritems = __make_raise_attributeerror('iteritems', 'items') - iterkeys = __make_raise_attributeerror('iterkeys', 'keys') - itervalues = __make_raise_attributeerror('itervalues', 'values') - viewitems = __make_raise_attributeerror('viewitems', 'items') - viewkeys = __make_raise_attributeerror('viewkeys', 'keys') - viewvalues = __make_raise_attributeerror('viewvalues', 'values') - - - class _NotGiven(object): - # pylint: disable=too-few-public-methods - def __repr__(self): - return '' - - __not_given = _NotGiven() - - def pop(self, key, default=__not_given): - """Remove and return value for item identified by `key`. - - If the `key` is not found then return `default` if given. If `default` - is not given then raise :exc:`KeyError`. - - Runtime complexity: `O(log(n))` -- approximate. - - >>> sd = SortedDict({'a': 1, 'b': 2, 'c': 3}) - >>> sd.pop('c') - 3 - >>> sd.pop('z', 26) - 26 - >>> sd.pop('y') - Traceback (most recent call last): - ... - KeyError: 'y' - - :param key: `key` for item - :param default: `default` value if key not found (optional) - :return: value for item - :raises KeyError: if `key` not found and `default` not given - - """ - if key in self: - self._list_remove(key) - return self._dict_pop(key) - else: - if default is self.__not_given: - raise KeyError(key) - else: - return default - - - def popitem(self, index=-1): - """Remove and return ``(key, value)`` pair at `index` from sorted dict. - - Optional argument `index` defaults to -1, the last item in the sorted - dict. Specify ``index=0`` for the first item in the sorted dict. - - If the sorted dict is empty, raises :exc:`KeyError`. - - If the `index` is out of range, raises :exc:`IndexError`. 
- - Runtime complexity: `O(log(n))` - - >>> sd = SortedDict({'a': 1, 'b': 2, 'c': 3}) - >>> sd.popitem() - ('c', 3) - >>> sd.popitem(0) - ('a', 1) - >>> sd.popitem(100) - Traceback (most recent call last): - ... - IndexError: list index out of range - - :param int index: `index` of item (default -1) - :return: key and value pair - :raises KeyError: if sorted dict is empty - :raises IndexError: if `index` out of range - - """ - if not self: - raise KeyError('popitem(): dictionary is empty') - - key = self._list_pop(index) - value = self._dict_pop(key) - return (key, value) - - - def peekitem(self, index=-1): - """Return ``(key, value)`` pair at `index` in sorted dict. - - Optional argument `index` defaults to -1, the last item in the sorted - dict. Specify ``index=0`` for the first item in the sorted dict. - - Unlike :func:`SortedDict.popitem`, the sorted dict is not modified. - - If the `index` is out of range, raises :exc:`IndexError`. - - Runtime complexity: `O(log(n))` - - >>> sd = SortedDict({'a': 1, 'b': 2, 'c': 3}) - >>> sd.peekitem() - ('c', 3) - >>> sd.peekitem(0) - ('a', 1) - >>> sd.peekitem(100) - Traceback (most recent call last): - ... - IndexError: list index out of range - - :param int index: index of item (default -1) - :return: key and value pair - :raises IndexError: if `index` out of range - - """ - key = self._list[index] - return key, self[key] - - - def setdefault(self, key, default=None): - """Return value for item identified by `key` in sorted dict. - - If `key` is in the sorted dict then return its value. If `key` is not - in the sorted dict then insert `key` with value `default` and return - `default`. - - Optional argument `default` defaults to none. - - Runtime complexity: `O(log(n))` -- approximate. 
- - >>> sd = SortedDict() - >>> sd.setdefault('a', 1) - 1 - >>> sd.setdefault('a', 10) - 1 - >>> sd - SortedDict({'a': 1}) - - :param key: key for item - :param default: value for item (default None) - :return: value for item identified by `key` - - """ - if key in self: - return self[key] - self._dict_setitem(key, default) - self._list_add(key) - return default - - - def update(self, *args, **kwargs): - """Update sorted dict with items from `args` and `kwargs`. - - Overwrites existing items. - - Optional arguments `args` and `kwargs` may be a mapping, an iterable of - pairs or keyword arguments. See :func:`SortedDict.__init__` for - details. - - :param args: mapping or iterable of pairs - :param kwargs: keyword arguments mapping - - """ - if not self: - self._dict_update(*args, **kwargs) - self._list_update(self._dict_iter()) - return - - if not kwargs and len(args) == 1 and isinstance(args[0], dict): - pairs = args[0] - else: - pairs = dict(*args, **kwargs) - - if (10 * len(pairs)) > len(self): - self._dict_update(pairs) - self._list_clear() - self._list_update(self._dict_iter()) - else: - for key in pairs: - self._setitem(key, pairs[key]) - - _update = update - - - def __reduce__(self): - """Support for pickle. - - The tricks played with caching references in - :func:`SortedDict.__init__` confuse pickle so customize the reducer. - - """ - return (self.__class__, (self._key, list(self.items()))) - - - @recursive_repr() - def __repr__(self): - """Return string representation of sorted dict. - - ``sd.__repr__()`` <==> ``repr(sd)`` - - :return: string representation - - """ - _key = self._key - type_name = type(self).__name__ - key_arg = '' if _key is None else '{0!r}, '.format(_key) - item_format = '{0!r}: {1!r}'.format - items = ', '.join(item_format(key, self[key]) for key in self._list) - return '{0}({1}{{{2}}})'.format(type_name, key_arg, items) - - - def _check(self): - """Check invariants of sorted dict. 
- - Runtime complexity: `O(n)` - - """ - _list = self._list - _list._check() - assert len(self) == len(_list) - assert all(key in self for key in _list) - - -def _view_delitem(self, index): - """Remove item at `index` from sorted dict. - - ``view.__delitem__(index)`` <==> ``del view[index]`` - - Supports slicing. - - Runtime complexity: `O(log(n))` -- approximate. - - >>> sd = SortedDict({'a': 1, 'b': 2, 'c': 3}) - >>> view = sd.keys() - >>> del view[0] - >>> sd - SortedDict({'b': 2, 'c': 3}) - >>> del view[-1] - >>> sd - SortedDict({'b': 2}) - >>> del view[:] - >>> sd - SortedDict({}) - - :param index: integer or slice for indexing - :raises IndexError: if index out of range - - """ - _mapping = self._mapping - _list = _mapping._list - _dict_delitem = _mapping._dict_delitem - if isinstance(index, slice): - keys = _list[index] - del _list[index] - for key in keys: - _dict_delitem(key) - else: - key = _list.pop(index) - _dict_delitem(key) - - -class SortedKeysView(KeysView, Sequence): - """Sorted keys view is a dynamic view of the sorted dict's keys. - - When the sorted dict's keys change, the view reflects those changes. - - The keys view implements the set and sequence abstract base classes. - - """ - __slots__ = () - - - @classmethod - def _from_iterable(cls, it): - return SortedSet(it) - - - def __getitem__(self, index): - """Lookup key at `index` in sorted keys views. - - ``skv.__getitem__(index)`` <==> ``skv[index]`` - - Supports slicing. - - Runtime complexity: `O(log(n))` -- approximate. - - >>> sd = SortedDict({'a': 1, 'b': 2, 'c': 3}) - >>> skv = sd.keys() - >>> skv[0] - 'a' - >>> skv[-1] - 'c' - >>> skv[:] - ['a', 'b', 'c'] - >>> skv[100] - Traceback (most recent call last): - ... 
- IndexError: list index out of range - - :param index: integer or slice for indexing - :return: key or list of keys - :raises IndexError: if index out of range - - """ - return self._mapping._list[index] - - - __delitem__ = _view_delitem - - -class SortedItemsView(ItemsView, Sequence): - """Sorted items view is a dynamic view of the sorted dict's items. - - When the sorted dict's items change, the view reflects those changes. - - The items view implements the set and sequence abstract base classes. - - """ - __slots__ = () - - - @classmethod - def _from_iterable(cls, it): - return SortedSet(it) - - - def __getitem__(self, index): - """Lookup item at `index` in sorted items view. - - ``siv.__getitem__(index)`` <==> ``siv[index]`` - - Supports slicing. - - Runtime complexity: `O(log(n))` -- approximate. - - >>> sd = SortedDict({'a': 1, 'b': 2, 'c': 3}) - >>> siv = sd.items() - >>> siv[0] - ('a', 1) - >>> siv[-1] - ('c', 3) - >>> siv[:] - [('a', 1), ('b', 2), ('c', 3)] - >>> siv[100] - Traceback (most recent call last): - ... - IndexError: list index out of range - - :param index: integer or slice for indexing - :return: item or list of items - :raises IndexError: if index out of range - - """ - _mapping = self._mapping - _mapping_list = _mapping._list - - if isinstance(index, slice): - keys = _mapping_list[index] - return [(key, _mapping[key]) for key in keys] - - key = _mapping_list[index] - return key, _mapping[key] - - - __delitem__ = _view_delitem - - -class SortedValuesView(ValuesView, Sequence): - """Sorted values view is a dynamic view of the sorted dict's values. - - When the sorted dict's values change, the view reflects those changes. - - The values view implements the sequence abstract base class. - - """ - __slots__ = () - - - def __getitem__(self, index): - """Lookup value at `index` in sorted values view. - - ``siv.__getitem__(index)`` <==> ``siv[index]`` - - Supports slicing. - - Runtime complexity: `O(log(n))` -- approximate. 
- - >>> sd = SortedDict({'a': 1, 'b': 2, 'c': 3}) - >>> svv = sd.values() - >>> svv[0] - 1 - >>> svv[-1] - 3 - >>> svv[:] - [1, 2, 3] - >>> svv[100] - Traceback (most recent call last): - ... - IndexError: list index out of range - - :param index: integer or slice for indexing - :return: value or list of values - :raises IndexError: if index out of range - - """ - _mapping = self._mapping - _mapping_list = _mapping._list - - if isinstance(index, slice): - keys = _mapping_list[index] - return [_mapping[key] for key in keys] - - key = _mapping_list[index] - return _mapping[key] - - - __delitem__ = _view_delitem diff --git a/solnlib/packages/sortedcontainers/sortedlist.py b/solnlib/packages/sortedcontainers/sortedlist.py deleted file mode 100644 index b3f1250e..00000000 --- a/solnlib/packages/sortedcontainers/sortedlist.py +++ /dev/null @@ -1,2627 +0,0 @@ -"""Sorted List -============== - -:doc:`Sorted Containers` is an Apache2 licensed Python sorted -collections library, written in pure-Python, and fast as C-extensions. The -:doc:`introduction` is the best way to get started. - -Sorted list implementations: - -.. 
currentmodule:: sortedcontainers - -* :class:`SortedList` -* :class:`SortedKeyList` - -""" -# pylint: disable=too-many-lines -from __future__ import print_function - -from bisect import bisect_left, bisect_right, insort -from itertools import chain, repeat, starmap -from math import log -from operator import add, eq, ne, gt, ge, lt, le, iadd -from textwrap import dedent - -############################################################################### -# BEGIN Python 2/3 Shims -############################################################################### - -try: - from collections.abc import Sequence, MutableSequence -except ImportError: - from collections import Sequence, MutableSequence - -from functools import wraps -from sys import hexversion - -if hexversion < 0x03000000: - from itertools import imap as map # pylint: disable=redefined-builtin - from itertools import izip as zip # pylint: disable=redefined-builtin - try: - from thread import get_ident - except ImportError: - from dummy_thread import get_ident -else: - from functools import reduce - try: - from _thread import get_ident - except ImportError: - from _dummy_thread import get_ident - - -def recursive_repr(fillvalue='...'): - "Decorator to make a repr function return fillvalue for a recursive call." 
- # pylint: disable=missing-docstring - # Copied from reprlib in Python 3 - # https://hg.python.org/cpython/file/3.6/Lib/reprlib.py - - def decorating_function(user_function): - repr_running = set() - - @wraps(user_function) - def wrapper(self): - key = id(self), get_ident() - if key in repr_running: - return fillvalue - repr_running.add(key) - try: - result = user_function(self) - finally: - repr_running.discard(key) - return result - - return wrapper - - return decorating_function - -############################################################################### -# END Python 2/3 Shims -############################################################################### - - -class SortedList(MutableSequence): - """Sorted list is a sorted mutable sequence. - - Sorted list values are maintained in sorted order. - - Sorted list values must be comparable. The total ordering of values must - not change while they are stored in the sorted list. - - Methods for adding values: - - * :func:`SortedList.add` - * :func:`SortedList.update` - * :func:`SortedList.__add__` - * :func:`SortedList.__iadd__` - * :func:`SortedList.__mul__` - * :func:`SortedList.__imul__` - - Methods for removing values: - - * :func:`SortedList.clear` - * :func:`SortedList.discard` - * :func:`SortedList.remove` - * :func:`SortedList.pop` - * :func:`SortedList.__delitem__` - - Methods for looking up values: - - * :func:`SortedList.bisect_left` - * :func:`SortedList.bisect_right` - * :func:`SortedList.count` - * :func:`SortedList.index` - * :func:`SortedList.__contains__` - * :func:`SortedList.__getitem__` - - Methods for iterating values: - - * :func:`SortedList.irange` - * :func:`SortedList.islice` - * :func:`SortedList.__iter__` - * :func:`SortedList.__reversed__` - - Methods for miscellany: - - * :func:`SortedList.copy` - * :func:`SortedList.__len__` - * :func:`SortedList.__repr__` - * :func:`SortedList._check` - * :func:`SortedList._reset` - - Sorted lists use lexicographical ordering semantics when 
compared to other - sequences. - - Some methods of mutable sequences are not supported and will raise - not-implemented error. - - """ - DEFAULT_LOAD_FACTOR = 1000 - - - def __init__(self, iterable=None, key=None): - """Initialize sorted list instance. - - Optional `iterable` argument provides an initial iterable of values to - initialize the sorted list. - - Runtime complexity: `O(n*log(n))` - - >>> sl = SortedList() - >>> sl - SortedList([]) - >>> sl = SortedList([3, 1, 2, 5, 4]) - >>> sl - SortedList([1, 2, 3, 4, 5]) - - :param iterable: initial values (optional) - - """ - assert key is None - self._len = 0 - self._load = self.DEFAULT_LOAD_FACTOR - self._lists = [] - self._maxes = [] - self._index = [] - self._offset = 0 - - if iterable is not None: - self._update(iterable) - - - def __new__(cls, iterable=None, key=None): - """Create new sorted list or sorted-key list instance. - - Optional `key`-function argument will return an instance of subtype - :class:`SortedKeyList`. - - >>> sl = SortedList() - >>> isinstance(sl, SortedList) - True - >>> sl = SortedList(key=lambda x: -x) - >>> isinstance(sl, SortedList) - True - >>> isinstance(sl, SortedKeyList) - True - - :param iterable: initial values (optional) - :param key: function used to extract comparison key (optional) - :return: sorted list or sorted-key list instance - - """ - # pylint: disable=unused-argument - if key is None: - return object.__new__(cls) - else: - if cls is SortedList: - return object.__new__(SortedKeyList) - else: - raise TypeError('inherit SortedKeyList for key argument') - - - @property - def key(self): # pylint: disable=useless-return - """Function used to extract comparison key from values. - - Sorted list compares values directly so the key function is none. - - """ - return None - - - def _reset(self, load): - """Reset sorted list load factor. - - The `load` specifies the load-factor of the list. 
The default load - factor of 1000 works well for lists from tens to tens-of-millions of - values. Good practice is to use a value that is the cube root of the - list size. With billions of elements, the best load factor depends on - your usage. It's best to leave the load factor at the default until you - start benchmarking. - - See :doc:`implementation` and :doc:`performance-scale` for more - information. - - Runtime complexity: `O(n)` - - :param int load: load-factor for sorted list sublists - - """ - values = reduce(iadd, self._lists, []) - self._clear() - self._load = load - self._update(values) - - - def clear(self): - """Remove all values from sorted list. - - Runtime complexity: `O(n)` - - """ - self._len = 0 - del self._lists[:] - del self._maxes[:] - del self._index[:] - self._offset = 0 - - _clear = clear - - - def add(self, value): - """Add `value` to sorted list. - - Runtime complexity: `O(log(n))` -- approximate. - - >>> sl = SortedList() - >>> sl.add(3) - >>> sl.add(1) - >>> sl.add(2) - >>> sl - SortedList([1, 2, 3]) - - :param value: value to add to sorted list - - """ - _lists = self._lists - _maxes = self._maxes - - if _maxes: - pos = bisect_right(_maxes, value) - - if pos == len(_maxes): - pos -= 1 - _lists[pos].append(value) - _maxes[pos] = value - else: - insort(_lists[pos], value) - - self._expand(pos) - else: - _lists.append([value]) - _maxes.append(value) - - self._len += 1 - - - def _expand(self, pos): - """Split sublists with length greater than double the load-factor. - - Updates the index when the sublist length is less than double the load - level. This requires incrementing the nodes in a traversal from the - leaf node to the root. For an example traversal see - ``SortedList._loc``. 
- - """ - _load = self._load - _lists = self._lists - _index = self._index - - if len(_lists[pos]) > (_load << 1): - _maxes = self._maxes - - _lists_pos = _lists[pos] - half = _lists_pos[_load:] - del _lists_pos[_load:] - _maxes[pos] = _lists_pos[-1] - - _lists.insert(pos + 1, half) - _maxes.insert(pos + 1, half[-1]) - - del _index[:] - else: - if _index: - child = self._offset + pos - while child: - _index[child] += 1 - child = (child - 1) >> 1 - _index[0] += 1 - - - def update(self, iterable): - """Update sorted list by adding all values from `iterable`. - - Runtime complexity: `O(k*log(n))` -- approximate. - - >>> sl = SortedList() - >>> sl.update([3, 1, 2]) - >>> sl - SortedList([1, 2, 3]) - - :param iterable: iterable of values to add - - """ - _lists = self._lists - _maxes = self._maxes - values = sorted(iterable) - - if _maxes: - if len(values) * 4 >= self._len: - values.extend(chain.from_iterable(_lists)) - values.sort() - self._clear() - else: - _add = self.add - for val in values: - _add(val) - return - - _load = self._load - _lists.extend(values[pos:(pos + _load)] - for pos in range(0, len(values), _load)) - _maxes.extend(sublist[-1] for sublist in _lists) - self._len = len(values) - del self._index[:] - - _update = update - - - def __contains__(self, value): - """Return true if `value` is an element of the sorted list. - - ``sl.__contains__(value)`` <==> ``value in sl`` - - Runtime complexity: `O(log(n))` - - >>> sl = SortedList([1, 2, 3, 4, 5]) - >>> 3 in sl - True - - :param value: search for value in sorted list - :return: true if `value` in sorted list - - """ - _maxes = self._maxes - - if not _maxes: - return False - - pos = bisect_left(_maxes, value) - - if pos == len(_maxes): - return False - - _lists = self._lists - idx = bisect_left(_lists[pos], value) - - return _lists[pos][idx] == value - - - def discard(self, value): - """Remove `value` from sorted list if it is a member. - - If `value` is not a member, do nothing. 
- - Runtime complexity: `O(log(n))` -- approximate. - - >>> sl = SortedList([1, 2, 3, 4, 5]) - >>> sl.discard(5) - >>> sl.discard(0) - >>> sl == [1, 2, 3, 4] - True - - :param value: `value` to discard from sorted list - - """ - _maxes = self._maxes - - if not _maxes: - return - - pos = bisect_left(_maxes, value) - - if pos == len(_maxes): - return - - _lists = self._lists - idx = bisect_left(_lists[pos], value) - - if _lists[pos][idx] == value: - self._delete(pos, idx) - - - def remove(self, value): - """Remove `value` from sorted list; `value` must be a member. - - If `value` is not a member, raise ValueError. - - Runtime complexity: `O(log(n))` -- approximate. - - >>> sl = SortedList([1, 2, 3, 4, 5]) - >>> sl.remove(5) - >>> sl == [1, 2, 3, 4] - True - >>> sl.remove(0) - Traceback (most recent call last): - ... - ValueError: 0 not in list - - :param value: `value` to remove from sorted list - :raises ValueError: if `value` is not in sorted list - - """ - _maxes = self._maxes - - if not _maxes: - raise ValueError('{0!r} not in list'.format(value)) - - pos = bisect_left(_maxes, value) - - if pos == len(_maxes): - raise ValueError('{0!r} not in list'.format(value)) - - _lists = self._lists - idx = bisect_left(_lists[pos], value) - - if _lists[pos][idx] == value: - self._delete(pos, idx) - else: - raise ValueError('{0!r} not in list'.format(value)) - - - def _delete(self, pos, idx): - """Delete value at the given `(pos, idx)`. - - Combines lists that are less than half the load level. - - Updates the index when the sublist length is more than half the load - level. This requires decrementing the nodes in a traversal from the - leaf node to the root. For an example traversal see - ``SortedList._loc``. 
- - :param int pos: lists index - :param int idx: sublist index - - """ - _lists = self._lists - _maxes = self._maxes - _index = self._index - - _lists_pos = _lists[pos] - - del _lists_pos[idx] - self._len -= 1 - - len_lists_pos = len(_lists_pos) - - if len_lists_pos > (self._load >> 1): - _maxes[pos] = _lists_pos[-1] - - if _index: - child = self._offset + pos - while child > 0: - _index[child] -= 1 - child = (child - 1) >> 1 - _index[0] -= 1 - elif len(_lists) > 1: - if not pos: - pos += 1 - - prev = pos - 1 - _lists[prev].extend(_lists[pos]) - _maxes[prev] = _lists[prev][-1] - - del _lists[pos] - del _maxes[pos] - del _index[:] - - self._expand(prev) - elif len_lists_pos: - _maxes[pos] = _lists_pos[-1] - else: - del _lists[pos] - del _maxes[pos] - del _index[:] - - - def _loc(self, pos, idx): - """Convert an index pair (lists index, sublist index) into a single - index number that corresponds to the position of the value in the - sorted list. - - Many queries require the index be built. Details of the index are - described in ``SortedList._build_index``. - - Indexing requires traversing the tree from a leaf node to the root. The - parent of each node is easily computable at ``(pos - 1) // 2``. - - Left-child nodes are always at odd indices and right-child nodes are - always at even indices. - - When traversing up from a right-child node, increment the total by the - left-child node. - - The final index is the sum from traversal and the index in the sublist. - - For example, using the index from ``SortedList._build_index``:: - - _index = 14 5 9 3 2 4 5 - _offset = 3 - - Tree:: - - 14 - 5 9 - 3 2 4 5 - - Converting an index pair (2, 3) into a single index involves iterating - like so: - - 1. Starting at the leaf node: offset + alpha = 3 + 2 = 5. We identify - the node as a left-child node. At such nodes, we simply traverse to - the parent. - - 2. At node 9, position 2, we recognize the node as a right-child node - and accumulate the left-child in our total. 
Total is now 5 and we - traverse to the parent at position 0. - - 3. Iteration ends at the root. - - The index is then the sum of the total and sublist index: 5 + 3 = 8. - - :param int pos: lists index - :param int idx: sublist index - :return: index in sorted list - - """ - if not pos: - return idx - - _index = self._index - - if not _index: - self._build_index() - - total = 0 - - # Increment pos to point in the index to len(self._lists[pos]). - - pos += self._offset - - # Iterate until reaching the root of the index tree at pos = 0. - - while pos: - - # Right-child nodes are at odd indices. At such indices - # account the total below the left child node. - - if not pos & 1: - total += _index[pos - 1] - - # Advance pos to the parent node. - - pos = (pos - 1) >> 1 - - return total + idx - - - def _pos(self, idx): - """Convert an index into an index pair (lists index, sublist index) - that can be used to access the corresponding lists position. - - Many queries require the index be built. Details of the index are - described in ``SortedList._build_index``. - - Indexing requires traversing the tree to a leaf node. Each node has two - children which are easily computable. Given an index, pos, the - left-child is at ``pos * 2 + 1`` and the right-child is at ``pos * 2 + - 2``. - - When the index is less than the left-child, traversal moves to the - left sub-tree. Otherwise, the index is decremented by the left-child - and traversal moves to the right sub-tree. - - At a child node, the indexing pair is computed from the relative - position of the child node as compared with the offset and the remaining - index. - - For example, using the index from ``SortedList._build_index``:: - - _index = 14 5 9 3 2 4 5 - _offset = 3 - - Tree:: - - 14 - 5 9 - 3 2 4 5 - - Indexing position 8 involves iterating like so: - - 1. Starting at the root, position 0, 8 is compared with the left-child - node (5) which it is greater than. 
When greater the index is - decremented and the position is updated to the right child node. - - 2. At node 9 with index 3, we again compare the index to the left-child - node with value 4. Because the index is the less than the left-child - node, we simply traverse to the left. - - 3. At node 4 with index 3, we recognize that we are at a leaf node and - stop iterating. - - 4. To compute the sublist index, we subtract the offset from the index - of the leaf node: 5 - 3 = 2. To compute the index in the sublist, we - simply use the index remaining from iteration. In this case, 3. - - The final index pair from our example is (2, 3) which corresponds to - index 8 in the sorted list. - - :param int idx: index in sorted list - :return: (lists index, sublist index) pair - - """ - if idx < 0: - last_len = len(self._lists[-1]) - - if (-idx) <= last_len: - return len(self._lists) - 1, last_len + idx - - idx += self._len - - if idx < 0: - raise IndexError('list index out of range') - elif idx >= self._len: - raise IndexError('list index out of range') - - if idx < len(self._lists[0]): - return 0, idx - - _index = self._index - - if not _index: - self._build_index() - - pos = 0 - child = 1 - len_index = len(_index) - - while child < len_index: - index_child = _index[child] - - if idx < index_child: - pos = child - else: - idx -= index_child - pos = child + 1 - - child = (pos << 1) + 1 - - return (pos - self._offset, idx) - - - def _build_index(self): - """Build a positional index for indexing the sorted list. - - Indexes are represented as binary trees in a dense array notation - similar to a binary heap. - - For example, given a lists representation storing integers:: - - 0: [1, 2, 3] - 1: [4, 5] - 2: [6, 7, 8, 9] - 3: [10, 11, 12, 13, 14] - - The first transformation maps the sub-lists by their length. 
The - first row of the index is the length of the sub-lists:: - - 0: [3, 2, 4, 5] - - Each row after that is the sum of consecutive pairs of the previous - row:: - - 1: [5, 9] - 2: [14] - - Finally, the index is built by concatenating these lists together:: - - _index = [14, 5, 9, 3, 2, 4, 5] - - An offset storing the start of the first row is also stored:: - - _offset = 3 - - When built, the index can be used for efficient indexing into the list. - See the comment and notes on ``SortedList._pos`` for details. - - """ - row0 = list(map(len, self._lists)) - - if len(row0) == 1: - self._index[:] = row0 - self._offset = 0 - return - - head = iter(row0) - tail = iter(head) - row1 = list(starmap(add, zip(head, tail))) - - if len(row0) & 1: - row1.append(row0[-1]) - - if len(row1) == 1: - self._index[:] = row1 + row0 - self._offset = 1 - return - - size = 2 ** (int(log(len(row1) - 1, 2)) + 1) - row1.extend(repeat(0, size - len(row1))) - tree = [row0, row1] - - while len(tree[-1]) > 1: - head = iter(tree[-1]) - tail = iter(head) - row = list(starmap(add, zip(head, tail))) - tree.append(row) - - reduce(iadd, reversed(tree), self._index) - self._offset = size * 2 - 1 - - - def __delitem__(self, index): - """Remove value at `index` from sorted list. - - ``sl.__delitem__(index)`` <==> ``del sl[index]`` - - Supports slicing. - - Runtime complexity: `O(log(n))` -- approximate. 
- - >>> sl = SortedList('abcde') - >>> del sl[2] - >>> sl - SortedList(['a', 'b', 'd', 'e']) - >>> del sl[:2] - >>> sl - SortedList(['d', 'e']) - - :param index: integer or slice for indexing - :raises IndexError: if index out of range - - """ - if isinstance(index, slice): - start, stop, step = index.indices(self._len) - - if step == 1 and start < stop: - if start == 0 and stop == self._len: - return self._clear() - elif self._len <= 8 * (stop - start): - values = self._getitem(slice(None, start)) - if stop < self._len: - values += self._getitem(slice(stop, None)) - self._clear() - return self._update(values) - - indices = range(start, stop, step) - - # Delete items from greatest index to least so - # that the indices remain valid throughout iteration. - - if step > 0: - indices = reversed(indices) - - _pos, _delete = self._pos, self._delete - - for index in indices: - pos, idx = _pos(index) - _delete(pos, idx) - else: - pos, idx = self._pos(index) - self._delete(pos, idx) - - - def __getitem__(self, index): - """Lookup value at `index` in sorted list. - - ``sl.__getitem__(index)`` <==> ``sl[index]`` - - Supports slicing. - - Runtime complexity: `O(log(n))` -- approximate. 
- - >>> sl = SortedList('abcde') - >>> sl[1] - 'b' - >>> sl[-1] - 'e' - >>> sl[2:5] - ['c', 'd', 'e'] - - :param index: integer or slice for indexing - :return: value or list of values - :raises IndexError: if index out of range - - """ - _lists = self._lists - - if isinstance(index, slice): - start, stop, step = index.indices(self._len) - - if step == 1 and start < stop: - if start == 0 and stop == self._len: - return reduce(iadd, self._lists, []) - - start_pos, start_idx = self._pos(start) - - if stop == self._len: - stop_pos = len(_lists) - 1 - stop_idx = len(_lists[stop_pos]) - else: - stop_pos, stop_idx = self._pos(stop) - - if start_pos == stop_pos: - return _lists[start_pos][start_idx:stop_idx] - - prefix = _lists[start_pos][start_idx:] - middle = _lists[(start_pos + 1):stop_pos] - result = reduce(iadd, middle, prefix) - result += _lists[stop_pos][:stop_idx] - - return result - - if step == -1 and start > stop: - result = self._getitem(slice(stop + 1, start + 1)) - result.reverse() - return result - - # Return a list because a negative step could - # reverse the order of the items and this could - # be the desired behavior. - - indices = range(start, stop, step) - return list(self._getitem(index) for index in indices) - else: - if self._len: - if index == 0: - return _lists[0][0] - elif index == -1: - return _lists[-1][-1] - else: - raise IndexError('list index out of range') - - if 0 <= index < len(_lists[0]): - return _lists[0][index] - - len_last = len(_lists[-1]) - - if -len_last < index < 0: - return _lists[-1][len_last + index] - - pos, idx = self._pos(index) - return _lists[pos][idx] - - _getitem = __getitem__ - - - def __setitem__(self, index, value): - """Raise not-implemented error. 
- - ``sl.__setitem__(index, value)`` <==> ``sl[index] = value`` - - :raises NotImplementedError: use ``del sl[index]`` and - ``sl.add(value)`` instead - - """ - message = 'use ``del sl[index]`` and ``sl.add(value)`` instead' - raise NotImplementedError(message) - - - def __iter__(self): - """Return an iterator over the sorted list. - - ``sl.__iter__()`` <==> ``iter(sl)`` - - Iterating the sorted list while adding or deleting values may raise a - :exc:`RuntimeError` or fail to iterate over all values. - - """ - return chain.from_iterable(self._lists) - - - def __reversed__(self): - """Return a reverse iterator over the sorted list. - - ``sl.__reversed__()`` <==> ``reversed(sl)`` - - Iterating the sorted list while adding or deleting values may raise a - :exc:`RuntimeError` or fail to iterate over all values. - - """ - return chain.from_iterable(map(reversed, reversed(self._lists))) - - - def reverse(self): - """Raise not-implemented error. - - Sorted list maintains values in ascending sort order. Values may not be - reversed in-place. - - Use ``reversed(sl)`` for an iterator over values in descending sort - order. - - Implemented to override `MutableSequence.reverse` which provides an - erroneous default implementation. - - :raises NotImplementedError: use ``reversed(sl)`` instead - - """ - raise NotImplementedError('use ``reversed(sl)`` instead') - - - def islice(self, start=None, stop=None, reverse=False): - """Return an iterator that slices sorted list from `start` to `stop`. - - The `start` and `stop` index are treated inclusive and exclusive, - respectively. - - Both `start` and `stop` default to `None` which is automatically - inclusive of the beginning and end of the sorted list. - - When `reverse` is `True` the values are yielded from the iterator in - reverse order; `reverse` defaults to `False`. 
- - >>> sl = SortedList('abcdefghij') - >>> it = sl.islice(2, 6) - >>> list(it) - ['c', 'd', 'e', 'f'] - - :param int start: start index (inclusive) - :param int stop: stop index (exclusive) - :param bool reverse: yield values in reverse order - :return: iterator - - """ - _len = self._len - - if not _len: - return iter(()) - - start, stop, _ = slice(start, stop).indices(self._len) - - if start >= stop: - return iter(()) - - _pos = self._pos - - min_pos, min_idx = _pos(start) - - if stop == _len: - max_pos = len(self._lists) - 1 - max_idx = len(self._lists[-1]) - else: - max_pos, max_idx = _pos(stop) - - return self._islice(min_pos, min_idx, max_pos, max_idx, reverse) - - - def _islice(self, min_pos, min_idx, max_pos, max_idx, reverse): - """Return an iterator that slices sorted list using two index pairs. - - The index pairs are (min_pos, min_idx) and (max_pos, max_idx), the - first inclusive and the latter exclusive. See `_pos` for details on how - an index is converted to an index pair. - - When `reverse` is `True`, values are yielded from the iterator in - reverse order. 
- - """ - _lists = self._lists - - if min_pos > max_pos: - return iter(()) - - if min_pos == max_pos: - if reverse: - indices = reversed(range(min_idx, max_idx)) - return map(_lists[min_pos].__getitem__, indices) - - indices = range(min_idx, max_idx) - return map(_lists[min_pos].__getitem__, indices) - - next_pos = min_pos + 1 - - if next_pos == max_pos: - if reverse: - min_indices = range(min_idx, len(_lists[min_pos])) - max_indices = range(max_idx) - return chain( - map(_lists[max_pos].__getitem__, reversed(max_indices)), - map(_lists[min_pos].__getitem__, reversed(min_indices)), - ) - - min_indices = range(min_idx, len(_lists[min_pos])) - max_indices = range(max_idx) - return chain( - map(_lists[min_pos].__getitem__, min_indices), - map(_lists[max_pos].__getitem__, max_indices), - ) - - if reverse: - min_indices = range(min_idx, len(_lists[min_pos])) - sublist_indices = range(next_pos, max_pos) - sublists = map(_lists.__getitem__, reversed(sublist_indices)) - max_indices = range(max_idx) - return chain( - map(_lists[max_pos].__getitem__, reversed(max_indices)), - chain.from_iterable(map(reversed, sublists)), - map(_lists[min_pos].__getitem__, reversed(min_indices)), - ) - - min_indices = range(min_idx, len(_lists[min_pos])) - sublist_indices = range(next_pos, max_pos) - sublists = map(_lists.__getitem__, sublist_indices) - max_indices = range(max_idx) - return chain( - map(_lists[min_pos].__getitem__, min_indices), - chain.from_iterable(sublists), - map(_lists[max_pos].__getitem__, max_indices), - ) - - - def irange(self, minimum=None, maximum=None, inclusive=(True, True), - reverse=False): - """Create an iterator of values between `minimum` and `maximum`. - - Both `minimum` and `maximum` default to `None` which is automatically - inclusive of the beginning and end of the sorted list. - - The argument `inclusive` is a pair of booleans that indicates whether - the minimum and maximum ought to be included in the range, - respectively. 
The default is ``(True, True)`` such that the range is - inclusive of both minimum and maximum. - - When `reverse` is `True` the values are yielded from the iterator in - reverse order; `reverse` defaults to `False`. - - >>> sl = SortedList('abcdefghij') - >>> it = sl.irange('c', 'f') - >>> list(it) - ['c', 'd', 'e', 'f'] - - :param minimum: minimum value to start iterating - :param maximum: maximum value to stop iterating - :param inclusive: pair of booleans - :param bool reverse: yield values in reverse order - :return: iterator - - """ - _maxes = self._maxes - - if not _maxes: - return iter(()) - - _lists = self._lists - - # Calculate the minimum (pos, idx) pair. By default this location - # will be inclusive in our calculation. - - if minimum is None: - min_pos = 0 - min_idx = 0 - else: - if inclusive[0]: - min_pos = bisect_left(_maxes, minimum) - - if min_pos == len(_maxes): - return iter(()) - - min_idx = bisect_left(_lists[min_pos], minimum) - else: - min_pos = bisect_right(_maxes, minimum) - - if min_pos == len(_maxes): - return iter(()) - - min_idx = bisect_right(_lists[min_pos], minimum) - - # Calculate the maximum (pos, idx) pair. By default this location - # will be exclusive in our calculation. - - if maximum is None: - max_pos = len(_maxes) - 1 - max_idx = len(_lists[max_pos]) - else: - if inclusive[1]: - max_pos = bisect_right(_maxes, maximum) - - if max_pos == len(_maxes): - max_pos -= 1 - max_idx = len(_lists[max_pos]) - else: - max_idx = bisect_right(_lists[max_pos], maximum) - else: - max_pos = bisect_left(_maxes, maximum) - - if max_pos == len(_maxes): - max_pos -= 1 - max_idx = len(_lists[max_pos]) - else: - max_idx = bisect_left(_lists[max_pos], maximum) - - return self._islice(min_pos, min_idx, max_pos, max_idx, reverse) - - - def __len__(self): - """Return the size of the sorted list. 
- - ``sl.__len__()`` <==> ``len(sl)`` - - :return: size of sorted list - - """ - return self._len - - - def bisect_left(self, value): - """Return an index to insert `value` in the sorted list. - - If the `value` is already present, the insertion point will be before - (to the left of) any existing values. - - Similar to the `bisect` module in the standard library. - - Runtime complexity: `O(log(n))` -- approximate. - - >>> sl = SortedList([10, 11, 12, 13, 14]) - >>> sl.bisect_left(12) - 2 - - :param value: insertion index of value in sorted list - :return: index - - """ - _maxes = self._maxes - - if not _maxes: - return 0 - - pos = bisect_left(_maxes, value) - - if pos == len(_maxes): - return self._len - - idx = bisect_left(self._lists[pos], value) - return self._loc(pos, idx) - - - def bisect_right(self, value): - """Return an index to insert `value` in the sorted list. - - Similar to `bisect_left`, but if `value` is already present, the - insertion point with be after (to the right of) any existing values. - - Similar to the `bisect` module in the standard library. - - Runtime complexity: `O(log(n))` -- approximate. - - >>> sl = SortedList([10, 11, 12, 13, 14]) - >>> sl.bisect_right(12) - 3 - - :param value: insertion index of value in sorted list - :return: index - - """ - _maxes = self._maxes - - if not _maxes: - return 0 - - pos = bisect_right(_maxes, value) - - if pos == len(_maxes): - return self._len - - idx = bisect_right(self._lists[pos], value) - return self._loc(pos, idx) - - bisect = bisect_right - _bisect_right = bisect_right - - - def count(self, value): - """Return number of occurrences of `value` in the sorted list. - - Runtime complexity: `O(log(n))` -- approximate. 
- - >>> sl = SortedList([1, 2, 2, 3, 3, 3, 4, 4, 4, 4]) - >>> sl.count(3) - 3 - - :param value: value to count in sorted list - :return: count - - """ - _maxes = self._maxes - - if not _maxes: - return 0 - - pos_left = bisect_left(_maxes, value) - - if pos_left == len(_maxes): - return 0 - - _lists = self._lists - idx_left = bisect_left(_lists[pos_left], value) - pos_right = bisect_right(_maxes, value) - - if pos_right == len(_maxes): - return self._len - self._loc(pos_left, idx_left) - - idx_right = bisect_right(_lists[pos_right], value) - - if pos_left == pos_right: - return idx_right - idx_left - - right = self._loc(pos_right, idx_right) - left = self._loc(pos_left, idx_left) - return right - left - - - def copy(self): - """Return a shallow copy of the sorted list. - - Runtime complexity: `O(n)` - - :return: new sorted list - - """ - return self.__class__(self) - - __copy__ = copy - - - def append(self, value): - """Raise not-implemented error. - - Implemented to override `MutableSequence.append` which provides an - erroneous default implementation. - - :raises NotImplementedError: use ``sl.add(value)`` instead - - """ - raise NotImplementedError('use ``sl.add(value)`` instead') - - - def extend(self, values): - """Raise not-implemented error. - - Implemented to override `MutableSequence.extend` which provides an - erroneous default implementation. - - :raises NotImplementedError: use ``sl.update(values)`` instead - - """ - raise NotImplementedError('use ``sl.update(values)`` instead') - - - def insert(self, index, value): - """Raise not-implemented error. - - :raises NotImplementedError: use ``sl.add(value)`` instead - - """ - raise NotImplementedError('use ``sl.add(value)`` instead') - - - def pop(self, index=-1): - """Remove and return value at `index` in sorted list. - - Raise :exc:`IndexError` if the sorted list is empty or index is out of - range. - - Negative indices are supported. - - Runtime complexity: `O(log(n))` -- approximate. 
- - >>> sl = SortedList('abcde') - >>> sl.pop() - 'e' - >>> sl.pop(2) - 'c' - >>> sl - SortedList(['a', 'b', 'd']) - - :param int index: index of value (default -1) - :return: value - :raises IndexError: if index is out of range - - """ - if not self._len: - raise IndexError('pop index out of range') - - _lists = self._lists - - if index == 0: - val = _lists[0][0] - self._delete(0, 0) - return val - - if index == -1: - pos = len(_lists) - 1 - loc = len(_lists[pos]) - 1 - val = _lists[pos][loc] - self._delete(pos, loc) - return val - - if 0 <= index < len(_lists[0]): - val = _lists[0][index] - self._delete(0, index) - return val - - len_last = len(_lists[-1]) - - if -len_last < index < 0: - pos = len(_lists) - 1 - loc = len_last + index - val = _lists[pos][loc] - self._delete(pos, loc) - return val - - pos, idx = self._pos(index) - val = _lists[pos][idx] - self._delete(pos, idx) - return val - - - def index(self, value, start=None, stop=None): - """Return first index of value in sorted list. - - Raise ValueError if `value` is not present. - - Index must be between `start` and `stop` for the `value` to be - considered present. The default value, None, for `start` and `stop` - indicate the beginning and end of the sorted list. - - Negative indices are supported. - - Runtime complexity: `O(log(n))` -- approximate. - - >>> sl = SortedList('abcde') - >>> sl.index('d') - 3 - >>> sl.index('z') - Traceback (most recent call last): - ... 
- ValueError: 'z' is not in list - - :param value: value in sorted list - :param int start: start index (default None, start of sorted list) - :param int stop: stop index (default None, end of sorted list) - :return: index of value - :raises ValueError: if value is not present - - """ - _len = self._len - - if not _len: - raise ValueError('{0!r} is not in list'.format(value)) - - if start is None: - start = 0 - if start < 0: - start += _len - if start < 0: - start = 0 - - if stop is None: - stop = _len - if stop < 0: - stop += _len - if stop > _len: - stop = _len - - if stop <= start: - raise ValueError('{0!r} is not in list'.format(value)) - - _maxes = self._maxes - pos_left = bisect_left(_maxes, value) - - if pos_left == len(_maxes): - raise ValueError('{0!r} is not in list'.format(value)) - - _lists = self._lists - idx_left = bisect_left(_lists[pos_left], value) - - if _lists[pos_left][idx_left] != value: - raise ValueError('{0!r} is not in list'.format(value)) - - stop -= 1 - left = self._loc(pos_left, idx_left) - - if start <= left: - if left <= stop: - return left - else: - right = self._bisect_right(value) - 1 - - if start <= right: - return start - - raise ValueError('{0!r} is not in list'.format(value)) - - - def __add__(self, other): - """Return new sorted list containing all values in both sequences. - - ``sl.__add__(other)`` <==> ``sl + other`` - - Values in `other` do not need to be in sorted order. - - Runtime complexity: `O(n*log(n))` - - >>> sl1 = SortedList('bat') - >>> sl2 = SortedList('cat') - >>> sl1 + sl2 - SortedList(['a', 'a', 'b', 'c', 't', 't']) - - :param other: other iterable - :return: new sorted list - - """ - values = reduce(iadd, self._lists, []) - values.extend(other) - return self.__class__(values) - - __radd__ = __add__ - - - def __iadd__(self, other): - """Update sorted list with values from `other`. - - ``sl.__iadd__(other)`` <==> ``sl += other`` - - Values in `other` do not need to be in sorted order. 
- - Runtime complexity: `O(k*log(n))` -- approximate. - - >>> sl = SortedList('bat') - >>> sl += 'cat' - >>> sl - SortedList(['a', 'a', 'b', 'c', 't', 't']) - - :param other: other iterable - :return: existing sorted list - - """ - self._update(other) - return self - - - def __mul__(self, num): - """Return new sorted list with `num` shallow copies of values. - - ``sl.__mul__(num)`` <==> ``sl * num`` - - Runtime complexity: `O(n*log(n))` - - >>> sl = SortedList('abc') - >>> sl * 3 - SortedList(['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c']) - - :param int num: count of shallow copies - :return: new sorted list - - """ - values = reduce(iadd, self._lists, []) * num - return self.__class__(values) - - __rmul__ = __mul__ - - - def __imul__(self, num): - """Update the sorted list with `num` shallow copies of values. - - ``sl.__imul__(num)`` <==> ``sl *= num`` - - Runtime complexity: `O(n*log(n))` - - >>> sl = SortedList('abc') - >>> sl *= 3 - >>> sl - SortedList(['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c']) - - :param int num: count of shallow copies - :return: existing sorted list - - """ - values = reduce(iadd, self._lists, []) * num - self._clear() - self._update(values) - return self - - - def __make_cmp(seq_op, symbol, doc): - "Make comparator method." - def comparer(self, other): - "Compare method for sorted list and sequence." - if not isinstance(other, Sequence): - return NotImplemented - - self_len = self._len - len_other = len(other) - - if self_len != len_other: - if seq_op is eq: - return False - if seq_op is ne: - return True - - for alpha, beta in zip(self, other): - if alpha != beta: - return seq_op(alpha, beta) - - return seq_op(self_len, len_other) - - seq_op_name = seq_op.__name__ - comparer.__name__ = '__{0}__'.format(seq_op_name) - doc_str = """Return true if and only if sorted list is {0} `other`. - - ``sl.__{1}__(other)`` <==> ``sl {2} other`` - - Comparisons use lexicographical order as with sequences. 
- - Runtime complexity: `O(n)` - - :param other: `other` sequence - :return: true if sorted list is {0} `other` - - """ - comparer.__doc__ = dedent(doc_str.format(doc, seq_op_name, symbol)) - return comparer - - - __eq__ = __make_cmp(eq, '==', 'equal to') - __ne__ = __make_cmp(ne, '!=', 'not equal to') - __lt__ = __make_cmp(lt, '<', 'less than') - __gt__ = __make_cmp(gt, '>', 'greater than') - __le__ = __make_cmp(le, '<=', 'less than or equal to') - __ge__ = __make_cmp(ge, '>=', 'greater than or equal to') - __make_cmp = staticmethod(__make_cmp) - - - @recursive_repr() - def __repr__(self): - """Return string representation of sorted list. - - ``sl.__repr__()`` <==> ``repr(sl)`` - - :return: string representation - - """ - return '{0}({1!r})'.format(type(self).__name__, list(self)) - - - def _check(self): - """Check invariants of sorted list. - - Runtime complexity: `O(n)` - - """ - try: - assert self._load >= 4 - assert len(self._maxes) == len(self._lists) - assert self._len == sum(len(sublist) for sublist in self._lists) - - # Check all sublists are sorted. - - for sublist in self._lists: - for pos in range(1, len(sublist)): - assert sublist[pos - 1] <= sublist[pos] - - # Check beginning/end of sublists are sorted. - - for pos in range(1, len(self._lists)): - assert self._lists[pos - 1][-1] <= self._lists[pos][0] - - # Check _maxes index is the last value of each sublist. - - for pos in range(len(self._maxes)): - assert self._maxes[pos] == self._lists[pos][-1] - - # Check sublist lengths are less than double load-factor. - - double = self._load << 1 - assert all(len(sublist) <= double for sublist in self._lists) - - # Check sublist lengths are greater than half load-factor for all - # but the last sublist. 
- - half = self._load >> 1 - for pos in range(0, len(self._lists) - 1): - assert len(self._lists[pos]) >= half - - if self._index: - assert self._len == self._index[0] - assert len(self._index) == self._offset + len(self._lists) - - # Check index leaf nodes equal length of sublists. - - for pos in range(len(self._lists)): - leaf = self._index[self._offset + pos] - assert leaf == len(self._lists[pos]) - - # Check index branch nodes are the sum of their children. - - for pos in range(self._offset): - child = (pos << 1) + 1 - if child >= len(self._index): - assert self._index[pos] == 0 - elif child + 1 == len(self._index): - assert self._index[pos] == self._index[child] - else: - child_sum = self._index[child] + self._index[child + 1] - assert child_sum == self._index[pos] - except: - import sys - import traceback - traceback.print_exc(file=sys.stdout) - print('len', self._len) - print('load', self._load) - print('offset', self._offset) - print('len_index', len(self._index)) - print('index', self._index) - print('len_maxes', len(self._maxes)) - print('maxes', self._maxes) - print('len_lists', len(self._lists)) - print('lists', self._lists) - raise - - -def identity(value): - "Identity function." - return value - - -class SortedKeyList(SortedList): - """Sorted-key list is a subtype of sorted list. - - The sorted-key list maintains values in comparison order based on the - result of a key function applied to every value. - - All the same methods that are available in :class:`SortedList` are also - available in :class:`SortedKeyList`. - - Additional methods provided: - - * :attr:`SortedKeyList.key` - * :func:`SortedKeyList.bisect_key_left` - * :func:`SortedKeyList.bisect_key_right` - * :func:`SortedKeyList.irange_key` - - Some examples below use: - - >>> from operator import neg - >>> neg - - >>> neg(1) - -1 - - """ - def __init__(self, iterable=None, key=identity): - """Initialize sorted-key list instance. 
- - Optional `iterable` argument provides an initial iterable of values to - initialize the sorted-key list. - - Optional `key` argument defines a callable that, like the `key` - argument to Python's `sorted` function, extracts a comparison key from - each value. The default is the identity function. - - Runtime complexity: `O(n*log(n))` - - >>> from operator import neg - >>> skl = SortedKeyList(key=neg) - >>> skl - SortedKeyList([], key=) - >>> skl = SortedKeyList([3, 1, 2], key=neg) - >>> skl - SortedKeyList([3, 2, 1], key=) - - :param iterable: initial values (optional) - :param key: function used to extract comparison key (optional) - - """ - self._key = key - self._len = 0 - self._load = self.DEFAULT_LOAD_FACTOR - self._lists = [] - self._keys = [] - self._maxes = [] - self._index = [] - self._offset = 0 - - if iterable is not None: - self._update(iterable) - - - def __new__(cls, iterable=None, key=identity): - return object.__new__(cls) - - - @property - def key(self): - "Function used to extract comparison key from values." - return self._key - - - def clear(self): - """Remove all values from sorted-key list. - - Runtime complexity: `O(n)` - - """ - self._len = 0 - del self._lists[:] - del self._keys[:] - del self._maxes[:] - del self._index[:] - - _clear = clear - - - def add(self, value): - """Add `value` to sorted-key list. - - Runtime complexity: `O(log(n))` -- approximate. 
- - >>> from operator import neg - >>> skl = SortedKeyList(key=neg) - >>> skl.add(3) - >>> skl.add(1) - >>> skl.add(2) - >>> skl - SortedKeyList([3, 2, 1], key=) - - :param value: value to add to sorted-key list - - """ - _lists = self._lists - _keys = self._keys - _maxes = self._maxes - - key = self._key(value) - - if _maxes: - pos = bisect_right(_maxes, key) - - if pos == len(_maxes): - pos -= 1 - _lists[pos].append(value) - _keys[pos].append(key) - _maxes[pos] = key - else: - idx = bisect_right(_keys[pos], key) - _lists[pos].insert(idx, value) - _keys[pos].insert(idx, key) - - self._expand(pos) - else: - _lists.append([value]) - _keys.append([key]) - _maxes.append(key) - - self._len += 1 - - - def _expand(self, pos): - """Split sublists with length greater than double the load-factor. - - Updates the index when the sublist length is less than double the load - level. This requires incrementing the nodes in a traversal from the - leaf node to the root. For an example traversal see - ``SortedList._loc``. - - """ - _lists = self._lists - _keys = self._keys - _index = self._index - - if len(_keys[pos]) > (self._load << 1): - _maxes = self._maxes - _load = self._load - - _lists_pos = _lists[pos] - _keys_pos = _keys[pos] - half = _lists_pos[_load:] - half_keys = _keys_pos[_load:] - del _lists_pos[_load:] - del _keys_pos[_load:] - _maxes[pos] = _keys_pos[-1] - - _lists.insert(pos + 1, half) - _keys.insert(pos + 1, half_keys) - _maxes.insert(pos + 1, half_keys[-1]) - - del _index[:] - else: - if _index: - child = self._offset + pos - while child: - _index[child] += 1 - child = (child - 1) >> 1 - _index[0] += 1 - - - def update(self, iterable): - """Update sorted-key list by adding all values from `iterable`. - - Runtime complexity: `O(k*log(n))` -- approximate. 
- - >>> from operator import neg - >>> skl = SortedKeyList(key=neg) - >>> skl.update([3, 1, 2]) - >>> skl - SortedKeyList([3, 2, 1], key=) - - :param iterable: iterable of values to add - - """ - _lists = self._lists - _keys = self._keys - _maxes = self._maxes - values = sorted(iterable, key=self._key) - - if _maxes: - if len(values) * 4 >= self._len: - values.extend(chain.from_iterable(_lists)) - values.sort(key=self._key) - self._clear() - else: - _add = self.add - for val in values: - _add(val) - return - - _load = self._load - _lists.extend(values[pos:(pos + _load)] - for pos in range(0, len(values), _load)) - _keys.extend(list(map(self._key, _list)) for _list in _lists) - _maxes.extend(sublist[-1] for sublist in _keys) - self._len = len(values) - del self._index[:] - - _update = update - - - def __contains__(self, value): - """Return true if `value` is an element of the sorted-key list. - - ``skl.__contains__(value)`` <==> ``value in skl`` - - Runtime complexity: `O(log(n))` - - >>> from operator import neg - >>> skl = SortedKeyList([1, 2, 3, 4, 5], key=neg) - >>> 3 in skl - True - - :param value: search for value in sorted-key list - :return: true if `value` in sorted-key list - - """ - _maxes = self._maxes - - if not _maxes: - return False - - key = self._key(value) - pos = bisect_left(_maxes, key) - - if pos == len(_maxes): - return False - - _lists = self._lists - _keys = self._keys - - idx = bisect_left(_keys[pos], key) - - len_keys = len(_keys) - len_sublist = len(_keys[pos]) - - while True: - if _keys[pos][idx] != key: - return False - if _lists[pos][idx] == value: - return True - idx += 1 - if idx == len_sublist: - pos += 1 - if pos == len_keys: - return False - len_sublist = len(_keys[pos]) - idx = 0 - - - def discard(self, value): - """Remove `value` from sorted-key list if it is a member. - - If `value` is not a member, do nothing. - - Runtime complexity: `O(log(n))` -- approximate. 
- - >>> from operator import neg - >>> skl = SortedKeyList([5, 4, 3, 2, 1], key=neg) - >>> skl.discard(1) - >>> skl.discard(0) - >>> skl == [5, 4, 3, 2] - True - - :param value: `value` to discard from sorted-key list - - """ - _maxes = self._maxes - - if not _maxes: - return - - key = self._key(value) - pos = bisect_left(_maxes, key) - - if pos == len(_maxes): - return - - _lists = self._lists - _keys = self._keys - idx = bisect_left(_keys[pos], key) - len_keys = len(_keys) - len_sublist = len(_keys[pos]) - - while True: - if _keys[pos][idx] != key: - return - if _lists[pos][idx] == value: - self._delete(pos, idx) - return - idx += 1 - if idx == len_sublist: - pos += 1 - if pos == len_keys: - return - len_sublist = len(_keys[pos]) - idx = 0 - - - def remove(self, value): - """Remove `value` from sorted-key list; `value` must be a member. - - If `value` is not a member, raise ValueError. - - Runtime complexity: `O(log(n))` -- approximate. - - >>> from operator import neg - >>> skl = SortedKeyList([1, 2, 3, 4, 5], key=neg) - >>> skl.remove(5) - >>> skl == [4, 3, 2, 1] - True - >>> skl.remove(0) - Traceback (most recent call last): - ... 
- ValueError: 0 not in list - - :param value: `value` to remove from sorted-key list - :raises ValueError: if `value` is not in sorted-key list - - """ - _maxes = self._maxes - - if not _maxes: - raise ValueError('{0!r} not in list'.format(value)) - - key = self._key(value) - pos = bisect_left(_maxes, key) - - if pos == len(_maxes): - raise ValueError('{0!r} not in list'.format(value)) - - _lists = self._lists - _keys = self._keys - idx = bisect_left(_keys[pos], key) - len_keys = len(_keys) - len_sublist = len(_keys[pos]) - - while True: - if _keys[pos][idx] != key: - raise ValueError('{0!r} not in list'.format(value)) - if _lists[pos][idx] == value: - self._delete(pos, idx) - return - idx += 1 - if idx == len_sublist: - pos += 1 - if pos == len_keys: - raise ValueError('{0!r} not in list'.format(value)) - len_sublist = len(_keys[pos]) - idx = 0 - - - def _delete(self, pos, idx): - """Delete value at the given `(pos, idx)`. - - Combines lists that are less than half the load level. - - Updates the index when the sublist length is more than half the load - level. This requires decrementing the nodes in a traversal from the - leaf node to the root. For an example traversal see - ``SortedList._loc``. 
- - :param int pos: lists index - :param int idx: sublist index - - """ - _lists = self._lists - _keys = self._keys - _maxes = self._maxes - _index = self._index - keys_pos = _keys[pos] - lists_pos = _lists[pos] - - del keys_pos[idx] - del lists_pos[idx] - self._len -= 1 - - len_keys_pos = len(keys_pos) - - if len_keys_pos > (self._load >> 1): - _maxes[pos] = keys_pos[-1] - - if _index: - child = self._offset + pos - while child > 0: - _index[child] -= 1 - child = (child - 1) >> 1 - _index[0] -= 1 - elif len(_keys) > 1: - if not pos: - pos += 1 - - prev = pos - 1 - _keys[prev].extend(_keys[pos]) - _lists[prev].extend(_lists[pos]) - _maxes[prev] = _keys[prev][-1] - - del _lists[pos] - del _keys[pos] - del _maxes[pos] - del _index[:] - - self._expand(prev) - elif len_keys_pos: - _maxes[pos] = keys_pos[-1] - else: - del _lists[pos] - del _keys[pos] - del _maxes[pos] - del _index[:] - - - def irange(self, minimum=None, maximum=None, inclusive=(True, True), - reverse=False): - """Create an iterator of values between `minimum` and `maximum`. - - Both `minimum` and `maximum` default to `None` which is automatically - inclusive of the beginning and end of the sorted-key list. - - The argument `inclusive` is a pair of booleans that indicates whether - the minimum and maximum ought to be included in the range, - respectively. The default is ``(True, True)`` such that the range is - inclusive of both minimum and maximum. - - When `reverse` is `True` the values are yielded from the iterator in - reverse order; `reverse` defaults to `False`. 
- - >>> from operator import neg - >>> skl = SortedKeyList([11, 12, 13, 14, 15], key=neg) - >>> it = skl.irange(14.5, 11.5) - >>> list(it) - [14, 13, 12] - - :param minimum: minimum value to start iterating - :param maximum: maximum value to stop iterating - :param inclusive: pair of booleans - :param bool reverse: yield values in reverse order - :return: iterator - - """ - min_key = self._key(minimum) if minimum is not None else None - max_key = self._key(maximum) if maximum is not None else None - return self._irange_key( - min_key=min_key, max_key=max_key, - inclusive=inclusive, reverse=reverse, - ) - - - def irange_key(self, min_key=None, max_key=None, inclusive=(True, True), - reverse=False): - """Create an iterator of values between `min_key` and `max_key`. - - Both `min_key` and `max_key` default to `None` which is automatically - inclusive of the beginning and end of the sorted-key list. - - The argument `inclusive` is a pair of booleans that indicates whether - the minimum and maximum ought to be included in the range, - respectively. The default is ``(True, True)`` such that the range is - inclusive of both minimum and maximum. - - When `reverse` is `True` the values are yielded from the iterator in - reverse order; `reverse` defaults to `False`. - - >>> from operator import neg - >>> skl = SortedKeyList([11, 12, 13, 14, 15], key=neg) - >>> it = skl.irange_key(-14, -12) - >>> list(it) - [14, 13, 12] - - :param min_key: minimum key to start iterating - :param max_key: maximum key to stop iterating - :param inclusive: pair of booleans - :param bool reverse: yield values in reverse order - :return: iterator - - """ - _maxes = self._maxes - - if not _maxes: - return iter(()) - - _keys = self._keys - - # Calculate the minimum (pos, idx) pair. By default this location - # will be inclusive in our calculation. 
- - if min_key is None: - min_pos = 0 - min_idx = 0 - else: - if inclusive[0]: - min_pos = bisect_left(_maxes, min_key) - - if min_pos == len(_maxes): - return iter(()) - - min_idx = bisect_left(_keys[min_pos], min_key) - else: - min_pos = bisect_right(_maxes, min_key) - - if min_pos == len(_maxes): - return iter(()) - - min_idx = bisect_right(_keys[min_pos], min_key) - - # Calculate the maximum (pos, idx) pair. By default this location - # will be exclusive in our calculation. - - if max_key is None: - max_pos = len(_maxes) - 1 - max_idx = len(_keys[max_pos]) - else: - if inclusive[1]: - max_pos = bisect_right(_maxes, max_key) - - if max_pos == len(_maxes): - max_pos -= 1 - max_idx = len(_keys[max_pos]) - else: - max_idx = bisect_right(_keys[max_pos], max_key) - else: - max_pos = bisect_left(_maxes, max_key) - - if max_pos == len(_maxes): - max_pos -= 1 - max_idx = len(_keys[max_pos]) - else: - max_idx = bisect_left(_keys[max_pos], max_key) - - return self._islice(min_pos, min_idx, max_pos, max_idx, reverse) - - _irange_key = irange_key - - - def bisect_left(self, value): - """Return an index to insert `value` in the sorted-key list. - - If the `value` is already present, the insertion point will be before - (to the left of) any existing values. - - Similar to the `bisect` module in the standard library. - - Runtime complexity: `O(log(n))` -- approximate. - - >>> from operator import neg - >>> skl = SortedKeyList([5, 4, 3, 2, 1], key=neg) - >>> skl.bisect_left(1) - 4 - - :param value: insertion index of value in sorted-key list - :return: index - - """ - return self._bisect_key_left(self._key(value)) - - - def bisect_right(self, value): - """Return an index to insert `value` in the sorted-key list. - - Similar to `bisect_left`, but if `value` is already present, the - insertion point with be after (to the right of) any existing values. - - Similar to the `bisect` module in the standard library. - - Runtime complexity: `O(log(n))` -- approximate. 
- - >>> from operator import neg - >>> skl = SortedList([5, 4, 3, 2, 1], key=neg) - >>> skl.bisect_right(1) - 5 - - :param value: insertion index of value in sorted-key list - :return: index - - """ - return self._bisect_key_right(self._key(value)) - - bisect = bisect_right - - - def bisect_key_left(self, key): - """Return an index to insert `key` in the sorted-key list. - - If the `key` is already present, the insertion point will be before (to - the left of) any existing keys. - - Similar to the `bisect` module in the standard library. - - Runtime complexity: `O(log(n))` -- approximate. - - >>> from operator import neg - >>> skl = SortedKeyList([5, 4, 3, 2, 1], key=neg) - >>> skl.bisect_key_left(-1) - 4 - - :param key: insertion index of key in sorted-key list - :return: index - - """ - _maxes = self._maxes - - if not _maxes: - return 0 - - pos = bisect_left(_maxes, key) - - if pos == len(_maxes): - return self._len - - idx = bisect_left(self._keys[pos], key) - - return self._loc(pos, idx) - - _bisect_key_left = bisect_key_left - - - def bisect_key_right(self, key): - """Return an index to insert `key` in the sorted-key list. - - Similar to `bisect_key_left`, but if `key` is already present, the - insertion point with be after (to the right of) any existing keys. - - Similar to the `bisect` module in the standard library. - - Runtime complexity: `O(log(n))` -- approximate. - - >>> from operator import neg - >>> skl = SortedList([5, 4, 3, 2, 1], key=neg) - >>> skl.bisect_key_right(-1) - 5 - - :param key: insertion index of key in sorted-key list - :return: index - - """ - _maxes = self._maxes - - if not _maxes: - return 0 - - pos = bisect_right(_maxes, key) - - if pos == len(_maxes): - return self._len - - idx = bisect_right(self._keys[pos], key) - - return self._loc(pos, idx) - - bisect_key = bisect_key_right - _bisect_key_right = bisect_key_right - - - def count(self, value): - """Return number of occurrences of `value` in the sorted-key list. 
- - Runtime complexity: `O(log(n))` -- approximate. - - >>> from operator import neg - >>> skl = SortedKeyList([4, 4, 4, 4, 3, 3, 3, 2, 2, 1], key=neg) - >>> skl.count(2) - 2 - - :param value: value to count in sorted-key list - :return: count - - """ - _maxes = self._maxes - - if not _maxes: - return 0 - - key = self._key(value) - pos = bisect_left(_maxes, key) - - if pos == len(_maxes): - return 0 - - _lists = self._lists - _keys = self._keys - idx = bisect_left(_keys[pos], key) - total = 0 - len_keys = len(_keys) - len_sublist = len(_keys[pos]) - - while True: - if _keys[pos][idx] != key: - return total - if _lists[pos][idx] == value: - total += 1 - idx += 1 - if idx == len_sublist: - pos += 1 - if pos == len_keys: - return total - len_sublist = len(_keys[pos]) - idx = 0 - - - def copy(self): - """Return a shallow copy of the sorted-key list. - - Runtime complexity: `O(n)` - - :return: new sorted-key list - - """ - return self.__class__(self, key=self._key) - - __copy__ = copy - - - def index(self, value, start=None, stop=None): - """Return first index of value in sorted-key list. - - Raise ValueError if `value` is not present. - - Index must be between `start` and `stop` for the `value` to be - considered present. The default value, None, for `start` and `stop` - indicate the beginning and end of the sorted-key list. - - Negative indices are supported. - - Runtime complexity: `O(log(n))` -- approximate. - - >>> from operator import neg - >>> skl = SortedKeyList([5, 4, 3, 2, 1], key=neg) - >>> skl.index(2) - 3 - >>> skl.index(0) - Traceback (most recent call last): - ... 
- ValueError: 0 is not in list - - :param value: value in sorted-key list - :param int start: start index (default None, start of sorted-key list) - :param int stop: stop index (default None, end of sorted-key list) - :return: index of value - :raises ValueError: if value is not present - - """ - _len = self._len - - if not _len: - raise ValueError('{0!r} is not in list'.format(value)) - - if start is None: - start = 0 - if start < 0: - start += _len - if start < 0: - start = 0 - - if stop is None: - stop = _len - if stop < 0: - stop += _len - if stop > _len: - stop = _len - - if stop <= start: - raise ValueError('{0!r} is not in list'.format(value)) - - _maxes = self._maxes - key = self._key(value) - pos = bisect_left(_maxes, key) - - if pos == len(_maxes): - raise ValueError('{0!r} is not in list'.format(value)) - - stop -= 1 - _lists = self._lists - _keys = self._keys - idx = bisect_left(_keys[pos], key) - len_keys = len(_keys) - len_sublist = len(_keys[pos]) - - while True: - if _keys[pos][idx] != key: - raise ValueError('{0!r} is not in list'.format(value)) - if _lists[pos][idx] == value: - loc = self._loc(pos, idx) - if start <= loc <= stop: - return loc - elif loc > stop: - break - idx += 1 - if idx == len_sublist: - pos += 1 - if pos == len_keys: - raise ValueError('{0!r} is not in list'.format(value)) - len_sublist = len(_keys[pos]) - idx = 0 - - raise ValueError('{0!r} is not in list'.format(value)) - - - def __add__(self, other): - """Return new sorted-key list containing all values in both sequences. - - ``skl.__add__(other)`` <==> ``skl + other`` - - Values in `other` do not need to be in sorted-key order. 
- - Runtime complexity: `O(n*log(n))` - - >>> from operator import neg - >>> skl1 = SortedKeyList([5, 4, 3], key=neg) - >>> skl2 = SortedKeyList([2, 1, 0], key=neg) - >>> skl1 + skl2 - SortedKeyList([5, 4, 3, 2, 1, 0], key=) - - :param other: other iterable - :return: new sorted-key list - - """ - values = reduce(iadd, self._lists, []) - values.extend(other) - return self.__class__(values, key=self._key) - - __radd__ = __add__ - - - def __mul__(self, num): - """Return new sorted-key list with `num` shallow copies of values. - - ``skl.__mul__(num)`` <==> ``skl * num`` - - Runtime complexity: `O(n*log(n))` - - >>> from operator import neg - >>> skl = SortedKeyList([3, 2, 1], key=neg) - >>> skl * 2 - SortedKeyList([3, 3, 2, 2, 1, 1], key=) - - :param int num: count of shallow copies - :return: new sorted-key list - - """ - values = reduce(iadd, self._lists, []) * num - return self.__class__(values, key=self._key) - - - @recursive_repr() - def __repr__(self): - """Return string representation of sorted-key list. - - ``skl.__repr__()`` <==> ``repr(skl)`` - - :return: string representation - - """ - type_name = type(self).__name__ - return '{0}({1!r}, key={2!r})'.format(type_name, list(self), self._key) - - - def _check(self): - """Check invariants of sorted-key list. - - Runtime complexity: `O(n)` - - """ - try: - assert self._load >= 4 - assert len(self._maxes) == len(self._lists) == len(self._keys) - assert self._len == sum(len(sublist) for sublist in self._lists) - - # Check all sublists are sorted. - - for sublist in self._keys: - for pos in range(1, len(sublist)): - assert sublist[pos - 1] <= sublist[pos] - - # Check beginning/end of sublists are sorted. - - for pos in range(1, len(self._keys)): - assert self._keys[pos - 1][-1] <= self._keys[pos][0] - - # Check _keys matches _key mapped to _lists. 
- - for val_sublist, key_sublist in zip(self._lists, self._keys): - assert len(val_sublist) == len(key_sublist) - for val, key in zip(val_sublist, key_sublist): - assert self._key(val) == key - - # Check _maxes index is the last value of each sublist. - - for pos in range(len(self._maxes)): - assert self._maxes[pos] == self._keys[pos][-1] - - # Check sublist lengths are less than double load-factor. - - double = self._load << 1 - assert all(len(sublist) <= double for sublist in self._lists) - - # Check sublist lengths are greater than half load-factor for all - # but the last sublist. - - half = self._load >> 1 - for pos in range(0, len(self._lists) - 1): - assert len(self._lists[pos]) >= half - - if self._index: - assert self._len == self._index[0] - assert len(self._index) == self._offset + len(self._lists) - - # Check index leaf nodes equal length of sublists. - - for pos in range(len(self._lists)): - leaf = self._index[self._offset + pos] - assert leaf == len(self._lists[pos]) - - # Check index branch nodes are the sum of their children. 
- - for pos in range(self._offset): - child = (pos << 1) + 1 - if child >= len(self._index): - assert self._index[pos] == 0 - elif child + 1 == len(self._index): - assert self._index[pos] == self._index[child] - else: - child_sum = self._index[child] + self._index[child + 1] - assert child_sum == self._index[pos] - except: - import sys - import traceback - traceback.print_exc(file=sys.stdout) - print('len', self._len) - print('load', self._load) - print('offset', self._offset) - print('len_index', len(self._index)) - print('index', self._index) - print('len_maxes', len(self._maxes)) - print('maxes', self._maxes) - print('len_keys', len(self._keys)) - print('keys', self._keys) - print('len_lists', len(self._lists)) - print('lists', self._lists) - raise - - -SortedListWithKey = SortedKeyList diff --git a/solnlib/packages/sortedcontainers/sortedset.py b/solnlib/packages/sortedcontainers/sortedset.py deleted file mode 100644 index be2b8999..00000000 --- a/solnlib/packages/sortedcontainers/sortedset.py +++ /dev/null @@ -1,733 +0,0 @@ -"""Sorted Set -============= - -:doc:`Sorted Containers` is an Apache2 licensed Python sorted -collections library, written in pure-Python, and fast as C-extensions. The -:doc:`introduction` is the best way to get started. - -Sorted set implementations: - -.. 
currentmodule:: sortedcontainers - -* :class:`SortedSet` - -""" - -from itertools import chain -from operator import eq, ne, gt, ge, lt, le -from textwrap import dedent - -from .sortedlist import SortedList, recursive_repr - -############################################################################### -# BEGIN Python 2/3 Shims -############################################################################### - -try: - from collections.abc import MutableSet, Sequence, Set -except ImportError: - from collections import MutableSet, Sequence, Set - -############################################################################### -# END Python 2/3 Shims -############################################################################### - - -class SortedSet(MutableSet, Sequence): - """Sorted set is a sorted mutable set. - - Sorted set values are maintained in sorted order. The design of sorted set - is simple: sorted set uses a set for set-operations and maintains a sorted - list of values. - - Sorted set values must be hashable and comparable. The hash and total - ordering of values must not change while they are stored in the sorted set. 
- - Mutable set methods: - - * :func:`SortedSet.__contains__` - * :func:`SortedSet.__iter__` - * :func:`SortedSet.__len__` - * :func:`SortedSet.add` - * :func:`SortedSet.discard` - - Sequence methods: - - * :func:`SortedSet.__getitem__` - * :func:`SortedSet.__delitem__` - * :func:`SortedSet.__reversed__` - - Methods for removing values: - - * :func:`SortedSet.clear` - * :func:`SortedSet.pop` - * :func:`SortedSet.remove` - - Set-operation methods: - - * :func:`SortedSet.difference` - * :func:`SortedSet.difference_update` - * :func:`SortedSet.intersection` - * :func:`SortedSet.intersection_update` - * :func:`SortedSet.symmetric_difference` - * :func:`SortedSet.symmetric_difference_update` - * :func:`SortedSet.union` - * :func:`SortedSet.update` - - Methods for miscellany: - - * :func:`SortedSet.copy` - * :func:`SortedSet.count` - * :func:`SortedSet.__repr__` - * :func:`SortedSet._check` - - Sorted list methods available: - - * :func:`SortedList.bisect_left` - * :func:`SortedList.bisect_right` - * :func:`SortedList.index` - * :func:`SortedList.irange` - * :func:`SortedList.islice` - * :func:`SortedList._reset` - - Additional sorted list methods available, if key-function used: - - * :func:`SortedKeyList.bisect_key_left` - * :func:`SortedKeyList.bisect_key_right` - * :func:`SortedKeyList.irange_key` - - Sorted set comparisons use subset and superset relations. Two sorted sets - are equal if and only if every element of each sorted set is contained in - the other (each is a subset of the other). A sorted set is less than - another sorted set if and only if the first sorted set is a proper subset - of the second sorted set (is a subset, but is not equal). A sorted set is - greater than another sorted set if and only if the first sorted set is a - proper superset of the second sorted set (is a superset, but is not equal). - - """ - def __init__(self, iterable=None, key=None): - """Initialize sorted set instance. 
- - Optional `iterable` argument provides an initial iterable of values to - initialize the sorted set. - - Optional `key` argument defines a callable that, like the `key` - argument to Python's `sorted` function, extracts a comparison key from - each value. The default, none, compares values directly. - - Runtime complexity: `O(n*log(n))` - - >>> ss = SortedSet([3, 1, 2, 5, 4]) - >>> ss - SortedSet([1, 2, 3, 4, 5]) - >>> from operator import neg - >>> ss = SortedSet([3, 1, 2, 5, 4], neg) - >>> ss - SortedSet([5, 4, 3, 2, 1], key=) - - :param iterable: initial values (optional) - :param key: function used to extract comparison key (optional) - - """ - self._key = key - - # SortedSet._fromset calls SortedSet.__init__ after initializing the - # _set attribute. So only create a new set if the _set attribute is not - # already present. - - if not hasattr(self, '_set'): - self._set = set() - - self._list = SortedList(self._set, key=key) - - # Expose some set methods publicly. - - _set = self._set - self.isdisjoint = _set.isdisjoint - self.issubset = _set.issubset - self.issuperset = _set.issuperset - - # Expose some sorted list methods publicly. - - _list = self._list - self.bisect_left = _list.bisect_left - self.bisect = _list.bisect - self.bisect_right = _list.bisect_right - self.index = _list.index - self.irange = _list.irange - self.islice = _list.islice - self._reset = _list._reset - - if key is not None: - self.bisect_key_left = _list.bisect_key_left - self.bisect_key_right = _list.bisect_key_right - self.bisect_key = _list.bisect_key - self.irange_key = _list.irange_key - - if iterable is not None: - self._update(iterable) - - - @classmethod - def _fromset(cls, values, key=None): - """Initialize sorted set from existing set. - - Used internally by set operations that return a new set. 
- - """ - sorted_set = object.__new__(cls) - sorted_set._set = values - sorted_set.__init__(key=key) - return sorted_set - - - @property - def key(self): - """Function used to extract comparison key from values. - - Sorted set compares values directly when the key function is none. - - """ - return self._key - - - def __contains__(self, value): - """Return true if `value` is an element of the sorted set. - - ``ss.__contains__(value)`` <==> ``value in ss`` - - Runtime complexity: `O(1)` - - >>> ss = SortedSet([1, 2, 3, 4, 5]) - >>> 3 in ss - True - - :param value: search for value in sorted set - :return: true if `value` in sorted set - - """ - return value in self._set - - - def __getitem__(self, index): - """Lookup value at `index` in sorted set. - - ``ss.__getitem__(index)`` <==> ``ss[index]`` - - Supports slicing. - - Runtime complexity: `O(log(n))` -- approximate. - - >>> ss = SortedSet('abcde') - >>> ss[2] - 'c' - >>> ss[-1] - 'e' - >>> ss[2:5] - ['c', 'd', 'e'] - - :param index: integer or slice for indexing - :return: value or list of values - :raises IndexError: if index out of range - - """ - return self._list[index] - - - def __delitem__(self, index): - """Remove value at `index` from sorted set. - - ``ss.__delitem__(index)`` <==> ``del ss[index]`` - - Supports slicing. - - Runtime complexity: `O(log(n))` -- approximate. - - >>> ss = SortedSet('abcde') - >>> del ss[2] - >>> ss - SortedSet(['a', 'b', 'd', 'e']) - >>> del ss[:2] - >>> ss - SortedSet(['d', 'e']) - - :param index: integer or slice for indexing - :raises IndexError: if index out of range - - """ - _set = self._set - _list = self._list - if isinstance(index, slice): - values = _list[index] - _set.difference_update(values) - else: - value = _list[index] - _set.remove(value) - del _list[index] - - - def __make_cmp(set_op, symbol, doc): - "Make comparator method." - def comparer(self, other): - "Compare method for sorted set and set." 
- if isinstance(other, SortedSet): - return set_op(self._set, other._set) - elif isinstance(other, Set): - return set_op(self._set, other) - return NotImplemented - - set_op_name = set_op.__name__ - comparer.__name__ = '__{0}__'.format(set_op_name) - doc_str = """Return true if and only if sorted set is {0} `other`. - - ``ss.__{1}__(other)`` <==> ``ss {2} other`` - - Comparisons use subset and superset semantics as with sets. - - Runtime complexity: `O(n)` - - :param other: `other` set - :return: true if sorted set is {0} `other` - - """ - comparer.__doc__ = dedent(doc_str.format(doc, set_op_name, symbol)) - return comparer - - - __eq__ = __make_cmp(eq, '==', 'equal to') - __ne__ = __make_cmp(ne, '!=', 'not equal to') - __lt__ = __make_cmp(lt, '<', 'a proper subset of') - __gt__ = __make_cmp(gt, '>', 'a proper superset of') - __le__ = __make_cmp(le, '<=', 'a subset of') - __ge__ = __make_cmp(ge, '>=', 'a superset of') - __make_cmp = staticmethod(__make_cmp) - - - def __len__(self): - """Return the size of the sorted set. - - ``ss.__len__()`` <==> ``len(ss)`` - - :return: size of sorted set - - """ - return len(self._set) - - - def __iter__(self): - """Return an iterator over the sorted set. - - ``ss.__iter__()`` <==> ``iter(ss)`` - - Iterating the sorted set while adding or deleting values may raise a - :exc:`RuntimeError` or fail to iterate over all values. - - """ - return iter(self._list) - - - def __reversed__(self): - """Return a reverse iterator over the sorted set. - - ``ss.__reversed__()`` <==> ``reversed(ss)`` - - Iterating the sorted set while adding or deleting values may raise a - :exc:`RuntimeError` or fail to iterate over all values. - - """ - return reversed(self._list) - - - def add(self, value): - """Add `value` to sorted set. - - Runtime complexity: `O(log(n))` -- approximate. 
- - >>> ss = SortedSet() - >>> ss.add(3) - >>> ss.add(1) - >>> ss.add(2) - >>> ss - SortedSet([1, 2, 3]) - - :param value: value to add to sorted set - - """ - _set = self._set - if value not in _set: - _set.add(value) - self._list.add(value) - - _add = add - - - def clear(self): - """Remove all values from sorted set. - - Runtime complexity: `O(n)` - - """ - self._set.clear() - self._list.clear() - - - def copy(self): - """Return a shallow copy of the sorted set. - - Runtime complexity: `O(n)` - - :return: new sorted set - - """ - return self._fromset(set(self._set), key=self._key) - - __copy__ = copy - - - def count(self, value): - """Return number of occurrences of `value` in the sorted set. - - Runtime complexity: `O(1)` - - >>> ss = SortedSet([1, 2, 3, 4, 5]) - >>> ss.count(3) - 1 - - :param value: value to count in sorted set - :return: count - - """ - return 1 if value in self._set else 0 - - - def discard(self, value): - """Remove `value` from sorted set if it is a member. - - If `value` is not a member, do nothing. - - Runtime complexity: `O(log(n))` -- approximate. - - >>> ss = SortedSet([1, 2, 3, 4, 5]) - >>> ss.discard(5) - >>> ss.discard(0) - >>> ss == set([1, 2, 3, 4]) - True - - :param value: `value` to discard from sorted set - - """ - _set = self._set - if value in _set: - _set.remove(value) - self._list.remove(value) - - _discard = discard - - - def pop(self, index=-1): - """Remove and return value at `index` in sorted set. - - Raise :exc:`IndexError` if the sorted set is empty or index is out of - range. - - Negative indices are supported. - - Runtime complexity: `O(log(n))` -- approximate. 
- - >>> ss = SortedSet('abcde') - >>> ss.pop() - 'e' - >>> ss.pop(2) - 'c' - >>> ss - SortedSet(['a', 'b', 'd']) - - :param int index: index of value (default -1) - :return: value - :raises IndexError: if index is out of range - - """ - # pylint: disable=arguments-differ - value = self._list.pop(index) - self._set.remove(value) - return value - - - def remove(self, value): - """Remove `value` from sorted set; `value` must be a member. - - If `value` is not a member, raise :exc:`KeyError`. - - Runtime complexity: `O(log(n))` -- approximate. - - >>> ss = SortedSet([1, 2, 3, 4, 5]) - >>> ss.remove(5) - >>> ss == set([1, 2, 3, 4]) - True - >>> ss.remove(0) - Traceback (most recent call last): - ... - KeyError: 0 - - :param value: `value` to remove from sorted set - :raises KeyError: if `value` is not in sorted set - - """ - self._set.remove(value) - self._list.remove(value) - - - def difference(self, *iterables): - """Return the difference of two or more sets as a new sorted set. - - The `difference` method also corresponds to operator ``-``. - - ``ss.__sub__(iterable)`` <==> ``ss - iterable`` - - The difference is all values that are in this sorted set but not the - other `iterables`. - - >>> ss = SortedSet([1, 2, 3, 4, 5]) - >>> ss.difference([4, 5, 6, 7]) - SortedSet([1, 2, 3]) - - :param iterables: iterable arguments - :return: new sorted set - - """ - diff = self._set.difference(*iterables) - return self._fromset(diff, key=self._key) - - __sub__ = difference - - - def difference_update(self, *iterables): - """Remove all values of `iterables` from this sorted set. - - The `difference_update` method also corresponds to operator ``-=``. 
- - ``ss.__isub__(iterable)`` <==> ``ss -= iterable`` - - >>> ss = SortedSet([1, 2, 3, 4, 5]) - >>> _ = ss.difference_update([4, 5, 6, 7]) - >>> ss - SortedSet([1, 2, 3]) - - :param iterables: iterable arguments - :return: itself - - """ - _set = self._set - _list = self._list - values = set(chain(*iterables)) - if (4 * len(values)) > len(_set): - _set.difference_update(values) - _list.clear() - _list.update(_set) - else: - _discard = self._discard - for value in values: - _discard(value) - return self - - __isub__ = difference_update - - - def intersection(self, *iterables): - """Return the intersection of two or more sets as a new sorted set. - - The `intersection` method also corresponds to operator ``&``. - - ``ss.__and__(iterable)`` <==> ``ss & iterable`` - - The intersection is all values that are in this sorted set and each of - the other `iterables`. - - >>> ss = SortedSet([1, 2, 3, 4, 5]) - >>> ss.intersection([4, 5, 6, 7]) - SortedSet([4, 5]) - - :param iterables: iterable arguments - :return: new sorted set - - """ - intersect = self._set.intersection(*iterables) - return self._fromset(intersect, key=self._key) - - __and__ = intersection - __rand__ = __and__ - - - def intersection_update(self, *iterables): - """Update the sorted set with the intersection of `iterables`. - - The `intersection_update` method also corresponds to operator ``&=``. - - ``ss.__iand__(iterable)`` <==> ``ss &= iterable`` - - Keep only values found in itself and all `iterables`. - - >>> ss = SortedSet([1, 2, 3, 4, 5]) - >>> _ = ss.intersection_update([4, 5, 6, 7]) - >>> ss - SortedSet([4, 5]) - - :param iterables: iterable arguments - :return: itself - - """ - _set = self._set - _list = self._list - _set.intersection_update(*iterables) - _list.clear() - _list.update(_set) - return self - - __iand__ = intersection_update - - - def symmetric_difference(self, other): - """Return the symmetric difference with `other` as a new sorted set. 
- - The `symmetric_difference` method also corresponds to operator ``^``. - - ``ss.__xor__(other)`` <==> ``ss ^ other`` - - The symmetric difference is all values tha are in exactly one of the - sets. - - >>> ss = SortedSet([1, 2, 3, 4, 5]) - >>> ss.symmetric_difference([4, 5, 6, 7]) - SortedSet([1, 2, 3, 6, 7]) - - :param other: `other` iterable - :return: new sorted set - - """ - diff = self._set.symmetric_difference(other) - return self._fromset(diff, key=self._key) - - __xor__ = symmetric_difference - __rxor__ = __xor__ - - - def symmetric_difference_update(self, other): - """Update the sorted set with the symmetric difference with `other`. - - The `symmetric_difference_update` method also corresponds to operator - ``^=``. - - ``ss.__ixor__(other)`` <==> ``ss ^= other`` - - Keep only values found in exactly one of itself and `other`. - - >>> ss = SortedSet([1, 2, 3, 4, 5]) - >>> _ = ss.symmetric_difference_update([4, 5, 6, 7]) - >>> ss - SortedSet([1, 2, 3, 6, 7]) - - :param other: `other` iterable - :return: itself - - """ - _set = self._set - _list = self._list - _set.symmetric_difference_update(other) - _list.clear() - _list.update(_set) - return self - - __ixor__ = symmetric_difference_update - - - def union(self, *iterables): - """Return new sorted set with values from itself and all `iterables`. - - The `union` method also corresponds to operator ``|``. - - ``ss.__or__(iterable)`` <==> ``ss | iterable`` - - >>> ss = SortedSet([1, 2, 3, 4, 5]) - >>> ss.union([4, 5, 6, 7]) - SortedSet([1, 2, 3, 4, 5, 6, 7]) - - :param iterables: iterable arguments - :return: new sorted set - - """ - return self.__class__(chain(iter(self), *iterables), key=self._key) - - __or__ = union - __ror__ = __or__ - - - def update(self, *iterables): - """Update the sorted set adding values from all `iterables`. - - The `update` method also corresponds to operator ``|=``. 
- - ``ss.__ior__(iterable)`` <==> ``ss |= iterable`` - - >>> ss = SortedSet([1, 2, 3, 4, 5]) - >>> _ = ss.update([4, 5, 6, 7]) - >>> ss - SortedSet([1, 2, 3, 4, 5, 6, 7]) - - :param iterables: iterable arguments - :return: itself - - """ - _set = self._set - _list = self._list - values = set(chain(*iterables)) - if (4 * len(values)) > len(_set): - _list = self._list - _set.update(values) - _list.clear() - _list.update(_set) - else: - _add = self._add - for value in values: - _add(value) - return self - - __ior__ = update - _update = update - - - def __reduce__(self): - """Support for pickle. - - The tricks played with exposing methods in :func:`SortedSet.__init__` - confuse pickle so customize the reducer. - - """ - return (type(self), (self._set, self._key)) - - - @recursive_repr() - def __repr__(self): - """Return string representation of sorted set. - - ``ss.__repr__()`` <==> ``repr(ss)`` - - :return: string representation - - """ - _key = self._key - key = '' if _key is None else ', key={0!r}'.format(_key) - type_name = type(self).__name__ - return '{0}({1!r}{2})'.format(type_name, list(self), key) - - - def _check(self): - """Check invariants of sorted set. - - Runtime complexity: `O(n)` - - """ - _set = self._set - _list = self._list - _list._check() - assert len(_set) == len(_list) - assert all(value in _set for value in _list) diff --git a/solnlib/packages/splunklib/LICENSE b/solnlib/packages/splunklib/LICENSE deleted file mode 100644 index d6456956..00000000 --- a/solnlib/packages/splunklib/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of 
the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/solnlib/packages/splunklib/__init__.py b/solnlib/packages/splunklib/__init__.py deleted file mode 100644 index c437b0e7..00000000 --- a/solnlib/packages/splunklib/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright 2011-2015 Splunk, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"): you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Python library for Splunk.""" - -from __future__ import absolute_import -from .six.moves import map -__version_info__ = (1, 6, 6) -__version__ = ".".join(map(str, __version_info__)) diff --git a/solnlib/packages/splunklib/binding.py b/solnlib/packages/splunklib/binding.py deleted file mode 100644 index 8bfa28d9..00000000 --- a/solnlib/packages/splunklib/binding.py +++ /dev/null @@ -1,1398 +0,0 @@ -# Copyright 2011-2015 Splunk, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"): you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""The **splunklib.binding** module provides a low-level binding interface to the -`Splunk REST API `_. - -This module handles the wire details of calling the REST API, such as -authentication tokens, prefix paths, URL encoding, and so on. Actual path -segments, ``GET`` and ``POST`` arguments, and the parsing of responses is left -to the user. - -If you want a friendlier interface to the Splunk REST API, use the -:mod:`splunklib.client` module. -""" - -from __future__ import absolute_import - -import io -import logging -import socket -import ssl -import sys -from base64 import b64encode -from contextlib import contextmanager -from datetime import datetime -from functools import wraps -from io import BytesIO -from xml.etree.ElementTree import XML - -from . 
import six -from .six import StringIO -from .six.moves import urllib - -from .data import record - -try: - from xml.etree.ElementTree import ParseError -except ImportError as e: - from xml.parsers.expat import ExpatError as ParseError - - -__all__ = [ - "AuthenticationError", - "connect", - "Context", - "handler", - "HTTPError" -] - -# If you change these, update the docstring -# on _authority as well. -DEFAULT_HOST = "localhost" -DEFAULT_PORT = "8089" -DEFAULT_SCHEME = "https" - -def _log_duration(f): - @wraps(f) - def new_f(*args, **kwargs): - start_time = datetime.now() - val = f(*args, **kwargs) - end_time = datetime.now() - logging.debug("Operation took %s", end_time-start_time) - return val - return new_f - - -def _parse_cookies(cookie_str, dictionary): - """Tries to parse any key-value pairs of cookies in a string, - then updates the the dictionary with any key-value pairs found. - - **Example**:: - dictionary = {} - _parse_cookies('my=value', dictionary) - # Now the following is True - dictionary['my'] == 'value' - - :param cookie_str: A string containing "key=value" pairs from an HTTP "Set-Cookie" header. - :type cookie_str: ``str`` - :param dictionary: A dictionary to update with any found key-value pairs. - :type dictionary: ``dict`` - """ - parsed_cookie = six.moves.http_cookies.SimpleCookie(cookie_str) - for cookie in parsed_cookie.values(): - dictionary[cookie.key] = cookie.coded_value - - -def _make_cookie_header(cookies): - """ - Takes a list of 2-tuples of key-value pairs of - cookies, and returns a valid HTTP ``Cookie`` - header. - - **Example**:: - - header = _make_cookie_header([("key", "value"), ("key_2", "value_2")]) - # Now the following is True - header == "key=value; key_2=value_2" - - :param cookies: A list of 2-tuples of cookie key-value pairs. - :type cookies: ``list`` of 2-tuples - :return: ``str` An HTTP header cookie string. 
- :rtype: ``str`` - """ - return "; ".join("%s=%s" % (key, value) for key, value in cookies) - -# Singleton values to eschew None -class _NoAuthenticationToken(object): - """The value stored in a :class:`Context` or :class:`splunklib.client.Service` - class that is not logged in. - - If a ``Context`` or ``Service`` object is created without an authentication - token, and there has not yet been a call to the ``login`` method, the token - field of the ``Context`` or ``Service`` object is set to - ``_NoAuthenticationToken``. - - Likewise, after a ``Context`` or ``Service`` object has been logged out, the - token is set to this value again. - """ - pass - - -class UrlEncoded(str): - """This class marks URL-encoded strings. - It should be considered an SDK-private implementation detail. - - Manually tracking whether strings are URL encoded can be difficult. Avoid - calling ``urllib.quote`` to replace special characters with escapes. When - you receive a URL-encoded string, *do* use ``urllib.unquote`` to replace - escapes with single characters. Then, wrap any string you want to use as a - URL in ``UrlEncoded``. Note that because the ``UrlEncoded`` class is - idempotent, making multiple calls to it is OK. - - ``UrlEncoded`` objects are identical to ``str`` objects (including being - equal if their contents are equal) except when passed to ``UrlEncoded`` - again. - - ``UrlEncoded`` removes the ``str`` type support for interpolating values - with ``%`` (doing that raises a ``TypeError``). There is no reliable way to - encode values this way, so instead, interpolate into a string, quoting by - hand, and call ``UrlEncode`` with ``skip_encode=True``. - - **Example**:: - - import urllib - UrlEncoded('%s://%s' % (scheme, urllib.quote(host)), skip_encode=True) - - If you append ``str`` strings and ``UrlEncoded`` strings, the result is also - URL encoded. 
- - **Example**:: - - UrlEncoded('ab c') + 'de f' == UrlEncoded('ab cde f') - 'ab c' + UrlEncoded('de f') == UrlEncoded('ab cde f') - """ - def __new__(self, val='', skip_encode=False, encode_slash=False): - if isinstance(val, UrlEncoded): - # Don't urllib.quote something already URL encoded. - return val - elif skip_encode: - return str.__new__(self, val) - elif encode_slash: - return str.__new__(self, urllib.parse.quote_plus(val)) - else: - # When subclassing str, just call str's __new__ method - # with your class and the value you want to have in the - # new string. - return str.__new__(self, urllib.parse.quote(val)) - - def __add__(self, other): - """self + other - - If *other* is not a ``UrlEncoded``, URL encode it before - adding it. - """ - if isinstance(other, UrlEncoded): - return UrlEncoded(str.__add__(self, other), skip_encode=True) - else: - return UrlEncoded(str.__add__(self, urllib.parse.quote(other)), skip_encode=True) - - def __radd__(self, other): - """other + self - - If *other* is not a ``UrlEncoded``, URL _encode it before - adding it. - """ - if isinstance(other, UrlEncoded): - return UrlEncoded(str.__radd__(self, other), skip_encode=True) - else: - return UrlEncoded(str.__add__(urllib.parse.quote(other), self), skip_encode=True) - - def __mod__(self, fields): - """Interpolation into ``UrlEncoded``s is disabled. - - If you try to write ``UrlEncoded("%s") % "abc", will get a - ``TypeError``. - """ - raise TypeError("Cannot interpolate into a UrlEncoded object.") - def __repr__(self): - return "UrlEncoded(%s)" % repr(urllib.parse.unquote(str(self))) - -@contextmanager -def _handle_auth_error(msg): - """Handle reraising HTTP authentication errors as something clearer. - - If an ``HTTPError`` is raised with status 401 (access denied) in - the body of this context manager, reraise it as an - ``AuthenticationError`` instead, with *msg* as its message. - - This function adds no round trips to the server. 
- - :param msg: The message to be raised in ``AuthenticationError``. - :type msg: ``str`` - - **Example**:: - - with _handle_auth_error("Your login failed."): - ... # make an HTTP request - """ - try: - yield - except HTTPError as he: - if he.status == 401: - raise AuthenticationError(msg, he) - else: - raise - -def _authentication(request_fun): - """Decorator to handle autologin and authentication errors. - - *request_fun* is a function taking no arguments that needs to - be run with this ``Context`` logged into Splunk. - - ``_authentication``'s behavior depends on whether the - ``autologin`` field of ``Context`` is set to ``True`` or - ``False``. If it's ``False``, then ``_authentication`` - aborts if the ``Context`` is not logged in, and raises an - ``AuthenticationError`` if an ``HTTPError`` of status 401 is - raised in *request_fun*. If it's ``True``, then - ``_authentication`` will try at all sensible places to - log in before issuing the request. - - If ``autologin`` is ``False``, ``_authentication`` makes - one roundtrip to the server if the ``Context`` is logged in, - or zero if it is not. If ``autologin`` is ``True``, it's less - deterministic, and may make at most three roundtrips (though - that would be a truly pathological case). - - :param request_fun: A function of no arguments encapsulating - the request to make to the server. - - **Example**:: - - import splunklib.binding as binding - c = binding.connect(..., autologin=True) - c.logout() - def f(): - c.get("/services") - return 42 - print _authentication(f) - """ - @wraps(request_fun) - def wrapper(self, *args, **kwargs): - if self.token is _NoAuthenticationToken and \ - not self.has_cookies(): - # Not yet logged in. - if self.autologin and self.username and self.password: - # This will throw an uncaught - # AuthenticationError if it fails. - self.login() - else: - # Try the request anyway without authentication. - # Most requests will fail. Some will succeed, such as - # 'GET server/info'. 
- with _handle_auth_error("Request aborted: not logged in."): - return request_fun(self, *args, **kwargs) - try: - # Issue the request - return request_fun(self, *args, **kwargs) - except HTTPError as he: - if he.status == 401 and self.autologin: - # Authentication failed. Try logging in, and then - # rerunning the request. If either step fails, throw - # an AuthenticationError and give up. - with _handle_auth_error("Autologin failed."): - self.login() - with _handle_auth_error( - "Autologin succeeded, but there was an auth error on " - "next request. Something is very wrong."): - return request_fun(self, *args, **kwargs) - elif he.status == 401 and not self.autologin: - raise AuthenticationError( - "Request failed: Session is not logged in.", he) - else: - raise - - return wrapper - - -def _authority(scheme=DEFAULT_SCHEME, host=DEFAULT_HOST, port=DEFAULT_PORT): - """Construct a URL authority from the given *scheme*, *host*, and *port*. - - Named in accordance with RFC2396_, which defines URLs as:: - - ://? - - .. _RFC2396: http://www.ietf.org/rfc/rfc2396.txt - - So ``https://localhost:8000/a/b/b?boris=hilda`` would be parsed as:: - - scheme := https - authority := localhost:8000 - path := /a/b/c - query := boris=hilda - - :param scheme: URL scheme (the default is "https") - :type scheme: "http" or "https" - :param host: The host name (the default is "localhost") - :type host: string - :param port: The port number (the default is 8089) - :type port: integer - :return: The URL authority. 
- :rtype: UrlEncoded (subclass of ``str``) - - **Example**:: - - _authority() == "https://localhost:8089" - - _authority(host="splunk.utopia.net") == "https://splunk.utopia.net:8089" - - _authority(host="2001:0db8:85a3:0000:0000:8a2e:0370:7334") == \ - "https://[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:8089" - - _authority(scheme="http", host="splunk.utopia.net", port="471") == \ - "http://splunk.utopia.net:471" - - """ - if ':' in host: - # IPv6 addresses must be enclosed in [ ] in order to be well - # formed. - host = '[' + host + ']' - return UrlEncoded("%s://%s:%s" % (scheme, host, port), skip_encode=True) - -# kwargs: sharing, owner, app -def namespace(sharing=None, owner=None, app=None, **kwargs): - """This function constructs a Splunk namespace. - - Every Splunk resource belongs to a namespace. The namespace is specified by - the pair of values ``owner`` and ``app`` and is governed by a ``sharing`` mode. - The possible values for ``sharing`` are: "user", "app", "global" and "system", - which map to the following combinations of ``owner`` and ``app`` values: - - "user" => {owner}, {app} - - "app" => nobody, {app} - - "global" => nobody, {app} - - "system" => nobody, system - - "nobody" is a special user name that basically means no user, and "system" - is the name reserved for system resources. - - "-" is a wildcard that can be used for both ``owner`` and ``app`` values and - refers to all users and all apps, respectively. - - In general, when you specify a namespace you can specify any combination of - these three values and the library will reconcile the triple, overriding the - provided values as appropriate. - - Finally, if no namespacing is specified the library will make use of the - ``/services`` branch of the REST API, which provides a namespaced view of - Splunk resources equivelent to using ``owner={currentUser}`` and - ``app={defaultApp}``. 
- - The ``namespace`` function returns a representation of the namespace from - reconciling the values you provide. It ignores any keyword arguments other - than ``owner``, ``app``, and ``sharing``, so you can provide ``dicts`` of - configuration information without first having to extract individual keys. - - :param sharing: The sharing mode (the default is "user"). - :type sharing: "system", "global", "app", or "user" - :param owner: The owner context (the default is "None"). - :type owner: ``string`` - :param app: The app context (the default is "None"). - :type app: ``string`` - :returns: A :class:`splunklib.data.Record` containing the reconciled - namespace. - - **Example**:: - - import splunklib.binding as binding - n = binding.namespace(sharing="user", owner="boris", app="search") - n = binding.namespace(sharing="global", app="search") - """ - if sharing in ["system"]: - return record({'sharing': sharing, 'owner': "nobody", 'app': "system" }) - if sharing in ["global", "app"]: - return record({'sharing': sharing, 'owner': "nobody", 'app': app}) - if sharing in ["user", None]: - return record({'sharing': sharing, 'owner': owner, 'app': app}) - raise ValueError("Invalid value for argument: 'sharing'") - - -class Context(object): - """This class represents a context that encapsulates a splunkd connection. - - The ``Context`` class encapsulates the details of HTTP requests, - authentication, a default namespace, and URL prefixes to simplify access to - the REST API. - - After creating a ``Context`` object, you must call its :meth:`login` - method before you can issue requests to splunkd. Or, use the :func:`connect` - function to create an already-authenticated ``Context`` object. You can - provide a session token explicitly (the same token can be shared by multiple - ``Context`` objects) to provide authentication. - - :param host: The host name (the default is "localhost"). - :type host: ``string`` - :param port: The port number (the default is 8089). 
- :type port: ``integer`` - :param scheme: The scheme for accessing the service (the default is "https"). - :type scheme: "https" or "http" - :param verify: Enable (True) or disable (False) SSL verrification for https connections. - :type verify: ``Boolean`` - :param sharing: The sharing mode for the namespace (the default is "user"). - :type sharing: "global", "system", "app", or "user" - :param owner: The owner context of the namespace (optional, the default is "None"). - :type owner: ``string`` - :param app: The app context of the namespace (optional, the default is "None"). - :type app: ``string`` - :param token: A session token. When provided, you don't need to call :meth:`login`. - :type token: ``string`` - :param cookie: A session cookie. When provided, you don't need to call :meth:`login`. - This parameter is only supported for Splunk 6.2+. - :type cookie: ``string`` - :param username: The Splunk account username, which is used to - authenticate the Splunk instance. - :type username: ``string`` - :param password: The password for the Splunk account. - :type password: ``string`` - :param headers: List of extra HTTP headers to send (optional). - :type headers: ``list`` of 2-tuples. - :param handler: The HTTP request handler (optional). - :returns: A ``Context`` instance. - - **Example**:: - - import splunklib.binding as binding - c = binding.Context(username="boris", password="natasha", ...) 
- c.login() - # Or equivalently - c = binding.connect(username="boris", password="natasha") - # Or if you already have a session token - c = binding.Context(token="atg232342aa34324a") - # Or if you already have a valid cookie - c = binding.Context(cookie="splunkd_8089=...") - """ - def __init__(self, handler=None, **kwargs): - self.http = HttpLib(handler, kwargs.get("verify", False), key_file=kwargs.get("key_file"), - cert_file=kwargs.get("cert_file")) # Default to False for backward compat - self.token = kwargs.get("token", _NoAuthenticationToken) - if self.token is None: # In case someone explicitly passes token=None - self.token = _NoAuthenticationToken - self.scheme = kwargs.get("scheme", DEFAULT_SCHEME) - self.host = kwargs.get("host", DEFAULT_HOST) - self.port = int(kwargs.get("port", DEFAULT_PORT)) - self.authority = _authority(self.scheme, self.host, self.port) - self.namespace = namespace(**kwargs) - self.username = kwargs.get("username", "") - self.password = kwargs.get("password", "") - self.basic = kwargs.get("basic", False) - self.autologin = kwargs.get("autologin", False) - self.additional_headers = kwargs.get("headers", []) - - # Store any cookies in the self.http._cookies dict - if "cookie" in kwargs and kwargs['cookie'] not in [None, _NoAuthenticationToken]: - _parse_cookies(kwargs["cookie"], self.http._cookies) - - def get_cookies(self): - """Gets the dictionary of cookies from the ``HttpLib`` member of this instance. - - :return: Dictionary of cookies stored on the ``self.http``. - :rtype: ``dict`` - """ - return self.http._cookies - - def has_cookies(self): - """Returns true if the ``HttpLib`` member of this instance has at least - one cookie stored. - - :return: ``True`` if there is at least one cookie, else ``False`` - :rtype: ``bool`` - """ - return len(self.get_cookies()) > 0 - - # Shared per-context request headers - @property - def _auth_headers(self): - """Headers required to authenticate a request. 
- - Assumes your ``Context`` already has a authentication token or - cookie, either provided explicitly or obtained by logging - into the Splunk instance. - - :returns: A list of 2-tuples containing key and value - """ - if self.has_cookies(): - return [("Cookie", _make_cookie_header(list(self.get_cookies().items())))] - elif self.basic and (self.username and self.password): - token = 'Basic %s' % b64encode(("%s:%s" % (self.username, self.password)).encode('utf-8')).decode('ascii') - return [("Authorization", token)] - elif self.token is _NoAuthenticationToken: - return [] - else: - # Ensure the token is properly formatted - if self.token.startswith('Splunk '): - token = self.token - else: - token = 'Splunk %s' % self.token - return [("Authorization", token)] - - def connect(self): - """Returns an open connection (socket) to the Splunk instance. - - This method is used for writing bulk events to an index or similar tasks - where the overhead of opening a connection multiple times would be - prohibitive. - - :returns: A socket. - - **Example**:: - - import splunklib.binding as binding - c = binding.connect(...) - socket = c.connect() - socket.write("POST %s HTTP/1.1\\r\\n" % "some/path/to/post/to") - socket.write("Host: %s:%s\\r\\n" % (c.host, c.port)) - socket.write("Accept-Encoding: identity\\r\\n") - socket.write("Authorization: %s\\r\\n" % c.token) - socket.write("X-Splunk-Input-Mode: Streaming\\r\\n") - socket.write("\\r\\n") - """ - sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - if self.scheme == "https": - sock = ssl.wrap_socket(sock) - sock.connect((socket.gethostbyname(self.host), self.port)) - return sock - - @_authentication - @_log_duration - def delete(self, path_segment, owner=None, app=None, sharing=None, **query): - """Performs a DELETE operation at the REST path segment with the given - namespace and query. - - This method is named to match the HTTP method. 
``delete`` makes at least - one round trip to the server, one additional round trip for each 303 - status returned, and at most two additional round trips if - the ``autologin`` field of :func:`connect` is set to ``True``. - - If *owner*, *app*, and *sharing* are omitted, this method uses the - default :class:`Context` namespace. All other keyword arguments are - included in the URL as query parameters. - - :raises AuthenticationError: Raised when the ``Context`` object is not - logged in. - :raises HTTPError: Raised when an error occurred in a GET operation from - *path_segment*. - :param path_segment: A REST path segment. - :type path_segment: ``string`` - :param owner: The owner context of the namespace (optional). - :type owner: ``string`` - :param app: The app context of the namespace (optional). - :type app: ``string`` - :param sharing: The sharing mode of the namespace (optional). - :type sharing: ``string`` - :param query: All other keyword arguments, which are used as query - parameters. - :type query: ``string`` - :return: The response from the server. - :rtype: ``dict`` with keys ``body``, ``headers``, ``reason``, - and ``status`` - - **Example**:: - - c = binding.connect(...) 
- c.delete('saved/searches/boris') == \\ - {'body': ...a response reader object..., - 'headers': [('content-length', '1786'), - ('expires', 'Fri, 30 Oct 1998 00:00:00 GMT'), - ('server', 'Splunkd'), - ('connection', 'close'), - ('cache-control', 'no-store, max-age=0, must-revalidate, no-cache'), - ('date', 'Fri, 11 May 2012 16:53:06 GMT'), - ('content-type', 'text/xml; charset=utf-8')], - 'reason': 'OK', - 'status': 200} - c.delete('nonexistant/path') # raises HTTPError - c.logout() - c.delete('apps/local') # raises AuthenticationError - """ - path = self.authority + self._abspath(path_segment, owner=owner, - app=app, sharing=sharing) - logging.debug("DELETE request to %s (body: %s)", path, repr(query)) - response = self.http.delete(path, self._auth_headers, **query) - return response - - @_authentication - @_log_duration - def get(self, path_segment, owner=None, app=None, headers=None, sharing=None, **query): - """Performs a GET operation from the REST path segment with the given - namespace and query. - - This method is named to match the HTTP method. ``get`` makes at least - one round trip to the server, one additional round trip for each 303 - status returned, and at most two additional round trips if - the ``autologin`` field of :func:`connect` is set to ``True``. - - If *owner*, *app*, and *sharing* are omitted, this method uses the - default :class:`Context` namespace. All other keyword arguments are - included in the URL as query parameters. - - :raises AuthenticationError: Raised when the ``Context`` object is not - logged in. - :raises HTTPError: Raised when an error occurred in a GET operation from - *path_segment*. - :param path_segment: A REST path segment. - :type path_segment: ``string`` - :param owner: The owner context of the namespace (optional). - :type owner: ``string`` - :param app: The app context of the namespace (optional). - :type app: ``string`` - :param headers: List of extra HTTP headers to send (optional). 
- :type headers: ``list`` of 2-tuples. - :param sharing: The sharing mode of the namespace (optional). - :type sharing: ``string`` - :param query: All other keyword arguments, which are used as query - parameters. - :type query: ``string`` - :return: The response from the server. - :rtype: ``dict`` with keys ``body``, ``headers``, ``reason``, - and ``status`` - - **Example**:: - - c = binding.connect(...) - c.get('apps/local') == \\ - {'body': ...a response reader object..., - 'headers': [('content-length', '26208'), - ('expires', 'Fri, 30 Oct 1998 00:00:00 GMT'), - ('server', 'Splunkd'), - ('connection', 'close'), - ('cache-control', 'no-store, max-age=0, must-revalidate, no-cache'), - ('date', 'Fri, 11 May 2012 16:30:35 GMT'), - ('content-type', 'text/xml; charset=utf-8')], - 'reason': 'OK', - 'status': 200} - c.get('nonexistant/path') # raises HTTPError - c.logout() - c.get('apps/local') # raises AuthenticationError - """ - if headers is None: - headers = [] - - path = self.authority + self._abspath(path_segment, owner=owner, - app=app, sharing=sharing) - logging.debug("GET request to %s (body: %s)", path, repr(query)) - all_headers = headers + self.additional_headers + self._auth_headers - response = self.http.get(path, all_headers, **query) - return response - - @_authentication - @_log_duration - def post(self, path_segment, owner=None, app=None, sharing=None, headers=None, **query): - """Performs a POST operation from the REST path segment with the given - namespace and query. - - This method is named to match the HTTP method. ``post`` makes at least - one round trip to the server, one additional round trip for each 303 - status returned, and at most two additional round trips if - the ``autologin`` field of :func:`connect` is set to ``True``. - - If *owner*, *app*, and *sharing* are omitted, this method uses the - default :class:`Context` namespace. All other keyword arguments are - included in the URL as query parameters. 
- - Some of Splunk's endpoints, such as ``receivers/simple`` and - ``receivers/stream``, require unstructured data in the POST body - and all metadata passed as GET-style arguments. If you provide - a ``body`` argument to ``post``, it will be used as the POST - body, and all other keyword arguments will be passed as - GET-style arguments in the URL. - - :raises AuthenticationError: Raised when the ``Context`` object is not - logged in. - :raises HTTPError: Raised when an error occurred in a GET operation from - *path_segment*. - :param path_segment: A REST path segment. - :type path_segment: ``string`` - :param owner: The owner context of the namespace (optional). - :type owner: ``string`` - :param app: The app context of the namespace (optional). - :type app: ``string`` - :param sharing: The sharing mode of the namespace (optional). - :type sharing: ``string`` - :param headers: List of extra HTTP headers to send (optional). - :type headers: ``list`` of 2-tuples. - :param query: All other keyword arguments, which are used as query - parameters. - :type query: ``string`` - :return: The response from the server. - :rtype: ``dict`` with keys ``body``, ``headers``, ``reason``, - and ``status`` - - **Example**:: - - c = binding.connect(...) 
- c.post('saved/searches', name='boris', - search='search * earliest=-1m | head 1') == \\ - {'body': ...a response reader object..., - 'headers': [('content-length', '10455'), - ('expires', 'Fri, 30 Oct 1998 00:00:00 GMT'), - ('server', 'Splunkd'), - ('connection', 'close'), - ('cache-control', 'no-store, max-age=0, must-revalidate, no-cache'), - ('date', 'Fri, 11 May 2012 16:46:06 GMT'), - ('content-type', 'text/xml; charset=utf-8')], - 'reason': 'Created', - 'status': 201} - c.post('nonexistant/path') # raises HTTPError - c.logout() - # raises AuthenticationError: - c.post('saved/searches', name='boris', - search='search * earliest=-1m | head 1') - """ - if headers is None: - headers = [] - - path = self.authority + self._abspath(path_segment, owner=owner, app=app, sharing=sharing) - logging.debug("POST request to %s (body: %s)", path, repr(query)) - all_headers = headers + self.additional_headers + self._auth_headers - response = self.http.post(path, all_headers, **query) - return response - - @_authentication - @_log_duration - def request(self, path_segment, method="GET", headers=None, body="", - owner=None, app=None, sharing=None): - """Issues an arbitrary HTTP request to the REST path segment. - - This method is named to match ``httplib.request``. This function - makes a single round trip to the server. - - If *owner*, *app*, and *sharing* are omitted, this method uses the - default :class:`Context` namespace. All other keyword arguments are - included in the URL as query parameters. - - :raises AuthenticationError: Raised when the ``Context`` object is not - logged in. - :raises HTTPError: Raised when an error occurred in a GET operation from - *path_segment*. - :param path_segment: A REST path segment. - :type path_segment: ``string`` - :param method: The HTTP method to use (optional). - :type method: ``string`` - :param headers: List of extra HTTP headers to send (optional). - :type headers: ``list`` of 2-tuples. 
- :param body: Content of the HTTP request (optional). - :type body: ``string`` - :param owner: The owner context of the namespace (optional). - :type owner: ``string`` - :param app: The app context of the namespace (optional). - :type app: ``string`` - :param sharing: The sharing mode of the namespace (optional). - :type sharing: ``string`` - :param query: All other keyword arguments, which are used as query - parameters. - :type query: ``string`` - :return: The response from the server. - :rtype: ``dict`` with keys ``body``, ``headers``, ``reason``, - and ``status`` - - **Example**:: - - c = binding.connect(...) - c.request('saved/searches', method='GET') == \\ - {'body': ...a response reader object..., - 'headers': [('content-length', '46722'), - ('expires', 'Fri, 30 Oct 1998 00:00:00 GMT'), - ('server', 'Splunkd'), - ('connection', 'close'), - ('cache-control', 'no-store, max-age=0, must-revalidate, no-cache'), - ('date', 'Fri, 11 May 2012 17:24:19 GMT'), - ('content-type', 'text/xml; charset=utf-8')], - 'reason': 'OK', - 'status': 200} - c.request('nonexistant/path', method='GET') # raises HTTPError - c.logout() - c.get('apps/local') # raises AuthenticationError - """ - if headers is None: - headers = [] - - path = self.authority \ - + self._abspath(path_segment, owner=owner, - app=app, sharing=sharing) - all_headers = headers + self.additional_headers + self._auth_headers - logging.debug("%s request to %s (headers: %s, body: %s)", - method, path, str(all_headers), repr(body)) - response = self.http.request(path, - {'method': method, - 'headers': all_headers, - 'body': body}) - return response - - def login(self): - """Logs into the Splunk instance referred to by the :class:`Context` - object. - - Unless a ``Context`` is created with an explicit authentication token - (probably obtained by logging in from a different ``Context`` object) - you must call :meth:`login` before you can issue requests. 
- The authentication token obtained from the server is stored in the - ``token`` field of the ``Context`` object. - - :raises AuthenticationError: Raised when login fails. - :returns: The ``Context`` object, so you can chain calls. - - **Example**:: - - import splunklib.binding as binding - c = binding.Context(...).login() - # Then issue requests... - """ - - if self.has_cookies() and \ - (not self.username and not self.password): - # If we were passed session cookie(s), but no username or - # password, then login is a nop, since we're automatically - # logged in. - return - - if self.token is not _NoAuthenticationToken and \ - (not self.username and not self.password): - # If we were passed a session token, but no username or - # password, then login is a nop, since we're automatically - # logged in. - return - - if self.basic and (self.username and self.password): - # Basic auth mode requested, so this method is a nop as long - # as credentials were passed in. - return - - # Only try to get a token and updated cookie if username & password are specified - try: - response = self.http.post( - self.authority + self._abspath("/services/auth/login"), - username=self.username, - password=self.password, - headers=self.additional_headers, - cookie="1") # In Splunk 6.2+, passing "cookie=1" will return the "set-cookie" header - - body = response.body.read() - session = XML(body).findtext("./sessionKey") - self.token = "Splunk %s" % session - return self - except HTTPError as he: - if he.status == 401: - raise AuthenticationError("Login failed.", he) - else: - raise - - def logout(self): - """Forgets the current session token, and cookies.""" - self.token = _NoAuthenticationToken - self.http._cookies = {} - return self - - def _abspath(self, path_segment, - owner=None, app=None, sharing=None): - """Qualifies *path_segment* into an absolute path for a URL. - - If *path_segment* is already absolute, returns it unchanged. 
- If *path_segment* is relative, then qualifies it with either - the provided namespace arguments or the ``Context``'s default - namespace. Any forbidden characters in *path_segment* are URL - encoded. This function has no network activity. - - Named to be consistent with RFC2396_. - - .. _RFC2396: http://www.ietf.org/rfc/rfc2396.txt - - :param path_segment: A relative or absolute URL path segment. - :type path_segment: ``string`` - :param owner, app, sharing: Components of a namespace (defaults - to the ``Context``'s namespace if all - three are omitted) - :type owner, app, sharing: ``string`` - :return: A ``UrlEncoded`` (a subclass of ``str``). - :rtype: ``string`` - - **Example**:: - - import splunklib.binding as binding - c = binding.connect(owner='boris', app='search', sharing='user') - c._abspath('/a/b/c') == '/a/b/c' - c._abspath('/a/b c/d') == '/a/b%20c/d' - c._abspath('apps/local/search') == \ - '/servicesNS/boris/search/apps/local/search' - c._abspath('apps/local/search', sharing='system') == \ - '/servicesNS/nobody/system/apps/local/search' - url = c.authority + c._abspath('apps/local/sharing') - """ - skip_encode = isinstance(path_segment, UrlEncoded) - # If path_segment is absolute, escape all forbidden characters - # in it and return it. - if path_segment.startswith('/'): - return UrlEncoded(path_segment, skip_encode=skip_encode) - - # path_segment is relative, so we need a namespace to build an - # absolute path. - if owner or app or sharing: - ns = namespace(owner=owner, app=app, sharing=sharing) - else: - ns = self.namespace - - # If no app or owner are specified, then use the /services - # endpoint. Otherwise, use /servicesNS with the specified - # namespace. If only one of app and owner is specified, use - # '-' for the other. 
- if ns.app is None and ns.owner is None: - return UrlEncoded("/services/%s" % path_segment, skip_encode=skip_encode) - - oname = "nobody" if ns.owner is None else ns.owner - aname = "system" if ns.app is None else ns.app - path = UrlEncoded("/servicesNS/%s/%s/%s" % (oname, aname, path_segment), - skip_encode=skip_encode) - return path - - -def connect(**kwargs): - """This function returns an authenticated :class:`Context` object. - - This function is a shorthand for calling :meth:`Context.login`. - - This function makes one round trip to the server. - - :param host: The host name (the default is "localhost"). - :type host: ``string`` - :param port: The port number (the default is 8089). - :type port: ``integer`` - :param scheme: The scheme for accessing the service (the default is "https"). - :type scheme: "https" or "http" - :param owner: The owner context of the namespace (the default is "None"). - :type owner: ``string`` - :param app: The app context of the namespace (the default is "None"). - :type app: ``string`` - :param sharing: The sharing mode for the namespace (the default is "user"). - :type sharing: "global", "system", "app", or "user" - :param token: The current session token (optional). Session tokens can be - shared across multiple service instances. - :type token: ``string`` - :param cookie: A session cookie. When provided, you don't need to call :meth:`login`. - This parameter is only supported for Splunk 6.2+. - :type cookie: ``string`` - :param username: The Splunk account username, which is used to - authenticate the Splunk instance. - :type username: ``string`` - :param password: The password for the Splunk account. - :type password: ``string`` - :param headers: List of extra HTTP headers to send (optional). - :type headers: ``list`` of 2-tuples. - :param autologin: When ``True``, automatically tries to log in again if the - session terminates. - :type autologin: ``Boolean`` - :return: An initialized :class:`Context` instance. 
- - **Example**:: - - import splunklib.binding as binding - c = binding.connect(...) - response = c.get("apps/local") - """ - c = Context(**kwargs) - c.login() - return c - -# Note: the error response schema supports multiple messages but we only -# return the first, although we do return the body so that an exception -# handler that wants to read multiple messages can do so. -class HTTPError(Exception): - """This exception is raised for HTTP responses that return an error.""" - def __init__(self, response, _message=None): - status = response.status - reason = response.reason - body = (response.body.read()).decode() - try: - detail = XML(body).findtext("./messages/msg") - except ParseError as err: - detail = body - message = "HTTP %d %s%s" % ( - status, reason, "" if detail is None else " -- %s" % detail) - Exception.__init__(self, _message or message) - self.status = status - self.reason = reason - self.headers = response.headers - self.body = body - self._response = response - -class AuthenticationError(HTTPError): - """Raised when a login request to Splunk fails. - - If your username was unknown or you provided an incorrect password - in a call to :meth:`Context.login` or :meth:`splunklib.client.Service.login`, - this exception is raised. - """ - def __init__(self, message, cause): - # Put the body back in the response so that HTTPError's constructor can - # read it again. - cause._response.body = BytesIO(cause.body) - - HTTPError.__init__(self, cause._response, message) - -# -# The HTTP interface used by the Splunk binding layer abstracts the underlying -# HTTP library using request & response 'messages' which are implemented as -# dictionaries with the following structure: -# -# # HTTP request message (only method required) -# request { -# method : str, -# headers? : [(str, str)*], -# body? 
: str, -# } -# -# # HTTP response message (all keys present) -# response { -# status : int, -# reason : str, -# headers : [(str, str)*], -# body : file, -# } -# - -# Encode the given kwargs as a query string. This wrapper will also _encode -# a list value as a sequence of assignemnts to the corresponding arg name, -# for example an argument such as 'foo=[1,2,3]' will be encoded as -# 'foo=1&foo=2&foo=3'. -def _encode(**kwargs): - items = [] - for key, value in six.iteritems(kwargs): - if isinstance(value, list): - items.extend([(key, item) for item in value]) - else: - items.append((key, value)) - return urllib.parse.urlencode(items) - -# Crack the given url into (scheme, host, port, path) -def _spliturl(url): - parsed_url = urllib.parse.urlparse(url) - host = parsed_url.hostname - port = parsed_url.port - path = '?'.join((parsed_url.path, parsed_url.query)) if parsed_url.query else parsed_url.path - # Strip brackets if its an IPv6 address - if host.startswith('[') and host.endswith(']'): host = host[1:-1] - if port is None: port = DEFAULT_PORT - return parsed_url.scheme, host, port, path - -# Given an HTTP request handler, this wrapper objects provides a related -# family of convenience methods built using that handler. -class HttpLib(object): - """A set of convenient methods for making HTTP calls. - - ``HttpLib`` provides a general :meth:`request` method, and :meth:`delete`, - :meth:`post`, and :meth:`get` methods for the three HTTP methods that Splunk - uses. - - By default, ``HttpLib`` uses Python's built-in ``httplib`` library, - but you can replace it by passing your own handling function to the - constructor for ``HttpLib``. - - The handling function should have the type: - - ``handler(`url`, `request_dict`) -> response_dict`` - - where `url` is the URL to make the request to (including any query and - fragment sections) as a dictionary with the following keys: - - - method: The method for the request, typically ``GET``, ``POST``, or ``DELETE``. 
- - - headers: A list of pairs specifying the HTTP headers (for example: ``[('key': value), ...]``). - - - body: A string containing the body to send with the request (this string - should default to ''). - - and ``response_dict`` is a dictionary with the following keys: - - - status: An integer containing the HTTP status code (such as 200 or 404). - - - reason: The reason phrase, if any, returned by the server. - - - headers: A list of pairs containing the response headers (for example, ``[('key': value), ...]``). - - - body: A stream-like object supporting ``read(size=None)`` and ``close()`` - methods to get the body of the response. - - The response dictionary is returned directly by ``HttpLib``'s methods with - no further processing. By default, ``HttpLib`` calls the :func:`handler` function - to get a handler function. - - If using the default handler, SSL verification can be disabled by passing verify=False. - """ - def __init__(self, custom_handler=None, verify=False, key_file=None, cert_file=None): - if custom_handler is None: - self.handler = handler(verify=verify, key_file=key_file, cert_file=cert_file) - else: - self.handler = custom_handler - self._cookies = {} - - def delete(self, url, headers=None, **kwargs): - """Sends a DELETE request to a URL. - - :param url: The URL. - :type url: ``string`` - :param headers: A list of pairs specifying the headers for the HTTP - response (for example, ``[('Content-Type': 'text/cthulhu'), ('Token': 'boris')]``). - :type headers: ``list`` - :param kwargs: Additional keyword arguments (optional). These arguments - are interpreted as the query part of the URL. The order of keyword - arguments is not preserved in the request, but the keywords and - their arguments will be URL encoded. - :type kwargs: ``dict`` - :returns: A dictionary describing the response (see :class:`HttpLib` for - its structure). - :rtype: ``dict`` - """ - if headers is None: headers = [] - if kwargs: - # url is already a UrlEncoded. 
We have to manually declare - # the query to be encoded or it will get automatically URL - # encoded by being appended to url. - url = url + UrlEncoded('?' + _encode(**kwargs), skip_encode=True) - message = { - 'method': "DELETE", - 'headers': headers, - } - return self.request(url, message) - - def get(self, url, headers=None, **kwargs): - """Sends a GET request to a URL. - - :param url: The URL. - :type url: ``string`` - :param headers: A list of pairs specifying the headers for the HTTP - response (for example, ``[('Content-Type': 'text/cthulhu'), ('Token': 'boris')]``). - :type headers: ``list`` - :param kwargs: Additional keyword arguments (optional). These arguments - are interpreted as the query part of the URL. The order of keyword - arguments is not preserved in the request, but the keywords and - their arguments will be URL encoded. - :type kwargs: ``dict`` - :returns: A dictionary describing the response (see :class:`HttpLib` for - its structure). - :rtype: ``dict`` - """ - if headers is None: headers = [] - if kwargs: - # url is already a UrlEncoded. We have to manually declare - # the query to be encoded or it will get automatically URL - # encoded by being appended to url. - url = url + UrlEncoded('?' + _encode(**kwargs), skip_encode=True) - return self.request(url, { 'method': "GET", 'headers': headers }) - - def post(self, url, headers=None, **kwargs): - """Sends a POST request to a URL. - - :param url: The URL. - :type url: ``string`` - :param headers: A list of pairs specifying the headers for the HTTP - response (for example, ``[('Content-Type': 'text/cthulhu'), ('Token': 'boris')]``). - :type headers: ``list`` - :param kwargs: Additional keyword arguments (optional). If the argument - is ``body``, the value is used as the body for the request, and the - keywords and their arguments will be URL encoded. 
If there is no - ``body`` keyword argument, all the keyword arguments are encoded - into the body of the request in the format ``x-www-form-urlencoded``. - :type kwargs: ``dict`` - :returns: A dictionary describing the response (see :class:`HttpLib` for - its structure). - :rtype: ``dict`` - """ - if headers is None: headers = [] - - # We handle GET-style arguments and an unstructured body. This is here - # to support the receivers/stream endpoint. - if 'body' in kwargs: - # We only use application/x-www-form-urlencoded if there is no other - # Content-Type header present. This can happen in cases where we - # send requests as application/json, e.g. for KV Store. - if len([x for x in headers if x[0].lower() == "content-type"]) == 0: - headers.append(("Content-Type", "application/x-www-form-urlencoded")) - - body = kwargs.pop('body') - if len(kwargs) > 0: - url = url + UrlEncoded('?' + _encode(**kwargs), skip_encode=True) - else: - body = _encode(**kwargs).encode('utf-8') - message = { - 'method': "POST", - 'headers': headers, - 'body': body - } - return self.request(url, message) - - def request(self, url, message, **kwargs): - """Issues an HTTP request to a URL. - - :param url: The URL. - :type url: ``string`` - :param message: A dictionary with the format as described in - :class:`HttpLib`. - :type message: ``dict`` - :param kwargs: Additional keyword arguments (optional). These arguments - are passed unchanged to the handler. - :type kwargs: ``dict`` - :returns: A dictionary describing the response (see :class:`HttpLib` for - its structure). 
- :rtype: ``dict`` - """ - response = self.handler(url, message, **kwargs) - response = record(response) - if 400 <= response.status: - raise HTTPError(response) - - # Update the cookie with any HTTP request - # Initially, assume list of 2-tuples - key_value_tuples = response.headers - # If response.headers is a dict, get the key-value pairs as 2-tuples - # this is the case when using urllib2 - if isinstance(response.headers, dict): - key_value_tuples = list(response.headers.items()) - for key, value in key_value_tuples: - if key.lower() == "set-cookie": - _parse_cookies(value, self._cookies) - - return response - - -# Converts an httplib response into a file-like object. -class ResponseReader(io.RawIOBase): - """This class provides a file-like interface for :class:`httplib` responses. - - The ``ResponseReader`` class is intended to be a layer to unify the different - types of HTTP libraries used with this SDK. This class also provides a - preview of the stream and a few useful predicates. - """ - # For testing, you can use a StringIO as the argument to - # ``ResponseReader`` instead of an ``httplib.HTTPResponse``. It - # will work equally well. - def __init__(self, response, connection=None): - self._response = response - self._connection = connection - self._buffer = b'' - - def __str__(self): - return self.read() - - @property - def empty(self): - """Indicates whether there is any more data in the response.""" - return self.peek(1) == b"" - - def peek(self, size): - """Nondestructively retrieves a given number of characters. - - The next :meth:`read` operation behaves as though this method was never - called. - - :param size: The number of characters to retrieve. 
- :type size: ``integer`` - """ - c = self.read(size) - self._buffer = self._buffer + c - return c - - def close(self): - """Closes this response.""" - if self._connection: - self._connection.close() - self._response.close() - - def read(self, size = None): - """Reads a given number of characters from the response. - - :param size: The number of characters to read, or "None" to read the - entire response. - :type size: ``integer`` or "None" - - """ - r = self._buffer - self._buffer = b'' - if size is not None: - size -= len(r) - r = r + self._response.read(size) - return r - - def readable(self): - """ Indicates that the response reader is readable.""" - return True - - def readinto(self, byte_array): - """ Read data into a byte array, upto the size of the byte array. - - :param byte_array: A byte array/memory view to pour bytes into. - :type byte_array: ``bytearray`` or ``memoryview`` - - """ - max_size = len(byte_array) - data = self.read(max_size) - bytes_read = len(data) - byte_array[:bytes_read] = data - return bytes_read - - -def handler(key_file=None, cert_file=None, timeout=None, verify=False): - """This class returns an instance of the default HTTP request handler using - the values you provide. - - :param `key_file`: A path to a PEM (Privacy Enhanced Mail) formatted file containing your private key (optional). - :type key_file: ``string`` - :param `cert_file`: A path to a PEM (Privacy Enhanced Mail) formatted file containing a certificate chain file (optional). - :type cert_file: ``string`` - :param `timeout`: The request time-out period, in seconds (optional). - :type timeout: ``integer`` or "None" - :param `verify`: Set to False to disable SSL verification on https connections. 
- :type verify: ``Boolean`` - """ - - def connect(scheme, host, port): - kwargs = {} - if timeout is not None: kwargs['timeout'] = timeout - if scheme == "http": - return six.moves.http_client.HTTPConnection(host, port, **kwargs) - if scheme == "https": - if key_file is not None: kwargs['key_file'] = key_file - if cert_file is not None: kwargs['cert_file'] = cert_file - - # If running Python 2.7.9+, disable SSL certificate validation - if (sys.version_info >= (2,7,9) and key_file is None and cert_file is None) and not verify: - kwargs['context'] = ssl._create_unverified_context() - return six.moves.http_client.HTTPSConnection(host, port, **kwargs) - raise ValueError("unsupported scheme: %s" % scheme) - - def request(url, message, **kwargs): - scheme, host, port, path = _spliturl(url) - body = message.get("body", "") - head = { - "Content-Length": str(len(body)), - "Host": host, - "User-Agent": "splunk-sdk-python/1.6.6", - "Accept": "*/*", - "Connection": "Close", - } # defaults - for key, value in message["headers"]: - head[key] = value - method = message.get("method", "GET") - - connection = connect(scheme, host, port) - is_keepalive = False - try: - connection.request(method, path, body, head) - if timeout is not None: - connection.sock.settimeout(timeout) - response = connection.getresponse() - is_keepalive = "keep-alive" in response.getheader("connection", default="close").lower() - finally: - if not is_keepalive: - connection.close() - - return { - "status": response.status, - "reason": response.reason, - "headers": response.getheaders(), - "body": ResponseReader(response, connection if is_keepalive else None), - } - - return request diff --git a/solnlib/packages/splunklib/client.py b/solnlib/packages/splunklib/client.py deleted file mode 100644 index cb040931..00000000 --- a/solnlib/packages/splunklib/client.py +++ /dev/null @@ -1,3730 +0,0 @@ -# Copyright 2011-2015 Splunk, Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"): you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# -# The purpose of this module is to provide a friendlier domain interface to -# various Splunk endpoints. The approach here is to leverage the binding -# layer to capture endpoint context and provide objects and methods that -# offer simplified access their corresponding endpoints. The design avoids -# caching resource state. From the perspective of this module, the 'policy' -# for caching resource state belongs in the application or a higher level -# framework, and its the purpose of this module to provide simplified -# access to that resource state. -# -# A side note, the objects below that provide helper methods for updating eg: -# Entity state, are written so that they may be used in a fluent style. -# - -"""The **splunklib.client** module provides a Pythonic interface to the -`Splunk REST API `_, -allowing you programmatically access Splunk's resources. - -**splunklib.client** wraps a Pythonic layer around the wire-level -binding of the **splunklib.binding** module. The core of the library is the -:class:`Service` class, which encapsulates a connection to the server, and -provides access to the various aspects of Splunk's functionality, which are -exposed via the REST API. 
Typically you connect to a running Splunk instance -with the :func:`connect` function:: - - import splunklib.client as client - service = client.connect(host='localhost', port=8089, - username='admin', password='...') - assert isinstance(service, client.Service) - -:class:`Service` objects have fields for the various Splunk resources (such as apps, -jobs, saved searches, inputs, and indexes). All of these fields are -:class:`Collection` objects:: - - appcollection = service.apps - my_app = appcollection.create('my_app') - my_app = appcollection['my_app'] - appcollection.delete('my_app') - -The individual elements of the collection, in this case *applications*, -are subclasses of :class:`Entity`. An ``Entity`` object has fields for its -attributes, and methods that are specific to each kind of entity. For example:: - - print my_app['author'] # Or: print my_app.author - my_app.package() # Creates a compressed package of this application -""" - -import contextlib -import datetime -import json -import logging -import socket -from datetime import datetime, timedelta -from time import sleep - -from . import six -from .six.moves import urllib - -from . 
import data -from .binding import (AuthenticationError, Context, HTTPError, UrlEncoded, - _encode, _make_cookie_header, _NoAuthenticationToken, - namespace) -from .data import record - -__all__ = [ - "connect", - "NotSupportedError", - "OperationError", - "IncomparableException", - "Service", - "namespace" -] - -PATH_APPS = "apps/local/" -PATH_CAPABILITIES = "authorization/capabilities/" -PATH_CONF = "configs/conf-%s/" -PATH_PROPERTIES = "properties/" -PATH_DEPLOYMENT_CLIENTS = "deployment/client/" -PATH_DEPLOYMENT_TENANTS = "deployment/tenants/" -PATH_DEPLOYMENT_SERVERS = "deployment/server/" -PATH_DEPLOYMENT_SERVERCLASSES = "deployment/serverclass/" -PATH_EVENT_TYPES = "saved/eventtypes/" -PATH_FIRED_ALERTS = "alerts/fired_alerts/" -PATH_INDEXES = "data/indexes/" -PATH_INPUTS = "data/inputs/" -PATH_JOBS = "search/jobs/" -PATH_LOGGER = "/services/server/logger/" -PATH_MESSAGES = "messages/" -PATH_MODULAR_INPUTS = "data/modular-inputs" -PATH_ROLES = "authorization/roles/" -PATH_SAVED_SEARCHES = "saved/searches/" -PATH_STANZA = "configs/conf-%s/%s" # (file, stanza) -PATH_USERS = "authentication/users/" -PATH_RECEIVERS_STREAM = "/services/receivers/stream" -PATH_RECEIVERS_SIMPLE = "/services/receivers/simple" -PATH_STORAGE_PASSWORDS = "storage/passwords" - -XNAMEF_ATOM = "{http://www.w3.org/2005/Atom}%s" -XNAME_ENTRY = XNAMEF_ATOM % "entry" -XNAME_CONTENT = XNAMEF_ATOM % "content" - -MATCH_ENTRY_CONTENT = "%s/%s/*" % (XNAME_ENTRY, XNAME_CONTENT) - - -class IllegalOperationException(Exception): - """Thrown when an operation is not possible on the Splunk instance that a - :class:`Service` object is connected to.""" - pass - - -class IncomparableException(Exception): - """Thrown when trying to compare objects (using ``==``, ``<``, ``>``, and - so on) of a type that doesn't support it.""" - pass - - -class AmbiguousReferenceException(ValueError): - """Thrown when the name used to fetch an entity matches more than one entity.""" - pass - - -class 
InvalidNameException(Exception): - """Thrown when the specified name contains characters that are not allowed - in Splunk entity names.""" - pass - - -class NoSuchCapability(Exception): - """Thrown when the capability that has been referred to doesn't exist.""" - pass - - -class OperationError(Exception): - """Raised for a failed operation, such as a time out.""" - pass - - -class NotSupportedError(Exception): - """Raised for operations that are not supported on a given object.""" - pass - - -def _trailing(template, *targets): - """Substring of *template* following all *targets*. - - **Example**:: - - template = "this is a test of the bunnies." - _trailing(template, "is", "est", "the") == " bunnies" - - Each target is matched successively in the string, and the string - remaining after the last target is returned. If one of the targets - fails to match, a ValueError is raised. - - :param template: Template to extract a trailing string from. - :type template: ``string`` - :param targets: Strings to successively match in *template*. - :type targets: list of ``string``s - :return: Trailing string after all targets are matched. - :rtype: ``string`` - :raises ValueError: Raised when one of the targets does not match. - """ - s = template - for t in targets: - n = s.find(t) - if n == -1: - raise ValueError("Target " + t + " not found in template.") - s = s[n + len(t):] - return s - - -# Filter the given state content record according to the given arg list. 
-def _filter_content(content, *args): - if len(args) > 0: - return record((k, content[k]) for k in args) - return record((k, v) for k, v in six.iteritems(content) - if k not in ['eai:acl', 'eai:attributes', 'type']) - -# Construct a resource path from the given base path + resource name -def _path(base, name): - if not base.endswith('/'): base = base + '/' - return base + name - - -# Load an atom record from the body of the given response -# this will ultimately be sent to an xml ElementTree so we -# should use the xmlcharrefreplace option -def _load_atom(response, match=None): - return data.load(response.body.read() - .decode('utf-8', 'xmlcharrefreplace'), match) - - -# Load an array of atom entries from the body of the given response -def _load_atom_entries(response): - r = _load_atom(response) - if 'feed' in r: - # Need this to handle a random case in the REST API - if r.feed.get('totalResults') in [0, '0']: - return [] - entries = r.feed.get('entry', None) - if entries is None: return None - return entries if isinstance(entries, list) else [entries] - # Unlike most other endpoints, the jobs endpoint does not return - # its state wrapped in another element, but at the top level. - # For example, in XML, it returns ... instead of - # .... 
- else: - entries = r.get('entry', None) - if entries is None: return None - return entries if isinstance(entries, list) else [entries] - - -# Load the sid from the body of the given response -def _load_sid(response): - return _load_atom(response).response.sid - - -# Parse the given atom entry record into a generic entity state record -def _parse_atom_entry(entry): - title = entry.get('title', None) - - elink = entry.get('link', []) - elink = elink if isinstance(elink, list) else [elink] - links = record((link.rel, link.href) for link in elink) - - # Retrieve entity content values - content = entry.get('content', {}) - - # Host entry metadata - metadata = _parse_atom_metadata(content) - - # Filter some of the noise out of the content record - content = record((k, v) for k, v in six.iteritems(content) - if k not in ['eai:acl', 'eai:attributes']) - - if 'type' in content: - if isinstance(content['type'], list): - content['type'] = [t for t in content['type'] if t != 'text/xml'] - # Unset type if it was only 'text/xml' - if len(content['type']) == 0: - content.pop('type', None) - # Flatten 1 element list - if len(content['type']) == 1: - content['type'] = content['type'][0] - else: - content.pop('type', None) - - return record({ - 'title': title, - 'links': links, - 'access': metadata.access, - 'fields': metadata.fields, - 'content': content, - 'updated': entry.get("updated") - }) - - -# Parse the metadata fields out of the given atom entry content record -def _parse_atom_metadata(content): - # Hoist access metadata - access = content.get('eai:acl', None) - - # Hoist content metadata (and cleanup some naming) - attributes = content.get('eai:attributes', {}) - fields = record({ - 'required': attributes.get('requiredFields', []), - 'optional': attributes.get('optionalFields', []), - 'wildcard': attributes.get('wildcardFields', [])}) - - return record({'access': access, 'fields': fields}) - -# kwargs: scheme, host, port, app, owner, username, password -def 
connect(**kwargs): - """This function connects and logs in to a Splunk instance. - - This function is a shorthand for :meth:`Service.login`. - The ``connect`` function makes one round trip to the server (for logging in). - - :param host: The host name (the default is "localhost"). - :type host: ``string`` - :param port: The port number (the default is 8089). - :type port: ``integer`` - :param scheme: The scheme for accessing the service (the default is "https"). - :type scheme: "https" or "http" - :param verify: Enable (True) or disable (False) SSL verrification for - https connections. (optional, the default is True) - :type verify: ``Boolean`` - :param `owner`: The owner context of the namespace (optional). - :type owner: ``string`` - :param `app`: The app context of the namespace (optional). - :type app: ``string`` - :param sharing: The sharing mode for the namespace (the default is "user"). - :type sharing: "global", "system", "app", or "user" - :param `token`: The current session token (optional). Session tokens can be - shared across multiple service instances. - :type token: ``string`` - :param cookie: A session cookie. When provided, you don't need to call :meth:`login`. - This parameter is only supported for Splunk 6.2+. - :type cookie: ``string`` - :param autologin: When ``True``, automatically tries to log in again if the - session terminates. - :type autologin: ``boolean`` - :param `username`: The Splunk account username, which is used to - authenticate the Splunk instance. - :type username: ``string`` - :param `password`: The password for the Splunk account. - :type password: ``string`` - :return: An initialized :class:`Service` connection. - - **Example**:: - - import splunklib.client as client - s = client.connect(...) - a = s.apps["my_app"] - ... - """ - s = Service(**kwargs) - s.login() - return s - - -# In preparation for adding Storm support, we added an -# intermediary class between Service and Context. 
Storm's -# API is not going to be the same as enterprise Splunk's -# API, so we will derive both Service (for enterprise Splunk) -# and StormService for (Splunk Storm) from _BaseService, and -# put any shared behavior on it. -class _BaseService(Context): - pass - - -class Service(_BaseService): - """A Pythonic binding to Splunk instances. - - A :class:`Service` represents a binding to a Splunk instance on an - HTTP or HTTPS port. It handles the details of authentication, wire - formats, and wraps the REST API endpoints into something more - Pythonic. All of the low-level operations on the instance from - :class:`splunklib.binding.Context` are also available in case you need - to do something beyond what is provided by this class. - - After creating a ``Service`` object, you must call its :meth:`login` - method before you can issue requests to Splunk. - Alternately, use the :func:`connect` function to create an already - authenticated :class:`Service` object, or provide a session token - when creating the :class:`Service` object explicitly (the same - token may be shared by multiple :class:`Service` objects). - - :param host: The host name (the default is "localhost"). - :type host: ``string`` - :param port: The port number (the default is 8089). - :type port: ``integer`` - :param scheme: The scheme for accessing the service (the default is "https"). - :type scheme: "https" or "http" - :param verify: Enable (True) or disable (False) SSL verrification for - https connections. (optional, the default is True) - :type verify: ``Boolean`` - :param `owner`: The owner context of the namespace (optional; use "-" for wildcard). - :type owner: ``string`` - :param `app`: The app context of the namespace (optional; use "-" for wildcard). - :type app: ``string`` - :param `token`: The current session token (optional). Session tokens can be - shared across multiple service instances. - :type token: ``string`` - :param cookie: A session cookie. 
When provided, you don't need to call :meth:`login`. - This parameter is only supported for Splunk 6.2+. - :type cookie: ``string`` - :param `username`: The Splunk account username, which is used to - authenticate the Splunk instance. - :type username: ``string`` - :param `password`: The password, which is used to authenticate the Splunk - instance. - :type password: ``string`` - :return: A :class:`Service` instance. - - **Example**:: - - import splunklib.client as client - s = client.Service(username="boris", password="natasha", ...) - s.login() - # Or equivalently - s = client.connect(username="boris", password="natasha") - # Or if you already have a session token - s = client.Service(token="atg232342aa34324a") - # Or if you already have a valid cookie - s = client.Service(cookie="splunkd_8089=...") - """ - def __init__(self, **kwargs): - super(Service, self).__init__(**kwargs) - self._splunk_version = None - - @property - def apps(self): - """Returns the collection of applications that are installed on this instance of Splunk. - - :return: A :class:`Collection` of :class:`Application` entities. - """ - return Collection(self, PATH_APPS, item=Application) - - @property - def confs(self): - """Returns the collection of configuration files for this Splunk instance. - - :return: A :class:`Configurations` collection of - :class:`ConfigurationFile` entities. - """ - return Configurations(self) - - @property - def capabilities(self): - """Returns the list of system capabilities. - - :return: A ``list`` of capabilities. - """ - response = self.get(PATH_CAPABILITIES) - return _load_atom(response, MATCH_ENTRY_CONTENT).capabilities - - @property - def event_types(self): - """Returns the collection of event types defined in this Splunk instance. - - :return: An :class:`Entity` containing the event types. 
- """ - return Collection(self, PATH_EVENT_TYPES) - - @property - def fired_alerts(self): - """Returns the collection of alerts that have been fired on the Splunk - instance, grouped by saved search. - - :return: A :class:`Collection` of :class:`AlertGroup` entities. - """ - return Collection(self, PATH_FIRED_ALERTS, item=AlertGroup) - - @property - def indexes(self): - """Returns the collection of indexes for this Splunk instance. - - :return: An :class:`Indexes` collection of :class:`Index` entities. - """ - return Indexes(self, PATH_INDEXES, item=Index) - - @property - def info(self): - """Returns the information about this instance of Splunk. - - :return: The system information, as key-value pairs. - :rtype: ``dict`` - """ - response = self.get("/services/server/info") - return _filter_content(_load_atom(response, MATCH_ENTRY_CONTENT)) - - @property - def inputs(self): - """Returns the collection of inputs configured on this Splunk instance. - - :return: An :class:`Inputs` collection of :class:`Input` entities. - """ - return Inputs(self) - - def job(self, sid): - """Retrieves a search job by sid. - - :return: A :class:`Job` object. - """ - return Job(self, sid).refresh() - - @property - def jobs(self): - """Returns the collection of current search jobs. - - :return: A :class:`Jobs` collection of :class:`Job` entities. - """ - return Jobs(self) - - @property - def loggers(self): - """Returns the collection of logging level categories and their status. - - :return: A :class:`Loggers` collection of logging levels. - """ - return Loggers(self) - - @property - def messages(self): - """Returns the collection of service messages. - - :return: A :class:`Collection` of :class:`Message` entities. - """ - return Collection(self, PATH_MESSAGES, item=Message) - - @property - def modular_input_kinds(self): - """Returns the collection of the modular input kinds on this Splunk instance. - - :return: A :class:`ReadOnlyCollection` of :class:`ModularInputKind` entities. 
- """ - if self.splunk_version >= (5,): - return ReadOnlyCollection(self, PATH_MODULAR_INPUTS, item=ModularInputKind) - else: - raise IllegalOperationException("Modular inputs are not supported before Splunk version 5.") - - @property - def storage_passwords(self): - """Returns the collection of the storage passwords on this Splunk instance. - - :return: A :class:`ReadOnlyCollection` of :class:`StoragePasswords` entities. - """ - return StoragePasswords(self) - - # kwargs: enable_lookups, reload_macros, parse_only, output_mode - def parse(self, query, **kwargs): - """Parses a search query and returns a semantic map of the search. - - :param query: The search query to parse. - :type query: ``string`` - :param kwargs: Arguments to pass to the ``search/parser`` endpoint - (optional). Valid arguments are: - - * "enable_lookups" (``boolean``): If ``True``, performs reverse lookups - to expand the search expression. - - * "output_mode" (``string``): The output format (XML or JSON). - - * "parse_only" (``boolean``): If ``True``, disables the expansion of - search due to evaluation of subsearches, time term expansion, - lookups, tags, eventtypes, and sourcetype alias. - - * "reload_macros" (``boolean``): If ``True``, reloads macro - definitions from macros.conf. - - :type kwargs: ``dict`` - :return: A semantic map of the parsed search query. - """ - return self.get("search/parser", q=query, **kwargs) - - def restart(self, timeout=None): - """Restarts this Splunk instance. - - The service is unavailable until it has successfully restarted. - - If a *timeout* value is specified, ``restart`` blocks until the service - resumes or the timeout period has been exceeded. Otherwise, ``restart`` returns - immediately. - - :param timeout: A timeout period, in seconds. - :type timeout: ``integer`` - """ - msg = { "value": "Restart requested by " + self.username + "via the Splunk SDK for Python"} - # This message will be deleted once the server actually restarts. 
- self.messages.create(name="restart_required", **msg) - result = self.post("/services/server/control/restart") - if timeout is None: - return result - start = datetime.now() - diff = timedelta(seconds=timeout) - while datetime.now() - start < diff: - try: - self.login() - if not self.restart_required: - return result - except Exception as e: - sleep(1) - raise Exception("Operation time out.") - - @property - def restart_required(self): - """Indicates whether splunkd is in a state that requires a restart. - - :return: A ``boolean`` that indicates whether a restart is required. - - """ - response = self.get("messages").body.read() - messages = data.load(response)['feed'] - if 'entry' not in messages: - result = False - else: - if isinstance(messages['entry'], dict): - titles = [messages['entry']['title']] - else: - titles = [x['title'] for x in messages['entry']] - result = 'restart_required' in titles - return result - - @property - def roles(self): - """Returns the collection of user roles. - - :return: A :class:`Roles` collection of :class:`Role` entities. - """ - return Roles(self) - - def search(self, query, **kwargs): - """Runs a search using a search query and any optional arguments you - provide, and returns a `Job` object representing the search. - - :param query: A search query. - :type query: ``string`` - :param kwargs: Arguments for the search (optional): - - * "output_mode" (``string``): Specifies the output format of the - results. - - * "earliest_time" (``string``): Specifies the earliest time in the - time range to - search. The time string can be a UTC time (with fractional - seconds), a relative time specifier (to now), or a formatted - time string. - - * "latest_time" (``string``): Specifies the latest time in the time - range to - search. The time string can be a UTC time (with fractional - seconds), a relative time specifier (to now), or a formatted - time string. - - * "rf" (``string``): Specifies one or more fields to add to the - search. 
- - :type kwargs: ``dict`` - :rtype: class:`Job` - :returns: An object representing the created job. - """ - return self.jobs.create(query, **kwargs) - - @property - def saved_searches(self): - """Returns the collection of saved searches. - - :return: A :class:`SavedSearches` collection of :class:`SavedSearch` - entities. - """ - return SavedSearches(self) - - @property - def settings(self): - """Returns the configuration settings for this instance of Splunk. - - :return: A :class:`Settings` object containing configuration settings. - """ - return Settings(self) - - @property - def splunk_version(self): - """Returns the version of the splunkd instance this object is attached - to. - - The version is returned as a tuple of the version components as - integers (for example, `(4,3,3)` or `(5,)`). - - :return: A ``tuple`` of ``integers``. - """ - if self._splunk_version is None: - self._splunk_version = tuple([int(p) for p in self.info['version'].split('.')]) - return self._splunk_version - - @property - def kvstore(self): - """Returns the collection of KV Store collections. - - :return: A :class:`KVStoreCollections` collection of :class:`KVStoreCollection` entities. - """ - return KVStoreCollections(self) - - @property - def users(self): - """Returns the collection of users. - - :return: A :class:`Users` collection of :class:`User` entities. - """ - return Users(self) - - -class Endpoint(object): - """This class represents individual Splunk resources in the Splunk REST API. - - An ``Endpoint`` object represents a URI, such as ``/services/saved/searches``. - This class provides the common functionality of :class:`Collection` and - :class:`Entity` (essentially HTTP GET and POST methods). - """ - def __init__(self, service, path): - self.service = service - self.path = path if path.endswith('/') else path + '/' - - def get(self, path_segment="", owner=None, app=None, sharing=None, **query): - """Performs a GET operation on the path segment relative to this endpoint. 
- - This method is named to match the HTTP method. This method makes at least - one roundtrip to the server, one additional round trip for - each 303 status returned, plus at most two additional round - trips if - the ``autologin`` field of :func:`connect` is set to ``True``. - - If *owner*, *app*, and *sharing* are omitted, this method takes a - default namespace from the :class:`Service` object for this :class:`Endpoint`. - All other keyword arguments are included in the URL as query parameters. - - :raises AuthenticationError: Raised when the ``Service`` is not logged in. - :raises HTTPError: Raised when an error in the request occurs. - :param path_segment: A path segment relative to this endpoint. - :type path_segment: ``string`` - :param owner: The owner context of the namespace (optional). - :type owner: ``string`` - :param app: The app context of the namespace (optional). - :type app: ``string`` - :param sharing: The sharing mode for the namespace (optional). - :type sharing: "global", "system", "app", or "user" - :param query: All other keyword arguments, which are used as query - parameters. - :type query: ``string`` - :return: The response from the server. - :rtype: ``dict`` with keys ``body``, ``headers``, ``reason``, - and ``status`` - - **Example**:: - - import splunklib.client - s = client.service(...) - apps = s.apps - apps.get() == \\ - {'body': ...a response reader object..., - 'headers': [('content-length', '26208'), - ('expires', 'Fri, 30 Oct 1998 00:00:00 GMT'), - ('server', 'Splunkd'), - ('connection', 'close'), - ('cache-control', 'no-store, max-age=0, must-revalidate, no-cache'), - ('date', 'Fri, 11 May 2012 16:30:35 GMT'), - ('content-type', 'text/xml; charset=utf-8')], - 'reason': 'OK', - 'status': 200} - apps.get('nonexistant/path') # raises HTTPError - s.logout() - apps.get() # raises AuthenticationError - """ - # self.path to the Endpoint is relative in the SDK, so passing - # owner, app, sharing, etc. 
along will produce the correct - # namespace in the final request. - if path_segment.startswith('/'): - path = path_segment - else: - path = self.service._abspath(self.path + path_segment, owner=owner, - app=app, sharing=sharing) - # ^-- This was "%s%s" % (self.path, path_segment). - # That doesn't work, because self.path may be UrlEncoded. - return self.service.get(path, - owner=owner, app=app, sharing=sharing, - **query) - - def post(self, path_segment="", owner=None, app=None, sharing=None, **query): - """Performs a POST operation on the path segment relative to this endpoint. - - This method is named to match the HTTP method. This method makes at least - one roundtrip to the server, one additional round trip for - each 303 status returned, plus at most two additional round trips if - the ``autologin`` field of :func:`connect` is set to ``True``. - - If *owner*, *app*, and *sharing* are omitted, this method takes a - default namespace from the :class:`Service` object for this :class:`Endpoint`. - All other keyword arguments are included in the URL as query parameters. - - :raises AuthenticationError: Raised when the ``Service`` is not logged in. - :raises HTTPError: Raised when an error in the request occurs. - :param path_segment: A path segment relative to this endpoint. - :type path_segment: ``string`` - :param owner: The owner context of the namespace (optional). - :type owner: ``string`` - :param app: The app context of the namespace (optional). - :type app: ``string`` - :param sharing: The sharing mode of the namespace (optional). - :type sharing: ``string`` - :param query: All other keyword arguments, which are used as query - parameters. - :type query: ``string`` - :return: The response from the server. - :rtype: ``dict`` with keys ``body``, ``headers``, ``reason``, - and ``status`` - - **Example**:: - - import splunklib.client - s = client.service(...) 
- apps = s.apps - apps.post(name='boris') == \\ - {'body': ...a response reader object..., - 'headers': [('content-length', '2908'), - ('expires', 'Fri, 30 Oct 1998 00:00:00 GMT'), - ('server', 'Splunkd'), - ('connection', 'close'), - ('cache-control', 'no-store, max-age=0, must-revalidate, no-cache'), - ('date', 'Fri, 11 May 2012 18:34:50 GMT'), - ('content-type', 'text/xml; charset=utf-8')], - 'reason': 'Created', - 'status': 201} - apps.get('nonexistant/path') # raises HTTPError - s.logout() - apps.get() # raises AuthenticationError - """ - if path_segment.startswith('/'): - path = path_segment - else: - path = self.service._abspath(self.path + path_segment, owner=owner, app=app, sharing=sharing) - return self.service.post(path, owner=owner, app=app, sharing=sharing, **query) - - -# kwargs: path, app, owner, sharing, state -class Entity(Endpoint): - """This class is a base class for Splunk entities in the REST API, such as - saved searches, jobs, indexes, and inputs. - - ``Entity`` provides the majority of functionality required by entities. - Subclasses only implement the special cases for individual entities. - For example for deployment serverclasses, the subclass makes whitelists and - blacklists into Python lists. - - An ``Entity`` is addressed like a dictionary, with a few extensions, - so the following all work:: - - ent['email.action'] - ent['disabled'] - ent['whitelist'] - - Many endpoints have values that share a prefix, such as - ``email.to``, ``email.action``, and ``email.subject``. You can extract - the whole fields, or use the key ``email`` to get a dictionary of - all the subelements. That is, ``ent['email']`` returns a - dictionary with the keys ``to``, ``action``, ``subject``, and so on. If - there are multiple levels of dots, each level is made into a - subdictionary, so ``email.body.salutation`` can be accessed at - ``ent['email']['body']['salutation']`` or - ``ent['email.body.salutation']``. 
# kwargs: path, app, owner, sharing, state
class Entity(Endpoint):
    """Base class for Splunk entities in the REST API, such as
    saved searches, jobs, indexes, and inputs.

    ``Entity`` provides the majority of functionality required by entities.
    Subclasses only implement the special cases for individual entities.
    For example for deployment serverclasses, the subclass makes whitelists and
    blacklists into Python lists.

    An ``Entity`` is addressed like a dictionary, with a few extensions,
    so the following all work::

        ent['email.action']
        ent['disabled']
        ent['whitelist']

    Many endpoints have values that share a prefix, such as
    ``email.to``, ``email.action``, and ``email.subject``. You can extract
    the whole fields, or use the key ``email`` to get a dictionary of
    all the subelements. That is, ``ent['email']`` returns a
    dictionary with the keys ``to``, ``action``, ``subject``, and so on. If
    there are multiple levels of dots, each level is made into a
    subdictionary, so ``email.body.salutation`` can be accessed at
    ``ent['email']['body']['salutation']`` or
    ``ent['email.body.salutation']``.

    You can also access the fields as though they were the fields of a Python
    object, as in::

        ent.email.action
        ent.disabled
        ent.whitelist

    However, because some of the field names are not valid Python identifiers,
    the dictionary-like syntax is preferable.

    The state of an :class:`Entity` object is cached, so accessing a field
    does not contact the server. If you think the values on the
    server have changed, call the :meth:`Entity.refresh` method.
    """
    # Not every endpoint in the API is an Entity or a Collection. For
    # example, a saved search at saved/searches/{name} has an additional
    # method saved/searches/{name}/scheduled_times, but this isn't an
    # entity in its own right. In these cases, subclasses should
    # implement a method that uses the get and post methods inherited
    # from Endpoint, calls the _load_atom function (it's elsewhere in
    # client.py, but not a method of any object) to read the
    # information, and returns the extracted data in a Pythonesque form.
    #
    # The primary use of subclasses of Entity is to handle specially
    # named fields in the Entity. If you only need to provide a default
    # value for an optional field, subclass Entity and define a
    # dictionary ``defaults``. For instance,::
    #
    #     class Hypothetical(Entity):
    #         defaults = {'anOptionalField': 'foo',
    #                     'anotherField': 'bar'}
    #
    # If you have to do more than provide a default, such as rename or
    # actually process values, then define a new method with the
    # ``@property`` decorator.
    #
    #     class Hypothetical(Entity):
    #         @property
    #         def foobar(self):
    #             return self.content['foo'] + "-" + self.content["bar"]

    # Subclasses can override ``defaults`` to supply default values for
    # optional fields looked up via __getattr__/__getitem__. See above.
    defaults = {}

    def __init__(self, service, path, **kwargs):
        Endpoint.__init__(self, service, path)
        # Cached state record; ``None`` means "not yet fetched".
        self._state = None
        # Unless the caller opts out with skip_refresh=True, populate the
        # cache immediately, either from a supplied 'state' record or by a
        # round trip to the server ("Prefresh").
        if not kwargs.get('skip_refresh', False):
            self.refresh(kwargs.get('state', None))  # "Prefresh"
        return

    def __contains__(self, item):
        # Membership is defined as "lookup succeeds" on either the state
        # content or the class-level defaults (see __getattr__).
        try:
            self[item]
            return True
        except (KeyError, AttributeError):
            return False

    def __eq__(self, other):
        """Raises IncomparableException.

        Since Entity objects are snapshots of times on the server, no
        simple definition of equality will suffice beyond instance
        equality, and instance equality leads to strange situations
        such as::

            import splunklib.client as client
            c = client.connect(...)
            saved_searches = c.saved_searches
            x = saved_searches['asearch']

        but then ``x != saved_searches['asearch']``.

        whether or not there was a change on the server. Rather than
        try to do something fancy, we simple declare that equality is
        undefined for Entities.

        Makes no roundtrips to the server.
        """
        raise IncomparableException(
            "Equality is undefined for objects of class %s" % \
                self.__class__.__name__)

    def __getattr__(self, key):
        # Called when an attribute was not found by the normal method. In this
        # case we try to find it in self.content and then self.defaults.
        if key in self.state.content:
            return self.state.content[key]
        elif key in self.defaults:
            return self.defaults[key]
        else:
            raise AttributeError(key)

    def __getitem__(self, key):
        # getattr attempts to find a field on the object in the normal way,
        # then calls __getattr__ if it cannot.
        return getattr(self, key)

    # Load the Atom entry record from the given response - this is a method
    # because the "entry" record varies slightly by entity and this allows
    # for a subclass to override and handle any special cases.
    def _load_atom_entry(self, response):
        elem = _load_atom(response, XNAME_ENTRY)
        # A list here means the server returned multiple <entry> elements
        # where exactly one was expected.
        if isinstance(elem, list):
            raise AmbiguousReferenceException("Fetch from server returned multiple entries for name %s." % self.name)
        else:
            return elem.entry

    # Load the entity state record from the given response
    def _load_state(self, response):
        entry = self._load_atom_entry(response)
        return _parse_atom_entry(entry)

    def _run_action(self, path_segment, **kwargs):
        """Run a method and return the content Record from the returned XML.

        A method is a relative path from an Entity that is not itself
        an Entity. _run_action assumes that the returned XML is an
        Atom field containing one Entry, and the contents of Entry is
        what should be the return value. This is right in enough cases
        to make this method useful.
        """
        response = self.get(path_segment, **kwargs)
        data = self._load_atom_entry(response)
        rec = _parse_atom_entry(data)
        return rec.content

    def _proper_namespace(self, owner=None, app=None, sharing=None):
        """Produce a namespace sans wildcards for use in entity requests.

        This method tries to fill in the fields of the namespace which are `None`
        or wildcard (`'-'`) from the entity's namespace. If that fails, it uses
        the service's namespace.

        :param owner: owner component of the namespace, or ``None``.
        :param app: app component of the namespace, or ``None``.
        :param sharing: sharing mode of the namespace, or ``None``.
        :return: an ``(owner, app, sharing)`` tuple.
        """
        if owner is None and app is None and sharing is None:  # No namespace provided
            # Prefer the entity's own access metadata when cached; fall back
            # to the service-level namespace otherwise.
            if self._state is not None and 'access' in self._state:
                return (self._state.access.owner,
                        self._state.access.app,
                        self._state.access.sharing)
            else:
                return (self.service.namespace['owner'],
                        self.service.namespace['app'],
                        self.service.namespace['sharing'])
        else:
            return (owner, app, sharing)

    def delete(self):
        owner, app, sharing = self._proper_namespace()
        return self.service.delete(self.path, owner=owner, app=app, sharing=sharing)

    def get(self, path_segment="", owner=None, app=None, sharing=None, **query):
        owner, app, sharing = self._proper_namespace(owner, app, sharing)
        return super(Entity, self).get(path_segment, owner=owner, app=app, sharing=sharing, **query)

    def post(self, path_segment="", owner=None, app=None, sharing=None, **query):
        owner, app, sharing = self._proper_namespace(owner, app, sharing)
        return super(Entity, self).post(path_segment, owner=owner, app=app, sharing=sharing, **query)

    def refresh(self, state=None):
        """Refreshes the state of this entity.

        If *state* is provided, load it as the new state for this
        entity. Otherwise, make a roundtrip to the server (by calling
        the :meth:`read` method of ``self``) to fetch an updated state,
        plus at most two additional round trips if
        the ``autologin`` field of :func:`connect` is set to ``True``.

        :param state: Entity-specific arguments (optional).
        :type state: ``dict``
        :raises EntityDeletedException: Raised if the entity no longer exists on
            the server.

        **Example**::

            import splunklib.client as client
            s = client.connect(...)
            search = s.apps['search']
            search.refresh()
        """
        if state is not None:
            self._state = state
        else:
            self._state = self.read(self.get())
        return self

    @property
    def access(self):
        """Returns the access metadata for this entity.

        :return: A :class:`splunklib.data.Record` object with three keys:
            ``owner``, ``app``, and ``sharing``.
        """
        return self.state.access

    @property
    def content(self):
        """Returns the contents of the entity.

        :return: A ``dict`` containing values.
        """
        return self.state.content

    def disable(self):
        """Disables the entity at this endpoint."""
        self.post("disable")
        # Some endpoints require a restart to take effect; honor the
        # service's restart_required flag with a 120 second timeout.
        if self.service.restart_required:
            self.service.restart(120)
        return self

    def enable(self):
        """Enables the entity at this endpoint."""
        self.post("enable")
        return self

    @property
    def fields(self):
        """Returns the content metadata for this entity.

        :return: A :class:`splunklib.data.Record` object with three keys:
            ``required``, ``optional``, and ``wildcard``.
        """
        return self.state.fields

    @property
    def links(self):
        """Returns a dictionary of related resources.

        :return: A ``dict`` with keys and corresponding URLs.
        """
        return self.state.links

    @property
    def name(self):
        """Returns the entity name.

        :return: The entity name.
        :rtype: ``string``
        """
        return self.state.title

    def read(self, response):
        """ Reads the current state of the entity from the server. """
        results = self._load_state(response)
        # In lower layers of the SDK, we end up trying to URL encode
        # text to be dispatched via HTTP. However, these links are already
        # URL encoded when they arrive, and we need to mark them as such.
        unquoted_links = dict([(k, UrlEncoded(v, skip_encode=True))
                               for k, v in six.iteritems(results['links'])])
        results['links'] = unquoted_links
        return results

    def reload(self):
        """Reloads the entity."""
        self.post("_reload")
        return self

    @property
    def state(self):
        """Returns the entity's state record.

        :return: A ``dict`` containing fields and metadata for the entity.
        """
        # Lazily fetch the state on first access.
        if self._state is None: self.refresh()
        return self._state

    def update(self, **kwargs):
        """Updates the server with any changes you've made to the current entity
        along with any additional arguments you specify.

        **Note**: You cannot update the ``name`` field of an entity.

        Many of the fields in the REST API are not valid Python
        identifiers, which means you cannot pass them as keyword
        arguments. That is, Python will fail to parse the following::

            # This fails
            x.update(check-new=False, email.to='boris@utopia.net')

        However, you can always explicitly use a dictionary to pass
        such keys::

            # This works
            x.update(**{'check-new': False, 'email.to': 'boris@utopia.net'})

        :param kwargs: Additional entity-specific arguments (optional).
        :type kwargs: ``dict``

        :return: The entity this method is called on.
        :rtype: class:`Entity`
        """
        # The peculiarity in question: the REST API creates a new
        # Entity if we pass name in the dictionary, instead of the
        # expected behavior of updating this Entity. Therefore we
        # check for 'name' in kwargs and throw an error if it is
        # there.
        if 'name' in kwargs:
            raise IllegalOperationException('Cannot update the name of an Entity via the REST API.')
        self.post(**kwargs)
        return self
class ReadOnlyCollection(Endpoint):
    """This class represents a read-only collection of entities in the Splunk
    instance.
    """
    def __init__(self, service, path, item=Entity):
        Endpoint.__init__(self, service, path)
        self.item = item  # Item accessor: class used to wrap each entity
        # Sentinel meaning "no count limit was requested"; see iter().
        self.null_count = -1

    def __contains__(self, name):
        """Is there at least one entry called *name* in this collection?

        Makes a single roundtrip to the server, plus at most two more
        if
        the ``autologin`` field of :func:`connect` is set to ``True``.
        """
        try:
            self[name]
            return True
        except KeyError:
            return False
        except AmbiguousReferenceException:
            # Multiple entities with this name exist in different
            # namespaces; that still means the name is present.
            return True

    def __getitem__(self, key):
        """Fetch an item named *key* from this collection.

        A name is not a unique identifier in a collection. The unique
        identifier is a name plus a namespace. For example, there can
        be a saved search named ``'mysearch'`` with sharing ``'app'``
        in application ``'search'``, and another with sharing
        ``'user'`` with owner ``'boris'`` and application
        ``'search'``. If the ``Collection`` is attached to a
        ``Service`` that has ``'-'`` (wildcard) as user and app in its
        namespace, then both of these may be visible under the same
        name.

        Where there is no conflict, ``__getitem__`` will fetch the
        entity given just the name. If there is a conflict and you
        pass just a name, it will raise a ``ValueError``. In that
        case, add the namespace as a second argument.

        This function makes a single roundtrip to the server, plus at
        most two additional round trips if
        the ``autologin`` field of :func:`connect` is set to ``True``.

        :param key: The name to fetch, or a tuple (name, namespace).
        :return: An :class:`Entity` object.
        :raises KeyError: Raised if *key* does not exist.
        :raises ValueError: Raised if no namespace is specified and *key*
            does not refer to a unique name.

        *Example*::

            s = client.connect(...)
            saved_searches = s.saved_searches
            x1 = saved_searches.create(
                'mysearch', 'search * | head 1',
                owner='admin', app='search', sharing='app')
            x2 = saved_searches.create(
                'mysearch', 'search * | head 1',
                owner='admin', app='search', sharing='user')
            # Raises ValueError:
            saved_searches['mysearch']
            # Fetches x1
            saved_searches[
                'mysearch',
                client.namespace(sharing='app', app='search')]
            # Fetches x2
            saved_searches[
                'mysearch',
                client.namespace(sharing='user', owner='boris', app='search')]
        """
        try:
            if isinstance(key, tuple) and len(key) == 2:
                # x[a,b] is translated to x.__getitem__( (a,b) ), so we
                # have to extract values out.
                key, ns = key
                key = UrlEncoded(key, encode_slash=True)
                response = self.get(key, owner=ns.owner, app=ns.app)
            else:
                key = UrlEncoded(key, encode_slash=True)
                response = self.get(key)
            entries = self._load_list(response)
            if len(entries) > 1:
                raise AmbiguousReferenceException("Found multiple entities named '%s'; please specify a namespace." % key)
            elif len(entries) == 0:
                raise KeyError(key)
            else:
                return entries[0]
        except HTTPError as he:
            if he.status == 404:  # No entity matching key and namespace.
                raise KeyError(key)
            else:
                raise

    def __iter__(self, **kwargs):
        """Iterate over the entities in the collection.

        :param kwargs: Additional arguments.
        :type kwargs: ``dict``
        :rtype: iterator over entities.

        Implemented to give Collection a listish interface. This
        function always makes a roundtrip to the server, plus at most
        two additional round trips if
        the ``autologin`` field of :func:`connect` is set to ``True``.

        **Example**::

            import splunklib.client as client
            c = client.connect(...)
            saved_searches = c.saved_searches
            for entity in saved_searches:
                print "Saved search named %s" % entity.name
        """

        for item in self.iter(**kwargs):
            yield item

    def __len__(self):
        """Enable ``len(...)`` for ``Collection`` objects.

        Implemented for consistency with a listish interface. No
        further failure modes beyond those possible for any method on
        an Endpoint.

        This function always makes a round trip to the server, plus at
        most two additional round trips if
        the ``autologin`` field of :func:`connect` is set to ``True``.

        **Example**::

            import splunklib.client as client
            c = client.connect(...)
            saved_searches = c.saved_searches
            n = len(saved_searches)
        """
        return len(self.list())

    def _entity_path(self, state):
        """Calculate the path to an entity to be returned.

        *state* should be the dictionary returned by
        :func:`_parse_atom_entry`. :func:`_entity_path` extracts the
        link to this entity from *state*, and strips all the namespace
        prefixes from it to leave only the relative path of the entity
        itself, sans namespace.

        :rtype: ``string``
        :return: an absolute path
        """
        # This has been factored out so that it can be easily
        # overloaded by Configurations, which has to switch its
        # entities' endpoints from its own properties/ to configs/.
        raw_path = urllib.parse.unquote(state.links.alternate)
        if 'servicesNS/' in raw_path:
            return _trailing(raw_path, 'servicesNS/', '/', '/')
        elif 'services/' in raw_path:
            return _trailing(raw_path, 'services/')
        else:
            return raw_path

    def _load_list(self, response):
        """Converts *response* to a list of entities.

        *response* is assumed to be a :class:`Record` containing an
        HTTP response, of the form::

            {'status': 200,
             'headers': [('content-length', '232642'),
                         ('expires', 'Fri, 30 Oct 1998 00:00:00 GMT'),
                         ('server', 'Splunkd'),
                         ('connection', 'close'),
                         ('cache-control', 'no-store, max-age=0, must-revalidate, no-cache'),
                         ('date', 'Tue, 29 May 2012 15:27:08 GMT'),
                         ('content-type', 'text/xml; charset=utf-8')],
             'reason': 'OK',
             'body': ...a stream implementing .read()...}

        The ``'body'`` key refers to a stream containing an Atom feed,
        that is, an XML document with a toplevel element ``<feed>``,
        and within that element one or more ``<entry>`` elements.
        """
        # Some subclasses of Collection have to override this because
        # splunkd returns something that doesn't match
        # <feed><entry></feed>.
        entries = _load_atom_entries(response)
        if entries is None: return []
        entities = []
        for entry in entries:
            state = _parse_atom_entry(entry)
            entity = self.item(
                self.service,
                self._entity_path(state),
                state=state)
            entities.append(entity)

        return entities

    def itemmeta(self):
        """Returns metadata for members of the collection.

        Makes a single roundtrip to the server, plus two more at most if
        the ``autologin`` field of :func:`connect` is set to ``True``.

        :return: A :class:`splunklib.data.Record` object containing the metadata.

        **Example**::

            import splunklib.client as client
            import pprint
            s = client.connect(...)
            pprint.pprint(s.apps.itemmeta())
            {'access': {'app': 'search',
                        'can_change_perms': '1',
                        'can_list': '1',
                        'can_share_app': '1',
                        'can_share_global': '1',
                        'can_share_user': '1',
                        'can_write': '1',
                        'modifiable': '1',
                        'owner': 'admin',
                        'perms': {'read': ['*'], 'write': ['admin']},
                        'removable': '0',
                        'sharing': 'user'},
             'fields': {'optional': ['author',
                                     'configured',
                                     'description',
                                     'label',
                                     'manageable',
                                     'template',
                                     'visible'],
                        'required': ['name'], 'wildcard': []}}
        """
        response = self.get("_new")
        content = _load_atom(response, MATCH_ENTRY_CONTENT)
        return _parse_atom_metadata(content)

    def iter(self, offset=0, count=None, pagesize=None, **kwargs):
        """Iterates over the collection.

        This method is equivalent to the :meth:`list` method, but
        it returns an iterator and can load a certain number of entities at a
        time from the server.

        :param offset: The index of the first entity to return (optional).
        :type offset: ``integer``
        :param count: The maximum number of entities to return (optional).
        :type count: ``integer``
        :param pagesize: The number of entities to load (optional).
        :type pagesize: ``integer``
        :param kwargs: Additional arguments (optional):

            - "search" (``string``): The search query to filter responses.

            - "sort_dir" (``string``): The direction to sort returned items:
              "asc" or "desc".

            - "sort_key" (``string``): The field to use for sorting (optional).

            - "sort_mode" (``string``): The collating sequence for sorting
              returned items: "auto", "alpha", "alpha_case", or "num".

        :type kwargs: ``dict``

        **Example**::

            import splunklib.client as client
            s = client.connect(...)
            for saved_search in s.saved_searches.iter(pagesize=10):
                # Loads 10 saved searches at a time from the
                # server.
                ...
        """
        assert pagesize is None or pagesize > 0
        # null_count (-1) encodes "no limit": the loop condition below then
        # runs until the server returns a short (or empty) page.
        if count is None:
            count = self.null_count
        fetched = 0
        while count == self.null_count or fetched < count:
            response = self.get(count=pagesize or count, offset=offset, **kwargs)
            items = self._load_list(response)
            N = len(items)
            fetched += N
            for item in items:
                yield item
            # A short page (fewer than pagesize items) means the server has
            # no more entities; with no pagesize we only ever fetch one page.
            if pagesize is None or N < pagesize:
                break
            offset += N
            # NOTE(review): pagesize is always non-None on this path (the
            # branch above breaks otherwise), so %d is safe here — but the
            # logging call silently depends on that ordering; confirm before
            # reordering the loop body.
            logging.debug("pagesize=%d, fetched=%d, offset=%d, N=%d, kwargs=%s", pagesize, fetched, offset, N, kwargs)

    # kwargs: count, offset, search, sort_dir, sort_key, sort_mode
    def list(self, count=None, **kwargs):
        """Retrieves a list of entities in this collection.

        The entire collection is loaded at once and is returned as a list. This
        function makes a single roundtrip to the server, plus at most two more if
        the ``autologin`` field of :func:`connect` is set to ``True``.
        There is no caching--every call makes at least one round trip.

        :param count: The maximum number of entities to return (optional).
        :type count: ``integer``
        :param kwargs: Additional arguments (optional):

            - "offset" (``integer``): The offset of the first item to return.

            - "search" (``string``): The search query to filter responses.

            - "sort_dir" (``string``): The direction to sort returned items:
              "asc" or "desc".

            - "sort_key" (``string``): The field to use for sorting (optional).

            - "sort_mode" (``string``): The collating sequence for sorting
              returned items: "auto", "alpha", "alpha_case", or "num".

        :type kwargs: ``dict``
        :return: A ``list`` of entities.
        """
        # response = self.get(count=count, **kwargs)
        # return self._load_list(response)
        return list(self.iter(count=count, **kwargs))
class Collection(ReadOnlyCollection):
    """A collection of entities.

    Splunk provides a number of different collections of distinct
    entity types: applications, saved searches, fired alerts, and a
    number of others. Each particular type is available separately
    from the Splunk instance, and the entities of that type are
    returned in a :class:`Collection`.

    The interface for :class:`Collection` does not quite match either
    ``list`` or ``dict`` in Python, because there are enough semantic
    mismatches with either to make its behavior surprising. A unique
    element in a :class:`Collection` is defined by a string giving its
    name plus namespace (although the namespace is optional if the name is
    unique).

    **Example**::

        import splunklib.client as client
        service = client.connect(...)
        mycollection = service.saved_searches
        mysearch = mycollection['my_search', client.namespace(owner='boris', app='natasha', sharing='user')]
        # Or if there is only one search visible named 'my_search'
        mysearch = mycollection['my_search']

    Similarly, ``name`` in ``mycollection`` works as you might expect (though
    you cannot currently pass a namespace to the ``in`` operator), as does
    ``len(mycollection)``.

    However, as an aggregate, :class:`Collection` behaves more like a
    list. If you iterate over a :class:`Collection`, you get an
    iterator over the entities, not the names and namespaces.

    **Example**::

        for entity in mycollection:
            assert isinstance(entity, client.Entity)

    Use the :meth:`create` and :meth:`delete` methods to create and delete
    entities in this collection. To view the access control list and other
    metadata of the collection, use the :meth:`ReadOnlyCollection.itemmeta` method.

    :class:`Collection` does no caching. Each call makes at least one
    round trip to the server to fetch data.
    """

    def create(self, name, **params):
        """Creates a new entity in this collection.

        This function makes either one or two roundtrips to the
        server, depending on the type of entities in this
        collection, plus at most two more if
        the ``autologin`` field of :func:`connect` is set to ``True``.

        :param name: The name of the entity to create.
        :type name: ``string``
        :param namespace: A namespace, as created by the :func:`splunklib.binding.namespace`
            function (optional).  You can also set ``owner``, ``app``, and
            ``sharing`` in ``params``.
        :type namespace: A :class:`splunklib.data.Record` object with keys ``owner``, ``app``,
            and ``sharing``.
        :param params: Additional entity-specific arguments (optional).
        :type params: ``dict``
        :return: The new entity.
        :rtype: A subclass of :class:`Entity`, chosen by :meth:`Collection.self.item`.

        **Example**::

            import splunklib.client as client
            s = client.connect(...)
            applications = s.apps
            new_app = applications.create("my_fake_app")
        """
        if not isinstance(name, six.string_types):
            raise InvalidNameException("%s is not a valid name for an entity." % name)
        # A 'namespace' record in params is unpacked into the individual
        # owner/app/sharing query parameters the REST API expects.
        if 'namespace' in params:
            namespace = params.pop('namespace')
            params['owner'] = namespace.owner
            params['app'] = namespace.app
            params['sharing'] = namespace.sharing
        response = self.post(name=name, **params)
        atom = _load_atom(response, XNAME_ENTRY)
        if atom is None:
            # This endpoint doesn't return the content of the new
            # item. We have to go fetch it ourselves.
            return self[name]
        else:
            entry = atom.entry
            state = _parse_atom_entry(entry)
            entity = self.item(
                self.service,
                self._entity_path(state),
                state=state)
            return entity

    def delete(self, name, **params):
        """Deletes a specified entity from the collection.

        :param name: The name of the entity to delete.
        :type name: ``string``
        :return: The collection.
        :rtype: ``self``

        This method is implemented for consistency with the REST API's DELETE
        method.

        If there is no *name* entity on the server, a ``KeyError`` is
        thrown. This function always makes a roundtrip to the server.

        **Example**::

            import splunklib.client as client
            c = client.connect(...)
            saved_searches = c.saved_searches
            saved_searches.create('my_saved_search',
                                  'search * | head 1')
            assert 'my_saved_search' in saved_searches
            saved_searches.delete('my_saved_search')
            assert 'my_saved_search' not in saved_searches
        """
        name = UrlEncoded(name, encode_slash=True)
        if 'namespace' in params:
            namespace = params.pop('namespace')
            params['owner'] = namespace.owner
            params['app'] = namespace.app
            params['sharing'] = namespace.sharing
        try:
            self.service.delete(_path(self.path, name), **params)
        except HTTPError as he:
            # An HTTPError with status code 404 means that the entity
            # has already been deleted, and we reraise it as a
            # KeyError.
            if he.status == 404:
                raise KeyError("No such entity %s" % name)
            else:
                raise
        return self

    def get(self, name="", owner=None, app=None, sharing=None, **query):
        """Performs a GET request to the server on the collection.

        If *owner*, *app*, and *sharing* are omitted, this method takes a
        default namespace from the :class:`Service` object for this :class:`Endpoint`.
        All other keyword arguments are included in the URL as query parameters.

        :raises AuthenticationError: Raised when the ``Service`` is not logged in.
        :raises HTTPError: Raised when an error in the request occurs.
        :param path_segment: A path segment relative to this endpoint.
        :type path_segment: ``string``
        :param owner: The owner context of the namespace (optional).
        :type owner: ``string``
        :param app: The app context of the namespace (optional).
        :type app: ``string``
        :param sharing: The sharing mode for the namespace (optional).
        :type sharing: "global", "system", "app", or "user"
        :param query: All other keyword arguments, which are used as query
            parameters.
        :type query: ``string``
        :return: The response from the server.
        :rtype: ``dict`` with keys ``body``, ``headers``, ``reason``,
            and ``status``

        Example:

            import splunklib.client
            s = client.service(...)
            saved_searches = s.saved_searches
            saved_searches.get("my/saved/search") == \\
                {'body': ...a response reader object...,
                 'headers': [('content-length', '26208'),
                             ('expires', 'Fri, 30 Oct 1998 00:00:00 GMT'),
                             ('server', 'Splunkd'),
                             ('connection', 'close'),
                             ('cache-control', 'no-store, max-age=0, must-revalidate, no-cache'),
                             ('date', 'Fri, 11 May 2012 16:30:35 GMT'),
                             ('content-type', 'text/xml; charset=utf-8')],
                 'reason': 'OK',
                 'status': 200}
            saved_searches.get('nonexistant/search') # raises HTTPError
            s.logout()
            saved_searches.get() # raises AuthenticationError

        """
        # Slashes in an entity name must be encoded so they are not taken
        # as path separators by the REST layer.
        name = UrlEncoded(name, encode_slash=True)
        return super(Collection, self).get(name, owner, app, sharing, **query)
- saved_searches = s.saved_searches - saved_searches.get("my/saved/search") == \\ - {'body': ...a response reader object..., - 'headers': [('content-length', '26208'), - ('expires', 'Fri, 30 Oct 1998 00:00:00 GMT'), - ('server', 'Splunkd'), - ('connection', 'close'), - ('cache-control', 'no-store, max-age=0, must-revalidate, no-cache'), - ('date', 'Fri, 11 May 2012 16:30:35 GMT'), - ('content-type', 'text/xml; charset=utf-8')], - 'reason': 'OK', - 'status': 200} - saved_searches.get('nonexistant/search') # raises HTTPError - s.logout() - saved_searches.get() # raises AuthenticationError - - """ - name = UrlEncoded(name, encode_slash=True) - return super(Collection, self).get(name, owner, app, sharing, **query) - - - - -class ConfigurationFile(Collection): - """This class contains all of the stanzas from one configuration file. - """ - # __init__'s arguments must match those of an Entity, not a - # Collection, since it is being created as the elements of a - # Configurations, which is a Collection subclass. - def __init__(self, service, path, **kwargs): - Collection.__init__(self, service, path, item=Stanza) - self.name = kwargs['state']['title'] - - -class Configurations(Collection): - """This class provides access to the configuration files from this Splunk - instance. Retrieve this collection using :meth:`Service.confs`. - - Splunk's configuration is divided into files, and each file into - stanzas. This collection is unusual in that the values in it are - themselves collections of :class:`ConfigurationFile` objects. - """ - def __init__(self, service): - Collection.__init__(self, service, PATH_PROPERTIES, item=ConfigurationFile) - if self.service.namespace.owner == '-' or self.service.namespace.app == '-': - raise ValueError("Configurations cannot have wildcards in namespace.") - - def __getitem__(self, key): - # The superclass implementation is designed for collections that contain - # entities. 
This collection (Configurations) contains collections - # (ConfigurationFile). - # - # The configurations endpoint returns multiple entities when we ask for a single file. - # This screws up the default implementation of __getitem__ from Collection, which thinks - # that multiple entities means a name collision, so we have to override it here. - try: - response = self.get(key) - return ConfigurationFile(self.service, PATH_CONF % key, state={'title': key}) - except HTTPError as he: - if he.status == 404: # No entity matching key - raise KeyError(key) - else: - raise - - def __contains__(self, key): - # configs/conf-{name} never returns a 404. We have to post to properties/{name} - # in order to find out if a configuration exists. - try: - response = self.get(key) - return True - except HTTPError as he: - if he.status == 404: # No entity matching key - return False - else: - raise - - def create(self, name): - """ Creates a configuration file named *name*. - - If there is already a configuration file with that name, - the existing file is returned. - - :param name: The name of the configuration file. - :type name: ``string`` - - :return: The :class:`ConfigurationFile` object. - """ - # This has to be overridden to handle the plumbing of creating - # a ConfigurationFile (which is a Collection) instead of some - # Entity. 
- if not isinstance(name, six.string_types): - raise ValueError("Invalid name: %s" % repr(name)) - response = self.post(__conf=name) - if response.status == 303: - return self[name] - elif response.status == 201: - return ConfigurationFile(self.service, PATH_CONF % name, item=Stanza, state={'title': name}) - else: - raise ValueError("Unexpected status code %s returned from creating a stanza" % response.status) - - def delete(self, key): - """Raises `IllegalOperationException`.""" - raise IllegalOperationException("Cannot delete configuration files from the REST API.") - - def _entity_path(self, state): - # Overridden to make all the ConfigurationFile objects - # returned refer to the configs/ path instead of the - # properties/ path used by Configrations. - return PATH_CONF % state['title'] - - -class Stanza(Entity): - """This class contains a single configuration stanza.""" - - def submit(self, stanza): - """Adds keys to the current configuration stanza as a - dictionary of key-value pairs. - - :param stanza: A dictionary of key-value pairs for the stanza. - :type stanza: ``dict`` - :return: The :class:`Stanza` object. - """ - body = _encode(**stanza) - self.service.post(self.path, body=body) - return self - - def __len__(self): - # The stanza endpoint returns all the keys at the same level in the XML as the eai information - # and 'disabled', so to get an accurate length, we have to filter those out and have just - # the stanza keys. - return len([x for x in self._state.content.keys() - if not x.startswith('eai') and x != 'disabled']) - - -class StoragePassword(Entity): - """This class contains a storage password. 
- """ - def __init__(self, service, path, **kwargs): - state = kwargs.get('state', None) - kwargs['skip_refresh'] = kwargs.get('skip_refresh', state is not None) - super(StoragePassword, self).__init__(service, path, **kwargs) - self._state = state - - @property - def clear_password(self): - return self.content.get('clear_password') - - @property - def encrypted_password(self): - return self.content.get('encr_password') - - @property - def realm(self): - return self.content.get('realm') - - @property - def username(self): - return self.content.get('username') - - -class StoragePasswords(Collection): - """This class provides access to the storage passwords from this Splunk - instance. Retrieve this collection using :meth:`Service.storage_passwords`. - """ - def __init__(self, service): - if service.namespace.owner == '-' or service.namespace.app == '-': - raise ValueError("StoragePasswords cannot have wildcards in namespace.") - super(StoragePasswords, self).__init__(service, PATH_STORAGE_PASSWORDS, item=StoragePassword) - - def create(self, password, username, realm=None): - """ Creates a storage password. - - A `StoragePassword` can be identified by , or by : if the - optional realm parameter is also provided. - - :param password: The password for the credentials - this is the only part of the credentials that will be stored securely. - :type name: ``string`` - :param username: The username for the credentials. - :type name: ``string`` - :param realm: The credential realm. (optional) - :type name: ``string`` - - :return: The :class:`StoragePassword` object created. 
- """ - if not isinstance(username, six.string_types): - raise ValueError("Invalid name: %s" % repr(username)) - - if realm is None: - response = self.post(password=password, name=username) - else: - response = self.post(password=password, realm=realm, name=username) - - if response.status != 201: - raise ValueError("Unexpected status code %s returned from creating a stanza" % response.status) - - entries = _load_atom_entries(response) - state = _parse_atom_entry(entries[0]) - storage_password = StoragePassword(self.service, self._entity_path(state), state=state, skip_refresh=True) - - return storage_password - - def delete(self, username, realm=None): - """Delete a storage password by username and/or realm. - - The identifier can be passed in through the username parameter as - or :, but the preferred way is by - passing in the username and realm parameters. - - :param username: The username for the credentials, or : if the realm parameter is omitted. - :type name: ``string`` - :param realm: The credential realm. (optional) - :type name: ``string`` - :return: The `StoragePassword` collection. - :rtype: ``self`` - """ - if realm is None: - # This case makes the username optional, so - # the full name can be passed in as realm. - # Assume it's already encoded. - name = username - else: - # Encode each component separately - name = UrlEncoded(realm, encode_slash=True) + ":" + UrlEncoded(username, encode_slash=True) - - # Append the : expected at the end of the name - if name[-1] is not ":": - name = name + ":" - return Collection.delete(self, name) - - -class AlertGroup(Entity): - """This class represents a group of fired alerts for a saved search. Access - it using the :meth:`alerts` property.""" - def __init__(self, service, path, **kwargs): - Entity.__init__(self, service, path, **kwargs) - - def __len__(self): - return self.count - - @property - def alerts(self): - """Returns a collection of triggered alerts. 
- - :return: A :class:`Collection` of triggered alerts. - """ - return Collection(self.service, self.path) - - @property - def count(self): - """Returns the count of triggered alerts. - - :return: The triggered alert count. - :rtype: ``integer`` - """ - return int(self.content.get('triggered_alert_count', 0)) - - -class Indexes(Collection): - """This class contains the collection of indexes in this Splunk instance. - Retrieve this collection using :meth:`Service.indexes`. - """ - def get_default(self): - """ Returns the name of the default index. - - :return: The name of the default index. - - """ - index = self['_audit'] - return index['defaultDatabase'] - - def delete(self, name): - """ Deletes a given index. - - **Note**: This method is only supported in Splunk 5.0 and later. - - :param name: The name of the index to delete. - :type name: ``string`` - """ - if self.service.splunk_version >= (5,): - Collection.delete(self, name) - else: - raise IllegalOperationException("Deleting indexes via the REST API is " - "not supported before Splunk version 5.") - - -class Index(Entity): - """This class represents an index and provides different operations, such as - cleaning the index, writing to the index, and so forth.""" - def __init__(self, service, path, **kwargs): - Entity.__init__(self, service, path, **kwargs) - - def attach(self, host=None, source=None, sourcetype=None): - """Opens a stream (a writable socket) for writing events to the index. - - :param host: The host value for events written to the stream. - :type host: ``string`` - :param source: The source value for events written to the stream. - :type source: ``string`` - :param sourcetype: The sourcetype value for events written to the - stream. - :type sourcetype: ``string`` - - :return: A writable socket. 
- """ - args = { 'index': self.name } - if host is not None: args['host'] = host - if source is not None: args['source'] = source - if sourcetype is not None: args['sourcetype'] = sourcetype - path = UrlEncoded(PATH_RECEIVERS_STREAM + "?" + urllib.parse.urlencode(args), skip_encode=True) - - cookie_or_auth_header = "Authorization: Splunk %s\r\n" % \ - (self.service.token if self.service.token is _NoAuthenticationToken - else self.service.token.replace("Splunk ", "")) - - # If we have cookie(s), use them instead of "Authorization: ..." - if self.service.has_cookies(): - cookie_or_auth_header = "Cookie: %s\r\n" % _make_cookie_header(self.service.get_cookies().items()) - - # Since we need to stream to the index connection, we have to keep - # the connection open and use the Splunk extension headers to note - # the input mode - sock = self.service.connect() - headers = [("POST %s HTTP/1.1\r\n" % str(self.service._abspath(path))).encode('utf-8'), - ("Host: %s:%s\r\n" % (self.service.host, int(self.service.port))).encode('utf-8'), - b"Accept-Encoding: identity\r\n", - cookie_or_auth_header.encode('utf-8'), - b"X-Splunk-Input-Mode: Streaming\r\n", - b"\r\n"] - - for h in headers: - sock.write(h) - return sock - - @contextlib.contextmanager - def attached_socket(self, *args, **kwargs): - """Opens a raw socket in a ``with`` block to write data to Splunk. - - The arguments are identical to those for :meth:`attach`. The socket is - automatically closed at the end of the ``with`` block, even if an - exception is raised in the block. - - :param host: The host value for events written to the stream. - :type host: ``string`` - :param source: The source value for events written to the stream. - :type source: ``string`` - :param sourcetype: The sourcetype value for events written to the - stream. - :type sourcetype: ``string`` - - :returns: Nothing. - - **Example**:: - - import splunklib.client as client - s = client.connect(...) 
- index = s.indexes['some_index'] - with index.attached_socket(sourcetype='test') as sock: - sock.send('Test event\\r\\n') - - """ - try: - sock = self.attach(*args, **kwargs) - yield sock - finally: - sock.shutdown(socket.SHUT_RDWR) - sock.close() - - def clean(self, timeout=60): - """Deletes the contents of the index. - - This method blocks until the index is empty, because it needs to restore - values at the end of the operation. - - :param timeout: The time-out period for the operation, in seconds (the - default is 60). - :type timeout: ``integer`` - - :return: The :class:`Index`. - """ - self.refresh() - - tds = self['maxTotalDataSizeMB'] - ftp = self['frozenTimePeriodInSecs'] - was_disabled_initially = self.disabled - try: - if (not was_disabled_initially and \ - self.service.splunk_version < (5,)): - # Need to disable the index first on Splunk 4.x, - # but it doesn't work to disable it on 5.0. - self.disable() - self.update(maxTotalDataSizeMB=1, frozenTimePeriodInSecs=1) - self.roll_hot_buckets() - - # Wait until event count goes to 0. - start = datetime.now() - diff = timedelta(seconds=timeout) - while self.content.totalEventCount != '0' and datetime.now() < start+diff: - sleep(1) - self.refresh() - - if self.content.totalEventCount != '0': - raise OperationError("Cleaning index %s took longer than %s seconds; timing out." % (self.name, timeout)) - finally: - # Restore original values - self.update(maxTotalDataSizeMB=tds, frozenTimePeriodInSecs=ftp) - if (not was_disabled_initially and \ - self.service.splunk_version < (5,)): - # Re-enable the index if it was originally enabled and we messed with it. - self.enable() - - return self - - def roll_hot_buckets(self): - """Performs rolling hot buckets for this index. - - :return: The :class:`Index`. - """ - self.post("roll-hot-buckets") - return self - - def submit(self, event, host=None, source=None, sourcetype=None): - """Submits a single event to the index using ``HTTP POST``. 
- - :param event: The event to submit. - :type event: ``string`` - :param `host`: The host value of the event. - :type host: ``string`` - :param `source`: The source value of the event. - :type source: ``string`` - :param `sourcetype`: The sourcetype value of the event. - :type sourcetype: ``string`` - - :return: The :class:`Index`. - """ - args = { 'index': self.name } - if host is not None: args['host'] = host - if source is not None: args['source'] = source - if sourcetype is not None: args['sourcetype'] = sourcetype - - # The reason we use service.request directly rather than POST - # is that we are not sending a POST request encoded using - # x-www-form-urlencoded (as we do not have a key=value body), - # because we aren't really sending a "form". - self.service.post(PATH_RECEIVERS_SIMPLE, body=event, **args) - return self - - # kwargs: host, host_regex, host_segment, rename-source, sourcetype - def upload(self, filename, **kwargs): - """Uploads a file for immediate indexing. - - **Note**: The file must be locally accessible from the server. - - :param filename: The name of the file to upload. The file can be a - plain, compressed, or archived file. - :type filename: ``string`` - :param kwargs: Additional arguments (optional). For more about the - available parameters, see `Index parameters `_ on Splunk Developer Portal. - :type kwargs: ``dict`` - - :return: The :class:`Index`. - """ - kwargs['index'] = self.name - path = 'data/inputs/oneshot' - self.service.post(path, name=filename, **kwargs) - return self - - -class Input(Entity): - """This class represents a Splunk input. This class is the base for all - typed input classes and is also used when the client does not recognize an - input kind. 
- """ - def __init__(self, service, path, kind=None, **kwargs): - # kind can be omitted (in which case it is inferred from the path) - # Otherwise, valid values are the paths from data/inputs ("udp", - # "monitor", "tcp/raw"), or two special cases: "tcp" (which is "tcp/raw") - # and "splunktcp" (which is "tcp/cooked"). - Entity.__init__(self, service, path, **kwargs) - if kind is None: - path_segments = path.split('/') - i = path_segments.index('inputs') + 1 - if path_segments[i] == 'tcp': - self.kind = path_segments[i] + '/' + path_segments[i+1] - else: - self.kind = path_segments[i] - else: - self.kind = kind - - # Handle old input kind names. - if self.kind == 'tcp': - self.kind = 'tcp/raw' - if self.kind == 'splunktcp': - self.kind = 'tcp/cooked' - - def update(self, **kwargs): - """Updates the server with any changes you've made to the current input - along with any additional arguments you specify. - - :param kwargs: Additional arguments (optional). For more about the - available parameters, see `Input parameters `_ on Splunk Developer Portal. - :type kwargs: ``dict`` - - :return: The input this method was called on. - :rtype: class:`Input` - """ - # UDP and TCP inputs require special handling due to their restrictToHost - # field. For all other inputs kinds, we can dispatch to the superclass method. - if self.kind not in ['tcp', 'splunktcp', 'tcp/raw', 'tcp/cooked', 'udp']: - return super(Input, self).update(**kwargs) - else: - # The behavior of restrictToHost is inconsistent across input kinds and versions of Splunk. - # In Splunk 4.x, the name of the entity is only the port, independent of the value of - # restrictToHost. In Splunk 5.0 this changed so the name will be of the form :. - # In 5.0 and 5.0.1, if you don't supply the restrictToHost value on every update, it will - # remove the host restriction from the input. As of 5.0.2 you simply can't change restrictToHost - # on an existing input. 
- - # The logic to handle all these cases: - # - Throw an exception if the user tries to set restrictToHost on an existing input - # for *any* version of Splunk. - # - Set the existing restrictToHost value on the update args internally so we don't - # cause it to change in Splunk 5.0 and 5.0.1. - to_update = kwargs.copy() - - if 'restrictToHost' in kwargs: - raise IllegalOperationException("Cannot set restrictToHost on an existing input with the SDK.") - elif 'restrictToHost' in self._state.content and self.kind != 'udp': - to_update['restrictToHost'] = self._state.content['restrictToHost'] - - # Do the actual update operation. - return super(Input, self).update(**to_update) - - -# Inputs is a "kinded" collection, which is a heterogenous collection where -# each item is tagged with a kind, that provides a single merged view of all -# input kinds. -class Inputs(Collection): - """This class represents a collection of inputs. The collection is - heterogeneous and each member of the collection contains a *kind* property - that indicates the specific type of input. - Retrieve this collection using :meth:`Service.inputs`.""" - - def __init__(self, service, kindmap=None): - Collection.__init__(self, service, PATH_INPUTS, item=Input) - - def __getitem__(self, key): - # The key needed to retrieve the input needs it's parenthesis to be URL encoded - # based on the REST API for input - # - if isinstance(key, tuple) and len(key) == 2: - # Fetch a single kind - key, kind = key - key = UrlEncoded(key, encode_slash=True) - try: - response = self.get(self.kindpath(kind) + "/" + key) - entries = self._load_list(response) - if len(entries) > 1: - raise AmbiguousReferenceException("Found multiple inputs of kind %s named %s." 
% (kind, key)) - elif len(entries) == 0: - raise KeyError((key, kind)) - else: - return entries[0] - except HTTPError as he: - if he.status == 404: # No entity matching kind and key - raise KeyError((key, kind)) - else: - raise - else: - # Iterate over all the kinds looking for matches. - kind = None - candidate = None - key = UrlEncoded(key, encode_slash=True) - for kind in self.kinds: - try: - response = self.get(kind + "/" + key) - entries = self._load_list(response) - if len(entries) > 1: - raise AmbiguousReferenceException("Found multiple inputs of kind %s named %s." % (kind, key)) - elif len(entries) == 0: - pass - else: - if candidate is not None: # Already found at least one candidate - raise AmbiguousReferenceException("Found multiple inputs named %s, please specify a kind" % key) - candidate = entries[0] - except HTTPError as he: - if he.status == 404: - pass # Just carry on to the next kind. - else: - raise - if candidate is None: - raise KeyError(key) # Never found a match. - else: - return candidate - - def __contains__(self, key): - if isinstance(key, tuple) and len(key) == 2: - # If we specify a kind, this will shortcut properly - try: - self.__getitem__(key) - return True - except KeyError: - return False - else: - # Without a kind, we want to minimize the number of round trips to the server, so we - # reimplement some of the behavior of __getitem__ in order to be able to stop searching - # on the first hit. - for kind in self.kinds: - try: - response = self.get(self.kindpath(kind) + "/" + key) - entries = self._load_list(response) - if len(entries) > 0: - return True - else: - pass - except HTTPError as he: - if he.status == 404: - pass # Just carry on to the next kind. - else: - raise - return False - - def create(self, name, kind, **kwargs): - """Creates an input of a specific kind in this collection, with any - arguments you specify. - - :param `name`: The input name. 
- :type name: ``string`` - :param `kind`: The kind of input: - - - "ad": Active Directory - - - "monitor": Files and directories - - - "registry": Windows Registry - - - "script": Scripts - - - "splunktcp": TCP, processed - - - "tcp": TCP, unprocessed - - - "udp": UDP - - - "win-event-log-collections": Windows event log - - - "win-perfmon": Performance monitoring - - - "win-wmi-collections": WMI - - :type kind: ``string`` - :param `kwargs`: Additional arguments (optional). For more about the - available parameters, see `Input parameters `_ on Splunk Developer Portal. - - :type kwargs: ``dict`` - - :return: The new :class:`Input`. - """ - kindpath = self.kindpath(kind) - self.post(kindpath, name=name, **kwargs) - - # If we created an input with restrictToHost set, then - # its path will be :, not just , - # and we have to adjust accordingly. - - # Url encodes the name of the entity. - name = UrlEncoded(name, encode_slash=True) - path = _path( - self.path + kindpath, - '%s:%s' % (kwargs['restrictToHost'], name) \ - if 'restrictToHost' in kwargs else name - ) - return Input(self.service, path, kind) - - def delete(self, name, kind=None): - """Removes an input from the collection. - - :param `kind`: The kind of input: - - - "ad": Active Directory - - - "monitor": Files and directories - - - "registry": Windows Registry - - - "script": Scripts - - - "splunktcp": TCP, processed - - - "tcp": TCP, unprocessed - - - "udp": UDP - - - "win-event-log-collections": Windows event log - - - "win-perfmon": Performance monitoring - - - "win-wmi-collections": WMI - - :type kind: ``string`` - :param name: The name of the input to remove. - :type name: ``string`` - - :return: The :class:`Inputs` collection. - """ - if kind is None: - self.service.delete(self[name].path) - else: - self.service.delete(self[name, kind].path) - return self - - def itemmeta(self, kind): - """Returns metadata for the members of a given kind. 
- - :param `kind`: The kind of input: - - - "ad": Active Directory - - - "monitor": Files and directories - - - "registry": Windows Registry - - - "script": Scripts - - - "splunktcp": TCP, processed - - - "tcp": TCP, unprocessed - - - "udp": UDP - - - "win-event-log-collections": Windows event log - - - "win-perfmon": Performance monitoring - - - "win-wmi-collections": WMI - - :type kind: ``string`` - - :return: The metadata. - :rtype: class:``splunklib.data.Record`` - """ - response = self.get("%s/_new" % self._kindmap[kind]) - content = _load_atom(response, MATCH_ENTRY_CONTENT) - return _parse_atom_metadata(content) - - def _get_kind_list(self, subpath=None): - if subpath is None: - subpath = [] - - kinds = [] - response = self.get('/'.join(subpath)) - content = _load_atom_entries(response) - for entry in content: - this_subpath = subpath + [entry.title] - # The "all" endpoint doesn't work yet. - # The "tcp/ssl" endpoint is not a real input collection. - if entry.title == 'all' or this_subpath == ['tcp','ssl']: - continue - elif 'create' in [x.rel for x in entry.link]: - path = '/'.join(subpath + [entry.title]) - kinds.append(path) - else: - subkinds = self._get_kind_list(subpath + [entry.title]) - kinds.extend(subkinds) - return kinds - - @property - def kinds(self): - """Returns the input kinds on this Splunk instance. - - :return: The list of input kinds. - :rtype: ``list`` - """ - return self._get_kind_list() - - def kindpath(self, kind): - """Returns a path to the resources for a given input kind. - - :param `kind`: The kind of input: - - - "ad": Active Directory - - - "monitor": Files and directories - - - "registry": Windows Registry - - - "script": Scripts - - - "splunktcp": TCP, processed - - - "tcp": TCP, unprocessed - - - "udp": UDP - - - "win-event-log-collections": Windows event log - - - "win-perfmon": Performance monitoring - - - "win-wmi-collections": WMI - - :type kind: ``string`` - - :return: The relative endpoint path. 
- :rtype: ``string`` - """ - if kind == 'tcp': - return UrlEncoded('tcp/raw', skip_encode=True) - elif kind == 'splunktcp': - return UrlEncoded('tcp/cooked', skip_encode=True) - else: - return UrlEncoded(kind, skip_encode=True) - - def list(self, *kinds, **kwargs): - """Returns a list of inputs that are in the :class:`Inputs` collection. - You can also filter by one or more input kinds. - - This function iterates over all possible inputs, regardless of any arguments you - specify. Because the :class:`Inputs` collection is the union of all the inputs of each - kind, this method implements parameters such as "count", "search", and so - on at the Python level once all the data has been fetched. The exception - is when you specify a single input kind, and then this method makes a single request - with the usual semantics for parameters. - - :param kinds: The input kinds to return (optional). - - - "ad": Active Directory - - - "monitor": Files and directories - - - "registry": Windows Registry - - - "script": Scripts - - - "splunktcp": TCP, processed - - - "tcp": TCP, unprocessed - - - "udp": UDP - - - "win-event-log-collections": Windows event log - - - "win-perfmon": Performance monitoring - - - "win-wmi-collections": WMI - - :type kinds: ``string`` - :param kwargs: Additional arguments (optional): - - - "count" (``integer``): The maximum number of items to return. - - - "offset" (``integer``): The offset of the first item to return. - - - "search" (``string``): The search query to filter responses. - - - "sort_dir" (``string``): The direction to sort returned items: - "asc" or "desc". - - - "sort_key" (``string``): The field to use for sorting (optional). - - - "sort_mode" (``string``): The collating sequence for sorting - returned items: "auto", "alpha", "alpha_case", or "num". - - :type kwargs: ``dict`` - - :return: A list of input kinds. 
- :rtype: ``list`` - """ - if len(kinds) == 0: - kinds = self.kinds - if len(kinds) == 1: - kind = kinds[0] - logging.debug("Inputs.list taking short circuit branch for single kind.") - path = self.kindpath(kind) - logging.debug("Path for inputs: %s", path) - try: - path = UrlEncoded(path, skip_encode=True) - response = self.get(path, **kwargs) - except HTTPError as he: - if he.status == 404: # No inputs of this kind - return [] - entities = [] - entries = _load_atom_entries(response) - if entries is None: - return [] # No inputs in a collection comes back with no feed or entry in the XML - for entry in entries: - state = _parse_atom_entry(entry) - # Unquote the URL, since all URL encoded in the SDK - # should be of type UrlEncoded, and all str should not - # be URL encoded. - path = urllib.parse.unquote(state.links.alternate) - entity = Input(self.service, path, kind, state=state) - entities.append(entity) - return entities - - search = kwargs.get('search', '*') - - entities = [] - for kind in kinds: - response = None - try: - kind = UrlEncoded(kind, skip_encode=True) - response = self.get(self.kindpath(kind), search=search) - except HTTPError as e: - if e.status == 404: - continue # No inputs of this kind - else: - raise - - entries = _load_atom_entries(response) - if entries is None: continue # No inputs to process - for entry in entries: - state = _parse_atom_entry(entry) - # Unquote the URL, since all URL encoded in the SDK - # should be of type UrlEncoded, and all str should not - # be URL encoded. 
- path = urllib.parse.unquote(state.links.alternate) - entity = Input(self.service, path, kind, state=state) - entities.append(entity) - if 'offset' in kwargs: - entities = entities[kwargs['offset']:] - if 'count' in kwargs: - entities = entities[:kwargs['count']] - if kwargs.get('sort_mode', None) == 'alpha': - sort_field = kwargs.get('sort_field', 'name') - if sort_field == 'name': - f = lambda x: x.name.lower() - else: - f = lambda x: x[sort_field].lower() - entities = sorted(entities, key=f) - if kwargs.get('sort_mode', None) == 'alpha_case': - sort_field = kwargs.get('sort_field', 'name') - if sort_field == 'name': - f = lambda x: x.name - else: - f = lambda x: x[sort_field] - entities = sorted(entities, key=f) - if kwargs.get('sort_dir', 'asc') == 'desc': - entities = list(reversed(entities)) - return entities - - def __iter__(self, **kwargs): - for item in self.iter(**kwargs): - yield item - - def iter(self, **kwargs): - """ Iterates over the collection of inputs. - - :param kwargs: Additional arguments (optional): - - - "count" (``integer``): The maximum number of items to return. - - - "offset" (``integer``): The offset of the first item to return. - - - "search" (``string``): The search query to filter responses. - - - "sort_dir" (``string``): The direction to sort returned items: - "asc" or "desc". - - - "sort_key" (``string``): The field to use for sorting (optional). - - - "sort_mode" (``string``): The collating sequence for sorting - returned items: "auto", "alpha", "alpha_case", or "num". - - :type kwargs: ``dict`` - """ - for item in self.list(**kwargs): - yield item - - def oneshot(self, path, **kwargs): - """ Creates a oneshot data input, which is an upload of a single file - for one-time indexing. - - :param path: The path and filename. - :type path: ``string`` - :param kwargs: Additional arguments (optional). For more about the - available parameters, see `Input parameters `_ on Splunk Developer Portal. 
- :type kwargs: ``dict`` - """ - self.post('oneshot', name=path, **kwargs) - - -class Job(Entity): - """This class represents a search job.""" - def __init__(self, service, sid, **kwargs): - path = PATH_JOBS + sid - Entity.__init__(self, service, path, skip_refresh=True, **kwargs) - self.sid = sid - - # The Job entry record is returned at the root of the response - def _load_atom_entry(self, response): - return _load_atom(response).entry - - def cancel(self): - """Stops the current search and deletes the results cache. - - :return: The :class:`Job`. - """ - try: - self.post("control", action="cancel") - except HTTPError as he: - if he.status == 404: - # The job has already been cancelled, so - # cancelling it twice is a nop. - pass - else: - raise - return self - - def disable_preview(self): - """Disables preview for this job. - - :return: The :class:`Job`. - """ - self.post("control", action="disablepreview") - return self - - def enable_preview(self): - """Enables preview for this job. - - **Note**: Enabling preview might slow search considerably. - - :return: The :class:`Job`. - """ - self.post("control", action="enablepreview") - return self - - def events(self, **kwargs): - """Returns a streaming handle to this job's events. - - :param kwargs: Additional parameters (optional). For a list of valid - parameters, see `GET search/jobs/{search_id}/events - `_ - in the REST API documentation. - :type kwargs: ``dict`` - - :return: The ``InputStream`` IO handle to this job's events. - """ - kwargs['segmentation'] = kwargs.get('segmentation', 'none') - return self.get("events", **kwargs).body - - def finalize(self): - """Stops the job and provides intermediate results for retrieval. - - :return: The :class:`Job`. - """ - self.post("control", action="finalize") - return self - - def is_done(self): - """Indicates whether this job finished running. - - :return: ``True`` if the job is done, ``False`` if not. 
- :rtype: ``boolean`` - """ - if not self.is_ready(): - return False - done = (self._state.content['isDone'] == '1') - return done - - def is_ready(self): - """Indicates whether this job is ready for querying. - - :return: ``True`` if the job is ready, ``False`` if not. - :rtype: ``boolean`` - - """ - response = self.get() - if response.status == 204: - return False - self._state = self.read(response) - ready = self._state.content['dispatchState'] not in ['QUEUED', 'PARSING'] - return ready - - @property - def name(self): - """Returns the name of the search job, which is the search ID (SID). - - :return: The search ID. - :rtype: ``string`` - """ - return self.sid - - def pause(self): - """Suspends the current search. - - :return: The :class:`Job`. - """ - self.post("control", action="pause") - return self - - def results(self, **query_params): - """Returns a streaming handle to this job's search results. To get a - nice, Pythonic iterator, pass the handle to :class:`splunklib.results.ResultsReader`, - as in:: - - import splunklib.client as client - import splunklib.results as results - from time import sleep - service = client.connect(...) - job = service.jobs.create("search * | head 5") - while not job.is_done(): - sleep(.2) - rr = results.ResultsReader(job.results()) - for result in rr: - if isinstance(result, results.Message): - # Diagnostic messages may be returned in the results - print '%s: %s' % (result.type, result.message) - elif isinstance(result, dict): - # Normal events are returned as dicts - print result - assert rr.is_preview == False - - Results are not available until the job has finished. If called on - an unfinished job, the result is an empty event set. - - This method makes a single roundtrip - to the server, plus at most two additional round trips if - the ``autologin`` field of :func:`connect` is set to ``True``. - - :param query_params: Additional parameters (optional). 
For a list of valid - parameters, see `GET search/jobs/{search_id}/results - `_. - :type query_params: ``dict`` - - :return: The ``InputStream`` IO handle to this job's results. - """ - query_params['segmentation'] = query_params.get('segmentation', 'none') - return self.get("results", **query_params).body - - def preview(self, **query_params): - """Returns a streaming handle to this job's preview search results. - - Unlike :class:`splunklib.results.ResultsReader`, which requires a job to - be finished to - return any results, the ``preview`` method returns any results that have - been generated so far, whether the job is running or not. The - returned search results are the raw data from the server. Pass - the handle returned to :class:`splunklib.results.ResultsReader` to get a - nice, Pythonic iterator over objects, as in:: - - import splunklib.client as client - import splunklib.results as results - service = client.connect(...) - job = service.jobs.create("search * | head 5") - rr = results.ResultsReader(job.preview()) - for result in rr: - if isinstance(result, results.Message): - # Diagnostic messages may be returned in the results - print '%s: %s' % (result.type, result.message) - elif isinstance(result, dict): - # Normal events are returned as dicts - print result - if rr.is_preview: - print "Preview of a running search job." - else: - print "Job is finished. Results are final." - - This method makes one roundtrip to the server, plus at most - two more if - the ``autologin`` field of :func:`connect` is set to ``True``. - - :param query_params: Additional parameters (optional). For a list of valid - parameters, see `GET search/jobs/{search_id}/results_preview - `_ - in the REST API documentation. - :type query_params: ``dict`` - - :return: The ``InputStream`` IO handle to this job's preview results. 
- """ - query_params['segmentation'] = query_params.get('segmentation', 'none') - return self.get("results_preview", **query_params).body - - def searchlog(self, **kwargs): - """Returns a streaming handle to this job's search log. - - :param `kwargs`: Additional parameters (optional). For a list of valid - parameters, see `GET search/jobs/{search_id}/search.log - `_ - in the REST API documentation. - :type kwargs: ``dict`` - - :return: The ``InputStream`` IO handle to this job's search log. - """ - return self.get("search.log", **kwargs).body - - def set_priority(self, value): - """Sets this job's search priority in the range of 0-10. - - Higher numbers indicate higher priority. Unless splunkd is - running as *root*, you can only decrease the priority of a running job. - - :param `value`: The search priority. - :type value: ``integer`` - - :return: The :class:`Job`. - """ - self.post('control', action="setpriority", priority=value) - return self - - def summary(self, **kwargs): - """Returns a streaming handle to this job's summary. - - :param `kwargs`: Additional parameters (optional). For a list of valid - parameters, see `GET search/jobs/{search_id}/summary - `_ - in the REST API documentation. - :type kwargs: ``dict`` - - :return: The ``InputStream`` IO handle to this job's summary. - """ - return self.get("summary", **kwargs).body - - def timeline(self, **kwargs): - """Returns a streaming handle to this job's timeline results. - - :param `kwargs`: Additional timeline arguments (optional). For a list of valid - parameters, see `GET search/jobs/{search_id}/timeline - `_ - in the REST API documentation. - :type kwargs: ``dict`` - - :return: The ``InputStream`` IO handle to this job's timeline. - """ - return self.get("timeline", **kwargs).body - - def touch(self): - """Extends the expiration time of the search to the current time (now) plus - the time-to-live (ttl) value. - - :return: The :class:`Job`. 
- """ - self.post("control", action="touch") - return self - - def set_ttl(self, value): - """Set the job's time-to-live (ttl) value, which is the time before the - search job expires and is still available. - - :param `value`: The ttl value, in seconds. - :type value: ``integer`` - - :return: The :class:`Job`. - """ - self.post("control", action="setttl", ttl=value) - return self - - def unpause(self): - """Resumes the current search, if paused. - - :return: The :class:`Job`. - """ - self.post("control", action="unpause") - return self - - -class Jobs(Collection): - """This class represents a collection of search jobs. Retrieve this - collection using :meth:`Service.jobs`.""" - def __init__(self, service): - Collection.__init__(self, service, PATH_JOBS, item=Job) - # The count value to say list all the contents of this - # Collection is 0, not -1 as it is on most. - self.null_count = 0 - - def _load_list(self, response): - # Overridden because Job takes a sid instead of a path. - entries = _load_atom_entries(response) - if entries is None: return [] - entities = [] - for entry in entries: - state = _parse_atom_entry(entry) - entity = self.item( - self.service, - entry['content']['sid'], - state=state) - entities.append(entity) - return entities - - def create(self, query, **kwargs): - """ Creates a search using a search query and any additional parameters - you provide. - - :param query: The search query. - :type query: ``string`` - :param kwargs: Additiona parameters (optional). For a list of available - parameters, see `Search job parameters - `_ - on Splunk Developer Portal. - :type kwargs: ``dict`` - - :return: The :class:`Job`. 
- """ - if kwargs.get("exec_mode", None) == "oneshot": - raise TypeError("Cannot specify exec_mode=oneshot; use the oneshot method instead.") - response = self.post(search=query, **kwargs) - sid = _load_sid(response) - return Job(self.service, sid) - - def export(self, query, **params): - """Runs a search and immediately starts streaming preview events. - This method returns a streaming handle to this job's events as an XML - document from the server. To parse this stream into usable Python objects, - pass the handle to :class:`splunklib.results.ResultsReader`:: - - import splunklib.client as client - import splunklib.results as results - service = client.connect(...) - rr = results.ResultsReader(service.jobs.export("search * | head 5")) - for result in rr: - if isinstance(result, results.Message): - # Diagnostic messages may be returned in the results - print '%s: %s' % (result.type, result.message) - elif isinstance(result, dict): - # Normal events are returned as dicts - print result - assert rr.is_preview == False - - Running an export search is more efficient as it streams the results - directly to you, rather than having to write them out to disk and make - them available later. As soon as results are ready, you will receive - them. - - The ``export`` method makes a single roundtrip to the server (as opposed - to two for :meth:`create` followed by :meth:`preview`), plus at most two - more if the ``autologin`` field of :func:`connect` is set to ``True``. - - :raises `ValueError`: Raised for invalid queries. - :param query: The search query. - :type query: ``string`` - :param params: Additional arguments (optional). For a list of valid - parameters, see `GET search/jobs/export - `_ - in the REST API documentation. - :type params: ``dict`` - - :return: The ``InputStream`` IO handle to raw XML returned from the server. 
- """ - if "exec_mode" in params: - raise TypeError("Cannot specify an exec_mode to export.") - params['segmentation'] = params.get('segmentation', 'none') - return self.post(path_segment="export", - search=query, - **params).body - - def itemmeta(self): - """There is no metadata available for class:``Jobs``. - - Any call to this method raises a class:``NotSupportedError``. - - :raises: class:``NotSupportedError`` - """ - raise NotSupportedError() - - def oneshot(self, query, **params): - """Run a oneshot search and returns a streaming handle to the results. - - The ``InputStream`` object streams XML fragments from the server. To - parse this stream into usable Python objects, - pass the handle to :class:`splunklib.results.ResultsReader`:: - - import splunklib.client as client - import splunklib.results as results - service = client.connect(...) - rr = results.ResultsReader(service.jobs.oneshot("search * | head 5")) - for result in rr: - if isinstance(result, results.Message): - # Diagnostic messages may be returned in the results - print '%s: %s' % (result.type, result.message) - elif isinstance(result, dict): - # Normal events are returned as dicts - print result - assert rr.is_preview == False - - The ``oneshot`` method makes a single roundtrip to the server (as opposed - to two for :meth:`create` followed by :meth:`results`), plus at most two more - if the ``autologin`` field of :func:`connect` is set to ``True``. - - :raises ValueError: Raised for invalid queries. - - :param query: The search query. - :type query: ``string`` - :param params: Additional arguments (optional): - - - "output_mode": Specifies the output format of the results (XML, - JSON, or CSV). - - - "earliest_time": Specifies the earliest time in the time range to - search. The time string can be a UTC time (with fractional seconds), - a relative time specifier (to now), or a formatted time string. - - - "latest_time": Specifies the latest time in the time range to - search. 
The time string can be a UTC time (with fractional seconds), - a relative time specifier (to now), or a formatted time string. - - - "rf": Specifies one or more fields to add to the search. - - :type params: ``dict`` - - :return: The ``InputStream`` IO handle to raw XML returned from the server. - """ - if "exec_mode" in params: - raise TypeError("Cannot specify an exec_mode to oneshot.") - params['segmentation'] = params.get('segmentation', 'none') - return self.post(search=query, - exec_mode="oneshot", - **params).body - - -class Loggers(Collection): - """This class represents a collection of service logging categories. - Retrieve this collection using :meth:`Service.loggers`.""" - def __init__(self, service): - Collection.__init__(self, service, PATH_LOGGER) - - def itemmeta(self): - """There is no metadata available for class:``Loggers``. - - Any call to this method raises a class:``NotSupportedError``. - - :raises: class:``NotSupportedError`` - """ - raise NotSupportedError() - - -class Message(Entity): - def __init__(self, service, path, **kwargs): - Entity.__init__(self, service, path, **kwargs) - - @property - def value(self): - """Returns the message value. - - :return: The message value. - :rtype: ``string`` - """ - return self[self.name] - - -class ModularInputKind(Entity): - """This class contains the different types of modular inputs. Retrieve this - collection using :meth:`Service.modular_input_kinds`. - """ - def __contains__(self, name): - args = self.state.content['endpoints']['args'] - if name in args: - return True - else: - return Entity.__contains__(self, name) - - def __getitem__(self, name): - args = self.state.content['endpoint']['args'] - if name in args: - return args['item'] - else: - return Entity.__getitem__(self, name) - - @property - def arguments(self): - """A dictionary of all the arguments supported by this modular input kind. - - The keys in the dictionary are the names of the arguments. 
The values are - another dictionary giving the metadata about that argument. The possible - keys in that dictionary are ``"title"``, ``"description"``, ``"required_on_create``", - ``"required_on_edit"``, ``"data_type"``. Each value is a string. It should be one - of ``"true"`` or ``"false"`` for ``"required_on_create"`` and ``"required_on_edit"``, - and one of ``"boolean"``, ``"string"``, or ``"number``" for ``"data_type"``. - - :return: A dictionary describing the arguments this modular input kind takes. - :rtype: ``dict`` - """ - return self.state.content['endpoint']['args'] - - def update(self, **kwargs): - """Raises an error. Modular input kinds are read only.""" - raise IllegalOperationException("Modular input kinds cannot be updated via the REST API.") - - -class SavedSearch(Entity): - """This class represents a saved search.""" - def __init__(self, service, path, **kwargs): - Entity.__init__(self, service, path, **kwargs) - - def acknowledge(self): - """Acknowledges the suppression of alerts from this saved search and - resumes alerting. - - :return: The :class:`SavedSearch`. - """ - self.post("acknowledge") - return self - - @property - def alert_count(self): - """Returns the number of alerts fired by this saved search. - - :return: The number of alerts fired by this saved search. - :rtype: ``integer`` - """ - return int(self._state.content.get('triggered_alert_count', 0)) - - def dispatch(self, **kwargs): - """Runs the saved search and returns the resulting search job. - - :param `kwargs`: Additional dispatch arguments (optional). For details, - see the `POST saved/searches/{name}/dispatch - `_ - endpoint in the REST API documentation. - :type kwargs: ``dict`` - :return: The :class:`Job`. - """ - response = self.post("dispatch", **kwargs) - sid = _load_sid(response) - return Job(self.service, sid) - - @property - def fired_alerts(self): - """Returns the collection of fired alerts (a fired alert group) - corresponding to this saved search's alerts. 
- - :raises IllegalOperationException: Raised when the search is not scheduled. - - :return: A collection of fired alerts. - :rtype: :class:`AlertGroup` - """ - if self['is_scheduled'] == '0': - raise IllegalOperationException('Unscheduled saved searches have no alerts.') - c = Collection( - self.service, - self.service._abspath(PATH_FIRED_ALERTS + self.name, - owner=self._state.access.owner, - app=self._state.access.app, - sharing=self._state.access.sharing), - item=AlertGroup) - return c - - def history(self): - """Returns a list of search jobs corresponding to this saved search. - - :return: A list of :class:`Job` objects. - """ - response = self.get("history") - entries = _load_atom_entries(response) - if entries is None: return [] - jobs = [] - for entry in entries: - job = Job(self.service, entry.title) - jobs.append(job) - return jobs - - def update(self, search=None, **kwargs): - """Updates the server with any changes you've made to the current saved - search along with any additional arguments you specify. - - :param `search`: The search query (optional). - :type search: ``string`` - :param `kwargs`: Additional arguments (optional). For a list of available - parameters, see `Saved search parameters - `_ - on Splunk Developer Portal. - :type kwargs: ``dict`` - - :return: The :class:`SavedSearch`. - """ - # Updates to a saved search *require* that the search string be - # passed, so we pass the current search string if a value wasn't - # provided by the caller. - if search is None: search = self.content.search - Entity.update(self, search=search, **kwargs) - return self - - def scheduled_times(self, earliest_time='now', latest_time='+1h'): - """Returns the times when this search is scheduled to run. - - By default this method returns the times in the next hour. For different - time ranges, set *earliest_time* and *latest_time*. For example, - for all times in the last day use "earliest_time=-1d" and - "latest_time=now". 
- - :param earliest_time: The earliest time. - :type earliest_time: ``string`` - :param latest_time: The latest time. - :type latest_time: ``string`` - - :return: The list of search times. - """ - response = self.get("scheduled_times", - earliest_time=earliest_time, - latest_time=latest_time) - data = self._load_atom_entry(response) - rec = _parse_atom_entry(data) - times = [datetime.fromtimestamp(int(t)) - for t in rec.content.scheduled_times] - return times - - def suppress(self, expiration): - """Skips any scheduled runs of this search in the next *expiration* - number of seconds. - - :param expiration: The expiration period, in seconds. - :type expiration: ``integer`` - - :return: The :class:`SavedSearch`. - """ - self.post("suppress", expiration=expiration) - return self - - @property - def suppressed(self): - """Returns the number of seconds that this search is blocked from running - (possibly 0). - - :return: The number of seconds. - :rtype: ``integer`` - """ - r = self._run_action("suppress") - if r.suppressed == "1": - return int(r.expiration) - else: - return 0 - - def unsuppress(self): - """Cancels suppression and makes this search run as scheduled. - - :return: The :class:`SavedSearch`. - """ - self.post("suppress", expiration="0") - return self - - -class SavedSearches(Collection): - """This class represents a collection of saved searches. Retrieve this - collection using :meth:`Service.saved_searches`.""" - def __init__(self, service): - Collection.__init__( - self, service, PATH_SAVED_SEARCHES, item=SavedSearch) - - def create(self, name, search, **kwargs): - """ Creates a saved search. - - :param name: The name for the saved search. - :type name: ``string`` - :param search: The search query. - :type search: ``string`` - :param kwargs: Additional arguments (optional). For a list of available - parameters, see `Saved search parameters - `_ - on Splunk Developer Portal. - :type kwargs: ``dict`` - :return: The :class:`SavedSearches` collection. 
- """ - return Collection.create(self, name, search=search, **kwargs) - - -class Settings(Entity): - """This class represents configuration settings for a Splunk service. - Retrieve this collection using :meth:`Service.settings`.""" - def __init__(self, service, **kwargs): - Entity.__init__(self, service, "/services/server/settings", **kwargs) - - # Updates on the settings endpoint are POSTed to server/settings/settings. - def update(self, **kwargs): - """Updates the settings on the server using the arguments you provide. - - :param kwargs: Additional arguments. For a list of valid arguments, see - `POST server/settings/{name} - `_ - in the REST API documentation. - :type kwargs: ``dict`` - :return: The :class:`Settings` collection. - """ - self.service.post("/services/server/settings/settings", **kwargs) - return self - - -class User(Entity): - """This class represents a Splunk user. - """ - @property - def role_entities(self): - """Returns a list of roles assigned to this user. - - :return: The list of roles. - :rtype: ``list`` - """ - return [self.service.roles[name] for name in self.content.roles] - - -# Splunk automatically lowercases new user names so we need to match that -# behavior here to ensure that the subsequent member lookup works correctly. -class Users(Collection): - """This class represents the collection of Splunk users for this instance of - Splunk. Retrieve this collection using :meth:`Service.users`. - """ - def __init__(self, service): - Collection.__init__(self, service, PATH_USERS, item=User) - - def __getitem__(self, key): - return Collection.__getitem__(self, key.lower()) - - def __contains__(self, name): - return Collection.__contains__(self, name.lower()) - - def create(self, username, password, roles, **params): - """Creates a new user. - - This function makes two roundtrips to the server, plus at most - two more if - the ``autologin`` field of :func:`connect` is set to ``True``. - - :param username: The username. 
- :type username: ``string`` - :param password: The password. - :type password: ``string`` - :param roles: A single role or list of roles for the user. - :type roles: ``string`` or ``list`` - :param params: Additional arguments (optional). For a list of available - parameters, see `User authentication parameters - `_ - on Splunk Developer Portal. - :type params: ``dict`` - - :return: The new user. - :rtype: :class:`User` - - **Example**:: - - import splunklib.client as client - c = client.connect(...) - users = c.users - boris = users.create("boris", "securepassword", roles="user") - hilda = users.create("hilda", "anotherpassword", roles=["user","power"]) - """ - if not isinstance(username, six.string_types): - raise ValueError("Invalid username: %s" % str(username)) - username = username.lower() - self.post(name=username, password=password, roles=roles, **params) - # splunkd doesn't return the user in the POST response body, - # so we have to make a second round trip to fetch it. - response = self.get(username) - entry = _load_atom(response, XNAME_ENTRY).entry - state = _parse_atom_entry(entry) - entity = self.item( - self.service, - urllib.parse.unquote(state.links.alternate), - state=state) - return entity - - def delete(self, name): - """ Deletes the user and returns the resulting collection of users. - - :param name: The name of the user to delete. - :type name: ``string`` - - :return: - :rtype: :class:`Users` - """ - return Collection.delete(self, name.lower()) - - -class Role(Entity): - """This class represents a user role. - """ - def grant(self, *capabilities_to_grant): - """Grants additional capabilities to this role. - - :param capabilities_to_grant: Zero or more capabilities to grant this - role. For a list of capabilities, see - `Capabilities `_ - on Splunk Developer Portal. - :type capabilities_to_grant: ``string`` or ``list`` - :return: The :class:`Role`. - - **Example**:: - - service = client.connect(...) 
- role = service.roles['somerole'] - role.grant('change_own_password', 'search') - """ - possible_capabilities = self.service.capabilities - for capability in capabilities_to_grant: - if capability not in possible_capabilities: - raise NoSuchCapability(capability) - new_capabilities = self['capabilities'] + list(capabilities_to_grant) - self.post(capabilities=new_capabilities) - return self - - def revoke(self, *capabilities_to_revoke): - """Revokes zero or more capabilities from this role. - - :param capabilities_to_revoke: Zero or more capabilities to grant this - role. For a list of capabilities, see - `Capabilities `_ - on Splunk Developer Portal. - :type capabilities_to_revoke: ``string`` or ``list`` - - :return: The :class:`Role`. - - **Example**:: - - service = client.connect(...) - role = service.roles['somerole'] - role.revoke('change_own_password', 'search') - """ - possible_capabilities = self.service.capabilities - for capability in capabilities_to_revoke: - if capability not in possible_capabilities: - raise NoSuchCapability(capability) - old_capabilities = self['capabilities'] - new_capabilities = [] - for c in old_capabilities: - if c not in capabilities_to_revoke: - new_capabilities.append(c) - if new_capabilities == []: - new_capabilities = '' # Empty lists don't get passed in the body, so we have to force an empty argument. - self.post(capabilities=new_capabilities) - return self - - -class Roles(Collection): - """This class represents the collection of roles in the Splunk instance. - Retrieve this collection using :meth:`Service.roles`.""" - def __init__(self, service): - return Collection.__init__(self, service, PATH_ROLES, item=Role) - - def __getitem__(self, key): - return Collection.__getitem__(self, key.lower()) - - def __contains__(self, name): - return Collection.__contains__(self, name.lower()) - - def create(self, name, **params): - """Creates a new role. 
- - This function makes two roundtrips to the server, plus at most - two more if - the ``autologin`` field of :func:`connect` is set to ``True``. - - :param name: Name for the role. - :type name: ``string`` - :param params: Additional arguments (optional). For a list of available - parameters, see `Roles parameters - `_ - on Splunk Developer Portal. - :type params: ``dict`` - - :return: The new role. - :rtype: :class:`Role` - - **Example**:: - - import splunklib.client as client - c = client.connect(...) - roles = c.roles - paltry = roles.create("paltry", imported_roles="user", defaultApp="search") - """ - if not isinstance(name, six.string_types): - raise ValueError("Invalid role name: %s" % str(name)) - name = name.lower() - self.post(name=name, **params) - # splunkd doesn't return the user in the POST response body, - # so we have to make a second round trip to fetch it. - response = self.get(name) - entry = _load_atom(response, XNAME_ENTRY).entry - state = _parse_atom_entry(entry) - entity = self.item( - self.service, - urllib.parse.unquote(state.links.alternate), - state=state) - return entity - - def delete(self, name): - """ Deletes the role and returns the resulting collection of roles. - - :param name: The name of the role to delete. - :type name: ``string`` - - :rtype: The :class:`Roles` - """ - return Collection.delete(self, name.lower()) - - -class Application(Entity): - """Represents a locally-installed Splunk app.""" - @property - def setupInfo(self): - """Returns the setup information for the app. - - :return: The setup information. 
- """ - return self.content.get('eai:setup', None) - - def package(self): - """ Creates a compressed package of the app for archiving.""" - return self._run_action("package") - - def updateInfo(self): - """Returns any update information that is available for the app.""" - return self._run_action("update") - -class KVStoreCollections(Collection): - def __init__(self, service): - Collection.__init__(self, service, 'storage/collections/config', item=KVStoreCollection) - - def create(self, name, indexes = {}, fields = {}, **kwargs): - """Creates a KV Store Collection. - - :param name: name of collection to create - :type name: ``string`` - :param indexes: dictionary of index definitions - :type indexes: ``dict`` - :param fields: dictionary of field definitions - :type fields: ``dict`` - :param kwargs: a dictionary of additional parameters specifying indexes and field definitions - :type kwargs: ``dict`` - - :return: Result of POST request - """ - for k, v in six.iteritems(indexes): - if isinstance(v, dict): - v = json.dumps(v) - kwargs['index.' + k] = v - for k, v in six.iteritems(fields): - kwargs['field.' + k] = v - return self.post(name=name, **kwargs) - -class KVStoreCollection(Entity): - @property - def data(self): - """Returns data object for this Collection. - - :rtype: :class:`KVStoreData` - """ - return KVStoreCollectionData(self) - - def update_index(self, name, value): - """Changes the definition of a KV Store index. - - :param name: name of index to change - :type name: ``string`` - :param value: new index definition - :type value: ``dict`` or ``string`` - - :return: Result of POST request - """ - kwargs = {} - kwargs['index.' + name] = value if isinstance(value, basestring) else json.dumps(value) - return self.post(**kwargs) - - def update_field(self, name, value): - """Changes the definition of a KV Store field. 
- - :param name: name of field to change - :type name: ``string`` - :param value: new field definition - :type value: ``string`` - - :return: Result of POST request - """ - kwargs = {} - kwargs['field.' + name] = value - return self.post(**kwargs) - -class KVStoreCollectionData(object): - """This class represents the data endpoint for a KVStoreCollection. - - Retrieve using :meth:`KVStoreCollection.data` - """ - JSON_HEADER = [('Content-Type', 'application/json')] - - def __init__(self, collection): - self.service = collection.service - self.collection = collection - self.owner, self.app, self.sharing = collection._proper_namespace() - self.path = 'storage/collections/data/' + UrlEncoded(self.collection.name) + '/' - - def _get(self, url, **kwargs): - return self.service.get(self.path + url, owner=self.owner, app=self.app, sharing=self.sharing, **kwargs) - - def _post(self, url, **kwargs): - return self.service.post(self.path + url, owner=self.owner, app=self.app, sharing=self.sharing, **kwargs) - - def _delete(self, url, **kwargs): - return self.service.delete(self.path + url, owner=self.owner, app=self.app, sharing=self.sharing, **kwargs) - - def query(self, **query): - """ - Gets the results of query, with optional parameters sort, limit, skip, and fields. - - :param query: Optional parameters. Valid options are sort, limit, skip, and fields - :type query: ``dict`` - - :return: Array of documents retrieved by query. - :rtype: ``array`` - """ - return json.loads(self._get('', **query).body.read().decode('utf-8')) - - def query_by_id(self, id): - """ - Returns object with _id = id. - - :param id: Value for ID. If not a string will be coerced to string. - :type id: ``string`` - - :return: Document with id - :rtype: ``dict`` - """ - return json.loads(self._get(UrlEncoded(str(id))).body.read().decode('utf-8')) - - def insert(self, data): - """ - Inserts item into this collection. An _id field will be generated if not assigned in the data. 
- - :param data: Document to insert - :type data: ``string`` - - :return: _id of inserted object - :rtype: ``dict`` - """ - return json.loads(self._post('', headers=KVStoreCollectionData.JSON_HEADER, body=data).body.read().decode('utf-8')) - - def delete(self, query=None): - """ - Deletes all data in collection if query is absent. Otherwise, deletes all data matched by query. - - :param query: Query to select documents to delete - :type query: ``string`` - - :return: Result of DELETE request - """ - return self._delete('', **({'query': query}) if query else {}) - - def delete_by_id(self, id): - """ - Deletes document that has _id = id. - - :param id: id of document to delete - :type id: ``string`` - - :return: Result of DELETE request - """ - return self._delete(UrlEncoded(str(id))) - - def update(self, id, data): - """ - Replaces document with _id = id with data. - - :param id: _id of document to update - :type id: ``string`` - :param data: the new document to insert - :type data: ``string`` - - :return: id of replaced document - :rtype: ``dict`` - """ - return json.loads(self._post(UrlEncoded(str(id)), headers=KVStoreCollectionData.JSON_HEADER, body=data).body.read().decode('utf-8')) - - def batch_find(self, *dbqueries): - """ - Returns array of results from queries dbqueries. - - :param dbqueries: Array of individual queries as dictionaries - :type dbqueries: ``array`` of ``dict`` - - :return: Results of each query - :rtype: ``array`` of ``array`` - """ - if len(dbqueries) < 1: - raise Exception('Must have at least one query.') - - data = json.dumps(dbqueries) - - return json.loads(self._post('batch_find', headers=KVStoreCollectionData.JSON_HEADER, body=data).body.read().decode('utf-8')) - - def batch_save(self, *documents): - """ - Inserts or updates every document specified in documents. 
- - :param documents: Array of documents to save as dictionaries - :type documents: ``array`` of ``dict`` - - :return: Results of update operation as overall stats - :rtype: ``dict`` - """ - if len(documents) < 1: - raise Exception('Must have at least one document.') - - data = json.dumps(documents) - - return json.loads(self._post('batch_save', headers=KVStoreCollectionData.JSON_HEADER, body=data).body.read().decode('utf-8')) diff --git a/solnlib/packages/splunklib/data.py b/solnlib/packages/splunklib/data.py deleted file mode 100644 index c29063d4..00000000 --- a/solnlib/packages/splunklib/data.py +++ /dev/null @@ -1,266 +0,0 @@ -# Copyright 2011-2015 Splunk, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"): you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""The **splunklib.data** module reads the responses from splunkd in Atom Feed -format, which is the format used by most of the REST API. -""" - -from __future__ import absolute_import -import sys -from xml.etree.ElementTree import XML -from . import six - -__all__ = ["load"] - -# LNAME refers to element names without namespaces; XNAME is the same -# name, but with an XML namespace. 
-LNAME_DICT = "dict" -LNAME_ITEM = "item" -LNAME_KEY = "key" -LNAME_LIST = "list" - -XNAMEF_REST = "{http://dev.splunk.com/ns/rest}%s" -XNAME_DICT = XNAMEF_REST % LNAME_DICT -XNAME_ITEM = XNAMEF_REST % LNAME_ITEM -XNAME_KEY = XNAMEF_REST % LNAME_KEY -XNAME_LIST = XNAMEF_REST % LNAME_LIST - -# Some responses don't use namespaces (eg: search/parse) so we look for -# both the extended and local versions of the following names. - -def isdict(name): - return name == XNAME_DICT or name == LNAME_DICT - -def isitem(name): - return name == XNAME_ITEM or name == LNAME_ITEM - -def iskey(name): - return name == XNAME_KEY or name == LNAME_KEY - -def islist(name): - return name == XNAME_LIST or name == LNAME_LIST - -def hasattrs(element): - return len(element.attrib) > 0 - -def localname(xname): - rcurly = xname.find('}') - return xname if rcurly == -1 else xname[rcurly+1:] - -def load(text, match=None): - """This function reads a string that contains the XML of an Atom Feed, then - returns the - data in a native Python structure (a ``dict`` or ``list``). If you also - provide a tag name or path to match, only the matching sub-elements are - loaded. - - :param text: The XML text to load. - :type text: ``string`` - :param match: A tag name or path to match (optional). - :type match: ``string`` - """ - if text is None: return None - text = text.strip() - if len(text) == 0: return None - nametable = { - 'namespaces': [], - 'names': {} - } - - # Convert to unicode encoding in only python 2 for xml parser - if(sys.version_info < (3, 0, 0) and isinstance(text, unicode)): - text = text.encode('utf-8') - - root = XML(text) - items = [root] if match is None else root.findall(match) - count = len(items) - if count == 0: - return None - elif count == 1: - return load_root(items[0], nametable) - else: - return [load_root(item, nametable) for item in items] - -# Load the attributes of the given element. 
-def load_attrs(element): - if not hasattrs(element): return None - attrs = record() - for key, value in six.iteritems(element.attrib): - attrs[key] = value - return attrs - -# Parse a element and return a Python dict -def load_dict(element, nametable = None): - value = record() - children = list(element) - for child in children: - assert iskey(child.tag) - name = child.attrib["name"] - value[name] = load_value(child, nametable) - return value - -# Loads the given elements attrs & value into single merged dict. -def load_elem(element, nametable=None): - name = localname(element.tag) - attrs = load_attrs(element) - value = load_value(element, nametable) - if attrs is None: return name, value - if value is None: return name, attrs - # If value is simple, merge into attrs dict using special key - if isinstance(value, six.string_types): - attrs["$text"] = value - return name, attrs - # Both attrs & value are complex, so merge the two dicts, resolving collisions. - collision_keys = [] - for key, val in six.iteritems(attrs): - if key in value and key in collision_keys: - value[key].append(val) - elif key in value and key not in collision_keys: - value[key] = [value[key], val] - collision_keys.append(key) - else: - value[key] = val - return name, value - -# Parse a element and return a Python list -def load_list(element, nametable=None): - assert islist(element.tag) - value = [] - children = list(element) - for child in children: - assert isitem(child.tag) - value.append(load_value(child, nametable)) - return value - -# Load the given root element. -def load_root(element, nametable=None): - tag = element.tag - if isdict(tag): return load_dict(element, nametable) - if islist(tag): return load_list(element, nametable) - k, v = load_elem(element, nametable) - return Record.fromkv(k, v) - -# Load the children of the given element. 
-def load_value(element, nametable=None): - children = list(element) - count = len(children) - - # No children, assume a simple text value - if count == 0: - text = element.text - if text is None: - return None - text = text.strip() - if len(text) == 0: - return None - return text - - # Look for the special case of a single well-known structure - if count == 1: - child = children[0] - tag = child.tag - if isdict(tag): return load_dict(child, nametable) - if islist(tag): return load_list(child, nametable) - - value = record() - for child in children: - name, item = load_elem(child, nametable) - # If we have seen this name before, promote the value to a list - if name in value: - current = value[name] - if not isinstance(current, list): - value[name] = [current] - value[name].append(item) - else: - value[name] = item - - return value - -# A generic utility that enables "dot" access to dicts -class Record(dict): - """This generic utility class enables dot access to members of a Python - dictionary. - - Any key that is also a valid Python identifier can be retrieved as a field. - So, for an instance of ``Record`` called ``r``, ``r.key`` is equivalent to - ``r['key']``. A key such as ``invalid-key`` or ``invalid.key`` cannot be - retrieved as a field, because ``-`` and ``.`` are not allowed in - identifiers. - - Keys of the form ``a.b.c`` are very natural to write in Python as fields. If - a group of keys shares a prefix ending in ``.``, you can retrieve keys as a - nested dictionary by calling only the prefix. For example, if ``r`` contains - keys ``'foo'``, ``'bar.baz'``, and ``'bar.qux'``, ``r.bar`` returns a record - with the keys ``baz`` and ``qux``. If a key contains multiple ``.``, each - one is placed into a nested dictionary, so you can write ``r.bar.qux`` or - ``r['bar.qux']`` interchangeably. - """ - sep = '.' 
- - def __call__(self, *args): - if len(args) == 0: return self - return Record((key, self[key]) for key in args) - - def __getattr__(self, name): - try: - return self[name] - except KeyError: - raise AttributeError(name) - - def __delattr__(self, name): - del self[name] - - def __setattr__(self, name, value): - self[name] = value - - @staticmethod - def fromkv(k, v): - result = record() - result[k] = v - return result - - def __getitem__(self, key): - if key in self: - return dict.__getitem__(self, key) - key += self.sep - result = record() - for k,v in six.iteritems(self): - if not k.startswith(key): - continue - suffix = k[len(key):] - if '.' in suffix: - ks = suffix.split(self.sep) - z = result - for x in ks[:-1]: - if x not in z: - z[x] = record() - z = z[x] - z[ks[-1]] = v - else: - result[suffix] = v - if len(result) == 0: - raise KeyError("No key or prefix: %s" % key) - return result - - -def record(value=None): - """This function returns a :class:`Record` instance constructed with an - initial value that you provide. - - :param `value`: An initial record value. 
- :type `value`: ``dict`` - """ - if value is None: value = {} - return Record(value) - diff --git a/solnlib/packages/splunklib/modularinput/__init__.py b/solnlib/packages/splunklib/modularinput/__init__.py deleted file mode 100644 index ace954a0..00000000 --- a/solnlib/packages/splunklib/modularinput/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -"""The following imports allow these classes to be imported via -the splunklib.modularinput package like so: - -from splunklib.modularinput import * -""" -from .argument import Argument -from .event import Event -from .event_writer import EventWriter -from .input_definition import InputDefinition -from .scheme import Scheme -from .script import Script -from .validation_definition import ValidationDefinition diff --git a/solnlib/packages/splunklib/modularinput/argument.py b/solnlib/packages/splunklib/modularinput/argument.py deleted file mode 100644 index 4c4b3c82..00000000 --- a/solnlib/packages/splunklib/modularinput/argument.py +++ /dev/null @@ -1,103 +0,0 @@ -# Copyright 2011-2015 Splunk, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"): you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from __future__ import absolute_import -try: - import xml.etree.ElementTree as ET -except ImportError: - import xml.etree.cElementTree as ET - -class Argument(object): - """Class representing an argument to a modular input kind. - - ``Argument`` is meant to be used with ``Scheme`` to generate an XML - definition of the modular input kind that Splunk understands. 
- - ``name`` is the only required parameter for the constructor. - - **Example with least parameters**:: - - arg1 = Argument(name="arg1") - - **Example with all parameters**:: - - arg2 = Argument( - name="arg2", - description="This is an argument with lots of parameters", - validation="is_pos_int('some_name')", - data_type=Argument.data_type_number, - required_on_edit=True, - required_on_create=True - ) - """ - - # Constant values, do not change. - # These should be used for setting the value of an Argument object's data_type field. - data_type_boolean = "BOOLEAN" - data_type_number = "NUMBER" - data_type_string = "STRING" - - def __init__(self, name, description=None, validation=None, - data_type=data_type_string, required_on_edit=False, required_on_create=False, title=None): - """ - :param name: ``string``, identifier for this argument in Splunk. - :param description: ``string``, human-readable description of the argument. - :param validation: ``string`` specifying how the argument should be validated, if using internal validation. - If using external validation, this will be ignored. - :param data_type: ``string``, data type of this field; use the class constants. - "data_type_boolean", "data_type_number", or "data_type_string". - :param required_on_edit: ``Boolean``, whether this arg is required when editing an existing modular input of this kind. - :param required_on_create: ``Boolean``, whether this arg is required when creating a modular input of this kind. - :param title: ``String``, a human-readable title for the argument. - """ - self.name = name - self.description = description - self.validation = validation - self.data_type = data_type - self.required_on_edit = required_on_edit - self.required_on_create = required_on_create - self.title = title - - def add_to_document(self, parent): - """Adds an ``Argument`` object to this ElementTree document. 
- - Adds an subelement to the parent element, typically - and sets up its subelements with their respective text. - - :param parent: An ``ET.Element`` to be the parent of a new subelement - :returns: An ``ET.Element`` object representing this argument. - """ - arg = ET.SubElement(parent, "arg") - arg.set("name", self.name) - - if self.title is not None: - ET.SubElement(arg, "title").text = self.title - - if self.description is not None: - ET.SubElement(arg, "description").text = self.description - - if self.validation is not None: - ET.SubElement(arg, "validation").text = self.validation - - # add all other subelements to this Argument, represented by (tag, text) - subelements = [ - ("data_type", self.data_type), - ("required_on_edit", self.required_on_edit), - ("required_on_create", self.required_on_create) - ] - - for name, value in subelements: - ET.SubElement(arg, name).text = str(value).lower() - - return arg \ No newline at end of file diff --git a/solnlib/packages/splunklib/modularinput/event.py b/solnlib/packages/splunklib/modularinput/event.py deleted file mode 100644 index f8404326..00000000 --- a/solnlib/packages/splunklib/modularinput/event.py +++ /dev/null @@ -1,108 +0,0 @@ -# Copyright 2011-2015 Splunk, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"): you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from __future__ import absolute_import -try: - import xml.etree.cElementTree as ET -except ImportError as ie: - import xml.etree.ElementTree as ET - -class Event(object): - """Represents an event or fragment of an event to be written by this modular input to Splunk. - - To write an input to a stream, call the ``write_to`` function, passing in a stream. - """ - def __init__(self, data=None, stanza=None, time=None, host=None, index=None, source=None, - sourcetype=None, done=True, unbroken=True): - """There are no required parameters for constructing an Event - - **Example with minimal configuration**:: - - my_event = Event( - data="This is a test of my new event.", - stanza="myStanzaName", - time="%.3f" % 1372187084.000 - ) - - **Example with full configuration**:: - - excellent_event = Event( - data="This is a test of my excellent event.", - stanza="excellenceOnly", - time="%.3f" % 1372274622.493, - host="localhost", - index="main", - source="Splunk", - sourcetype="misc", - done=True, - unbroken=True - ) - - :param data: ``string``, the event's text. - :param stanza: ``string``, name of the input this event should be sent to. - :param time: ``float``, time in seconds, including up to 3 decimal places to represent milliseconds. - :param host: ``string``, the event's host, ex: localhost. - :param index: ``string``, the index this event is specified to write to, or None if default index. - :param source: ``string``, the source of this event, or None to have Splunk guess. - :param sourcetype: ``string``, source type currently set on this event, or None to have Splunk guess. - :param done: ``boolean``, is this a complete ``Event``? False if an ``Event`` fragment. - :param unbroken: ``boolean``, Is this event completely encapsulated in this ``Event`` object? 
- """ - self.data = data - self.done = done - self.host = host - self.index = index - self.source = source - self.sourceType = sourcetype - self.stanza = stanza - self.time = time - self.unbroken = unbroken - - def write_to(self, stream): - """Write an XML representation of self, an ``Event`` object, to the given stream. - - The ``Event`` object will only be written if its data field is defined, - otherwise a ``ValueError`` is raised. - - :param stream: stream to write XML to. - """ - if self.data is None: - raise ValueError("Events must have at least the data field set to be written to XML.") - - event = ET.Element("event") - if self.stanza is not None: - event.set("stanza", self.stanza) - event.set("unbroken", str(int(self.unbroken))) - - # if a time isn't set, let Splunk guess by not creating a