From e3b3f8ffed60ad781b1053b35c36bfb4f9f3159e Mon Sep 17 00:00:00 2001 From: Norman Walsh Date: Fri, 10 Jul 2015 11:56:09 -0700 Subject: [PATCH] Initial checkin --- .gitignore | 3 + Makefile | 12 + README.md | 36 + docs/source/conf.py | 358 ++ docs/source/connections.rst | 5 + docs/source/databases.rst | 39 + docs/source/forests.rst | 5 + docs/source/hosts.rst | 5 + docs/source/index.rst | 31 + docs/source/permissions.rst | 5 + docs/source/privileges.rst | 5 + docs/source/roles.rst | 5 + docs/source/servers.rst | 17 + docs/source/users.rst | 5 + docs/source/utilities.rst | 15 + examples/data/customer-001.json | 13 + examples/data/customer-002.json | 17 + .../data/purchases/december/purchase-001.json | 15 + .../data/purchases/december/purchase-002.json | 14 + .../data/purchases/november/purchase-003.json | 15 + .../data/purchases/november/purchase-004.json | 14 + examples/load_content.py | 67 + examples/mlcploader.py | 123 + examples/quickstart.py | 107 + examples/resources.py | 6 + examples/tools.py | 132 + python_api/__init__.py | 16 + python_api/invirtual.py | 11 + python_api/marklogic/__init__.py | 15 + python_api/marklogic/models/__init__.py | 24 + .../marklogic/models/certificate/authority.py | 240 + .../marklogic/models/certificate/request.py | 294 ++ .../marklogic/models/certificate/template.py | 584 +++ python_api/marklogic/models/connection.py | 43 + .../marklogic/models/database/__init__.py | 4023 +++++++++++++++++ .../marklogic/models/database/backup.py | 340 ++ python_api/marklogic/models/database/field.py | 771 ++++ .../marklogic/models/database/fragment.py | 108 + python_api/marklogic/models/database/index.py | 684 +++ .../marklogic/models/database/lexicon.py | 118 + .../models/database/mergeblackout.py | 274 ++ .../marklogic/models/database/namelist.py | 93 + python_api/marklogic/models/database/path.py | 66 + .../marklogic/models/database/ruleset.py | 52 + .../models/database/scheduledbackup.py | 519 +++ .../marklogic/models/database/through.py | 
113 + python_api/marklogic/models/forest.py | 236 + python_api/marklogic/models/host.py | 133 + python_api/marklogic/models/permission.py | 61 + python_api/marklogic/models/privilege.py | 406 ++ python_api/marklogic/models/role.py | 413 ++ .../marklogic/models/server/__init__.py | 2932 ++++++++++++ python_api/marklogic/models/server/module.py | 69 + .../marklogic/models/server/namespace.py | 83 + .../models/server/requestblackout.py | 355 ++ python_api/marklogic/models/server/schema.py | 69 + python_api/marklogic/models/user.py | 407 ++ .../marklogic/models/utilities/__init__.py | 17 + .../marklogic/models/utilities/exceptions.py | 49 + .../marklogic/models/utilities/files.py | 41 + .../marklogic/models/utilities/utilities.py | 246 + .../marklogic/models/utilities/validators.py | 286 ++ setup.py | 67 + test/__init__.py | 6 + test/certs/__init__.py | 2 + test/certs/test_authorities.py | 103 + test/certs/test_requests.py | 61 + test/certs/test_templates.py | 64 + test/databases/__init__.py | 2 + test/databases/test_database.py | 140 + test/databases/test_field_range.py | 137 + test/databases/test_paths.py | 58 + test/forests/__init__.py | 2 + test/forests/test_forest.py | 82 + test/hosts/__init__.py | 2 + test/hosts/test_host.py | 41 + test/privileges/__init__.py | 2 + test/privileges/test_privilege.py | 103 + test/resources.py | 4 + test/roles/__init__.py | 2 + test/roles/test_role.py | 120 + test/servers/__init__.py | 2 + test/servers/test_server.py | 99 + test/settings.py | 16 + test/users/__init__.py | 2 + test/users/test_user.py | 102 + 86 files changed, 16479 insertions(+) create mode 100644 Makefile create mode 100644 README.md create mode 100644 docs/source/conf.py create mode 100644 docs/source/connections.rst create mode 100644 docs/source/databases.rst create mode 100644 docs/source/forests.rst create mode 100644 docs/source/hosts.rst create mode 100644 docs/source/index.rst create mode 100644 docs/source/permissions.rst create mode 100644 
docs/source/privileges.rst create mode 100644 docs/source/roles.rst create mode 100644 docs/source/servers.rst create mode 100644 docs/source/users.rst create mode 100644 docs/source/utilities.rst create mode 100644 examples/data/customer-001.json create mode 100644 examples/data/customer-002.json create mode 100644 examples/data/purchases/december/purchase-001.json create mode 100644 examples/data/purchases/december/purchase-002.json create mode 100644 examples/data/purchases/november/purchase-003.json create mode 100644 examples/data/purchases/november/purchase-004.json create mode 100644 examples/load_content.py create mode 100644 examples/mlcploader.py create mode 100644 examples/quickstart.py create mode 100644 examples/resources.py create mode 100644 examples/tools.py create mode 100644 python_api/__init__.py create mode 100644 python_api/invirtual.py create mode 100644 python_api/marklogic/__init__.py create mode 100644 python_api/marklogic/models/__init__.py create mode 100644 python_api/marklogic/models/certificate/authority.py create mode 100644 python_api/marklogic/models/certificate/request.py create mode 100644 python_api/marklogic/models/certificate/template.py create mode 100644 python_api/marklogic/models/connection.py create mode 100644 python_api/marklogic/models/database/__init__.py create mode 100644 python_api/marklogic/models/database/backup.py create mode 100644 python_api/marklogic/models/database/field.py create mode 100644 python_api/marklogic/models/database/fragment.py create mode 100644 python_api/marklogic/models/database/index.py create mode 100644 python_api/marklogic/models/database/lexicon.py create mode 100644 python_api/marklogic/models/database/mergeblackout.py create mode 100644 python_api/marklogic/models/database/namelist.py create mode 100644 python_api/marklogic/models/database/path.py create mode 100644 python_api/marklogic/models/database/ruleset.py create mode 100644 
python_api/marklogic/models/database/scheduledbackup.py create mode 100644 python_api/marklogic/models/database/through.py create mode 100644 python_api/marklogic/models/forest.py create mode 100644 python_api/marklogic/models/host.py create mode 100644 python_api/marklogic/models/permission.py create mode 100644 python_api/marklogic/models/privilege.py create mode 100644 python_api/marklogic/models/role.py create mode 100644 python_api/marklogic/models/server/__init__.py create mode 100644 python_api/marklogic/models/server/module.py create mode 100644 python_api/marklogic/models/server/namespace.py create mode 100644 python_api/marklogic/models/server/requestblackout.py create mode 100644 python_api/marklogic/models/server/schema.py create mode 100644 python_api/marklogic/models/user.py create mode 100644 python_api/marklogic/models/utilities/__init__.py create mode 100644 python_api/marklogic/models/utilities/exceptions.py create mode 100644 python_api/marklogic/models/utilities/files.py create mode 100644 python_api/marklogic/models/utilities/utilities.py create mode 100644 python_api/marklogic/models/utilities/validators.py create mode 100644 setup.py create mode 100644 test/__init__.py create mode 100644 test/certs/__init__.py create mode 100644 test/certs/test_authorities.py create mode 100644 test/certs/test_requests.py create mode 100644 test/certs/test_templates.py create mode 100644 test/databases/__init__.py create mode 100644 test/databases/test_database.py create mode 100644 test/databases/test_field_range.py create mode 100644 test/databases/test_paths.py create mode 100644 test/forests/__init__.py create mode 100644 test/forests/test_forest.py create mode 100644 test/hosts/__init__.py create mode 100644 test/hosts/test_host.py create mode 100644 test/privileges/__init__.py create mode 100644 test/privileges/test_privilege.py create mode 100644 test/resources.py create mode 100644 test/roles/__init__.py create mode 100644 test/roles/test_role.py 
create mode 100644 test/servers/__init__.py create mode 100644 test/servers/test_server.py create mode 100644 test/settings.py create mode 100644 test/users/__init__.py create mode 100644 test/users/test_user.py diff --git a/.gitignore b/.gitignore index ba74660..11da1ac 100644 --- a/.gitignore +++ b/.gitignore @@ -55,3 +55,6 @@ docs/_build/ # PyBuilder target/ + +# Examples cruft +examples/.mlcp/ diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..4936867 --- /dev/null +++ b/Makefile @@ -0,0 +1,12 @@ +all: test docs + +.PHONY: invenv + +test: invenv + python3 setup.py test + +docs: invenv + sphinx-build -b html docs/source docs/_build + +invenv: + @python3 python_api/invirtual.py diff --git a/README.md b/README.md new file mode 100644 index 0000000..20276d9 --- /dev/null +++ b/README.md @@ -0,0 +1,36 @@ +# MarkLogic Python API + +This is a (relatively low-level) Python(3) API to the MarkLogic REST +Management API. It may grow to support the Client API as well, over time, +and it will certainly grow higher-level APIs. + +The API aims to provide complete coverage of what's in the MarkLogic +REST API in idiomatic Python. + +## Features + +- Creation and configuration of databases, forests, servers, hosts, + users, roles, permissions, and privileges. + +## Getting Started + +1. Download and install MarkLogic (http://developer.marklogic.com/products) +2. Checkout the MarkLogic python package from + http://github.com/marklogic/python-api +3. Install using ``easy_install`` (``easy_install /path/to/python-api``) + +At this point you should be able to script away. In the near future +you’ll be able to directly install using easy_install without first +checking out the project. + +## Running Tests + +To run tests, edit the `test/resources.py` and `test/settings.py` +files to match your MarkLogic setup. The tests reference these values +to connect with your MarkLogic server. 
+ +## Credits + +This library began as a project by Paul Hoehne, to whom we are +indebted. + diff --git a/docs/source/conf.py b/docs/source/conf.py new file mode 100644 index 0000000..bf3f7f6 --- /dev/null +++ b/docs/source/conf.py @@ -0,0 +1,358 @@ +# -*- coding: utf-8 -*- +# +# python_api documentation build configuration file, created by +# sphinx-quickstart on Fri Jul 10 09:35:10 2015. +# +# This file is execfile()d with the current directory set to its +# containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +import sys +import os +import shlex + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +sys.path.insert(0, os.path.abspath('../../python_api')) + +# -- General configuration ------------------------------------------------ + +# If your documentation needs a minimal Sphinx version, state it here. +#needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + 'sphinx.ext.autodoc', + 'sphinx.ext.todo', + 'sphinx.ext.viewcode', +] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +# source_suffix = ['.rst', '.md'] +source_suffix = '.rst' + +# The encoding of source files. +#source_encoding = 'utf-8-sig' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. 
+project = u'python_api' +copyright = u'2015, Author' +author = u'Author' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. +version = '' +# The full version, including alpha/beta/rc tags. +release = '' + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +language = 'en' + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +#today = '' +# Else, today_fmt is used as the format for a strftime call. +#today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +exclude_patterns = ['_build'] + +# The reST default role (used for this markup: `text`) to use for all +# documents. +#default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +#add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +#add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +#show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# A list of ignored prefixes for module index sorting. +#modindex_common_prefix = [] + +# If true, keep warnings as "system message" paragraphs in the built documents. +#keep_warnings = False + +# If true, `todo` and `todoList` produce output, else they produce nothing. 
+todo_include_todos = True + + +# -- Options for HTML output ---------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +html_theme = 'alabaster' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +#html_theme_options = {} + +# Add any paths that contain custom themes here, relative to this directory. +#html_theme_path = [] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +#html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +#html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +#html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +#html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + +# Add any extra paths that contain custom files (such as robots.txt or +# .htaccess) here, relative to this directory. These files are copied +# directly to the root of the documentation. +#html_extra_path = [] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +#html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +#html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. 
+#html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +#html_additional_pages = {} + +# If false, no module index is generated. +#html_domain_indices = True + +# If false, no index is generated. +#html_use_index = True + +# If true, the index is split into individual pages for each letter. +#html_split_index = False + +# If true, links to the reST sources are added to the pages. +#html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +#html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +#html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +#html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +#html_file_suffix = None + +# Language to be used for generating the HTML full-text search index. +# Sphinx supports the following languages: +# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' +# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' +#html_search_language = 'en' + +# A dictionary with options for the search language support, empty by default. +# Now only 'ja' uses this config value +#html_search_options = {'type': 'default'} + +# The name of a javascript file (relative to the configuration directory) that +# implements a search results scorer. If empty, the default will be used. +#html_search_scorer = 'scorer.js' + +# Output file base name for HTML help builder. +htmlhelp_basename = 'python_apidoc' + +# -- Options for LaTeX output --------------------------------------------- + +latex_elements = { +# The paper size ('letterpaper' or 'a4paper'). +#'papersize': 'letterpaper', + +# The font size ('10pt', '11pt' or '12pt'). 
+#'pointsize': '10pt', + +# Additional stuff for the LaTeX preamble. +#'preamble': '', + +# Latex figure (float) alignment +#'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + (master_doc, 'python_api.tex', u'python\\_api Documentation', + u'Author', 'manual'), +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +#latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +#latex_use_parts = False + +# If true, show page references after internal links. +#latex_show_pagerefs = False + +# If true, show URL addresses after external links. +#latex_show_urls = False + +# Documents to append as an appendix to all manuals. +#latex_appendices = [] + +# If false, no module index is generated. +#latex_domain_indices = True + + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + (master_doc, 'python_api', u'python_api Documentation', + [author], 1) +] + +# If true, show URL addresses after external links. +#man_show_urls = False + + +# -- Options for Texinfo output ------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + (master_doc, 'python_api', u'python_api Documentation', + author, 'python_api', 'One line description of project.', + 'Miscellaneous'), +] + +# Documents to append as an appendix to all manuals. +#texinfo_appendices = [] + +# If false, no module index is generated. +#texinfo_domain_indices = True + +# How to display URL addresses: 'footnote', 'no', or 'inline'. 
+#texinfo_show_urls = 'footnote' + +# If true, do not generate a @detailmenu in the "Top" node's menu. +#texinfo_no_detailmenu = False + + +# -- Options for Epub output ---------------------------------------------- + +# Bibliographic Dublin Core info. +epub_title = project +epub_author = author +epub_publisher = author +epub_copyright = copyright + +# The basename for the epub file. It defaults to the project name. +#epub_basename = project + +# The HTML theme for the epub output. Since the default themes are not optimized +# for small screen space, using the same theme for HTML and epub output is +# usually not wise. This defaults to 'epub', a theme designed to save visual +# space. +#epub_theme = 'epub' + +# The language of the text. It defaults to the language option +# or 'en' if the language is not set. +#epub_language = '' + +# The scheme of the identifier. Typical schemes are ISBN or URL. +#epub_scheme = '' + +# The unique identifier of the text. This can be a ISBN number +# or the project homepage. +#epub_identifier = '' + +# A unique identification for the text. +#epub_uid = '' + +# A tuple containing the cover image and cover page html template filenames. +#epub_cover = () + +# A sequence of (type, uri, title) tuples for the guide element of content.opf. +#epub_guide = () + +# HTML files that should be inserted before the pages created by sphinx. +# The format is a list of tuples containing the path and title. +#epub_pre_files = [] + +# HTML files shat should be inserted after the pages created by sphinx. +# The format is a list of tuples containing the path and title. +#epub_post_files = [] + +# A list of files that should not be packed into the epub file. +epub_exclude_files = ['search.html'] + +# The depth of the table of contents in toc.ncx. +#epub_tocdepth = 3 + +# Allow duplicate toc entries. +#epub_tocdup = True + +# Choose between 'default' and 'includehidden'. +#epub_tocscope = 'default' + +# Fix unsupported image types using the Pillow. 
+#epub_fix_images = False + +# Scale large images. +#epub_max_image_width = 0 + +# How to display URL addresses: 'footnote', 'no', or 'inline'. +#epub_show_urls = 'inline' + +# If false, no index is generated. +#epub_use_index = True diff --git a/docs/source/connections.rst b/docs/source/connections.rst new file mode 100644 index 0000000..70764dc --- /dev/null +++ b/docs/source/connections.rst @@ -0,0 +1,5 @@ +MarkLogic Connections +===================== + +.. automodule:: marklogic.models.connection + :members: diff --git a/docs/source/databases.rst b/docs/source/databases.rst new file mode 100644 index 0000000..e489c13 --- /dev/null +++ b/docs/source/databases.rst @@ -0,0 +1,39 @@ +MarkLogic Databases +=================== + +.. automodule:: marklogic.models.database + :members: + +.. automodule:: marklogic.models.database.fragment + :members: + +.. automodule:: marklogic.models.database.through + :members: + +.. automodule:: marklogic.models.database.field + :members: + +.. automodule:: marklogic.models.database.path + :members: + +.. automodule:: marklogic.models.database.index + :members: + +.. automodule:: marklogic.models.database.lexicon + :members: + +.. automodule:: marklogic.models.database.ruleset + :members: + +.. automodule:: marklogic.models.database.namelist + :members: + +.. automodule:: marklogic.models.database.mergeblackout + :members: + +.. automodule:: marklogic.models.database.scheduledbackup + :members: + +.. automodule:: marklogic.models.database.backup + :members: + diff --git a/docs/source/forests.rst b/docs/source/forests.rst new file mode 100644 index 0000000..3f7690a --- /dev/null +++ b/docs/source/forests.rst @@ -0,0 +1,5 @@ +MarkLogic Forests +================= + +.. automodule:: marklogic.models.forest + :members: diff --git a/docs/source/hosts.rst b/docs/source/hosts.rst new file mode 100644 index 0000000..c1f3e78 --- /dev/null +++ b/docs/source/hosts.rst @@ -0,0 +1,5 @@ +MarkLogic Hosts +=============== + +.. 
automodule:: marklogic.models.host + :members: diff --git a/docs/source/index.rst b/docs/source/index.rst new file mode 100644 index 0000000..5fd8d22 --- /dev/null +++ b/docs/source/index.rst @@ -0,0 +1,31 @@ +.. MarkLogic Python documentation master file, created by + sphinx-quickstart on Tue May 5 08:03:35 2015. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +Welcome to MarkLogic Python's documentation! +============================================ + +Contents: + +.. toctree:: + :maxdepth: 2 + + databases.rst + servers.rst + roles.rst + users.rst + permissions.rst + privileges.rst + forests.rst + hosts.rst + connections.rst + utilities.rst + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` + diff --git a/docs/source/permissions.rst b/docs/source/permissions.rst new file mode 100644 index 0000000..27772b7 --- /dev/null +++ b/docs/source/permissions.rst @@ -0,0 +1,5 @@ +MarkLogic Permissions +===================== + +.. automodule:: marklogic.models.permission + :members: diff --git a/docs/source/privileges.rst b/docs/source/privileges.rst new file mode 100644 index 0000000..b52e52f --- /dev/null +++ b/docs/source/privileges.rst @@ -0,0 +1,5 @@ +MarkLogic Privileges +===================== + +.. automodule:: marklogic.models.privilege + :members: diff --git a/docs/source/roles.rst b/docs/source/roles.rst new file mode 100644 index 0000000..ccd4c75 --- /dev/null +++ b/docs/source/roles.rst @@ -0,0 +1,5 @@ +MarkLogic Roles +=============== + +.. automodule:: marklogic.models.role + :members: diff --git a/docs/source/servers.rst b/docs/source/servers.rst new file mode 100644 index 0000000..a396bea --- /dev/null +++ b/docs/source/servers.rst @@ -0,0 +1,17 @@ +MarkLogic Application Servers +============================= + +.. automodule:: marklogic.models.server + :members: + +.. automodule:: marklogic.models.server.module + :members: + +.. 
automodule:: marklogic.models.server.requestblackout + :members: + +.. automodule:: marklogic.models.server.namespace + :members: + +.. automodule:: marklogic.models.server.schema + :members: diff --git a/docs/source/users.rst b/docs/source/users.rst new file mode 100644 index 0000000..624fd0d --- /dev/null +++ b/docs/source/users.rst @@ -0,0 +1,5 @@ +MarkLogic Users +=============== + +.. automodule:: marklogic.models.user + :members: diff --git a/docs/source/utilities.rst b/docs/source/utilities.rst new file mode 100644 index 0000000..db33964 --- /dev/null +++ b/docs/source/utilities.rst @@ -0,0 +1,15 @@ +Marklogic Utility Classes +========================= + +.. automodule:: marklogic.models.utilities.validators + :members: + +.. automodule:: marklogic.models.utilities.exceptions + :members: + +.. automodule:: marklogic.models.utilities.utilities + :members: + +.. automodule:: marklogic.models.utilities.files + :members: + diff --git a/examples/data/customer-001.json b/examples/data/customer-001.json new file mode 100644 index 0000000..c4a6d15 --- /dev/null +++ b/examples/data/customer-001.json @@ -0,0 +1,13 @@ +{ + "id": 1234, + "name": "John Smith", + "email": "john.smith@gmail.com", + "phone": "703-555-1212", + "address": { + "street": "123 Melody Ln", + "city": "Fairfax", + "state": "VA", + "zip": "99999" + }, + "customer-type": "consumer" +} \ No newline at end of file diff --git a/examples/data/customer-002.json b/examples/data/customer-002.json new file mode 100644 index 0000000..48fd1c1 --- /dev/null +++ b/examples/data/customer-002.json @@ -0,0 +1,17 @@ +{ + "id": 3242, + "name": "Arlington Corp", + "address": { + "street": "800 Lyndstrom Ct", + "suite": "200", + "city": "Fairfax", + "state": "VA", + "zip": "99999" + }, + "customer-type": "business", + "contact": { + "name": "Jane Doe", + "telephone": "703-555-1212", + "email": "joe@arlingtoncorp.com" + } +} \ No newline at end of file diff --git a/examples/data/purchases/december/purchase-001.json 
b/examples/data/purchases/december/purchase-001.json new file mode 100644 index 0000000..9e0137d --- /dev/null +++ b/examples/data/purchases/december/purchase-001.json @@ -0,0 +1,15 @@ +{ + "purchase-date": "2014-11-14T14:22:13.000000", + "customer-id": 1234, + "line-items": [ + {"item-id": "AB1234", "name": "Base Terraformer", "units": 3, "price": 123000.00, "total": 369000.00 }, + {"item-id": "FK5689", "name": "Plasma Rifles", "units": 125, "price": 10000.00, "total": 1250000.00 }, + {"item-id": "MT2423", "name": "Landing Craft", "units": 1, "price": 890000.00, "total": 890000.00 } + ], + "shipping": { + "street": "123 Melody Lane", + "city": "Fairfax", + "state": "VA", + "zip": "99999" + } +} \ No newline at end of file diff --git a/examples/data/purchases/december/purchase-002.json b/examples/data/purchases/december/purchase-002.json new file mode 100644 index 0000000..a12c51f --- /dev/null +++ b/examples/data/purchases/december/purchase-002.json @@ -0,0 +1,14 @@ +{ + "purchase-date": "2014-11-22T08:22:13.000000", + "customer-id": 3242, + "line-items": [ + {"item-id": "TX2343", "name": "Scout Walker", "units": 10, "price": 125000.00, "total": 1250000.00 }, + {"item-id": "RK3543", "name": "Landing Craft", "units": 1, "price": 890000.00, "total": 890000.00 } + ], + "shipping": { + "street": "500 Stoneridge CT", + "city": "Arlington", + "state": "VA", + "zip": "99999" + } +} \ No newline at end of file diff --git a/examples/data/purchases/november/purchase-003.json b/examples/data/purchases/november/purchase-003.json new file mode 100644 index 0000000..dc6f532 --- /dev/null +++ b/examples/data/purchases/november/purchase-003.json @@ -0,0 +1,15 @@ +{ + "purchase-date": "2014-12-14T14:22:13.000000", + "customer-id": 1234, + "line-items": [ + {"item-id": "AB1234", "name": "Base Terraformer", "units": 3, "price": 123000.00, "total": 369000.00 }, + {"item-id": "FK5689", "name": "Plasma Rifles", "units": 125, "price": 10000.00, "total": 1250000.00 }, + {"item-id": 
"MT2423", "name": "Landing Craft", "units": 1, "price": 890000.00, "total": 890000.00 } + ], + "shipping": { + "street": "123 Melody Lane", + "city": "Fairfax", + "state": "VA", + "zip": "99999" + } +} \ No newline at end of file diff --git a/examples/data/purchases/november/purchase-004.json b/examples/data/purchases/november/purchase-004.json new file mode 100644 index 0000000..1af04c0 --- /dev/null +++ b/examples/data/purchases/november/purchase-004.json @@ -0,0 +1,14 @@ +{ + "purchase-date": "2014-12-22T08:22:13.000000", + "customer-id": 3242, + "line-items": [ + {"item-id": "TX2343", "name": "Scout Walker", "units": 10, "price": 125000.00, "total": 1250000.00 }, + {"item-id": "RK3543", "name": "Landing Craft", "units": 1, "price": 890000.00, "total": 890000.00 } + ], + "shipping": { + "street": "500 Stoneridge CT", + "city": "Arlington", + "state": "VA", + "zip": "99999" + } +} \ No newline at end of file diff --git a/examples/load_content.py b/examples/load_content.py new file mode 100644 index 0000000..a6ca58b --- /dev/null +++ b/examples/load_content.py @@ -0,0 +1,67 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals, print_function, absolute_import + +# +# Copyright 2015 MarkLogic Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# File History +# ------------ +# +# Paul Hoehne 03/09/2015 Initial development +# Paul Hoehne 04/25/2015 Adding check to see if MLCP is already +# in the user path. 
+# + +import os +import time +import logging +from mlcploader import MLCPLoader +from marklogic.models import Connection, Host +from quickstart import SimpleApplication +from requests.auth import HTTPDigestAuth +from resources import TestConnection as tc +import shutil + +class LoadContent(): + def __init__(self): + if os.path.isdir(".mlcp"): + shutil.rmtree(".mlcp") + + def setup_mlcp(self): + loader = MLCPLoader() + loader.clear_directory() + loader.download_mlcp() + + def load_data(self): + simpleapp = SimpleApplication(tc.appname, tc.port) + + conn = Connection(tc.hostname, HTTPDigestAuth(tc.admin, tc.password)) + hostname = Host.list(conn)[0] + exampleapp = simpleapp.create(conn, hostname) + + loader = MLCPLoader() + loader.load_directory(conn, exampleapp['content'], + "data", + collections=["example1"], prefix="/test/data1") + +if __name__ == "__main__": + logging.basicConfig(level=logging.WARNING) + logging.getLogger("requests").setLevel(logging.WARNING) + logging.getLogger("marklogic").setLevel(logging.DEBUG) + logging.getLogger("marklogic.examples").setLevel(logging.INFO) + + loader = LoadContent() + loader.setup_mlcp() + loader.load_data() diff --git a/examples/mlcploader.py b/examples/mlcploader.py new file mode 100644 index 0000000..4f6ba5f --- /dev/null +++ b/examples/mlcploader.py @@ -0,0 +1,123 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals, print_function, absolute_import + +# +# Copyright 2015 MarkLogic Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0# +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# File History +# ------------ +# +# Paul Hoehne 03/03/2015 Initial development +# Norman Walsh 07/10/2015 Hacked at it +# + +import os +import requests +import zipfile +import platform +import shutil +import subprocess +import logging + + +""" +These are assorted tools to help the end user. The goal is to provide +a simple set of scripting interfaces. +""" + +class MLCPLoader(): + """ + This class will execute the content pump to load data. + """ + def __init__(self): + self.logger = logging.getLogger("marklogic.examples") + pass + + def load(self, conn): + pass + + def clear_directory(self): + self.logger.info("Clearing .mlcp") + if os.path.isdir(".mlcp"): + shutil.rmtree(".mlcp") + + def download_mlcp(self): + self.logger.info("Downloading MLCP") + os.mkdir(".mlcp") + mlcp_url = "http://developer.marklogic.com/download/binaries/mlcp/mlcp-Hadoop2-1.3-1-bin.zip" + archive_path = os.path.join(".mlcp", "mlcp.zip") + chunk_size = 16 * 1024 + + response = requests.get(mlcp_url, stream=True) + with open(archive_path, "wb") as bin_file: + for chunk in response.iter_content(chunk_size): + bin_file.write(chunk) + + archive = zipfile.ZipFile(archive_path) + archive.extractall(os.path.join(".mlcp")) + for filename in os.listdir(".mlcp"): + if filename.find("Hadoop") > -1: + os.rename(os.path.join(".mlcp", filename), os.path.join(".mlcp", "mlcp")) + + def load_directory(self, conn, database, data_directory, collections=None, prefix=''): + self.logger.info("Loading data from {0}".format(data_directory)) + mlcp_path = self.mlcp_path() + if not mlcp_path: + which_script = "mlcp.sh" + if platform.system() == "Windows": + which_script = "mlcp.bat" + + mlcp_path = os.path.join(".mlcp", "mlcp", "bin", which_script) + + if not os.path.exists(mlcp_path): + self.download_mlcp() + + if platform.system() != "Windows": + subprocess.call(["chmod", "+x", mlcp_path]) + + if 
collections: + collections_command = "-output_collections \"{0}\"".format(",".join(collections)) + else: + collections_command = '' + + command_line = "{0} import -username {1} -password {2} -host {3} -port {4} -database {5} {6} " \ + "-input_file_path {7} -output_uri_replace \"{8},'{9}'\"" + + full_path = os.path.abspath(data_directory) + if platform.system() == "Windows": + full_path = "/" + full_path.replace("\\", "/") + run_line = command_line.format(mlcp_path, conn.auth.username, conn.auth.password, conn.host, + conn.port, database.database_name(), collections_command, + full_path, full_path, prefix) + with os.popen(run_line) as in_file: + for line in in_file: + print(line.rstrip()) + + def mlcp_installed(self): + paths = os.environ["PATH"].split(os.pathsep) + return True in [t.endswith("mlcp/bin") or t.endswith("mlcp\\bin") for t in paths] + + def mlcp_path(self): + if platform.system() == "Windows": + for path in os.environ["PATH"].split(os.pathsep): + full_path = os.path.abspath(os.path.join(path, "mlcp.bat")) + if os.path.exists(full_path): + return full_path + else: + for path in os.environ["PATH"].split(os.pathsep): + full_path = os.path.abspath(os.path.join(path, "mlcp.sh")) + if os.path.exists(full_path): + return full_path + return None diff --git a/examples/quickstart.py b/examples/quickstart.py new file mode 100644 index 0000000..e1cfcf3 --- /dev/null +++ b/examples/quickstart.py @@ -0,0 +1,107 @@ +# +# Copyright 2015 MarkLogic Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0# +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# File History +# ------------ +# +# Paul Hoehne 03/03/2015 Initial development +# Norman Walsh 07/10/2015 Hacked at it + +from __future__ import unicode_literals, print_function, absolute_import + +import sys +import logging + +from marklogic.models.connection import Connection +from marklogic.models.host import Host +from marklogic.models.database import Database +from marklogic.models.forest import Forest +from marklogic.models.server import HttpServer +from requests.auth import HTTPDigestAuth +from resources import TestConnection as tc + +class Quickstart(): + def __init__(self): + pass + + def create(self, conn): + pass + + def destroy(self): + pass + + +class SimpleApplication(Quickstart): + def __init__(self, app_name, port=8100, forests=3): + """ + Factory class to create databases with an HTTP server and + modules database. The parts will be named _db for the + database, _modules_db for the modules database, and + the HTTP server port will be on the given port. + + :param app_name: The base name for the application + :param port: The port number for the HTTP server + :param forests: The number of forests + :return: The initialized object + """ + Quickstart.__init__(self) + + self.logger = logging.getLogger("marklogic.examples") + self._db_name = app_name + "_db" + self._modules_db_name = app_name + "_modules_db" + self._app_port = port + self._http_server = app_name + "_http_" + str(port) + self._forests = [self._db_name + "_forest_" + str(i + 1) for i in range(0, forests)] + self._modules_forest = self._modules_db_name + "_forest" + + def create(self, conn, hostname='localhost.localdomain'): + """ + Connects to the server and creates the relevant artifacts, + including the database, the modules database, and the HTTP + server. + + :param conn: The server connection + :return:A map containing the content db, the modules db and the HTTP server. 
+ """ + self.logger.info("Create simple application") + data_database = Database(self._db_name, hostname) + data_database.set_forest_names(self._forests) + + modules_database = Database(self._modules_db_name, hostname) + + server = HttpServer(self._http_server, "Default", + self._app_port, self._db_name, self._modules_db_name) + server.set_modules_database_name(self._modules_db_name) + + data_database.create(conn) + modules_database.create(conn) + server.create(conn) + + return { + u'content': data_database, + u'modules': modules_database, + u'server': server + } + +if __name__ == "__main__": + logging.basicConfig(level=logging.WARNING) + logging.getLogger("requests").setLevel(logging.WARNING) + logging.getLogger("marklogic").setLevel(logging.DEBUG) + logging.getLogger("marklogic.examples").setLevel(logging.INFO) + + simpleapp = SimpleApplication(tc.appname, tc.port) + conn = Connection(tc.hostname, HTTPDigestAuth(tc.admin, tc.password)) + hostname = Host.list(conn)[0] + myapp = simpleapp.create(conn, hostname) diff --git a/examples/resources.py b/examples/resources.py new file mode 100644 index 0000000..63baced --- /dev/null +++ b/examples/resources.py @@ -0,0 +1,6 @@ +class TestConnection(object): + hostname = "localhost" + admin = "admin" + password = "admin" + appname = "example_app" + port = 10101 diff --git a/examples/tools.py b/examples/tools.py new file mode 100644 index 0000000..7c1f3ad --- /dev/null +++ b/examples/tools.py @@ -0,0 +1,132 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals, print_function, absolute_import + +# +# Copyright 2015 MarkLogic Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0# +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# File History +# ------------ +# +# Paul Hoehne 03/03/2015 Initial development +# Norman Walsh 07/10/2015 Hacked at it +# + +import os +import requests +import zipfile +import platform +import shutil +import subprocess +import logging + + +""" +These are assorted tools to help the end user. The goal is to provide +a simple set of scripting interfaces. +""" + +class MLCPLoader(): + """ + This class will execute the content pump to load data. + """ + def __init__(self): + pass + + def load(self, conn): + pass + + def clear_directory(self): + if os.path.isdir(".mlcp"): + shutil.rmtree(".mlcp") + + def download_mlcp(self): + os.mkdir(".mlcp") + mlcp_url = "http://developer.marklogic.com/download/binaries/mlcp/mlcp-Hadoop2-1.3-1-bin.zip" + archive_path = os.path.join(".mlcp", "mlcp.zip") + chunk_size = 16 * 1024 + + response = requests.get(mlcp_url, stream=True) + with open(archive_path, "wb") as bin_file: + for chunk in response.iter_content(chunk_size): + bin_file.write(chunk) + + archive = zipfile.ZipFile(archive_path) + archive.extractall(os.path.join(".mlcp")) + for filename in os.listdir(".mlcp"): + if filename.find("Hadoop") > -1: + os.rename(os.path.join(".mlcp", filename), os.path.join(".mlcp", "mlcp")) + + def load_directory(self, conn, database, data_directory, collections=None, prefix=''): + mlcp_path = self.mlcp_path() + if not mlcp_path: + which_script = "mlcp.sh" + if platform.system() == "Windows": + which_script = "mlcp.bat" + + mlcp_path = os.path.join(".mlcp", "mlcp", "bin", which_script) + + if not os.path.exists(mlcp_path): + 
self.download_mlcp() + + if platform.system() != "Windows": + subprocess.call(["chmod", "+x", mlcp_path]) + + if collections: + collections_command = "-output_collections \"{0}\"".format(",".join(collections)) + else: + collections_command = '' + + command_line = "{0} import -username {1} -password {2} -host {3} -port {4} -database {5} {6} " \ + "-input_file_path {7} -output_uri_replace \"{8},'{9}'\"" + + full_path = os.path.abspath(data_directory) + if platform.system() == "Windows": + full_path = "/" + full_path.replace("\\", "/") + run_line = command_line.format(mlcp_path, conn.auth.username, conn.auth.password, conn.host, + conn.port, database.database_name(), collections_command, + full_path, full_path, prefix) + with os.popen(run_line) as in_file: + for line in in_file: + print(line.rstrip()) + + def mlcp_installed(self): + paths = os.environ["PATH"].split(os.pathsep) + return True in [t.endswith("mlcp/bin") or t.endswith("mlcp\\bin") for t in paths] + + def mlcp_path(self): + if platform.system() == "Windows": + for path in os.environ["PATH"].split(os.pathsep): + full_path = os.path.abspath(os.path.join(path, "mlcp.bat")) + if os.path.exists(full_path): + return full_path + else: + for path in os.environ["PATH"].split(os.pathsep): + full_path = os.path.abspath(os.path.join(path, "mlcp.sh")) + if os.path.exists(full_path): + return full_path + return None + + +class Watcher(): + """ + Watcher will observe a directory and all the files in the director + or its descendants. If any change, it should upload the file to + the appropriate database. 
+ """ + def __init__(self): + pass + + def watch(self, conn, directory): + pass diff --git a/python_api/__init__.py b/python_api/__init__.py new file mode 100644 index 0000000..f32cbcb --- /dev/null +++ b/python_api/__init__.py @@ -0,0 +1,16 @@ +# +# Copyright 2015 MarkLogic Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0# +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +__version__ = "0.0.1" diff --git a/python_api/invirtual.py b/python_api/invirtual.py new file mode 100644 index 0000000..2db27de --- /dev/null +++ b/python_api/invirtual.py @@ -0,0 +1,11 @@ +import sys + +if hasattr(sys, 'real_prefix'): + exit(0) +else: + print("**************************************") + print("Not in virtual environment.") + print("**************************************") + exit(1) + + diff --git a/python_api/marklogic/__init__.py b/python_api/marklogic/__init__.py new file mode 100644 index 0000000..b9a111a --- /dev/null +++ b/python_api/marklogic/__init__.py @@ -0,0 +1,15 @@ +# +# Copyright 2015 MarkLogic Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0# +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/python_api/marklogic/models/__init__.py b/python_api/marklogic/models/__init__.py new file mode 100644 index 0000000..be1b1fe --- /dev/null +++ b/python_api/marklogic/models/__init__.py @@ -0,0 +1,24 @@ +# +# Copyright 2015 MarkLogic Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from marklogic.models.connection import Connection +from marklogic.models.forest import Forest +from marklogic.models.database import Database +from marklogic.models.server import Server, HttpServer, XdbcServer, OdbcServer, WebDAVServer +from marklogic.models.host import Host +from marklogic.models.role import Role +from marklogic.models.user import User +from marklogic.models.privilege import Privilege diff --git a/python_api/marklogic/models/certificate/authority.py b/python_api/marklogic/models/certificate/authority.py new file mode 100644 index 0000000..d31e7cd --- /dev/null +++ b/python_api/marklogic/models/certificate/authority.py @@ -0,0 +1,240 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2015 MarkLogic Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0# +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# File History +# ------------ +# +# Norman Walsh 05/13/2015 Hacked user.py into authority.py +# + +""" +Authority related classes for manipulating Certificate Authorities +""" + +from __future__ import unicode_literals, print_function, absolute_import + +import requests +import json +from marklogic.models.utilities.exceptions import UnexpectedManagementAPIResponse +from marklogic.models.utilities.validators import assert_boolean + +class Authority: + """ + The Authority class encapsulates a MarkLogic representation of + a certificate authority. Certificate authorities are created by + uploading trusted certificates. + """ + def __init__(self, certid): + """ + Create a new certificate authority. Except it doesn't, really. + It just creates a reference to the certificate authority with the + specified ID. There's nothing you can do with a certificate + authority, really. + """ + self._config = {'id': certid} + + def certificate_id(self): + """ + The MarkLogic certificate ID for the certificate authority + + :return: The id + """ + return self._config['certificate-id'] + + def enabled(self): + """ + Is this certificate enabled? + + Certificates that are automatically installed by MarkLogic are + marked "disabled" when they are deleted (rather than actually + removing them). This prevents an upgrade from simply re-installing + them. + + :return: The state of the enabled flag, True or False + """ + return self._config['enabled'] + + def properties(self): + """ + The properties of the certificate, as a python dictionary. 
The + exact properties available depends on the nature of the + certificate authority. + + :return: The certificate authority properties + """ + return self._config['cert'] + + def marshal(self): + """ + Return a flat structure suitable for conversion to JSON or XML. + + :return: A hash of the keys in this object and their values, recursively. + """ + struct = { } + for key in self._config: + struct[key] = self._config[key]; + return struct + + @classmethod + def unmarshal(cls, config): + """ + Construct a new Authority from a flat structure. This method is + principally used to construct an object from a Management API + payload. The configuration passed in is largely assumed to be + valid. + + :param: config: A hash of properties + :return: A newly constructed User object with the specified properties. + """ + result = Authority(config['certificate-id']) + result._config = config + return result + + @classmethod + def create(cls, connection, pem): + """ + Creates a new certificate authority + + Note that this is a class method, you cannot create a certificate + authority except by uploading a PEM-encoded "certificate authority" + certificate. 
+ + :param connection: The connection to a MarkLogic server + :param pem: The PEM-encoded certificate authority certificate + + :return: The Authority object + """ + uri = "http://{0}:{1}/manage/v2/certificate-authorities" \ + .format(connection.host, connection.management_port) + + response = requests.post(uri, data=pem, auth=connection.auth, + headers={"content-type": "text/plain"}) + + if response.status_code not in [200, 201, 204]: + raise UnexpectedManagementAPIResponse(response.text) + + # All well and good, but we need to know what ID was assigned + uri = "http://{0}:{1}{2}/properties" \ + .format(connection.host, connection.management_port, + response.headers['location']) + + response = requests.get(uri, auth=connection.auth, + headers={'accept': 'application/json'}) + + if response.status_code == 200: + result = Authority.unmarshal(json.loads(response.text)) + else: + raise UnexpectedManagementAPIResponse(response.text) + + return result + + def read(self, connection): + """ + Loads the Authority from the MarkLogic server. This will refresh + the properties of the object. + + :param connection: The connection to a MarkLogic server + + :return: The Authority object + """ + auth = Authority.lookup(self._config['certificate-id']) + if auth is None: + return None + else: + self._config = auth._config + return self + + def delete(self, connection): + """ + Deletes the Authority from the MarkLogic server. + + Note: Authorities have no meaningful identity except their + MarkLogic certificate id. After deleting an authority, no such + id exists, so this method returns None. 
+ + :param connection: The connection to a MarkLogic server + + :return: None + """ + uri = "http://{0}:{1}/manage/v2/certificate-authorities/{2}" \ + .format(connection.host, connection.management_port, + self._config['certificate-id']) + + response = requests.delete(uri, auth=connection.auth) + + if (response.status_code not in [200, 204] + and not response.status_code == 404): + raise UnexpectedManagementAPIResponse(response.text) + + return None + + @classmethod + def list(cls, connection, include_names=False): + """ + List all the certificate authorities. + + If `include_names` is `True`, then the values in the list will be + structured values consisting of the certificate ID and the certificate + name separated by a "|". + + :param connection: The connection to a MarkLogic server + :param include_names: Indicates if structured names should be returned. + + :return: A list of certificate authority IDs. + """ + + uri = "http://{0}:{1}/manage/v2/certificate-authorities" \ + .format(connection.host, connection.management_port) + + response = requests.get(uri, auth=connection.auth, + headers={'accept': 'application/json'}) + + if response.status_code != 200: + raise UnexpectedManagementAPIResponse(response.text) + + results = [] + json_doc = json.loads(response.text) + + for item in json_doc['certificate-authorities-default-list']['list-items']['list-item']: + if include_names: + results.append("{0}|{1}".format(item['idref'], item['nameref'])) + else: + results.append(item['idref']) + return results + + @classmethod + def lookup(cls, connection, certid): + """ + Look up an individual certificate by certificate id. 
+ + :param connection: The connection to the MarkLogic database + :param certid: The certificate id + + :return: The Authority object + """ + uri = "http://{0}:{1}/manage/v2/certificate-authorities/{2}/properties" \ + .format(connection.host, connection.management_port, certid) + + response = requests.get(uri, auth=connection.auth, + headers={'accept': 'application/json'}) + + if response.status_code == 200: + result = Authority.unmarshal(json.loads(response.text)) + return result + elif response.status_code == 404: + return None + else: + raise exceptions.UnexpectedManagementAPIResponse(response.text) diff --git a/python_api/marklogic/models/certificate/request.py b/python_api/marklogic/models/certificate/request.py new file mode 100644 index 0000000..426b259 --- /dev/null +++ b/python_api/marklogic/models/certificate/request.py @@ -0,0 +1,294 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2015 MarkLogic Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0# +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# File History +# ------------ +# +# Norman Walsh 05/13/2015 Hacked user.py into request.py +# + +""" +Request related classes for manipulating Certificate Requests +""" + +from __future__ import unicode_literals, print_function, absolute_import + +import json +from marklogic.models.utilities.validators import validate_custom + +class Request: + """ + The Request class encapsulates a MarkLogic representation of + a certificate request. 
+ """ + def __init__(self, version=0, countryName=None, stateOrProvinceName=None, + localityName=None, organizationName=None, + organizationalUnitName=None, emailAddress=None, + v3ext=None): + """ + Create a new certificate request. + + The names of the arguments are taken directly from the X509 + form. If specified, v3ext must be a dictionary. + + The `organizationName` is required. You should fill in as many of + these fields as possible because individual certificate authorities + have specific requirements for which fields must have values. + If a required field is missing, a certificate authority will + typically reject your certificate request with a cryptic error + message that your request is bad. + """ + if organizationName is None: + validate_custom("organizationName is required") + + self._config = { + 'version': version, + } + + subject = { 'organizationName': organizationName } + + if countryName is not None: + subject['countryName'] = countryName + + if stateOrProvinceName is not None: + subject['stateOrProvinceName'] = stateOrProvinceName + + if localityName is not None: + subject['localityName'] = localityName + + if organizationName is not None: + subject['organizationName'] = organizationName + + if organizationalUnitName is not None: + subject['organizationalUnitName'] = organizationalUnitName + + if emailAddress is not None: + subject['emailAddress'] = emailAddress + + self._config['subject'] = subject + + if v3ext is not None: + self._config['v3ext'] = v3ext + + def version(self): + """ + The version. + + :return The current version. + """ + if 'version' in self._config: + return self._config['version'] + return None + + def set_version(self, value): + """ + Set the version. + + :param value: The version. + + :return: The Request object. + """ + self._config['version'] = value + return self + + def countryName(self): + """ + The country. + + :return: The current country. 
+ """ + if 'countryName' in self._config['subject']: + return self._config['subject']['countryName'] + return None + + def set_countryName(self, value): + """ + Set the country. + + :param value: The two character country code (e.g., "US"). + + :return: The Request object. + """ + self._config['subject']['countryName'] = value + return self + + def stateOrProvinceName(self): + """ + The state or privince. + + :return: The current state or province + """ + if 'stateOrProvinceName' in self._config['subject']: + return self._config['subject']['stateOrProvinceName'] + return None + + def set_stateOrProvinceName(self, value): + """ + Set the state province + + :param value: The name of the state or province your server in. + + :return: The Request object + """ + self._config['subject']['stateOrProvinceName'] = value + return self + + def localityName(self): + """ + The locality. + + :return: The current locality. + """ + if 'localityName' in self._config['subject']: + return self._config['subject']['localityName'] + return None + + def set_localityName(self, value): + """ + Set the locality. + + :param value: The city your server in. + + :return: The Request object + """ + self._config['subject']['localityName'] = value + return self + + def organizationName(self): + """ + The organization name. + + :return: The current organization name. + """ + if 'organizationName' in self._config['subject']: + return self._config['subject']['organizationName'] + return None + + def set_organizationName(self, value): + """ + Set the organization name. + + All certificate requests must include an organization name. + + :param value: The organization or company that owns your server. + + :return: The Request object + """ + self._config['subject']['organizationName'] = value + return self + + def organizationalUnitName(self): + """ + The organizational unit name. + + :return: the current organizational unit name. 
+ """ + if 'organizationalUnitName' in self._config['subject']: + return self._config['subject']['organizationalUnitName'] + return None + + def set_organizationalUnitName(self, value): + """ + Set the organizational unit name. + + :param value: The organizational unit that operates your server. + + :return: The Request object + """ + self._config['subject']['organizationalUnitName'] = value + return self + + def emailAddress(self): + """ + The contact email address. + + :return: The current contact email address. + """ + if 'emailAddress' in self._config['subject']: + return self._config['subject']['emailAddress'] + return None + + def set_emailAddress(self, value): + """ + Set the contact email address. + + :param value: The email address to contact regarding your server. + + :return: The Request object + """ + self._config['subject']['emailAddress'] = value + return self + + def v3ext(self): + """ + The X.509v3 extensions. + + :return: The current X.509v3 extensions. + """ + if 'v3ext' in self._config: + return self._config['v3ext'] + return None + + def set_v3ext(self, value): + """ + Set the X.509v3 extensions. + + This value should be a (possibly nested) dictionary, for example: + + ```` + { + "nsCertType": { + "critical": false, + "value": "SSL Server" + }, + "subjectKeyIdentifier": { + "critical": false, + "value": "B2:2C:0C:F8:5E:A7:44:B7" + } + ```` + + :param value: The X.590v3 extensions dictionary + + :return: The Request object + """ + self._config['v3ext'] = value + return self + + def marshal(self): + """ + Return a flat structure suitable for conversion to JSON or XML. + + :return: A hash of the keys in this object and their values, recursively. + """ + struct = { } + for key in self._config: + struct[key] = self._config[key]; + return struct + + @classmethod + def unmarshal(cls, config): + """ + Construct a new Request from a flat structure. This method is + principally used to construct an object from a Management API + payload. 
The configuration passed in is largely assumed to be + valid. + + :param: config: A hash of properties + :return: A newly constructed User object with the specified properties. + """ + result = Request(organizationName="temp") + result._config = config + return result diff --git a/python_api/marklogic/models/certificate/template.py b/python_api/marklogic/models/certificate/template.py new file mode 100644 index 0000000..e382d63 --- /dev/null +++ b/python_api/marklogic/models/certificate/template.py @@ -0,0 +1,584 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2015 MarkLogic Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0# +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# File History +# ------------ +# +# Norman Walsh 05/13/2015 Hacked user.py into template.py +# + +""" +Template related classes for manipulating Certificate Templates +""" + +from __future__ import unicode_literals, print_function, absolute_import + +import requests +import json +from marklogic.models.utilities.exceptions import UnexpectedManagementAPIResponse +from marklogic.models.utilities.validators import assert_type +from marklogic.models.utilities.validators import validate_custom +from marklogic.models.certificate.request import Request + +class Template: + """ + The Template class encapsulates a MarkLogic representation of + a certificate template. + """ + def __init__(self, name, description, cert_request, + key_type="rsa", key_length=None, pass_phrase=None): + """ + Create a new certificate template. 
+ """ + self._config = { + "template-name": name, + "template-description": description, + "req": assert_type(cert_request, Request) + } + + if key_type is not None: + self._config['key-type'] = key_type + + if key_length is not None or pass_phrase is not None: + options = {} + if key_length is not None: + options['key-length'] = key_length + if pass_phrase is not None: + optoins['pass-phrase'] = pass_phrase + self._config['options'] = options + + self.etag = None + + def template_id(self): + """ + The template ID, MarkLogic's internal identifier. + + :return: The template ID. + """ + if 'template-id' in self._config: + return self._config['template-id'] + return None + + def template_name(self): + """ + The template name. + + :return: The current template name. + """ + if 'template-name' in self._config: + return self._config['template-name'] + return None + + def set_template_name(self, value): + """ + Set the template name. + + :param value: The new template name. + + :return: The Template object. + """ + self._config['template-name'] = value + return self + + def template_description(self): + """ + The template description. + + :return: The current template description. + """ + if 'template-description' in self._config: + return self._config['template-description'] + return None + + def set_template_description(self, value): + """ + Set the template description. + + :param value: The new template description. + + :return: The Template object. + """ + self._config['template-description'] = value + return self + + def template_version(self): + """ + The template version. + + :return: The current template version. + """ + if 'template-version' in self._config: + return self._config['template-version'] + return None + + def key_type(self): + """ + The key type. + + :return: The current key type. + """ + if 'key-type' in self._config: + return self._config['key-type'] + return None + + def set_key_type(self, value): + """ + Set the key type. 
+
+        The key type must be `rsa`.
+        """
+        if value != 'rsa':
+            validate_custom("The key-type must be 'rsa'")
+        self._config['key-type'] = value
+        return self
+
+    def key_length(self):
+        """
+        The key length.
+
+        :return: The current key length.
+        """
+        if 'options' in self._config:
+            if 'key-length' in self._config['options']:
+                return self._config['options']['key-length']
+        return None
+
+    def set_key_length(self, value):
+        """
+        Set the key length.
+
+        :param value: The new key length.
+
+        :return: The Template object.
+        """
+        if 'options' in self._config:
+            options = self._config['options']
+        else:
+            options = {}
+
+        options['key-length'] = value
+        self._config['options'] = options
+        return self
+
+    def pass_phrase(self):
+        """
+        The passphrase.
+
+        :return: The current passphrase.
+        """
+        if 'options' in self._config:
+            if 'pass-phrase' in self._config['options']:
+                return self._config['options']['pass-phrase']
+        return None
+
+    def set_pass_phrase(self, value):
+        """
+        Set the passphrase.
+
+        :param value: The new passphrase.
+
+        :return: The Template object.
+        """
+        if 'options' in self._config:
+            options = self._config['options']
+        else:
+            options = {}
+
+        options['pass-phrase'] = value
+        self._config['options'] = options
+        return self
+
+    def options(self):
+        """
+        The template options.
+
+        The options are returned as a Python dictionary. Only the `key-length`
+        and `pass-phrase` options are supported by MarkLogic at this time,
+        but this method returns the entire dictionary.
+
+        :return: The options dictionary.
+        """
+        if 'options' in self._config:
+            return self._config['options']
+        return None
+
+    def set_options(self, value):
+        """
+        Set the template options.
+
+        The options are stored in a Python dictionary. Only the `key-length`
+        and `pass-phrase` options are supported by MarkLogic at this time,
+        but this method allows you to set any dictionary of options you like.
+
+        :param value: A dictionary of options.
+
+        :return: The Template object.
+ """ + self._config['options'] = value + return self + + def req(self): + """ + The certificate request. + + :return: The current certificate request. + """ + if 'req' in self._config: + return self._config['req'] + return None + + def set_req(self, value): + """ + Set the certificate request. + + :param value: The certificate request. + + :return: The Template object. + """ + self._config['req'] = assert_type(value, Request) + return self + + def marshal(self): + """ + Return a flat structure suitable for conversion to JSON or XML. + + :return: A hash of the keys in this object and their values, recursively. + """ + struct = { } + for key in self._config: + if key == "req": + struct[key] = self._config[key]._config + else: + struct[key] = self._config[key]; + return struct + + @classmethod + def unmarshal(cls, config): + """ + Construct a new Template from a flat structure. This method is + principally used to construct an object from a Management API + payload. The configuration passed in is largely assumed to be + valid. + + :param: config: A hash of properties + :return: A newly constructed User object with the specified properties. + """ + result = Template("temp","temp", Request(organizationName="temp")) + result._config = config + + if 'req' in result._config: + result._config['req'] = Request.unmarshal(result._config['req']) + + return result + + def create(self, connection): + """ + Creates the certificate template on the MarkLogic server. 
+
+        :param connection: The connection to a MarkLogic server
+        :return: The Template object
+        """
+        uri = "http://{0}:{1}/manage/v2/certificate-templates" \
+              .format(connection.host, connection.management_port)
+
+        struct = self.marshal()
+        response = requests.post(uri, json=struct, auth=connection.auth)
+
+        if response.status_code not in [200, 201, 204]:
+            raise UnexpectedManagementAPIResponse(response.text)
+
+        # All well and good, but we need to know what ID was assigned
+        uri = "http://{0}:{1}{2}/properties" \
+              .format(connection.host, connection.management_port,
+                      response.headers['location'])
+
+        response = requests.get(uri, auth=connection.auth,
+                                headers={'accept': 'application/json'})
+
+        if response.status_code == 200:
+            result = Template.unmarshal(json.loads(response.text))
+            self._config = result._config
+        else:
+            raise UnexpectedManagementAPIResponse(response.text)
+
+        return self
+
+    def read(self, connection):
+        """
+        Loads the Template from the MarkLogic server. This will refresh
+        the properties of the object.
+
+        :param connection: The connection to a MarkLogic server
+
+        :return: The Template object
+        """
+        if self.template_id() is None:
+            validate_custom("Cannot read an unsaved template")
+
+        temp = Template.lookup(connection, self.template_id())
+
+        if temp is None:
+            return None
+        else:
+            self._config = temp._config
+            return self
+
+    def update(self, connection):
+        """
+        Updates the certificate template on the MarkLogic server.
+ + :param connection: The connection to a MarkLogic server + :return: The Template object + """ + uri = "http://{0}:{1}/manage/v2/certificate-templates/{2}/properties" \ + .format(connection.host, connection.management_port,self.template_id()) + + payload = self.marshal() + del payload['template-version'] + del payload['template-id'] + response = requests.put(uri, json=payload, auth=connection.auth) + + if response.status_code not in [200, 204]: + raise UnexpectedManagementAPIResponse(response.text) + + return self + + def delete(self, connection): + """ + Deletes the Template from the MarkLogic server. + + :param connection: The connection to a MarkLogic server + + :return: None + """ + uri = "http://{0}:{1}/manage/v2/certificate-templates/{2}" \ + .format(connection.host, connection.management_port, + self.template_id()) + + response = requests.delete(uri, auth=connection.auth) + + if (response.status_code not in [200, 204] + and not response.status_code == 404): + raise UnexpectedManagementAPIResponse(response.text) + + del self._config['template-id'] + return self + + # ============================================================ + + def generate_template_certificate_authority(self, connection, valid_for): + """ + Attempts to generate an template certificate authority. + + :param valid_for: The number of days that the template should be valid. + + :return: The Template object. 
+ """ + payload = { + 'operation': 'generate-template-certificate-authority', + 'valid-for': assert_type(valid_for, int) + } + + uri = "http://{0}:{1}/manage/v2/certificate-templates/{2}" \ + .format(connection.host, connection.management_port, + self.template_id()) + + response = requests.post(uri, json=payload, auth=connection.auth, + headers={'accept': 'application/json'}) + + if response.status_code != 201: + raise UnexpectedManagementAPIResponse(response.text) + + return self + + def generate_temporary_certificate(self, connection, valid_for, + common_name, dns_name, ip_addr, + if_necessary=True): + """ + Attempts to generate a temporary certificate. + + If `if_necessary` is true, the server will only generate a new + temporary certificate if it does not already have one for the + specified server. + + :param valid_for: The number of days that the template should be valid. + :param common_name: The common name for the certificate ("Example Corp") + :param dns_name: The DNS name for the cert ("example.com") + :param ip_addr: The IP address of the server + :param if_necessary: Only generate the cert if it's necessary + + :return: The Template object. + """ + payload = { + 'operation': 'generate-temporary-certificate', + 'valid-for': assert_type(valid_for, int), + 'common-name': common_name, + 'dns-name': dns_name, + 'ip-addr': ip_addr, + 'if-necessary': 'true' if if_necessary else 'false' + } + + uri = "http://{0}:{1}/manage/v2/certificate-templates/{2}" \ + .format(connection.host, connection.management_port, + self.template_id()) + + response = requests.post(uri, json=payload, auth=connection.auth, + headers={'accept': 'application/json'}) + + if response.status_code != 201: + raise UnexpectedManagementAPIResponse(response.text) + + return self + + def get_certificate(self, connection, + common_name, dns_name=None, ip_addr=None): + """ + Attempts to get the relevant certificate. 
+
+        :param common_name: The common name for the certificate ("Example Corp")
+        :param dns_name: The DNS name for the cert ("example.com")
+        :param ip_addr: The IP address of the server
+
+        :return: The certificate or None if it isn't found.
+        """
+        payload = {
+            'operation': 'get-certificate',
+            'common-name': common_name
+        }
+
+        if dns_name is not None:
+            payload['dns-name'] = dns_name
+
+        if ip_addr is not None:
+            payload['ip-addr'] = ip_addr
+
+        uri = "http://{0}:{1}/manage/v2/certificate-templates/{2}" \
+              .format(connection.host, connection.management_port,
+                      self.template_id())
+
+        response = requests.post(uri, json=payload, auth=connection.auth,
+                                 headers={'accept': 'application/json'})
+
+        if response.status_code == 200:
+            return json.loads(response.text)
+        elif response.status_code == 404:
+            return None
+        else:
+            raise UnexpectedManagementAPIResponse(response.text)
+
+    def get_certificates_for_template(self, connection):
+        """
+        Get a list of the certificates for this template.
+
+        :return: The certificate list.
+ """ + payload = { + 'operation': 'get-certificates-for-template', + } + + uri = "http://{0}:{1}/manage/v2/certificate-templates/{2}" \ + .format(connection.host, connection.management_port, + self.template_id()) + + response = requests.post(uri, json=payload, auth=connection.auth, + headers={'accept': 'application/json'}) + + if response.status_code == 200: + return json.loads(response.text) + elif response.status_code == 404: + return None + else: + raise UnexpectedManagementAPIResponse(response.text) + + def get_pending_certificate_request(self, connection, + common_name, dns_name=None, ip_addr=None): + pass + + def insert_host_certificates(self, connection, certs, pkeys): + pass + + def need_certificate(self, connection, + common_name, dns_name=None, ip_addr=None): + pass + + def generate_certificate_request(self, connection, + common_name, dns_name=None, ip_addr=None): + pass + + def get_template_certificate_authority(self, connection): + pass + + + + # ============================================================ + + @classmethod + def list(cls, connection, include_names=False): + """ + List all the certificate templates. + + If `include_names` is `True`, then the values in the list will be + structured values consisting of the template ID and the template + name separated by a "|". + + :param connection: The connection to a MarkLogic server + :param include_names: Indicates if structured names should be returned. + + :return: A list of certificate template IDs. 
+ """ + + uri = "http://{0}:{1}/manage/v2/certificate-templates" \ + .format(connection.host, connection.management_port) + + response = requests.get(uri, auth=connection.auth, + headers={'accept': 'application/json'}) + + if response.status_code != 200: + raise UnexpectedManagementAPIResponse(response.text) + + results = [] + json_doc = json.loads(response.text) + + for item in json_doc['certificate-templates-default-list']['list-items']['list-item']: + if include_names: + results.append("{0}|{1}".format(item['idref'], item['nameref'])) + else: + results.append(item['idref']) + return results + + @classmethod + def lookup(cls, connection, tempid): + """ + Look up an individual certificate template by template id. + + :param connection: The connection to the MarkLogic database + :param tempid: The certificate template id + + :return: The Template object + """ + uri = "http://{0}:{1}/manage/v2/certificate-templates/{2}/properties" \ + .format(connection.host, connection.management_port, tempid) + + response = requests.get(uri, auth=connection.auth, + headers={'accept': 'application/json'}) + + if response.status_code == 200: + result = Template.unmarshal(json.loads(response.text)) + return result + elif response.status_code == 404: + return None + else: + raise UnexpectedManagementAPIResponse(response.text) diff --git a/python_api/marklogic/models/connection.py b/python_api/marklogic/models/connection.py new file mode 100644 index 0000000..676085c --- /dev/null +++ b/python_api/marklogic/models/connection.py @@ -0,0 +1,43 @@ +# +# Copyright 2015 MarkLogic Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# File History +# ------------ +# +# Paul Hoehne 03/01/2015 Initial development +# + +from requests.auth import HTTPDigestAuth + +""" +Connection related classes and method to connect to MarkLogic. +""" + +class Connection: + """ + The connection class encapsulates the information to connect to + a MarkLogic server. The server (for the purpose of loading data + or creating databases, will listen on ports 8000 and 8002. + It depends on the database auth class from the requests package. + """ + def __init__(self, host, auth, port=8000, management_port=8002): + self.host = host + self.port = port + self.management_port = management_port + self.auth = auth + + @classmethod + def make_connection(cls, host, username, password): + return Connection(host, HTTPDigestAuth(username, password)) \ No newline at end of file diff --git a/python_api/marklogic/models/database/__init__.py b/python_api/marklogic/models/database/__init__.py new file mode 100644 index 0000000..030bf9c --- /dev/null +++ b/python_api/marklogic/models/database/__init__.py @@ -0,0 +1,4023 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2015 MarkLogic Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0# +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# File History +# ------------ +# +# Paul Hoehne 03/01/2015 Initial development +# Paul Hoehne 03/08/2014 Added support for field indexes + +""" +Database related classes for manipulating MarkLogic databases +""" + +from __future__ import unicode_literals, print_function, absolute_import + +import sys + +import requests +import json +import logging +from marklogic.models.forest import Forest +from marklogic.models.utilities import files +from marklogic.models.utilities.utilities import PropertyLists +from marklogic.models.utilities.validators import * +from marklogic.models.utilities.exceptions import * +from marklogic.models.database.fragment import FragmentRoot, FragmentParent +from marklogic.models.database.index import ElementRangeIndex, AttributeRangeIndex +from marklogic.models.database.index import PathRangeIndex, FieldRangeIndex +from marklogic.models.database.index import GeospatialElementIndex +from marklogic.models.database.index import GeospatialPathIndex +from marklogic.models.database.index import GeospatialElementChildIndex +from marklogic.models.database.index import GeospatialElementPairIndex +from marklogic.models.database.index import GeospatialElementAttributePairIndex +from marklogic.models.database.mergeblackout import MergeBlackout +from marklogic.models.database.mergeblackout import MergeBlackoutRecurringDuration +from marklogic.models.database.mergeblackout import MergeBlackoutRecurringStartEnd +from marklogic.models.database.mergeblackout import MergeBlackoutRecurringAllDay +from marklogic.models.database.mergeblackout import 
MergeBlackoutOneTimeDuration +from marklogic.models.database.mergeblackout import MergeBlackoutOneTimeStartEnd +from marklogic.models.database.scheduledbackup import ScheduledDatabaseBackup, ScheduledDatabaseBackupOnce +from marklogic.models.database.scheduledbackup import ScheduledDatabaseBackupWeekly +from marklogic.models.database.backup import DatabaseBackup, DatabaseRestore +from marklogic.models.database.path import PathNamespace +from marklogic.models.database.lexicon import ElementWordLexicon +from marklogic.models.database.lexicon import AttributeWordLexicon +from marklogic.models.database.namelist import NameList +from marklogic.models.database.through import PhraseThrough, PhraseAround +from marklogic.models.database.through import ElementWordQueryThrough +from marklogic.models.database.ruleset import RuleSet +from marklogic.models.database.field import Field, RootField, PathField, FieldPath, WordQuery, IncludedElement, ExcludedElement + +class Database(PropertyLists): + """ + The Database class encapsulates a MarkLogic database. It provides + methods to set/get database attributes. The use of methods will + allow IDEs with tooling to provide auto-completion hints. + """ + + def __init__(self, name, hostname='$ML-LOCALHOST'): + """ + Initialize the database object to either create a database or + lookup the existing database information + + :param name: The database name + :param hostname: Optional host name, used if forests are created + + :return: The database object with default data + """ + self._config = { + 'database-name': name, + 'forest': [ + name + '-Forest-001' + ], + 'security-database': 'Security', + 'schema-database': 'Schemas', + 'enabled': True, + 'language': 'en' + } + self.logger = logging.getLogger("marklogic") + self.name = name # separate so we can rename databases + self.etag = None + self.hostname = hostname + + def set_database_name(self, name): + """ + Sets the database name. 
+ + :param name: The database name + + :return: The database object + """ + self._config['database-name'] = name + return self + + def database_name(self): + """ + The database name. + + :return: The name + """ + if 'database-name' in self._config: + return self._config['database-name'] + return None + + def set_enabled(self, enabled=True): + """ + Set the flag to enable or disable a database. + + :param enabled: The enable status + + :return: the database object + """ + validate_boolean(enabled) + self._config['enabled'] = enabled + return self + + def enabled(self): + """ + Returns the enable status + + :return: The database enable status + """ + if 'enabled' in self._config: + return self._config['enabled'] + return None + + def set_security_database_name(self, db='Security'): + """ + Sets the security database. + + This is the name of the database + in which security related documents will be stored. + + :param db: The name of the security database + + :return: The database object + """ + self._config['security-database'] = db + return self + + def security_database_name(self): + """ + The security database. + + This is the name of the database + in which security related documents will be stored. + + :return: The security database name + """ + if 'security-database' in self._config: + return self._config['security-database'] + return None + + def set_triggers_database_name(self, db='Triggers'): + """ + Sets the database that contains triggers. + + This is the name of the database + in which triggers will be stored. + + :param db: The name of the triggers database + + :return: The database object + """ + self._config['triggers-database'] = db + return self + + def triggers_database_name(self): + """ + The database that contains triggers. + + This is the name of the database + in which triggers will be stored. 
+ + :return: The name of the triggers database + """ + if 'triggers-database' in self._config: + return self._config['triggers-database'] + return None + + def set_schema_database_name(self, db='Schemas'): + """ + Sets the database that contains schemas. + + This is the name of the database + in which schemas will be stored. + + :param db: The name of the schema database + + :return: The database object + """ + self._config['schema-database'] = db + return self + + def schema_database_name(self): + """ + The database that contains schemas. + + This is the name of the database + in which schemas will be stored. + + :return: The name of the schema database + """ + if 'schema-database' in self._config: + return self._config['schema-database'] + return None + + def set_forest_names(self, forests): + """ + Sets the names of the forests of the database. + + If a database is created from this database object, the named + forests will be attached to the database. The forests will be + created if necessary. + + :param forests: A list of forest names + + :return: The database object + """ + validate_list_of_strings(forests) + self._config['forest'] = forests + return self + + def add_forest_name(self, forest): + """ + Add a new forest name to the names of the forests of the database. + + If a database is created from this database object, the named + forests will be attached to the database. The forests will be + created if necessary. + + :param forest: The forest name + + :return: The database object + """ + return self.add_to_property_list('forest', forest) + + def set_forest_names(self, forests): + """ + Set the names of the forests of the database. + + If a database is created from this database object, the named + forests will be attached to the database. The forests will be + created if necessary. 
+ + :param forest: The forest name + + :return: The database object + """ + return self.set_property_list('forest', forests) + + def forest_names(self): + """ + The names of the forests attached to the database. + + :return: The attached forests + """ + if 'forest' in self._config: + return self._config['forest'] + return None + + def set_language(self, language): + """ + Sets the default language assumed for content (if xml:lang + encoding is absent) + + *language* specifies the default language for content + in this database. Any content without an ``xml:lang`` + attribute will be indexed in the language specifed + here. + + :param language: The language abbreviation + + :return: The database object + """ + validate_string(language) + self._config['language'] = language + return self + + def language(self): + """ + The default language assumed for content (if xml:lang + encoding is absent) + + *language* specifies the default language for content + in this database. Any content without an ``xml:lang`` + attribute will be indexed in the language specifed + here. + + :return: The default language for the database. + """ + if 'language' in self._config: + return self._config['language'] + return None + + def set_stemmed_searches(self, which='basic'): + """ + Enable stemmed word searches (slower document loads + and larger database files). + + Stemmed searches specifies whether index terms should + be included in the database files to support stemming. + When set to ``basic``, basic stemming is enabled, and + the shortest stem of each word is indexed. When set + to ``advanced``, all stems of each word are indexed. + When set to ``decompounding``, all stems are indexed, + and smaller component words of large compound words + are also indexed. Each successive level of stemming + improves recall of word searches, but also causes slower + document loads and larger database files. Use ``off`` + to disable stemming. 
+ + :param which: The stemmed search option + + :return: The database object + """ + validate_stemmed_searches_type(which) + self._config['stemmed-searches'] = which + return self + + def stemmed_searches(self): + """ + Returns the type of stemming currently associated with the database. + + See :meth:`set_stemmed_searches`. + + :return: The type of stemmed search + """ + if 'stemmed-searches' in self._config: + return self._config['stemmed-searches'] + return None + + def set_word_searches(self, enabled=False): + """ + Sets enable unstemmed word searches (slower document loads + and larger database files). + + *word searches* specifies whether index terms should + be included in the database files to support fast word + searches. When this parameter is true, word searches + are faster, but document loading is slower and the + database files are larger. + + :param enabled: Enable stemmed word searches + + :return: The database object + """ + validate_boolean(enabled) + self._config['word-searches'] = enabled + return self + + def word_searches(self): + """ + Enable unstemmed word searches (slower document loads + and larger database files). + + *word searches* specifies whether index terms should + be included in the database files to support fast word + searches. When this parameter is true, word searches + are faster, but document loading is slower and the + database files are larger. + + :return: Stemmed word searches enabled + """ + if 'word-searches' in self._config: + return self._config['word-searches'] + return None + + def set_word_positions(self, enabled=False): + """ + Sets index word positions for faster phrase and near searches + (slower document loads and larger database files). + + *word positions* specifies whether index data should + be included in the database files to enable proximity + searches (``cts:near-query``). 
When this parameter + is true, positional searches are possible, but document + loading is slower and the database files are larger. + + :param enabled: Enable searching on word positions + + :return: The database object + """ + validate_boolean(enabled) + self._config['word-positions'] = enabled + return self + + def word_positions(self): + """ + Index word positions for faster phrase and near searches + (slower document loads and larger database files). + + *word positions* specifies whether index data should + be included in the database files to enable proximity + searches (``cts:near-query``). When this parameter + is true, positional searches are possible, but document + loading is slower and the database files are larger. + + :return: Word positions are enabled + """ + if 'word-positions' in self._config: + return self._config['word-positions'] + return None + + def set_fast_phrase_searches(self, enabled=True): + """ + Sets enable faster phrase searches (slower document loads + and larger database files). + + *fast phrase searches* specifies whether index terms + should be included in the database files to support + fast phrase searches. When this parameter is true, + phrase searches are faster, but document loading is + slower and the database files are larger. + + :param enabled: Enable faster phrase searching + + :return: The database object + """ + validate_boolean(enabled) + self._config['fast-phrase-searches'] = enabled + return self + + def fast_phrase_searches(self): + """ + Enable faster phrase searches (slower document loads + and larger database files). + + *fast phrase searches* specifies whether index terms + should be included in the database files to support + fast phrase searches. When this parameter is true, + phrase searches are faster, but document loading is + slower and the database files are larger. 
+ + :return: Fast phrase searches enabled + """ + if 'fast-phrase-searches' in self._config: + return self._config['fast-phrase-searches'] + return None + + def set_fast_reverse_searches(self, enabled=True): + """ + Sets enable faster reverse searches (slower document loads + and larger database files). + + *fast reverse searches* (valid alerting license key + required) specifies whether index terms should be included + in the database files to support fast reverse searches. + When this parameter is true, cts:reverse-query searches + are faster, but document loading is slower and the + database files are larger. + + :param enabled: Faster reverse searches + + :return: The database object + """ + validate_boolean(enabled) + self._config['fast-reverse-searches'] = enabled + return self + + def fast_reverse_searches(self): + """ + Enable faster reverse searches (slower document loads + and larger database files). + + *fast reverse searches* (valid alerting license key + required) specifies whether index terms should be included + in the database files to support fast reverse searches. + When this parameter is true, cts:reverse-query searches + are faster, but document loading is slower and the + database files are larger. + + :return: Fast reverse searches enabled + """ + if 'fast-reverse-searches' in self._config: + return self._config['fast-reverse-searches'] + return None + + def set_triple_index(self, enabled=False): + """ + Sets enable the RDF triple index (slower document loads + and larger database files). + + *triple index* (valid semantics license key required) + specifies whether index terms should be included in + the database files to support SPARQL execution over + RDF triples. When this parameter is true, sem:sparql() + can be used, but document loading is slower and the + database files are larger. 
+ + :param enabled: Enable the triple index + + :return: The database object + """ + validate_boolean(enabled) + self._config['triple-index'] = enabled + return self + + def triple_index(self): + """ + Enable the RDF triple index (slower document loads + and larger database files). + + *triple index* (valid semantics license key required) + specifies whether index terms should be included in + the database files to support SPARQL execution over + RDF triples. When this parameter is true, sem:sparql() + can be used, but document loading is slower and the + database files are larger. + + :return: The triple index enabled + """ + if 'triple-index' in self._config: + return self._config['triple-index'] + return None + + def set_triple_positions(self, enabled=False): + """ + Sets index triple positions for faster near searches involving + cts:triple-range-query (slower document loads and larger + database files). + + *triple positions* specifies whether index data is + included which speeds up the performance of proximity + queries that use the ``cts:triple-range-query`` function. + Triple positions also improve the accuracy of the ``item-frequency`` + option of ``cts:triples``. + + :param enabled: Enable triple positions + + :return: The database object + """ + validate_boolean(enabled) + self._config['triple-positions'] = enabled + return self + + def triple_positions(self): + """ + Index triple positions for faster near searches involving + cts:triple-range-query (slower document loads and larger + database files). + + *triple positions* specifies whether index data is + included which speeds up the performance of proximity + queries that use the ``cts:triple-range-query`` function. + Triple positions also improve the accuracy of the ``item-frequency`` + option of ``cts:triples``. 
+ + :return: Triple positions enabled + """ + if 'triple-positions' in self._config: + return self._config['triple-positions'] + return None + + def set_fast_case_sensitive_searches(self, enabled=True): + """ + Sets enable faster case sensitive searches (slower document + loads and larger database files). + + *fast case sensitive searches* specifies whether index + terms should be included in the database files to support + fast case-sensitive searches. When this parameter is + true, case-sensitive searches are faster, but document + loading is slower and the database files are larger. + + :param enabled: Enable faster case sensitive searches + + :return: The database object + """ + validate_boolean(enabled) + self._config['fast-case-sensitive-searches'] = enabled + return self + + def fast_case_sensitive_searches(self): + """ + Enable faster case sensitive searches (slower document + loads and larger database files). + + *fast case sensitive searches* specifies whether index + terms should be included in the database files to support + fast case-sensitive searches. When this parameter is + true, case-sensitive searches are faster, but document + loading is slower and the database files are larger. + + :return: Fast case sensitive searches enabled + """ + if 'fast-case-sensitive-searches' in self._config: + return self._config['fast-case-sensitive-searches'] + return None + + def set_fast_diacritic_sensitive_searches(self, enabled=True): + """ + Sets enable faster diacritic sensitive searches (slower + document loads and larger database files). + + *fast diacritic sensitive searches* specifies whether + index terms should be included in the database files + to support fast diacritic-sensitive searches. When + this parameter is true, diacritic-sensitive searches + are faster, but document loading is slower and the + database files are larger. + + :param enabled: Fast diacritic sensitive searches enabled. 
+ + :return: The database object + """ + validate_boolean(enabled) + self._config['fast-diacritic-sensitive-searches'] = enabled + return self + + def fast_diacritic_sensitive_searches(self): + """ + Enable faster diacritic sensitive searches (slower + document loads and larger database files). + + *fast diacritic sensitive searches* specifies whether + index terms should be included in the database files + to support fast diacritic-sensitive searches. When + this parameter is true, diacritic-sensitive searches + are faster, but document loading is slower and the + database files are larger. + + :return: Fast diacritic sensitive searches enabled + """ + if 'fast-diacritic-sensitive-searches' in self._config: + return self._config['fast-diacritic-sensitive-searches'] + return None + + def set_fast_element_word_searches(self, enabled=True): + """ + Sets enable faster element-word searches (slower document + loads and larger database files). + + *fast element word searches* specifies whether index + terms should be included in the database files to support + fast element-word searches. When this parameter is + true, element-word searches are faster, but document + loading is slower and the database files are larger. + + :param enabled: Enable fast element word searches + + :return: The database object + """ + validate_boolean(enabled) + self._config['fast-element-word-searches'] = enabled + return self + + def fast_element_word_searches(self): + """ + Enable faster element-word searches (slower document + loads and larger database files). + + *fast element word searches* specifies whether index + terms should be included in the database files to support + fast element-word searches. When this parameter is + true, element-word searches are faster, but document + loading is slower and the database files are larger. 
+
+ :return: Fast element word searches enabled
+ """
+ if 'fast-element-word-searches' in self._config:
+ return self._config['fast-element-word-searches']
+ return None
+
+ def set_element_word_positions(self, enabled=False):
+ """
+ Sets index element word positions for faster element-based
+ phrase and near searches (slower document loads and
+ larger database files).
+
+ *element word positions* specifies whether index data
+ should be included in the database files to enable
+ proximity searches (``cts:near-query``) within specific
+ XML elements or JSON properties. You must also enable
+ *word positions* in order to perform element position
+ searches. When this parameter is true, positional searches
+ are possible within an XML element or JSON property,
+ but document loading is slower and the database files
+ are larger.
+
+ :param enabled: Enable element word positions
+
+ :return: The database object
+ """
+ validate_boolean(enabled)
+ self._config['element-word-positions'] = enabled
+ return self
+
+ def element_word_positions(self):
+ """
+ Index element word positions for faster element-based
+ phrase and near searches (slower document loads and
+ larger database files).
+
+ *element word positions* specifies whether index data
+ should be included in the database files to enable
+ proximity searches (``cts:near-query``) within specific
+ XML elements or JSON properties. You must also enable
+ *word positions* in order to perform element position
+ searches. When this parameter is true, positional searches
+ are possible within an XML element or JSON property,
+ but document loading is slower and the database files
+ are larger.
+
+ :return: Element word positions enabled
+ """
+ if 'element-word-positions' in self._config:
+ return self._config['element-word-positions']
+ return None
+
+ def set_fast_element_phrase_searches(self, enabled=True):
+ """
+ Sets enable faster element phrase searches (slower document
+ loads and larger database files).
+ + *fast element phrase searches* specifies whether index + terms should be included in the database files to enable + fast element-phrase searches. When this parameter is + true, element-phrase searches are faster, but document + loading is slower and the database files are larger. + + :param enabled: Enable fast element phrase searches + + :return: The database object + """ + validate_boolean(enabled) + self._config['fast-element-phrase-searches'] = enabled + return self + + def fast_element_phrase_searches(self): + """ + Enable faster element phrase searches (slower document + loads and larger database files). + + *fast element phrase searches* specifies whether index + terms should be included in the database files to enable + fast element-phrase searches. When this parameter is + true, element-phrase searches are faster, but document + loading is slower and the database files are larger. + + :return: Fast element phrase searches enabled + """ + if 'fast-element-phrase-searches' in self._config: + return self._config['fast-element-phrase-searches'] + return None + + def set_element_value_positions(self, enabled=False): + """ + Sets index element value positions for faster near searches + involving element-value-query (slower document loads + and larger database files). + + *element value positions* specifies whether index data + is included which speeds up the performance of proximity + queries that use the ``cts:element-value-query`` function. + Turn this index off if you are not interested in proximity + queries and if you want to conserve disk space and + decrease loading time. + + :param enabled: Enable element value positions + + :return: The database object + """ + validate_boolean(enabled) + self._config['element-value-positions'] = enabled + return self + + def element_value_positions(self): + """ + Index element value positions for faster near searches + involving element-value-query (slower document loads + and larger database files). 
+ + *element value positions* specifies whether index data + is included which speeds up the performance of proximity + queries that use the ``cts:element-value-query`` function. + Turn this index off if you are not interested in proximity + queries and if you want to conserve disk space and + decrease loading time. + + :return: Element value positions enabled + """ + if 'element-value-positions' in self._config: + return self._config['element-value-positions'] + return None + + def set_attribute_value_positions(self, enabled=False): + """ + Sets index attribute value positions for faster near searches + involving element-attribute-value-query (slower document + loads and larger database files). + + *attribute value positions* specifies whether index + data is included which speeds up the performance of + proximity queries that use the ``cts:element-attribute-value-query`` + function. Turn this index off if you are not interested + in proximity queries and if you want to conserve disk + space and decrease loading time. + + :param enabled: Attribute value positions + + :return: The database object + """ + validate_boolean(enabled) + self._config['attribute-value-positions'] = enabled + return self + + def attribute_value_positions(self): + """ + Index attribute value positions for faster near searches + involving element-attribute-value-query (slower document + loads and larger database files). + + *attribute value positions* specifies whether index + data is included which speeds up the performance of + proximity queries that use the ``cts:element-attribute-value-query`` + function. Turn this index off if you are not interested + in proximity queries and if you want to conserve disk + space and decrease loading time. 
+ + :return: Attribute value positions enabled + """ + if 'attribute-value-positions' in self._config: + return self._config['attribute-value-positions'] + return None + + def set_field_value_searches(self, enabled=False): + """ + Sets index field values for faster searches involving field-value-query + (slower document loads and larger database files). + + *field value searches* specifies whether index data + is included which speeds up the performance of field + value queries that use the ``cts:field-value-query`` + function. Turn this index off if you are not interested + in field value queries and if you want to conserve + disk space and decrease loading time. + + :param enabled: Field value searches + + :return: The database object + """ + validate_boolean(enabled) + self._config['field-value-searches'] = enabled + return self + + def field_value_searches(self): + """ + Index field values for faster searches involving field-value-query + (slower document loads and larger database files). + + *field value searches* specifies whether index data + is included which speeds up the performance of field + value queries that use the ``cts:field-value-query`` + function. Turn this index off if you are not interested + in field value queries and if you want to conserve + disk space and decrease loading time. + + :return: Field value searches enabled + """ + if 'field-value-searches' in self._config: + return self._config['field-value-searches'] + return None + + def set_field_value_positions(self, enabled=False): + """ + Sets index field value positions for faster near searches + involving field-value-query (slower document loads + and larger database files). + + *field value positions* specifies whether index data + is included which speeds up the performance of proximity + queries that use the ``cts:field-value-query`` function. + Turn this index off if you are not interested in proximity + queries and if you want to conserve disk space and + decrease loading time. 
+ + :param enabled: Field value positions + + :return: The database object + """ + validate_boolean(enabled) + self._config['field-value-positions'] = enabled + return self + + def field_value_positions(self): + """ + Index field value positions for faster near searches + involving field-value-query (slower document loads + and larger database files). + + *field value positions* specifies whether index data + is included which speeds up the performance of proximity + queries that use the ``cts:field-value-query`` function. + Turn this index off if you are not interested in proximity + queries and if you want to conserve disk space and + decrease loading time. + + :return: Field value positions enabled + """ + if 'field-value-positions' in self._config: + return self._config['field-value-positions'] + return None + + def set_three_character_searches(self, enabled=False): + """ + Sets enable wildcard searches and faster character-based + XQuery predicates using three or more characters (slower + document loads and larger database files). + + *three character searches* specifies whether indexes + should be created to enable wildcard searches where + the search pattern contains three or more consecutive + non-wildcard characters (for example, abc*). When combined + with a codepoint *word lexicon*, speeds the performance + of any wildcard search (including searches with fewer + than three consecutive non-wildcard characters). MarkLogic + recommends combining the *three character search* index + with a codepoint collation *word lexicon*. When this + parameter is true, character searches are faster, but + document loading is slower and the database files are + larger. 
+ + :param enabled: Three character wildcard searches + + :return: The database object + """ + validate_boolean(enabled) + self._config['three-character-searches'] = enabled + return self + + def three_character_searches(self): + """ + Enable wildcard searches and faster character-based + XQuery predicates using three or more characters (slower + document loads and larger database files). + + *three character searches* specifies whether indexes + should be created to enable wildcard searches where + the search pattern contains three or more consecutive + non-wildcard characters (for example, abc*). When combined + with a codepoint *word lexicon*, speeds the performance + of any wildcard search (including searches with fewer + than three consecutive non-wildcard characters). MarkLogic + recommends combining the *three character search* index + with a codepoint collation *word lexicon*. When this + parameter is true, character searches are faster, but + document loading is slower and the database files are + larger. + + :return: Three character searches enabled + """ + if 'three-character-searches' in self._config: + return self._config['three-character-searches'] + return None + + def set_three_character_word_positions(self, enabled=False): + """ + Sets index word positions for three-character searches only + when three-character-searches are enabled (slower document + loads and larger database files). + + *three character word positions* specifies whether + index data should be included in the database files + to enable proximity searches (``cts:near-query``) within + wildcard queries. You must also enable *three character + searches* in order to perform wildcard position searches. + When this parameter is true, positional searches are + possible within a wildcard query, but document loading + is slower and the database files are larger. 
+ + :param enabled: Three character word positions + + :return: The database object + """ + validate_boolean(enabled) + self._config['three-character-word-positions'] = enabled + return self + + def three_character_word_positions(self): + """ + Index word positions for three-character searches only + when three-character-searches are enabled (slower document + loads and larger database files). + + *three character word positions* specifies whether + index data should be included in the database files + to enable proximity searches (``cts:near-query``) within + wildcard queries. You must also enable *three character + searches* in order to perform wildcard position searches. + When this parameter is true, positional searches are + possible within a wildcard query, but document loading + is slower and the database files are larger. + + :return: Three character word positions enabled + """ + if 'three-character-word-positions' in self._config: + return self._config['three-character-word-positions'] + return None + + def set_fast_element_character_searches(self, enabled=False): + """ + Sets enable element wildcard searches and element-character-based + XQuery predicates (slower document loads and larger + database files). + + *fast element character searches* specifies whether + index terms should be included in the database files + to enable element wildcard searches and faster character-based + XQuery predicates. When this parameter is true, element-character + searches are faster, but document loading is slower + and the database files are larger. + + :param enabled: Fast element character searches + + :return: The database object + """ + validate_boolean(enabled) + self._config['fast-element-character-searches'] = enabled + return self + + def fast_element_character_searches(self): + """ + Enable element wildcard searches and element-character-based + XQuery predicates (slower document loads and larger + database files). 
+ + *fast element character searches* specifies whether + index terms should be included in the database files + to enable element wildcard searches and faster character-based + XQuery predicates. When this parameter is true, element-character + searches are faster, but document loading is slower + and the database files are larger. + + :return: Fast element character searches + """ + if 'fast-element-character-searches' in self._config: + return self._config['fast-element-character-searches'] + return None + + def set_trailing_wildcard_searches(self, enabled=False): + """ + Sets enable trailing wildcard searches (slower document + loads and larger database files). + + *trailing wildcard searches* specifies whether indexes + should be created to enable wildcard searches where + the search pattern contains one or more consecutive + non-wildcard characters at the beginning of the word, + with the wildcard at the end of the word (for example, + abc*). When this parameter is true, character searches + are faster, but document loading is slower and the + database files are larger. + + :param enabled: Wild card searches enabled + + :return: The database object + """ + validate_boolean(enabled) + self._config['trailing-wildcard-searches'] = enabled + return self + + def trailing_wildcard_searches(self): + """ + Enable trailing wildcard searches (slower document + loads and larger database files). + + *trailing wildcard searches* specifies whether indexes + should be created to enable wildcard searches where + the search pattern contains one or more consecutive + non-wildcard characters at the beginning of the word, + with the wildcard at the end of the word (for example, + abc*). When this parameter is true, character searches + are faster, but document loading is slower and the + database files are larger. 
+ + :return: Trailing wild card searches enabled + """ + if 'trailing-wildcard-searches' in self._config: + return self._config['trailing-wildcard-searches'] + return None + + def set_trailing_wildcard_word_positions(self, enabled=False): + """ + Sets index word positions for trailing-wildcard searches + only when trailing-wildcard-searches are enabled (slower + document loads and larger database files). + + *trailing wildcard word positions* specifies whether + index data should be included in the database files + to enable proximity searches (``cts:near-query``) within + trailing wildcard queries. You must also enable *trailing + wildcard searches* in order to perform trailing wildcard + position searches. When this parameter is true, positional + searches are possible within a trailing wildcard query, + but document loading is slower and the database files + are larger. + + :param enabled: Index word positions for trailing wildcard searches + + :return: The database object + """ + validate_boolean(enabled) + self._config['trailing-wildcard-word-positions'] = enabled + return self + + def trailing_wildcard_word_positions(self): + """ + Index word positions for trailing-wildcard searches + only when trailing-wildcard-searches are enabled (slower + document loads and larger database files). + + *trailing wildcard word positions* specifies whether + index data should be included in the database files + to enable proximity searches (``cts:near-query``) within + trailing wildcard queries. You must also enable *trailing + wildcard searches* in order to perform trailing wildcard + position searches. When this parameter is true, positional + searches are possible within a trailing wildcard query, + but document loading is slower and the database files + are larger. 
+ + :return: Index word positions enabled + """ + if 'trailing-wildcard-word-positions' in self._config: + return self._config['trailing-wildcard-word-positions'] + return None + + def set_fast_element_trailing_wildcard_searches(self, enabled=False): + """ + Sets enable element trailing wildcard searches (slower document + loads and larger database files). + + *fast element trailing wildcard searches* specifies + whether index terms should be included in the database + files to enable element trailing wildcard searches + and faster character-based XQuery predicates. When + this parameter is true, element-trailing-wildcard searches + are faster, but document loading is slower and the + database files are larger. + + :param enabled: Enable trailing wildcard searches + + :return: The database object + """ + validate_boolean(enabled) + self._config['fast-element-trailing-wildcard-searches'] = enabled + return self + + def fast_element_trailing_wildcard_searches(self): + """ + Enable element trailing wildcard searches (slower document + loads and larger database files). + + *fast element trailing wildcard searches* specifies + whether index terms should be included in the database + files to enable element trailing wildcard searches + and faster character-based XQuery predicates. When + this parameter is true, element-trailing-wildcard searches + are faster, but document loading is slower and the + database files are larger. + + :return: Fast element trailing wildcard searches enabled + """ + if 'fast-element-trailing-wildcard-searches' in self._config: + return self._config['fast-element-trailing-wildcard-searches'] + return None + + def set_two_character_searches(self, enabled=False): + """ + Sets enable wildcard searches and faster character-based + XQuery predicates using two character (slower document + loads and larger database files). 
+ + *two character searches* specifies whether indexes + should be created to enable wildcard searches where + the search pattern contains two consecutive non-wildcard + character (for example, ``ab*``). This index is not + needed if you have *three character searches* and a + *word lexicon*. + + :param enabled: Enable two character wildcard searches + + :return: The database object + """ + validate_boolean(enabled) + self._config['two-character-searches'] = enabled + return self + + def two_character_searches(self): + """ + Enable wildcard searches and faster character-based + XQuery predicates using two character (slower document + loads and larger database files). + + *two character searches* specifies whether indexes + should be created to enable wildcard searches where + the search pattern contains two consecutive non-wildcard + character (for example, ``ab*``). This index is not + needed if you have *three character searches* and a + *word lexicon*. + + :return: Two character wildcard searches enabled + """ + if 'two-character-searches' in self._config: + return self._config['two-character-searches'] + return None + + def set_one_character_searches(self, enabled=False): + """ + Sets enable wildcard searches and faster character-based + XQuery predicates using one character (slower document + loads and larger database files). + + *one character searches* specifies whether indexes + should be created to enable wildcard searches where + the search pattern contains a single non-wildcard character + (for example, ``a*``). This index is not needed if + you have *three character searches* and a *word lexicon*. 
+ + :param enabled: Enable one character wildcard searches + + :return: The database object + """ + validate_boolean(enabled) + self._config['one-character-searches'] = enabled + return self + + def one_character_searches(self): + """ + Enable wildcard searches and faster character-based + XQuery predicates using one character (slower document + loads and larger database files). + + *one character searches* specifies whether indexes + should be created to enable wildcard searches where + the search pattern contains a single non-wildcard character + (for example, ``a*``). This index is not needed if + you have *three character searches* and a *word lexicon*. + + :return: One character wildcard searches enabled + """ + if 'one-character-searches' in self._config: + return self._config['one-character-searches'] + return None + + def set_uri_lexicon(self, enabled=True): + """ + Sets maintain a lexicon of document URIs (slower document + loads and larger database files). + + *uri lexicon* specifies whether to create a lexicon + of all of the URIs in the database. The URI lexicon + allows you to quickly list all of the URIs in the database + and to perform lexicon-based queries on the URIs. + + :param enabled: Enable URI lexicon + + :return: The database object + """ + validate_boolean(enabled) + self._config['uri-lexicon'] = enabled + return self + + def uri_lexicon(self): + """ + Maintain a lexicon of document URIs (slower document + loads and larger database files). + + *uri lexicon* specifies whether to create a lexicon + of all of the URIs in the database. The URI lexicon + allows you to quickly list all of the URIs in the database + and to perform lexicon-based queries on the URIs. + + :return: URI lexicon enabled + """ + if 'uri-lexicon' in self._config: + return self._config['uri-lexicon'] + return None + + def set_collection_lexicon(self, enabled=False): + """ + Sets maintain a lexicon of collection URIs (slower document + loads and larger database files). 
+
+ *collection lexicon* specifies whether to create a
+ lexicon of all of the collection URIs in the database.
+ The collection lexicon allows you to quickly list all
+ of the collection URIs in the database and to perform
+ lexicon-based queries on the URIs.
+
+ :param enabled: Enable collection URI lexicon
+
+ :return: The database object
+ """
+ validate_boolean(enabled)
+ self._config['collection-lexicon'] = enabled
+ return self
+
+ def collection_lexicon(self):
+ """
+ Maintain a lexicon of collection URIs (slower document
+ loads and larger database files).
+
+ *collection lexicon* specifies whether to create a
+ lexicon of all of the collection URIs in the database.
+ The collection lexicon allows you to quickly list all
+ of the collection URIs in the database and to perform
+ lexicon-based queries on the URIs.
+
+ :return: Collection lexicon enabled
+ """
+ if 'collection-lexicon' in self._config:
+ return self._config['collection-lexicon']
+ return None
+
+ def set_reindexer_enable(self, enabled=True):
+ """
+ Sets enable automatic reindexing after configuration changes.
+
+ *reindexer enable* specifies whether indexes are automatically
+ rebuilt in the background after index configuration
+ settings are changed. When set to true, index configuration
+ changes automatically initiate a background reindexing
+ operation on the entire database. When set to false,
+ any new index settings take effect for future documents
+ loaded into the database; existing documents retain
+ the old settings until they are reloaded or until you
+ set reindexer enabled to true.
+
+ :param enabled: Enable automatic reindexing
+
+ :return: The database object
+ """
+ validate_boolean(enabled)
+ self._config['reindexer-enable'] = enabled
+ return self
+
+ def reindexer_enable(self):
+ """
+ Enable automatic reindexing after configuration changes.
+
+ *reindexer enable* specifies whether indexes are automatically
+ rebuilt in the background after index configuration
+ settings are changed. 
When set to true, index configuration + changes automatically initiate a background reindexing + operation on the entire database. When set to false, + any new index settings take effect for future documents + loaded into the database; existing documents retain + the old settings until they are reloaded or until you + set reindexer enabled to true. + + :return: Automatic reindexing enabled + """ + if 'reindexer-enable' in self._config: + return self._config['reindexer-enable'] + return None + + def set_reindexer_throttle(self, limit=5): + """ + Sets larger numbers mean work harder at reindexing. + + *reindexer throttle* sets the priority of system resources + devoted to reindexing. Reindexing occurs in batches, + where each batch is approximately 200 fragments. When + set to 5 (the default), the reindexer works aggressively, + starting the next batch of reindexing soon after finishing + the previous batch. When set to 4, it waits longer + between batches, when set to 3 it waits longer still, + and so on until when it is set to 1, it waits the longest. + Therefore, higher numbers give reindexing a higher + priority and uses the most system resources. + + :param limit: The level of system resources + + :return: The database object + """ + validate_integer_range(limit, 1, 5) + self._config['reindexer-throttle'] = limit + return self + + def reindexer_throttle(self): + """ + Larger numbers mean work harder at reindexing. + + *reindexer throttle* sets the priority of system resources + devoted to reindexing. Reindexing occurs in batches, + where each batch is approximately 200 fragments. When + set to 5 (the default), the reindexer works aggressively, + starting the next batch of reindexing soon after finishing + the previous batch. When set to 4, it waits longer + between batches, when set to 3 it waits longer still, + and so on until when it is set to 1, it waits the longest. 
+ Therefore, higher numbers give reindexing a higher + priority and uses the most system resources. + + :return: The level of system resources + """ + if 'reindexer-throttle' in self._config: + return self._config['reindexer-throttle'] + return None + + def set_reindexer_timestamp(self, limit=0): + """ + Sets reindex/refragment all fragments with timestamps less + than or equal to the timestamp specified. 0 means no + forced reindexing. + + *reindexer timestamp* specifies the timestamp of fragments + to force a reindex/refragment operation. Click the + get current timestamp button to enter the current system + timestamp. When you set this parameter to a timestamp + and *reindex enable* is set to ``true``, it causes + a reindex and refragment operation on all fragments + in the database that have a timestamp equal to or less + than the specified timestamp. Note that if you restore + a database that has a timestamp set, if there are fragments + in the restored content that are older than the specified + timestamp, they will start to reindex as soon as they + are restored. + + :param limit: Reindexer timestamp + + :return: The document object + """ + self._config['reindexer-timestamp'] = limit + return self + + def reindexer_timestamp(self): + """ + Reindex/refragment all fragments with timestamps less + than or equal to the timestamp specified. 0 means no + forced reindexing. + + *reindexer timestamp* specifies the timestamp of fragments + to force a reindex/refragment operation. Click the + get current timestamp button to enter the current system + timestamp. When you set this parameter to a timestamp + and *reindex enable* is set to ``true``, it causes + a reindex and refragment operation on all fragments + in the database that have a timestamp equal to or less + than the specified timestamp. 
Note that if you restore + a database that has a timestamp set, if there are fragments + in the restored content that are older than the specified + timestamp, they will start to reindex as soon as they + are restored. + + :return: Reindexer timestamp in milliseconds + """ + if 'reindexer-timestamp' in self._config: + return self._config['reindexer-timestamp'] + return None + + def set_directory_creation(self, which='manual'): + """ + Sets automatically (for WebDAV) or manually manage directories + + *directory creation* specifies whether directories + are automatically created in the database when documents + are created. The default for a new database is *manual*. + The settings are: + + *automatic* specifies that a directory hierarchy is + automatically created to match the URI of a document + or a directory that is created. This is the recommended + setting, especially if you are accessing the database + with a WebDAV Server or if you are using it as a Modules + database.*manual* specifies that directories must be + manually created. No directory hierarchy is enforced. + *manual-enforced* is the same as manual, except it + raises an error if the parent directory does not exist + when creating a document or directory. For example, + in order to create a document with the URI http://marklogic/file.xml, + the directory http://marklogic/ must first exist. + + :param which: The method of directory configuration + + :return: The database object + """ + validate_directory_creation(which) + self._config['directory-creation'] = which + return self + + def directory_creation(self): + """ + Automatically (for WebDAV) or manually manage directories + + *directory creation* specifies whether directories + are automatically created in the database when documents + are created. The default for a new database is *manual*. 
+ The settings are: + + *automatic* specifies that a directory hierarchy is + automatically created to match the URI of a document + or a directory that is created. This is the recommended + setting, especially if you are accessing the database + with a WebDAV Server or if you are using it as a Modules + database.*manual* specifies that directories must be + manually created. No directory hierarchy is enforced. + *manual-enforced* is the same as manual, except it + raises an error if the parent directory does not exist + when creating a document or directory. For example, + in order to create a document with the URI http://marklogic/file.xml, + the directory http://marklogic/ must first exist. + + :return: Directory creation method + """ + if 'directory-creation' in self._config: + return self._config['directory-creation'] + return None + + def set_maintain_last_modified(self, enabled=False): + """ + Sets maintain last-modified properties of documents. + + *maintain last modified* specifies whether to include + a timestamp on the properties document for each document + in the database. + + :param enabled: Maintain last-modified + + :return: The database object + """ + validate_boolean(enabled) + self._config['maintain-last-modified'] = enabled + return self + + def maintain_last_modified(self): + """ + Maintain last-modified properties of documents. + + *maintain last modified* specifies whether to include + a timestamp on the properties document for each document + in the database. + + :return: Maintain last modified + """ + if 'maintain-last-modified' in self._config: + return self._config['maintain-last-modified'] + return None + + def set_maintain_directory_last_modified(self, enabled=False): + """ + Sets maintain last-modified properties of directories. + + *maintain directory last modified* specifies whether + to include a timestamp on the properties for each directory + in the database. 
+ + :param enabled: Maintain last-modified + + :return: The database object + """ + validate_boolean(enabled) + self._config['maintain-directory-last-modified'] = enabled + return self + + def maintain_directory_last_modified(self): + """ + Maintain last-modified properties of directories. + + *maintain directory last modified* specifies whether + to include a timestamp on the properties for each directory + in the database. + + :return: Maintain directory last modified property enabled + """ + if 'maintain-directory-last-modified' in self._config: + return self._config['maintain-directory-last-modified'] + return None + + def set_inherit_permissions(self, enabled=False): + """ + Sets new document default permissions include parent directory + permissions. + + *inherit permissions* specifies whether documents and + directories will inherit default permissions from the + parent directory. + + :param enabled: Inherit document permissions from parent + + :return: The database object + """ + validate_boolean(enabled) + self._config['inherit-permissions'] = enabled + return self + + def inherit_permissions(self): + """ + New document default permissions include parent directory + permissions. + + *inherit permissions* specifies whether documents and + directories will inherit default permissions from the + parent directory. + + :return: Inherit document permissions from parent enabled + """ + if 'inherit-permissions' in self._config: + return self._config['inherit-permissions'] + return None + + def set_inherit_collections(self, enabled=False): + """ + Sets new document default collections include parent directory + collections. + + *inherit collections* specifies whether documents and + directories will inherit default collections from the + parent directory. 
+
+ :param enabled: Inherit collection from parent directory
+
+ :return: The database object
+ """
+ validate_boolean(enabled)
+ self._config['inherit-collections'] = enabled
+ return self
+
+ def inherit_collections(self):
+ """
+ New document default collections include parent directory
+ collections.
+
+ *inherit collections* specifies whether documents and
+ directories will inherit default collections from the
+ parent directory.
+
+ :return: Inherit default collections enabled
+ """
+ if 'inherit-collections' in self._config:
+ return self._config['inherit-collections']
+ return None
+
+ def set_inherit_quality(self, enabled=False):
+ """
+ Sets new document default quality is inherited parent directory
+ quality.
+
+ *inherit quality* specifies whether documents and directories
+ will inherit default quality settings from the parent
+ directory.
+
+ :param enabled: Inherit parent directory quality
+
+ :return: The database object
+ """
+ validate_boolean(enabled)
+ self._config['inherit-quality'] = enabled
+ return self
+
+ def inherit_quality(self):
+ """
+ New document default quality is inherited parent directory
+ quality.
+
+ *inherit quality* specifies whether documents and directories
+ will inherit default quality settings from the parent
+ directory.
+
+ :return: Inherit document quality
+ """
+ if 'inherit-quality' in self._config:
+ return self._config['inherit-quality']
+ return None
+
+ def set_in_memory_limit(self, limit=262144):
+ """
+ Sets the maximum number of fragments in an in-memory stand.
+
+ *in memory limit* specifies the maximum number of fragments
+ in an in-memory stand. An in-memory stand contains
+ the latest version of any new or changed fragments.
+ Periodically, in-memory stands are written to disk
+ as a new stand in the forest. Also, if a stand accumulates
+ a number of fragments beyond this limit, it is automatically
+ saved to disk by a background thread.
+ + :param limit: In memory fragment limit + + :return: The database object + """ + self._config['in-memory-limit'] = limit + return self + + def in_memory_limit(self): + """ + The maximum number of fragments in an in-memory stand. + + *in memory limit* specifies the maximum number of fragments + in an in-memory stand. An in-memory stand contains + the latest version of any new or changed fragments. + Periodically, in-memory stands are written to disk + as a new stand in the forest. Also, if a stand accumulates + a number of fragments beyond this limit, it is automatically + saved to disk by a background thread. + + :return: In memory fragment limit + """ + if 'in-memory-limit' in self._config: + return self._config['in-memory-limit'] + return None + + def set_in_memory_list_size(self, limit=512): + """ + Sets size of the in-memory list storage, in megabytes. + + *in memory list size* specifies the amount of cache + and buffer memory to be allocated for managing termlist + data for an in-memory stand. + + :param limit: The in memory list storage in megabytes + + :return: The database object + """ + self._config['in-memory-list-size'] = limit + return self + + def in_memory_list_size(self): + """ + Size of the in-memory list storage, in megabytes. + + *in memory list size* specifies the amount of cache + and buffer memory to be allocated for managing termlist + data for an in-memory stand. + + :return: The in memory list storage size in megabytes + """ + if 'in-memory-list-size' in self._config: + return self._config['in-memory-list-size'] + return None + + def set_in_memory_tree_size(self, limit=128): + """ + Sets size of the in-memory tree storage, in megabytes. + + *in memory tree size* specifies the amount of cache + and buffer memory to be allocated for managing fragment + data for an in-memory stand. 
+ + :param limit: In memory tree storage size + + :return: The database object + """ + self._config['in-memory-tree-size'] = limit + return self + + def in_memory_tree_size(self): + """ + Size of the in-memory tree storage, in megabytes. + + *in memory tree size* specifies the amount of cache + and buffer memory to be allocated for managing fragment + data for an in-memory stand. + + :return: In memory tree storage size + """ + if 'in-memory-tree-size' in self._config: + return self._config['in-memory-tree-size'] + return None + + def set_in_memory_range_index_size(self, limit=16): + """ + Sets size of the in-memory range index storage, in megabytes. + + *in memory range index size* specifies the amount + of cache and buffer memory to be allocated for managing + range index data for an in-memory stand. + + :param limit: The in memory range index size + + :return: The database object + """ + self._config['in-memory-range-index-size'] = limit + return self + + def in_memory_range_index_size(self): + """ + Size of the in-memory range index storage, in megabytes. + + *in memory range index size* specifies the amount + of cache and buffer memory to be allocated for managing + range index data for an in-memory stand. + + :return: The in-memory range index size + """ + if 'in-memory-range-index-size' in self._config: + return self._config['in-memory-range-index-size'] + return None + + def set_in_memory_reverse_index_size(self, limit=16): + """ + Sets size of the in-memory reverse index storage, in megabytes. + + *in memory reverse index size* specifies the amount + of cache and buffer memory to be allocated for managing + reverse index data for an in-memory stand. + + :param limit: In memory reverse index size + + :return: The database object + """ + self._config['in-memory-reverse-index-size'] = limit + return self + + def in_memory_reverse_index_size(self): + """ + Size of the in-memory reverse index storage, in megabytes. 
+ + *in memory reverse index size* specifies the amount + of cache and buffer memory to be allocated for managing + reverse index data for an in-memory stand. + + :return: In memory reverse index size + """ + if 'in-memory-reverse-index-size' in self._config: + return self._config['in-memory-reverse-index-size'] + return None + + def set_in_memory_triple_index_size(self, limit=64): + """ + Sets size of the in-memory triple index storage, in megabytes. + + *in memory triple index size* specifies the amount + of cache and buffer memory to be allocated for managing + triple index data for an in-memory stand. + + :param limit: The in memory triple index size + + :return: The database object + """ + self._config['in-memory-triple-index-size'] = limit + return self + + def in_memory_triple_index_size(self): + """ + Size of the in-memory triple index storage, in megabytes. + + *in memory triple index size* specifies the amount + of cache and buffer memory to be allocated for managing + triple index data for an in-memory stand. + + :return: In memory triple index size + """ + if 'in-memory-triple-index-size' in self._config: + return self._config['in-memory-triple-index-size'] + return None + + def set_large_size_threshold(self, limit=1024): + """ + Sets size threshold for large objects, in kilobytes. + + *large size threshold* specifies the size threshold + for the system to decide whether to treat a document + as "large". + + :param limit: Size limit for large objects + + :return: The database object + """ + self._config['large-size-threshold'] = limit + return self + + def large_size_threshold(self): + """ + Size threshold for large objects, in kilobytes. + + *large size threshold* specifies the size threshold + for the system to decide whether to treat a document + as "large". 
+ + :return: The large size threshold + """ + if 'large-size-threshold' in self._config: + return self._config['large-size-threshold'] + return None + + def set_locking(self, which='fast'): + """ + Sets specifies how robust transaction locking should be. + + *locking* specifies how robust transaction locking + should be. When set to ``strict``, locking enforces + mutual exclusion on existing documents and on new documents. + When set to ``fast``, locking enforces mutual exclusion + on existing and new documents. Instead of locking all + the forests on new documents, it uses a hash function + to select one forest to lock. In general, this is faster + than strict. However, for a short period of time after + a new forest is added, some of the transactions need + to be retried internally. When set to ``off``, locking + does not enforce mutual exclusion on existing documents + or on new documents; only use this setting if you are + sure all documents you are loading are new (a new bulk + load, for example), otherwise you might create duplicate + URIs in the database. + + :param which: The type of transaction logging + + :return: The database object + """ + validate_locking_type(which) + self._config['locking'] = which + return self + + def locking(self): + """ + Specifies how robust transaction locking should be. + + *locking* specifies how robust transaction locking + should be. When set to ``strict``, locking enforces + mutual exclusion on existing documents and on new documents. + When set to ``fast``, locking enforces mutual exclusion + on existing and new documents. Instead of locking all + the forests on new documents, it uses a hash function + to select one forest to lock. In general, this is faster + than strict. However, for a short period of time after + a new forest is added, some of the transactions need + to be retried internally. 
When set to ``off``, locking + does not enforce mutual exclusion on existing documents + or on new documents; only use this setting if you are + sure all documents you are loading are new (a new bulk + load, for example), otherwise you might create duplicate + URIs in the database. + + :return: The transaction locking + """ + if 'locking' in self._config: + return self._config['locking'] + return None + + def set_journaling(self, which='fast'): + """ + Sets specifies how robust transaction journaling should + be. + + *journaling* specifies how robust transaction journaling + should be. When set to ``strict``, the journal protects + against MarkLogic Server process failures, host operating + system kernel failures, and host hardware failures. + When set to ``fast``, the journal protects against + MarkLogic Server process failures but not against host + operating system kernel failures or host hardware failures. + When set to ``off``, the journal does not protect against + MarkLogic Server process failures, host operating system + kernel failures, or host hardware failures. + + :param which:The type of journaling + + :return: The database object + """ + validate_locking_type(which) + self._config['journaling'] = which + return self + + def journaling(self): + """ + Specifies how robust transaction journaling should + be. + + *journaling* specifies how robust transaction journaling + should be. When set to ``strict``, the journal protects + against MarkLogic Server process failures, host operating + system kernel failures, and host hardware failures. + When set to ``fast``, the journal protects against + MarkLogic Server process failures but not against host + operating system kernel failures or host hardware failures. + When set to ``off``, the journal does not protect against + MarkLogic Server process failures, host operating system + kernel failures, or host hardware failures. 
+ + :return: The journaling + """ + if 'journaling' in self._config: + return self._config['journaling'] + return None + + def set_journal_size(self, limit=682): + """ + Sets size of each journal file, in megabytes. + + *journal size* specifies the amount of disk storage + to be allocated for each transaction journal. + + :param limit: The journal size + + :return: The database object + """ + self._config['journal-size'] = limit + return self + + def journal_size(self): + """ + Size of each journal file, in megabytes. + + *journal size* specifies the amount of disk storage + to be allocated for each transaction journal. + + :return: The journal size + """ + if 'journal-size' in self._config: + return self._config['journal-size'] + return None + + def set_journal_count(self, limit=2): + """ + The journal count + + :param limit:The journal count + + :return: The database object + """ + self._config['journal-count'] = limit + return self + + def journal_count(self): + """ + The journal count + + :return: The journal count + """ + if 'journal-count' in self._config: + return self._config['journal-count'] + return None + + def set_preallocate_journals(self, enabled=False): + """ + Sets allocate journal files before executing transactions. + + *preallocate journals* specifies whether the transaction + journal files should be allocated in the filesystem + before executing any transactions. When set to true, + initializing a forest may be slower, but subsequent + loading will be faster. + + :param enabled:Pre-allocate journal files + + :return: The database object + """ + validate_boolean(enabled) + self._config['preallocate-journals'] = enabled + return self + + def preallocate_journals(self): + """ + Allocate journal files before executing transactions. + + *preallocate journals* specifies whether the transaction + journal files should be allocated in the filesystem + before executing any transactions. 
When set to true, + initializing a forest may be slower, but subsequent + loading will be faster. + + :return: Pre-allocate journal files + """ + if 'preallocate-journals' in self._config: + return self._config['preallocate-journals'] + return None + + def set_preload_mapped_data(self, enabled=False): + """ + Sets preload memory mapped forest information while mounting + forest. + + *preload mapped data* specifies whether memory mapped + data (for example, range indexes and word lexicons) + are loaded immediately into memory when a stand is + opened. If you do not preload the mapped data, it will + be paged into memory dynamically when a query needs + it. + + :param enabled: Preload memory mapped forest information + + :return: The database object + """ + validate_boolean(enabled) + self._config['preload-mapped-data'] = enabled + return self + + def preload_mapped_data(self): + """ + Preload memory mapped forest information while mounting + forest. + + *preload mapped data* specifies whether memory mapped + data (for example, range indexes and word lexicons) + are loaded immediately into memory when a stand is + opened. If you do not preload the mapped data, it will + be paged into memory dynamically when a query needs + it. + + :return: Preload memory mapped forest information + """ + if 'preload-mapped-data' in self._config: + return self._config['preload-mapped-data'] + return None + + def set_preload_replica_mapped_data(self, enabled=False): + """ + Sets preload memory mapped forest information while mounting + replica forest. + + *preload mapped replica data* specifies whether memory + mapped data (for example, range indexes and word lexicons) + are loaded immediately into memory when a stand is + opened. The setting of preload-replica-mapped-data + is ignored if preload-mapped-data is set to false. 
+ + :param enabled:Preload mapped replica forest information + + :return: The database object + """ + validate_boolean(enabled) + self._config['preload-replica-mapped-data'] = enabled + return self + + def preload_replica_mapped_data(self): + """ + Preload memory mapped forest information while mounting + replica forest. + + *preload mapped replica data* specifies whether memory + mapped data (for example, range indexes and word lexicons) + are loaded immediately into memory when a stand is + opened. The setting of preload-replica-mapped-data + is ignored if preload-mapped-data is set to false. + + :return: Preload mapped replica forest information + """ + if 'preload-replica-mapped-data' in self._config: + return self._config['preload-replica-mapped-data'] + return None + + def set_range_index_optimize(self, which='facet-time'): + """ + Sets specifies how to optimize range indexes. + + *range index optimize* specifies how range indexes + are to be optimized. When set to ``facet-time``, range + indexes are optimized to minimize the amount of CPU + time used. When set to ``memory-size``, range indexes + are optimized to minimize the amount of memory used. + + :param which:Range index optimization option + + :return: The database object + """ + validate_range_index_optimize_options(which) + self._config['range-index-optimize'] = which + return self + + def range_index_optimize(self): + """ + Specifies how to optimize range indexes. + + *range index optimize* specifies how range indexes + are to be optimized. When set to ``facet-time``, range + indexes are optimized to minimize the amount of CPU + time used. When set to ``memory-size``, range indexes + are optimized to minimize the amount of memory used. 
+
+ :return: Range index optimization type
+ """
+ if 'range-index-optimize' in self._config:
+ return self._config['range-index-optimize']
+ return None
+
+ def set_positions_list_max_size(self, limit=256):
+ """
+ Sets maximum size of a positions-containing list, in megabytes.
+ Lists longer than this have positions discarded.
+
+ *positions list max size* specifies the maximum size,
+ in megabytes, of the position list portion of the index
+ for a given term. If the position list size for a given
+ term grows larger than the limit specified, then the
+ position information for that term is discarded. The
+ default value is 256, the minimum value is 1, and the
+ maximum value is 512. For example, position queries
+ (``cts:near-query``) for frequently occurring words
+ that have reached this limit (words like *the*, *and*,
+ *a*, and so on) are resolved without using the indexes. Even
+ though those types of words are resolved without using
+ the indexes, this limit helps improve performance by
+ making the indexes smaller and more efficient to the
+ data actually loaded in the database.
+
+ :param limit: Max position containing list size
+
+ :return: The database object
+ """
+ self._config['positions-list-max-size'] = limit
+ return self
+
+ def positions_list_max_size(self):
+ """
+ Maximum size of a positions-containing list, in megabytes.
+ Lists longer than this have positions discarded.
+
+ *positions list max size* specifies the maximum size,
+ in megabytes, of the position list portion of the index
+ for a given term. If the position list size for a given
+ term grows larger than the limit specified, then the
+ position information for that term is discarded. The
+ default value is 256, the minimum value is 1, and the
+ maximum value is 512. For example, position queries
+ (``cts:near-query``) for frequently occurring words
+ that have reached this limit (words like *the*, *and*,
+ *a*, and so on) are resolved without using the indexes.
Even + though those types of words are resolved without using + the indexes, this limit helps improve performance by + making the indexes smaller and more efficient to the + data actually loaded in the database. + + :return: The maximum position containing list size + """ + if 'positions-list-max-size' in self._config: + return self._config['positions-list-max-size'] + return None + + def set_format_compatibility(self, which='automatic'): + """ + Sets version of on-disk forest format. + + *format compatibility* specifies the version compatibility + that MarkLogic Server applies to the indexes for this + database during request evaluation. Setting this to + a value other than ``automatic`` specifies that all + forest data has the specified on-disk format, and it + disables the automatic checking for index compatibility + information. The automatic detection occurs during + database startup and after any database configuration + changes, and can take some time and system resources + for very large forests and for very large clusters. + The default value of ``automatic`` is recommended for + most installations. + + :param which:On disk forest format + + :return: The database object + """ + validate_format_compatibility_options(which) + self._config['format-compatibility'] = which + return self + + def format_compatibility(self): + """ + Version of on-disk forest format. + + *format compatibility* specifies the version compatibility + that MarkLogic Server applies to the indexes for this + database during request evaluation. Setting this to + a value other than ``automatic`` specifies that all + forest data has the specified on-disk format, and it + disables the automatic checking for index compatibility + information. The automatic detection occurs during + database startup and after any database configuration + changes, and can take some time and system resources + for very large forests and for very large clusters. 
+ The default value of ``automatic`` is recommended for + most installations. + + :return: The on-disk forest format + """ + if 'format-compatibility' in self._config: + return self._config['format-compatibility'] + return None + + def set_index_detection(self, which='automatic'): + """ + Sets handling of differences between the current configuration + of database indexes and on-disk settings. + + *index detection* specifies whether to auto-detect + index compatibility between the content and the current + database settings. This detection occurs during database + startup and after any database configuration changes, + and can take some time and system resources for very + large forests and for very large clusters. Setting + this to ``none`` also causes queries to use the current + database index settings, even if some settings have + not completed reindexing. The default value of ``automatic`` + is recommended for most installations. + + :param which:How to handle differences in configuration settings + + :return: The database object + """ + validate_index_detection_options(which) + self._config['index-detection'] = which + return self + + def index_detection(self): + """ + Handling of differences between the current configuration + of database indexes and on-disk settings. + + *index detection* specifies whether to auto-detect + index compatibility between the content and the current + database settings. This detection occurs during database + startup and after any database configuration changes, + and can take some time and system resources for very + large forests and for very large clusters. Setting + this to ``none`` also causes queries to use the current + database index settings, even if some settings have + not completed reindexing. The default value of ``automatic`` + is recommended for most installations. 
+ + :return: How to handle differences in configuration settings + """ + if 'index-detection' in self._config: + return self._config['index-detection'] + return None + + def set_expunge_locks(self, which='none'): + """ + Sets garbage collection of timed locks that have expired. + + *expunge locks* specifies if MarkLogic Server will + automatically expunge any lock fragments created using + ``xdmp:lock-acquire`` with specified timeouts. Setting + this ``automatic`` causes a background task to run + regularly to clean up expired lock fragments. The default + setting is ``none``, meaning lock fragments will remain + in the database after the locks expire (although they + will no longer be locking any documents) until they + are explicitly removed with ``xdmp:lock-release``. + + :param which:Garbage collect timed locks + + :return: The database object + """ + validate_expunge_locks_options(which) + self._config['expunge-locks'] = which + return self + + def expunge_locks(self): + """ + Garbage collection of timed locks that have expired. + + *expunge locks* specifies if MarkLogic Server will + automatically expunge any lock fragments created using + ``xdmp:lock-acquire`` with specified timeouts. Setting + this ``automatic`` causes a background task to run + regularly to clean up expired lock fragments. The default + setting is ``none``, meaning lock fragments will remain + in the database after the locks expire (although they + will no longer be locking any documents) until they + are explicitly removed with ``xdmp:lock-release``. + + :return: How to garbage collect timed locks + """ + if 'expunge-locks' in self._config: + return self._config['expunge-locks'] + return None + + def set_tf_normalization(self, which='scaled-log'): + """ + Sets what kind of TF normalization to apply. 
+ + *tf normalization* specifies whether to use the default + term-frequency normalization (``scaled-log``), which + scales the term frequency based on the size of the + document, or to use the ``unscaled-log``, which uses + term frequency as a function of the actual term frequency + in a document, regardless of the document size, or + to choose an intermediate level of scaling with lower + impact than the default document size-based scaling. + + :param which: The term frequency normalization + + :return: The database object + """ + validate_term_frequency_normalization_options(which) + self._config['tf-normalization'] = which + return self + + def tf_normalization(self): + """ + What kind of TF normalization to apply. + + *tf normalization* specifies whether to use the default + term-frequency normalization (``scaled-log``), which + scales the term frequency based on the size of the + document, or to use the ``unscaled-log``, which uses + term frequency as a function of the actual term frequency + in a document, regardless of the document size, or + to choose an intermediate level of scaling with lower + impact than the default document size-based scaling. + + :return: The term frequency normalization option + """ + if 'tf-normalization' in self._config: + return self._config['tf-normalization'] + return None + + def set_merge_priority(self, which='lower'): + """ + Sets the CPU scheduler priority for merges. + + *merge priority* specifies the CPU scheduler priority + at which merges should run. The settings are: + + *normal* specifies the same CPU scheduler priority + as for requests. + + *lower* specifies a lower CPU scheduler priority than + for requests. + + Merges always run with normal priority on forests with + more than 16 stands. 
+ + :param which:CPU scheduling hint for merges + + :return: The database object + """ + validate_merge_priority_options(which) + self._config['merge-priority'] = which + return self + + def merge_priority(self): + """ + The CPU scheduler priority for merges. + + *merge priority* specifies the CPU scheduler priority + at which merges should run. The settings are: + + *normal* specifies the same CPU scheduler priority + as for requests. + + *lower* specifies a lower CPU scheduler priority than + for requests. + + Merges always run with normal priority on forests with + more than 16 stands. + + :return: CPU scheduling hint for merges + """ + if 'merge-priority' in self._config: + return self._config['merge-priority'] + return None + + def set_merge_max_size(self, limit=32768): + """ + Sets maximum allowable size (in megabytes) for merges, or + 0 for no limit. + + *merge max size* specifies the maximum size, in megabytes, + of a stand that will result from a merge. If a stand + grows beyond the specified size, it will not be merged. + If two stands would be larger than the specified size + if merged, they will not be merged together. If you + set this to smaller sizes, large merges (which may + require more disk and CPU resources) will be prevented. + Set this to 0 to allow any sized stand to merge. The + default is 32768 (32G), which provides a good balance + between keeping the number of stands low and preventing + merges from needing large amounts of free disk space. + Use care when setting this to a non-zero value lower + than the default value, however, as this can prevent + merges which are ultimately required for the system + to maintain performance levels and to allow optimized + updates to the system. 
It is possible for a stand larger + than the merge-max-size to merge if the stand has enough + deleted fragments to trigger the merge min ratio; in + this case, MarkLogic will do a single-stand merge, + merging out the deleted fragments (even if the resulting + stand is larger than the merge-max-size value specified). + + :param limit:Size in megabytes + + :return: The database object + """ + self._config['merge-max-size'] = limit + return self + + def merge_max_size(self): + """ + Maximum allowable size (in megabytes) for merges, or + 0 for no limit. + + *merge max size* specifies the maximum size, in megabytes, + of a stand that will result from a merge. If a stand + grows beyond the specified size, it will not be merged. + If two stands would be larger than the specified size + if merged, they will not be merged together. If you + set this to smaller sizes, large merges (which may + require more disk and CPU resources) will be prevented. + Set this to 0 to allow any sized stand to merge. The + default is 32768 (32G), which provides a good balance + between keeping the number of stands low and preventing + merges from needing large amounts of free disk space. + Use care when setting this to a non-zero value lower + than the default value, however, as this can prevent + merges which are ultimately required for the system + to maintain performance levels and to allow optimized + updates to the system. It is possible for a stand larger + than the merge-max-size to merge if the stand has enough + deleted fragments to trigger the merge min ratio; in + this case, MarkLogic will do a single-stand merge, + merging out the deleted fragments (even if the resulting + stand is larger than the merge-max-size value specified). + """ + if 'merge-max-size' in self._config: + return self._config['merge-max-size'] + return None + + def set_merge_min_size(self, limit=1024): + """ + Sets stands with fewer than this number of fragments are + merged together. 
+
+ *merge min size* specifies the minimum number of fragments
+ that stands can contain. Two or more Stands with fewer
+ than this number of fragments are automatically merged.
+
+ :param limit: Minimum stand count for merge
+
+ :return: The database object
+ """
+ self._config['merge-min-size'] = limit
+ return self
+
+ def merge_min_size(self):
+ """
+ Stands with fewer than this number of fragments are
+ merged together.
+
+ *merge min size* specifies the minimum number of fragments
+ that stands can contain. Two or more Stands with fewer
+ than this number of fragments are automatically merged.
+
+ :return: Minimum stand count for merge
+ """
+ if 'merge-min-size' in self._config:
+ return self._config['merge-min-size']
+ return None
+
+ def set_merge_min_ratio(self, limit=2):
+ """
+ Sets larger ratios trigger more merges.
+
+ *merge min ratio* specifies the minimum ratio between
+ the number of stand fragments. Stands with a fragment
+ count below this ratio relative to all smaller stands
+ are automatically merged with the smaller stands. Specify
+ a positive integer for the merge min ratio.
+
+ :param limit: The merge min ratio
+
+ :return: The database object
+ """
+ self._config['merge-min-ratio'] = limit
+ return self
+
+ def merge_min_ratio(self):
+ """
+ Larger ratios trigger more merges.
+
+ *merge min ratio* specifies the minimum ratio between
+ the number of stand fragments. Stands with a fragment
+ count below this ratio relative to all smaller stands
+ are automatically merged with the smaller stands. Specify
+ a positive integer for the merge min ratio.
+
+ :return: The merge min ratio
+ """
+ if 'merge-min-ratio' in self._config:
+ return self._config['merge-min-ratio']
+ return None
+
+ def set_merge_timestamp(self, limit=0):
+ """
+ Sets the earliest system timestamp allowed for requests,
+ or 0 to indicate the timestamp corresponding to the
+ time of latest merge. Merges discard information about
+ earlier timestamps.
Entering a value of type xs:dateTime + will have it automatically converted to its corresponding + timestamp. A negative value indicates a timestamp relative + to the time of the latest merge, at ten million ticks + per second. For example, -6000000000 means ten minutes + before the latest merge. A value in red indicates that + you have filled in the text field with the current + timestamp, but have not clicked ok to save the value + to your config file. + + *merge timestamp* specifies the timestamp stored on + merged stands. This is used for point-in-time queries, + and determines when space occupied by deleted fragments + and old versions of fragments may be reclaimed by the + database. If a fragment is deleted or updated at a + time after the merge timestamp, then the old version + of the fragment is retained for use in point-in-time + queries. Set this to 0 (the default) to let the system + reclaim the maximum amount of disk space during merge + activities. A setting of 0 will remove all deleted + and updated fragments when a merge occurs. Set this + to 1 before loading or updating any content to create + a complete archive of the changes to the database over + time. Set this to the current timestamp (by clicking + the *current timestamp* button) to preserve all versions + of content from this point on. Set this to a negative + number to specify a window of timestamp values, relative + to the last merge, at ten million ticks per second. + The timestamp is a number maintained by MarkLogic Server + that increments every time a change occurs in any of + the databases in a system (including configuration + changes from any host in a cluster). To set to the + current timestamp, click the *current timestamp* button; + the timestamp is displayed in in red until you press + OK to activate the timestamp for future merges. For + details on point-in-time queries, see the . 
+ + :param limit:Minimum value + + :return: The database object + """ + self._config['merge-timestamp'] = limit + return self + + def merge_timestamp(self): + """ + The earliest system timestamp allowed for requests, + or 0 to indicate the timestamp corresponding to the + time of latest merge. Merges discard information about + earlier timestamps. Entering a value of type xs:dateTime + will have it automatically converted to its corresponding + timestamp. A negative value indicates a timestamp relative + to the time of the latest merge, at ten million ticks + per second. For example, -6000000000 means ten minutes + before the latest merge. A value in red indicates that + you have filled in the text field with the current + timestamp, but have not clicked ok to save the value + to your config file. + + *merge timestamp* specifies the timestamp stored on + merged stands. This is used for point-in-time queries, + and determines when space occupied by deleted fragments + and old versions of fragments may be reclaimed by the + database. If a fragment is deleted or updated at a + time after the merge timestamp, then the old version + of the fragment is retained for use in point-in-time + queries. Set this to 0 (the default) to let the system + reclaim the maximum amount of disk space during merge + activities. A setting of 0 will remove all deleted + and updated fragments when a merge occurs. Set this + to 1 before loading or updating any content to create + a complete archive of the changes to the database over + time. Set this to the current timestamp (by clicking + the *current timestamp* button) to preserve all versions + of content from this point on. Set this to a negative + number to specify a window of timestamp values, relative + to the last merge, at ten million ticks per second. 
+ The timestamp is a number maintained by MarkLogic Server + that increments every time a change occurs in any of + the databases in a system (including configuration + changes from any host in a cluster). To set to the + current timestamp, click the *current timestamp* button; + the timestamp is displayed in in red until you press + OK to activate the timestamp for future merges. For + details on point-in-time queries, see the . + + :return: Minimum value + """ + if 'merge-timestamp' in self._config: + return self._config['merge-timestamp'] + return None + + def set_retain_until_backup(self, enabled=False): + """ + Sets retain deleted fragments until backup. + + *retain until backup* specifies whether the deleted + fragments are retained since the last full or incremental + backup. + """ + self._config['retain-until-backup'] = enabled + return self + + def set_rebalancer_enable(self, enabled=True): + """ + Sets enable automatic rebalancing after configuration changes. + + *rebalancer enable* specifies whether rebalancing are + automatically performed in the background after configuration + settings are changed. When set to true, configuration + changes automatically initiate a background rebalancing + operation on the entire database. + + :param enabled: Enable automatic rebalancing + + :return: The database object + """ + validate_boolean(enabled) + self._config['rebalancer-enable'] = enabled + return self + + def rebalancer_enable(self): + """ + Enable automatic rebalancing after configuration changes. + + *rebalancer enable* specifies whether rebalancing are + automatically performed in the background after configuration + settings are changed. When set to true, configuration + changes automatically initiate a background rebalancing + operation on the entire database. 
+ + :return: Enable automatic rebalancing + """ + if 'rebalancer-enable' in self._config: + return self._config['rebalancer-enable'] + return None + + def set_rebalancer_throttle(self, limit=5): + """ + Sets larger numbers mean work harder at rebalancing. + + *rebalancer throttle* sets the priority of system resources + devoted to rebalancing. Rebalancing occurs in batches, + where each batch is approximately 200 fragments. When + set to 5 (the default), the rebalancer works aggressively, + starting the next batch of rebalancing soon after finishing + the previous batch. When set to 4, it waits longer + between batches, when set to 3 it waits longer still, + and so on until when it is set to 1, it waits the longest. + Therefore, higher numbers give rebalancing a higher + priority and uses the most system resources. + + :param limit:The relative amount of resources to dedicate to rebalancing + + :return: The database object + """ + validate_integer_range(limit, 1, 5) + self._config['rebalancer-throttle'] = limit + return self + + def rebalancer_throttle(self): + """ + Larger numbers mean work harder at rebalancing. + + *rebalancer throttle* sets the priority of system resources + devoted to rebalancing. Rebalancing occurs in batches, + where each batch is approximately 200 fragments. When + set to 5 (the default), the rebalancer works aggressively, + starting the next batch of rebalancing soon after finishing + the previous batch. When set to 4, it waits longer + between batches, when set to 3 it waits longer still, + and so on until when it is set to 1, it waits the longest. + Therefore, higher numbers give rebalancing a higher + priority and uses the most system resources. 
+ + :return: The relative amount of resources to dedicate to rebalancing + """ + if 'rebalancer-throttle' in self._config: + return self._config['rebalancer-throttle'] + return None + + def set_assignment_policy(self, which='bucket'): + """ + Sets the policy to use for assignment and rebalancing. + + *assignment policy* specifies what policy to use for + assignment and rebalancing. The default for a new database + is *bucket*. The settings are: *legacy* specifies the + policy that already exists on MarkLogic 6. *bucket* + specifies a policy that first assigns a document to + a logical bucket based on its URI then assigns the + bucket to a forest. *range* specifies a policy that + assigns a document based on its data correspondent + to the "partition key" of the database. + + :param which:The policy for assignment and rebalancing + + :return: The database object + """ + validate_assignment_policy_options(which) + self._config['assignment-policy'] = {"assignment-policy-name": which} + return self + + def assignment_policy(self): + """ + The policy to use for assignment and rebalancing. + + *assignment policy* specifies what policy to use for + assignment and rebalancing. The default for a new database + is *bucket*. The settings are: *legacy* specifies the + policy that already exists on MarkLogic 6. *bucket* + specifies a policy that first assigns a document to + a logical bucket based on its URI then assigns the + bucket to a forest. *range* specifies a policy that + assigns a document based on its data correspondent + to the "partition key" of the database. + + :return: The policy for assignment and rebalancing + """ + if 'assignment-policy' in self._config: + return self._config['assignment-policy'] + return None + + def path_namespaces(self): + """ + Return the path namespaces defined or None, if no path namespaces + are defined. 
def add_path_namespace(self, path):
    """
    Add a path namespace for use by field paths.

    :param path: The PathNamespace

    :return: The database object
    """
    return self.add_to_property_list('path-namespace', path, PathNamespace)

def set_path_namespaces(self, paths):
    """
    Set the path namespaces, replacing any existing list. A single
    PathNamespace is accepted and wrapped in a list.

    :param paths: A PathNamespace or list of PathNamespace
    """
    if isinstance(paths, PathNamespace):
        paths = [paths]
    # Use the shared validator for consistency with the sibling
    # set_* list methods (e.g. set_element_word_lexicons).
    self._config['path-namespace'] \
        = assert_list_of_type(paths, PathNamespace)

def element_word_lexicons(self):
    """
    The element word lexicons defined on the database.

    :return: The lexicons, or None if none are defined
    """
    if 'element-word-lexicon' in self._config:
        return self._config['element-word-lexicon']
    return None

def add_element_word_lexicon(self, lexicon):
    """
    Add an element word lexicon.

    :param lexicon: The lexicon

    :return: The database object
    """
    return self.add_to_property_list('element-word-lexicon',
                                     lexicon, ElementWordLexicon)

def set_element_word_lexicons(self, lexicons):
    """
    Set the element word lexicons, replacing any existing list.

    :param lexicons: A list of ElementWordLexicon
    """
    self._config['element-word-lexicon'] \
        = assert_list_of_type(lexicons, ElementWordLexicon)

def attribute_word_lexicons(self):
    """
    The attribute word lexicons defined on the database.

    :return: The lexicons, or None if none are defined
    """
    if 'element-attribute-word-lexicon' in self._config:
        return self._config['element-attribute-word-lexicon']
    return None

def add_attribute_word_lexicon(self, lexicon):
    """
    Add an attribute word lexicon.

    :param lexicon: The lexicon

    :return: The database object
    """
    return self.add_to_property_list('element-attribute-word-lexicon',
                                     lexicon, AttributeWordLexicon)

def set_attribute_word_lexicons(self, lexicons):
    """
    Set the attribute word lexicons, replacing any existing list. A
    single AttributeWordLexicon is accepted and wrapped in a list.

    :param lexicons: An AttributeWordLexicon or list of them
    """
    if isinstance(lexicons, AttributeWordLexicon):
        lexicons = [lexicons]
    self._config['element-attribute-word-lexicon'] \
        = assert_list_of_type(lexicons, AttributeWordLexicon)

def phrase_throughs(self):
    """
    The phrase throughs defined on the database.

    :return: The phrase throughs, or None if none are defined
    """
    if 'phrase-through' in self._config:
        return self._config['phrase-through']
    return None

def add_phrase_through(self, through):
    """
    Add a phrase through.

    :param through: The phrase through

    :return: The database object
    """
    return self.add_to_property_list('phrase-through',
                                     through, PhraseThrough)

def set_phrase_throughs(self, throughs):
    """
    Set the phrase throughs, replacing any existing list.

    :param throughs: A list of PhraseThrough
    """
    self._config['phrase-through'] \
        = assert_list_of_type(throughs, PhraseThrough)

def phrase_arounds(self):
    """
    The phrase arounds defined on the database.

    :return: The phrase arounds, or None if none are defined
    """
    if 'phrase-around' in self._config:
        return self._config['phrase-around']
    return None

def add_phrase_around(self, around):
    """
    Add a phrase around.

    :param around: The phrase around

    :return: The database object
    """
    return self.add_to_property_list('phrase-around',
                                     around, PhraseAround)

def set_phrase_arounds(self, arounds):
    """
    Set the phrase arounds, replacing any existing list.

    :param arounds: A list of PhraseAround
    """
    self._config['phrase-around'] \
        = assert_list_of_type(arounds, PhraseAround)

def element_word_query_throughs(self):
    """
    The element word query throughs defined on the database.

    :return: The query throughs, or None if none are defined
    """
    if 'element-word-query-through' in self._config:
        # Bug fix: this previously read the key
        # 'element-word-query_through' (underscore), which the guard
        # above never matches, so it raised KeyError instead of
        # returning the stored value.
        return self._config['element-word-query-through']
    return None

def add_element_word_query_through(self, query_through):
    """
    Add an element word query through.

    :param query_through: The query through

    :return: The database object
    """
    return self.add_to_property_list('element-word-query-through',
                                     query_through, ElementWordQueryThrough)

def set_element_word_query_throughs(self, query_throughs):
    """
    Set the element word query throughs, replacing any existing list.
    A single ElementWordQueryThrough is accepted and wrapped in a
    list.

    :param query_throughs: An ElementWordQueryThrough or list of them
    """
    if isinstance(query_throughs, ElementWordQueryThrough):
        query_throughs = [query_throughs]
    # Bug fix: this previously stored under the underscore key
    # 'element-word-query_through', which no reader (getter, add
    # method, server payload) ever used; the property key is
    # hyphenated.
    self._config['element-word-query-through'] \
        = assert_list_of_type(query_throughs, ElementWordQueryThrough)

def default_rulesets(self):
    """
    The default rule sets defined on the database.

    :return: The default rule sets, or None if none are defined
    """
    if 'default-ruleset' in self._config:
        return self._config['default-ruleset']
    return None

def add_default_ruleset(self, ruleset):
    """
    Add a default rule set.

    :param ruleset: The rule set

    :return: The database object
    """
    return self.add_to_property_list('default-ruleset', ruleset, RuleSet)

def set_rulesets(self, rulesets):
    """
    Set the default rule sets, replacing any existing list.

    :param rulesets: A list of RuleSet
    """
    return self.set_property_list('default-ruleset', rulesets, RuleSet)
+ """ + return self.set_property_list('default-ruleset', rulesets, RuleSet) + + def fragment_roots(self): + """ + The fragment roots. + """ + if 'fragment-root' in self._config: + return self._config['fragment-root'] + return None + + def add_fragment_root(self, root): + """ + Add a fragment root. + """ + return self.add_to_property_list('fragment-root', root, FragmentRoot) + + def remove_fragment_root(self, root): + """ + Remove a fragment root. + """ + return self.remove_from_property_list('fragment-root', root, FragmentRoot) + + def set_fragment_roots(self, roots): + """ + Set the fragment roots. + """ + return self.set_property_list('fragment-root', roots, FragmentRoot) + + def fragment_parents(self): + """ + The fragment parents. + """ + if 'fragment-parent' in self._config: + return self._config['fragment-parent'] + return None + + def add_fragment_parent(self, parent): + """ + Add a fragment parent. + """ + return self.add_to_property_list('fragment-parent', parent, FragmentParent) + + def remove_fragment_parent(self, parent): + """ + Remove a fragment parent. + """ + return self.remove_from_property_list('fragment-parent', + parent, FragmentParent) + + def set_fragment_parents(self, parents): + """ + Set the fragment parents. + """ + return self.set_property_list('fragment-parent', parents, FragmentParent) + + def merge_blackouts(self): + """ + The merge blackouts + """ + if 'merge-blackout' in self._config: + return self._config['merge-blackout'] + return None + + def set_merge_blackouts(self, blackouts): + """ + Set the list of merge blackouts. + """ + return self.set_property_list('merge-blackout', blackouts, MergeBlackout) + + def add_merge_blackout(self, merge_blackout): + """ + Add a merge blackout. + """ + return self.add_to_property_list('merge-blackout', + merge_blackout, MergeBlackout) + + def remove_merge_blackout(self, merge_blackout): + """ + Remove a merge blackout. 
+ """ + return self.remove_from_property_list('merge-blackout', + merge_blackout, MergeBlackout) + + def scheduled_backups(self): + """ + The scheduled backups. + """ + if 'database-backup' in self._config: + return self._config['database-backup'] + return None + + def set_scheduled_backups(self, backups): + """ + Set the scheduled backups. + """ + return self.set_property_list('database-backup', + backups, ScheduledDatabaseBackup) + + def add_scheduled_backup(self, backup): + """ + Add a scheduled backup. + """ + return self.add_to_property_list('database-backup', + backup, ScheduledDatabaseBackup) + + def remove_scheduled_backup(self, backup): + """ + Remove a scheduled backup. + """ + return self.remove_from_property_list('database-backup', + backup, ScheduledDatabaseBackup) + + # ============================================================ + + def backup(self, conn, backup_dir, forests=None, + journal_archiving=False, journal_archive_path=None, + lag_limit=30, + incremental=False, incremental_dir=None): + """ + Start a database backup. + """ + return DatabaseBackup.backup(conn, self.name, backup_dir, forests, + journal_archiving, journal_archive_path, + lag_limit, + incremental, incremental_dir) + + def restore(self, conn, backup_dir, forests=None, + journal_archiving=False, journal_archive_path=None, + incremental=False, incremental_dir=None): + """ + Start a database restore. + """ + return DatabaseRestore.restore(conn, self.name, backup_dir, forests, + journal_archiving, journal_archive_path, + incremental, incremental_dir) + + def clear(self, conn): + """ + Clear the database. 
+ """ + payload = { + 'operation': 'clear-database', + } + + uri = "http://{0}:{1}/manage/v2/databases/{2}" \ + .format(conn.host, conn.management_port, self.name) + + response = requests.post(uri, json=payload, auth=conn.auth, + headers={'content-type': 'application/json', + 'accept': 'application/json'}) + + if response.status_code > 299: + raise UnexpectedManagementAPIResponse(response.text) + + return + + def merge(self, conn): + """ + Initiate a merge on the database. + """ + payload = { + 'operation': 'merge-database', + } + + uri = "http://{0}:{1}/manage/v2/databases/{2}" \ + .format(conn.host, conn.management_port, self.name) + + response = requests.post(uri, json=payload, auth=conn.auth, + headers={'content-type': 'application/json', + 'accept': 'application/json'}) + + if response.status_code > 299: + raise UnexpectedManagementAPIResponse(response.text) + + return + + def reindex(self, conn): + """ + Initiate a re-index on the database. + """ + payload = { + 'operation': 'reindex-database', + } + + uri = "http://{0}:{1}/manage/v2/databases/{2}" \ + .format(conn.host, conn.management_port, self.name) + + response = requests.post(uri, json=payload, auth=conn.auth, + headers={'content-type': 'application/json', + 'accept': 'application/json'}) + + if response.status_code > 299: + raise UnexpectedManagementAPIResponse(response.text) + + return + + # ============================================================ + + def create(self, connection): + """ + Create a new database defined by these parameters on the given connection. 
+ + :param connection: The server connection + + :return: The database object + """ + uri = "http://{0}:{1}/manage/v2/databases" \ + .format(connection.host, connection.management_port) + + forest_names = [] + # unicode doesn't exist in Python 3 + if sys.version_info[0] < 3: + for forest_info in self._config['forest']: + if isinstance(forest_info, str) or isinstance(forest_info, unicode): + new_forest = Forest(forest_info, host=self.hostname) + new_forest.create(connection) + forest_names.append(forest_info) + + elif isinstance(forest_info, Forest): + forest_info.create(connection) + forest_names.append(forest_info.name()) + else: + for forest_info in self._config['forest']: + if isinstance(forest_info, str): + new_forest = Forest(forest_info, host=self.hostname) + new_forest.create(connection) + forest_names.append(forest_info) + + elif isinstance(forest_info, Forest): + forest_info.create(connection) + forest_names.append(forest_info.name()) + + self._config['forest'] = forest_names + + self.logger.debug("Creating database: {0}".format(self.database_name())) + response = requests.post(uri, json=self._config, auth=connection.auth) + if response.status_code > 299: + raise UnexpectedManagementAPIResponse(response.text) + + return self + + def read(self, connection): + """ + Loads the database from the MarkLogic server. This will refresh + the properties of the object. + + :param connection: The connection to a MarkLogic server + :return: The server object + """ + database = Server.lookup(connection, self.database_name()) + if database is None: + return None + else: + self._config = database._config + self.etag = database.etag + return self + + def update(self, connection): + """ + Save the configuration changes with the given connection. + If the database already exists on the + given connection, then you can update the settings with this method. 
+ + :param connection:The server connection + + :return: The database object + """ + uri = "http://{0}:{1}/manage/v2/databases/{2}/properties" \ + .format(connection.host, connection.management_port, self.name) + + headers = {} + if self.etag is not None: + headers['if-match'] = self.etag + + struct = self.marshal() + response = requests.put(uri, json=struct, auth=connection.auth, + headers=headers) + + if response.status_code > 299: + raise UnexpectedManagementAPIResponse(response.text) + + # In case we renamed it + self.name = self._config['database-name'] + + return self + + def delete(self, connection): + """ + Remove the given database and all its forests. + + :param connection: The server connection + + :return: The database object + """ + uri = "http://{0}:{1}/manage/v2/databases/{2}?forest-delete=data" \ + .format(connection.host, connection.management_port, self.name) + response = requests.delete(uri, auth=connection.auth) + + if response.status_code > 299 and not response.status_code == 404: + raise UnexpectedManagementAPIResponse(response.text) + + return self + + def load_file(self, connection, path, uri, collections=None, content_type="application/json"): + """ + Load a given file into a given database. 
def load_file(self, connection, path, uri, collections=None,
              content_type="application/json"):
    """
    Load a given file into this database.

    :param connection: The server connection
    :param path: The path to the file
    :param uri: The URI for the file contents in the database
    :param collections: An optional list of collections
    :param content_type: The content type of the data

    :return: The database object
    """
    doc_url = "http://{0}:{1}/v1/documents" \
        .format(connection.host, connection.port)

    # Let requests build the query string so the document URI,
    # database name and collection names are percent-encoded; a list
    # value becomes repeated 'collection=' parameters. The previous
    # string concatenation broke on URIs containing '&', '#', spaces
    # or non-ASCII characters.
    params = {'uri': uri, 'database': self.name}
    if collections is not None:
        params['collection'] = collections

    # Binary mode: send the file bytes unmodified regardless of the
    # content type or platform line endings.
    with open(path, 'rb') as data_file:
        file_data = data_file.read()

    response = requests.put(doc_url, params=params, data=file_data,
                            auth=connection.auth,
                            headers={'content-type': content_type})
    if response.status_code > 299:
        raise UnexpectedAPIResponse(response.text)

    return self

def load_directory_files(self, connection, path, prefix="/",
                         collections=None,
                         content_type="application/json"):
    """
    Load all the files in a directory. The prefix is combined with
    each bare filename to generate the document URI on the server.

    :param connection: The server connection
    :param path: The path to the directory
    :param prefix: The prefix for the individual files
    :param collections: An optional list of collections for the files
    :param content_type: The content type of the files

    :return: The database object
    """
    file_list = files.walk_directories(path)
    for result in file_list:
        self.load_file(connection, result['partial-directory'],
                       prefix + result['filename'],
                       collections=collections,
                       content_type=content_type)
    return self

def load_directory(self, connection, path, prefix="/",
                   collections=None, content_type="application/json"):
    """
    Load all the files in a directory, preserving the partial path
    between the directory root and the file. So a file located at
    /data/files/myfile.xml, with a prefix parameter of '/data', will
    be loaded as /files/myfile.xml.

    :param connection: The server connection
    :param path: The path to the directory root
    :param prefix: The prefix for the server URI of each file
    :param collections: An optional list of collections for the files
    :param content_type: The content type of the files

    :return: The database object
    """
    file_list = files.walk_directories(path)
    for result in file_list:
        self.load_file(connection, result['partial-directory'],
                       prefix + result['partial-directory'],
                       collections=collections,
                       content_type=content_type)
    return self

@classmethod
def lookup(cls, connection, name):
    """
    Look up a database configuration by name.

    :param connection: The server connection
    :param name: The name of the database

    :return: The Database, or None if it does not exist
    """
    logger = logging.getLogger("marklogic")

    uri = "http://{0}:{1}/manage/v2/databases/{2}/properties" \
        .format(connection.host, connection.management_port, name)

    logger.debug("Reading database configuration: {0}".format(name))

    response = requests.get(uri, auth=connection.auth,
                            headers={'accept': 'application/json'})

    result = None
    if response.status_code == 200:
        result = Database.unmarshal(json.loads(response.text))
        if 'etag' in response.headers:
            result.etag = response.headers['etag']
    elif response.status_code != 404:
        # 404 simply means "no such database" -> return None.
        raise UnexpectedManagementAPIResponse(response.text)

    return result

@classmethod
def list_databases(cls, connection):
    """
    List the databases on the server.

    :param connection: The server connection

    :return: A list of Database objects, one per database
    """
    uri = "http://{0}:{1}/manage/v2/databases" \
        .format(connection.host, connection.management_port)
    response = requests.get(uri, auth=connection.auth,
                            headers={'accept': 'application/json'})

    if response.status_code != 200:
        raise UnexpectedManagementAPIResponse(response.text)

    response_json = json.loads(response.text)
    list_items = response_json['database-default-list']['list-items']
    db_count = list_items['list-count']['value']

    result = []
    if db_count > 0:
        for item in list_items['list-item']:
            result.append(Database(item['nameref']))
    return result
raise UnexpectedManagementAPIResponse(response.text) + + return result + + + @classmethod + def unmarshal(cls, config): + result = Database("temp") + result._config = config + result.name = result._config['database-name'] + + olist = [] + if 'range-element-index' in result._config: + for index in result._config['range-element-index']: + temp = ElementRangeIndex(index['scalar-type'], + index['namespace-uri'], + index['localname'], + index['collation'], + index['range-value-positions'] == 'true', + index['invalid-values']) + olist.append(temp) + result._config['range-element-index'] = olist + + olist = [] + if 'range-field-index' in result._config: + for index in result._config['range-field-index']: + temp = FieldRangeIndex(index['scalar-type'], + index['field-name'], + index['collation'], + index['range-value-positions'] == 'true', + index['invalid-values']) + olist.append(temp) + result._config['range-field-index'] = olist + + olist = [] + if 'range-element-attribute-index' in result._config: + for index in result._config['range-element-attribute-index']: + temp = AttributeRangeIndex(index['scalar-type'], + index['parent-namespace-uri'], + index['parent-localname'], + index['namespace-uri'], + index['localname'], + index['collation'], + index['range-value-positions'] == 'true', + index['invalid-values']) + olist.append(temp) + result._config['range-element-attribute-index'] = olist + + olist = [] + if 'range-path-index' in result._config: + for index in result._config['range-path-index']: + temp = PathRangeIndex(index['scalar-type'], + index['path-expression'], + index['collation'], + index['range-value-positions'] == 'true', + index['invalid-values']) + olist.append(temp) + result._config['range-path-index'] = olist + + olist = [] + if 'geospatial-element-index' in result._config: + for index in result._config['geospatial-element-index']: + temp = GeospatialElementIndex(index['namespace-uri'], + index['localname'], + index['coordinate-system'], + 
index['point-format'], + index['range-value-positions'] == 'true', + index['invalid-values']) + olist.append(temp) + result._config['geospatial-element-index'] = olist + + olist = [] + if 'geospatial-path-index' in result._config: + for index in result._config['geospatial-path-index']: + temp = GeospatialPathIndex(index['path-expression'], + index['coordinate-system'], + index['point-format'], + index['range-value-positions'] == 'true', + index['invalid-values']) + olist.append(temp) + result._config['geospatial-path-index'] = olist + + olist = [] + if 'geospatial-element-child-index' in result._config: + for index in result._config['geospatial-element-child-index']: + temp = GeospatialElementChildIndex( + index['parent-namespace-uri'], + index['parent-localname'], + index['namespace-uri'], + index['localname'], + index['coordinate-system'], + index['point-format'], + index['range-value-positions'] == 'true', + index['invalid-values']) + olist.append(temp) + result._config['geospatial-element-child-index'] = olist + + olist = [] + if 'geospatial-element-pair-index' in result._config: + for index in result._config['geospatial-element-pair-index']: + temp = GeospatialElementPairIndex( + index['parent-namespace-uri'], + index['parent-localname'], + index['longitude-namespace-uri'], + index['longitude-localname'], + index['latitude-namespace-uri'], + index['latitude-localname'], + index['coordinate-system'], + index['range-value-positions'] == 'true', + index['invalid-values']) + olist.append(temp) + result._config['geospatial-element-pair-index'] = olist + + olist = [] + if 'geospatial-element-attribute-pair-index' in result._config: + for index in result._config['geospatial-element-attribute-pair-index']: + temp = GeospatialElementAttributePairIndex( + index['parent-namespace-uri'], + index['parent-localname'], + index['longitude-namespace-uri'], + index['longitude-localname'], + index['latitude-namespace-uri'], + index['latitude-localname'], + 
index['coordinate-system'], + index['range-value-positions'] == 'true', + index['invalid-values']) + olist.append(temp) + result._config['geospatial-element-attribute-pair-index'] = olist + + olist = [] + if 'fragment-root' in result._config: + for root in result._config['fragment-root']: + temp = FragmentRoot(root['namespace-uri'],root['localname']) + olist.append(temp) + result._config['fragment-root'] = olist + + olist = [] + if 'fragment-parent' in result._config: + for root in result._config['fragment-parent']: + temp = FragmentParent(root['namespace-uri'],root['localname']) + olist.append(temp) + result._config['fragment-parent'] = olist + + olist = [] + if 'merge-blackout' in result._config: + for blackout in result._config['merge-blackout']: + temp = None + if (blackout['blackout-type'] == 'recurring' + and blackout['period'] is None): + temp = MergeBlackout.recurringAllDay( + blackout['merge-priority'], + blackout['limit'], + blackout['day']) + elif (blackout['blackout-type'] == 'recurring' + and 'duration' in blackout['period']): + temp = MergeBlackout.recurringDuration( + blackout['merge-priority'], + blackout['limit'], + blackout['day'], + blackout['period']['start-time'], + blackout['period']['duration']) + elif (blackout['blackout-type'] == 'recurring' + and 'end-time' in blackout['period']): + temp = MergeBlackout.recurringStartEnd( + blackout['merge-priority'], + blackout['limit'], + blackout['day'], + blackout['period']['start-time'], + blackout['period']['end-time']) + elif (blackout['blackout-type'] == 'once' + and 'end-time' in blackout['period']): + temp = MergeBlackout.oneTimeStartEnd( + blackout['merge-priority'], + blackout['limit'], + blackout['period']['start-date'], + blackout['period']['start-time'], + blackout['period']['end-date'], + blackout['period']['end-time']) + elif (blackout['blackout-type'] == 'once' + and 'duration' in blackout['period']): + temp = MergeBlackout.oneTimeDuration( + blackout['merge-priority'], + 
blackout['limit'], + blackout['period']['start-date'], + blackout['period']['start-time'], + blackout['period']['duration']) + else: + raise UnexpectedManagementAPIResponse("Unparseable merge blackout period") + + olist.append(temp) + result._config['merge-blackout'] = olist + + olist = [] + if 'database-backup' in result._config: + for backup in result._config['database-backup']: + incremental = None + if 'incremental' in backup: + incremental = backup['incremental'] + + temp = None + if (backup['backup-type'] == 'minutely'): + temp = ScheduledDatabaseBackup.minutely( + backup['backup-directory'], + backup['backup-period'], + backup['max-backups'], + backup['backup-security-database'], + backup['backup-schemas-database'], + backup['backup-triggers-database'], + backup['include-replicas'], + incremental, + backup['journal-archiving'], + backup['journal-archive-path'], + backup['journal-archive-lag-limit']) + elif (backup['backup-type'] == 'hourly'): + temp = ScheduledDatabaseBackup.hourly( + backup['backup-directory'], + backup['backup-period'], + backup['backup-start-time'], + backup['max-backups'], + backup['backup-security-database'], + backup['backup-schemas-database'], + backup['backup-triggers-database'], + backup['include-replicas'], + incremental, + backup['journal-archiving'], + backup['journal-archive-path'], + backup['journal-archive-lag-limit']) + elif (backup['backup-type'] == 'daily'): + temp = ScheduledDatabaseBackup.daily( + backup['backup-directory'], + backup['backup-period'], + backup['backup-start-time'], + backup['max-backups'], + backup['backup-security-database'], + backup['backup-schemas-database'], + backup['backup-triggers-database'], + backup['include-replicas'], + incremental, + backup['journal-archiving'], + backup['journal-archive-path'], + backup['journal-archive-lag-limit']) + elif (backup['backup-type'] == 'weekly'): + temp = ScheduledDatabaseBackup.weekly( + backup['backup-directory'], + backup['backup-period'], + 
backup['backup-day'], + backup['backup-start-time'], + backup['max-backups'], + backup['backup-security-database'], + backup['backup-schemas-database'], + backup['backup-triggers-database'], + backup['include-replicas'], + incremental, + backup['journal-archiving'], + backup['journal-archive-path'], + backup['journal-archive-lag-limit']) + elif (backup['backup-type'] == 'monthly'): + temp = ScheduledDatabaseBackup.monthly( + backup['backup-directory'], + backup['backup-period'], + backup['backup-month-day'], + backup['backup-start-time'], + backup['max-backups'], + backup['backup-security-database'], + backup['backup-schemas-database'], + backup['backup-triggers-database'], + backup['include-replicas'], + incremental, + backup['journal-archiving'], + backup['journal-archive-path'], + backup['journal-archive-lag-limit']) + elif (backup['backup-type'] == 'once'): + temp = ScheduledDatabaseBackup.once( + backup['backup-directory'], + backup['backup-start-date'], + backup['backup-start-time'], + backup['max-backups'], + backup['backup-security-database'], + backup['backup-schemas-database'], + backup['backup-triggers-database'], + backup['include-replicas'], + incremental, + backup['journal-archiving'], + backup['journal-archive-path'], + backup['journal-archive-lag-limit']) + else: + raise UnexpectedManagementAPIResponse("Unparseable backup") + temp._config['backup-id'] = backup['backup-id'] + olist.append(temp) + result._config['database-backup'] = olist + + olist = [] + if 'path-namespace' in result._config: + for path in result._config['path-namespace']: + temp = PathNamespace( + path['prefix'], + path['namespace-uri']) + olist.append(temp) + result._config['path-namespace'] = olist + + olist = [] + if 'element-word-lexicon' in result._config: + for path in result._config['element-word-lexicon']: + temp = ElementWordLexicon( + path['namespace-uri'], + path['localname'], + path['collation']) + olist.append(temp) + result._config['element-word-lexicon'] = olist + + 
olist = [] + if 'element-attribute-word-lexicon' in result._config: + for path in result._config['element-attribute-word-lexicon']: + temp = AttributeWordLexicon( + path['parent-namespace-uri'], + path['parent-localname'], + path['namespace-uri'], + path['localname'], + path['collation']) + olist.append(temp) + result._config['element-attribute-word-lexicon'] = olist + + olist = [] + if 'element-word-query-through' in result._config: + for path in result._config['element-word-query-through']: + temp = ElementWordQueryThrough( + path['namespace-uri'], + path['localname']) + olist.append(temp) + result._config['element-word-query-through'] = olist + + olist = [] + if 'phrase-through' in result._config: + for path in result._config['phrase-through']: + temp = PhraseThrough( + path['namespace-uri'], + path['localname']) + olist.append(temp) + result._config['phrase-through'] = olist + + olist = [] + if 'phrase-around' in result._config: + for path in result._config['phrase-around']: + temp = PhraseAround( + path['namespace-uri'], + path['localname']) + olist.append(temp) + result._config['phrase-around'] = olist + + olist = [] + if 'default-ruleset' in result._config: + for path in result._config['default-ruleset']: + temp = RuleSet( + path['location']) + olist.append(temp) + result._config['default-ruleset'] = olist + + olist = [] + if 'field' in result._config: + for field in result._config['field']: + name = field['field-name'] + if 'field-path' in field: + paths = [] + for path in field['field-path']: + paths.append(FieldPath( + path['path'], path['weight'])) + temp = PathField(name, paths) + else: + root = False + if 'include-root' in field: + root = (field['include-root'] == 'true') + if field['field-name'] == "": + temp = WordQuery(root) + else: + temp = RootField(name, root) + temp.unmarshal(field) + olist.append(temp) + result._config['field'] = olist + + return result + + def marshal(self): + struct = { } + for key in self._config: + if (key == 
'range-element-index' + or key == 'range-field-index' + or key == 'range-element-attribute-index' + or key == 'range-path-index' + or key == 'geospatial-element-index' + or key == 'geospatial-path-index' + or key == 'geospatial-element-child-index' + or key == 'geospatial-element-pair-index' + or key == 'geospatial-element-attribute-pair-index' + or key == 'fragment-root' + or key == 'fragment-parent' + or key == 'element-word-lexicon' + or key == 'element-attribute-word-lexicon' + or key == 'element-word-query-through' + or key == 'phrase-through' + or key == 'phrase-around' + or key == 'default-ruleset' + or key == 'path-namespace' + or key == 'database-backup' + or key == 'merge-blackout'): + jlist = [] + for index in self._config[key]: + jlist.append(index._config) + struct[key] = jlist + elif key == "field": + fstruct = [] + for field in self._config['field']: + fstruct.append(field.marshal()) + struct[key] = fstruct + else: + struct[key] = self._config[key]; + return struct + + def add_index(self, index_def): + """ + Add a new index to the database configuration. + + The index isn't actually created on the server until + the server configuration is saved. + + :param index_def: The index definition + + :return: The database configuration. + """ + # N.B. 
Get these in the right order because it's a class hierarchy + if isinstance(index_def, ElementRangeIndex): + return self.add_to_property_list('range-element-index', + index_def, ElementRangeIndex) + elif isinstance(index_def, AttributeRangeIndex): + return self.add_to_property_list('range-element-attribute-index', + index_def, AttributeRangeIndex) + elif isinstance(index_def, FieldRangeIndex): + return self.add_to_property_list('range-field-index', + index_def, FieldRangeIndex) + elif isinstance(index_def, PathRangeIndex): + return self.add_to_property_list('range-path-index', + index_def, PathRangeIndex) + elif isinstance(index_def, GeospatialElementChildIndex): + return self.add_to_property_list('geospatial-element-child-index', + index_def, GeospatialElementChildIndex) + elif isinstance(index_def, GeospatialElementAttributePairIndex): + return self.add_to_property_list('geospatial-element-attribute-pair-index', + index_def, GeospatialElementAttributePairIndex) + elif isinstance(index_def, GeospatialElementPairIndex): + return self.add_to_property_list('geospatial-element-pair-index', + index_def, GeospatialElementPairIndex) + elif isinstance(index_def, GeospatialElementIndex): + return self.add_to_property_list('geospatial-element-index', + index_def, GeospatialElementIndex) + elif isinstance(index_def, GeospatialPathIndex): + return self.add_to_property_list('geospatial-path-index', + index_def, GeospatialPathIndex) + else: + raise ValidationError('Not an index', index_def) + + def element_range_indexes(self): + """ + The element range indexes. + """ + if 'range-element-index' in self._config: + return self._config['range-element-index'] + return None + + def field_range_indexes(self): + """ + The field range indexes. + """ + if 'range-field-index' in self._config: + return self._config['range-field-index'] + return None + + def attribute_range_indexes(self): + """ + The attribute range indexes. 
+ """ + if 'range-element-attribute-index' in self._config: + return self._config['range-element-attribute-index'] + return None + + def path_range_indexes(self): + """ + The path range indexes. + """ + if 'range-path-index' in self._config: + return self._config['range-path-index'] + return None + + def geospatial_element_indexes(self): + """ + The geospatial element indexes. + """ + if 'geospatial-element-index' in self._config: + return self._config['geospatial-element-index'] + return None + + def geospatial_path_indexes(self): + """ + The geospatial path indexes. + """ + if 'geospatial-path-index' in self._config: + return self._config['geospatial-path-index'] + return None + + def geospatial_element_child_indexes(self): + """ + The geospatial element child indexes. + """ + if 'geospatial-element-child-index' in self._config: + return self._config['geospatial-element-child-index'] + return None + + def geospatial_element_pair_indexes(self): + """ + The geospatial element pair indexes. + """ + if 'geospatial-element-pair-index' in self._config: + return self._config['geospatial-element-pair-index'] + return None + + def geospatial_element_attribute_pair_indexes(self): + """ + The geospatial element attribute pair indexes. + """ + if 'geospatial-element-attribute-pair-index' in self._config: + return self._config['geospatial-element-attribute-pair-index'] + return None + + def fields(self): + """ + The fields. + + Note: The list of fields does not include the word query settings + that happen to be stored in the server configuration as a field + with an empty name. 
+ """ + # The field named "" is special, it's the word query settings + fields = [] + if 'field' in self._config: + for field in self._config['field']: + if field.field_name() is not None: + fields.append(field) + return fields + else: + return None + + def add_field(self, field): + value = assert_type(field, Field) + if value.field_name() is None: + raise ValidationError('Fields must have a non-empty name', value) + return self.add_to_property_list('field', field, Field) + + def set_fields(self, fields): + values = assert_list_of_type(fields, Field) + for value in values: + if value.field_name() is None: + raise ValidationError('Fields must have a non-empty name', value) + return self.set_property_list('field', fields, Field) + + def word_query(self): + if 'field' in self._config: + for field in self._config['field']: + if field.field_name() is None: + return field + return WordQuery(False) + + def set_word_query(self, word_query): + changed = False + fields = [] + if 'field' in self._config: + fields = self._config['field'] + for index, item in enumerate(fields): + if item.field_name() == "": + items[index] = assert_type(word_query, WordQuery) + changed = True + if not changed: + fields.append(assert_type(word_query, WordQuery)) + self._config['field'] = fields + + def get_document(self, conn, document_uri, content_type='*/*'): + doc_url = "http://{0}:{1}/v1/documents?uri={2}&database={3}" \ + .format(conn.host, conn.port, document_uri, self.name) + + response = requests.get(doc_url, auth=conn.auth, headers={'accept': content_type}) + if response.status_code == 404: + return None + elif response.status_code == 200: + return response.text + else: + raise UnexpectedAPIResponse(response.text) + diff --git a/python_api/marklogic/models/database/backup.py b/python_api/marklogic/models/database/backup.py new file mode 100644 index 0000000..6924da0 --- /dev/null +++ b/python_api/marklogic/models/database/backup.py @@ -0,0 +1,340 @@ +# -*- coding: utf-8 -*- +# +# 
# Copyright 2015 MarkLogic Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# File History
# ------------
#
# Norman Walsh      05/07/2015     Initial development

"""
Classes for dealing with scheduled backups
"""

import requests
import json
from marklogic.models.utilities.validators import *
from marklogic.models.utilities.exceptions import *


def _database_uri(conn, database_name):
    """The Management API endpoint for the named database."""
    return "http://{0}:{1}/manage/v2/databases/{2}" \
      .format(conn.host, conn.management_port, database_name)


def _post_operation(conn, database_name, payload):
    """
    POST an operation payload to the database endpoint and return the
    parsed JSON response. Raises UnexpectedManagementAPIResponse on any
    non-2xx status.
    """
    response = requests.post(_database_uri(conn, database_name),
                             json=payload, auth=conn.auth,
                             headers={'content-type': 'application/json',
                                      'accept': 'application/json'})
    if response.status_code > 299:
        raise UnexpectedManagementAPIResponse(response.text)
    return json.loads(response.text)


class DatabaseBackup:
    """
    The DatabaseBackup class represents a backup job that is running
    on the server.
    """
    def __init__(self, job_id, database_name, host_name=None):
        """
        Instantiate a database backup job. This constructor is used
        internally, it should never be called directly. Use the `backup`
        class method instead.
        """
        self.job_id = job_id
        self.database_name = database_name
        self.host_name = host_name
        self.settings = {}

    @classmethod
    def backup(cls, conn, database_name, backup_dir, forests=None,
               journal_archiving=False, journal_archive_path=None,
               lag_limit=30,
               incremental=False, incremental_dir=None):
        """
        Start a backup on the server and return an object that represents
        that job.
        """
        # The same validated options are both sent to the server and
        # remembered on the job object (for later validate/purge calls).
        settings = {
            'backup-dir': backup_dir,
            'journal-archiving': assert_type(journal_archiving, bool),
            'lag-limit': assert_type(lag_limit, int),
            'incremental': assert_type(incremental, bool),
            }
        if forests is not None:
            settings['forest'] = assert_list_of_type(forests, str)
        if journal_archiving:
            settings['journal-archive-path'] \
              = assert_type(journal_archive_path, str)
        if incremental:
            settings['incremental-dir'] = assert_type(incremental_dir, str)

        payload = dict(settings)
        payload['operation'] = 'backup-database'

        result = _post_operation(conn, database_name, payload)

        job = cls(result['job-id'], database_name, result.get('host-name'))
        job.settings = settings
        return job

    def status(self, conn):
        """
        The status of the backup job.
        """
        payload = {
            'operation': 'backup-status',
            'job-id': self.job_id
            }
        if self.host_name is not None:
            payload['host-name'] = self.host_name
        return _post_operation(conn, self.database_name, payload)

    def cancel(self, conn):
        """
        Request to cancel the backup job.
        """
        payload = {
            'operation': 'backup-cancel',
            'job-id': self.job_id
            }
        return _post_operation(conn, self.database_name, payload)

    def validate(self, conn):
        """
        Validate the (completed) backup job.
        """
        payload = dict(self.settings)
        payload['operation'] = 'backup-validate'
        return _post_operation(conn, self.database_name, payload)

    def purge(self, conn, keep_num=3, backup_dir=None):
        """
        Purge old backups, keeping the `keep_num` most recent. Defaults
        to the directory this job backed up into.
        """
        if backup_dir is None:
            backup_dir = self.settings['backup-dir']
        payload = {
            'operation': 'backup-purge',
            'backup-dir': backup_dir,
            'keep-num-backups': assert_type(keep_num, int)
            }
        return _post_operation(conn, self.database_name, payload)


class DatabaseRestore:
    """
    The DatabaseRestore class represents a restore job that is running
    on the server.
    """
    def __init__(self, job_id, database_name, host_name=None):
        """
        Instantiate a database restore job. This constructor is used
        internally, it should never be called directly. Use the `restore`
        class method instead.
        """
        self.job_id = job_id
        self.database_name = database_name
        self.host_name = host_name
        self.settings = {}

    @classmethod
    def restore(cls, conn, database_name, backup_dir, forests=None,
                journal_archiving=False, journal_archive_path=None,
                incremental=False, incremental_dir=None):
        """
        Start a restore on the server and return an object that represents
        that job.
        """
        settings = {
            'backup-dir': backup_dir,
            'journal-archiving': assert_type(journal_archiving, bool),
            'incremental': assert_type(incremental, bool),
            }
        if forests is not None:
            settings['forest'] = assert_list_of_type(forests, str)
        if journal_archiving:
            settings['journal-archive-path'] \
              = assert_type(journal_archive_path, str)
        if incremental:
            settings['incremental-dir'] = assert_type(incremental_dir, str)

        payload = dict(settings)
        payload['operation'] = 'restore-database'

        result = _post_operation(conn, database_name, payload)

        job = cls(result['job-id'], database_name, result.get('host-name'))
        job.settings = settings
        return job

    def status(self, conn):
        """
        The restore status.
        """
        payload = {
            'operation': 'restore-status',
            'job-id': self.job_id
            }
        if self.host_name is not None:
            payload['host-name'] = self.host_name
        return _post_operation(conn, self.database_name, payload)

    def cancel(self, conn):
        """
        Request to cancel the restore.
        """
        payload = {
            'operation': 'restore-cancel',
            'job-id': self.job_id
            }
        return _post_operation(conn, self.database_name, payload)

    def validate(self, conn):
        """
        Validate a (completed) restore.
        """
        payload = dict(self.settings)
        payload['operation'] = 'restore-validate'
        return _post_operation(conn, self.database_name, payload)
# -*- coding: utf-8 -*-
#
# Copyright 2015 MarkLogic Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# File History
# ------------
#
# Norman Walsh      05/10/2015     Initial development

# FIXME: how should the relationship between fields and field-range-indexes
# be modeled?

"""
Classes for dealing with fields.
"""

# NOTE(review): ValidationError is raised by Field.set_field_name but was
# never imported (NameError at runtime). It is assumed to live in the
# validators module alongside the assert_* helpers -- verify.
from marklogic.models.utilities.validators import assert_list_of_type, \
     assert_boolean, ValidationError
from marklogic.models.utilities.utilities import PropertyLists

class _IncludedExcludedElement:
    """
    An included or excluded element. This class is abstract.
    """
    def __init__(self):
        raise ValueError("Do not instantiate _IncludedExcludedElement directly")

    def namespace_uri(self):
        """
        The namespace URI.
        """
        return self._config['namespace-uri']

    def set_namespace_uri(self, namespace_uri):
        """
        Set the namespace URI.
        """
        self._config['namespace-uri'] = namespace_uri
        return self

    def localname(self):
        """
        The localname.
        """
        return self._config['localname']

    def set_localname(self, localname):
        """
        Set the localname.
        """
        self._config['localname'] = localname
        return self

    def weight(self):
        """
        The weight.
        """
        return self._config['weight']

    def set_weight(self, weight):
        """
        Set the weight.
        """
        self._config['weight'] = weight
        return self

    def attribute_namespace_uri(self):
        """
        The attribute namespace URI.
        """
        return self._config['attribute-namespace-uri']

    def set_attribute_namespace_uri(self, attribute_namespace_uri):
        """
        Set the attribute namespace URI.
        """
        self._config['attribute-namespace-uri'] = attribute_namespace_uri
        return self

    def attribute_localname(self):
        """
        The attribute localname.
        """
        return self._config['attribute-localname']

    def set_attribute_localname(self, attribute_localname):
        """
        Set the attribute localname.
        """
        self._config['attribute-localname'] = attribute_localname
        return self

    def attribute_value(self):
        """
        The attribute value.
        """
        return self._config['attribute-value']

    def set_attribute_value(self, attribute_value):
        """
        Set the attribute value.
        """
        self._config['attribute-value'] = attribute_value
        return self

class IncludedElement(_IncludedExcludedElement):
    """
    An included element.
    """
    def __init__(self, namespace_uri, localname, weight=1.0,
                 attribute_namespace_uri=None,
                 attribute_localname=None,
                 attribute_value=None):
        """
        Create an included element. The attribute parts default to the
        empty string when not supplied.
        """
        # FIXME: check for attribute ns/local/value errors
        self._config = {
            "namespace-uri": namespace_uri,
            "localname": localname,
            "weight": weight,
            "attribute-namespace-uri": \
              "" if attribute_namespace_uri is None else attribute_namespace_uri,
            "attribute-localname": \
              "" if attribute_localname is None else attribute_localname,
            "attribute-value": "" if attribute_value is None else attribute_value
            }

class ExcludedElement(_IncludedExcludedElement):
    """
    An excluded element.
    """
    def __init__(self, namespace_uri, localname,
                 attribute_namespace_uri=None,
                 attribute_localname=None,
                 attribute_value=None):
        """
        Create an excluded element. The attribute parts default to the
        empty string when not supplied.
        """
        # FIXME: check for attribute ns/local/value errors
        self._config = {
            "namespace-uri": namespace_uri,
            "localname": localname,
            "attribute-namespace-uri": \
              "" if attribute_namespace_uri is None else attribute_namespace_uri,
            "attribute-localname": \
              "" if attribute_localname is None else attribute_localname,
            "attribute-value": "" if attribute_value is None else attribute_value
            }

class TokenizerOverride:
    """
    A tokenizer override.
    """
    def __init__(self, character, tokenizer_class):
        """
        Instantiate a tokenizer override.
        """
        # FIXME: check classes
        self._config = {
            "character": character,
            "tokenizer-class": tokenizer_class
            }

    def character(self):
        """
        The character.
        """
        return self._config['character']

    def set_character(self, character):
        """
        Set the character.
        """
        self._config['character'] = character
        return self

    def tokenizer_override(self):
        """
        The override class.
        """
        # Bug fix: the accessors previously used the key
        # 'tokenizer-override', but the constructor stores the value
        # under 'tokenizer-class', so reading always raised KeyError.
        return self._config['tokenizer-class']

    def set_tokenizer_override(self, override):
        """
        Set the override class.
        """
        self._config['tokenizer-class'] = override
        return self

class FieldPath:
    """
    A field path.
    """
    def __init__(self, path, weight):
        """
        Initialize a field path.
        """
        self._config = {
            "path": path,
            "weight": weight
            }

    def path(self):
        """
        The path.
        """
        return self._config['path']

    def set_path(self, path):
        """
        Set the path.
        """
        self._config['path'] = path
        return self

    def weight(self):
        """
        The weight.
        """
        return self._config['weight']

    def set_weight(self, weight):
        """
        Set the weight.
        """
        self._config['weight'] = weight
        return self

class Field(PropertyLists):
    """
    A field. This class is abstract.
    """
    def __init__(self):
        raise ValueError("Do not instantiate Field directly")

    def field_name(self):
        """
        The field name.
        """
        return self._config['field-name']

    def set_field_name(self, name):
        """
        Set the field name.

        :raises ValidationError: If the name is None or empty.
        """
        if name is None or not name:
            raise ValidationError('Fields must have a non-empty name', name)
        self._config['field-name'] = name
        return self

    def word_lexicons(self):
        """
        Word lexicons, or None if there are none.
        """
        return self._config.get('word-lexicon')

    def add_word_lexicons(self, collation):
        """
        Add a word lexicon.
        """
        return self.add_to_property_list('word-lexicon', collation)

    def set_word_lexicons(self, collations):
        """
        Set the list of word lexicons.
        """
        return self.set_property_list('word-lexicon', collations)

    def remove_word_lexicons(self, collation):
        """
        Remove a word lexicon.
        """
        return self.remove_from_property_list('word-lexicon', collation)

    def included_elements(self):
        """
        The included elements, or None if there are none.
        """
        return self._config.get('included-element')

    def add_included_element(self, element):
        """
        Add an included element.
        """
        return self.add_to_property_list('included-element',
                                         element, IncludedElement)

    def remove_included_element(self, element):
        """
        Remove an included element.
        """
        return self.remove_from_property_list('included-element',
                                              element, IncludedElement)

    def set_included_elements(self, elements):
        """
        Set the included elements.
        """
        return self.set_property_list('included-element', elements,
                                      IncludedElement)

    def excluded_elements(self):
        """
        The excluded elements, or None if there are none.
        """
        return self._config.get('excluded-element')

    def add_excluded_element(self, element):
        """
        Add an excluded element.
        """
        return self.add_to_property_list('excluded-element',
                                         element, ExcludedElement)

    def remove_excluded_element(self, element):
        """
        Remove an excluded element.
        """
        return self.remove_from_property_list('excluded-element',
                                              element, ExcludedElement)

    def set_excluded_elements(self, elements):
        """
        Set the excluded elements.
        """
        return self.set_property_list('excluded-element', elements,
                                      ExcludedElement)

    def tokenizer_overrides(self):
        """
        The tokenizer overrides, or None if there are none.
        """
        return self._config.get('tokenizer-override')

    def add_tokenizer_overrides(self, override):
        """
        Add a tokenizer override.
        """
        # Bug fix: the parameter was previously referenced as the
        # undefined name `overide` (NameError).
        return self.add_to_property_list('tokenizer-override', override,
                                         TokenizerOverride)

    def remove_tokenizer_overrides(self, override):
        """
        Remove a tokenizer override.
        """
        # Bug fix: was the undefined name `overide` (NameError).
        return self.remove_from_property_list('tokenizer-override', override,
                                              TokenizerOverride)

    def set_tokenizer_overrides(self, overrides):
        """
        Set the list of tokenizer overrides.
        """
        # Bug fix: was the undefined name `overides` (NameError).
        return self.set_property_list('tokenizer-override', overrides,
                                      TokenizerOverride)

    def stemmed_searches(self):
        """
        Stemmed searches, or None if unset.
        """
        return self._config.get('stemmed-searches')

    def set_stemmed_searches(self, stemmed_searches):
        """
        Set stemmed searches.
        """
        self._config['stemmed-searches'] = stemmed_searches
        return self

    def word_searches(self):
        """
        Word searches, or None if unset.
        """
        return self._config.get('word-searches')

    def set_word_searches(self, word_searches):
        """
        Set word searches.
        """
        self._config['word-searches'] = assert_boolean(word_searches)
        return self

    def field_value_searches(self):
        """
        Field value searches, or None if unset.
        """
        return self._config.get('field-value-searches')

    def set_field_value_searches(self, field_value_searches):
        """
        Set field value searches.
        """
        self._config['field-value-searches'] \
          = assert_boolean(field_value_searches)
        return self

    def field_value_positions(self):
        """
        Field value positions, or None if unset.
        """
        return self._config.get('field-value-positions')

    def set_field_value_positions(self, field_value_positions):
        """
        Set field value positions.
        """
        self._config['field-value-positions'] \
          = assert_boolean(field_value_positions)
        return self

    def fast_phrase_searches(self):
        """
        Fast phrase searches, or None if unset.
        """
        return self._config.get('fast-phrase-searches')

    def set_fast_phrase_searches(self, fast_phrase_searches):
        """
        Set fast phrase searches.
        """
        self._config['fast-phrase-searches'] \
          = assert_boolean(fast_phrase_searches)
        return self

    def fast_case_sensitive_searches(self):
        """
        Fast case-sensitive searches, or None if unset.
        """
        return self._config.get('fast-case-sensitive-searches')

    def set_fast_case_sensitive_searches(self, fast_case_sensitive_searches):
        """
        Set fast case-sensitive searches.
        """
        self._config['fast-case-sensitive-searches'] \
          = assert_boolean(fast_case_sensitive_searches)
        return self

    def fast_diacritic_sensitive_searches(self):
        """
        Fast diacritic-sensitive searches, or None if unset.
        """
        return self._config.get('fast-diacritic-sensitive-searches')

    def set_fast_diacritic_sensitive_searches(self, fdss):
        """
        Set fast diacritic-sensitive searches.
        """
        self._config['fast-diacritic-sensitive-searches'] \
          = assert_boolean(fdss)
        return self

    def trailing_wildcard_searches(self):
        """
        Trailing wildcard searches, or None if unset.
        """
        # Bug fix: the getter previously tested the underscore-spelled
        # key 'trailing_wildcard_searches', but the setter (and the
        # Management API payload) store the hyphenated key, so the
        # getter always returned None.
        return self._config.get('trailing-wildcard-searches')

    def set_trailing_wildcard_searches(self, trailing_wildcard_searches):
        """
        Set trailing wildcard searches.
        """
        self._config['trailing-wildcard-searches'] \
          = assert_boolean(trailing_wildcard_searches)
        return self

    def trailing_wildcard_word_positions(self):
        """
        Trailing wildcard word positions, or None if unset.
        """
        # Bug fix: same underscore/hyphen key mismatch as above.
        return self._config.get('trailing-wildcard-word-positions')

    def set_trailing_wildcard_word_positions(self,
                                             trailing_wildcard_word_positions):
        """
        Set trailing wildcard word positions.
        """
        self._config['trailing-wildcard-word-positions'] \
          = assert_boolean(trailing_wildcard_word_positions)
        return self

    def three_character_searches(self):
        """
        Three character searches, or None if unset.
        """
        # Bug fix: same underscore/hyphen key mismatch as above.
        return self._config.get('three-character-searches')

    def set_three_character_searches(self, three_character_searches):
        """
        Set three character searches.
        """
        self._config['three-character-searches'] \
          = assert_boolean(three_character_searches)
        return self

    def three_character_word_positions(self):
        """
        Three character word positions, or None if unset.
        """
        # Bug fix: same underscore/hyphen key mismatch as above.
        return self._config.get('three-character-word-positions')

    def set_three_character_word_positions(self,
                                           three_character_word_positions):
        """
        Set three character word positions.
        """
        self._config['three-character-word-positions'] \
          = assert_boolean(three_character_word_positions)
        return self
+ """ + if 'three_character_word_positions' in self._config: + return self._config['three-character-word-positions'] + return None + + def set_three_character_word_positions(self, three_character_word_positions): + """ + Set three character word positions. + """ + self._config['three-character-word-positions'] = assert_boolean(three_character_word_positions) + return self + + def two_character_searches(self): + """ + Two character searches. + """ + if 'two_character_searches' in self._config: + return self._config['two-character-searches'] + return None + + def set_two_character_searches(self, two_character_searches): + """ + Set two character searches. + """ + self._config['two-character-searches'] = assert_boolean(two_character_searches) + return self + + def one_character_searches(self): + """ + One character searches. + """ + if 'one_character_searches' in self._config: + return self._config['one-character-searches'] + return None + + def set_one_character_searches(self, one_character_searches): + """ + Set one character searches. + """ + self._config['one-character-searches'] = assert_boolean(one_character_searches) + return self + + def unmarshal(self, field): + """ + Construct a new field from a flat structure. This method is + principally used to construct an object from a Management API + payload. The configuration passed in is largely assumed to be + valid. + + :param: config: A hash of properties + :return: A newly constructed field object with the specified properties. 
+ """ + for key in field: + if key == 'stemmed-searches': + self._config[key] = field[key] + elif key == 'word-searches': + self._config[key] = (field[key] == 'true') + elif key == 'word-searches': + self._config[key] = (field[key] == 'true') + elif key == 'field-value-searches': + self._config[key] = (field[key] == 'true') + elif key == 'field-value-positions': + self._config[key] = (field[key] == 'true') + elif key == 'fast-phrase-searches': + self._config[key] = (field[key] == 'true') + elif key == 'fast-case-sensitive-searches': + self._config[key] = (field[key] == 'true') + elif key == 'fast-diacritic-sensitive-searches': + self._config[key] = (field[key] == 'true') + elif key == 'trailing-wildcard-searches': + self._config[key] = (field[key] == 'true') + elif key == 'trailing-wildcard-word-positions': + self._config[key] = (field[key] == 'true') + elif key == 'three-character-searches': + self._config[key] = (field[key] == 'true') + elif key == 'three-character-word-positions': + self._config[key] = (field[key] == 'true') + elif key == 'two-character-searches': + self._config[key] = (field[key] == 'true') + elif key == 'one-character-searches': + self._config[key] = (field[key] == 'true') + elif key == 'word-lexicon': + self._config[key] = field[key] + elif key == 'included-element': + elems = [] + for item in field[key]: + elem = IncludedElement( + item['namespace-uri'], + item['localname'], + item['weight'], + item['attribute-namespace-uri'], + item['attribute-localname'], + item['attribute-value']) + elems.append(elem) + self._config[key] = elems + elif key == 'excluded-element': + elems = [] + for item in field[key]: + elem = ExcludedElement( + item['namespace-uri'], + item['localname'], + item['attribute-namespace-uri'], + item['attribute-localname'], + item['attribute-value']) + elems.append(elem) + self._config[key] = elems + elif key == 'tokenizer-override': + elems = [] + for item in field[key]: + over = TokenizerOverride( + item['character'], + 
item['tokenizer-class']) + elems.append(over) + self._config[key] = elems + elif (key == 'field-name' + or key == 'include-root' + or key == 'field-path'): + pass + else: + raise ValueError("Unexpected key in field: {0}".format(key)) + + def marshal(self): + """ + Return a flat structure suitable for conversion to JSON or XML. + + :return: A hash of the keys in this object and their values, recursively. + """ + struct = { } + for key in self._config: + if (key == 'included-element' + or key == 'excluded-element' + or key == 'tokenizer-override' + or key == 'field-path'): + jlist = [] + for index in self._config[key]: + jlist.append(index._config) + struct[key] = jlist + else: + struct[key] = self._config[key]; + return struct + +class RootField(Field): + """ + A root field. + """ + def __init__(self, field_name, include_root=False, includes=None, + excludes=None, tokenizer_overrides=None): + """ + Create a root field. + """ + self._config = { + 'field-name': field_name, + 'include-root': include_root + } + + if includes is not None: + self.set_included_elements(includes) + + if excludes is not None: + self.set_excluded_elements(excludes) + + if tokenizer_overrides is not None: + self.set_tokenizer_overrides(tokenizer_overrides) + + + def include_root(self): + """ + Include the root? + """ + return self._config['include-root'] + + def set_include_root(self, name): + """ + Set the included root. + """ + self._config['include-root'] = name + return self + +class WordQuery(RootField): + """ + Word query settings. In the database configuration, word query settings + are stored as a field with an empty name. They're represented separately + in the Python API. + """ + def __init__(self, include_root): + """ + Create word query settings. + """ + self._config = { + 'field-name': "", + 'include-root': include_root + } + + def field_name(self): + """ + Always returns None. + """ + return None + + def set_field_name(self, name): + """ + Raises an error. 
+ """ + raise ValueError("Cannot set name on WordQuery", name) + +class PathField(Field): + """ + A path field. + """ + def __init__(self, field_name, paths): + """ + Create a path field. + """ + self._config = { + 'field-name': field_name, + 'field-path': assert_list_of_type(paths, FieldPath) + } + + def field_paths(self): + """ + The field paths. + """ + return self._config['field-path'] + + def add_field_path(self, path): + """ + Add a field path. + """ + return self.add_to_property_list('field-path', path, FieldPath) + + def remove_field_path(self, path): + """ + Remove a field path. + """ + return self.remove_from_property_list('field-path', path, FieldPath) + + def set_field_paths(self, paths): + """ + Set the field paths. + """ + if paths is None or not paths: + raise ValueError("The list of field paths cannot be empty", paths) + return self.set_property_list('field-path', paths, FieldPath) diff --git a/python_api/marklogic/models/database/fragment.py b/python_api/marklogic/models/database/fragment.py new file mode 100644 index 0000000..50adac4 --- /dev/null +++ b/python_api/marklogic/models/database/fragment.py @@ -0,0 +1,108 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2015 MarkLogic Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0# +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# File History +# ------------ +# +# Norman Walsh 05/07/2015 Initial development + +""" +Classes for dealing with database fragment parents and roots. 
+""" + +from __future__ import unicode_literals, print_function, absolute_import + +import sys +import requests +import json +from marklogic.models.utilities.validators import * +from marklogic.models.utilities.exceptions import * + +class FragmentRoot: + """ + Fragment root. + """ + def __init__(self, namespace_uri, localname): + """ + Initialize a fragment root. + + :param namespace: The namespace of the fragment root element + :param localname: The local name of the fragment root element + + :return: The fragment root object + """ + + if namespace_uri is None: + namespace_uri = "" + + self._config = { + 'namespace-uri': namespace_uri, + 'localname': localname + } + + def namespace_uri(self): + if self._config['namespace-uri'] == '': + return None + return self._config['namespace-uri'] + + def set_namespace_uri(self, namespace_uri): + self._config['namespace-uri'] = namespace_uri + return self + + def localname(self): + return self._config['localname'] + + def set_localname(self, localname): + self._config['localname'] = localname + return self + +class FragmentParent: + """ + Fragment parent. + """ + def __init__(self, namespace_uri, localname): + """ + Initialize a fragment parent. 
+ + :param namespace: The namespace of the fragment root element + :param localname: The local name of the fragment root element + + :return: The fragment root object + """ + + if namespace_uri is None: + namespace_uri = "" + + self._config = { + 'namespace-uri': namespace_uri, + 'localname': localname + } + + def namespace_uri(self): + if self._config['namespace-uri'] == '': + return None + return self._config['namespace-uri'] + + def set_namespace_uri(self, namespace_uri): + self._config['namespace-uri'] = namespace_uri + return self + + def localname(self): + return self._config['localname'] + + def set_localname(self, localname): + self._config['localname'] = localname + return self diff --git a/python_api/marklogic/models/database/index.py b/python_api/marklogic/models/database/index.py new file mode 100644 index 0000000..09c6b28 --- /dev/null +++ b/python_api/marklogic/models/database/index.py @@ -0,0 +1,684 @@ +# +# Copyright 2015 MarkLogic Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0# +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# File History +# ------------ +# +# Paul Hoehne 03/01/2015 Initial development +# Norman Walsh 05/07/2015 Hacked in more index types +# + +from abc import ABCMeta, abstractmethod +from marklogic.models.utilities.validators import validate_index_type +from marklogic.models.utilities.validators import validate_index_invalid_value_actions +from marklogic.models.utilities.validators import validate_boolean +from marklogic.models.utilities.validators import validate_collation + +class _Index: + """ + Defines a MarkLogic index. + + This is an abstract class. + """ + __metaclass__ = ABCMeta + + def range_value_positions(self): + """ + Returns range value positions. + + :return: The range value positions setting + """ + if 'range-value-positions' in self._config: + return self._config['range-value-positions'] + return None + + def set_range_value_positions(self, pos=False): + """ + Sets the range value positions. + + :param pos: The range value positions setting + + :return: The index object + """ + validate_boolean(pos) + self._config['range-value-positions'] = pos + return self + + def invalid_values(self): + """ + Returns invalid values setting. + + :return: The invalid values setting + """ + if 'invalid-values' in self._config: + return self._config['invalid-values'] + return None + + def set_invalid_values(self, invalid='reject'): + """ + Sets the invalid values setting. + + :param invalid: The invalid values setting + + :return: The index object + """ + validate_index_invalid_value_actions(invalid) + self._config['invalid-values'] = invalid + return self + +class _RangeIndex(_Index): + """ + Defines a MarkLogic range index. + + This is an abstract class. + """ + __metaclass__ = ABCMeta + + def scalar_type(self): + """ + Returns scalar type of the index. + + :return: The scalar type of the index. 
+ """ + if 'scalar-type' in self._config: + return self._config['scalar-type'] + return None + + def set_scalar_type(self, scalar='string'): + """ + Sets the index type + + :param scalar: The scalar type of the index + + :return: The index object + """ + # FIXME: validate_scalar_type(scalar) + self._config['scalar-type'] = scalar + return self + + def collation(self): + """ + Returns the collation. Collations are only relevant to string + indexes. ``None`` is returned for all non-string indexes. + + :return: The collation URI. + """ + if self.scalar_type() != 'string': + return None + + if 'collation' in self._config: + return self._config['collation'] + + return None + + def set_collation(self, collation): + """ + Sets the collation + + :param collation: The collation URI. + + :return: The index object + """ + # FIXME: validate_collation(collation) + self._config['collation'] = collation + return self + +class _LocalNameIndex(): + """ + A mixin for indexes that have local names. + + This is an abstract class. + """ + __metaclass__ = ABCMeta + + def namespace_uri(self): + """ + Returns the namespace-uri. + + :return: The namespace-uri URI. + """ + if 'namespace-uri' in self._config: + return self._config['namespace-uri'] + return None + + def set_namespace_uri(self, namespace_uri): + """ + Sets the namespace URI + + :param namespace_uri: The namespace URI. + + :return: The index object + """ + # FIXME: validate_namespace-uri(namespace_uri) + self._config['namespace-uri'] = namespace_uri + return self + + def localname(self): + """ + Returns the localname. + + :return: The localname URI. + """ + if 'localname' in self._config: + return self._config['localname'] + return None + + def set_localname(self, localname): + """ + Sets the localname + + :param localname: The localname URI. 
+ + :return: The index object + """ + # FIXME: validate_localname(localname) + self._config['localname'] = localname + return self + +class _ParentNameIndex(): + """ + A mixin for indexes that have parent names. + + This is an abstract class. + """ + __metaclass__ = ABCMeta + + def parent_namespace_uri(self): + """ + Returns the parent namespace URI. + + :return: The parent namespace URI. + """ + if 'parent-namespace-uri' in self._config: + return self._config['parent-namespace-uri'] + return None + + def set_parent_namespace_uri(self, parent_namespace_uri): + """ + Sets the parent namespace URI + + :param parent_namespace_uri: The parent namespace URI. + + :return: The index object + """ + # FIXME: validate_namespace-uri(parent_namespace_uri) + self._config['parent-namespace-uri'] = parent_namespace_uri + return self + + def parent_localname(self): + """ + Returns the parent localname. + + :return: The parent localname URI. + """ + if 'parent-localname' in self._config: + return self._config['parent-localname'] + return None + + def set_parent_localname(self, parent_localname): + """ + Sets the parent localname + + :param parent_localname: The parent localname. + + :return: The index object + """ + # FIXME: validate_localname(parent_localname) + self._config['parent-localname'] = parent_localname + return self + +class ElementRangeIndex(_RangeIndex, _LocalNameIndex): + """ + An element range index. + """ + def __init__(self, scalar_type, namespace_uri, localname, + collation="", range_value_positions=False, + invalid_values='reject'): + """ + Create an element range index. 
+ """ + validate_index_type(scalar_type) + validate_boolean(range_value_positions) + validate_index_invalid_value_actions(invalid_values) + validate_collation(scalar_type, collation) + + if collation == '': + collation = None + + self._config = { + 'scalar-type': scalar_type, + 'namespace-uri': namespace_uri, + 'localname': localname, + 'collation': '', + 'range-value-positions': range_value_positions, + 'invalid-values': invalid_values + } + + if collation is not None: + self._config['collation'] = collation + +class AttributeRangeIndex(_RangeIndex, _LocalNameIndex, _ParentNameIndex): + """ + An attribute range index. + """ + def __init__(self, scalar_type, + parent_uri, parent_localname, + namespace_uri, localname, + collation="", range_value_positions=False, + invalid_values='reject'): + """ + Create an attribute range index. + """ + validate_index_type(scalar_type) + validate_boolean(range_value_positions) + validate_index_invalid_value_actions(invalid_values) + validate_collation(scalar_type, collation) + + if collation == '': + collation = None + + self._config = { + 'scalar-type': scalar_type, + 'parent-namespace-uri': parent_uri, + 'parent-localname': parent_localname, + 'namespace-uri': namespace_uri, + 'localname': localname, + 'collation': '', + 'range-value-positions': range_value_positions, + 'invalid-values': invalid_values + } + + if collation is not None: + self._config['collation'] = collation + +class _PathExpressionIndex(): + """ + A mixin for indexes that have path expressions. + + This is an abstract class. + """ + __metaclass__ = ABCMeta + + def path_expression(self): + """ + Returns the path expression, + the path to the root of the field. + + :return: The path expression. + """ + if 'path-expression' in self._config: + return self._config['path-expression'] + return None + + def set_path_expression(self, path_expression): + """ + Sets the path expression. + + :param path_expression: The path expression. 
+ + :return: The index object + """ + # FIXME: validate_path_expression(path_expression) + self._config['path-expression'] = path_expression + return self + +class PathRangeIndex(_RangeIndex, _PathExpressionIndex): + """ + A path range index. + """ + def __init__(self, scalar_type, path_expr, + collation="", range_value_positions=False, + invalid_values='reject'): + """ + Create a path range index. + """ + validate_index_type(scalar_type) + validate_boolean(range_value_positions) + validate_index_invalid_value_actions(invalid_values) + validate_collation(scalar_type, collation) + + if collation == '': + collation = None + + self._config = { + 'scalar-type': scalar_type, + 'path-expression': path_expr, + 'collation': '', + 'range-value-positions': range_value_positions, + 'invalid-values': invalid_values + } + + if collation is not None: + self._config['collation'] = collation + +class FieldRangeIndex(_RangeIndex): + """ + A field range index. + """ + def __init__(self, scalar_type, field_name, + collation="", range_value_positions=False, + invalid_values='reject'): + """ + Create a field range index. + """ + validate_index_type(scalar_type) + validate_boolean(range_value_positions) + validate_index_invalid_value_actions(invalid_values) + validate_collation(scalar_type, collation) + + if collation == '': + collation = None + + self._config = { + 'scalar-type': scalar_type, + 'field-name': field_name, + 'collation': '', + 'range-value-positions': range_value_positions, + 'invalid-values': invalid_values + } + + if collation is not None: + self._config['collation'] = collation + + def field_name(self): + """ + Returns the field name. + + :return: The field name. + """ + if 'field-name' in self._config: + return self._config['field-name'] + return None + + def set_field_name(self, field_name): + """ + Sets the field name. + + :param field_name: The field name. 
+ + :return: The index object + """ + # FIXME: validate_field_name(field_name) + self._config['field-name'] = field_name + return self + +class _GeospatialIndex(_Index): + """ + Defines a MarkLogic geospatal index. + + This is an abstract class. + """ + __metaclass__ = ABCMeta + + def coordinate_system(self): + """ + Returns the coordinate system used for points in the index. + + :return: The coordinate system. + """ + if 'coordinate-system' in self._config: + return self._config['coordinate-system'] + return None + + def set_coordinate_system(self, coordinate_system): + """ + Sets the coordinate system used for points in the index. + + :param coordinate_system: The coordinate system. + + :return: The index object + """ + # FIXME: validate_coordinate_system(coordinate_system) + self._config['coordinate-system'] = coordinate_system + return self + + def point_format(self): + """ + Returns the point format used for points in the index. + + :return: The point format. + """ + if 'point-format' in self._config: + return self._config['point-format'] + return None + + def set_point_format(self, point_format): + """ + Sets the point format used for points in the index. + + :param point_format: The point format. + + :return: The index object + """ + # FIXME: validate_point_format(point_format) + self._config['point-format'] = point_format + return self + +class GeospatialElementIndex(_GeospatialIndex, _ParentNameIndex): + """ + A geospatial element index. + """ + def __init__(self, namespace_uri, localname, + coordinate_system="wgs84", point_format="point", + range_value_positions=False, invalid_values='reject'): + """ + Create a geospatial element index. 
+ """ + validate_coordinate_system(coordinate_system) + validate_point_format(point_format) + validate_boolean(range_value_positions) + validate_index_invalid_value_actions(invalid_values) + + self._config = { + 'namespace-uri': namespace_uri, + 'localname': localname, + 'coordinate-system': coordinate_system, + 'point-format': point_format, + 'range-value-positions': range_value_positions, + 'invalid-values': invalid_values + } + +class GeospatialPathIndex(_GeospatialIndex, _PathExpressionIndex): + """ + A geospatial path index. + """ + def __init__(self, path_expr, + coordinate_system="wgs84", point_format="point", + range_value_positions=False, invalid_values='reject'): + """ + Create a geospatial path index. + """ + validate_coordinate_system(coordinate_system) + validate_point_format(point_format) + validate_boolean(range_value_positions) + validate_index_invalid_value_actions(invalid_values) + + self._config = { + 'path-expression': path_expr, + 'coordinate-system': coordinate_system, + 'point-format': point_format, + 'range-value-positions': range_value_positions, + 'invalid-values': invalid_values + } + +class GeospatialElementChildIndex(GeospatialElementIndex, _ParentNameIndex): + """ + A geospatial element index. + """ + def __init__(self, parent_uri, parent_localname, namespace_uri, localname, + coordinate_system="wgs84", point_format="point", + range_value_positions=False, invalid_values='reject'): + """ + Create a geospatial element index. 
+ """ + validate_coordinate_system(coordinate_system) + validate_point_format(point_format) + validate_boolean(range_value_positions) + validate_index_invalid_value_actions(invalid_values) + + self._config = { + 'parent-namespace-uri': parent_uri, + 'parent-localname': parent_localname, + 'namespace-uri': namespace_uri, + 'localname': localname, + 'coordinate-system': coordinate_system, + 'point-format': point_format, + 'range-value-positions': range_value_positions, + 'invalid-values': invalid_values + } + +class GeospatialElementPairIndex(_GeospatialIndex, _ParentNameIndex): + """ + A geospatial element pair index. + """ + def __init__(self, parent_uri, parent_localname, + long_namespace_uri, long_localname, + lat_namespace_uri, lat_localname, + coordinate_system="wgs84", + range_value_positions=False, invalid_values='reject'): + """ + Create a geospatial element pair index. + """ + validate_coordinate_system(coordinate_system) + validate_boolean(range_value_positions) + validate_index_invalid_value_actions(invalid_values) + + self._config = { + 'parent-namespace-uri': parent_uri, + 'parent-localname': parent_localname, + 'longitude-namespace-uri': long_namespace_uri, + 'longitude-localname': long_localname, + 'latitude-namespace-uri': lat_namespace_uri, + 'latitude-localname': lat_localname, + 'coordinate-system': coordinate_system, + 'range-value-positions': range_value_positions, + 'invalid-values': invalid_values + } + + def longitude_namespace_uri(self): + """ + Returns the longitude namespace URI. + + :return: The longitude namespace URI. + """ + if 'longitude-namespace-uri' in self._config: + return self._config['longitude-namespace-uri'] + return None + + def set_longitude_namespace_uri(self, longitude_namespace_uri): + """ + Sets the longitude namespace URI + + :param longitude_namespace_uri: The longitude namespace URI. 
+ + :return: The index object + """ + # FIXME: validate_namespace_uri(longitude_namespace_uri) + self._config['longitude-namespace-uri'] = longitude_namespace_uri + return self + + def longitude_localname(self): + """ + Returns the longitude localname. + + :return: The longitude localname URI. + """ + if 'longitude-localname' in self._config: + return self._config['longitude-localname'] + return None + + def set_longitude_localname(self, longitude_localname): + """ + Sets the longitude localname + + :param longitude_localname: The longitude localname. + + :return: The index object + """ + # FIXME: validate_localname(longitude_localname) + self._config['longitude-localname'] = longitude_localname + return self + + def latitude_namespace_uri(self): + """ + Returns the latitude namespace URI. + + :return: The latitude namespace URI. + """ + if 'latitude-namespace-uri' in self._config: + return self._config['latitude-namespace-uri'] + return None + + def set_latitude_namespace_uri(self, latitude_namespace_uri): + """ + Sets the latitude namespace URI + + :param latitude_namespace_uri: The latitude namespace URI. + + :return: The index object + """ + # FIXME: validate_namespace_uri(latitude_namespace_uri) + self._config['latitude-namespace-uri'] = latitude_namespace_uri + return self + + def latitude_localname(self): + """ + Returns the latitude localname. + + :return: The latitude localname URI. + """ + if 'latitude-localname' in self._config: + return self._config['latitude-localname'] + return None + + def set_latitude_localname(self, latitude_localname): + """ + Sets the latitude localname + + :param latitude_localname: The latitude localname. + + :return: The index object + """ + # FIXME: validate_localname(latitude_localname) + self._config['latitude-localname'] = latitude_localname + return self + +class GeospatialElementAttributePairIndex(GeospatialElementPairIndex): + """ + A geospatial element attribute pair index. 
+ """ + def __init__(self, parent_uri, parent_localname, + long_namespace_uri, long_localname, + lat_namespace_uri, lat_localname, + coordinate_system="wgs84", + range_value_positions=False, invalid_values='reject'): + """ + Create a geospatial element attribute pair index. + """ + validate_coordinate_system(coordinate_system) + validate_boolean(range_value_positions) + validate_index_invalid_value_actions(invalid_values) + + self._config = { + 'parent-namespace-uri': parent_uri, + 'parent-localname': parent_localname, + 'longitude-namespace-uri': long_namespace_uri, + 'longitude-localname': long_localname, + 'latitude-namespace-uri': lat_namespace_uri, + 'latitude-localname': lat_localname, + 'coordinate-system': coordinate_system, + 'range-value-positions': range_value_positions, + 'invalid-values': invalid_values + } diff --git a/python_api/marklogic/models/database/lexicon.py b/python_api/marklogic/models/database/lexicon.py new file mode 100644 index 0000000..da06400 --- /dev/null +++ b/python_api/marklogic/models/database/lexicon.py @@ -0,0 +1,118 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2015 MarkLogic Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0# +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# File History +# ------------ +# +# Norman Walsh 05/08/2015 Initial development + +""" +Classes for dealing with lexicons +""" + +class _Lexicon: + """ + A lexicon. This class is abstract. 
+ """ + def __init__(self): + raise ValueError("Do not instantiate _Lexicon directly") + + def namespace_uri(self): + """ + The namespace URI. + """ + return self._config['namespace-uri'] + + def set_namespace_uri(self, namespace_uri): + """ + Set the namespace URI. + """ + self._config['namespace-uri'] = namespace_uri + return self + + def localname(self): + """ + The localname. + """ + return self._config['localname'] + + def set_localname(self, localname): + """ + Set the localname. + """ + self._config['localname'] = localname + return self + + def collation(self): + """ + The collation. + """ + return self._config['collation'] + + def set_collation(self, collation): + """ + Set the collation. + """ + self._config['collation'] = collation + return self + +class ElementWordLexicon(_Lexicon): + """ + An elmeent word lexicon + """ + def __init__(self, namespace_uri, localname, + collation="http://marklogic.com/collation/"): + """ + Create an elmeent word lexicon + """ + self._config = { + 'namespace-uri': namespace_uri, + 'localname': localname, + 'collation': collation + } + +class AttributeWordLexicon(_Lexicon): + """ + An element attribute word lexicion. + """ + def __init__(self, parent_namespace_uri, parent_localname, + namespace_uri, localname, + collation="http://marklogic.com/collation/"): + """ + Create an element attribute word lexicion. 
+ """ + self._config = { + 'parent-namespace-uri': parent_namespace_uri, + 'parent-localname': parent_localname, + 'namespace-uri': namespace_uri, + 'localname': localname, + 'collation': collation + } + + def parent_namespace_uri(self): + return self._config['parent-namespace-uri'] + + def set_parent_namespace_uri(self, namespace_uri): + self._config['parent-namespace-uri'] = namespace_uri + return self + + def parent_localname(self): + return self._config['parent-localname'] + + def set_parent_localname(self, localname): + self._config['parent-localname'] = localname + return self + diff --git a/python_api/marklogic/models/database/mergeblackout.py b/python_api/marklogic/models/database/mergeblackout.py new file mode 100644 index 0000000..37d60eb --- /dev/null +++ b/python_api/marklogic/models/database/mergeblackout.py @@ -0,0 +1,274 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2015 MarkLogic Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0# +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# File History +# ------------ +# +# Norman Walsh 05/07/2015 Initial development + +""" +Classes for dealing with database merge blackouts +""" + +class MergeBlackout: + """ + A merge blackout period. This is an abstract class. + """ + def __init__(self): + raise ValueError("Do not instantiate MergeBlackout directly") + + def blackout_type(self): + """ + The blackout type. + """ + return self._config['blackout_type'] + + def limit(self): + """ + The limit. 
+ """ + return self._config['limit'] + + def merge_priority(self): + """ + The merge priority. + """ + return self._config['merge-priority'] + + @classmethod + def recurringDuration(cls, priority, limit, days, start_time, duration): + """ + Create a recurring blackout with a duration. + """ + # FIXME: validate args + return MergeBlackoutRecurringDuration(priority,limit,days, + start_time, duration) + + @classmethod + def recurringStartEnd(cls, priority, limit, days, start_time, end_time): + """ + Create a recurring blackout with a start and end time. + """ + # FIXME: validate args + return MergeBlackoutRecurringStartEnd(priority,limit,days, + start_time, end_time) + @classmethod + def recurringAllDay(cls, priority, limit, days): + """ + Create a recurring blackout that lasts all day. + """ + # FIXME: validate args + return MergeBlackoutRecurringAllDay(priority,limit, days) + + @classmethod + def oneTimeDuration(cls, priority, limit, start_date, start_time, duration): + """ + Create a one-time blackout with a duration. + """ + # FIXME: validate args + return MergeBlackoutOneTimeDuration(priority,limit, + start_date, start_time, duration) + + @classmethod + def oneTimeStartEnd(cls, priority, limit, + start_date, start_time, + end_date, end_time): + """ + Create a one-time blackout with a start and end time. + """ + # FIXME: validate args + return MergeBlackoutOneTimeStartEnd(priority,limit, + start_date, start_time, + end_date, end_time) + + +class MergeBlackoutRecurringDuration(MergeBlackout): + """ + A recurring merge blackout period for a duration + """ + def __init__(self, priority, limit, days, start_time, duration): + """ + Create a recurring merge blackout period for a duration + """ + self._config = { + 'blackout-type': 'recurring', + 'merge-priority': priority, + 'limit': limit, + 'day': days, + 'period': { + 'start-time': start_time, + 'duration': duration + } + } + + def days(): + """ + The days. 
+ """ + return self._config['days'] + + def start_time(): + """ + The start time. + """ + return self._config['period']['start-time'] + + def duration(): + """ + The duration. + """ + return self._config['period']['duration'] + +class MergeBlackoutRecurringStartEnd(MergeBlackout): + """ + A recurring merge blackout period with start and end times + """ + def __init__(self, priority, limit, days, start_time, end_time): + """ + Create a recurring merge blackout period with start and end times + """ + self._config = { + 'blackout-type': "recurring", + 'merge-priority': priority, + 'limit': limit, + 'day': days, + 'period': { + 'start-time': start_time, + 'end-time': end_time + } + } + + def days(): + """ + The days. + """ + return self._config['days'] + + def start_time(): + """ + The start time. + """ + return self._config['period']['start-time'] + + def end_time(): + """ + The end time. + """ + return self._config['period']['end-time'] + +class MergeBlackoutRecurringAllDay(MergeBlackout): + """ + A recurring merge blackout period for a whole day + """ + def __init__(self, priority, limit, days): + """ + Create a recurring merge blackout period for a whole day + """ + self._config = { + 'blackout-type': "recurring", + 'merge-priority': priority, + 'limit': limit, + 'day': days, + 'period': None + } + + def days(): + """ + The days. + """ + return self._config['days'] + +class MergeBlackoutOneTimeDuration(MergeBlackout): + """ + A one time merge blackout period with a duration + """ + def __init__(self, priority, limit, start_date, start_time, duration): + """ + Create a one time merge blackout period with a duration + """ + self._config = { + 'blackout-type': "once", + 'merge-priority': priority, + 'limit': limit, + 'period': { + 'start-date': start_date, + 'start-time': start_time, + 'duration': duration + } + } + + def start_date(): + """ + The start date. + """ + return self._config['period']['start-date'] + + def start_time(): + """ + The start time. 
+ """ + return self._config['period']['start-time'] + + def duration(): + """ + The duration. + """ + return self._config['period']['duration'] + + +class MergeBlackoutOneTimeStartEnd(MergeBlackout): + """ + A one time merge blackout period with start and end times + """ + def __init__(self, priority, limit, start_date, start_time, end_date, end_time): + """ + Create a one time merge blackout period with start and end times + """ + self._config = { + 'blackout-type': "once", + 'merge-priority': priority, + 'limit': limit, + 'period': { + 'start-date': start_date, + 'start-time': start_time, + 'end-date': end_date, + 'end-time': end_time, + } + } + + def start_date(): + """ + The start date. + """ + return self._config['period']['start-date'] + + def start_time(): + """ + The start time. + """ + return self._config['period']['start-time'] + + def end_date(): + """ + The end date. + """ + return self._config['period']['end-date'] + + def end_time(): + """ + The end time. + """ + return self._config['period']['end-time'] diff --git a/python_api/marklogic/models/database/namelist.py b/python_api/marklogic/models/database/namelist.py new file mode 100644 index 0000000..528ffcf --- /dev/null +++ b/python_api/marklogic/models/database/namelist.py @@ -0,0 +1,93 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2015 MarkLogic Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0# +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""
Classes for dealing with lists of names, a namespace plus a list of localnames.
(python_api/marklogic/models/database/namelist.py — Apache License 2.0,
MarkLogic Corporation 2015)
"""

from marklogic.models.utilities.utilities import PropertyLists
# FIX: ValidationError was raised below but never imported.
# NOTE(review): assumed to live in the validators module — confirm.
from marklogic.models.utilities.validators import ValidationError

class NameList(PropertyLists):
    """
    A database name list: a namespace URI plus one or more localnames.
    """
    def __init__(self, namespace_uri, localnames):
        """
        Create a name list.

        :param namespace_uri: The namespace uri (i.e. 'http://bar.com')
        :param localnames: A localname or a list of localnames
        :raises ValidationError: if localnames is neither a string nor a
            list of strings
        """
        if isinstance(localnames, str):
            names = [localnames]
        else:
            if not isinstance(localnames, list):
                raise ValidationError("List of names", repr(localnames))
            for name in localnames:
                if not isinstance(name, str):
                    raise ValidationError("List of names.", repr(localnames))
            names = localnames

        self._config = {
            'namespace-uri': namespace_uri,
            'localname': names
        }

    def namespace_uri(self):
        """
        The namespace URI.
        """
        return self._config['namespace-uri']

    def set_namespace_uri(self, namespace_uri):
        """
        Set the namespace URI. Returns `self` for chaining.
        """
        self._config['namespace-uri'] = namespace_uri
        return self

    def localnames(self):
        """
        The list of localnames, or None if none are configured.
        """
        # FIX: the original tested `'localname' in self._config['localname']`,
        # i.e. membership in the list itself, so it returned None unless a
        # localname literally equalled the string 'localname'. The intended
        # test is for the key's presence in the config dict.
        if 'localname' in self._config:
            return self._config['localname']
        return None

    def add_localname(self, name):
        """
        Add a localname.
        """
        return self.add_to_property_list('localname', name)

    def remove_localname(self, name):
        """
        Remove a localname.
        """
        return self.remove_from_property_list('localname', name)

    def set_localnames(self, names):
        """
        Set the list of localnames.
        """
        return self.set_property_list('localname', names)

# ----------------------------------------------------------------------
# python_api/marklogic/models/database/path.py
# (new file in this patch; Apache License 2.0, MarkLogic Corporation 2015)
# ----------------------------------------------------------------------

"""
Classes for dealing with path namespaces
"""

class PathNamespace:
    """
    A database path namespace: a prefix bound to a namespace URI for use
    in path expressions.
    """
    def __init__(self, prefix, namespace_uri):
        """
        Create a path namespace.

        :param prefix: The prefix to use in field (i.e. 'foo')
        :param namespace_uri: The namespace uri (i.e. 'http://bar.com')
        """
        self._config = {
            'prefix': prefix,
            'namespace-uri': namespace_uri
        }

    def prefix(self):
        """
        The prefix.
        """
        return self._config['prefix']

    def set_prefix(self, prefix):
        """
        Set the prefix. Returns `self` for chaining.
        """
        self._config['prefix'] = prefix
        return self

    def namespace_uri(self):
        """
        The namespace URI.
        """
        return self._config['namespace-uri']

    def set_namespace_uri(self, namespace_uri):
        """
        Set the namespace URI. Returns `self` for chaining.
        """
        self._config['namespace-uri'] = namespace_uri
        return self

# ----------------------------------------------------------------------
# python_api/marklogic/models/database/ruleset.py
# (new file in this patch; Apache License 2.0, MarkLogic Corporation 2015)
# ----------------------------------------------------------------------

"""
Classes for dealing with rulesets.
"""

class RuleSet:
    """
    A database rule set, identified by its location.
    """
    def __init__(self, location):
        """
        Create a rule set.

        :param location: the ruleset location
        """
        self._config = {
            'location': location
        }

    def location(self):
        """
        The location.
        """
        return self._config['location']

    def set_location(self, location):
        """
        Set the location. Returns `self` for chaining.
        """
        self._config['location'] = location
        return self
"""
Classes for dealing with scheduled backups
(python_api/marklogic/models/database/scheduledbackup.py — Apache License 2.0,
MarkLogic Corporation 2015)
"""

# FIX: assert_type was called throughout but never imported.
# NOTE(review): assumed to live in the validators module — confirm.
from marklogic.models.utilities.validators import assert_type

class ScheduledDatabaseBackup:
    """
    A scheduled database backup. This is an abstract class; use one of the
    factory classmethods (minutely, hourly, daily, weekly, monthly, once)
    to create a concrete backup.
    """
    def __init__(self):
        raise ValueError("Do not instantiate ScheduledDatabaseBackup directly")

    def backup_id(self):
        """
        The backup id, or None if the server has not assigned one.
        """
        if 'backup-id' in self._config:
            return self._config['backup-id']
        return None

    def backup_enabled(self):
        """
        Backup enabled.
        """
        return self._config['backup-enabled']

    def set_backup_enabled(self, enabled):
        """
        Set backup enabled. Returns `self` for chaining.
        """
        self._config['backup-enabled'] = assert_type(enabled, bool)
        return self

    def backup_directory(self):
        """
        The backup directory.
        """
        return self._config['backup-directory']

    def set_backup_directory(self, value):
        """
        Set the backup directory. Returns `self` for chaining.
        """
        self._config['backup-directory'] = value
        return self

    def backup_type(self):
        """
        The backup type.
        """
        return self._config['backup-type']

    def backup_timestamp(self):
        """
        The backup timestamp.
        """
        return self._config['backup-timestamp']

    def max_backups(self):
        """
        The maximum number of backups.
        """
        return self._config['max-backups']

    def set_max_backups(self, value):
        """
        Set the maximum number of backups. Returns `self` for chaining.
        """
        self._config['max-backups'] = value
        return self

    def backup_security_database(self):
        """
        Backup the security database?
        """
        return self._config['backup-security-database']

    def set_backup_security_database(self, value):
        """
        Set backup the security database. Returns `self` for chaining.
        """
        self._config['backup-security-database'] = assert_type(value, bool)
        return self

    def backup_schemas_database(self):
        """
        Backup the schema database?
        """
        return self._config['backup-schemas-database']

    def set_backup_schemas_database(self, value):
        """
        Set backup the schema database. Returns `self` for chaining.
        """
        self._config['backup-schemas-database'] = assert_type(value, bool)
        return self

    def backup_triggers_database(self):
        """
        Backup the triggers database?
        """
        return self._config['backup-triggers-database']

    def set_backup_triggers_database(self, value):
        """
        Set backup the triggers database. Returns `self` for chaining.
        """
        self._config['backup-triggers-database'] = assert_type(value, bool)
        return self

    def include_replicas(self):
        """
        Include replicas?
        """
        return self._config['include-replicas']

    def set_include_replicas(self, value):
        """
        Set include replicas. Returns `self` for chaining.
        """
        self._config['include-replicas'] = assert_type(value, bool)
        return self

    def incremental_backup(self):
        """
        Incremental backup?
        """
        return self._config['incremental-backup']

    def set_incremental_backup(self, value):
        """
        Set incremental backup. Returns `self` for chaining.
        """
        self._config['incremental-backup'] = assert_type(value, bool)
        return self

    def journal_archiving(self):
        """
        Journal archiving?
        """
        return self._config['journal-archiving']

    def set_journal_archiving(self, value):
        """
        Set journal archiving. Returns `self` for chaining.
        """
        self._config['journal-archiving'] = assert_type(value, bool)
        return self

    def journal_archive_path(self):
        """
        The journal archive path.
        """
        return self._config['journal-archive-path']

    def set_journal_archive_path(self, value):
        """
        Set the journal archive path. Returns `self` for chaining.
        """
        self._config['journal-archive-path'] = value
        return self

    def journal_archive_lag_limit(self):
        """
        The journal archive lag limit.
        """
        # FIX: original used the key 'journal-archive-lag_limit' (underscore)
        # but every constructor stores 'journal-archive-lag-limit'.
        return self._config['journal-archive-lag-limit']

    def set_journal_archive_lag_limit(self, value):
        """
        Set the journal archive lag limit. Returns `self` for chaining.
        """
        # FIX: same key mismatch as journal_archive_lag_limit().
        self._config['journal-archive-lag-limit'] = assert_type(value, int)
        return self

    @classmethod
    def minutely(cls, backup_dir, period,
                 max_backups=2, backup_security=True, backup_schemas=True,
                 backup_triggers=True, include_replicas=True,
                 incremental_backup=False, journal_archiving=False,
                 journal_archive_path="", lag_limit=15):
        """
        Create a minutely backup.
        """
        return ScheduledDatabaseBackupMinutely(backup_dir, period,
                   max_backups, backup_security, backup_schemas,
                   backup_triggers, include_replicas,
                   incremental_backup, journal_archiving,
                   journal_archive_path, lag_limit)

    @classmethod
    def hourly(cls, backup_dir, period, start_time,
               max_backups=2, backup_security=True, backup_schemas=True,
               backup_triggers=True, include_replicas=True,
               incremental_backup=False, journal_archiving=False,
               journal_archive_path="", lag_limit=15):
        """
        Create an hourly backup.
        """
        return ScheduledDatabaseBackupHourly(backup_dir, period, start_time,
                   max_backups, backup_security, backup_schemas,
                   backup_triggers, include_replicas,
                   incremental_backup, journal_archiving,
                   journal_archive_path, lag_limit)

    @classmethod
    def daily(cls, backup_dir, period, start_time,
              max_backups=2, backup_security=True, backup_schemas=True,
              backup_triggers=True, include_replicas=True,
              incremental_backup=False, journal_archiving=False,
              journal_archive_path="", lag_limit=15):
        """
        Create a daily backup.
        """
        return ScheduledDatabaseBackupDaily(backup_dir, period, start_time,
                   max_backups, backup_security, backup_schemas,
                   backup_triggers, include_replicas,
                   incremental_backup, journal_archiving,
                   journal_archive_path, lag_limit)

    @classmethod
    def weekly(cls, backup_dir, period, days, start_time,
               max_backups=2, backup_security=True, backup_schemas=True,
               backup_triggers=True, include_replicas=True,
               incremental_backup=False, journal_archiving=False,
               journal_archive_path="", lag_limit=15):
        """
        Create a weekly backup.
        """
        return ScheduledDatabaseBackupWeekly(backup_dir, period, days, start_time,
                   max_backups, backup_security, backup_schemas,
                   backup_triggers, include_replicas,
                   incremental_backup, journal_archiving,
                   journal_archive_path, lag_limit)

    @classmethod
    def monthly(cls, backup_dir, period, month_day, start_time,
                max_backups=2, backup_security=True, backup_schemas=True,
                backup_triggers=True, include_replicas=True,
                incremental_backup=False, journal_archiving=False,
                journal_archive_path="", lag_limit=15):
        """
        Create a monthly backup.
        """
        return ScheduledDatabaseBackupMonthly(backup_dir, period, month_day,
                   start_time,
                   max_backups, backup_security, backup_schemas,
                   backup_triggers, include_replicas,
                   incremental_backup, journal_archiving,
                   journal_archive_path, lag_limit)

    @classmethod
    def once(cls, backup_dir, start_date, start_time,
             max_backups=2, backup_security=True, backup_schemas=True,
             backup_triggers=True, include_replicas=True,
             incremental_backup=False, journal_archiving=False,
             journal_archive_path="", lag_limit=15):
        """
        Create a one-time backup.
        """
        return ScheduledDatabaseBackupOnce(backup_dir, start_date, start_time,
                   max_backups, backup_security, backup_schemas,
                   backup_triggers, include_replicas,
                   incremental_backup, journal_archiving,
                   journal_archive_path, lag_limit)


class ScheduledDatabaseBackupMinutely(ScheduledDatabaseBackup):
    """
    A minutely scheduled database backup.
    """
    def __init__(self, backup_dir, period,
                 max_backups=2, backup_security=True, backup_schemas=True,
                 backup_triggers=True, include_replicas=True,
                 incremental_backup=False, journal_archiving=False,
                 journal_archive_path="", lag_limit=15):
        """
        Create a minutely backup.
        """
        self._config = {
            'backup-type': 'minutely',
            'backup-enabled': True,
            'backup-directory': backup_dir,
            'backup-period': period,
            'max-backups': max_backups,
            'backup-security-database': backup_security,
            'backup-schemas-database': backup_schemas,
            'backup-triggers-database': backup_triggers,
            'include-replicas': include_replicas,
            'journal-archiving': journal_archiving,
            'journal-archive-path': journal_archive_path,
            'journal-archive-lag-limit': lag_limit
        }
        # FIX: the incremental_backup parameter was silently dropped here
        # (the Weekly and Once variants honored it); store it consistently.
        if incremental_backup is not None:
            self._config['incremental-backup'] = incremental_backup

    def period(self):
        """
        The period.
        """
        return self._config['backup-period']

class ScheduledDatabaseBackupHourly(ScheduledDatabaseBackup):
    """
    An hourly scheduled database backup.
    """
    def __init__(self, backup_dir, period, start_time,
                 max_backups=2, backup_security=True, backup_schemas=True,
                 backup_triggers=True, include_replicas=True,
                 incremental_backup=False, journal_archiving=False,
                 journal_archive_path="", lag_limit=15):
        """
        Create an hourly backup.
        """
        self._config = {
            'backup-type': 'hourly',
            'backup-enabled': True,
            'backup-directory': backup_dir,
            'backup-period': period,
            'backup-start-time': start_time,
            'max-backups': max_backups,
            'backup-security-database': backup_security,
            'backup-schemas-database': backup_schemas,
            'backup-triggers-database': backup_triggers,
            'include-replicas': include_replicas,
            'journal-archiving': journal_archiving,
            'journal-archive-path': journal_archive_path,
            'journal-archive-lag-limit': lag_limit
        }
        # FIX: incremental_backup was silently dropped (see Weekly/Once).
        if incremental_backup is not None:
            self._config['incremental-backup'] = incremental_backup

    def period(self):
        """
        The period.
        """
        return self._config['backup-period']

    def start_time(self):
        """
        The start time.
        """
        # FIX: original returned 'backup-start-date', a key this class never
        # sets; the constructor stores 'backup-start-time'.
        return self._config['backup-start-time']

class ScheduledDatabaseBackupDaily(ScheduledDatabaseBackup):
    """
    A daily scheduled database backup.
    """
    def __init__(self, backup_dir, period, start_time,
                 max_backups=2, backup_security=True, backup_schemas=True,
                 backup_triggers=True, include_replicas=True,
                 incremental_backup=False, journal_archiving=False,
                 journal_archive_path="", lag_limit=15):
        """
        Create a daily backup.
        """
        self._config = {
            'backup-type': 'daily',
            'backup-enabled': True,
            'backup-directory': backup_dir,
            'backup-period': period,
            'backup-start-time': start_time,
            'max-backups': max_backups,
            'backup-security-database': backup_security,
            'backup-schemas-database': backup_schemas,
            'backup-triggers-database': backup_triggers,
            'include-replicas': include_replicas,
            'journal-archiving': journal_archiving,
            'journal-archive-path': journal_archive_path,
            'journal-archive-lag-limit': lag_limit
        }
        # FIX: incremental_backup was silently dropped (see Weekly/Once).
        if incremental_backup is not None:
            self._config['incremental-backup'] = incremental_backup

    def period(self):
        """
        The period.
        """
        return self._config['backup-period']

    def start_time(self):
        """
        The start time.
        """
        # FIX: original returned the never-set 'backup-start-date' key.
        return self._config['backup-start-time']

class ScheduledDatabaseBackupWeekly(ScheduledDatabaseBackup):
    """
    A weekly scheduled database backup.
    """
    def __init__(self, backup_dir, period, days, start_time,
                 max_backups, backup_security, backup_schemas,
                 backup_triggers, include_replicas,
                 incremental_backup, journal_archiving,
                 journal_archive_path="", lag_limit=15):
        """
        Create a weekly backup.
        """
        self._config = {
            'backup-type': 'weekly',
            'backup-enabled': True,
            'backup-directory': backup_dir,
            'backup-period': period,
            'backup-day': days,
            'backup-start-time': start_time,
            'max-backups': max_backups,
            'backup-security-database': backup_security,
            'backup-schemas-database': backup_schemas,
            'backup-triggers-database': backup_triggers,
            'include-replicas': include_replicas,
            'journal-archiving': journal_archiving,
            'journal-archive-path': journal_archive_path,
            'journal-archive-lag-limit': lag_limit
        }
        if incremental_backup is not None:
            self._config['incremental-backup'] = incremental_backup

    def period(self):
        """
        The period.
        """
        return self._config['backup-period']

    def days(self):
        """
        The days.
        """
        return self._config['backup-day']

    def start_time(self):
        """
        The start time.
        """
        return self._config['backup-start-time']

class ScheduledDatabaseBackupMonthly(ScheduledDatabaseBackup):
    """
    A monthly scheduled database backup.
    """
    def __init__(self, backup_dir, period, month_day, start_time,
                 max_backups=2, backup_security=True, backup_schemas=True,
                 backup_triggers=True, include_replicas=True,
                 incremental_backup=False, journal_archiving=False,
                 journal_archive_path="", lag_limit=15):
        """
        Create a monthly backup.
        """
        self._config = {
            'backup-type': 'monthly',
            'backup-enabled': True,
            'backup-directory': backup_dir,
            'backup-period': period,
            'backup-start-time': start_time,
            'backup-month-day': month_day,
            'max-backups': max_backups,
            'backup-security-database': backup_security,
            'backup-schemas-database': backup_schemas,
            'backup-triggers-database': backup_triggers,
            'include-replicas': include_replicas,
            'journal-archiving': journal_archiving,
            'journal-archive-path': journal_archive_path,
            'journal-archive-lag-limit': lag_limit
        }
        # FIX: incremental_backup was silently dropped (see Weekly/Once).
        if incremental_backup is not None:
            self._config['incremental-backup'] = incremental_backup

    def period(self):
        """
        The period.
        """
        return self._config['backup-period']

    def month_day(self):
        """
        The day of the month.
        """
        return self._config['backup-month-day']

    def start_time(self):
        """
        The start time.
        """
        return self._config['backup-start-time']

class ScheduledDatabaseBackupOnce(ScheduledDatabaseBackup):
    """
    A one-time scheduled database backup.
    """
    def __init__(self, backup_dir, start_date, start_time,
                 max_backups, backup_security, backup_schemas,
                 backup_triggers, include_replicas,
                 incremental_backup, journal_archiving,
                 journal_archive_path, lag_limit):
        """
        Create a one-time backup.
        """
        self._config = {
            'backup-type': 'once',
            'backup-enabled': True,
            'backup-directory': backup_dir,
            'backup-start-date': start_date,
            'backup-start-time': start_time,
            'max-backups': max_backups,
            'backup-security-database': backup_security,
            'backup-schemas-database': backup_schemas,
            'backup-triggers-database': backup_triggers,
            'include-replicas': include_replicas,
            'journal-archiving': journal_archiving,
            'journal-archive-path': journal_archive_path,
            'journal-archive-lag-limit': lag_limit
        }
        if incremental_backup is not None:
            self._config['incremental-backup'] = incremental_backup

    def start_date(self):
        """
        The start date.
        """
        return self._config['backup-start-date']

    def start_time(self):
        """
        The start time.
        """
        # FIX: original returned 'backup-start-date' — the start *date* —
        # instead of the stored 'backup-start-time'.
        return self._config['backup-start-time']

# ----------------------------------------------------------------------
# python_api/marklogic/models/database/through.py
# (new file in this patch; Apache License 2.0, MarkLogic Corporation 2015)
# ----------------------------------------------------------------------

"""
Classes for dealing with phrase arounds, phrase throughs, and query throughs.
"""

from marklogic.models.utilities.validators import assert_list_of_type
from marklogic.models.utilities.utilities import PropertyLists

class _Through(PropertyLists):
    """
    A phrase through or around. This is an abstract class; instantiate
    one of its concrete subclasses instead.
    """
    def __init__(self):
        raise ValueError("Do not instantiate _Through directly")

    def namespace_uri(self):
        """
        The namespace URI.
        """
        return self._config['namespace-uri']

    def set_namespace_uri(self, namespace_uri):
        """
        Set the namespace URI. Returns `self` for chaining.
        """
        self._config['namespace-uri'] = namespace_uri
        return self

    def localnames(self):
        """
        The list of localnames, or None if none are configured.
        """
        # FIX: the original tested `'localname' in self._config['localname']`
        # (membership in the list itself) instead of the key's presence in
        # the config dict — same bug as NameList.localnames.
        if 'localname' in self._config:
            return self._config['localname']
        return None

    def add_localname(self, name):
        """
        Add a localname.
        """
        return self.add_to_property_list('localname', name)

    def remove_localname(self, name):
        """
        Remove a localname.
        """
        return self.remove_from_property_list('localname', name)

    def set_localnames(self, names):
        """
        Set the list of localnames.
        """
        return self.set_property_list('localname', names)

class PhraseThrough(_Through):
    """
    A phrase through.
    """
    def __init__(self, namespace_uri, localnames):
        """
        Create a phrase through.

        :param namespace_uri: The namespace URI
        :param localnames: A list of localname strings
        """
        self._config = {
            'namespace-uri': namespace_uri,
            'localname': assert_list_of_type(localnames, str)
        }

class PhraseAround(_Through):
    """
    A phrase around.
    """
    def __init__(self, namespace_uri, localnames):
        """
        Create a phrase around.

        :param namespace_uri: The namespace URI
        :param localnames: A list of localname strings
        """
        self._config = {
            'namespace-uri': namespace_uri,
            'localname': assert_list_of_type(localnames, str)
        }

class ElementWordQueryThrough(_Through):
    """
    An element word query through.
    """
    def __init__(self, namespace_uri, localnames):
        """
        Create an element word query through.

        :param namespace_uri: The namespace URI
        :param localnames: A list of localname strings
        """
        self._config = {
            'namespace-uri': namespace_uri,
            'localname': assert_list_of_type(localnames, str)
        }
# python_api/marklogic/models/forest.py (continued from the Apache License
# 2.0 header above; Paul Hoehne 03/01/2015 Initial development)

import socket
import requests
import json
import logging
from .utilities.validators import validate_forest_availability
from .utilities.exceptions import UnexpectedManagementAPIResponse

"""
MarkLogic Forest support classes.
"""

class Forest:
    """
    Encapsulates a MarkLogic forest. Can be added to a database
    configuration to create forests with specific options. There are two
    types of attributes in Forest. The properties can be changed after
    creation. The config is the non-mutable state that is configured when
    the forest is created.
    """
    def __init__(self, name, host=None, data_directory=None,
                 large_data_directory=None, fast_data_directory=None):
        """
        Create a forest description (nothing is sent to the server until
        create() is called).

        :param name: The forest name
        :param host: The host the forest lives on; defaults to the local
            hostname (lowercased) when omitted
        :param data_directory: Optional data directory path
        :param large_data_directory: Optional large data directory path
        :param fast_data_directory: Optional fast data directory path
        """
        self.properties = {
        }

        self.config = {
            'forest-name': name
        }

        if data_directory is not None:
            self.config['data-directory'] = data_directory

        if large_data_directory is not None:
            self.config['large-data-directory'] = large_data_directory

        if fast_data_directory is not None:
            self.config['fast-data-directory'] = fast_data_directory

        if host is not None:
            self.config['host'] = host
        else:
            # Default to this machine; lowercased to match MarkLogic's
            # host naming.
            self.config['host'] = socket.gethostname().lower()

        self.logger = logging.getLogger("marklogic")

    def host(self):
        """
        Return the hostname for this forest

        :return: The hostname
        """
        return self.config['host']

    def set_database(self, db='Documents'):
        """
        The database to which this forest belongs.

        :param db: A database name
        :return: The Forest object
        """
        self.config['database'] = db
        return self

    def database(self):
        """
        Return the database for the forest

        :return: The associated database, or None if not set
        """
        if 'database' in self.config:
            return self.config['database']
        return None

    def data_directory(self):
        """
        Returns the data directory for the forest.

        :return: The data directory path, or None if not set
        """
        if 'data-directory' in self.config:
            return self.config['data-directory']
        return None

    def large_data_directory(self):
        """
        Return the large data directory for the forest

        :return: The large data directory path, or None if not set
        """
        if 'large-data-directory' in self.config:
            return self.config['large-data-directory']
        return None

    def fast_data_directory(self):
        """
        Return the fast data directory for the forest.

        :return: The fast data directory, or None if not set
        """
        if 'fast-data-directory' in self.config:
            return self.config['fast-data-directory']
        return None

    def set_availability(self, which='online'):
        """
        Indicate whether the forest is available.

        :param which: The availability of the forest
        :return: The Forest object
        """
        validate_forest_availability(which)
        self.properties['availability'] = which
        return self

    def availability(self):
        """
        Returns the availability status for the forest.

        :return: Availability status, or None if not set
        """
        if 'availability' in self.properties:
            return self.properties['availability']
        return None

    def forest_name(self):
        """
        Returns the name of the forest.

        :return: The forest name
        """
        return self.config['forest-name']

    def create(self, connection):
        """
        Creates the forest on the MarkLogic server.

        :param connection: The connection to a MarkLogic server
        :return: The Forest object
        :raises Exception: if the management API rejects the request
        """
        uri = "http://{0}:{1}/manage/v2/forests".format(
            connection.host, connection.management_port)
        payload = {}
        payload.update(self.properties)
        payload.update(self.config)

        self.logger.debug("Creating forest: {0}".format(self.forest_name()))
        response = requests.post(uri, json=payload, auth=connection.auth)
        if response.status_code > 299:
            raise Exception(response.text)

        # Re-fetch so the returned object carries server-assigned state.
        return Forest.lookup(connection, self.config['forest-name'])

    def save(self, connection):
        """
        Saves the updated forest configuration to the MarkLogic server.

        :param connection: The connection to a MarkLogic server
        :return: The Forest object
        :raises Exception: if the management API rejects the request
        """
        uri = "http://{0}:{1}/manage/v2/forests/{2}/properties".format(
            connection.host, connection.management_port,
            self.config['forest-name'])
        # NOTE(review): this PUTs self.config to the /properties endpoint,
        # while lookup() populates self.properties from that same endpoint —
        # confirm self.config (not self.properties) is the intended payload.
        response = requests.put(uri, json=self.config, auth=connection.auth)

        if response.status_code > 299:
            raise Exception(response.text)

        return self

    def remove(self, connection):
        """
        Delete a forest from the MarkLogic server.

        :param connection: The connection to a MarkLogic server
        :return: The Forest object
        :raises Exception: if the management API rejects the request
        """
        uri = "http://{0}:{1}/manage/v2/forests/{2}?level=full".format(
            connection.host, connection.management_port,
            self.config['forest-name'])
        response = requests.delete(uri, auth=connection.auth)

        # A 404 means the forest is already gone; treat that as success.
        if response.status_code > 299 and not response.status_code == 404:
            raise Exception(response.text)

        return self

    @classmethod
    def lookup(cls, conn, name):
        """
        Look up a forest's configuration from the MarkLogic server.

        :param conn: The connection to a MarkLogic server
        :param name: The name of the forest
        :return: The Forest object
        :raises UnexpectedManagementAPIResponse: on any non-200 response
        """
        # Placeholder instance whose properties/config are replaced below.
        result = Forest('temp')

        uri = "http://{0}:{1}/manage/v2/forests/{2}/properties".format(
            conn.host, conn.management_port, name)
        response = requests.get(uri, auth=conn.auth,
                                headers={'accept': 'application/json'})
        if response.status_code != 200:
            raise UnexpectedManagementAPIResponse(response.text)

        result.properties = json.loads(response.text)

        uri = 'http://{0}:{1}/manage/v2/forests/{2}?view=config'.format(
            conn.host, conn.management_port, name)
        response = requests.get(uri, auth=conn.auth,
                                headers={'accept': 'application/json'})
        if response.status_code != 200:
            raise UnexpectedManagementAPIResponse(response.text)

        response_json = json.loads(response.text)
        result.config = response_json['forest-config']['config-properties']
        result.config['forest-name'] = response_json['forest-config']['name']

        # The config view doesn't carry the host directly; pull it from the
        # related hosts relation group.
        for relation_group in \
                response_json['forest-config']['relations']['relation-group']:
            if relation_group['typeref'] == 'hosts':
                result.config['host'] = \
                    relation_group['relation'][0]['nameref']

        return result
class Host:
    """
    The Host class encapsulates a MarkLogic host (a member of the
    cluster). Instances are normally obtained via lookup(); the
    accessors read the property map returned by the Management API.
    """
    def __init__(self):
        """
        Create an empty host object.
        """
        self._config = {}

    def host_name(self):
        """
        Returns the host name of the cluster member.

        :return: The member host name
        """
        return self._config['host-name']

    def group_name(self):
        """
        The cluster member's group.

        :return: The host's group
        """
        return self._config['group']

    def bind_port(self):
        """
        The bind port of the cluster member.

        :return: The host's bind port
        """
        return self._config['bind-port']

    def foreign_bind_port(self):
        """
        The foreign bind port.

        :return: The host's foreign bind port
        """
        return self._config['foreign-bind-port']

    def zone(self):
        """
        The zone.

        :return: The zone
        """
        return self._config['zone']

    def bootstrap_host(self):
        """
        Indicates if this is the bootstrap host.

        :return: Bootstrap host indicator
        """
        # The Management API property is spelled "bootstrap-host"; the
        # original code read the misspelled key "boostrap-host", which
        # always raised KeyError against real server responses.
        return self._config['bootstrap-host']

    @classmethod
    def lookup(cls, connection, name):
        """
        Look up an individual host within the cluster.

        :param connection: A connection to a MarkLogic server
        :param name: The name of the host
        :return: The Host object, or None if the host does not exist
        """
        # host.py does not import the exceptions module at file level,
        # so the original error path raised NameError instead of the
        # intended exception; import it locally here.
        from marklogic.models.utilities.exceptions \
            import UnexpectedManagementAPIResponse

        uri = "http://{0}:{1}/manage/v2/hosts/{2}/properties".format(
            connection.host, connection.management_port, name)

        result = None
        response = requests.get(uri, auth=connection.auth,
                                headers={'accept': 'application/json'})
        if response.status_code == 200:
            result = Host()
            result._config = json.loads(response.text)
        elif response.status_code != 404:
            raise UnexpectedManagementAPIResponse(response.text)
        return result

    @classmethod
    def list(cls, connection):
        """
        Lists the names of hosts available on this cluster.

        :param connection: A connection to a MarkLogic server
        :return: A list of host names
        """
        # See lookup() for why this import is function-scoped.
        from marklogic.models.utilities.exceptions \
            import UnexpectedManagementAPIResponse

        uri = "http://{0}:{1}/manage/v2/hosts" \
            .format(connection.host, connection.management_port)

        response = requests.get(uri, auth=connection.auth,
                                headers={'accept': 'application/json'})

        if response.status_code != 200:
            raise UnexpectedManagementAPIResponse(response.text)

        response_json = json.loads(response.text)
        list_items = response_json['host-default-list']['list-items']
        host_count = list_items['list-count']['value']

        result = []
        if host_count > 0:
            for item in list_items['list-item']:
                result.append(item['nameref'])

        return result
class Permission(object):
    """
    Represents a single MarkLogic permission: the pairing of a role
    with a capability. Instances are immutable once constructed.
    """
    def __init__(self, role, capability):
        """
        Create a permission from a role name and a capability.

        :param role: The role name
        :param capability: The capability (validated on construction)
        """
        validate_capability(capability)
        self._config = {'role-name': role, 'capability': capability}

    def role_name(self):
        """
        Return the role-name half of this permission.

        :return: The role name
        """
        return self._config['role-name']

    def capability(self):
        """
        Return the capability half of this permission.

        :return: The capability
        """
        return self._config['capability']
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0# +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# File History +# ------------ +# +# Paul Hoehne 04/02/2015 Initial development +# Norman Walsh 04/29/2015 Hacked role.py into privilege.py +# + +""" +Classes for manipulating MarkLogic privileges. +""" + +from __future__ import unicode_literals, print_function, absolute_import + +import requests +from marklogic.models.utilities import exceptions +from marklogic.models.utilities.validators import validate_custom +from marklogic.models.utilities.validators import validate_privilege_kind +from marklogic.models.utilities.utilities import PropertyLists +import json + +class Privilege(PropertyLists): + """ + The Privilege class encapsulates a MarkLogic privilege. + """ + PRIVLIST = None + + def __init__(self, name, action, kind): + validate_privilege_kind(kind) + + self._config = {} + self._config['privilege-name'] = name + self._config['action'] = action + self.the_kind = kind + self.etag = None + + def privilege_name(self): + """ + Return the name of the privilege. + + :return: The privilege name + """ + return self._config['privilege-name'] + + def set_privilege_name(self, name): + """ + Set the name of the privilege. + + :return: The privilege object + """ + self._config['privilege-name'] = name + return self + + def action(self): + """ + Return the action URI of the privilege. + + :return: The privilege action + """ + return self._config['action'] + + def set_action(self, action): + """ + Set the action URI of the privilege. 
+ + :return: The privilege object + """ + self._config['action'] = action + return self + + def kind(self): + """ + Return the kind of privilege. + + :return: The privilege kind + """ + return self.the_kind + + def set_kind(self, kind): + """ + Set the action URI of the privilege. + + :return: The privilege object + """ + validate_privilege_kind(kind) + self.the_kind = kind + return self + + def role_names(self): + """ + Returns the role names for this privilege + + :return: The list of role names + """ + if u'role' not in self._config: + return None + return self._config[u'role'] + + def set_role_names(self, roles): + """ + Sets the roles for this privilege + + :return: The privilege object + """ + return self.set_property_list('role', roles) + + def add_role_name(self, add_role): + """ + Adds the specified role to roles for this privilege + + :return: The privilege object + """ + return self.add_to_property_list('role', add_role) + + def remove_role_name(self, remove_role): + """ + Removes the specified role to roles for this privilege + + :return: The privilege object + """ + return self.remove_from_property_list('role', remove_role) + + def marshal(self): + """ + Return a flat structure suitable for conversion to JSON or XML. + + :return: A hash of the keys in this object and their values, recursively. + """ + struct = { } + for key in self._config: + struct[key] = self._config[key]; + return struct + + @classmethod + def unmarshal(cls, config): + """ + Construct a new Privilege from a flat structure. This method is + principally used to construct an object from a Management API + payload. The configuration passed in is largely assumed to be + valid. + + :param: config: A hash of properties + :return: A newly constructed Privilege object with the specified properties. 
+ """ + kind = config['kind'] + validate_privilege_kind(kind) + + result = Privilege("temp", "http://example.com/", kind) + result._config = config + result.the_kind = kind + return result + + def create(self, connection): + """ + Creates the Privilege on the MarkLogic server. + + :param connection: The connection to a MarkLogic server + :return: The Privilege object + """ + uri = "http://{0}:{1}/manage/v2/privileges".format(connection.host, connection.management_port) + + post_config = self._config + post_config['kind'] = self.kind() + + response = requests.post(uri, json=post_config, auth=connection.auth) + if response.status_code not in [200, 201, 204]: + raise exceptions.UnexpectedManagementAPIResponse(response.text) + + return self + + def read(self, connection): + """ + Loads the Privilege from the MarkLogic server. This will refresh + the properties of the object. + + :param connection: The connection to a MarkLogic server + :return: The Privilege object + """ + priv = Privilege.lookup(connection, self.privilege_name(), self.kind()) + if priv is None: + return None + else: + self._config = priv._config + self.etag = priv.etag + return self + + def update(self, connection): + """ + Updates the Privilege on the MarkLogic server. + + :param connection: The connection to a MarkLogic server + :return: The Privilege object + """ + uri = "http://{0}:{1}/manage/v2/privileges/{2}/properties?kind={3}" \ + .format(connection.host, connection.management_port, + self.privilege_name(), self.kind()) + + headers = {} + if self.etag is not None: + headers['if-match'] = self.etag + + response = requests.put(uri, json=self._config, auth=connection.auth, + headers=headers) + + if response.status_code not in [200, 204]: + raise exceptions.UnexpectedManagementAPIResponse(response.text) + + if 'etag' in response.headers: + self.etag = response.headers['etag'] + + return self + + def delete(self, connection): + """ + Deletes the Privilege from the MarkLogic server. 
+ + :param connection: The connection to a MarkLogic server + :return: The Privilege object + """ + uri = "http://{0}:{1}/manage/v2/privileges/{2}?kind={3}" \ + .format(connection.host, connection.management_port, + self.privilege_name(), self.kind()) + + headers = {} + if self.etag is not None: + headers['if-match'] = self.etag + + response = requests.delete(uri, auth=connection.auth, headers=headers) + + if (response.status_code not in [200, 204] + and not response.status_code == 404): + raise exceptions.UnexpectedManagementAPIResponse(response.text) + + return self + + @classmethod + def list(cls, connection, include_actions=False): + """ + List all the privilege names. Privilege names are structured values, + they consist of a kind and a name separated by "|". + + If `include_actions` is true, the structured values consist of + kind, name, and action separated by "|". + + :param connection: The connection to a MarkLogic server + :return: A list of Privilege names. + """ + + uri = "http://{0}:{1}/manage/v2/privileges" \ + .format(connection.host, connection.management_port) + + response = requests.get(uri, auth=connection.auth, + headers={'accept': 'application/json'}) + + if response.status_code != 200: + raise exceptions.UnexpectedManagementAPIResponse(response.text) + + results = [] + json_doc = json.loads(response.text) + + for item in json_doc['privilege-default-list']['list-items']['list-item']: + if include_actions: + results.append("{0}|{1}|{2}" \ + .format(item['kind'],item['nameref'],item['action'])) + else: + results.append("{0}|{1}" \ + .format(item['kind'],item['nameref'])) + + return results + + @classmethod + def exists(cls, connection, name, kind=None): + """ + Returns true if (and only if) the specified privilege exists. + + If the name is a structured value consisting of the kind and the + name separated by a "|", as returned by the list() method, then + the kind is optional. 
+ + :param connection: The connection to the MarkLogic database + :param name: The name of the privilege + :param kind: The kind of privilege + :return: The privilege + """ + parts = name.split("|") + if len(parts) == 1: + pass + elif len(parts) == 2 or len(parts) == 3: + if kind is not None and kind != parts[0]: + raise validate_custom("Kinds must match") + kind = parts[0] + name = parts[1] + else: + raise validate_custom("Unparseable privilege name") + + uri = "http://{0}:{1}/manage/v2/privileges/{2}/properties?kind={3}" \ + .format(connection.host, connection.management_port, name, kind) + + response = requests.head(uri, auth=connection.auth) + + if response.status_code == 200: + return True + elif response.status_code == 404: + return False + else: + raise exceptions.UnexpectedManagementAPIResponse(response.text) + + @classmethod + def lookup(cls, connection, name=None, kind=None, action=None): + """ + Look up an individual privilege. + + At least one of name or action must be specified. Privileges can + be looked up directly with a name. If only an action is provided, + the method will get the current list of privileges and search for + the matching action. The list of privileges is stored in + `Privilege.PRIVLIST` and can be reset by calling + `Privilege.flush_cache()`. + + The `kind` must be provided either directly or as part of a + structured name. + + If the name is a structured value consisting of the kind and the + name separated by a "|", as returned by the list() method, then + the kind is optional. 
+ + :param connection: The connection to the MarkLogic database + :param name: The name of the privilege + :param action: The action URI of the privilege + :param kind: The kind of privilege + :return: The privilege + """ + if name is not None: + parts = name.split("|") + if len(parts) == 1: + pass + elif len(parts) == 2 or len(parts) == 3: + if kind is not None and kind != parts[0]: + raise validate_custom("Kinds must match") + kind = parts[0] + name = parts[1] + if action is not None and len(parts) == 3: + if parts[2] != action: + raise validate_custom("Actions must match") + else: + raise validate_custom("Unparseable privilege name") + + if name is None and action is None: + raise validate_custom("Name or action must be specified") + + if kind is None: + raise validate_custom("Kind must be specified") + + if name is None: + return cls._lookup_action(connection, action, kind) + else: + uri = "http://{0}:{1}/manage/v2/privileges/{2}/properties?kind={3}" \ + .format(connection.host, connection.management_port, name, kind) + + response = requests.get(uri, auth=connection.auth, + headers={'accept': 'application/json'}) + + if response.status_code == 200: + result = Privilege.unmarshal(json.loads(response.text)) + if 'etag' in response.headers: + result.etag = response.headers['etag'] + return result + elif response.status_code == 404: + return None + else: + raise exceptions.UnexpectedManagementAPIResponse(response.text) + + @classmethod + def _lookup_action(cls, conn, action, kind): + if Privilege.PRIVLIST is None: + Privilege.PRIVLIST = Privilege.list(conn, include_actions=True) + + for priv in Privilege.PRIVLIST: + parts = priv.split("|") + if parts[0] == kind and parts[2] == action: + return cls.lookup(conn, parts[1], kind) + + return None + + @classmethod + def flush_cache(cls): + """ + Reset the cache of saved privileges. 
+ """ + Privilege.PRIVLIST = None + diff --git a/python_api/marklogic/models/role.py b/python_api/marklogic/models/role.py new file mode 100644 index 0000000..1753f70 --- /dev/null +++ b/python_api/marklogic/models/role.py @@ -0,0 +1,413 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2015 MarkLogic Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0# +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# File History +# ------------ +# +# Paul Hoehne 04/02/2015 Initial development +# + +""" +Role related classes for manipulating MarkLogic roles +""" + +from __future__ import unicode_literals, print_function, absolute_import + +import requests +from marklogic.models.utilities import exceptions +from marklogic.models.utilities.utilities import PropertyLists +import json + +class Role(PropertyLists): + """ + The Role class encapsulates a MarkLogic role. It provides + methods to set/get database attributes. The use of methods will + allow IDEs with tooling to provide auto-completion hints. + """ + + def __init__(self, name): + self._config = {} + self._config['role-name'] = name + self.name = name + self.etag = None + + def role_name(self): + """ + The Role name (unique) + + *role name* is the name of the role. + + :return: The role name + """ + return self._config['role-name'] + + def set_role_name(self, name): + """ + Sets the Role name (unique) + + *role name* is the name of the role. 
+ + :param: The new role name + + :return: The role object + """ + return self._config['role-name'] + + def compartment(self): + """ + The compartment that this role is part of. + + A *compartment* is an optional field that places the + role into the named compartment. If a document has + any permissions (role/capability pairs) with roles + that have a compartment, then the user must have those + roles with each of the compartments (regardless of + which permission they are in) to perform any of the + capabilities. + + Once set, the compartment name cannot be changed. + + :return: The compartment name + """ + if 'compartment' in self._config: + return self._config['compartment'] + return None + + def role_names(self): + """ + Returns the names of the roles assigned to this role + + :return: The list of roles + """ + if "role" not in self._config: + return None + return self._config['role'] + + def set_role_names(self, roles): + """ + Sets the names of the roles assigned to this role + + :return: The role object + """ + return self.set_property_list('role', roles) + + def add_role_name(self, add_role): + """ + Adds the specified role to roles assigned to this role + + :return: The role object + """ + return self.add_to_property_list('role', add_role) + + def remove_role_name(self, remove_role): + """ + Removes the specified role from the roles assigned to this role + + :return: The role object + """ + return self.remove_from_property_list('role', remove_role) + + def set_description(self, description): + """ + Sets an object's description. + + *description* is an optional field to describe the + user. + + :param description: A description for the role + + :return: The role object + """ + self._config['description'] = description + return self + + def description(self): + """ + An object's description. + + *description* is an optional field to describe the + user. 
+ + :return: The role description + """ + if 'description' not in self._config: + return None + return self._config['description'] + + def add_privilege(self, name, kind=None): + """ + Add a new privilege to the list of role privileges. + + If the name is a structured value consisting of the kind and the + name separated by a "|", as returned by the list() method, then + the kind is optional. + + :param name: The name of the privilege + :param kind: The kind of privilege + + :return: The role object + """ + parts = name.split("|") + if len(parts) == 1: + pass + elif len(parts) == 2: + if kind is not None and kind != parts[0]: + raise validate_custom("Kinds must match") + kind = parts[0] + name = parts[1] + else: + raise validate_custom("Unparseable privilege name") + + key = "{0}|{1}".format(kind,name) + return self.add_to_property_list('privilege', key) + + def set_privileges(self, names): + """ + Set the list of privileges associates with this role. + + The names must be structured values consisting of the kind and the + name separated by a "|", as returned by the list() method. + + :param names: The structure names of the privileges + + :return: The role object + """ + for name in names: + parts = name.split("|") + if len(parts) == 1: + pass + elif len(parts) == 2: + if kind is not None and kind != parts[0]: + raise validate_custom("Kinds must match") + kind = parts[0] + name = parts[1] + else: + raise validate_custom("Unparseable privilege name") + return self.set_property_list('privilege', names) + + def remove_privilege(self, name, kind=None): + """ + Remove a privilege from the list of role privileges. + + If the name is a structured value consisting of the kind and the + name separated by a "|", as returned by the list() method, then + the kind is optional. 
+ + :param name: The name of the privilege + :param kind: The kind of privilege + + :return: The role object + """ + parts = name.split("|") + if len(parts) == 1: + pass + elif len(parts) == 2: + if kind is not None and kind != parts[0]: + raise validate_custom("Kinds must match") + kind = parts[0] + name = parts[1] + else: + raise validate_custom("Unparseable privilege name") + + key = "{0}|{1}".format(kind,name) + return self.remove_from_property_list('privilege', key) + + def privileges(self): + """ + Returns the privileges for a given role + + :return: The list of privileges + """ + if 'privilege' not in self._config: + return None + return self._config['privilege'] + + @classmethod + def unmarshal(cls, config): + """ + Return a flat structure suitable for conversion to JSON or XML. + + :return: A hash of the keys in this object and their values, recursively. + """ + result = Role("temp") + result._config = config + result.name = config['role-name'] + result.etag = None + return result + + def marshal(self): + """ + Construct a new Role from a flat structure. This method is + principally used to construct an object from a Management API + payload. The configuration passed in is largely assumed to be + valid. + + :param: config: A hash of properties + :return: A newly constructed Role object with the specified properties. + """ + struct = { } + for key in self._config: + struct[key] = self._config[key]; + return struct + + def create(self, connection): + """ + Creates the Role on the MarkLogic server. 
+ + :param connection: The connection to a MarkLogic server + :return: The Role object + """ + uri = "http://{0}:{1}/manage/v2/roles" \ + .format(connection.host, connection.management_port) + + response = requests.post(uri, json=self._config, auth=connection.auth) + if response.status_code not in [200, 201, 204]: + raise exceptions.UnexpectedManagementAPIResponse(response.text) + + return self + + def read(self, connection): + """ + Loads the Role from the MarkLogic server. This will refresh + the properties of the object. + + :param connection: The connection to a MarkLogic server + :return: The Role object + """ + role = Role.lookup(connection, self._config['role-name']) + if role is None: + return None + else: + self._config = role._config + self.etag = role.etag + return self + + def update(self, connection): + """ + Updates the Role on the MarkLogic server. + + :param connection: The connection to a MarkLogic server + :return: The Role object + """ + uri = "http://{0}:{1}/manage/v2/roles/{2}/properties" \ + .format(connection.host, connection.management_port,self.name) + + headers = {} + if self.etag is not None: + headers['if-match'] = self.etag + + response = requests.put(uri, json=self._config, auth=connection.auth, + headers=headers) + + if response.status_code not in [200, 204]: + raise exceptions.UnexpectedManagementAPIResponse(response.text) + + self.name = self._config['role-name'] + if 'etag' in response.headers: + self.etag = response.headers['etag'] + + return self + + def delete(self, connection): + """ + Deletes the Role from the MarkLogic server. 
+ + :param connection: The connection to a MarkLogic server + :return: The Role object + """ + uri = "http://{0}:{1}/manage/v2/roles/{2}" \ + .format(connection.host, connection.management_port, self.name) + + response = requests.delete(uri, auth=connection.auth) + + if (response.status_code not in [200, 204] + and not response.status_code == 404): + raise exceptions.UnexpectedManagementAPIResponse(response.text) + + return self + + @classmethod + def list(cls, connection): + """ + List all the roles names. + + :param connection: The connection to a MarkLogic server + :return: A list of Roles + """ + + uri = "http://{0}:{1}/manage/v2/roles" \ + .format(connection.host, connection.management_port) + + response = requests.get(uri, auth=connection.auth, + headers={'accept': 'application/json'}) + + if response.status_code != 200: + raise exceptions.UnexpectedManagementAPIResponse(response.text) + + results = [] + json_doc = json.loads(response.text) + + for item in json_doc['role-default-list']['list-items']['list-item']: + results.append(item['nameref']) + + return results + + @classmethod + def exists(cls, connection, name): + """ + Returns true if (and only if) the specified role exits. + + :param connection: The connection to the MarkLogic database + :param name: The name of the role + :return: The role + """ + uri = "http://{0}:{1}/manage/v2/roles/{2}/properties" \ + .format(connection.host, connection.management_port, name) + + response = requests.head(uri, auth=connection.auth, + headers={'accept': 'application/json'}) + + if response.status_code == 200: + return True + elif response.status_code == 404: + return False + else: + raise exceptions.UnexpectedManagementAPIResponse(response.text) + + @classmethod + def lookup(cls, connection, name): + """ + Look up an individual role. 
+ + :param connection: The connection to the MarkLogic database + :param name: The name of the role + :return: The role + """ + uri = "http://{0}:{1}/manage/v2/roles/{2}/properties" \ + .format(connection.host, connection.management_port, name) + + response = requests.get(uri, auth=connection.auth, + headers={'accept': 'application/json'}) + + if response.status_code == 200: + result = Role.unmarshal(json.loads(response.text)) + if 'etag' in response.headers: + result.etag = response.headers['etag'] + return result + elif response.status_code == 404: + return None + else: + raise exceptions.UnexpectedManagementAPIResponse(response.text) diff --git a/python_api/marklogic/models/server/__init__.py b/python_api/marklogic/models/server/__init__.py new file mode 100644 index 0000000..f8dbf37 --- /dev/null +++ b/python_api/marklogic/models/server/__init__.py @@ -0,0 +1,2932 @@ +# +# Copyright 2015 MarkLogic Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0# +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# File History +# ------------ +# +# Norman Walsh 05/01/2015 Initial development + +""" +Server related classes for manipulating MarkLogic databases +""" + +from abc import ABCMeta, abstractmethod +import re +import requests +import time +import json +import logging +from marklogic.models.utilities.exceptions import UnexpectedManagementAPIResponse +from marklogic.models.utilities.validators import validate_custom +from marklogic.models.utilities.utilities import PropertyLists +from marklogic.models.server.schema import Schema +from marklogic.models.server.namespace import UsingNamespace, Namespace +from marklogic.models.server.requestblackout import RequestBlackout +from marklogic.models.server.module import ModuleLocation + +class Server(PropertyLists): + """ + The Server class encapsulates a MarkLogic application server. It provides + methods to set/get common attributes. The use of methods will + allow IDEs with tooling to provide auto-completion hints. + + Server is the base class for all of the actual server types: + HttpServer, OdbcServer, XdbcServer, and WebDAVServer. + """ + __metaclass__ = ABCMeta + + def address(self): + """ + The server socket bind numeric internet address. + + *address* specifies the IP address for the App Server. + """ + if 'address' in self._config: + return self._config['address'] + return None + + def set_address(self, address): + """ + Sets the server socket bind numeric internet address. + + *address* specifies the IP address for the App Server. + """ + self._config['address'] = address + return self + + def authentication(self): + """ + The authentication scheme to use for this server + + *authentication* specifies the authentication scheme + to use for the server. 
+ """ + if 'authentication' in self._config: + return self._config['authentication'] + return None + + def set_authentication(self, authentication): + """ + Sets the authentication scheme to use for this server + + *authentication* specifies the authentication scheme + to use for the server. + """ + self._config['authentication'] = authentication + return self + + def backlog(self): + """ + The socket listen backlog. + + *backlog* specifies the maximum number of pending connections + allowed on the HTTP server socket. + """ + if 'backlog' in self._config: + return self._config['backlog'] + return None + + def set_backlog(self, backlog): + """ + Sets the socket listen backlog. + + *backlog* specifies the maximum number of pending connections + allowed on the HTTP server socket. + """ + self._config['backlog'] = backlog + return self + + def collation(self): + """ + The default collation for queries. + + *collation* specifies the default collation for queries + run in this appserver. This will be the collation used + for string comparison and sorting if none is specified + in the query. + """ + if 'collation' in self._config: + return self._config['collation'] + return None + + def set_collation(self, collation): + """ + Sets the default collation for queries. + + *collation* specifies the default collation for queries + run in this appserver. This will be the collation used + for string comparison and sorting if none is specified + in the query. + """ + self._config['collation'] = collation + return self + + def concurrent_request_limit(self): + """ + The maximum number of concurrent requests per user. + + *concurrent request limit* specifies the maximum number + of requests any user may have running at a specific + time. 0 indicates no maximum. 
+ """ + if 'concurrent-request-limit' in self._config: + return self._config['concurrent-request-limit'] + return None + + def set_concurrent_request_limit(self, concurrent_request_limit): + """ + Sets the maximum number of concurrent requests per user. + + *concurrent request limit* specifies the maximum number + of requests any user may have running at a specific + time. 0 indicates no maximum. + """ + self._config['concurrent-request-limit'] = concurrent_request_limit + return self + + def debug_allow(self): + """ + Allow debugging on this server. + + *debug-allow* specifies whether to allow requests against + this App Server to be stopped for debugging, using + the MarkLogic Server debugging APIs. + """ + if 'debug-allow' in self._config: + return self._config['debug-allow'] + return None + + def set_debug_allow(self, debug_allow): + """ + Sets allow debugging on this server. + + *debug-allow* specifies whether to allow requests against + this App Server to be stopped for debugging, using + the MarkLogic Server debugging APIs. + """ + self._config['debug-allow'] = debug_allow + return self + + def default_xquery_version(self): + """ + The default XQuery language version for this server. + + *default-xquery-version* specifies the default XQuery + language for this App Server if an XQuery module does + explicitly declare its language version. + """ + if 'default-xquery-version' in self._config: + return self._config['default-xquery-version'] + return None + + def set_default_xquery_version(self, default_xquery_version): + """ + Sets the default XQuery language version for this server. + + *default-xquery-version* specifies the default XQuery + language for this App Server if an XQuery module does + explicitly declare its language version. + """ + self._config['default-xquery-version'] = default_xquery_version + return self + + def last_login_database_name(self): + """ + The database that contains users' last login information. 
+ + *last login* specifies the name of the database in + which this HTTP server stores users' last login information. + """ + if 'last-login-database' in self._config: + return self._config['last-login-database'] + return None + + def set_last_login_database_name(self, database): + """ + Sets the database that contains users' last login information. + + *last login* specifies the name of the database in + which this HTTP server stores users' last login information. + """ + self._config['last-login-database'] = database + return self + + def display_last_login(self): + """ + Indicates whether an appserver should display users' + last login information. + + *display last login* specifies if the ``xdmp:display-last-login`` + API should return true or false in the ``display-last-login`` + element. + """ + if 'display-last-login' in self._config: + return self._config['display-last-login'] + return None + + def set_display_last_login(self, display_last_login): + """ + Sets indicates whether an appserver should display users' + last login information. + + *display last login* specifies if the ``xdmp:display-last-login`` + API should return true or false in the ``display-last-login`` + element. + """ + self._config['display-last-login'] = display_last_login + return self + + def distribute_timestamps(self): + """ + Specifies the distribution of commit timestamps after + updates. + + *distribute timestamps* specifies how the latest timestamp + is distributed after updates. This affects performance + of updates and the timeliness of read-after-write query + results from other hosts in the group. + + When set to ``fast``, updates return as quicky as possible. + No special timestamp notification messages are broadcasted + to other hosts. Instead, timestamps are distributed + to other hosts when any other message is sent. 
The + maximum amount of time that could pass before other + hosts see the timestamp is one second, because a heartbeat + message is sent to other hosts every second. + + When set to ``strict``, updates immediately broadcast + timestamp notification messages to every other host + in the group. Updates do not return until their timestamp + has been distributed. This ensures timeliness of read-after-write + query results from other hosts in the group. + """ + if 'distribute-timestamps' in self._config: + return self._config['distribute-timestamps'] + return None + + def set_distribute_timestamps(self, distribute_timestamps): + """ + Sets specifies the distribution of commit timestamps after + updates. + + *distribute timestamps* specifies how the latest timestamp + is distributed after updates. This affects performance + of updates and the timeliness of read-after-write query + results from other hosts in the group. + + When set to ``fast``, updates return as quicky as possible. + No special timestamp notification messages are broadcasted + to other hosts. Instead, timestamps are distributed + to other hosts when any other message is sent. The + maximum amount of time that could pass before other + hosts see the timestamp is one second, because a heartbeat + message is sent to other hosts every second. + + When set to ``strict``, updates immediately broadcast + timestamp notification messages to every other host + in the group. Updates do not return until their timestamp + has been distributed. This ensures timeliness of read-after-write + query results from other hosts in the group. + """ + self._config['distribute-timestamps'] = distribute_timestamps + return self + + def enabled(self): + """ + Returns the enabled status. + + :return: The enabled status or None if it is unknown + """ + if 'enabled' in self._config: + return self._config['enabled'] + return None + + def set_enabled(self, enabled): + """ + Sets the enabled status. 
+ + :param: enabled: The enabled status, either True or False + :return: The server object. + """ + self._config['enabled'] = enabled + return self + + def group_name(self): + """ + Returns the group name. + + The group name cannot be changed. + + :return: The group name or None if it is unknown + """ + if 'group-name' in self._config: + return self._config['group-name'] + return None + + def internal_security(self): + """ + Whether or not the security database is used for authentication + and authorization. + + *internal-security* specifies whether security database + is used for authentication and authorization if the + user is found in the security database. + """ + if 'internal-security' in self._config: + return self._config['internal-security'] + return None + + def set_internal_security(self, internal_security): + """ + Sets whether or not the security database is used for authentication + and authorization. + + *internal-security* specifies whether security database + is used for authentication and authorization if the + user is found in the security database. + """ + self._config['internal-security'] = internal_security + return self + + def log_errors(self): + """ + Log uncaught request processing errors to ErrorLog.txt. + + *log-errors* specifes whether to log uncaught errors + for this App Server to the ``ErrorLog.txt`` file. This + is useful to log exceptions that might occur on an + App Server for later debugging. + """ + if 'log-errors' in self._config: + return self._config['log-errors'] + return None + + def set_log_errors(self, log_errors): + """ + Sets log uncaught request processing errors to ErrorLog.txt. + + *log-errors* specifes whether to log uncaught errors + for this App Server to the ``ErrorLog.txt`` file. This + is useful to log exceptions that might occur on an + App Server for later debugging. 
+ """ + self._config['log-errors'] = log_errors + return self + + def multi_version_concurrency_control(self): + """ + Specifies concurrency control of read-only queries. + + *multi version concurrency control* specifies how the + latest timestamp is chosen for lock-free queries. When + set to ``contemporaneous``, the server chooses the + latest timestamp for which transaction is known to + have committed, even though there still may be other + transactions for that timestamp that have not yet fully + committed. Queries will see more timely results, but + may block waiting for contemporaneous transactions + to fully commit. When set to ``nonblocking``, the server + chooses the latest timestamp for which transactions + are known to have committed, even though there may + be a slightly later timestamp for which another transaction + has committed. Queries won't block waiting for transactions, + but they may see less timely results. + """ + if 'multi-version-concurrency-control' in self._config: + return self._config['multi-version-concurrency-control'] + return None + + def set_multi_version_concurrency_control(self, mvcc): + """ + Sets specifies concurrency control of read-only queries. + + *multi version concurrency control* specifies how the + latest timestamp is chosen for lock-free queries. When + set to ``contemporaneous``, the server chooses the + latest timestamp for which transaction is known to + have committed, even though there still may be other + transactions for that timestamp that have not yet fully + committed. Queries will see more timely results, but + may block waiting for contemporaneous transactions + to fully commit. When set to ``nonblocking``, the server + chooses the latest timestamp for which transactions + are known to have committed, even though there may + be a slightly later timestamp for which another transaction + has committed. Queries won't block waiting for transactions, + but they may see less timely results. 
+ """ + self._config['multi-version-concurrency-control'] = mvcc + return self + + def output_byte_order_mark(self): + """ + The output sequence of octets is to be preceded by + a Byte Order Mark. + + *output-byte-order-mark* Valid values are ``yes`` or + ``no``. This is like the "byte-order-mark" option of + both the XSLT ``xsl:output`` instruction and the MarkLogic + XQuery ``xdmp:output`` prolog statement. + """ + if 'output-byte-order-mark' in self._config: + return self._config['output-byte-order-mark'] + return None + + def set_output_byte_order_mark(self, output_byte_order_mark): + """ + Sets the output sequence of octets is to be preceded by + a Byte Order Mark. + + *output-byte-order-mark* Valid values are ``yes`` or + ``no``. This is like the "byte-order-mark" option of + both the XSLT ``xsl:output`` instruction and the MarkLogic + XQuery ``xdmp:output`` prolog statement. + """ + self._config['output-byte-order-mark'] = output_byte_order_mark + return self + + def output_cdata_section_localname(self): + """ + Element localname or list of element localnames to + be output as CDATA sections. + + *output-cdata-section-localname* is an element or list + of elements to be output as CDATA sections: a space-separated + sequence of name strings (without namespace qualifiers) + of elements defined in the "cdata section namespace + uri" specified above. This corresponds to the "cdata-section-elements" + option of both the XSLT ``xsl:output`` instruction + and the MarkLogic XQuery ``xdmp:output`` prolog statement. + You can only configure CDATA sections in one namespace + at the level of server defaults. + """ + if 'output-cdata-section-localname' in self._config: + return self._config['output-cdata-section-localname'] + return None + + def set_output_cdata_section_localname(self, output_cdata_section_localname): + """ + Sets element localname or list of element localnames to + be output as CDATA sections. 
+ + *output-cdata-section-localname* is an element or list + of elements to be output as CDATA sections: a space-separated + sequence of name strings (without namespace qualifiers) + of elements defined in the "cdata section namespace + uri" specified above. This corresponds to the "cdata-section-elements" + option of both the XSLT ``xsl:output`` instruction + and the MarkLogic XQuery ``xdmp:output`` prolog statement. + You can only configure CDATA sections in one namespace + at the level of server defaults. + """ + self._config['output-cdata-section-localname'] = output_cdata_section_localname + return self + + def output_cdata_section_namespace_uri(self): + """ + Namespace URI of the "cdata section localname" specified + below. + + *output-cdata-section-namespace-uri* is used in conjunction + with output-cdata-section-localname; it is a namespace + URI in which elements whose text contents should be + output as CDATA sections may be specified. You can + only configure CDATA sections in one namespace at the + level of server defaults. + """ + if 'output-cdata-section-namespace-uri' in self._config: + return self._config['output-cdata-section-namespace-uri'] + return None + + def set_output_cdata_section_namespace_uri(self, output_cdata_section_namespace_uri): + """ + Sets namespace URI of the "cdata section localname" specified + below. + + *output-cdata-section-namespace-uri* is used in conjunction + with output-cdata-section-localname; it is a namespace + URI in which elements whose text contents should be + output as CDATA sections may be specified. You can + only configure CDATA sections in one namespace at the + level of server defaults. + """ + self._config['output-cdata-section-namespace-uri'] = output_cdata_section_namespace_uri + return self + + def output_doctype_public(self): + """ + A public identifier to use on the emitted DOCTYPE. + + *output-doctype-public* A public identifier, which + is the public identifier to use on the emitted DOCTYPE. 
+ This is like the "doctype-public" option of both the + XSLT ``xsl:output`` instruction and the MarkLogic XQuery + ``xdmp:output`` prolog statement. + """ + if 'output-doctype-public' in self._config: + return self._config['output-doctype-public'] + return None + + def set_output_doctype_public(self, output_doctype_public): + """ + Sets a public identifier to use on the emitted DOCTYPE. + + *output-doctype-public* A public identifier, which + is the public identifier to use on the emitted DOCTYPE. + This is like the "doctype-public" option of both the + XSLT ``xsl:output`` instruction and the MarkLogic XQuery + ``xdmp:output`` prolog statement. + """ + self._config['output-doctype-public'] = output_doctype_public + return self + + def output_doctype_system(self): + """ + A system identifier to use on the emitted DOCTYPE. + + *output-doctype-system* A system identifier, which + is the system identifier to use on the emitted DOCTYPE. + This is like the "doctype-system" option of both the + XSLT ``xsl:output`` instruction and the MarkLogic XQuery + ``xdmp:output`` prolog statement. + """ + if 'output-doctype-system' in self._config: + return self._config['output-doctype-system'] + return None + + def set_output_doctype_system(self, output_doctype_system): + """ + Sets a system identifier to use on the emitted DOCTYPE. + + *output-doctype-system* A system identifier, which + is the system identifier to use on the emitted DOCTYPE. + This is like the "doctype-system" option of both the + XSLT ``xsl:output`` instruction and the MarkLogic XQuery + ``xdmp:output`` prolog statement. + """ + self._config['output-doctype-system'] = output_doctype_system + return self + + def output_encoding(self): + """ + The default output encoding. + + *output-encoding* specifies the default output encoding + for this App Server. This is like the "encoding" option + of both the XSLT ``xsl:output`` instruction and the + MarkLogic XQuery ``xdmp:output`` prolog statement. 
+ """ + if 'output-encoding' in self._config: + return self._config['output-encoding'] + return None + + def set_output_encoding(self, output_encoding): + """ + Sets the default output encoding. + + *output-encoding* specifies the default output encoding + for this App Server. This is like the "encoding" option + of both the XSLT ``xsl:output`` instruction and the + MarkLogic XQuery ``xdmp:output`` prolog statement. + """ + self._config['output-encoding'] = output_encoding + return self + + def output_escape_uri_attributes(self): + """ + Apply Unicode normalization, percent-encoding, and + HTML escaping to serialized URI attributes. + + *output-escape-uri-attributes* Valid values are ``yes`` + or ``no``. This is like the "escape-uri-attributes" + option of both the XSLT ``xsl:output`` instruction + and the MarkLogic XQuery ``xdmp:output`` prolog statement. + """ + if 'output-escape-uri-attributes' in self._config: + return self._config['output-escape-uri-attributes'] + return None + + def set_output_escape_uri_attributes(self, output_escape_uri_attributes): + """ + Sets apply Unicode normalization, percent-encoding, and + HTML escaping to serialized URI attributes. + + *output-escape-uri-attributes* Valid values are ``yes`` + or ``no``. This is like the "escape-uri-attributes" + option of both the XSLT ``xsl:output`` instruction + and the MarkLogic XQuery ``xdmp:output`` prolog statement. + """ + self._config['output-escape-uri-attributes'] = output_escape_uri_attributes + return self + + def output_include_content_type(self): + """ + Include the content-type declaration when serializing + the node. + + *output-include-content-type* Include the content-type + declaration when serializing the node. Valid values + are ``yes`` or ``no``. This is like the "include-content-type" + option of both the XSLT ``xsl:output`` instruction + and the MarkLogic XQuery ``xdmp:output`` prolog statement. 
+ """ + if 'output-include-content-type' in self._config: + return self._config['output-include-content-type'] + return None + + def set_output_include_content_type(self, output_include_content_type): + """ + Sets include the content-type declaration when serializing + the node. + + *output-include-content-type* Include the content-type + declaration when serializing the node. Valid values + are ``yes`` or ``no``. This is like the "include-content-type" + option of both the XSLT ``xsl:output`` instruction + and the MarkLogic XQuery ``xdmp:output`` prolog statement. + """ + self._config['output-include-content-type'] = output_include_content_type + return self + + def output_include_default_attributes(self): + """ + Specifies whether attributes defaulted with a schema + should be included in the serialization. + + *output-include-default-attributes* Serialized output + includes default attributes.This is like the "include-default-attributes" + option of the MarkLogic XQuery ``xdmp:output`` prolog + statement. + """ + if 'output-include-default-attributes' in self._config: + return self._config['output-include-default-attributes'] + return None + + def set_output_include_default_attributes(self, output_include_default_attributes): + """ + Sets specifies whether attributes defaulted with a schema + should be included in the serialization. + + *output-include-default-attributes* Serialized output + includes default attributes.This is like the "include-default-attributes" + option of the MarkLogic XQuery ``xdmp:output`` prolog + statement. + """ + self._config['output-include-default-attributes'] = output_include_default_attributes + return self + + def output_indent(self): + """ + Pretty-print typed XML (that is, XML for which there + is an in-scope schema). + + *output-indent* Specifies if typed XML (that is, XML + for which there is an in-scope schema) should be pretty-printed + (indented). Valid values are ``yes`` or ``no``. 
This + is like the "indent" option of both the XSLT ``xsl:output`` + instruction and the MarkLogic XQuery ``xdmp:output`` + prolog statement. + """ + if 'output-indent' in self._config: + return self._config['output-indent'] + return None + + def set_output_indent(self, output_indent): + """ + Sets pretty-print typed XML (that is, XML for which there + is an in-scope schema). + + *output-indent* Specifies if typed XML (that is, XML + for which there is an in-scope schema) should be pretty-printed + (indented). Valid values are ``yes`` or ``no``. This + is like the "indent" option of both the XSLT ``xsl:output`` + instruction and the MarkLogic XQuery ``xdmp:output`` + prolog statement. + """ + self._config['output-indent'] = output_indent + return self + + def output_indent_untyped(self): + """ + Pretty-print untyped XML (that is, XML for which there + is no in-scope schema). + + *output-indent-untyped* Specifies if untyped XML (that + is, XML for which there is no in-scope schema) should + be pretty-printed (indented). Valid values are ``yes`` + or ``no``. This is like the "indent-untyped" option + of the MarkLogic XQuery ``xdmp:output`` prolog statement. + """ + if 'output-indent-untyped' in self._config: + return self._config['output-indent-untyped'] + return None + + def set_output_indent_untyped(self, output_indent_untyped): + """ + Sets pretty-print untyped XML (that is, XML for which there + is no in-scope schema). + + *output-indent-untyped* Specifies if untyped XML (that + is, XML for which there is no in-scope schema) should + be pretty-printed (indented). Valid values are ``yes`` + or ``no``. This is like the "indent-untyped" option + of the MarkLogic XQuery ``xdmp:output`` prolog statement. + """ + self._config['output-indent-untyped'] = output_indent_untyped + return self + + def output_media_type(self): + """ + A mimetype representing a media type. + + *output-media-type* A mimetype representing a media + type. 
For example, ``text/plain`` or ``text/xml`` (or + other valid mimetypes). This is like the "media-type" + option of both the XSLT ``xsl:output`` instruction + and the MarkLogic XQuery ``xdmp:output`` prolog statement. + """ + if 'output-media-type' in self._config: + return self._config['output-media-type'] + return None + + def set_output_media_type(self, output_media_type): + """ + Sets a mimetype representing a media type. + + *output-media-type* A mimetype representing a media + type. For example, ``text/plain`` or ``text/xml`` (or + other valid mimetypes). This is like the "media-type" + option of both the XSLT ``xsl:output`` instruction + and the MarkLogic XQuery ``xdmp:output`` prolog statement. + """ + self._config['output-media-type'] = output_media_type + return self + + def output_method(self): + """ + Output method. + + *output-method* Valid values are ``xml``, ``html``, + ``xhtml``, and ``text``. This is like the "method" + option of both the XSLT ``xsl:output`` instruction + and the MarkLogic XQuery ``xdmp:output`` prolog statement. + """ + if 'output-method' in self._config: + return self._config['output-method'] + return None + + def set_output_method(self, output_method): + """ + Sets output method. + + *output-method* Valid values are ``xml``, ``html``, + ``xhtml``, and ``text``. This is like the "method" + option of both the XSLT ``xsl:output`` instruction + and the MarkLogic XQuery ``xdmp:output`` prolog statement. + """ + self._config['output-method'] = output_method + return self + + def output_normalization_form(self): + """ + A Unicode normalization to be applied to serialized + output. + + *output-normalization-form* Valid values are ``NFC``, + ``NFD``, and ``NFKD``. This is like the "normalization-form" + option of both the XSLT ``xsl:output`` instruction + and the MarkLogic XQuery ``xdmp:output`` prolog statement. 
+ """ + if 'output-normalization-form' in self._config: + return self._config['output-normalization-form'] + return None + + def set_output_normalization_form(self, output_normalization_form): + """ + Sets a Unicode normalization to be applied to serialized + output. + + *output-normalization-form* Valid values are ``NFC``, + ``NFD``, and ``NFKD``. This is like the "normalization-form" + option of both the XSLT ``xsl:output`` instruction + and the MarkLogic XQuery ``xdmp:output`` prolog statement. + """ + self._config['output-normalization-form'] = output_normalization_form + return self + + def output_omit_xml_declaration(self): + """ + Omit the XML declaration in serialized output. + + *output-omit-xml-declaration* Valid values are ``yes`` + or ``no``. This is like the "omit-xml-declaration" + option of both the XSLT ``xsl:output`` instruction + and the MarkLogic XQuery ``xdmp:output`` prolog statement. + """ + if 'output-omit-xml-declaration' in self._config: + return self._config['output-omit-xml-declaration'] + return None + + def set_output_omit_xml_declaration(self, output_omit_xml_declaration): + """ + Sets omit the XML declaration in serialized output. + + *output-omit-xml-declaration* Valid values are ``yes`` + or ``no``. This is like the "omit-xml-declaration" + option of both the XSLT ``xsl:output`` instruction + and the MarkLogic XQuery ``xdmp:output`` prolog statement. + """ + self._config['output-omit-xml-declaration'] = output_omit_xml_declaration + return self + + def output_sgml_character_entities(self): + """ + Output SGML character entities. + + *output-sgml-character-entities* specifies whether + to output SGML character entities for this App Server, + and how to resolve name conflicts. Valid values are + ``normal``, ``none``, ``math``, and ``pub``. By default + (that is, if this option is not specified), no SGML + entities are serialized on output, unless the App Server + is configured to output SGML character entities. 
+ """ + if 'output-sgml-character-entities' in self._config: + return self._config['output-sgml-character-entities'] + return None + + def set_output_sgml_character_entities(self, output_sgml_character_entities): + """ + Sets output SGML character entities. + + *output-sgml-character-entities* specifies whether + to output SGML character entities for this App Server, + and how to resolve name conflicts. Valid values are + ``normal``, ``none``, ``math``, and ``pub``. By default + (that is, if this option is not specified), no SGML + entities are serialized on output, unless the App Server + is configured to output SGML character entities. + """ + self._config['output-sgml-character-entities'] = output_sgml_character_entities + return self + + def output_standalone(self): + """ + For a value of "yes" or "no", include "standalone=" + in the XML declaration; for a value of "omit", omit + "standalone=". + + *output-standalone* Valid values are ``yes``, ``no``, + or ``omit``. This is like the "standalone" option of + both the XSLT ``xsl:output`` instruction and the MarkLogic + XQuery ``xdmp:output`` prolog statement. + """ + if 'output-standalone' in self._config: + return self._config['output-standalone'] + return None + + def set_output_standalone(self, output_standalone): + """ + Sets for a value of "yes" or "no", include "standalone=" + in the XML declaration; for a value of "omit", omit + "standalone=". + + *output-standalone* Valid values are ``yes``, ``no``, + or ``omit``. This is like the "standalone" option of + both the XSLT ``xsl:output`` instruction and the MarkLogic + XQuery ``xdmp:output`` prolog statement. + """ + self._config['output-standalone'] = output_standalone + return self + + def output_undeclare_prefixes(self): + """ + Undeclare the namespace prefix of any child element + that does not bind the prefix of its parent element. + + *output-undeclare-prefixes* Valid values are ``yes`` + or ``no``. 
This is like the "undeclare-prefixes" option + of both the XSLT ``xsl:output`` instruction and the + MarkLogic XQuery ``xdmp:output`` prolog statement. + """ + if 'output-undeclare-prefixes' in self._config: + return self._config['output-undeclare-prefixes'] + return None + + def set_output_undeclare_prefixes(self, output_undeclare_prefixes): + """ + Sets undeclare the namespace prefix of any child element + that does not bind the prefix of its parent element. + + *output-undeclare-prefixes* Valid values are ``yes`` + or ``no``. This is like the "undeclare-prefixes" option + of both the XSLT ``xsl:output`` instruction and the + MarkLogic XQuery ``xdmp:output`` prolog statement. + """ + self._config['output-undeclare-prefixes'] = output_undeclare_prefixes + return self + + def output_version(self): + """ + Optionally stipulate conformance to a specific version + of the output method. + + *output-version* Valid values are ``1.0`` (for XML + or XHTML) or ``4.0`` (for HTML). This is like the "version" + option of both the XSLT ``xsl:output`` instruction + and the MarkLogic XQuery ``xdmp:output`` prolog statement. + """ + if 'output-version' in self._config: + return self._config['output-version'] + return None + + def set_output_version(self, output_version): + """ + Sets optionally stipulate conformance to a specific version + of the output method. + + *output-version* Valid values are ``1.0`` (for XML + or XHTML) or ``4.0`` (for HTML). This is like the "version" + option of both the XSLT ``xsl:output`` instruction + and the MarkLogic XQuery ``xdmp:output`` prolog statement. + """ + self._config['output-version'] = output_version + return self + + def port(self): + """ + The server socket bind internet port number. + + *port* specifes the socket port for the HTTP server. + """ + if 'port' in self._config: + return self._config['port'] + return None + + def set_port(self, port): + """ + Sets the server socket bind internet port number. 
+ + *port* specifes the socket port for the HTTP server. + """ + self._config['port'] = port + return self + + def pre_commit_trigger_depth(self): + """ + The maximum depth of pre-commit trigger invocation. + + *pre-commit trigger limit* specifies the maximum number + of pre-commit triggers a single statement against this + App Server can invoke. + """ + if 'pre-commit-trigger-depth' in self._config: + return self._config['pre-commit-trigger-depth'] + return None + + def set_pre_commit_trigger_depth(self, pre_commit_trigger_depth): + """ + Sets the maximum depth of pre-commit trigger invocation. + + *pre-commit trigger limit* specifies the maximum number + of pre-commit triggers a single statement against this + App Server can invoke. + """ + self._config['pre-commit-trigger-depth'] = pre_commit_trigger_depth + return self + + def pre_commit_trigger_limit(self): + """ + The maximum number of triggers a single statement can + invoke. + + *pre-commit trigger depth* specifies the maximum depth + (how many triggers can cause other triggers to fire, + which in turn cause others to fire, and so on) for + pre-commit triggers that are executed against this + App Server. + """ + if 'pre-commit-trigger-limit' in self._config: + return self._config['pre-commit-trigger-limit'] + return None + + def set_pre_commit_trigger_limit(self, pre_commit_trigger_limit): + """ + Sets the maximum number of triggers a single statement can + invoke. + + *pre-commit trigger depth* specifies the maximum depth + (how many triggers can cause other triggers to fire, + which in turn cause others to fire, and so on) for + pre-commit triggers that are executed against this + App Server. + """ + self._config['pre-commit-trigger-limit'] = pre_commit_trigger_limit + return self + + def profile_allow(self): + """ + Allow profiling on this server. + + *profile-allow* specifies whether to allow requests + against this App Server to be profiled, using the MarkLogic + Server profiling APIs. 
+ """ + if 'profile-allow' in self._config: + return self._config['profile-allow'] + return None + + def set_profile_allow(self, profile_allow): + """ + Sets allow profiling on this server. + + *profile-allow* specifies whether to allow requests + against this App Server to be profiled, using the MarkLogic + Server profiling APIs. + """ + self._config['profile-allow'] = profile_allow + return self + + def root(self): + """ + The root document directory pathname. + + *root* specifies the root directory for the web applications + search path. + + *root* specifies the modules root directory. + """ + if 'root' in self._config: + return self._config['root'] + return None + + def set_root(self, root): + """ + Sets the root document directory pathname. + + *root* specifies the root directory for the web applications + search path. + + *root* specifies the modules root directory. + """ + self._config['root'] = root + return self + + def server_name(self): + """ + The server name. + + :return: The server name + """ + # Can't be unknown, right? + return self._config['server-name'] + + def set_server_name(self, server_name): + """ + Set the server name. + + :param: server_name: The new server name + :return: The server object + """ + self._config['server-name'] = server_name + return self + + def server_type(self): + """ + The server type. + + The server type cannot be changed. + + :return: The server type + """ + # Can't be unknown, right? + return self._config['server-type'] + + def ssl_allow_sslv3(self): + """ + Whether or not SSLv3 is allowed. + + *SSL enabled* specifies whether SSLv3 is allowed for + this XDQP. + """ + if 'ssl-allow-sslv3' in self._config: + return self._config['ssl-allow-sslv3'] + return None + + def set_ssl_allow_sslv3(self, ssl_allow_sslv3): + """ + Sets whether or not SSLv3 is allowed. + + *SSL enabled* specifies whether SSLv3 is allowed for + this XDQP. 
+ """ + self._config['ssl-allow-sslv3'] = ssl_allow_sslv3 + return self + + def ssl_allow_tls(self): + """ + Whether or not TLS is allowed. + + *SSL enabled* specifies whether TLS is allowed for + XDQP. + """ + if 'ssl-allow-tls' in self._config: + return self._config['ssl-allow-tls'] + return None + + def set_ssl_allow_tls(self, ssl_allow_tls): + """ + Sets whether or not TLS is allowed. + + *SSL enabled* specifies whether TLS is allowed for + XDQP. + """ + self._config['ssl-allow-tls'] = ssl_allow_tls + return self + + def ssl_certificate_template(self): + """ + The certificate template. When a certificate template + is specified, the App Server uses an SSL encrypted + protocol (e.g. https, davs, xccs). The certificate + template specifies the common information for the individual + SSL certificates needed for each host in the group. + You can add a new certificate template by navigating + to Security > Certificate Templates > Create + + *ssl certificate template* specifies the certificate + template for the App Server. When a certificate template + is specified, the App Server uses an SSL encrypted + protocol (e.g. https, davs, xccs). The certificate + template specifies the common information for the individual + SSL certificates needed for each host in the group. + """ + if 'ssl-certificate-template' in self._config: + return self._config['ssl-certificate-template'] + return None + + def set_ssl_certificate_template(self, ssl_certificate_template): + """ + Sets the certificate template. When a certificate template + is specified, the App Server uses an SSL encrypted + protocol (e.g. https, davs, xccs). The certificate + template specifies the common information for the individual + SSL certificates needed for each host in the group. + You can add a new certificate template by navigating + to Security > Certificate Templates > Create + + *ssl certificate template* specifies the certificate + template for the App Server. 
When a certificate template + is specified, the App Server uses an SSL encrypted + protocol (e.g. https, davs, xccs). The certificate + template specifies the common information for the individual + SSL certificates needed for each host in the group. + """ + self._config['ssl-certificate-template'] = ssl_certificate_template + return self + + def ssl_ciphers(self): + """ + A colon separated list of ciphers (e.g. ALL:!LOW:@STRENGTH) + + *ssl ciphers* specifies the SSL ciphers that may be + used. + """ + if 'ssl-ciphers' in self._config: + return self._config['ssl-ciphers'] + return None + + def set_ssl_ciphers(self, ssl_ciphers): + """ + Sets a colon separated list of ciphers (e.g. ALL:!LOW:@STRENGTH) + + *ssl ciphers* specifies the SSL ciphers that may be + used. + """ + self._config['ssl-ciphers'] = ssl_ciphers + return self + + def ssl_hostname(self): + """ + The host name for the server's SSL certificate. This + is useful when many servers are running behind a load + balancer. If not specified, each host will use a certificate + specifying its own hostname. Note that per RFC 2459, + hostnames must not exceed 64 characters in length. + + *ssl hostname* specifies the hostname for the server's + SSL certificate. This is useful when many servers are + running behind a load balancer. If not specified, each + host will use a certificate for its own hostname. + """ + if 'ssl-hostname' in self._config: + return self._config['ssl-hostname'] + return None + + def set_ssl_hostname(self, ssl_hostname): + """ + Sets the host name for the server's SSL certificate. This + is useful when many servers are running behind a load + balancer. If not specified, each host will use a certificate + specifying its own hostname. Note that per RFC 2459, + hostnames must not exceed 64 characters in length. + + *ssl hostname* specifies the hostname for the server's + SSL certificate. This is useful when many servers are + running behind a load balancer. 
If not specified, each + host will use a certificate for its own hostname. + """ + self._config['ssl-hostname'] = ssl_hostname + return self + + def ssl_require_client_certificate(self): + """ + Whether or not a client certificate is required. This + only has an effect when one or more more client certificate + authorities are specified, in which case a value of + true will refuse a client request if it does not present + a valid client certificate. + + *ssl require client certificate* specifies whether + a client certificate is required when connecting to + this application server. + """ + if 'ssl-require-client-certificate' in self._config: + return self._config['ssl-require-client-certificate'] + return None + + def set_ssl_require_client_certificate(self, ssl_require_client_certificate): + """ + Sets whether or not a client certificate is required. This + only has an effect when one or more more client certificate + authorities are specified, in which case a value of + true will refuse a client request if it does not present + a valid client certificate. + + *ssl require client certificate* specifies whether + a client certificate is required when connecting to + this application server. + """ + self._config['ssl-require-client-certificate'] = ssl_require_client_certificate + return self + + def threads(self): + """ + The maximum number of server threads allowed. + + *threads* specifies the maximum number of App Server + threads. + """ + if 'threads' in self._config: + return self._config['threads'] + return None + + def set_threads(self, threads): + """ + Sets the maximum number of server threads allowed. + + *threads* specifies the maximum number of App Server + threads. + """ + self._config['threads'] = threads + return self + + def schemas(self): + """ + The schema binding specifications. + + :return: The schema bindings or None if there aren't any. 
+ """ + if 'schema' in self._config: + return self._config['schema'] + return None + + def add_schema(self, schema): + """ + Add a schema binding. + """ + return self.add_to_property_list('schema', schema, Schema) + + def set_schemas(self, schemas): + """ + Set the schema bindings + """ + return self.set_property_list('schema', schemas, Schema) + + def remove_schema(self, schema): + """ + Remove a schema binding. + """ + return self.remove_from_property_list('schema', schema, Schema) + + def namespaces(self): + """ + The namespace bindings. + """ + if 'namespace' in self._config: + return self._config['namespace'] + return None + + def add_namespace(self, namespace): + """ + Add a namespace binding. + """ + return self.add_to_property_list('namespace', namespace, Namespace) + + def set_namespaces(self, namespaces): + """ + Set the namespace bindings. + """ + return self.set_property_list('namespace', namespaces, Namespace) + + def remove_namespace(self, remove_ns): + """ + Remove a namespace binding. + """ + return self.remove_from_property_list('namespace', remove_ns, Namespace) + + def using_namespaces(self): + """ + The namespace path URIs. + """ + if 'using-namespaces' in self._config: + return self._config['using-namespace'] + return None + + def add_using_namespace(self, namespace): + """ + Add a namespace path URI. + """ + return self.add_to_property_list('using-schema', namespace, UsingNamespace) + + def set_using_namespaces(self, using_ns): + """ + Set the namespace path URIs. + """ + return self.set_property_list('using-namespace', using_ns, UsingNamespace) + + def remove_using_namespaces(self, removens): + """ + Remove a namespace path URI. + """ + return self.remove_from_property_list('using-namespace', + removens, UsingNamespace) + + def module_locations(self): + """ + The module locations. 
+ """ + if 'module-location' in self._config: + return self._config['module-location'] + return None + + def add_module_location(self, location): + """ + Add a module location. + """ + return self.add_to_property_list('module-location', + location, ModuleLocation) + + def set_module_locations(self, locations): + """ + Set the module locations. + """ + return self.set_property_list('module-locations', locations, ModuleLocation) + + def request_blackouts(self): + """ + The request blackout periods. + """ + if 'request-blackout' in self._config: + return self._config['request-blackout'] + else: + return None + + def add_request_blackout(self, blackout): + """ + Add a request blackout period. + """ + return self.add_to_property_list('request-blackout', + blackout, RequestBlackout) + + def set_request_blackouts(self, blackouts): + """ + Set the list of request blackout periods. + """ + return self.set_property_list('request-blackout', + blackouts, RequestBlackout) + + def create(self, connection): + """ + Creates a server on the MarkLogic server. + + :param connection: The connection to a MarkLogic server + :return: The server object + """ + uri = "http://{0}:{1}/manage/v2/servers" \ + .format(connection.host, connection.management_port) + + response = requests.post(uri, json=self._config, auth=connection.auth) + if response.status_code > 299: + raise UnexpectedManagementAPIResponse(response.text) + + return self + + def read(self, connection): + """ + Loads the server from the MarkLogic server. This will refresh + the properties of the object. + + :param connection: The connection to a MarkLogic server + :return: The server object + """ + server = Server.lookup(connection, self.server_name(), self.group_name()) + if server is None: + return None + else: + self._config = server._config + self.etag = server.etag + return self + + def update(self, connection): + """ + Updates the server on the MarkLogic server. 
+ + :param connection: The connection to a MarkLogic server + :return: The server object + """ + uri = "http://{0}:{1}/manage/v2/servers/{2}/properties?group-id={3}" \ + .format(connection.host, connection.management_port, + self.name, self.group_name()) + + headers = {} + if self.etag is not None: + headers['if-match'] = self.etag + + struct = self.marshal() + response = requests.put(uri, json=struct, auth=connection.auth, + headers=headers) + + if response.status_code > 299: + raise UnexpectedManagementAPIResponse(response.text) + + self.name = self._config['server-name'] + if 'etag' in response.headers: + self.etag = response.headers['etag'] + + if response.status_code == 202: + Server.wait_for_restart(connection, response) + + return self + + def delete(self, connection): + """ + Deletes the server on the MarkLogic server. + + :param connection: The connection to a MarkLogic server + :return: The server object + """ + uri = "http://{0}:{1}/manage/v2/servers/{2}?group-id={3}" \ + .format(connection.host, connection.management_port, + self.server_name(), self.group_name()) + + headers = {'accept': 'application/json'} + if self.etag is not None: + headers['if-match'] = self.etag + + response = requests.delete(uri, auth=connection.auth, headers=headers) + + if response.status_code > 299 and not response.status_code == 404: + raise UnexpectedManagementAPIResponse(response.text) + + if response.status_code == 202: + Server.wait_for_restart(connection, response) + + return self + + @classmethod + def list(cls, connection): + """ + List the names of all the servers on the system. Server + names are structured values, they consist of the group name and + the server name separated by "|". 
+ + :param connection: The connection to a MarkLogic server + + :return: A list of servers + """ + uri = "http://{0}:{1}/manage/v2/servers" \ + .format(connection.host, connection.management_port) + + response = requests.get(uri, auth=connection.auth, + headers={'accept': 'application/json'}) + + if response.status_code != 200: + raise UnexpectedManagementAPIResponse(response.text) + + results = [] + json_doc = json.loads(response.text) + + for item in json_doc['server-default-list']['list-items']['list-item']: + results.append("{0}|{1}" \ + .format(item['groupnameref'], item['nameref'])) + + return results + + @classmethod + def exists(cls, connection, name, group="Default"): + """ + Returns true if (and only if) the server exists. The server + name may be a structured value consisting of the name of the group + and the name of the server separated by "|". If a structured name + is used the group parameter is ignored. + + :param name: The server name + :param group: The group name + :param: connection: The connection to a MarkLogic server + :return: True or False + """ + parts = name.split("|") + if len(parts) == 1: + pass + elif len(parts) == 2: + group = parts[0] + name = parts[1] + else: + raise validate_custom("Unparseable server name") + + uri = "http://{0}:{1}/manage/v2/servers/{2}/properties?group-id={3}" \ + .format(connection.host, connection.management_port, + name, group) + + response = requests.head(uri, auth=connection.auth) + + if response.status_code > 299 and not response.status_code == 404: + raise UnexpectedManagementAPIResponse(response.text) + + return (response.status_code == 200) + + @classmethod + def lookup(cls, connection, name, group='Default'): + """ + Returns a server configuration. The server + name may be a structured value consisting of the name of the group + and the name of the server separated by "|". If a structured name + is used the group parameter is ignored.
+ + :param name: The server name + :param group: The group name + :param: connection: The connection to a MarkLogic server + :return: True or False + """ + parts = name.split("|") + if len(parts) == 1: + pass + elif len(parts) == 2: + group = parts[0] + name = parts[1] + else: + raise validate_custom("Unparseable server name") + + uri = "http://{0}:{1}/manage/v2/servers/{2}/properties?group-id={3}" \ + .format(connection.host, connection.management_port, name, group) + + logging.info("Reading server configuration: {0}[{1}]" \ + .format(name,group)) + + response = requests.get(uri, auth=connection.auth, + headers={u'accept': u'application/json'}) + + if response.status_code > 299 and not response.status_code == 404: + raise UnexpectedManagementAPIResponse(response.text) + + result = None + + if response.status_code == 404: + return result + + if response.status_code != 200: + raise UnexpectedManagementAPIResponse(response.text) + + result = Server.unmarshal(json.loads(response.text)) + if 'etag' in response.headers: + result.etag = response.headers['etag'] + + return result + + @classmethod + def wait_for_restart(cls, connection, response): + """ + Waits for the server to restart. + + Some operations (removing a server, changing a server's port, etc.) + require a restart. On receipt of a 202 response from the server, + you can pass that response to this method and it will wait until + the server has restarted. 
+ """ + rconfig = json.loads(response.text) + timestamp = rconfig['restart']['last-startup'][0]['value'] + uri = "http://localhost:8001/admin/v1/timestamp" + waiting = True + while waiting: + waiting = False + stamp = None + try: + response = requests.get(uri, auth=connection.auth) + stamp = response.text + except requests.exceptions.ConnectionError as e: + waiting = True + time.sleep(2) + pass + except http.client.BadStatusLine as e: + waiting = True + time.sleep(2) + pass + + if not(waiting): + if re.match(r"\d\d\d\d-\d\d-\d\dT", stamp): + waiting = (stamp <= timestamp) + else: + waiting = True + + @classmethod + def unmarshal(cls, config): + """ + Construct a new server from a flat structure. This method is + principally used to construct an object from a Management API + payload. The configuration passed in is largely assumed to be + valid. + + :param: config: A hash of properties + :return: A newly constructed server object with the specified properties. + """ + name = config['server-name'] + group = config['group-name'] + root = config['root'] + port = config['port'] + + result = None + if config['server-type'] == 'http': + result = HttpServer(name, group, port, root) + if config['server-type'] == 'odbc': + result = OdbcServer(name, group, port, root) + if config['server-type'] == 'xdbc': + result = XdbcServer(name, group, port, root) + if config['server-type'] == 'webdav': + result = WebDAVServer(name, group, port, root) + + if result is None: + raise UnexpectedManagementAPIResponse("Unexpected server type") + + result._config = config + result.name = result._config['server-name'] + + olist = [] + if 'schema' in result._config: + for index in result._config['schema']: + temp = Schema(index['namespace-uri'], index['schema-location']) + olist.append(temp) + result._config['schema'] = olist + + olist = [] + if 'namespace' in result._config: + for index in result._config['namespace']: + temp = Namespace(index['prefix'], index['namespace-uri']) + 
olist.append(temp) + result._config['namespace'] = olist + + olist = [] + if 'using-namespace' in result._config: + for index in result._config['using-namespace']: + temp = UsingNamespace(index['namespace-uri']) + olist.append(temp) + result._config['using-namespace'] = olist + + olist = [] + if 'module-location' in result._config: + for index in result._config['module-location']: + temp = ModuleLocation(index['namespace-uri'], index['location']) + olist.append(temp) + result._config['module-location'] = olist + + olist = [] + if 'request-blackout' in result._config: + for blackout in result._config['request-blackout']: + temp = None + if (blackout['blackout-type'] == 'recurring' + and blackout['period'] is None): + temp = RequestBlackout.recurringAllDay( + blackout['day'], + blackout['user'] if 'user' in blackout else None, + blackout['role'] if 'role' in blackout else None) + elif (blackout['blackout-type'] == 'recurring' + and 'duration' in blackout['period']): + temp = RequestBlackout.recurringDuration( + blackout['day'], + blackout['period']['start-time'], + blackout['period']['duration'], + blackout['user'] if 'user' in blackout else None, + blackout['role'] if 'role' in blackout else None) + elif (blackout['blackout-type'] == 'recurring' + and 'end-time' in blackout['period']): + temp = RequestBlackout.recurringStartEnd( + blackout['day'], + blackout['period']['start-time'], + blackout['period']['end-time'], + blackout['user'] if 'user' in blackout else None, + blackout['role'] if 'role' in blackout else None) + elif (blackout['blackout-type'] == 'once' + and 'end-time' in blackout['period']): + temp = RequestBlackout.oneTimeStartEnd( + blackout['period']['start-date'], + blackout['period']['start-time'], + blackout['period']['end-date'], + blackout['period']['end-time'], + blackout['user'] if 'user' in blackout else None, + blackout['role'] if 'role' in blackout else None) + elif (blackout['blackout-type'] == 'once' + and 'duration' in blackout['period']): 
+ temp = RequestBlackout.oneTimeDuration( + blackout['period']['start-date'], + blackout['period']['start-time'], + blackout['period']['duration'], + blackout['user'] if 'user' in blackout else None, + blackout['role'] if 'role' in blackout else None) + else: + raise UnexpectedManagementAPIResponse("Unparseable request blackout period") + + olist.append(temp) + result._config['request-blackout'] = olist + return result + + def marshal(self): + """ + Return a flat structure suitable for conversion to JSON or XML. + + :return: A hash of the keys in this object and their values, recursively. + """ + struct = { } + for key in self._config: + if (key == 'schema' + or key == 'namespace' + or key == 'using-namespace' + or key == 'module-location' + or key == 'request-blackout'): + jlist = [] + for index in self._config[key]: + jlist.append(index._config) + struct[key] = jlist + else: + struct[key] = self._config[key]; + return struct + +class HttpServer(Server): + def __init__(self, name, group="Default", port=0, root='/', + content_db_name=None, modules_db_name=None): + super(Server, self).__init__() + self.name = name + self.etag = None + self._config = { + 'server-name': name, + 'server-type': 'http', + 'port': port, + 'root': root, + 'group-name': group + } + if content_db_name is not None: + self._config['content-database'] = content_db_name + if modules_db_name is not None: + self._config['modules-database'] = modules_db_name + + def compute_content_length(self): + """ + Compute content lengths for webDAV. + + *compute-content-length* specifes whether to compute + content length when using a webDAV server. + """ + if 'compute-content-length' in self._config: + return self._config['compute-content-length'] + return None + + def set_compute_content_length(self, compute_content_length): + """ + Sets compute content lengths for webDAV. + + *compute-content-length* specifes whether to compute + content length when using a webDAV server. 
+ """ + self._config['compute-content-length'] = compute_content_length + return self + + def content_database_name(self): + """ + The database name. + + *database* specifies the database to which this App + Server connects for query execution. + """ + if 'content-database' in self._config: + return self._config['content-database'] + return None + + def set_content_database_name(self, content_database): + """ + Sets the database name. + + *database* specifies the database to which this App + Server connects for query execution. + """ + self._config['content-database'] = content_database + return self + + def default_error_format(self): + """ + The default error format for protocol errors. One of + html,xml,json,compatiable + + *default error handler* specifies the default format + for protocol errors for this server. + """ + if 'default-error-format' in self._config: + return self._config['default-error-format'] + return None + + def set_default_error_format(self, default_error_format): + """ + Sets the default error format for protocol errors. One of + html,xml,json,compatiable + + *default error handler* specifies the default format + for protocol errors for this server. + """ + self._config['default-error-format'] = default_error_format + return self + + def default_inference_size(self): + """ + The default inference size for a request, in megabytes. + + *default inference size* specifies the default value + for any request's inference size. + """ + if 'default-inference-size' in self._config: + return self._config['default-inference-size'] + return None + + def set_default_inference_size(self, default_inference_size): + """ + Sets the default inference size for a request, in megabytes. + + *default inference size* specifies the default value + for any request's inference size. + """ + self._config['default-inference-size'] = default_inference_size + return self + + def default_time_limit(self): + """ + The default time limit for a request, in seconds. 
+ + *default time limit* specifies the default value for + any request's time limit, when otherwise unspecified. + A request can change its time limit using ``xdmp:set-request-time-limit``. + The time limit is the default number of seconds allowed + for servicing a query request. + """ + if 'default-time-limit' in self._config: + return self._config['default-time-limit'] + return None + + def set_default_time_limit(self, default_time_limit): + """ + Sets the default time limit for a request, in seconds. + + *default time limit* specifies the default value for + any request's time limit, when otherwise unspecified. + A request can change its time limit using ``xdmp:set-request-time-limit``. + The time limit is the default number of seconds allowed + for servicing a query request. + """ + self._config['default-time-limit'] = default_time_limit + return self + + def default_user(self): + """ + The user used as the default user in application level + authentication. Using the admin user as the default + user is equivalent to turning security off. + + *default user* only applies for application-level authentication. + It specifies the default user who is authenticated + (without a password) for all users accessing the server. + Setting the default user to a user with the admin role + effectively disables security, because everyone who + accesses the server then has the admin role. + """ + if 'default-user' in self._config: + return self._config['default-user'] + return None + + def set_default_user(self, default_user): + """ + Sets the user used as the default user in application level + authentication. Using the admin user as the default + user is equivalent to turning security off. + + *default user* only applies for application-level authentication. + It specifies the default user who is authenticated + (without a password) for all users accessing the server. 
+ Setting the default user to a user with the admin role + effectively disables security, because everyone who + accesses the server then has the admin role. + """ + self._config['default-user'] = default_user + return self + + def error_handler(self): + """ + The script that handles 400 and 500 errors for this + server. + + *error handler* specifies the page to internally redirect + to in case of any 400 or 500 errors. + """ + if 'error-handler' in self._config: + return self._config['error-handler'] + return None + + def set_error_handler(self, error_handler): + """ + Sets the script that handles 400 and 500 errors for this + server. + + *error handler* specifies the page to internally redirect + to in case of any 400 or 500 errors. + """ + self._config['error-handler'] = error_handler + return self + + def execute(self): + """ + The execute flag + """ + if 'execute' in self._config: + return self._config['execute'] + return None + + def set_execute(self, execute): + """ + Set the execute flag + """ + self._config['execute'] = execute + return self + + def keep_alive_timeout(self): + """ + The keep-alive socket recv timeout, in seconds. + + *keep alive timeout* specifies the maximum number of + seconds before a socket receives a timeout for subsequent + requests over the same connection. + """ + if 'keep-alive-timeout' in self._config: + return self._config['keep-alive-timeout'] + return None + + def set_keep_alive_timeout(self, keep_alive_timeout): + """ + Sets the keep-alive socket recv timeout, in seconds. + + *keep alive timeout* specifies the maximum number of + seconds before a socket receives a timeout for subsequent + requests over the same connection. + """ + self._config['keep-alive-timeout'] = keep_alive_timeout + return self + + def max_inference_size(self): + """ + The upper bound for a request's inference size, in + megabytes. + + *max inference size* specifies the upper bound for + any request's inference size. 
No request may set its + inference size higher than this number. The inference + size, in turn, is the maximum amount of memory in megabytes + allowed for sem:store performing inference. The App + Server gives up on queries which exceed the memory + limit, and returns an error. + """ + if 'max-inference-size' in self._config: + return self._config['max-inference-size'] + return None + + def set_max_inference_size(self, max_inference_size): + """ + Sets the upper bound for a request's inference size, in + megabytes. + + *max inference size* specifies the upper bound for + any request's inference size. No request may set its + inference size higher than this number. The inference + size, in turn, is the maximum amount of memory in megabytes + allowed for sem:store performing inference. The App + Server gives up on queries which exceed the memory + limit, and returns an error. + """ + self._config['max-inference-size'] = max_inference_size + return self + + def max_time_limit(self): + """ + The upper bound for a request's time limit, in seconds. + + *max time limit* specifies the upper bound for any + request's time limit. No request may set its time limit + (for example with ``xdmp:set-request-time-limit``) + higher than this number. The time limit, in turn, is + the maximum number of seconds allowed for servicing + a query request. The App Server gives up on queries + which take longer, and returns an error. + """ + if 'max-time-limit' in self._config: + return self._config['max-time-limit'] + return None + + def set_max_time_limit(self, max_time_limit): + """ + Sets the upper bound for a request's time limit, in seconds. + + *max time limit* specifies the upper bound for any + request's time limit. No request may set its time limit + (for example with ``xdmp:set-request-time-limit``) + higher than this number. The time limit, in turn, is + the maximum number of seconds allowed for servicing + a query request. 
The App Server gives up on queries + which take longer, and returns an error. + """ + self._config['max-time-limit'] = max_time_limit + return self + + def modules_database_name(self): + """ + The database that contains application modules. + + *modules* specifies the name of the database in which + this HTTP server locates XQuery application code. If + set to (file system), then any files in the specified + *root* directory are executable (given the proper permissions). + If set to a database, then any documents in the database + whose URI begins with the specified *root* directory + are executable. + """ + if 'modules-database' in self._config: + return self._config['modules-database'] + return None + + def set_modules_database_name(self, modules_database): + """ + Sets the database that contains application modules. + + *modules* specifies the name of the database in which + this HTTP server locates XQuery application code. If + set to (file system), then any files in the specified + *root* directory are executable (given the proper permissions). + If set to a database, then any documents in the database + whose URI begins with the specified *root* directory + are executable. + """ + self._config['modules-database'] = modules_database + return self + + def privilege_name(self): + """ + The privilege restricting access to the server. + + *privilege* specifies the execute privilege required + to access the server. + """ + if 'privilege' in self._config: + return self._config['privilege'] + return None + + def set_privilege_name(self, privilege): + """ + Sets the privilege restricting access to the server. + + *privilege* specifies the execute privilege required + to access the server. + """ + self._config['privilege'] = privilege + return self + + def request_timeout(self): + """ + The request socket recv timeout, in seconds. + + *request timeout* specifies the maximum number of seconds + before a socket receives a timeout for the first request. 
+ """ + if 'request-timeout' in self._config: + return self._config['request-timeout'] + return None + + def set_request_timeout(self, request_timeout): + """ + Sets the request socket recv timeout, in seconds. + + *request timeout* specifies the maximum number of seconds + before a socket receives a timeout for the first request. + """ + self._config['request-timeout'] = request_timeout + return self + + def rewrite_resolves_globally(self): + """ + Allow rewritten URLs to be resolved from the global + MarkLogic Modules/ directory. + + *rewrite resolves globally* specifies whether to allow + rewritten URLs to be resolved from the global MarkLogic + Modules/ directory. + """ + if 'rewrite-resolves-globally' in self._config: + return self._config['rewrite-resolves-globally'] + return None + + def set_rewrite_resolves_globally(self, rewrite_resolves_globally): + """ + Sets allow rewritten URLs to be resolved from the global + MarkLogic Modules/ directory. + + *rewrite resolves globally* specifies whether to allow + rewritten URLs to be resolved from the global MarkLogic + Modules/ directory. + """ + self._config['rewrite-resolves-globally'] = rewrite_resolves_globally + return self + + def session_timeout(self): + """ + The session expiration timeout, in seconds. + + *session timeout* specifies the maximum number of seconds + before a session times out. + """ + if 'session-timeout' in self._config: + return self._config['session-timeout'] + return None + + def set_session_timeout(self, session_timeout): + """ + Sets the session expiration timeout, in seconds. + + *session timeout* specifies the maximum number of seconds + before a session times out. + """ + self._config['session-timeout'] = session_timeout + return self + + def static_expires(self): + """ + Add an "expires" HTTP header for static content to + expire after this many seconds. + + *static expires* adds an "expires" HTTP header for + static content to expire after this many seconds. 
+ """ + if 'static-expires' in self._config: + return self._config['static-expires'] + return None + + def set_static_expires(self, static_expires): + """ + Sets add an "expires" HTTP header for static content to + expire after this many seconds. + + *static expires* adds an "expires" HTTP header for + static content to expire after this many seconds. + """ + self._config['static-expires'] = static_expires + return self + + def url_rewriter(self): + """ + The script that rewrites URLs for this server. + + *url rewriter* specifies the script to run to rewrite + URLs. + """ + if 'url-rewriter' in self._config: + return self._config['url-rewriter'] + return None + + def set_url_rewriter(self, url_rewriter): + """ + Sets the script that rewrites URLs for this server. + + *url rewriter* specifies the script to run to rewrite + URLs. + """ + self._config['url-rewriter'] = url_rewriter + return self + + def webDAV(self): + """ + The webDAV setting. + """ + if 'webDAV' in self._config: + return self._config['webDAV'] + return None + +class OdbcServer(Server): + def __init__(self, name, group='Default', port=0, root='/', + content_db_name=None, modules_db_name=None): + super(Server, self).__init__() + self.name = name + self.etag = None + self._config = { + 'server-name': name, + 'server-type': 'odbc', + 'port': port, + 'root': root, + 'group-name': group + } + if content_db_name is not None: + self._config['content-database'] = content_db_name + if modules_db_name is not None: + self._config['modules-database'] = modules_db_name + + def connection_timeout(self): + """ + The idle connection expiration timeout, in seconds, + or 0 to indicate no idle connection timeout. + + *connection timeout* specifies the maximum number of + seconds in an idle state before a connection times + out. A value of 0 means the connection will never time + out. 
+ """ + if 'connection-timeout' in self._config: + return self._config['connection-timeout'] + return None + + def set_connection_timeout(self, connection_timeout): + """ + Sets the idle connection expiration timeout, in seconds, + or 0 to indicate no idle connection timeout. + + *connection timeout* specifies the maximum number of + seconds in an idle state before a connection times + out. A value of 0 means the connection will never time + out. + """ + self._config['connection-timeout'] = connection_timeout + return self + + def default_query_time_limit(self): + """ + The default time limit for a query, in seconds. + + *default query time limit* specifies the default value + for any query's time limit, when otherwise unspecified. + The query timeout can be changed at runtime using the + statement ``SET statement_timeout``. The time limit, + in turn, is the maximum number of seconds allowed for + servicing a query request. The App Server gives up + on queries which take longer, and returns an error. + """ + if 'default-query-time-limit' in self._config: + return self._config['default-query-time-limit'] + return None + + def set_default_query_time_limit(self, default_query_time_limit): + """ + Sets the default time limit for a query, in seconds. + + *default query time limit* specifies the default value + for any query's time limit, when otherwise unspecified. + The query timeout can be changed at runtime using the + statement ``SET statement_timeout``. The time limit, + in turn, is the maximum number of seconds allowed for + servicing a query request. The App Server gives up + on queries which take longer, and returns an error. + """ + self._config['default-query-time-limit'] = default_query_time_limit + return self + + def max_query_time_limit(self): + """ + The upper bound for a query's time limit, in seconds. + + *max query time limit* specifies the upper bound for + any query's time limit. 
No runtime statement may set + the time limit (for example with ``SET statement_timeout``) + higher than this number. The time limit, in turn, is + the maximum number of seconds allowed for servicing + a query request. The App Server gives up on queries + which take longer, and returns an error. + """ + if 'max-query-time-limit' in self._config: + return self._config['max-query-time-limit'] + return None + + def set_max_query_time_limit(self, max_query_time_limit): + """ + Sets the upper bound for a query's time limit, in seconds. + + *max query time limit* specifies the upper bound for + any query's time limit. No runtime statement may set + the time limit (for example with ``SET statement_timeout``) + higher than this number. The time limit, in turn, is + the maximum number of seconds allowed for servicing + a query request. The App Server gives up on queries + which take longer, and returns an error. + """ + self._config['max-query-time-limit'] = max_query_time_limit + return self + +class XdbcServer(Server): + def __init__(self, name, group='Default', port=0, root='/', + content_db_name=None, modules_db_name=None): + super(Server, self).__init__() + self.name = name + self.etag = None + self._config = { + 'server-name': name, + 'server-type': 'xdbc', + 'port': port, + 'root': root, + 'group-name': group + } + if content_db_name is not None: + self._config['content-database'] = content_db_name + if modules_db_name is not None: + self._config['modules-database'] = modules_db_name + + def default_inference_size(self): + """ + The default inference size for a request, in megabytes. + + *default inference size* specifies the default value + for any request's inference size. + """ + if 'default-inference-size' in self._config: + return self._config['default-inference-size'] + return None + + def set_default_inference_size(self, default_inference_size): + """ + Sets the default inference size for a request, in megabytes. 
+ + *default inference size* specifies the default value + for any request's inference size. + """ + self._config['default-inference-size'] = default_inference_size + return self + + def default_time_limit(self): + """ + The default time limit for a request, in seconds. + + *default time limit* specifies the default value for + any request's time limit, when otherwise unspecified. + A request can change its time limit using ``xdmp:set-request-time-limit``. + The time limit is the default number of seconds allowed + for servicing a query request. + """ + if 'default-time-limit' in self._config: + return self._config['default-time-limit'] + return None + + def set_default_time_limit(self, default_time_limit): + """ + Sets the default time limit for a request, in seconds. + + *default time limit* specifies the default value for + any request's time limit, when otherwise unspecified. + A request can change its time limit using ``xdmp:set-request-time-limit``. + The time limit is the default number of seconds allowed + for servicing a query request. + """ + self._config['default-time-limit'] = default_time_limit + return self + + def keep_alive_timeout(self): + """ + The keep-alive socket recv timeout, in seconds. + + *keep alive timeout* specifies the maximum number of + seconds before a socket receives a timeout for subsequent + requests over the same connection. + """ + if 'keep-alive-timeout' in self._config: + return self._config['keep-alive-timeout'] + return None + + def set_keep_alive_timeout(self, keep_alive_timeout): + """ + Sets the keep-alive socket recv timeout, in seconds. + + *keep alive timeout* specifies the maximum number of + seconds before a socket receives a timeout for subsequent + requests over the same connection. + """ + self._config['keep-alive-timeout'] = keep_alive_timeout + return self + + def max_inference_size(self): + """ + The upper bound for a request's inference size, in + megabytes. 
+ + *max inference size* specifies the upper bound for + any request's inference size. No request may set its + inference size higher than this number. The inference + size, in turn, is the maximum amount of memory in megabytes + allowed for sem:store performing inference. The App + Server gives up on queries which exceed the memory + limit, and returns an error. + """ + if 'max-inference-size' in self._config: + return self._config['max-inference-size'] + return None + + def set_max_inference_size(self, max_inference_size): + """ + Sets the upper bound for a request's inference size, in + megabytes. + + *max inference size* specifies the upper bound for + any request's inference size. No request may set its + inference size higher than this number. The inference + size, in turn, is the maximum amount of memory in megabytes + allowed for sem:store performing inference. The App + Server gives up on queries which exceed the memory + limit, and returns an error. + """ + self._config['max-inference-size'] = max_inference_size + return self + + def max_time_limit(self): + """ + The upper bound for a request's time limit, in seconds. + + *max time limit* specifies the upper bound for any + request's time limit. No request may set its time limit + (for example with ``xdmp:set-request-time-limit``) + higher than this number. The time limit, in turn, is + the maximum number of seconds allowed for servicing + a query request. The App Server gives up on queries + which take longer, and returns an error. + """ + if 'max-time-limit' in self._config: + return self._config['max-time-limit'] + return None + + def set_max_time_limit(self, max_time_limit): + """ + Sets the upper bound for a request's time limit, in seconds. + + *max time limit* specifies the upper bound for any + request's time limit. No request may set its time limit + (for example with ``xdmp:set-request-time-limit``) + higher than this number. 
The time limit, in turn, is + the maximum number of seconds allowed for servicing + a query request. The App Server gives up on queries + which take longer, and returns an error. + """ + self._config['max-time-limit'] = max_time_limit + return self + + def request_timeout(self): + """ + The request socket recv timeout, in seconds. + + *request timeout* specifies the maximum number of seconds + before a socket receives a timeout for the first request. + """ + if 'request-timeout' in self._config: + return self._config['request-timeout'] + return None + + def set_request_timeout(self, request_timeout): + """ + Sets the request socket recv timeout, in seconds. + + *request timeout* specifies the maximum number of seconds + before a socket receives a timeout for the first request. + """ + self._config['request-timeout'] = request_timeout + return self + + def session_timeout(self): + """ + The session expiration timeout, in seconds. + + *session timeout* specifies the maximum number of seconds + before a session times out. + """ + if 'session-timeout' in self._config: + return self._config['session-timeout'] + return None + + def set_session_timeout(self, session_timeout): + """ + Sets the session expiration timeout, in seconds. + + *session timeout* specifies the maximum number of seconds + before a session times out. + """ + self._config['session-timeout'] = session_timeout + return self + +class WebDAVServer(Server): + def __init__(self, name, group='Default', port=0, root='/', + content_db_name=None): + super(Server, self).__init__() + self.name = name + self.etag = None + self._config = { + 'server-name': name, + 'server-type': 'webdav', + 'port': port, + 'root': root, + 'group-name': group + } + # Yes, modules-database is intentional here! + if content_db_name is not None: + self._config['modules-database'] = content_db_name + + def compute_content_length(self): + """ + Compute content lengths for webDAV. 
+ + *compute-content-length* specifies whether to compute + content length when using a webDAV server. + """ + if 'compute-content-length' in self._config: + return self._config['compute-content-length'] + return None + + def set_compute_content_length(self, compute_content_length): + """ + Sets compute content lengths for webDAV. + + *compute-content-length* specifies whether to compute + content length when using a webDAV server. + """ + self._config['compute-content-length'] = compute_content_length + return self + + def default_error_format(self): + """ + The default error format for protocol errors. One of + html,xml,json,compatible + + *default error handler* specifies the default format + for protocol errors for this server. + """ + if 'default-error-format' in self._config: + return self._config['default-error-format'] + return None + + def set_default_error_format(self, default_error_format): + """ + Sets the default error format for protocol errors. One of + html,xml,json,compatible + + *default error handler* specifies the default format + for protocol errors for this server. + """ + self._config['default-error-format'] = default_error_format + return self + + def default_inference_size(self): + """ + The default inference size for a request, in megabytes. + + *default inference size* specifies the default value + for any request's inference size. + """ + if 'default-inference-size' in self._config: + return self._config['default-inference-size'] + return None + + def set_default_inference_size(self, default_inference_size): + """ + Sets the default inference size for a request, in megabytes. + + *default inference size* specifies the default value + for any request's inference size. + """ + self._config['default-inference-size'] = default_inference_size + return self + + def default_time_limit(self): + """ + The default time limit for a request, in seconds. 
+ + *default time limit* specifies the default value for + any request's time limit, when otherwise unspecified. + A request can change its time limit using ``xdmp:set-request-time-limit``. + The time limit is the default number of seconds allowed + for servicing a query request. + """ + if 'default-time-limit' in self._config: + return self._config['default-time-limit'] + return None + + def set_default_time_limit(self, default_time_limit): + """ + Sets the default time limit for a request, in seconds. + + *default time limit* specifies the default value for + any request's time limit, when otherwise unspecified. + A request can change its time limit using ``xdmp:set-request-time-limit``. + The time limit is the default number of seconds allowed + for servicing a query request. + """ + self._config['default-time-limit'] = default_time_limit + return self + + def default_user(self): + """ + The user used as the default user in application level + authentication. Using the admin user as the default + user is equivalent to turning security off. + + *default user* only applies for application-level authentication. + It specifies the default user who is authenticated + (without a password) for all users accessing the server. + Setting the default user to a user with the admin role + effectively disables security, because everyone who + accesses the server then has the admin role. + """ + if 'default-user' in self._config: + return self._config['default-user'] + return None + + def set_default_user(self, default_user): + """ + Sets the user used as the default user in application level + authentication. Using the admin user as the default + user is equivalent to turning security off. + + *default user* only applies for application-level authentication. + It specifies the default user who is authenticated + (without a password) for all users accessing the server. 
+ Setting the default user to a user with the admin role + effectively disables security, because everyone who + accesses the server then has the admin role. + """ + self._config['default-user'] = default_user + return self + + def error_handler(self): + """ + The script that handles 400 and 500 errors for this + server. + + *error handler* specifies the page to internally redirect + to in case of any 400 or 500 errors. + """ + if 'error-handler' in self._config: + return self._config['error-handler'] + return None + + def set_error_handler(self, error_handler): + """ + Sets the script that handles 400 and 500 errors for this + server. + + *error handler* specifies the page to internally redirect + to in case of any 400 or 500 errors. + """ + self._config['error-handler'] = error_handler + return self + + def execute(self): + if 'execute' in self._config: + return self._config['execute'] + return None + + def set_execute(self, execute): + self._config['execute'] = execute + return self + + def keep_alive_timeout(self): + """ + The keep-alive socket recv timeout, in seconds. + + *keep alive timeout* specifies the maximum number of + seconds before a socket receives a timeout for subsequent + requests over the same connection. + """ + if 'keep-alive-timeout' in self._config: + return self._config['keep-alive-timeout'] + return None + + def set_keep_alive_timeout(self, keep_alive_timeout): + """ + Sets the keep-alive socket recv timeout, in seconds. + + *keep alive timeout* specifies the maximum number of + seconds before a socket receives a timeout for subsequent + requests over the same connection. + """ + self._config['keep-alive-timeout'] = keep_alive_timeout + return self + + def max_inference_size(self): + """ + The upper bound for a request's inference size, in + megabytes. + + *max inference size* specifies the upper bound for + any request's inference size. No request may set its + inference size higher than this number. 
The inference + size, in turn, is the maximum amount of memory in megabytes + allowed for sem:store performing inference. The App + Server gives up on queries which exceed the memory + limit, and returns an error. + """ + if 'max-inference-size' in self._config: + return self._config['max-inference-size'] + return None + + def set_max_inference_size(self, max_inference_size): + """ + Sets the upper bound for a request's inference size, in + megabytes. + + *max inference size* specifies the upper bound for + any request's inference size. No request may set its + inference size higher than this number. The inference + size, in turn, is the maximum amount of memory in megabytes + allowed for sem:store performing inference. The App + Server gives up on queries which exceed the memory + limit, and returns an error. + """ + self._config['max-inference-size'] = max_inference_size + return self + + def max_time_limit(self): + """ + The upper bound for a request's time limit, in seconds. + + *max time limit* specifies the upper bound for any + request's time limit. No request may set its time limit + (for example with ``xdmp:set-request-time-limit``) + higher than this number. The time limit, in turn, is + the maximum number of seconds allowed for servicing + a query request. The App Server gives up on queries + which take longer, and returns an error. + """ + if 'max-time-limit' in self._config: + return self._config['max-time-limit'] + return None + + def set_max_time_limit(self, max_time_limit): + """ + Sets the upper bound for a request's time limit, in seconds. + + *max time limit* specifies the upper bound for any + request's time limit. No request may set its time limit + (for example with ``xdmp:set-request-time-limit``) + higher than this number. The time limit, in turn, is + the maximum number of seconds allowed for servicing + a query request. The App Server gives up on queries + which take longer, and returns an error. 
+ """ + self._config['max-time-limit'] = max_time_limit + return self + + def request_timeout(self): + """ + The request socket recv timeout, in seconds. + + *request timeout* specifies the maximum number of seconds + before a socket receives a timeout for the first request. + """ + if 'request-timeout' in self._config: + return self._config['request-timeout'] + return None + + def set_request_timeout(self, request_timeout): + """ + Sets the request socket recv timeout, in seconds. + + *request timeout* specifies the maximum number of seconds + before a socket receives a timeout for the first request. + """ + self._config['request-timeout'] = request_timeout + return self + + def rewrite_resolves_globally(self): + """ + Allow rewritten URLs to be resolved from the global + MarkLogic Modules/ directory. + + *rewrite resolves globally* specifies whether to allow + rewritten URLs to be resolved from the global MarkLogic + Modules/ directory. + """ + if 'rewrite-resolves-globally' in self._config: + return self._config['rewrite-resolves-globally'] + return None + + def set_rewrite_resolves_globally(self, rewrite_resolves_globally): + """ + Sets allow rewritten URLs to be resolved from the global + MarkLogic Modules/ directory. + + *rewrite resolves globally* specifies whether to allow + rewritten URLs to be resolved from the global MarkLogic + Modules/ directory. + """ + self._config['rewrite-resolves-globally'] = rewrite_resolves_globally + return self + + def session_timeout(self): + """ + The session expiration timeout, in seconds. + + *session timeout* specifies the maximum number of seconds + before a session times out. + """ + if 'session-timeout' in self._config: + return self._config['session-timeout'] + return None + + def set_session_timeout(self, session_timeout): + """ + Sets the session expiration timeout, in seconds. + + *session timeout* specifies the maximum number of seconds + before a session times out. 
+ """ + self._config['session-timeout'] = session_timeout + return self + + def static_expires(self): + """ + Add an "expires" HTTP header for static content to + expire after this many seconds. + + *static expires* adds an "expires" HTTP header for + static content to expire after this many seconds. + """ + if 'static-expires' in self._config: + return self._config['static-expires'] + return None + + def set_static_expires(self, static_expires): + """ + Sets add an "expires" HTTP header for static content to + expire after this many seconds. + + *static expires* adds an "expires" HTTP header for + static content to expire after this many seconds. + """ + self._config['static-expires'] = static_expires + return self + + def url_rewriter(self): + """ + The script that rewrites URLs for this server. + + *url rewriter* specifies the script to run to rewrite + URLs. + """ + if 'url-rewriter' in self._config: + return self._config['url-rewriter'] + return None + + def set_url_rewriter(self, url_rewriter): + """ + Sets the script that rewrites URLs for this server. + + *url rewriter* specifies the script to run to rewrite + URLs. + """ + self._config['url-rewriter'] = url_rewriter + return self + + def webDAV(self): + if 'webDAV' in self._config: + return self._config['webDAV'] + return None diff --git a/python_api/marklogic/models/server/module.py b/python_api/marklogic/models/server/module.py new file mode 100644 index 0000000..b80078b --- /dev/null +++ b/python_api/marklogic/models/server/module.py @@ -0,0 +1,69 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2015 MarkLogic Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0# +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# File History +# ------------ +# +# Norman Walsh 05/10/2015 Initial development + +""" +Classes for dealing with modules +""" + +class ModuleLocation: + """ + A server module location. + """ + def __init__(self, namespace_uri, location): + """ + Create a module mapping. + + :param namespace_uri: the namespace of the module + :param location: the location of the module + """ + self._config = { + 'namespace-uri': namespace_uri, + 'location': location + } + + def namespace_uri(self): + """ + The module namespace URI. + """ + if self._config['namespace-uri'] == '': + return None + return self._config['namespace-uri'] + + def set_namespace_uri(self, namespace_uri): + """ + Set the module namespace URI. + """ + self._config['namespace-uri'] = namespace_uri + return self + + def module_location(self): + """ + The module location. + """ + return self._config['location'] + + def set_module_location(self, location): + """ + Set the module location. + """ + self._config['location'] = location + return self + diff --git a/python_api/marklogic/models/server/namespace.py b/python_api/marklogic/models/server/namespace.py new file mode 100644 index 0000000..c3cdbb5 --- /dev/null +++ b/python_api/marklogic/models/server/namespace.py @@ -0,0 +1,83 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2015 MarkLogic Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0# +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# File History +# ------------ +# +# Norman Walsh 05/10/2015 Initial development + +""" +Classes for dealing with namespaces +""" + +class UsingNamespace: + """ + A server namespace. + """ + def __init__(self, namespace_uri): + """ + Create a server namespace + + :param namespace_uri: the namespace of the namespace + """ + self._config = { + 'namespace-uri': namespace_uri + } + + def namespace_uri(self): + """ + The server namespace URI. + """ + if self._config['namespace-uri'] == '': + return None + return self._config['namespace-uri'] + + def set_namespace_uri(self, namespace_uri): + """ + Set the server namespace URI. + """ + self._config['namespace-uri'] = namespace_uri + return self + +class Namespace(UsingNamespace): + """ + A server namespace mapping. + """ + def __init__(self, prefix, namespace_uri): + """ + Create a namespace mapping. + + :param prefix: the prefix + :param namespace_uri: the namespace of the namespace + """ + self._config = { + 'prefix': prefix, + 'namespace-uri': namespace_uri + } + + def prefix(self): + """ + The server namespace prefix. + """ + return self._config['prefix'] + + def set_prefix(self, location): + """ + Set the server namespace prefix. 
# -*- coding: utf-8 -*-
#
# Copyright 2015 MarkLogic Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# File History
# ------------
#
# Norman Walsh      05/07/2015     Initial development

"""
Classes for dealing with server request blackouts
"""

# ValidationError was raised throughout this module but never imported;
# every subclass constructor would have died with NameError instead.
from marklogic.models.utilities.validators import ValidationError, \
    assert_list_of_type
from marklogic.models.utilities.utilities import PropertyLists

class RequestBlackout(PropertyLists):
    """
    A request blackout period. This is an abstract class; use one of
    the factory classmethods (recurringDuration, recurringStartEnd,
    recurringAllDay, oneTimeDuration, oneTimeStartEnd) to construct a
    concrete blackout.
    """
    def __init__(self):
        raise ValueError("Do not instantiate RequestBlackout directly")

    def blackout_type(self):
        """
        The blackout type ('recurring' or 'once').
        """
        # The constructors store this under the hyphenated key
        # 'blackout-type' (matching the Management API payload);
        # reading 'blackout_type' always raised KeyError.
        return self._config['blackout-type']

    def user_names(self):
        """
        The names of the users to whom this blackout applies,
        or None if no users are configured.
        """
        if 'user' in self._config:
            return self._config['user']
        return None

    def add_user_name(self, user):
        """
        Add a user name to the list of blackout users.

        :return: The blackout object
        """
        return self.add_to_property_list('user', user)

    def set_user_names(self, users):
        """
        Set the list of blackout users.

        :return: The blackout object
        """
        return self.set_property_list('user', users)

    def remove_user_name(self, user):
        """
        Remove a user name from the list of blackout users.

        :return: The blackout object
        """
        return self.remove_from_property_list('user', user)

    def role_names(self):
        """
        The names of the roles to which this blackout applies,
        or None if no roles are configured.
        """
        if 'role' in self._config:
            return self._config['role']
        return None

    def add_role_name(self, role):
        """
        Add a role to the list of blackout roles.

        :return: The blackout object
        """
        return self.add_to_property_list('role', role)

    def set_role_names(self, roles):
        """
        Set the list of blackout roles.

        :return: The blackout object
        """
        return self.set_property_list('role', roles)

    def remove_role_name(self, role):
        """
        Remove a role from the list of blackout roles.

        :return: The blackout object
        """
        return self.remove_from_property_list('role', role)

    @classmethod
    def recurringDuration(cls, days, start_time, duration,
                          users=None, roles=None):
        """
        Create a recurring request blackout period with a duration.
        """
        # FIXME: validate args
        return RequestBlackoutRecurringDuration(days, start_time, duration,
                                                users, roles)

    @classmethod
    def recurringStartEnd(cls, days, start_time, end_time,
                          users=None, roles=None):
        """
        Create a recurring request blackout period with start and end times.
        """
        # FIXME: validate args
        return RequestBlackoutRecurringStartEnd(days, start_time, end_time,
                                                users, roles)

    @classmethod
    def recurringAllDay(cls, days, users=None, roles=None):
        """
        Create a recurring request blackout period that lasts all day.
        """
        # FIXME: validate args
        return RequestBlackoutRecurringAllDay(days, users, roles)

    @classmethod
    def oneTimeDuration(cls, start_date, start_time, duration,
                        users=None, roles=None):
        """
        Create a one time request blackout period with a duration.
        """
        # FIXME: validate args
        return RequestBlackoutOneTimeDuration(start_date, start_time, duration,
                                              users, roles)

    @classmethod
    def oneTimeStartEnd(cls, start_date, start_time, end_date, end_time,
                        users=None, roles=None):
        """
        Create a one time request blackout period with a start and end time.
        """
        # FIXME: validate args
        return RequestBlackoutOneTimeStartEnd(start_date, start_time,
                                              end_date, end_time,
                                              users, roles)

class RequestBlackoutRecurringDuration(RequestBlackout):
    """
    A recurring request blackout period for a duration.
    """
    def __init__(self, days, start_time, duration,
                 users=None, roles=None):
        """
        Create a recurring request blackout period for a duration.
        """
        if users is None and roles is None:
            raise ValidationError(
                'A request blackout must specify users or roles', None)

        self._config = {
            'blackout-type': 'recurring',
            'day': assert_list_of_type(days, str),
            'period': {
                'start-time': start_time,
                'duration': duration
            }
        }
        if users is not None:
            self._config['user'] = users
        if roles is not None:
            # Stored under 'role': that is the key role_names() and the
            # Management API expect (was 'roles', which readers never saw).
            self._config['role'] = roles

    def days(self):
        """
        The blackout days.
        """
        # The constructor stores this under 'day', not 'days'.
        return self._config['day']

    def start_time(self):
        """
        The blackout start time.
        """
        return self._config['period']['start-time']

    def duration(self):
        """
        The blackout duration.
        """
        return self._config['period']['duration']

class RequestBlackoutRecurringStartEnd(RequestBlackout):
    """
    A recurring request blackout period with start and end times.
    """
    def __init__(self, days, start_time, end_time,
                 users=None, roles=None):
        """
        Create a recurring request blackout period with start and end times.
        """
        if users is None and roles is None:
            raise ValidationError(
                'A request blackout must specify users or roles', None)

        self._config = {
            'blackout-type': 'recurring',
            'day': assert_list_of_type(days, str),
            'period': {
                'start-time': start_time,
                'end-time': end_time
            }
        }
        if users is not None:
            self._config['user'] = users
        if roles is not None:
            # See RequestBlackoutRecurringDuration: key is 'role'.
            self._config['role'] = roles

    def days(self):
        """
        The blackout days.
        """
        return self._config['day']

    def start_time(self):
        """
        The blackout start time.
        """
        return self._config['period']['start-time']

    def end_time(self):
        """
        The blackout end time.
        """
        return self._config['period']['end-time']

class RequestBlackoutRecurringAllDay(RequestBlackout):
    """
    A recurring request blackout period for a whole day.
    """
    def __init__(self, days, users=None, roles=None):
        """
        Create a recurring request blackout period for a whole day.
        """
        if users is None and roles is None:
            raise ValidationError(
                'A request blackout must specify users or roles', None)

        self._config = {
            'blackout-type': 'recurring',
            'day': assert_list_of_type(days, str),
            # No period: the blackout covers the entire day.
            'period': None
        }
        if users is not None:
            self._config['user'] = users
        if roles is not None:
            self._config['role'] = roles

    def days(self):
        """
        The blackout days.
        """
        return self._config['day']

class RequestBlackoutOneTimeDuration(RequestBlackout):
    """
    A one time request blackout period with a duration.
    """
    def __init__(self, start_date, start_time, duration,
                 users=None, roles=None):
        """
        Create a one time request blackout period with a duration.
        """
        if users is None and roles is None:
            raise ValidationError(
                'A request blackout must specify users or roles', None)

        self._config = {
            'blackout-type': 'once',
            'period': {
                'start-date': start_date,
                'start-time': start_time,
                'duration': duration
            }
        }
        if users is not None:
            self._config['user'] = users
        if roles is not None:
            self._config['role'] = roles

    def start_date(self):
        """
        The blackout start date.
        """
        return self._config['period']['start-date']

    def start_time(self):
        """
        The blackout start time.
        """
        return self._config['period']['start-time']

    def duration(self):
        """
        The blackout duration.
        """
        return self._config['period']['duration']

class RequestBlackoutOneTimeStartEnd(RequestBlackout):
    """
    A one time request blackout period with start and end times.
    """
    def __init__(self, start_date, start_time, end_date, end_time,
                 users=None, roles=None):
        """
        Create a one time request blackout period with start and end times.
        """
        if users is None and roles is None:
            raise ValidationError(
                'A request blackout must specify users or roles', None)

        self._config = {
            'blackout-type': 'once',
            'period': {
                'start-date': start_date,
                'start-time': start_time,
                'end-date': end_date,
                'end-time': end_time,
            }
        }
        if users is not None:
            self._config['user'] = users
        if roles is not None:
            self._config['role'] = roles

    def start_date(self):
        """
        The blackout start date.
        """
        return self._config['period']['start-date']

    def start_time(self):
        """
        The blackout start time.
        """
        return self._config['period']['start-time']

    def end_date(self):
        """
        The blackout end date.
        """
        return self._config['period']['end-date']

    def end_time(self):
        """
        The blackout end time.
        """
        return self._config['period']['end-time']
+ """ + return self._config['period']['end-date'] + + def end_time(): + """ + The blackout end time. + """ + return self._config['period']['end-time'] diff --git a/python_api/marklogic/models/server/schema.py b/python_api/marklogic/models/server/schema.py new file mode 100644 index 0000000..36119ad --- /dev/null +++ b/python_api/marklogic/models/server/schema.py @@ -0,0 +1,69 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2015 MarkLogic Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0# +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# File History +# ------------ +# +# Norman Walsh 05/10/2015 Initial development + +""" +Classes for dealing with schemas +""" + +class Schema: + """ + A server schema mapping. + """ + def __init__(self, namespace_uri, location): + """ + Create a schema mapping. + + :param namespace_uri: the namespace of the schema + :param location: the location of the schema + """ + self._config = { + 'namespace-uri': namespace_uri, + 'schema-location': location + } + + def namespace_uri(self): + """ + The schema namespace URI. + """ + if self._config['namespace-uri'] == '': + return None + return self._config['namespace-uri'] + + def set_namespace_uri(self, namespace_uri): + """ + Set the schema namespace URI. + """ + self._config['namespace-uri'] = namespace_uri + return self + + def schema_location(self): + """ + The schema location. + """ + return self._config['schema-location'] + + def set_schema_location(self, location): + """ + Set the schema location. 
+ """ + self._config['schema-location'] = location + return self + diff --git a/python_api/marklogic/models/user.py b/python_api/marklogic/models/user.py new file mode 100644 index 0000000..069652b --- /dev/null +++ b/python_api/marklogic/models/user.py @@ -0,0 +1,407 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2015 MarkLogic Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0# +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# File History +# ------------ +# +# Paul Hoehne 04/02/2015 Initial development +# Norman Walsh 04/29/2015 Hacked role.py into user.py +# + +""" +User related classes for manipulating MarkLogic users +""" + +from __future__ import unicode_literals, print_function, absolute_import + +import requests +from marklogic.models.utilities import exceptions +from marklogic.models.permission import Permission +from marklogic.models.utilities.utilities import PropertyLists +import json + +class User(PropertyLists): + """ + The User class encapsulates a MarkLogic user. It provides + methods to set/get database attributes. The use of methods will + allow IDEs with tooling to provide auto-completion hints. + """ + def __init__(self, name, password=None): + self._config = {} + self._config['user-name'] = name + if password is not None: + self._config['password'] = password + self.etag = None + self.name = name + + def user_name(self): + """ + Return the name of the user. 
+ + :return: The user name + """ + return self._config['user-name'] + + def set_user_name(self, name): + """ + Set the name of the user. + + :return: The user object + """ + self._config['user-name'] = name + return self + + def set_password(self, psw): + """ + Set the password of the user. + + There is no method to get the password. + + :return: The user object + """ + self._config['password'] = psw + return self + + def description(self): + """ + Returns the description for the user. + + :return: The user description + """ + if 'description' not in self._config: + return None + return self._config['description'] + + def set_description(self, description): + """ + Set the description for the user + + :param description: A description for the user + + :return: The user object + """ + self._config['description'] = description + return self + + def role_names(self): + """ + Returns the roles for this user + + :return: The list of roles + """ + if u'role' not in self._config: + return None + return self._config[u'role'] + + def set_role_names(self, roles): + """ + Sets the roles for this user + + :return: The user object + """ + return self.set_property_list('role', roles) + + def add_role_name(self, add_role): + """ + Adds the specified role to roles for this user + + :return: The user object + """ + return self.add_to_property_list('role', add_role) + + def remove_role_name(self, remove_role): + """ + Removes the specified role to roles for this user + + :return: The user object + """ + return self.remove_from_property_list('role', remove_role) + + def permissions(self): + """ + Returns the permissions for this user + + :return: The list of :class:`marklogic.models.permission.Permission` + """ + if 'permission' not in self._config: + return None + + perms = [] + for item in self._config['permission']: + perm = Permission(item['role-name'],item['capability']) + perms.append(perm) + + return perms + + def set_permissions(self, perms): + """ + Sets the permissions for 
this user + + :return: The user object + """ + return self.set_property_list('permission', perms, Permission) + + def add_permission(self, perm): + """ + Adds the specified permission to the list of permissions for this user + + :return: The user object + """ + return self.add_to_property_list('permission', perm, Permission) + + def remove_permission(self, perm): + """ + Removes the specified permission from the permissions for this user + + :param perm: The permission to remove + + :return: The user object + """ + return self.remove_from_property_list('permission', perm, Permission) + + def collections(self): + """ + Returns the collections for this user + + :return: The list of collections + """ + if 'collection' not in self._config: + return None + return self._config['collection'] + + def set_collections(self, collections): + """ + Sets the collections for this user + + :return: The user object + """ + return self.set_property_list('collection', collections) + + def add_collection(self, collection): + """ + Adds the specified collection to the list of collections for this user + + :return: The user object + """ + return self.add_to_property_list('collection', collection) + + def remove_collection(self, collection): + """ + Removes the specified collection from the collections for this user + + :param perm: The collection to remove + + :return: The user object + """ + return self.remove_from_property_list('collection', collection) + + def external_names(self): + """ + Returns the external_names for this user + + :return: The list of external_names + """ + if 'external-name' not in self._config: + return None + return self._config['external-name'] + + def set_external_names(self, names): + """ + Sets the external names for this user + + :param: names: The external names + :return: The user object + """ + return self.set_property_list('external-name', names) + + def add_external_name(self, name): + """ + Adds the specified external name to the list of external + 
names for this user + + :param: name: The external name + :return: The user object + """ + return self.add_to_property_list('external-name', name) + + def remove_external_name(self, name): + """ + Removes the specified external name from the external + names for this user + + :param perm: The external name to remove + + :return: The user object + """ + return self.remove_from_property_list('external-name', name) + + def marshal(self): + """ + Return a flat structure suitable for conversion to JSON or XML. + + :return: A hash of the keys in this object and their values, recursively. + """ + struct = { } + for key in self._config: + struct[key] = self._config[key]; + return struct + + @classmethod + def unmarshal(cls, config): + """ + Construct a new User from a flat structure. This method is + principally used to construct an object from a Management API + payload. The configuration passed in is largely assumed to be + valid. + + :param: config: A hash of properties + :return: A newly constructed User object with the specified properties. + """ + result = User("temp") + result._config = config + result.name = config['user-name'] + result.etag = None + return result + + def create(self, connection): + """ + Creates the User on the MarkLogic server. + + :param connection: The connection to a MarkLogic server + :return: The User object + """ + uri = "http://{0}:{1}/manage/v2/users" \ + .format(connection.host, connection.management_port) + + response = requests.post(uri, json=self._config, auth=connection.auth) + + if response.status_code not in [200, 201, 204]: + raise exceptions.UnexpectedManagementAPIResponse(response.text) + + return self + + def read(self, connection): + """ + Loads the User from the MarkLogic server. This will refresh + the properties of the object. 
+ + :param connection: The connection to a MarkLogic server + :return: The User object + """ + user = User.lookup(self._config['role-name']) + if user is None: + return None + else: + self._config = user._config + self.etag = user.etag + return self + + def update(self, connection): + """ + Updates the User on the MarkLogic server. + + :param connection: The connection to a MarkLogic server + :return: The User object + """ + uri = "http://{0}:{1}/manage/v2/users/{2}/properties" \ + .format(connection.host, connection.management_port,self.name) + + headers = {} + if self.etag is not None: + headers['if-match'] = self.etag + + response = requests.put(uri, json=self._config, auth=connection.auth, + headers=headers) + + if response.status_code not in [200, 204]: + raise exceptions.UnexpectedManagementAPIResponse(response.text) + + self.name = self._config['user-name'] + if 'etag' in response.headers: + self.etag = response.headers['etag'] + + return self + + def delete(self, connection): + """ + Deletes the User from the MarkLogic server. + + :param connection: The connection to a MarkLogic server + :return: The User object + """ + uri = "http://{0}:{1}/manage/v2/users/{2}" \ + .format(connection.host, connection.management_port, self.name) + + headers = {} + if self.etag is not None: + headers['if-match'] = self.etag + + response = requests.delete(uri, auth=connection.auth, headers=headers) + + if (response.status_code not in [200, 204] + and not response.status_code == 404): + raise exceptions.UnexpectedManagementAPIResponse(response.text) + + return self + + @classmethod + def list(cls, connection): + """ + List all the user names. 
+ + :param connection: The connection to a MarkLogic server + :return: A list of user names + """ + + uri = "http://{0}:{1}/manage/v2/users" \ + .format(connection.host, connection.management_port) + + response = requests.get(uri, auth=connection.auth, + headers={'accept': 'application/json'}) + + if response.status_code != 200: + raise exceptions.UnexpectedManagementAPIResponse(response.text) + + results = [] + json_doc = json.loads(response.text) + + for item in json_doc['user-default-list']['list-items']['list-item']: + results.append(item['nameref']) + + return results + + @classmethod + def lookup(cls, connection, name): + """ + Look up an individual user. + + :param name: The name of the user + :param connection: The connection to the MarkLogic database + :return: The user + """ + uri = "http://{0}:{1}/manage/v2/users/{2}/properties".format(connection.host, connection.port, + name) + response = requests.get(uri, auth=connection.auth, headers={'accept': 'application/json'}) + + if response.status_code == 200: + result = User.unmarshal(json.loads(response.text)) + if 'etag' in response.headers: + result.etag = response.headers['etag'] + return result + elif response.status_code == 404: + return None + else: + raise exceptions.UnexpectedManagementAPIResponse(response.text) diff --git a/python_api/marklogic/models/utilities/__init__.py b/python_api/marklogic/models/utilities/__init__.py new file mode 100644 index 0000000..e4744ea --- /dev/null +++ b/python_api/marklogic/models/utilities/__init__.py @@ -0,0 +1,17 @@ +# +# Copyright 2015 MarkLogic Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
#
# Copyright 2015 MarkLogic Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# File History
# ------------
#
# Paul Hoehne       03/26/2015     Initial development
#

"""
MarkLogic exception classes
"""

class MLClientException(Exception):
    """
    Base class for all MarkLogic client exceptions.
    """
    pass


class UnexpectedManagementAPIResponse(MLClientException):
    """
    Raised when a Management API call returns a response the client
    did not expect (unexpected status code or body).
    """
    pass


class UnexpectedAPIResponse(MLClientException):
    """
    Raised when the REST API returns an unexpected response when
    dealing with search or documents.
    """
    pass


# ---------------------------------------------------------------------
# File History
# ------------
#
# Paul Hoehne       03/01/2015     Initial development
#

import os, sys, stat

"""
MarkLogic file classes
"""

def walk_directories(current_directory):
    """
    Recursively walk a directory returning all of the files found.

    :param current_directory: the directory to walk
    :return: a list of dicts, one per file, with keys 'filename'
             (the base name) and 'partial-directory'
    """
    file_list = []
    # 'entry' rather than 'dir', which shadowed the builtin dir().
    for entry in os.listdir(current_directory):
        pathname = os.path.join(current_directory, entry)
        mode = os.stat(pathname).st_mode

        if stat.S_ISDIR(mode):
            file_list.extend(walk_directories(pathname))
        else:
            # NOTE(review): despite its name, 'partial-directory' holds
            # the full joined path of the file — confirm against callers
            # before renaming the key.
            file_list.append({u'filename': entry,
                              u'partial-directory': pathname})
    return file_list
# -*- coding: utf-8 -*-
#
# Copyright 2015 MarkLogic Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# File History
# ------------
#
# Paul Hoehne       03/05/2015     Initial development
#

"""
Various utility classes.
"""

from __future__ import unicode_literals, print_function, absolute_import
from abc import ABCMeta, abstractmethod
from marklogic.models.utilities.validators import validate_type
from marklogic.models.utilities.validators import validate_list_of_type
from marklogic.models.utilities.validators import assert_list_of_type

class PropertyLists:
    """
    The PropertyLists class is an abstract, mixin class. It defines
    methods for adding, removing and setting the values of a list
    property on an object. Subclasses are expected to provide a
    `_config` dict holding the property lists.
    """
    __metaclass__ = ABCMeta

    def _add_to_object_list(self, objlist, obj, objtype):
        """
        Adds a value to a list of objects.

        The `obj` and any objects in the `objlist`
        must be instances of `objtype`. The `objlist` may be None.

        If `obj` is not currently a member of the list, it is appended
        to the list. If it is already a member, the list returned will
        have the same members as the original list.

        :param objlist: A list of objects, possibly None
        :param obj: An object
        :param objtype: The required type of the objects and the list
        :return: The updated list.
        """
        validate_type(obj, objtype)

        found = False
        newlist = []
        if objlist is not None:
            validate_list_of_type(objlist, objtype)
            for item in objlist:
                # Objects compare by identity, not equality.
                found = found or item is obj
                newlist.append(item)
        if not found:
            newlist.append(obj)
        return newlist

    def _add_to_atomic_list(self, atomlist, atom):
        """
        Adds a value to a list of atomic values.

        If `atom` is not currently a member of the list, it is appended
        to the list. If it is already a member, the list returned will
        have the same members as the original list.

        :param atomlist: A list of atomic values
        :param atom: An atomic value
        :return: The updated list.
        """
        found = False
        newlist = []
        if atomlist is not None:
            for item in atomlist:
                # Atomic values compare by equality.
                found = found or item == atom
                newlist.append(item)
        if not found:
            newlist.append(atom)
        return newlist

    def add_to_property_list(self, propname, theitem, thetype=None):
        """
        Adds an item to a configuration property list.

        The `propname` is the name of a property list.

        If `thetype` is not specified, the list is assumed to be of
        atomic values. Otherwise, `theitem` must be an instance of
        `thetype`.

        If `theitem` is not currently a member of the list, it is appended
        to the list. If it is already a member, the list returned will
        have the same members as the original list.

        Atomic values are considered equal if they compare `==`. Objects
        are considered equal if they are the same object.

        :param propname: The name of a configuration property list
        :param theitem: An object
        :param thetype: The required type of the objects and the list or None
        :return: The calling object
        """
        if propname in self._config:
            thelist = self._config[propname]
        else:
            thelist = []

        if thetype is None:
            thelist = self._add_to_atomic_list(thelist, theitem)
        else:
            thelist = self._add_to_object_list(thelist, theitem, thetype)

        if thelist:
            self._config[propname] = thelist
        else:
            # An empty list is represented by absence of the property.
            if propname in self._config:
                del self._config[propname]

        return self

    def set_property_list(self, propname, objlist, objtype=None):
        """
        Sets the objects in a configuration property list.

        If `objtype` is specified, any objects in the `objlist`
        must be instances of that type. The `objlist` may be empty
        or None; either removes the property.

        :param propname: The name of a configuration property list
        :param objlist: A list of objects
        :param objtype: The required type of the objects in the list
        :return: The calling object.
        """
        if objlist is None or not objlist:
            thelist = None
        else:
            if objtype is None:
                thelist = objlist
            else:
                thelist = assert_list_of_type(objlist, objtype)

        if propname in self._config and thelist is None:
            del self._config[propname]
        else:
            if thelist is not None:
                self._config[propname] = thelist

        return self

    def _remove_from_object_list(self, objlist, obj, objtype):
        """
        Removes a value from the list.

        The `obj` and any objects in the `objlist`
        must be instances of `objtype`. The `objlist` may be None.

        If `obj` is currently a member of the list, it is removed.

        The resulting list is returned. If the resulting list is empty,
        None is returned.

        :param objlist: A list of objects, possibly None
        :param obj: An object
        :param objtype: The required type of the objects and the list
        :return: The updated list, or None if it is empty.
        """
        validate_type(obj, objtype)

        newlist = []
        if objlist is not None:
            validate_list_of_type(objlist, objtype)
            for item in objlist:
                if item is not obj:
                    newlist.append(item)

        if newlist:
            return newlist
        else:
            return None

    def _remove_from_atomic_list(self, atomlist, atom):
        """
        Removes a value from the list.

        If `atom` is currently a member of the list, it is removed.

        The resulting list is returned. If the resulting list is empty,
        None is returned.

        :param atomlist: A list of atomic values, possibly None.
        :param atom: An atomic value
        :return: The updated list, or None if it is empty.
        """
        newlist = []
        if atomlist is not None:
            for item in atomlist:
                if item != atom:
                    newlist.append(item)

        if newlist:
            return newlist
        else:
            return None

    def remove_from_property_list(self, propname, theitem, thetype=None):
        """
        Removes an item from a configuration property list.

        The `propname` is the name of a property list.

        If `thetype` is not specified, the list is assumed to be of
        atomic values. Otherwise, `theitem` must be an instance of
        `thetype`.

        If `theitem` is currently a member of the list, it is removed from
        the list. If it is not a member, the list returned will
        have the same members as the original list.

        Atomic values are considered equal if they compare `==`. Objects
        are considered equal if they are the same object.

        :param propname: The name of a configuration property list
        :param theitem: An object
        :param thetype: The required type of the objects and the list or None
        :return: The calling object
        """
        if propname in self._config:
            thelist = self._config[propname]
            if thetype is not None:
                thelist = self._remove_from_object_list(thelist, theitem,
                                                        thetype)
            else:
                thelist = self._remove_from_atomic_list(thelist, theitem)
            if thelist:
                self._config[propname] = thelist
            else:
                del self._config[propname]
        # Return self so removals chain like add_to_property_list and
        # set_property_list do; previously this method implicitly
        # returned None even though callers (e.g. User.remove_role_name,
        # documented to return the user object) return its result.
        return self
+ + :param: propname: The name of a configuration property list + :param: theitem: An object + :param: thetype: The required type of the objects and the list or None + :return: The calling object + """ + if propname in self._config: + thelist = self._config[propname] + if thetype is not None: + thelist = self._remove_from_object_list(thelist, theitem, thetype) + else: + thelist = self._remove_from_atomic_list(thelist, theitem) + if thelist: + self._config[propname] = thelist + else: + del self._config[propname] diff --git a/python_api/marklogic/models/utilities/validators.py b/python_api/marklogic/models/utilities/validators.py new file mode 100644 index 0000000..83fb49b --- /dev/null +++ b/python_api/marklogic/models/utilities/validators.py @@ -0,0 +1,286 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals, print_function, absolute_import + +# +# Copyright 2015 MarkLogic Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0# +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# File History +# ------------ +# +# Paul Hoehne 03/05/2015 Initial development +# + + +""" +Validators are utility functions used by various classes to validate +input. 
"""
"""

class ValidationError(Exception):
    """
    Raised when a value fails one of the validation checks below.
    """
    def __init__(self, message, original_value):
        self._message = message
        self._original_value = original_value

    def __repr__(self):
        # The 'return' was missing, so repr() raised
        # "TypeError: __repr__ returned non-string".
        return "Validation Error('{0}', {1})".format(
            self._message, self._original_value)


def validate_boolean(raw_val):
    """
    Validate a boolean.
    """
    if type(raw_val) != bool:
        raise ValidationError('Value passed is not a boolean', repr(raw_val))


def validate_index_type(raw_val):
    """
    Validate a scalar index type.
    """
    valid_index_types = {"int", "unsignedInt", "long", "unsignedLong",
                         "float", "double", "decimal", "dateTime",
                         "time", "date", "gYearMonth", "gYear", "gMonth",
                         "gDay", "yearMonthDuration",
                         "dayTimeDuration", "string", "anyURI"}
    if raw_val not in valid_index_types:
        raise ValidationError('Value is not a valid index type',
                              repr(raw_val))


def validate_index_invalid_value_actions(raw_val):
    """
    Validate the invalid value actions on an index.
    """
    valid_actions = {'ignore', 'reject'}

    if raw_val not in valid_actions:
        raise ValidationError(
            "Value is not a valid action for invalid index values",
            repr(raw_val))


def validate_stemmed_searches_type(raw_val):
    """
    Validate the stemmed searches value.
    """
    valid_types = {'off', 'basic', 'advanced', 'decompounding'}

    if raw_val not in valid_types:
        raise ValidationError(
            "Stemmed search type is not a valid type of stemmed search",
            repr(raw_val))


def validate_integer_range(raw_val, min, max):
    """
    Validate an integer in the inclusive range [min, max].
    """
    if raw_val not in range(min, (1 + max)):
        raise ValidationError("Integer value out of range", repr(raw_val))


def validate_directory_creation(raw_val):
    """
    Validate the directory creation setting.
    """
    if raw_val not in ['manual', 'automatic', 'manual-enforced']:
        raise ValidationError("Invalid directory creation method",
                              repr(raw_val))


def validate_locking_type(raw_val):
    """
    Validate locking type.
    """
    if raw_val not in ['strict', 'fast', 'off']:
        raise ValidationError("Invalid locking option", repr(raw_val))


def validate_range_index_optimize_options(raw_val):
    """
    Validate a range index optimization option.
    """
    if raw_val not in ['facet-time', 'memory-size']:
        raise ValidationError(
            "Range index optimize option is not a valid value",
            repr(raw_val))


def validate_format_compatibility_options(raw_val):
    """
    Validate a format compatibility option.
    """
    if raw_val not in ['5.0', '4.2', '4.1', '4.0', '3.2']:
        # Message de-garbled ("comatibility objest ... valide value").
        raise ValidationError(
            "On-disk index format compatibility option is not a valid value",
            repr(raw_val))

def validate_index_detection_options(raw_val):
    """
    Validate an index detection option.
    """
    if raw_val not in ['automatic', 'none']:
        raise ValidationError("Index detection options is not a valid value",
                              repr(raw_val))


def validate_expunge_locks_options(raw_val):
    """
    Validate an expunge locks option.
    """
    if raw_val not in ['automatic', 'none']:
        raise ValidationError("Expunge locks option is not a valid value",
                              repr(raw_val))


def validate_term_frequency_normalization_options(raw_val):
    """
    Validate a term frequency normalization option.
    """
    if raw_val not in ['unscaled-log', 'weakest-scaled-log',
                       'weakly-scaled-log', 'moderately-scaled-log',
                       'strongly-scaled-log', 'scaled-log']:
        raise ValidationError(
            "Term frequency normalization option is not a valid value",
            repr(raw_val))


def validate_merge_priority_options(raw_val):
    """
    Validate a merge priority option.
    """
    if raw_val not in ['lower', 'normal']:
        raise ValidationError("Merge priority option is not a valid value",
                              repr(raw_val))


def validate_assignment_policy_options(raw_val):
    """
    Validate an assignment policy option.
    """
    if raw_val not in ['bucket', 'statistical', 'range', 'legacy']:
        raise ValidationError(
            "Assignment policy option is not a valid value", repr(raw_val))

def validate_privilege_kind(raw_val):
    """
    Validate a privilege kind.
    """
    if raw_val not in ['uri', 'execute']:
        raise ValidationError("Privilege kind is not a valid value",
                              repr(raw_val))

def validate_custom(message):
    """
    Unconditionally raise a validation error with the given message.
    """
    raise ValidationError("Validation error", repr(message))


def validate_forest_availability(raw_val):
    """
    Validate a forest availability value.
    """
    if raw_val not in ['online', 'offline']:
        raise ValidationError(
            "Forest availability status is not a valid value", repr(raw_val))

def validate_string(raw_val):
    """
    Validate that the value is a string.
    """
    if type(raw_val) is not str:
        raise ValidationError("String expected.", repr(raw_val))

def validate_list_of_strings(raw_val):
    """
    Validate that the value is a list of strings.
    """
    if type(raw_val) is not list:
        raise ValidationError("List of strings expected.", repr(raw_val))
    for value in raw_val:
        if type(value) is not str:
            raise ValidationError("List of strings expected.", repr(raw_val))

def validate_coordinate_system(raw_val):
    """
    Validate a geospatial index coordinate system.
    """
    if raw_val not in ['wgs84', 'raw']:
        raise ValidationError("Invalid coordinate system", repr(raw_val))

def validate_point_format(raw_val):
    """
    Validate a geospatial index point format.
    """
    if raw_val not in ['point', 'lat-long-point']:
        raise ValidationError("Invalid point format", repr(raw_val))

def validate_capability(raw_val):
    """
    Validate a capability.
    """
    if raw_val not in ['read', 'insert', 'update', 'execute']:
        raise ValidationError("Invalid capability", repr(raw_val))
+ """ + if raw_val not in ['read', 'insert', 'update', 'execute']: + raise ValidationError("Invalid capability", repr(raw_val)) + +def validate_collation(index_type, collation): + """ + Validate a colation for an index type. + """ + # FIXME: really validate the collation string! + if index_type == "string": + return + if (index_type == "anyURI" + and collation == "http://marklogic.com/collation/codepoint"): + return + if collation is None or collation == "": + return + raise ValidationError('Collation cannot be {0} for an index of type {1}' \ + .format(index_type, collation)) + +def validate_type(raw_val, cls): + """ + Validate that the value is of the specified type. + """ + if not isinstance(raw_val, cls): + raise ValidationError('Value passed is not a {0}' \ + .format(cls.__name__), repr(raw_val)) + +def assert_type(raw_val, cls): + """ + Assert that the value is of the specified type. + + :return The value if it passes the type test, otherwise raise an exception + """ + if isinstance(raw_val, cls): + return raw_val + raise ValidationError('Value passed is not a {0}' \ + .format(cls.__name__), repr(raw_val)) + +def assert_boolean(raw_val): + """ + Assert that the value is boolean. + + :return The value if it is boolean, otherwise raise an exception + """ + return assert_type(raw_val, bool) + +def validate_list_of_type(raw_val, cls): + """ + Validate a list of the specified type. + """ + if type(raw_val) is not list: + raise ValidationError("List of {0} expected.".format(cls.__name__), + repr(raw_val)) + for value in raw_val: + if type(value) is not cls: + raise ValidationError("List of {0} expected.".format(cls.__name__), + repr(raw_val)) + +def assert_list_of_type(raw_val, cls): + """ + Assert that the value is a list of the specified type. + + A single value of the specified type is returned as a list of length 1. 
+ + :return The value if it is an appropriate list, otherwise raise an exception + """ + if type(raw_val) is cls: + return [ raw_val ] + validate_list_of_type(raw_val, cls) + return raw_val + diff --git a/setup.py b/setup.py new file mode 100644 index 0000000..a8da5ee --- /dev/null +++ b/setup.py @@ -0,0 +1,67 @@ +from __future__ import print_function +from setuptools import setup, find_packages +from setuptools.command.test import test as TestCommand +import io +import codecs +import os +import sys + +import python_api + +here = os.path.abspath(os.path.dirname(__file__)) + +def read(*filenames, **kwargs): + encoding = kwargs.get('encoding', 'utf-8') + sep = kwargs.get('sep', '\n') + buf = [] + for filename in filenames: + with io.open(filename, encoding=encoding) as f: + buf.append(f.read()) + return sep.join(buf) + +long_description = read('README.md') + +class PyTest(TestCommand): + def finalize_options(self): + TestCommand.finalize_options(self) + self.test_args = [] + self.test_suite = True + + def run_tests(self): + import pytest + errcode = pytest.main(self.test_args) + sys.exit(errcode) + +setup( + name='python_api', + version=python_api.__version__, + url='https://github.com/marklogic/python_api/', + license='Apache Software License', + author='Norman Walsh', + tests_require=['pytest'], + cmdclass={'test': PyTest}, + author_email='norman.walsh@marklogic.com', + description='MarkLogic Python API', + long_description=long_description, + packages=find_packages("python_api"), + package_dir = {'':'python_api'}, + install_requires=[ + 'requests>=2.5.0' + ], + include_package_data=True, + platforms='any', + test_suite='python_api.test.test_python_api', + classifiers = [ + 'Programming Language :: Python', + 'Development Status :: 3 - Alpha', + 'Natural Language :: English', + 'Environment :: Console', + 'Intended Audience :: Developers', + 'License :: OSI Approved :: Apache Software License', + 'Operating System :: OS Independent', + 'Topic :: Software 
Development :: Libraries :: Python Modules', + ], + extras_require={ + 'testing': ['pytest'], + } +) diff --git a/test/__init__.py b/test/__init__.py new file mode 100644 index 0000000..7a8978e --- /dev/null +++ b/test/__init__.py @@ -0,0 +1,6 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals, print_function, absolute_import + +import os +import unittest + diff --git a/test/certs/__init__.py b/test/certs/__init__.py new file mode 100644 index 0000000..cafdeb8 --- /dev/null +++ b/test/certs/__init__.py @@ -0,0 +1,2 @@ +# -*- coding: utf-8 -*- +# Making the tests.databases tests package diff --git a/test/certs/test_authorities.py b/test/certs/test_authorities.py new file mode 100644 index 0000000..036a894 --- /dev/null +++ b/test/certs/test_authorities.py @@ -0,0 +1,103 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2015 MarkLogic Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0# +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# File History +# ------------ +# +# Norman Walsh 05/13/2015 Initial development + +from __future__ import unicode_literals, print_function, absolute_import + +import unittest +from marklogic.models import Connection +from marklogic.models.certificate.authority import Authority +from test.resources import TestConnection as tc + +class TestAuthority(unittest.TestCase): + def test_list(self): + connection = Connection.make_connection(tc.hostname, tc.admin, tc.password) + + names = Authority.list(connection, include_names=True) + + self.assertGreater(len(names), 100) + found = False + for name in names: + found = found or "Equifax" in name + self.assertEqual(True, found) + + def test_create(self): + pem = ("-----BEGIN CERTIFICATE-----\n" + "MIIC3TCCAkYCCQCJtpKDQbobyTANBgkqhkiG9w0BAQsFADCBsjELMAkGA1UEBhMC\n" + "VVMxCzAJBgNVBAgMAlRYMQ8wDQYDVQQHDAZBdXN0aW4xHjAcBgNVBAoMFU1hcmtM\n" + "b2dpYyBDb3Jwb3JhdGlvbjEXMBUGA1UECwwOVFggRW5naW5lZXJpbmcxITAfBgNV\n" + "BAMMGE1hcmtMb2dpYyBUWCBFbmdpbmVlcmluZzEpMCcGCSqGSIb3DQEJARYabm9y\n" + "bWFuLndhbHNoQG1hcmtsb2dpYy5jb20wHhcNMTQwODI3MTkyMzQyWhcNMTUwODI3\n" + "MTkyMzQyWjCBsjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlRYMQ8wDQYDVQQHDAZB\n" + "dXN0aW4xHjAcBgNVBAoMFU1hcmtMb2dpYyBDb3Jwb3JhdGlvbjEXMBUGA1UECwwO\n" + "VFggRW5naW5lZXJpbmcxITAfBgNVBAMMGE1hcmtMb2dpYyBUWCBFbmdpbmVlcmlu\n" + "ZzEpMCcGCSqGSIb3DQEJARYabm9ybWFuLndhbHNoQG1hcmtsb2dpYy5jb20wgZ8w\n" + "DQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAJSo3wFMDvTV7Q+4NDDMu9aJZ6uK4l8b\n" + "ACIk5/Ug+MoST+CuIfeBlb2Y6dxNCwkADwVPpykslDcHYFygxFIcnHHVhgqZ0xzu\n" + "LPXBagXmHyj+mb6im1tkbqAxQ7gj/SDeCnQYRIwNRlGgWZJFViaYJH3CC8G/f16F\n" + "IhDyQS3h28W3AgMBAAEwDQYJKoZIhvcNAQELBQADgYEAWbidV4huPlf8Ac0c3Cbs\n" + "Nx2xogODSjNPKqwug0Y3jKx33uxeY7i9oParWSnVFkG0JYUZEfrO5fmtS6JSA1Lk\n" + "e3BioC9xgclEYFiDoZSARasL8hdNvu7v+EYZEnS43rR4M7CQiq/Tf50o4VjiVM9S\n" + "I0Bo+VZSaShQKipBEHS8sP8=\n" + "-----END CERTIFICATE-----\n") + + connection = Connection.make_connection(tc.hostname, tc.admin, tc.password) + + cert = Authority.create(connection, 
pem) + + self.assertIsNotNone(cert) + self.assertEqual('true', cert.enabled()) + self.assertIsNotNone(cert.properties()) + + cert.delete(connection) + + + def test_lookup(self): + connection = Connection.make_connection(tc.hostname, tc.admin, tc.password) + + names = Authority.list(connection) + auth = Authority.lookup(connection, names[0]) + + self.assertIsNotNone(auth) + self.assertEqual(auth.certificate_id(), names[0]) + +# Not yet supported by underlying API +# def test_toggle_enable(self): +# connection = Connection.make_connection(tc.hostname, tc.admin, tc.password) +# +# names = Authority.list(connection) +# auth = Authority.lookup(connection, names[0]) +# +# newvalue = not auth.enabled() +# auth.set_enabled(newvalue) +# auth.update(connection) +# +# newauth = Authority.lookup(connection, names[0]) +# self.assertIsNotNone(newauth) +# self.assertEqual(newvalue, newauth.enabled()) +# +# auth.set_enabled(not newvalue) +# auth.update(connection) +# auth.read(connection) +# +# self.assertEqual(not newvalue, auth.enabled()) + +if __name__ == "__main__": + unittest.main() diff --git a/test/certs/test_requests.py b/test/certs/test_requests.py new file mode 100644 index 0000000..d6e8cb3 --- /dev/null +++ b/test/certs/test_requests.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2015 MarkLogic Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0# +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# File History +# ------------ +# +# Norman Walsh 05/13/2015 Initial development + +from __future__ import unicode_literals, print_function, absolute_import + +import unittest +from marklogic.models.certificate.request import Request + +class TestRequest(unittest.TestCase): + def test_request(self): + req = Request(countryName="US", stateOrProvinceName="TX", + localityName="Austin", organizationName="MarkLogic", + emailAddress="Norman.Walsh@marklogic.com", + version=0) + + self.assertEqual("US", req.countryName()) + self.assertEqual("TX", req.stateOrProvinceName()) + self.assertEqual("Austin", req.localityName()) + self.assertEqual("Norman.Walsh@marklogic.com", req.emailAddress()) + self.assertEqual(0, req.version()) + self.assertIsNone(req.v3ext()) + + ext = { + "nsCertType": { + "critical": False, + "value": "SSL Server" + }, + "subjectKeyIdentifier": { + "critical": False, + "value": "B2:2C:0C:F8:5E:A7:44:B7" + } + } + + req = Request(countryName="US", stateOrProvinceName="TX", + localityName="Austin", organizationName="MarkLogic", + emailAddress="Norman.Walsh@marklogic.com", + version=0, v3ext=ext) + + self.assertEqual("SSL Server", req.v3ext()["nsCertType"]["value"]) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/certs/test_templates.py b/test/certs/test_templates.py new file mode 100644 index 0000000..fa2d214 --- /dev/null +++ b/test/certs/test_templates.py @@ -0,0 +1,64 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2015 MarkLogic Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0# +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# File History +# ------------ +# +# Norman Walsh 05/13/2015 Initial development + +from __future__ import unicode_literals, print_function, absolute_import + +import unittest +from marklogic.models import Connection +from marklogic.models.certificate.request import Request +from marklogic.models.certificate.template import Template +from test.resources import TestConnection as tc + +class TestRequest(unittest.TestCase): + def test_template(self): + connection = Connection.make_connection(tc.hostname, tc.admin, tc.password) + + req = Request(countryName="US", stateOrProvinceName="TX", + localityName="Austin", organizationName="MarkLogic", + emailAddress="Norman.Walsh@marklogic.com", + version=0) + temp = Template("Test Template", "Test description", req) + + self.assertEqual("Test Template", temp.template_name()) + + temp.create(connection) + + names = Template.list(connection) + + self.assertGreater(len(names), 0) + self.assertIn(temp.template_id(), names) + + temp.set_template_name("New Name") + temp.set_template_description("New Description") + temp.update(connection) + self.assertIsNotNone(temp) + + newtemp = Template.lookup(connection, temp.template_id()) + self.assertEqual(temp.template_name(), newtemp.template_name()) + + temp.delete(connection) + + self.assertIsNotNone(temp) + self.assertIsNone(temp.template_id()) + + +if __name__ == "__main__": + unittest.main() diff --git a/test/databases/__init__.py b/test/databases/__init__.py new file mode 100644 index 0000000..cafdeb8 --- /dev/null +++ b/test/databases/__init__.py @@ -0,0 +1,2 @@ +# -*- coding: utf-8 -*- +# Making the tests.databases tests package diff --git a/test/databases/test_database.py b/test/databases/test_database.py new file mode 100644 index 0000000..f0e1deb --- /dev/null +++ b/test/databases/test_database.py @@ -0,0 +1,140 @@ +# -*- coding: utf-8 -*- +from __future__ import 
unicode_literals, print_function, absolute_import + +# +# Copyright 2015 MarkLogic Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# File History +# ------------ +# +# Paul Hoehne 03/25/2015 Initial development +# Paul Hoehne 03/26/2015 Adding dynamic lookup of host name +# + +import unittest +from marklogic.models import Database, Connection, Host, Forest +from marklogic.models.utilities.exceptions import UnexpectedManagementAPIResponse +from requests.auth import HTTPDigestAuth +from test.resources import TestConnection as tc +from test.settings import DatabaseSettings as ds + +class TestDatabase(unittest.TestCase): + """ + Basic creation test function. + + """ + + def test_simple_create(self): + """ + TODO: The hostname should come from the server's hostname + + Test the basic create function. Creates a database and then check to see that it + exists by getting the database configuration from the server. It then destroys + the database. 
+ + :return: None + """ + conn = Connection(tc.hostname, HTTPDigestAuth(tc.admin, tc.password)) + hosts = Host.list(conn) + db = Database("test-db", hosts[0]) + + db.create(conn) + + validate_db = Database.lookup(conn, "test-db") + try: + self.assertIsNotNone(validate_db) + self.assertEqual('test-db', validate_db.database_name()) + + finally: + validate_db.delete(conn) + validate_db = Database.lookup(conn, "test-db") + self.assertIsNone(validate_db) + + def test_no_database_found(self): + conn = Connection(tc.hostname, HTTPDigestAuth(tc.admin, tc.password)) + db = Database.lookup(conn, "No-Such-Database") + + self.assertIsNone(db) + + def test_list_databases(self): + conn = Connection(tc.hostname, HTTPDigestAuth(tc.admin, tc.password)) + databases = Database.list_databases(conn) + + self.assertGreater(len(databases), 4) + + db_names = [db.database_name() for db in databases] + self.assertTrue("Modules" in db_names) + self.assertTrue("Documents" in db_names) + + def test_create_simple_forests(self): + """ + Test the following scenario: + + The database is given the names of two forests. + It should then create the two named forests. + + """ + conn = Connection(tc.hostname, HTTPDigestAuth(tc.admin, tc.password)) + + hosts = Host.list(conn) + db = Database("simple-forest-create-test-db", hosts[0]) + + db.set_forest_names(["simple-forest-create-forest1", + "simple-forest-create-forest2"]) + + db.create(conn) + + db = Database.lookup(conn, "simple-forest-create-test-db") + try: + self.assertEqual(2, len(db.forest_names())) + + self.assertIn("simple-forest-create-forest1", db.forest_names()) + self.assertIn("simple-forest-create-forest2", db.forest_names()) + + finally: + db.delete(conn) + + def test_create_single_detailed_forest(self): + """ + Test the following scenario: + + The database is given a forest object. It should create a forest with + the given name. That forest should match the features of the datailed + forest. 
+ + """ + + conn = Connection(tc.hostname, HTTPDigestAuth(tc.admin, tc.password)) + + hosts = Host.list(conn) + db = Database("detailed-forest-create-test-db", hosts[0]) + + forest = Forest("detailed-forest-create-forest1", host=hosts[0], + large_data_directory=ds.large_data_directory) + + db.set_forest_names([forest.forest_name()]) + + db.create(conn) + + forest = Forest.lookup(conn, "detailed-forest-create-forest1") + + try: + self.assertEqual("detailed-forest-create-forest1", forest.forest_name()) + self.assertEqual(ds.large_data_directory, forest.large_data_directory()) + finally: + db.delete(conn) + +if __name__ == "__main__": + unittest.main() diff --git a/test/databases/test_field_range.py b/test/databases/test_field_range.py new file mode 100644 index 0000000..99b728f --- /dev/null +++ b/test/databases/test_field_range.py @@ -0,0 +1,137 @@ +# +# Copyright 2015 MarkLogic Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# File History +# ------------ +# +# Paul Hoehne 03/05/2015 Initial development +# + +import unittest + +from marklogic.models.database.index import FieldRangeIndex +from marklogic.models.database import Database +from marklogic.models.database.index import FieldRangeIndex +from marklogic.models.database.field import PathField, RootField, FieldPath +from marklogic.models.database.field import IncludedElement, ExcludedElement + +class TestField(unittest.TestCase): + def test_create_field(self): + db = Database("testdb") + + self.assertNotIn('field', db._config) + + field = PathField("invoice-id", FieldPath("bill:invoice-id", 1)) + field.add_field_path(FieldPath("inv:id", 1)) + + result = db.add_field(field) + self.assertIn('field', db._config) + self.assertEqual(result, db) + + self.assertEqual(1, len(db._config['field'])) + + field = db.fields()[0] + self.assertEqual("invoice-id", field.field_name()) + + field = db.fields()[0] + self.assertEqual(2, len(field.field_paths())) + + self.assertEqual("bill:invoice-id", field.field_paths()[0].path()) + self.assertEqual(1, field.field_paths()[0].weight()) + + def test_include_references(self): + db = Database("testdb") + + field = RootField("invoice-id", includes=[IncludedElement("http://foo.bar.com/invoice", "id")]) + + self.assertEqual(1, len(field.included_elements())) + self.assertEqual("http://foo.bar.com/invoice", field.included_elements()[0].namespace_uri()) + self.assertEqual("id", field.included_elements()[0].localname()) + + def test_exclude_references(self): + db = Database("testdb") + + field = RootField("invoice-id", excludes=[ExcludedElement("http://foo.bar.com/invoice", "id")]) + + self.assertEqual(1, len(field.excluded_elements())) + self.assertEqual("http://foo.bar.com/invoice", field.excluded_elements()[0].namespace_uri()) + self.assertEqual("id", field.excluded_elements()[0].localname()) + + + def test_create_element_reference(self): + element_reference = IncludedElement("http://foo.bar.com/invoice", 
"id") + + self.assertEqual("http://foo.bar.com/invoice", + element_reference.namespace_uri()) + self.assertEqual("id", element_reference.localname()) + + element_reference = IncludedElement("http://foo.bar.com/invoice", + "id", attribute_localname="foo") + + self.assertEqual("foo", element_reference.attribute_localname()) + + element_reference = IncludedElement("http://foo.bar.com/invoice", "id", + attribute_namespace_uri="http://foo.bar.com/billing", + attribute_localname="bill") + + self.assertEqual("http://foo.bar.com/billing", + element_reference.attribute_namespace_uri()) + + element_reference = ExcludedElement("http://foo.bar.com/invoice", "id") + + self.assertEqual("http://foo.bar.com/invoice", + element_reference.namespace_uri()) + self.assertEqual("id", element_reference.localname()) + + element_reference = ExcludedElement("http://foo.bar.com/invoice", + "id", attribute_localname="foo") + + self.assertEqual("foo", element_reference.attribute_localname()) + + element_reference = ExcludedElement("http://foo.bar.com/invoice", "id", + attribute_namespace_uri="http://foo.bar.com/billing", + attribute_localname="bill") + + self.assertEqual("http://foo.bar.com/billing", + element_reference.attribute_namespace_uri()) + + # + # { + # "scalar-type": "int", + # "collation": "", + # "field-name": "test-one", + # "range-value-positions": false, + # "invalid-values": "reject" + # } + # + def test_create_range_field(self): + db = Database("foo") + + field = RootField("invoice-id", False) + db.add_field(field) + + range_field = FieldRangeIndex("int", "invoice-id") + db.add_index(range_field) + + index = db.field_range_indexes()[0] + self.assertEqual("invoice-id", index.field_name()) + self.assertEqual("int", index.scalar_type()) + + indexes = db.field_range_indexes() + self.assertEqual(1, len(indexes)) + +if __name__ == "__main__": + unittest.main() + diff --git a/test/databases/test_paths.py b/test/databases/test_paths.py new file mode 100644 index 0000000..fa4c546 --- 
/dev/null +++ b/test/databases/test_paths.py @@ -0,0 +1,58 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals, print_function, absolute_import + +# +# Copyright 2015 MarkLogic Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# File History +# ------------ +# +# Paul Hoehne 03/05/2015 Initial development +# + +import unittest +from marklogic.models import Database +from marklogic.models.database.path import PathNamespace + +# "path-namespaces": { +# "path-namespace": [ +# { +# "prefix": "inv", +# "namespace-uri": "http:\/\/foo.bar.com\/invoice" +# }, +# { +# "prefix": "bill", +# "namespace-uri": "http:\/\/foo.bar.com\/billing" +# } +# ] +# } + +class TestPaths(unittest.TestCase): + + def test_create_paths(self): + db = Database(u'testdb') + + self.assertNotIn('path-namespaces', db._config) + return_val = db.add_path_namespace(PathNamespace("inv", "http://foo.bar.com/invoice")) + + namespaces = db.path_namespaces() + self.assertEqual(1, len(namespaces)) + self.assertEqual("inv", namespaces[0].prefix()) + self.assertEqual('http://foo.bar.com/invoice', namespaces[0].namespace_uri()) + + self.assertEqual(db, return_val) + +if __name__ == "__main__": + unittest.main() diff --git a/test/forests/__init__.py b/test/forests/__init__.py new file mode 100644 index 0000000..cafdeb8 --- /dev/null +++ b/test/forests/__init__.py @@ -0,0 +1,2 @@ +# -*- coding: utf-8 -*- +# Making the tests.databases tests package diff --git a/test/forests/test_forest.py 
b/test/forests/test_forest.py new file mode 100644 index 0000000..72744af --- /dev/null +++ b/test/forests/test_forest.py @@ -0,0 +1,82 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals, print_function, absolute_import + +# +# Copyright 2015 MarkLogic Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0# +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# File History +# ------------ +# +# Paul Hoehne 03/28/2015 Initial development +# + +import unittest +from marklogic.models import Forest, Host, Connection +from test.resources import TestConnection as tc +from requests.auth import HTTPDigestAuth +from marklogic.models.utilities.validators import ValidationError +from test.settings import DatabaseSettings as ds + +class TestForest(unittest.TestCase): + def test_forest_defaults(self): + pass + + def test_getters_and_setters(self): + forest = Forest("Foo", host="bar", data_directory=ds.data_directory, + large_data_directory=ds.large_data_directory, + fast_data_directory=ds.fast_data_directory) + + self.assertEqual(forest.forest_name(), "Foo") + + forest.set_availability("offline") + self.assertEqual("offline", forest.availability()) + + with self.assertRaises(ValidationError): + forest.set_availability("foo") + + self.assertEqual("bar", forest.host()) + + self.assertEqual(ds.data_directory, forest.data_directory()) + + forest.set_database("foo") + self.assertEqual("foo", forest.database()) + + self.assertEqual(ds.fast_data_directory, forest.fast_data_directory()) + + 
self.assertEqual(ds.large_data_directory, forest.large_data_directory()) + + def test_create_forest(self): + conn = Connection(tc.hostname, HTTPDigestAuth(tc.admin, tc.password)) + + host = Host.list(conn)[0] + + forest = Forest("test-forest-simple-create", host=host, + large_data_directory=ds.large_data_directory, + fast_data_directory=ds.fast_data_directory, ) + forest.create(conn) + + forest = Forest.lookup(conn, "test-forest-simple-create") + + try: + self.assertIsNotNone(forest) + self.assertEqual("test-forest-simple-create", forest.forest_name()) + self.assertEqual(host, forest.host()) + self.assertEqual(ds.large_data_directory, forest.large_data_directory()) + self.assertEqual(ds.fast_data_directory, forest.fast_data_directory()) + finally: + forest.remove(conn) + +if __name__ == "__main__": + unittest.main(); diff --git a/test/hosts/__init__.py b/test/hosts/__init__.py new file mode 100644 index 0000000..cafdeb8 --- /dev/null +++ b/test/hosts/__init__.py @@ -0,0 +1,2 @@ +# -*- coding: utf-8 -*- +# Making the tests.databases tests package diff --git a/test/hosts/test_host.py b/test/hosts/test_host.py new file mode 100644 index 0000000..94ea795 --- /dev/null +++ b/test/hosts/test_host.py @@ -0,0 +1,41 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals, print_function, absolute_import + +# +# Copyright 2015 MarkLogic Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0# +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# File History +# ------------ +# +# Paul Hoehne 03/26/2015 Initial development +# + +import unittest +from marklogic.models import Connection, Host +from requests.auth import HTTPDigestAuth +from test.resources import TestConnection as tc + + +class TestHost(unittest.TestCase): + + def test_list_hosts(self): + conn = Connection(tc.hostname, HTTPDigestAuth(tc.admin, tc.password)) + + hosts = Host.list(conn) + self.assertGreater(len(hosts), 0) + self.assertIsNotNone(hosts[0]) + +if __name__ == "__main__": + unittest.main() diff --git a/test/privileges/__init__.py b/test/privileges/__init__.py new file mode 100644 index 0000000..cafdeb8 --- /dev/null +++ b/test/privileges/__init__.py @@ -0,0 +1,2 @@ +# -*- coding: utf-8 -*- +# Making the tests.databases tests package diff --git a/test/privileges/test_privilege.py b/test/privileges/test_privilege.py new file mode 100644 index 0000000..902a649 --- /dev/null +++ b/test/privileges/test_privilege.py @@ -0,0 +1,103 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals, print_function, absolute_import + +# +# Copyright 2015 MarkLogic Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0# +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# File History +# ------------ +# +# Norman Walsh 05/01/2015 Initial development +# + +import unittest +from marklogic.models import Connection, Role, Privilege +from test.resources import TestConnection as tc + +class TestPrivilege(unittest.TestCase): + + def test_list(self): + connection = Connection.make_connection(tc.hostname, tc.admin, tc.password) + + privileges = Privilege.list(connection) + + self.assertGreater(len(privileges), 300) + self.assertIn("execute|manage-admin", privileges) + + def test_lookup(self): + connection = Connection.make_connection(tc.hostname, tc.admin, tc.password) + + privilege = Privilege.lookup(connection, "manage-admin", "execute") + + self.assertIsNotNone(privilege) + self.assertEqual(privilege.privilege_name(), "manage-admin") + + def test_lookup_action(self): + connection = Connection.make_connection(tc.hostname, tc.admin, tc.password) + + privilege = Privilege.lookup(connection, kind="execute", \ + action="http://marklogic.com/xdmp/privileges/admin-module-write") + + self.assertIsNotNone(privilege) + self.assertEqual(privilege.privilege_name(), "admin-module-write") + + def test_create_privilege(self): + connection = Connection.make_connection(tc.hostname, tc.admin, tc.password) + new_privilege = Privilege("foo-privilege","http://example.com/","execute") + + self.assertEqual(new_privilege.privilege_name(), "foo-privilege") + + new_privilege.create(connection) + + privileges = Privilege.list(connection) + self.assertIn("execute|foo-privilege", privileges) + + new_privilege.delete(connection) + + def test_add_role(self): + privilege = Privilege("foo-privilege","http://example.com/","execute") + + privilege.add_role_name(u'manage-admin') + + role = privilege.role_names()[0] + self.assertEqual(u'manage-admin', role) + + def test_create_remove_privilege(self): + connection = Connection.make_connection(tc.hostname, tc.admin, tc.password) + privilege = Privilege("foo-privilege","http://example.com/","execute") + + 
privilege.create(connection)
+
+        the_privilege = Privilege.lookup(connection, "foo-privilege", "execute")
+        self.assertIsNotNone(the_privilege)
+
+        the_privilege.delete(connection)
+        the_privilege = Privilege.lookup(connection, "foo-privilege", "execute")
+        self.assertIsNone(the_privilege)
+
+    def test_save_privilege(self):
+        connection = Connection.make_connection(tc.hostname, tc.admin, tc.password)
+        privilege = Privilege("foo-privilege","http://example.com/","execute")
+        privilege.create(connection)
+
+        privilege.add_role_name("manage-user")
+        privilege.update(connection)
+
+        self.assertIn("manage-user", privilege.role_names())
+
+        privilege.delete(connection)
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/test/resources.py b/test/resources.py
new file mode 100644
index 0000000..fd2bc9d
--- /dev/null
+++ b/test/resources.py
@@ -0,0 +1,4 @@
+class TestConnection(object):
+    hostname = "localhost"
+    admin = "admin"
+    password = "admin"
diff --git a/test/roles/__init__.py b/test/roles/__init__.py
new file mode 100644
index 0000000..cafdeb8
--- /dev/null
+++ b/test/roles/__init__.py
@@ -0,0 +1,2 @@
+# -*- coding: utf-8 -*-
+# Making the test.roles tests package
diff --git a/test/roles/test_role.py b/test/roles/test_role.py
new file mode 100644
index 0000000..3537b8b
--- /dev/null
+++ b/test/roles/test_role.py
@@ -0,0 +1,120 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals, print_function, absolute_import
+
+#
+# Copyright 2015 MarkLogic Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# File History +# ------------ +# +# Paul Hoehne 04/03/2015 Initial development +# + +import unittest +from marklogic.models import Connection, Role, Privilege +from test.resources import TestConnection as tc + +class TestRole(unittest.TestCase): + + def test_list(self): + connection = Connection.make_connection(tc.hostname, tc.admin, tc.password) + + names = Role.list(connection) + + self.assertGreater(len(names), 65) + self.assertIn("admin", names) + + def test_lookup(self): + connection = Connection.make_connection(tc.hostname, tc.admin, tc.password) + + role = Role.lookup(connection, "admin") + + self.assertIsNotNone(role) + self.assertEqual(role.role_name(), "admin") + + def test_create_role(self): + new_role = Role("foo-role") + + self.assertEqual(new_role.role_name(), "foo-role") + + new_role.add_role_name("admin") + self.assertIn("admin", new_role.role_names()) + + def test_description(self): + role = Role("foo-role") + role.set_description("This is the foo role") + + self.assertEqual(role.description(), "This is the foo role") + + def test_add_privilege(self): + role = Role("foo-role") + + name = "foodle" + action = "http://marklogic.com/xdmp/privileges/foodle" + kind = "execute" + + role.add_privilege(name, kind) + + priv = role.privileges()[0] + self.assertEqual("execute|foodle",priv) + + def test_create_remove_role(self): + connection = Connection.make_connection(tc.hostname, tc.admin, tc.password) + role = Role("foo-role") + + role.create(connection) + + the_role = Role.lookup(connection, "foo-role") + self.assertIsNotNone(the_role) + + the_role.delete(connection) + the_role = Role.lookup(connection, "foo-role") + self.assertIsNone(the_role) + + def test_save_role(self): + connection = Connection.make_connection(tc.hostname, tc.admin, tc.password) + role = Role("foo-role") + + self.assertIsNone(role.create(connection).description()) + 
role.set_description("This is the foo role")
+
+        role.update(connection)
+
+        role = Role.lookup(connection, "foo-role")
+        self.assertEqual("This is the foo role", role.description())
+
+        role.delete(connection)
+
+    def test_roles(self):
+        role = Role("foo-role")
+
+        role.add_role_name("bar-role")
+        role.add_role_name("baz-role")
+
+        self.assertEqual(2, len(role.role_names()))
+        self.assertTrue("bar-role" in role.role_names())
+        self.assertTrue("baz-role" in role.role_names())
+
+    def test_privileges(self):
+        role = Role("foo-role")
+
+        role.add_privilege("bar-priv", "execute")
+        role.add_privilege("baz-priv", "execute")
+
+        self.assertEqual(2, len(role.privileges()))
+        self.assertEqual("execute|bar-priv", role.privileges()[0])
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/test/servers/__init__.py b/test/servers/__init__.py
new file mode 100644
index 0000000..cafdeb8
--- /dev/null
+++ b/test/servers/__init__.py
@@ -0,0 +1,2 @@
+# -*- coding: utf-8 -*-
+# Making the test.servers tests package
diff --git a/test/servers/test_server.py b/test/servers/test_server.py
new file mode 100644
index 0000000..7e238cf
--- /dev/null
+++ b/test/servers/test_server.py
@@ -0,0 +1,99 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals, print_function, absolute_import
+
+#
+# Copyright 2015 MarkLogic Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# +# File History +# ------------ +# +# Norman Walsh 05/01/2015 Initial development +# + +import unittest +from marklogic.models import Connection, Server, HttpServer, XdbcServer, OdbcServer, WebDAVServer +from test.resources import TestConnection as tc + +class TestServer(unittest.TestCase): + + def test_list(self): + connection = Connection.make_connection(tc.hostname, tc.admin, tc.password) + + names = Server.list(connection) + self.assertGreater(len(names), 3) + self.assertIn("Default|Manage", names) + + def test_lookup(self): + connection = Connection.make_connection(tc.hostname, tc.admin, tc.password) + + server = Server.lookup(connection, "Manage") + + self.assertIsNotNone(server) + self.assertEqual(server.server_name(), "Manage") + + def test_load(self): + connection = Connection.make_connection(tc.hostname, tc.admin, tc.password) + + server = HttpServer("Manage", "Default") + self.assertEqual(server.server_name(), "Manage") + self.assertIsNotNone(server.read(connection)) + self.assertEqual("http", server.server_type()) + + def test_create_http_server(self): + connection = Connection.make_connection(tc.hostname, tc.admin, tc.password) + server = HttpServer("foo-http", "Default", 10101, '/', 'Documents') + self.assertEqual(server.server_name(), "foo-http") + server.create(connection) + self.assertIsNotNone(server) + self.assertEqual("http", server.server_type()) + server.delete(connection) + server = Server.lookup(connection, "foo-http") + self.assertIsNone(server) + + def test_create_odbc_server(self): + connection = Connection.make_connection(tc.hostname, tc.admin, tc.password) + server = OdbcServer("foo-odbc", "Default", 10101, '/', 'Documents') + self.assertEqual(server.server_name(), "foo-odbc") + server.create(connection) + self.assertIsNotNone(server) + self.assertEqual("odbc", server.server_type()) + server.delete(connection) + server = Server.lookup(connection, "foo-odbc") + self.assertIsNone(server) + + def test_create_xdbc_server(self): + 
connection = Connection.make_connection(tc.hostname, tc.admin, tc.password) + server = XdbcServer("foo-xdbc", "Default", 10101, '/', 'Documents') + self.assertEqual(server.server_name(), "foo-xdbc") + server.create(connection) + self.assertIsNotNone(server) + self.assertEqual("xdbc", server.server_type()) + server.delete(connection) + server = Server.lookup(connection, "foo-xdbc") + self.assertIsNone(server) + + def test_create_webdav_server(self): + connection = Connection.make_connection(tc.hostname, tc.admin, tc.password) + server = WebDAVServer("foo-webdav", "Default", 10101, '/', 'Documents') + self.assertEqual(server.server_name(), "foo-webdav") + server.create(connection) + self.assertIsNotNone(server) + self.assertEqual("webdav", server.server_type()) + server.delete(connection) + server = Server.lookup(connection, "foo-webdav") + self.assertIsNone(server) + +if __name__ == "__main__": + unittest.main() diff --git a/test/settings.py b/test/settings.py new file mode 100644 index 0000000..986d22d --- /dev/null +++ b/test/settings.py @@ -0,0 +1,16 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals, print_function, absolute_import + +# Copyright 2015 MarkLogic Corporation + +""" +Edit this file to create test settings applicable to your system. +The string must represent a complete path to an existing directory on +the server. You will find the current root data directory on the ML +Admin Forests Summary page. 
+""" + +class DatabaseSettings(object): + large_data_directory = "" + fast_data_directory = "" + data_directory = "" diff --git a/test/users/__init__.py b/test/users/__init__.py new file mode 100644 index 0000000..cafdeb8 --- /dev/null +++ b/test/users/__init__.py @@ -0,0 +1,2 @@ +# -*- coding: utf-8 -*- +# Making the tests.databases tests package diff --git a/test/users/test_user.py b/test/users/test_user.py new file mode 100644 index 0000000..d1897e6 --- /dev/null +++ b/test/users/test_user.py @@ -0,0 +1,102 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals, print_function, absolute_import + +# +# Copyright 2015 MarkLogic Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0# +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# File History +# ------------ +# +# Norman Walsh 05/01/2015 Initial development +# + +import unittest +from marklogic.models import Connection, User +from test.resources import TestConnection as tc + +class TestUser(unittest.TestCase): + + def test_list(self): + connection = Connection.make_connection(tc.hostname, tc.admin, tc.password) + + users = User.list(connection) + + self.assertGreater(len(users), 2) + self.assertIn("nobody", users) + + def test_lookup(self): + connection = Connection.make_connection(tc.hostname, tc.admin, tc.password) + + user = User.lookup(connection, "nobody") + + self.assertIsNotNone(user) + self.assertEqual(user.user_name(), "nobody") + + def test_create_user(self): + connection = Connection.make_connection(tc.hostname, tc.admin, tc.password) + new_user = User("foo-user", "password") + + self.assertEqual(new_user.user_name(), "foo-user") + + new_user.create(connection) + + users = User.list(connection) + + self.assertIn("foo-user", users) + new_user.delete(connection) + + def test_description(self): + user = User("foo-user") + user.set_description("This is the foo user") + + self.assertEqual(user.description(), "This is the foo user") + + def test_add_role_name(self): + user = User("foo-user") + + user.add_role_name(u'manage-user') + + role = user.role_names()[0] + self.assertEqual(u'manage-user', role) + + def test_create_remove_user(self): + connection = Connection.make_connection(tc.hostname, tc.admin, tc.password) + user = User("foo-user", "password") + + user.create(connection) + + the_user = User.lookup(connection, "foo-user") + self.assertIsNotNone(the_user) + + the_user.delete(connection) + the_user = User.lookup(connection, "foo-user") + self.assertIsNone(the_user) + + def test_save_user(self): + connection = Connection.make_connection(tc.hostname, tc.admin, tc.password) + user = User("foo-user", "password") + + self.assertIsNone(user.create(connection).description()) + user.set_description("This is the foo user") + + 
user.update(connection) + + user = User.lookup(connection, "foo-user") + self.assertEqual("This is the foo user", user.description()) + + user.delete(connection) + +if __name__ == "__main__": + unittest.main()